{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "execution_count": 6, "metadata": { "id": "cCXb6F65XhI_" }, "outputs": [], "source": [ "import logging\n", "from abc import ABCMeta, abstractmethod\n", "from dataclasses import dataclass, replace\n", "from math import cos, pi, sqrt\n", "from typing import Any, Dict, List, Optional, Tuple, Union\n", "\n", "import torch\n", "import torch.distributed as dist\n", "import torch.nn as nn\n", "from torch.distributed.fsdp import FullyShardedDataParallel\n", "from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n", "from torch.optim.optimizer import Optimizer as OptimizerBase\n", "\n", "#from . import LayerNormBase\n", "#from .config import OptimizerType, SchedulerConfig, SchedulerType, TrainConfig\n", "#from .torch_util import get_default_device, is_distributed\n", "\n", "\"\"\" Simulate import from .torch_util \"\"\"\n", "\n", "import gc\n", "import os\n", "from typing import Optional, TypeVar\n", "\n", "import torch\n", "import torch.distributed as dist\n", "\n", "T = TypeVar(\"T\")\n", "\n", "\n", "def is_distributed() -> bool:\n", " return dist.is_available() and dist.is_initialized()\n", "\n", "def get_default_device() -> torch.device:\n", " if torch.cuda.is_available() and torch.cuda.is_initialized():\n", " return torch.device(\"cuda\")\n", " elif torch.backends.mps.is_available():\n", " return torch.device(\"mps\")\n", " else:\n", " return torch.device(\"cpu\")\n", "\n", "\n", "\"\"\" end of simulation \"\"\"\n", "\n", "\n", "\n", "\n", "__all__ = [\n", " \"Optimizer\",\n", " \"LionW\",\n", " \"AdamW\",\n", " \"MuonW\",\n", " \"Scheduler\",\n", " \"CosWithWarmup\",\n", " \"LinearWithWarmup\",\n", " \"InvSqrtWithWarmup\",\n", " \"MaxScheduler\",\n", " \"ConstantScheduler\",\n", " \"CosLinearEnvelope\",\n", " \"BoltOnWarmupScheduler\",\n", " \"build_optimizer\",\n", " \"build_scheduler\",\n", "]\n", "\n", "\n", "log = logging.getLogger(__name__)" ] }, { "cell_type": "code", "source": [ "class Optimizer(OptimizerBase):\n", " def __init__(self, *args, record_update_metrics: bool = False, selective_updates: bool = False, **kwargs):\n", " super().__init__(*args, **kwargs)\n", " self._record_update_metrics = record_update_metrics\n", " self._collecting_metrics = False\n", " self._selective_updates = selective_updates\n", "\n", " def _clean_param_name(self, name: str) -> str:\n", " return name.replace(\"_fsdp_wrapped_module.\", \"\")\n", "\n", " @torch.no_grad()\n", " def clip_grads_and_collect_metrics(\n", " self,\n", " global_step: int,\n", " collect_param_metrics: bool = True,\n", " process_group: Optional[dist.ProcessGroup] = None,\n", " device: Optional[torch.device] = None,\n", " ) -> Dict[str, torch.Tensor]:\n", " \"\"\"\n", " Clips gradients for every group that has the field `max_grad_norm`.\n", " At the same time collect metrics for each parameter and its gradient.\n", " \"\"\"\n", " self._collecting_metrics = collect_param_metrics\n", " device = get_default_device() if device is None else device\n", "\n", " # NOTE (epwalsh): during distributed training we're making an assumption that the order of\n", " # the param groups and the params within each group are the same across all ranks.\n", " # This is justified since we initialize the parameter groups in every rank by iterating over\n", " # `module.parameters()` or `module.named_modules()` / `module.named_parameters()`, each of which\n", " # provides a consistent order.\n", " # For each parameter (with a gradient) we'll collect:\n", " # - min, max, avg, norm of the param itself\n", " # - min, max, avg, norm of the param's gradient\n", " # - min, max, avg, norm of any additional per-parameter optimizer state metrics returned from\n", " # `self.get_state_for_param()`.\n", " # Afterwards we'll reduce these all over all ranks.\n", " per_param_min_metrics: List[torch.Tensor] = []\n", " per_param_max_metrics: List[torch.Tensor] = []\n", " per_param_sum_metrics: List[torch.Tensor] = []\n", " per_param_norm_metrics: List[torch.Tensor] = []\n", " per_param_numel_metrics: List[torch.Tensor] = []\n", "\n", " per_param_min_metric_names: List[str] = []\n", " per_param_max_metric_names: List[str] = []\n", " per_param_avg_metric_names: List[str] = []\n", " per_param_norm_metric_names: List[str] = []\n", "\n", " dst_rank = 0\n", " if process_group is not None:\n", " dst_rank = dist.get_global_rank(process_group, 0)\n", "\n", " #######################################################################\n", " # part 1: collect metrics locally\n", " #######################################################################\n", " for group in self.param_groups:\n", " for name, p in zip(group[\"param_names\"], group[\"params\"]):\n", " name = self._clean_param_name(name)\n", " # Always need to collect the norm of gradients for clipping, even if we're not collecting\n", " # other metrics.\n", " tensors: List[Optional[torch.Tensor]] = [p.grad]\n", " prefixes: List[str] = [f\"grad/{name}\"]\n", " if collect_param_metrics:\n", " state = self.get_state_for_param(p)\n", " sorted_state_keys = sorted([k for k in state.keys()])\n", " tensors.extend([p] + [state[key] for key in sorted_state_keys])\n", " prefixes.extend([f\"param/{name}\"] + [f\"{key}/{name}\" for key in sorted_state_keys])\n", " assert len(tensors) == len(prefixes)\n", "\n", " # Get min, max, avg, and norm for all `tensors` associated with the parameter.\n", " for x, prefix in zip(tensors, prefixes):\n", " # grad or state tensors could be none for params that have their shards completely on\n", " # other ranks.\n", " if x is not None and x.numel() > 0:\n", " if collect_param_metrics:\n", " x_abs = x.abs()\n", " per_param_min_metrics.append(x_abs.min().unsqueeze(0).to(dtype=torch.float32))\n", " per_param_max_metrics.append(x_abs.max().unsqueeze(0).to(dtype=torch.float32))\n", " per_param_sum_metrics.append(x.sum().unsqueeze(0).to(dtype=torch.float32))\n", " per_param_numel_metrics.append(\n", " torch.tensor([x.numel()], device=device, dtype=torch.float32)\n", " )\n", " per_param_norm_metrics.append(\n", " torch.linalg.vector_norm(x, 2.0, dtype=torch.float32).unsqueeze(0)\n", " )\n", " else:\n", " if collect_param_metrics:\n", " per_param_min_metrics.append(\n", " torch.tensor([float(\"inf\")], device=device, dtype=torch.float32)\n", " )\n", " per_param_max_metrics.append(torch.tensor([0.0], device=device, dtype=torch.float32))\n", " per_param_sum_metrics.append(torch.tensor([0.0], device=device, dtype=torch.float32))\n", " per_param_numel_metrics.append(torch.tensor([0.0], device=device, dtype=torch.float32))\n", " per_param_norm_metrics.append(torch.tensor([0.0], device=device, dtype=torch.float32))\n", " if collect_param_metrics:\n", " per_param_min_metric_names.append(f\"{prefix}.min\")\n", " per_param_max_metric_names.append(f\"{prefix}.max\")\n", " per_param_avg_metric_names.append(f\"{prefix}.avg\")\n", " per_param_norm_metric_names.append(f\"{prefix}.norm\")\n", "\n", " assert (\n", " len(per_param_min_metrics)\n", " == len(per_param_min_metric_names)\n", " == len(per_param_max_metrics)\n", " == len(per_param_max_metric_names)\n", " == len(per_param_sum_metrics)\n", " == len(per_param_numel_metrics)\n", " == len(per_param_avg_metric_names)\n", " )\n", " assert len(per_param_norm_metrics) == len(per_param_norm_metric_names)\n", "\n", " def is_grad_norm_metric(metric_name: str) -> bool:\n", " return metric_name.startswith(\"grad/\") and metric_name.endswith(\".norm\")\n", "\n", " #######################################################################\n", " # part 2: reduce metrics over ranks\n", " #######################################################################\n", " param_group_sharded = False\n", " for group in self.param_groups:\n", " param_group_sharded = param_group_sharded or group.get(\"sharded\", False)\n", "\n", " total_grad_norm: torch.Tensor\n", " per_param_avg_metrics: List[torch.Tensor] = []\n", " if is_distributed() and param_group_sharded:\n", " # Reduce metrics across all ranks. Note that we can use a `reduce` for most cases\n", " # instead of an `all_reduce`, but we need `all_reduce` for norms so that all ranks\n", " # get the right value for gradient norms so they can clip correctly.\n", " # Reduce mins.\n", " if per_param_min_metrics:\n", " all_mins = torch.cat(per_param_min_metrics).to(device)\n", " dist.reduce(all_mins, dst_rank, op=dist.ReduceOp.MIN, group=process_group)\n", " per_param_min_metrics = all_mins.split(1)\n", " # Reduce maxs.\n", " if per_param_max_metrics:\n", " all_maxs = torch.cat(per_param_max_metrics).to(device)\n", " dist.reduce(all_maxs, dst_rank, op=dist.ReduceOp.MAX, group=process_group)\n", " per_param_max_metrics = all_maxs.split(1)\n", " # Reduce sums or just norms.\n", " all_norms = torch.cat(per_param_norm_metrics).to(device) ** 2.0\n", " if per_param_sum_metrics and per_param_numel_metrics:\n", " all_sums = torch.cat(per_param_sum_metrics).to(device)\n", " all_numels = torch.cat(per_param_numel_metrics).to(device)\n", " all_sums_norms_numels = torch.cat(\n", " [all_sums.unsqueeze(0), all_norms.unsqueeze(0), all_numels.unsqueeze(0)], dim=0\n", " )\n", " dist.all_reduce(all_sums_norms_numels, op=dist.ReduceOp.SUM, group=process_group)\n", " all_sums, all_norms, all_numels = all_sums_norms_numels.split(1)\n", " # Get averages.\n", " # NOTE: could get infs for non-rank0 processes but that's okay.\n", " per_param_avg_metrics = (all_sums / all_numels).squeeze(0).split(1)\n", " else:\n", " dist.all_reduce(all_norms, op=dist.ReduceOp.SUM, group=process_group)\n", " grad_norm_metric_mask = torch.tensor(\n", " [float(is_grad_norm_metric(n)) for n in per_param_norm_metric_names], device=all_norms.device\n", " )\n", " total_grad_norm = (all_norms * grad_norm_metric_mask).sum() ** 0.5\n", " per_param_norm_metrics = (all_norms ** (0.5)).squeeze(0).split(1)\n", " else:\n", " total_grad_norm = (\n", " torch.cat(\n", " [\n", " m\n", " for m, n in zip(per_param_norm_metrics, per_param_norm_metric_names)\n", " if is_grad_norm_metric(n)\n", " ]\n", " )\n", " ** 2.0\n", " ).sum() ** 0.5\n", " per_param_avg_metrics = [x / n for x, n in zip(per_param_sum_metrics, per_param_numel_metrics)]\n", "\n", " assert len(per_param_avg_metrics) == len(per_param_avg_metric_names)\n", "\n", " # Collect all metrics into a single dict.\n", " all_metrics: Dict[str, torch.Tensor] = {}\n", " if collect_param_metrics:\n", " for metric_name, metric in zip(per_param_min_metric_names, per_param_min_metrics):\n", " all_metrics[metric_name] = metric.squeeze(0)\n", " for metric_name, metric in zip(per_param_max_metric_names, per_param_max_metrics):\n", " all_metrics[metric_name] = metric.squeeze(0)\n", " for metric_name, metric in zip(per_param_avg_metric_names, per_param_avg_metrics):\n", " all_metrics[metric_name] = metric.squeeze(0)\n", "\n", " for metric_name, metric in zip(per_param_norm_metric_names, per_param_norm_metrics):\n", " all_metrics[metric_name] = metric.squeeze(0)\n", " all_metrics[\"total_grad_norm\"] = total_grad_norm\n", "\n", " #######################################################################\n", " # part 3: clip grads\n", " #######################################################################\n", " num_grads_clipped = 0\n", " num_eligible_grads = 0\n", " for group in self.param_groups:\n", " if (max_norm_ratio := group.get(\"max_grad_norm_ratio\")) is not None:\n", " num_clipped = self._do_adaptive_clipping(\n", " group, max_norm_ratio, global_step, all_metrics, collect_param_metrics=collect_param_metrics\n", " )\n", " elif (max_norm := group.get(\"max_grad_norm\")) is not None:\n", " num_clipped = self._do_global_fixed_clipping(\n", " group, max_norm, all_metrics, collect_param_metrics=collect_param_metrics\n", " )\n", " else:\n", " # No clipping needed.\n", " continue\n", " num_eligible_grads += len(group[\"params\"])\n", " if num_clipped is not None:\n", " num_grads_clipped += num_clipped\n", "\n", " if collect_param_metrics:\n", " if num_eligible_grads > 0:\n", " clipping_rate = torch.tensor(num_grads_clipped / num_eligible_grads, device=\"cpu\")\n", " else:\n", " clipping_rate = torch.tensor(0.0, device=\"cpu\")\n", " all_metrics[\"clipping_rate\"] = clipping_rate\n", "\n", " # total_grad_norm is computed at all steps, even when collect_param_metrics is set to False\n", " return all_metrics\n", "\n", " @torch.no_grad()\n", " def _do_adaptive_clipping(\n", " self,\n", " group: Dict[str, Any],\n", " max_norm_ratio: float,\n", " global_step: int,\n", " all_metrics: Dict[str, torch.Tensor],\n", " collect_param_metrics: bool = True,\n", " device: Optional[torch.device] = None,\n", " ) -> Optional[int]:\n", " \"\"\"\n", " Do adaptive gradient clipping on a param group.\n", "\n", " If ``collect_param_metrics`` is ``True`` this will return the total number of gradients clipped.\n", " \"\"\"\n", " device = get_default_device() if device is None else device\n", " num_grads_clipped = 0\n", " # We'll use the bigger of beta1 and beta2 to update the exponential average of the norm of\n", " # the gradient (a scalar), not to be confused with the exponential average of the gradient.\n", " # TODO (epwalsh): handle optimizers that don't have betas.\n", " beta1, beta2 = group[\"betas\"]\n", " beta = max(beta1, beta2)\n", " for name, p in zip(group[\"param_names\"], group[\"params\"]):\n", " name = self._clean_param_name(name)\n", " grad_norm = all_metrics.get(f\"grad/{name}.norm\")\n", " if grad_norm is None:\n", " continue\n", "\n", " # Get or initialize the exponential average of grad norm.\n", " # TODO: The way we have it right now, every rank tracks the `grad_norm_exp_avg` of every parameter,\n", " # even parameters for which the corresponding local shard is empty. This has the potential to\n", " # cause some issues with the optimizer, as we ran into with https://github.com/allenai/LLM/pull/372.\n", " # So we should consider changing how we do this at some point so that we don't add any state\n", " # to parameters for which the local shard is empty. That would probably add extra distributed\n", " # communication, at least on steps where we have to log (i.e. when `collect_param_metrics=True`).\n", " state = self.state[p]\n", " grad_norm_exp_avg = state.get(\"grad_norm_exp_avg\")\n", " if grad_norm_exp_avg is None:\n", " grad_norm_exp_avg = grad_norm.clone().to(device)\n", " # We don't want to add anything to `state` until `state` has been initialized, otherwise\n", " # this will crash some optimizers which rely on checking `len(state)`. The downside here\n", " # is that we won't start tracking `grad_norm_exp_avg` until the 2nd training step.\n", " if global_step > 1:\n", " state[\"grad_norm_exp_avg\"] = grad_norm_exp_avg\n", "\n", " max_allowed_norm = max_norm_ratio * grad_norm_exp_avg\n", " clip_coef = max_allowed_norm / (grad_norm + 1e-6)\n", "\n", " # Clip the gradients and update the exponential average.\n", " # Note that multiplying by the clamped coefficient is meaningless when it is\n", " # equal to 1, but it avoids the host-device sync that would result from `if clip_coef_clamped < 1`.\n", " clip_coef_clamped = torch.clamp(clip_coef, max=1.0)\n", " if p.grad is not None:\n", " # p.grad could be none for some ranks when using FSDP.\n", " p.grad.detach().mul_(clip_coef_clamped.to(p.grad.device, p.grad.dtype))\n", "\n", " # Update the exponential average of the norm of the gradient with the clipped norm of the gradient.\n", " grad_norm_exp_avg.lerp_((grad_norm * clip_coef_clamped).to(grad_norm_exp_avg.device), 1 - beta)\n", " # Alternative: update with the *unclipped* norm of the gradient.\n", " # grad_norm_exp_avg.lerp_(grad_norm.to(grad_norm_exp_avg.device), 1 - beta)\n", "\n", " if collect_param_metrics:\n", " # Can't avoid host-device sync here.\n", " if clip_coef_clamped < 1.0:\n", " num_grads_clipped += 1\n", " all_metrics[f\"grad_norm_exp_avg/{name}\"] = grad_norm_exp_avg\n", " return num_grads_clipped if collect_param_metrics else None\n", "\n", " @torch.no_grad()\n", " def _do_global_fixed_clipping(\n", " self,\n", " group: Dict[str, Any],\n", " max_norm: float,\n", " all_metrics: Dict[str, torch.Tensor],\n", " collect_param_metrics: bool = True,\n", " device: Optional[torch.device] = None,\n", " ) -> Optional[int]:\n", " \"\"\"\n", " Do global fixed gradient clipping on a param group.\n", "\n", " If ``collect_param_metrics`` is ``True`` this will return the total number of gradients clipped.\n", " \"\"\"\n", " device = get_default_device() if device is None else device\n", " total_grad_norm = all_metrics[\"total_grad_norm\"]\n", " clip_coef = max_norm / (total_grad_norm.to(device) + 1e-6)\n", " clip_coef_clamped = torch.clamp(clip_coef, max=1.0)\n", " num_grads_clipped: Optional[int] = None\n", " if collect_param_metrics:\n", " # Can't avoid host-device sync here.\n", " if clip_coef_clamped < 1.0:\n", " num_grads_clipped = len(group[\"params\"])\n", " for p in group[\"params\"]:\n", " # Clip the gradients.\n", " # Note that multiplying by the clamped coefficient is meaningless when it is\n", " # equal to 1, but it avoids the host-device sync that would result from `if clip_coef_clamped < 1`.\n", " if p.grad is not None:\n", " # p.grad could be none for some ranks when using FSDP.\n", " p.grad.detach().mul_(clip_coef_clamped.to(p.grad.device, p.grad.dtype))\n", " return num_grads_clipped\n", "\n", " def get_post_step_metrics(\n", " self, module: nn.Module, process_group: Optional[dist.ProcessGroup] = None\n", " ) -> Dict[str, torch.Tensor]:\n", " del module, process_group\n", " return {}\n", "\n", " def get_state_for_param(self, param: nn.Parameter) -> Dict[str, Optional[torch.Tensor]]:\n", " del param\n", " return {}" ], "metadata": { "id": "o9dFXoh2YSVn" }, "execution_count": 7, "outputs": [] }, { "cell_type": "code", "source": [ "class MuonW(Optimizer):\n", " \"\"\"\n", " Distributed implementation of Muon optimizer with weight decay.\n", "\n", " Muon applies orthogonalization to matrix parameter(2D+) updates using\n", " Newton-Schulz orthogonalization iterations to compute the zeroth power. For non-matrix\n", " parameters(embeddings, heads, bias), it uses AdamW as a backup.\n", "\n", " \"\"\"\n", "\n", " def __init__(\n", " self,\n", " params,\n", " lr=0.01,\n", " betas=(0.95, 0.95), # Muon uses single momentum param\n", " weight_decay=0.0,\n", " ns_steps=5,\n", " nesterov=True,\n", " eps=1e-8, # For AdamW backup\n", " record_update_metrics=False,\n", " selective_updates=False,\n", " device=None,\n", " ):\n", " if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):\n", " # User provided param groups\n", " for param_group in params:\n", " if 'use_muon' not in param_group:\n", " param_group['use_muon'] = True\n", " else:\n", " # Convert single params list to a param group\n", " params = [{'params': params, 'use_muon': True}]\n", "\n", " defaults = dict(\n", " lr=lr,\n", " betas=betas,\n", " weight_decay=weight_decay,\n", " ns_steps=ns_steps,\n", " nesterov=nesterov,\n", " eps=eps,\n", " use_muon=True, # Default to using Muon\n", " )\n", " super().__init__(\n", " params,\n", " defaults,\n", " record_update_metrics=record_update_metrics,\n", " selective_updates=selective_updates\n", " )\n", " self._device = device\n", " self._update_norms = None\n", " self._update_maxs = None\n", " self._update_param_names = None\n", "\n", " def zeropower_via_newtonschulz5(self, G, steps: int):\n", " \"\"\"\n", " Newton-Schulz iteration to compute the zeroth power / orthogonalization of G.\n", " \"\"\"\n", " assert G.ndim >= 2\n", " a, b, c = (3.4445, -4.7750, 2.0315)\n", " X = G.bfloat16()\n", " if G.size(-2) > G.size(-1):\n", " X = X.mT\n", "\n", " # Ensure spectral norm is at most 1\n", " X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)\n", " # Perform the NS iterations\n", " for _ in range(steps):\n", " A = X @ X.mT\n", " B = b * A + c * A @ A\n", " X = a * X + B @ X\n", "\n", " if G.size(-2) > G.size(-1):\n", " X = X.mT\n", " return X\n", "\n", " def get_state_for_param(self, param: nn.Parameter) -> Dict[str, Optional[torch.Tensor]]:\n", " \"\"\"Return optimizer state for a parameter.\"\"\"\n", " state = self.state[param]\n", " if not state:\n", " return {}\n", "\n", " result = {}\n", " if 'momentum_buffer' in state:\n", " result['momentum_buffer'] = state['momentum_buffer']\n", " if 'exp_avg' in state:\n", " result['exp_avg'] = state['exp_avg']\n", " if 'exp_avg_sq' in state:\n", " result['exp_avg_sq'] = state['exp_avg_sq']\n", "\n", " return result\n", "\n", " @torch.no_grad()\n", " def step(self, closure=None):\n", " \"\"\"Perform a single optimization step.\"\"\"\n", " if closure is not None:\n", " with torch.enable_grad():\n", " closure()\n", "\n", " device = get_default_device() if self._device is None else self._device\n", " update_norms = []\n", " update_maxs = []\n", " update_param_names = []\n", "\n", " collecting_metrics = self._collecting_metrics and self._record_update_metrics\n", "\n", " for group in self.param_groups:\n", " lr = group['lr']\n", " weight_decay = group['weight_decay']\n", " beta1, beta2 = group['betas']\n", " ns_steps = group['ns_steps']\n", " nesterov = group['nesterov']\n", " eps = group['eps']\n", " use_muon = group['use_muon']\n", "\n", " for name, p in zip(group[\"param_names\"], group[\"params\"]):\n", " name = self._clean_param_name(name)\n", "\n", " if p.grad is None:\n", " if collecting_metrics:\n", " update_param_names.append(name)\n", " update_norms.append(torch.tensor([0.0], device=device))\n", " update_maxs.append(torch.tensor([0.0], device=device))\n", " continue\n", "\n", " # Apply weight decay\n", " #mask = p.grad != 0 if self._selective_updates else 1\n", " mask = (p.grad != 0) if self._selective_updates else torch.ones_like(p, dtype=torch.bool)\n", " p.mul_(1 - mask * (lr * weight_decay))\n", "\n", " grad = p.grad\n", " state = self.state[p]\n", "\n", " # Determine whether to use Muon or AdamW for this parameter\n", " # We use Muon for matrix parameters unless explicitly disabled\n", " should_use_muon = use_muon and p.ndim >= 2 and not ('embed' in name.lower() or 'head' in name.lower())\n", "\n", " if should_use_muon:\n", " # --- Muon Update Logic ---\n", "\n", " # Initialize momentum buffer if needed\n", " if 'momentum_buffer' not in state:\n", " state['momentum_buffer'] = torch.zeros_like(grad)\n", " momentum_buffer = state['momentum_buffer']\n", "\n", " # Update momentum\n", " momentum_buffer.lerp_(grad, mask * (1 - beta1))\n", "\n", " # Compute update\n", " if nesterov:\n", " update = momentum_buffer * beta1 + grad * (1 - beta1)\n", " else:\n", " update = momentum_buffer.clone()\n", "\n", " if isinstance(mask, torch.Tensor):\n", " update.mul_(mask)\n", "\n", " # Handle conv filters\n", " orig_shape = update.shape\n", " if update.ndim == 4:\n", " update = update.view(update.shape[0], -1)\n", "\n", " # Apply Newton-Schulz\n", " update = self.zeropower_via_newtonschulz5(update, steps=ns_steps)\n", "\n", " # Scale update\n", " update *= max(1, grad.size(-2) / grad.size(-1)) ** 0.5\n", "\n", " # Reshape if needed\n", " if len(orig_shape) == 4:\n", " update = update.view(orig_shape)\n", "\n", " else:\n", " # --- AdamW Update Logic ---\n", "\n", " # Initialize momentum buffers if needed\n", " if 'exp_avg' not in state:\n", " state['exp_avg'] = torch.zeros_like(grad)\n", " state['exp_avg_sq'] = torch.zeros_like(grad)\n", " state['step'] = 0\n", "\n", " # Update step count\n", " state['step'] += 1\n", " step = state['step']\n", "\n", " # Update momentum buffers\n", " state['exp_avg'].lerp_(grad, mask * (1 - beta1))\n", " state['exp_avg_sq'].mul_(1 - mask * (1 - beta2)).addcmul_(grad, grad, value=1 - beta2)\n", "\n", " # Bias correction\n", " bias_correction1 = 1 - beta1 ** step\n", " bias_correction2 = 1 - beta2 ** step\n", "\n", " # Compute AdamW update\n", " denom = (state['exp_avg_sq'].sqrt() / math.sqrt(bias_correction2)).add_(eps)\n", " update = state['exp_avg'] / bias_correction1 / denom\n", "\n", " if isinstance(mask, torch.Tensor):\n", " update.mul_(mask)\n", "\n", " # Apply update\n", " p.add_(update, alpha=-lr)\n", "\n", " # Collect metrics\n", " if collecting_metrics:\n", " update_param_names.append(name)\n", " update_norms.append(torch.linalg.vector_norm(update, 2.0, dtype=torch.float32).unsqueeze(0))\n", " update_maxs.append(update.abs().max().unsqueeze(0))\n", "\n", " # Store metrics\n", " if collecting_metrics:\n", " self._update_norms = update_norms\n", " self._update_maxs = update_maxs\n", " self._update_param_names = update_param_names\n", "\n", " return None\n", "\n", " def get_post_step_metrics(\n", " self, module: nn.Module, process_group: Optional[dist.ProcessGroup] = None\n", " ) -> Dict[str, torch.Tensor]:\n", " \"\"\"Get metrics about the optimization step.\"\"\"\n", " if not (self._record_update_metrics and self._collecting_metrics):\n", " return {}\n", "\n", " device = get_default_device() if self._device is None else self._device\n", " dst_rank = 0\n", " if process_group is not None:\n", " dst_rank = dist.get_global_rank(process_group, 0)\n", "\n", " param_names = self._update_param_names\n", " update_norms = self._update_norms\n", " update_maxs = self._update_maxs\n", "\n", " if param_names is None or update_norms is None or update_maxs is None:\n", " return {}\n", "\n", " # Reduce metrics if needed\n", " if is_distributed() and isinstance(module, FullyShardedDataParallel):\n", " # Reduce norms\n", " all_norms = torch.cat(update_norms).to(device) ** 2.0\n", " dist.reduce(all_norms, dst_rank, op=dist.ReduceOp.SUM, group=process_group)\n", " update_norms = (all_norms ** (0.5)).squeeze(0).split(1)\n", "\n", " # Reduce maxs\n", " all_maxs = torch.cat(update_maxs).to(device)\n", " dist.reduce(all_maxs, dst_rank, op=dist.ReduceOp.MAX, group=process_group)\n", " update_maxs = all_maxs.split(1)\n", "\n", " # Collect metrics\n", " metrics = {}\n", " for param_name, update_norm, update_max in zip(param_names, update_norms, update_maxs):\n", " metrics[f\"update/{param_name}.norm\"] = update_norm.squeeze(0)\n", " metrics[f\"update/{param_name}.max\"] = update_max.squeeze(0)\n", "\n", " # Reset stored metrics\n", " self._update_norms = None\n", " self._update_maxs = None\n", " self._update_param_names = None\n", "\n", " return metrics" ], "metadata": { "id": "UgBBhlu8YSOD" }, "execution_count": 9, "outputs": [] }, { "cell_type": "code", "source": [], "metadata": { "id": "apYTNxvcYSFf" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## testing suit" ], "metadata": { "id": "C7qri20wY61B" } }, { "cell_type": "code", "source": [ "# Quick debug test to see if Muon is actually updating\n", "import torch\n", "import torch.nn as nn\n", "\n", "model = nn.Linear(10, 5, bias=False)\n", "optimizer = MuonW([{'params': model.parameters(), 'param_names': ['weight']}], lr=0.1)\n", "\n", "# Initial weight\n", "init_weight = model.weight.data.clone()\n", "\n", "# Create gradient\n", "x = torch.randn(32, 10)\n", "y = model(x)\n", "loss = y.sum()\n", "loss.backward()\n", "\n", "print(f\"Gradient norm: {model.weight.grad.norm():.4f}\")\n", "\n", "# Step\n", "optimizer.step()\n", "\n", "# Check update\n", "weight_change = (model.weight.data - init_weight).norm()\n", "print(f\"Weight change: {weight_change:.4f}\")\n", "\n", "if weight_change < 1e-6:\n", " print(\"WARNING: Weights barely changed - check Newton-Schulz implementation\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "JsLd9EUbYfMw", "outputId": "447510b5-446c-48da-b10f-5ee35d1e137e" }, "execution_count": 12, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Gradient norm: 40.4564\n", "Weight change: 0.0680\n" ] } ] }, { "cell_type": "code", "source": [ "import math\n", "\n", "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import numpy as np\n", "from typing import Dict, Optional\n", "import unittest\n", "from unittest.mock import MagicMock, patch\n", "\n", "# Mock the required imports for testing\n", "class MockOptimizer:\n", " \"\"\"Mock base optimizer for testing\"\"\"\n", " def __init__(self, params, defaults, **kwargs):\n", " self.param_groups = []\n", " self.state = {}\n", " self._collecting_metrics = False\n", " self._record_update_metrics = False\n", "\n", " if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):\n", " for group in params:\n", " param_group = {**defaults, **group}\n", " self.param_groups.append(param_group)\n", " else:\n", " self.param_groups = [{'params': list(params), **defaults}]\n", "\n", " def _clean_param_name(self, name):\n", " return name.replace(\"_fsdp_wrapped_module.\", \"\")\n", "\n", "def get_default_device():\n", " return torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "def is_distributed():\n", " return False\n", "\n", "# Insert your MuonW class here (copy from document 4)\n", "# For testing purposes, inherit from MockOptimizer instead of Optimizer\n", "\n", "class TestMuonW(unittest.TestCase):\n", " \"\"\"Test cases for MuonW optimizer\"\"\"\n", "\n", " def setUp(self):\n", " \"\"\"Set up test fixtures\"\"\"\n", " torch.manual_seed(42)\n", " self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", " def test_matrix_param_uses_muon(self):\n", " \"\"\"Test that matrix parameters use Muon update\"\"\"\n", " # Create a simple model with matrix parameter\n", " model = nn.Linear(10, 5)\n", " model.to(self.device)\n", "\n", " # Add parameter names\n", " params = [{'params': model.parameters(),\n", " 'param_names': ['weight', 'bias']}]\n", "\n", " optimizer = MuonW(params, lr=0.01)\n", "\n", " # Create dummy loss and backward\n", " x = torch.randn(32, 10, device=self.device)\n", " y = model(x)\n", " loss = y.sum()\n", " loss.backward()\n", "\n", " # Check initial state\n", " weight_state_before = model.weight.data.clone()\n", "\n", " # Step\n", " optimizer.step()\n", "\n", " # Verify weight changed (Muon was applied)\n", " assert not torch.allclose(weight_state_before, model.weight.data)\n", "\n", " # Check that momentum buffer was created for weight\n", " assert 'momentum_buffer' in optimizer.state[model.weight]\n", "\n", " print(\"✓ Matrix parameters use Muon update\")\n", "\n", " def test_scalar_param_uses_adamw(self):\n", " \"\"\"Test that scalar parameters use AdamW update\"\"\"\n", " class ModelWithScalar(nn.Module):\n", " def __init__(self):\n", " super().__init__()\n", " self.weight = nn.Parameter(torch.randn(5, 10)) # Fixed: shape should be (out_features, in_features)\n", " self.scalar = nn.Parameter(torch.randn(())) # scalar\n", "\n", " def forward(self, x):\n", " return F.linear(x, self.weight) * self.scalar\n", "\n", " model = ModelWithScalar().to(self.device)\n", "\n", " params = [{'params': model.parameters(),\n", " 'param_names': ['weight', 'scalar']}]\n", "\n", " optimizer = MuonW(params, lr=0.01)\n", "\n", " # Forward and backward\n", " x = torch.randn(32, 10, device=self.device)\n", " y = model(x)\n", " loss = y.sum()\n", " loss.backward()\n", "\n", " # Step\n", " optimizer.step()\n", "\n", " # Check that scalar parameter has AdamW state\n", " scalar_state = optimizer.state[model.scalar]\n", " assert 'exp_avg' in scalar_state\n", " assert 'exp_avg_sq' in scalar_state\n", " assert 'step' in scalar_state\n", "\n", " print(\"✓ Scalar parameters use AdamW update\")\n", "\n", " def test_embedding_uses_adamw(self):\n", " \"\"\"Test that embedding layers use AdamW by default\"\"\"\n", " model = nn.Embedding(100, 16).to(self.device)\n", "\n", " params = [{'params': model.parameters(),\n", " 'param_names': ['embedding.weight']}]\n", "\n", " optimizer = MuonW(params, lr=0.01)\n", "\n", " # Create dummy gradient\n", " idx = torch.randint(0, 100, (32,), device=self.device)\n", " y = model(idx)\n", " loss = y.sum()\n", " loss.backward()\n", "\n", " # Step\n", " optimizer.step()\n", "\n", " # Check that embedding has AdamW state (not Muon)\n", " embed_state = optimizer.state[model.weight]\n", " assert 'exp_avg' in embed_state\n", " assert 'exp_avg_sq' in embed_state\n", "\n", " print(\"✓ Embedding parameters use AdamW update\")\n", "\n", " def test_weight_decay(self):\n", " \"\"\"Test that weight decay is applied correctly\"\"\"\n", " model = nn.Linear(10, 5, bias=False).to(self.device)\n", "\n", " params = [{'params': model.parameters(),\n", " 'param_names': ['weight']}]\n", "\n", " weight_decay = 0.1\n", " optimizer = MuonW(params, lr=0.01, weight_decay=weight_decay)\n", "\n", " # Store initial weight\n", " initial_weight = model.weight.data.clone()\n", "\n", " # Create zero gradient (to isolate weight decay effect)\n", " model.weight.grad = torch.zeros_like(model.weight)\n", "\n", " # Step\n", " optimizer.step()\n", "\n", " # Check weight decay was applied: new_weight = old_weight * (1 - lr * wd)\n", " expected = initial_weight * (1 - 0.01 * weight_decay)\n", " assert torch.allclose(model.weight.data, expected, rtol=1e-5)\n", "\n", " print(\"✓ Weight decay applied correctly\")\n", "\n", " def test_nesterov_momentum(self):\n", " \"\"\"Test Nesterov momentum option\"\"\"\n", " # Test with Nesterov=True\n", " model1 = nn.Linear(10, 5, bias=False).to(self.device)\n", " model2 = nn.Linear(10, 5, bias=False).to(self.device)\n", "\n", " # Same initialization\n", " model2.weight.data.copy_(model1.weight.data)\n", "\n", " params1 = [{'params': model1.parameters(), 'param_names': ['weight']}]\n", " params2 = [{'params': model2.parameters(), 'param_names': ['weight']}]\n", "\n", " opt1 = MuonW(params1, lr=0.01, nesterov=True)\n", " opt2 = MuonW(params2, lr=0.01, nesterov=False)\n", "\n", " # Same gradients\n", " grad = torch.randn_like(model1.weight)\n", " model1.weight.grad = grad.clone()\n", " model2.weight.grad = grad.clone()\n", "\n", " opt1.step()\n", " opt2.step()\n", "\n", " # Updates should be different\n", " assert not torch.allclose(model1.weight.data, model2.weight.data)\n", "\n", " print(\"✓ Nesterov momentum works differently from standard momentum\")\n", "\n", " def test_conv_filters(self):\n", " \"\"\"Test that conv filters are handled correctly\"\"\"\n", " model = nn.Conv2d(3, 16, kernel_size=3).to(self.device)\n", "\n", " params = [{'params': model.parameters(),\n", " 'param_names': ['conv.weight', 'conv.bias']}]\n", "\n", " optimizer = MuonW(params, lr=0.01)\n", "\n", " # Forward and backward\n", " x = torch.randn(4, 3, 32, 32, device=self.device)\n", " y = model(x)\n", " loss = y.sum()\n", " loss.backward()\n", "\n", " initial_weight = model.weight.data.clone()\n", "\n", " # Step\n", " optimizer.step()\n", "\n", " # Check weight was updated\n", " assert not torch.allclose(initial_weight, model.weight.data)\n", "\n", " # Check state exists\n", " assert 'momentum_buffer' in optimizer.state[model.weight]\n", "\n", " print(\"✓ Conv filters handled correctly\")\n", "\n", " def test_multiple_param_groups(self):\n", " \"\"\"Test optimizer with multiple parameter groups\"\"\"\n", " model = nn.Sequential(\n", " nn.Linear(10, 20),\n", " nn.ReLU(),\n", " nn.Linear(20, 5)\n", " ).to(self.device)\n", "\n", " # Different learning rates for different layers\n", " params = [\n", " {'params': model[0].parameters(), 'lr': 0.01, 'param_names': ['layer0.weight', 'layer0.bias']},\n", " {'params': model[2].parameters(), 'lr': 0.001, 'param_names': ['layer2.weight', 'layer2.bias']}\n", " ]\n", "\n", " optimizer = MuonW(params)\n", "\n", " # Forward and backward\n", " x = torch.randn(32, 10, device=self.device)\n", " y = model(x)\n", " loss = y.sum()\n", " loss.backward()\n", "\n", " # Store initial weights\n", " w0_init = model[0].weight.data.clone()\n", " w2_init = model[2].weight.data.clone()\n", "\n", " # Step\n", " optimizer.step()\n", "\n", " # Both should be updated\n", " assert not torch.allclose(w0_init, model[0].weight.data)\n", " assert not torch.allclose(w2_init, model[2].weight.data)\n", "\n", " print(\"✓ Multiple parameter groups work correctly\")\n", "\n", " def test_zero_grad_handling(self):\n", " \"\"\"Test that parameters with zero gradients are handled correctly\"\"\"\n", " model = nn.Linear(10, 5).to(self.device)\n", "\n", " params = [{'params': model.parameters(),\n", " 'param_names': ['weight', 'bias']}]\n", "\n", " optimizer = MuonW(params, lr=0.01)\n", "\n", " # Set zero gradient\n", " model.weight.grad = torch.zeros_like(model.weight)\n", " model.bias.grad = torch.zeros_like(model.bias)\n", "\n", " initial_weight = model.weight.data.clone()\n", "\n", " # Step should not crash\n", " optimizer.step()\n", "\n", " # With zero grad and no weight decay, parameters shouldn't change much\n", " # (only numerical errors from Newton-Schulz on zero matrix)\n", " assert torch.allclose(initial_weight, model.weight.data, atol=1e-6)\n", "\n", " print(\"✓ Zero gradients handled correctly\")\n", "\n", "def test_distributed_mock():\n", " \"\"\"Test distributed functionality using mocks\"\"\"\n", " print(\"\\nTesting distributed functionality with mocks...\")\n", "\n", " with patch('torch.distributed.is_initialized', return_value=True):\n", " with patch('torch.distributed.get_global_rank', return_value=0):\n", " with patch('torch.distributed.reduce') as mock_reduce:\n", " # This simulates distributed metric collection\n", " model = nn.Linear(10, 5)\n", " params = [{'params': model.parameters(),\n", " 'param_names': ['weight', 'bias']}]\n", "\n", " optimizer = MuonW(params, lr=0.01, record_update_metrics=True)\n", " optimizer._collecting_metrics = True\n", "\n", " # Create gradient\n", " model.weight.grad = torch.randn_like(model.weight)\n", " model.bias.grad = torch.randn_like(model.bias)\n", "\n", " # Step\n", " optimizer.step()\n", "\n", " # Check if metrics were collected\n", " assert optimizer._update_norms is not None\n", " assert optimizer._update_param_names is not None\n", "\n", " print(\"✓ Distributed mock test passed\")\n", "\n", "def run_convergence_test():\n", " \"\"\"Test that the optimizer actually optimizes a simple problem\"\"\"\n", " print(\"\\nRunning convergence test...\")\n", "\n", " torch.manual_seed(42)\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", " # Simple regression problem\n", " X = torch.randn(100, 10, device=device)\n", " true_w = torch.randn(10, 1, device=device)\n", " y = X @ true_w + 0.1 * torch.randn(100, 1, device=device)\n", "\n", " model = nn.Linear(10, 1, bias=False).to(device)\n", " params = [{'params': model.parameters(), 'param_names': ['weight']}]\n", " optimizer = MuonW(params, lr=0.1) # Increased learning rate for better convergence\n", "\n", " losses = []\n", " for epoch in range(200): # More epochs for convergence\n", " # Forward\n", " pred = model(X)\n", " loss = F.mse_loss(pred, y)\n", " losses.append(loss.item())\n", "\n", " # Backward\n", " model.zero_grad() # Use model.zero_grad() instead\n", " loss.backward()\n", "\n", " # Update\n", " optimizer.step()\n", "\n", " # Check that loss decreased - relaxed threshold\n", " assert losses[-1] < losses[0] * 0.7, f\"Loss didn't decrease enough: {losses[0]:.4f} -> {losses[-1]:.4f}\"\n", "\n", " print(f\"✓ Convergence test passed: {losses[0]:.4f} -> {losses[-1]:.4f}\")\n", "\n", "if __name__ == \"__main__\":\n", " print(\"Running MuonW Optimizer Tests\")\n", " print(\"=\" * 50)\n", "\n", " # Run unit tests\n", " suite = unittest.TestLoader().loadTestsFromTestCase(TestMuonW)\n", " runner = unittest.TextTestRunner(verbosity=0)\n", " result = runner.run(suite)\n", "\n", " # Run additional tests\n", " test_distributed_mock()\n", " run_convergence_test()\n", "\n", " print(\"\\n\" + \"=\" * 50)\n", " if result.wasSuccessful():\n", " print(\"All tests passed! ✅\")\n", " else:\n", " print(f\"Some tests failed. Failures: {len(result.failures)}, Errors: {len(result.errors)}\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "CrWv9OuRYfHl", "outputId": "4a2ce32e-d9b8-43f3-ec0d-9c4f10a770ec" }, "execution_count": 13, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "----------------------------------------------------------------------\n", "Ran 8 tests in 0.021s\n", "\n", "OK\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Running MuonW Optimizer Tests\n", "==================================================\n", "✓ Conv filters handled correctly\n", "✓ Embedding parameters use AdamW update\n", "✓ Matrix parameters use Muon update\n", "✓ Multiple parameter groups work correctly\n", "✓ Nesterov momentum works differently from standard momentum\n", "✓ Scalar parameters use AdamW update\n", "✓ Weight decay applied correctly\n", "✓ Zero gradients handled correctly\n", "\n", "Testing distributed functionality with mocks...\n", "✓ Distributed mock test passed\n", "\n", "Running convergence test...\n", "✓ Convergence test passed: 20.7094 -> 0.0136\n", "\n", "==================================================\n", "All tests passed! ✅\n" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "Xa9ABULwYfAi" }, "execution_count": null, "outputs": [] } ] }