# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
class ModuleProxyWrapper(nn.Module):
"""
Wrap a DistributedDataParallel module and forward requests for missing
attributes to the module wrapped by DDP (the twice-wrapped module).
Also forward calls to :func:`state_dict` and :func:`load_state_dict`.
Usage::
module.xyz = "hello world"
wrapped_module = DistributedDataParallel(module, **ddp_args)
wrapped_module = ModuleProxyWrapper(wrapped_module)
assert wrapped_module.xyz == "hello world"
assert wrapped_module.state_dict().keys() == module.state_dict().keys()
Args:
module (nn.Module): module to wrap
"""
def __init__(self, module: nn.Module):
super().__init__()
assert hasattr(
module, "module"
), "ModuleProxyWrapper expects input to wrap another module"
self.module = module
def __getattr__(self, name):
"""Forward missing attributes to twice-wrapped module."""
try:
# defer to nn.Module's logic
return super().__getattr__(name)
except AttributeError:
try:
# forward to the once-wrapped module
return getattr(self.module, name)
except AttributeError:
# forward to the twice-wrapped module
return getattr(self.module.module, name)
def state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.load_state_dict(*args, **kwargs)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
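# --- Hedged usage sketch (not part of the original metaseq file) ---
# Illustrates the attribute and state_dict forwarding without requiring a real
# DistributedDataParallel process group: _FakeDDP below is an illustrative
# stand-in that, like DDP, exposes the inner module under `.module`.
if __name__ == "__main__":
    class _FakeDDP(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)

    inner = nn.Linear(4, 4)
    inner.xyz = "hello world"
    wrapped = ModuleProxyWrapper(_FakeDDP(inner))
    # missing attributes are forwarded to the twice-wrapped module
    assert wrapped.xyz == "hello world"
    # state_dict/load_state_dict bypass the DDP-style wrapper entirely
    assert wrapped.state_dict().keys() == inner.state_dict().keys()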
flash_metaseq-main | metaseq/distributed/module_proxy_wrapper.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import types
import torch
def get_fused_adam_class():
"""
Look for the FusedAdam optimizer from apex. We first try to load the
"contrib" interface, which is a bit faster than the main interface,
but is technically deprecated.
"""
try:
# The "deprecated" interface in recent versions of apex is a bit
# faster than the main interface, since we don't use the apex
# optimizer. This can be installed by passing the
# `--deprecated_fused_adam` option when building apex.
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
return FusedAdamV1
except ImportError:
try:
# fallback to the newer interface
from apex.optimizers import FusedAdam as _FusedAdam # noqa
from apex.multi_tensor_apply import multi_tensor_applier
if multi_tensor_applier.available:
return FusedAdamV2
except ImportError:
pass
return None
class FusedAdamV1(torch.optim.Optimizer):
"""
Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Compared to the original version in Apex, the metaseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.0,
max_grad_norm=0.0,
amsgrad=False,
use_fp16_stats=False,
):
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
if amsgrad:
raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"max_grad_norm": max_grad_norm,
}
super().__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
self.use_fp16_stats = use_fp16_stats
self.FLOAT16_MAX = 65504.0
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
@property
def supports_step_with_scale(self):
return True
def step(self, closure=None, grads=None, scale=1.0, grad_norms=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
for group, grads_this_group, grad_norm in zip(
self.param_groups, grads_group, grad_norms
):
if grads_this_group is None:
grads_this_group = [None] * len(group["params"])
# compute combined scale factor for this group
combined_scale = scale
if group.get("max_grad_norm", 0) > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"]
if clip > 1:
combined_scale = clip * scale
bias_correction = 1 if group.get("bias_correction", 1) else 0
for p, grad in zip(group["params"], grads_this_group):
# note: p.grad should not ever be set for correct
# operation of mixed precision optimizer that sometimes
# sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
if p.device.type == "cpu":
p_data_fp32 = p.data.cuda(non_blocking=True).float()
out_p = torch.tensor([], dtype=torch.float)
else:
p_data_fp32 = p.data.float()
out_p = p.data
state = self.state[p]
# State initialization
dtype = torch.float16 if self.use_fp16_stats else p_data_fp32.dtype
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32, dtype=dtype)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32, dtype=dtype)
if self.use_fp16_stats:
state["exp_avg_scale"] = 1.0
state["exp_avg_sq_scale"] = 1.0
else:
device = p_data_fp32.device
state["exp_avg"] = state["exp_avg"].to(device, dtype)
state["exp_avg_sq"] = state["exp_avg_sq"].to(device, dtype)
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
if self.use_fp16_stats:
assert exp_avg.dtype == torch.float16
exp_avg = exp_avg.float() * state["exp_avg_scale"]
exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"]
beta1, beta2 = group["betas"]
state["step"] += 1
with torch.cuda.device(p_data_fp32.device):
fused_adam_cuda.adam(
p_data_fp32,
out_p,
exp_avg,
exp_avg_sq,
grad,
group["lr"],
beta1,
beta2,
group["eps"],
combined_scale,
state["step"],
self.eps_mode,
bias_correction,
group["weight_decay"],
)
if p.device.type == "cpu":
p.data.copy_(p_data_fp32, non_blocking=True)
if self.use_fp16_stats:
def inf_norm(t):
return torch.norm(t, float("inf"))
# from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py
state["exp_avg_scale"], state["exp_avg_sq_scale"] = (
1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX,
1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX,
)
state["exp_avg"], state["exp_avg_sq"] = (
(exp_avg / state["exp_avg_scale"]).half(),
(exp_avg_sq / state["exp_avg_sq_scale"]).half(),
)
return loss
try:
from apex.optimizers import FusedAdam
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdamV2(FusedAdam):
"""
Compared to the original version in Apex, the metaseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
"""
def __init__(self, *args, use_fp16_stats=False, **kwargs):
if use_fp16_stats:
raise NotImplementedError(
"--fp16-adam-stats is only supported with FusedAdamV1"
)
super().__init__(*args, **kwargs)
if not hasattr(self, "multi_tensor_adam"):
raise Exception(
"Apex installation is outdated. Please install an updated version of apex."
)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(
self,
closure=None,
grads=None,
output_params=None,
scale=None,
grad_norms=None,
):
"""Performs a single optimization step."""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group["bias_correction"] else 0
beta1, beta2 = group["betas"]
# assume same step across group now to simplify things
# per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
if "step" in group:
group["step"] += 1
else:
group["step"] = 1
# create lists for multi-tensor apply
g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group["params"]:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p.data, dtype=torch.float
)
else:
state["exp_avg"] = state["exp_avg"].to(
device=p.data.device, dtype=torch.float
)
state["exp_avg_sq"] = state["exp_avg_sq"].to(
device=p.data.device, dtype=torch.float
)
if p.dtype == torch.float16:
g_16.append(p.grad.data.float())
p_16.append(p.data.float())
orig_p_16.append(p.data)
m_16.append(state["exp_avg"])
v_16.append(state["exp_avg_sq"])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state["exp_avg"])
v_32.append(state["exp_avg_sq"])
else:
raise RuntimeError("FusedAdam only support fp16 and fp32.")
with torch.cuda.device(p.device):
if len(g_16) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
for orig_p, p in zip(orig_p_16, p_16):
orig_p.copy_(p.data)
if len(g_32) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
return loss
except ImportError:
pass
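# --- Hedged usage sketch (not part of the original metaseq file) ---
# get_fused_adam_class() returns FusedAdamV1, FusedAdamV2, or None depending on
# which apex interface is importable, so callers are expected to fall back to a
# plain optimizer when it returns None. Model and hyperparameters below are
# illustrative only.
if __name__ == "__main__":
    fused_adam_cls = get_fused_adam_class()
    if fused_adam_cls is not None and torch.cuda.is_available():
        model = torch.nn.Linear(8, 8).cuda().half()
        optimizer = fused_adam_cls(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
    else:
        # apex is missing (or no GPU is visible): fall back to a standard optimizer
        model = torch.nn.Linear(8, 8)
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    print("using", type(optimizer).__name__)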
flash_metaseq-main | metaseq/optim/fused_adam.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
class DynamicLossScaler(object):
def __init__(
self,
init_scale=4.0,
scale_factor=2.0,
scale_window=256,
tolerance=0.0,
threshold=None,
min_loss_scale=2**-5,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
logger.info(
f"*** SCALE_WINDOW: {self.scale_window}, loss scale: {self.loss_scale} ***"
)
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
self.min_loss_scale = min_loss_scale
def scale(self, outputs):
return self.loss_scale * outputs
def update(self):
if (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
# When scaling up loss_scale, also scale up the scale_window.
self.scale_window *= self.scale_factor
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
# also decrease the scale_window (lower loss scale, smaller window)
self.scale_window = max(int(self.scale_window / self.scale_factor), 1)
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
def check_overflow(self, grad_norm):
# detect inf and nan
if grad_norm == float("inf") or grad_norm != grad_norm:
# overflow has occurred
prev_scale = self.loss_scale
iter_since_rescale = self._iter - self._last_rescale_iter
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
if self.loss_scale < self.min_loss_scale:
# Don't scale down past min_loss_scale, just continue to skip grad after overflow error is raised.
self.loss_scale = prev_scale
self._iter += 1
raise OverflowError("setting loss scale to: " + str(self.loss_scale))
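# --- Hedged usage sketch (not part of the original metaseq file) ---
# In training, the scaler multiplies the loss via scale() before backward; here we
# only exercise the bookkeeping: check_overflow() raises OverflowError on an
# inf/nan gradient norm (shrinking the scale, subject to `tolerance`), while
# update() counts clean steps and grows the scale roughly every `scale_window`
# of them. The values below are illustrative only.
if __name__ == "__main__":
    scaler = DynamicLossScaler(init_scale=4.0, scale_window=4)
    for step, grad_norm in enumerate([1.0, 0.5, float("inf"), 0.7]):
        try:
            scaler.check_overflow(grad_norm)
            scaler.update()
        except OverflowError as e:
            print(f"step {step}: overflow, {e}")
    print("final loss scale:", scaler.loss_scale)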
flash_metaseq-main | metaseq/optim/dynamic_loss_scaler.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from metaseq import utils
from metaseq.dataclass.utils import gen_parser_from_dataclass
class BaseOptimizer(object):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
@classmethod
def add_args(cls, parser):
"""Add optimizer-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
"""Reset optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
self._optimizer = optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
@property
def params(self):
"""Return an iterable of the parameters held by the optimizer."""
for param_group in self.param_groups:
for p in param_group["params"]:
yield p
@property
def param_groups(self):
return self.optimizer.param_groups
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
"""Return the current learning rate."""
return self.param_groups[0]["lr"]
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.param_groups:
param_group["lr"] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
if optimizer_overrides is not None and len(optimizer_overrides) > 0:
# override learning rate, momentum, etc. with latest values
for group in self.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
loss.backward()
def all_reduce_grads(self, module):
"""Manually all-reduce gradients (if required)."""
if hasattr(module, "all_reduce_grads"):
module.all_reduce_grads()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for p in self.params:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm, norm_type="l2", aggregate_norm_fn=None):
"""Clips gradient norm."""
return utils.clip_grad_norm_(
self.params, max_norm, norm_type, aggregate_norm_fn
)
def step(self, closure=None, scale=1.0, groups=None):
"""Performs a single optimization step."""
if self.supports_step_with_scale:
if self.supports_groups:
self.optimizer.step(closure, scale=scale, groups=groups)
else:
self.optimizer.step(closure, scale=scale)
else:
if scale != 1.0:
self.multiply_grads(1.0 / scale)
if self.supports_groups:
self.optimizer.step(closure, groups=groups)
else:
self.optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
return self.optimizer.supports_memory_efficient_fp16
return False
@property
def supports_step_with_scale(self):
if hasattr(self.optimizer, "supports_step_with_scale"):
return self.optimizer.supports_step_with_scale
return False
@property
def supports_groups(self):
if hasattr(self.optimizer, "supports_groups"):
return self.optimizer.supports_groups
return False
@property
def supports_flat_params(self):
"""
Whether the optimizer supports collapsing of the model
parameters/gradients into a single contiguous Tensor.
"""
if hasattr(self.optimizer, "supports_flat_params"):
return self.optimizer.supports_flat_params
return False
def broadcast_global_state_dict(self, state_dict):
"""
Broadcasts a global state dict to all ranks.
Useful for optimizers that shard state between ranks.
"""
if hasattr(self.optimizer, "broadcast_global_state_dict"):
return self.optimizer.broadcast_global_state_dict(state_dict)
else:
return state_dict
class LegacyOptimizer(BaseOptimizer):
def __init__(self, args):
self.args = args
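# --- Hedged usage sketch (not part of the original metaseq file) ---
# A minimal concrete subclass: it only has to build `self._optimizer` (a
# torch.optim.Optimizer); BaseOptimizer then layers lr handling, gradient
# multiplication/clipping and stepping on top. The class name, cfg=None and the
# hyperparameters are illustrative assumptions, not metaseq API.
if __name__ == "__main__":
    class _SketchAdamW(BaseOptimizer):
        def __init__(self, cfg, params):
            super().__init__(cfg)
            self._optimizer = torch.optim.AdamW(params, **self.optimizer_config)

        @property
        def optimizer_config(self):
            return {"lr": 1e-3, "weight_decay": 0.01}

    model = torch.nn.Linear(4, 2)
    opt = _SketchAdamW(cfg=None, params=model.parameters())
    model(torch.randn(3, 4)).sum().backward()
    opt.multiply_grads(0.5)           # e.g. undo loss scaling / average over workers
    opt.clip_grad_norm(max_norm=1.0)  # delegates to metaseq.utils.clip_grad_norm_
    opt.step()
    opt.zero_grad()
    print("lr:", opt.get_lr())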
flash_metaseq-main | metaseq/optim/base_optimizer.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.optim.optimizer import Optimizer, required
from . import LegacyOptimizer, register_optimizer
@register_optimizer("sgd")
class MetaseqSGDW(LegacyOptimizer):
"""
Note that this implements SGDW from this paper:
https://arxiv.org/abs/1711.05101
"""
def __init__(self, args, params):
super().__init__(args)
self._optimizer = SGDW(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
help='momentum factor')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"momentum": self.args.momentum,
"weight_decay": self.args.weight_decay,
}
class SGDW(Optimizer):
def __init__(
self,
params,
lr=required,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(SGDW, self).__init__(params, defaults)
def __setstate__(self, state):
super(SGDW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad
if d_p.dtype in {torch.float16, torch.bfloat16}:
d_p = d_p.float()
p_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_fp32 = p_fp32.float()
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
if weight_decay != 0:
p_fp32.add_(p_fp32, alpha=-weight_decay * group["lr"])
p_fp32.add_(d_p, alpha=-group["lr"])
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_fp32)
return loss
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
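# --- Hedged usage sketch (not part of the original metaseq file) ---
# Decoupled weight decay: the update above is roughly
#   p <- p - lr * weight_decay * p - lr * d_p
# where d_p is the (momentum-adjusted) gradient, i.e. decay is applied to the
# parameter directly instead of being folded into the gradient as in classic
# L2-regularised SGD. Minimal CPU illustration; values are arbitrary.
if __name__ == "__main__":
    w = torch.nn.Parameter(torch.ones(3))
    opt = SGDW([w], lr=0.1, momentum=0.9, weight_decay=0.01)
    (w.sum() ** 2).backward()
    opt.step()
    opt.zero_grad()
    print(w.detach())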
flash_metaseq-main | metaseq/optim/sgd.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from metaseq import registry
from metaseq.optim.base_optimizer import ( # noqa
BaseOptimizer,
LegacyOptimizer,
)
from metaseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from metaseq.optim.shard import shard_
from omegaconf import DictConfig
__all__ = [
"BaseOptimizer",
"FP16Optimizer",
"MemoryEfficientFP16Optimizer",
"shard_",
]
(
_build_optimizer,
register_optimizer,
OPTIMIZER_REGISTRY,
OPTIMIZER_DATACLASS_REGISTRY,
) = registry.setup_registry("--optimizer", base_class=BaseOptimizer, required=True)
def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs):
if all(isinstance(p, dict) for p in params):
params = [t for p in params for t in p.values()]
params = list(filter(lambda p: p.requires_grad, params))
return _build_optimizer(cfg, params, *extra_args, **extra_kwargs)
# automatically import any Python files in the optim/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("metaseq.optim." + file_name)
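# --- Hedged usage sketch (not part of the original metaseq file) ---
# build_optimizer() filters out frozen parameters and dispatches on cfg._name
# through the registry populated by the import loop above. The config keys follow
# MetaseqAdamConfig ("adam"); the concrete values are illustrative only.
if __name__ == "__main__":
    import torch
    from omegaconf import OmegaConf

    model = torch.nn.Linear(8, 8)
    cfg = OmegaConf.create(
        {
            "_name": "adam",
            "adam_betas": "(0.9, 0.999)",
            "adam_eps": 1e-8,
            "weight_decay": 0.01,
            "lr": [1e-3],
        }
    )
    optimizer = build_optimizer(cfg, list(model.parameters()))
    print(type(optimizer).__name__, optimizer.get_lr())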
flash_metaseq-main | metaseq/optim/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from itertools import chain
import torch
from omegaconf import DictConfig
from metaseq import optim
from .dynamic_loss_scaler import DynamicLossScaler
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in mro(method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return torch.is_tensor(self.fp32_params) or (
isinstance(self.fp32_params, dict)
and all(torch.is_tensor(t) for t in self.fp32_params.values())
)
@classmethod
def build_fp32_params(cls, args, params, flatten=True):
# create FP32 copy of parameters and grads
if flatten:
total_param_size = sum(p.data.numel() for p in params)
devices = [torch.cuda.current_device()]
fp32_params = {}
for device in devices:
device_param_size = total_param_size
device_params = params
fp32_params[device] = (
device_params[0].new(0).float().new(device_param_size)
)
offset = 0
for p in device_params:
numel = p.data.numel()
fp32_params[device][offset : offset + numel].copy_(p.data.view(-1))
offset += numel
fp32_params[device] = torch.nn.Parameter(fp32_params[device])
fp32_params[device].grad = fp32_params[device].data.new(
device_param_size
)
return fp32_params
else:
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float())
p32.grad = torch.zeros_like(p32.data)
if hasattr(p, "param_group"):
p32.param_group = p.param_group
fp32_params.append(p32)
return fp32_params
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`metaseq.optim.BaseOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self):
if self._needs_sync:
# copy FP16 grads to FP32
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
if p.requires_grad:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
grad_data = (
p.grad.data
if p.grad is not None
else p.data.new_zeros(p.data.shape)
)
numel = grad_data.numel()
self.fp32_params[device].grad.data[
offset : offset + numel
].copy_(grad_data.view(-1))
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
if p32.grad is None:
p32.grad = p.grad.data.float()
else:
p32.grad.data.copy_(p.grad.data)
else:
p32.grad = torch.zeros_like(p.data, dtype=torch.float)
self._needs_sync = False
def _sync_fp32_params_to_fp16(self):
# copy FP32 params back into FP16 model
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
numel = p.data.numel()
p.data.copy_(
self.fp32_params[device]
.data[offset : offset + numel]
.view_as(p.data)
)
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32.data)
def _unscale_grads(self):
self._sync_fp16_grads_to_fp32()
if (
# Skip the multiplication if it's a no-op (i.e., if _multiply_factor
# is 1.0). At the same time, we want to avoid the device-to-host
# transfer by comparing it to 1.0. Since _multiply_factor starts as
# a Python float, we roughly assume that if it's a tensor then it's
# probably not =1.0 anymore and we do the multiplication. Otherwise
# we can safely check the value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.fp32_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
self._multiply_factor *= c
def clip_grad_norm(
self,
max_norm,
norm_type="l2",
aggregate_norm_fn=None,
skip_gradient_update_on_clip_norm=False,
):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm(
0, norm_type, aggregate_norm_fn
)
if self.scaler is not None:
if skip_gradient_update_on_clip_norm:
# detect overflow and adjust loss scale
self.scaler.check_overflow(grad_norm)
if grad_norm > max_norm > 0.0:
raise OverflowError(
f"Grad norm: {grad_norm:.2f} exceeds threshold: {max_norm:.2f}, rejecting batch."
)
else:
if grad_norm > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm
# detect overflow and adjust loss scale
self.scaler.check_overflow(grad_norm)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
if getattr(self, "supports_step_with_scale", False):
self.fp32_optimizer.step(
closure, scale=(1.0 / self._multiply_factor), groups=groups
)
else:
self._unscale_grads()
self.fp32_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
self._sync_fp32_params_to_fp16()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad is not None:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
class FP16Optimizer(_FP16OptimizerMixin, optim.BaseOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs):
super().__init__(cfg.optimizer)
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
# No loss scaler required for training with bf16
self.scaler = (
None
if cfg.common.bf16
else DynamicLossScaler(
init_scale=cfg.common.fp16_init_scale,
scale_window=cfg.common.fp16_scale_window,
tolerance=cfg.common.fp16_scale_tolerance,
threshold=cfg.common.threshold_loss_scale,
min_loss_scale=cfg.common.min_loss_scale,
)
)
@classmethod
def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
"""
Args:
cfg (omegaconf.DictConfig): metaseq args
params (iterable): iterable of parameters to optimize
"""
flatten = not getattr(cfg.common, "fp16_no_flatten_grads", False)
fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten)
if flatten:
fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params])
else:
fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params)
if flatten and not fp32_optimizer.supports_flat_params:
raise RuntimeError(
f"chosen optimizer {fp32_optimizer.__class__.__name__} does not "
f"support flat params, please set --fp16-no-flatten-grads"
)
return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.fp32_optimizer.optimizer = optimizer
@property
def lr_scheduler(self):
return getattr(self.fp32_optimizer, "lr_scheduler", None)
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.fp32_optimizer.all_reduce_grads(module)
@property
def supports_flat_params(self):
return self.fp32_optimizer.supports_flat_params
class _MemoryEfficientFP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in MRO (method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return False
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
# Hack: PyTorch automatically casts the optimizer state to match the
# type of the current parameters. But with --memory-efficient-fp16 the
# params are FP16 while the optimizer state is FP32 and we don't want
# to cast. A workaround is to manually copy back the original state
# after the optimizer has been loaded.
if not getattr(self.optimizer, "disable_mem_eff_fp16_loading_hack", False):
groups = self.optimizer.param_groups
saved_groups = state_dict["param_groups"]
id_map = {
old_id: p
for old_id, p in zip(
chain(*(g["params"] for g in saved_groups)),
chain(*(g["params"] for g in groups)),
)
}
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
self.optimizer.state[param] = v
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`metaseq.optim.BaseOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
def _unscale_grads(self):
if (
# Skip the multiplication if it's a no-op (i.e., if _multiply_factor
# is 1.0). At the same time, we want to avoid the device-to-host
# transfer by comparing it to 1.0. Since _multiply_factor starts as
# a Python float, we roughly assume that if it's a tensor then it's
# probably not =1.0 anymore and we do the multiplication. Otherwise
# we can safely check the value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.wrapped_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
self._multiply_factor *= c
def clip_grad_norm(
self,
max_norm,
norm_type="l2",
aggregate_norm_fn=None,
skip_gradient_update_on_clip_norm=False,
):
"""Clips gradient norm and updates dynamic loss scaler."""
max_norm = float(max_norm)
grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(
0, norm_type, aggregate_norm_fn
)
if self.scaler is not None:
grad_norm_cpu = float(grad_norm)
# If skip_gradient_update_on_clip_norm is set, first detect grad-norm overflows to
# update the loss scale, then additionally check the clip-norm threshold without
# updating the loss scale.
if skip_gradient_update_on_clip_norm:
# detect overflow and adjust loss scale
self.scaler.check_overflow(grad_norm_cpu)
if grad_norm_cpu > max_norm > 0.0:
raise OverflowError(
f"Grad norm: {grad_norm:.2f} exceeds threshold: {max_norm:.2f}, rejecting batch."
)
else:
if grad_norm_cpu > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm_cpu
# detect overflow and adjust loss scale
self.scaler.check_overflow(grad_norm_cpu)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
if getattr(self, "supports_step_with_scale", False):
# NOTE(msb) optimizer divides by scale factor
self.wrapped_optimizer.step(
closure, scale=(1.0 / self._multiply_factor), groups=groups
)
else:
self._unscale_grads()
self.wrapped_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
else:
self._multiply_factor = 1.0
@property
def supports_flat_params(self):
return self.wrapped_optimizer.supports_flat_params
class MemoryEfficientFP16Optimizer(
_MemoryEfficientFP16OptimizerMixin, optim.BaseOptimizer
):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`metaseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(
self, cfg: DictConfig, params, optimizer, allow_unsupported=False, **kwargs
):
if not allow_unsupported and not optimizer.supports_memory_efficient_fp16:
raise ValueError(
"Unsupported optimizer: {}".format(optimizer.__class__.__name__)
)
super().__init__(cfg.optimizer)
self.wrapped_optimizer = optimizer
# No loss scaler required for training with bf16
self.scaler = (
None
if cfg.common.bf16
else DynamicLossScaler(
init_scale=cfg.common.fp16_init_scale,
scale_window=cfg.common.fp16_scale_window,
tolerance=cfg.common.fp16_scale_tolerance,
threshold=cfg.common.threshold_loss_scale,
min_loss_scale=cfg.common.min_loss_scale,
)
)
@classmethod
def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
"""
Args:
args (argparse.Namespace): metaseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(cfg.optimizer, params)
return cls(cfg, params, fp16_optimizer, **kwargs)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.wrapped_optimizer.optimizer = optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
@property
def lr_scheduler(self):
return getattr(self.wrapped_optimizer, "lr_scheduler", None)
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.wrapped_optimizer.all_reduce_grads(module)
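# --- Hedged sketch (not part of the original metaseq file) ---
# build_fp32_params is the piece that sets up the master weights: with
# flatten=True it packs every fp16 parameter into one flat fp32 tensor per
# device (and assumes CUDA), otherwise it keeps a per-parameter list of fp32
# copies with pre-allocated grads. Minimal CPU illustration of the unflattened
# path; shapes and values are illustrative only.
if __name__ == "__main__":
    fp16_params = [torch.nn.Parameter(torch.randn(4, 4).half()) for _ in range(2)]
    fp32_copies = _FP16OptimizerMixin.build_fp32_params(None, fp16_params, flatten=False)
    assert all(p32.dtype == torch.float32 for p32 in fp32_copies)
    assert all(p32.grad is not None for p32 in fp32_copies)  # grads pre-allocated
    print([tuple(p32.shape) for p32 in fp32_copies])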
flash_metaseq-main | metaseq/optim/fp16_optimizer.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from metaseq.distributed import utils
try:
from fairscale.optim import OSS
_has_fairscale = True
except ImportError:
_has_fairscale = False
def shard_(optimizer, group):
if not _has_fairscale:
raise ImportError(
"\n\nPlease install the fairscale package:" "\n\n pip install fairscale"
)
class MetaseqOSS(OSS):
@property
def disable_mem_eff_fp16_loading_hack(self):
return True
def __getattr__(self, name):
if name.startswith("supports") and hasattr(self.optim, name):
return getattr(self.optim, name)
raise AttributeError(
"'MetaseqOSS' object has no attribute {0!r}".format(name)
)
def broadcast_global_state_dict(
self, state_dict: Dict[str, Any]
) -> Dict[str, Any]:
"""
Broadcasts the entire state_dict to all other ranks;
each rank is responsible for loading its own partition of the data.
"""
return utils.broadcast_object(
state_dict,
src_rank=0,
group=self.group,
)
torch_optimizer = optimizer.optimizer
optim_cls = type(torch_optimizer)
optimizer.optimizer = MetaseqOSS(
torch_optimizer.param_groups,
optim_cls,
group=group,
**optimizer.optimizer_config
)
|
flash_metaseq-main
|
metaseq/optim/shard.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
import torch.optim
from omegaconf import II, DictConfig
from metaseq.dataclass import MetaseqDataclass
from metaseq.optim import BaseOptimizer, register_optimizer
from metaseq.optim.fused_adam import get_fused_adam_class
logger = logging.getLogger(__name__)
@dataclass
class MetaseqAdamConfig(MetaseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
use_old_adam: bool = field(
default=False, metadata={"help": "Use metaseq.optim.adam.Adam"}
)
fp16_adam_stats: bool = field(
default=False, metadata={"help": "use FP16 stats (with automatic scaling)"}
)
# TODO common vars below in parent
lr: List[float] = II("optimization.lr")
block_wise: bool = field(
default=False,
metadata={"help": "Enables block-wise optimization for 8-bit Adam"},
)
@register_optimizer("adam", dataclass=MetaseqAdamConfig)
class MetaseqAdam(BaseOptimizer):
"""Adam optimizer for metaseq.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = (
not getattr(cfg, "use_old_adam", False)
and fused_adam_cls is not None
and torch.cuda.is_available()
)
if use_fused_adam:
logger.info("using FusedAdam")
self._optimizer = fused_adam_cls(
params, use_fp16_stats=self.cfg.fp16_adam_stats, **self.optimizer_config
)
else:
if self.cfg.fp16_adam_stats:
raise NotImplementedError(
"--fp16-adam-stats is only supported with FusedAdamV1"
)
self._optimizer = Adam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
}
@register_optimizer("adam8bit", dataclass=MetaseqAdamConfig)
class MetaseqAdam8Bit(BaseOptimizer):
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"adam8bit requires bits and bytes: see https://gist.github.com/TimDettmers/c4ffe346f095ee4481aa3d4b4ad2ffe0"
)
bnb.optim.GlobalOptimManager.get_instance().register_parameters(params)
self._optimizer = bnb.optim.Adam(
params, optim_bits=8, **self.optimizer_config
) # equivalent
@property
def optimizer_config(self):
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
"block_wise": self.cfg.block_wise,
}
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
class Adam(torch.optim.Optimizer):
r"""Implements Adam algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
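# --- Hedged usage sketch (not part of the original metaseq file) ---
# The pure-Python Adam above applies decoupled ("AdamW"-style) weight decay,
# roughly:
#   p <- p - lr * weight_decay * p - step_size * exp_avg / (sqrt(exp_avg_sq) + eps)
# where step_size includes the bias-correction terms. Minimal CPU illustration;
# values are arbitrary.
if __name__ == "__main__":
    w = torch.nn.Parameter(torch.ones(4))
    opt = Adam([w], lr=1e-2, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.01)
    for _ in range(3):
        opt.zero_grad()
        loss = (w ** 2).sum()
        loss.backward()
        opt.step()
    print(w.detach())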
flash_metaseq-main | metaseq/optim/adam.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from metaseq import registry
from metaseq.optim.lr_scheduler.base_lr_scheduler import BaseLRScheduler
from omegaconf import DictConfig
(
build_lr_scheduler_,
register_lr_scheduler,
LR_SCHEDULER_REGISTRY,
LR_SCHEDULER_DATACLASS_REGISTRY,
) = registry.setup_registry(
"--lr-scheduler", base_class=BaseLRScheduler, default="fixed"
)
def build_lr_scheduler(cfg: DictConfig, optimizer):
return build_lr_scheduler_(cfg, optimizer)
# automatically import any Python files in the optim/lr_scheduler/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("metaseq.optim.lr_scheduler." + file_name)
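# --- Hedged usage sketch (not part of the original metaseq file) ---
# build_lr_scheduler() resolves cfg._name against the registry filled by the
# import loop above and hands the scheduler the metaseq optimizer wrapper. The
# config fields follow InverseSquareRootLRScheduleConfig; _SketchOptimizer and
# all concrete values are illustrative assumptions.
if __name__ == "__main__":
    import torch
    from omegaconf import OmegaConf
    from metaseq.optim import BaseOptimizer

    class _SketchOptimizer(BaseOptimizer):
        def __init__(self, cfg, params):
            super().__init__(cfg)
            self._optimizer = torch.optim.SGD(params, lr=1.0)

    model = torch.nn.Linear(4, 4)
    optimizer = _SketchOptimizer(cfg=None, params=model.parameters())
    cfg = OmegaConf.create(
        {"_name": "inverse_sqrt", "warmup_updates": 100, "warmup_init_lr": 1e-7, "lr": [5e-4]}
    )
    scheduler = build_lr_scheduler(cfg, optimizer)
    for num_updates in (0, 50, 100, 400):
        print(num_updates, scheduler.step_update(num_updates))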
flash_metaseq-main | metaseq/optim/lr_scheduler/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional, List
from omegaconf import II
from metaseq.dataclass import MetaseqDataclass
from metaseq.optim.lr_scheduler import BaseLRScheduler, register_lr_scheduler
@dataclass
class PolynomialDecayLRScheduleConfig(MetaseqDataclass):
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
force_anneal: Optional[int] = field(
default=None,
metadata={"help": "force annealing at specified epoch"},
)
end_learning_rate: float = field(
default=0.0,
metadata={"help": "learning rate to decay to"},
)
zero_lr_warmup_steps: int = field(
default=0,
metadata={
"help": "number of steps to run with lr = 0 in the beginning, before warmup_updates, to update EMAs"
},
)
power: float = field(
default=1.0,
metadata={"help": "decay exponent"},
)
total_num_update: float = field(
default=II("optimization.max_update"),
metadata={"help": "total number of updates over which to decay learning rate"},
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("polynomial_decay", dataclass=PolynomialDecayLRScheduleConfig)
class PolynomialDecayLRSchedule(BaseLRScheduler):
"""Decay the LR on a fixed schedule."""
def __init__(self, cfg: PolynomialDecayLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
assert cfg.total_num_update > 0
self.lr = cfg.lr[0]
if cfg.warmup_updates > 0:
self.warmup_factor = 1.0 / cfg.warmup_updates
else:
self.warmup_factor = 1
self.end_learning_rate = cfg.end_learning_rate
self.zero_lr_warmup_steps = cfg.zero_lr_warmup_steps
self.total_num_update = cfg.total_num_update
self.power = cfg.power
self.optimizer.set_lr(self.warmup_factor * self.lr)
def get_next_lr(self, epoch):
lrs = self.cfg.lr
if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(epoch, len(lrs) - 1)]
else:
# anneal based on lr_shrink
next_lr = self.optimizer.get_lr()
return next_lr
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if self.zero_lr_warmup_steps > 0 and num_updates <= self.zero_lr_warmup_steps:
lr = 0
elif (
self.cfg.warmup_updates > 0
and num_updates <= self.cfg.warmup_updates + self.zero_lr_warmup_steps
):
self.warmup_factor = (num_updates - self.zero_lr_warmup_steps) / float(
self.cfg.warmup_updates
)
lr = self.warmup_factor * self.lr
elif num_updates >= self.total_num_update:
lr = self.end_learning_rate
else:
warmup = self.cfg.warmup_updates + self.zero_lr_warmup_steps
lr_range = self.lr - self.end_learning_rate
pct_remaining = 1 - (num_updates - warmup) / (
self.total_num_update - warmup
)
lr = lr_range * pct_remaining**self.power + self.end_learning_rate
self.optimizer.set_lr(lr)
return self.optimizer.get_lr()
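# --- Hedged worked example (not part of the original metaseq file) ---
# After warmup (and with zero_lr_warmup_steps = 0) the schedule above computes
#   lr = (lr_0 - end_lr) * (1 - (t - warmup) / (T - warmup)) ** power + end_lr
# with T = total_num_update. A small standalone check with illustrative numbers:
if __name__ == "__main__":
    lr_0, end_lr, power = 1e-4, 0.0, 1.0
    warmup, total = 1000, 10_000
    for t in (1000, 5500, 10_000):
        pct_remaining = 1 - (t - warmup) / (total - warmup)
        print(t, (lr_0 - end_lr) * pct_remaining**power + end_lr)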
flash_metaseq-main | metaseq/optim/lr_scheduler/polynomial_decay_schedule.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from metaseq.dataclass import MetaseqDataclass
from metaseq.optim.lr_scheduler import BaseLRScheduler, register_lr_scheduler
@dataclass
class InverseSquareRootLRScheduleConfig(MetaseqDataclass):
warmup_updates: int = field(
default=4000,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("inverse_sqrt", dataclass=InverseSquareRootLRScheduleConfig)
class InverseSquareRootSchedule(BaseLRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
learning rate (``--lr``). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup::
lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = cfg.lr * sqrt(cfg.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, cfg: InverseSquareRootLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with inverse_sqrt."
" Consider --lr-scheduler=fixed instead."
)
warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
# linearly warmup for the first cfg.warmup_updates
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * cfg.warmup_updates**0.5
# initial learning rate
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.cfg.warmup_updates:
self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
else:
self.lr = self.decay_factor * num_updates**-0.5
self.optimizer.set_lr(self.lr)
return self.lr
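# --- Hedged worked example (not part of the original metaseq file) ---
# Sanity-check of the two regimes with illustrative numbers: linear warmup up to
# update 4000, then lr = decay_factor / sqrt(update_num); the two pieces meet at
# the warmup boundary.
if __name__ == "__main__":
    lr, warmup_updates, warmup_init_lr = 5e-4, 4000, 0.0
    lr_step = (lr - warmup_init_lr) / warmup_updates
    decay_factor = lr * warmup_updates**0.5
    for t in (1, 2000, 4000, 16000):
        current = warmup_init_lr + t * lr_step if t < warmup_updates else decay_factor * t**-0.5
        print(t, current)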
flash_metaseq-main | metaseq/optim/lr_scheduler/inverse_square_root_schedule.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from metaseq.dataclass.utils import gen_parser_from_dataclass
from metaseq.optim import BaseOptimizer
class BaseLRScheduler(object):
def __init__(self, cfg, optimizer):
super().__init__()
if optimizer is not None and not isinstance(optimizer, BaseOptimizer):
raise ValueError("optimizer must be an instance of BaseOptimizer")
self.cfg = cfg
self.optimizer = optimizer
self.best = None
@classmethod
def add_args(cls, parser):
"""Add arguments to the parser for this LR scheduler."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
def state_dict(self):
"""Return the LR scheduler state dict."""
return {"best": self.best}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.best = state_dict["best"]
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
pass
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
if val_loss is not None:
if self.best is None:
self.best = val_loss
else:
self.best = min(self.best, val_loss)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.optimizer.get_lr()
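# Illustrative sketch (not part of the metaseq API): the base class only tracks the best
# validation loss seen so far; since __init__ accepts optimizer=None, that bookkeeping
# can be exercised without a real optimizer. cfg=None is an illustrative shortcut.
if __name__ == "__main__":
    sched = BaseLRScheduler(cfg=None, optimizer=None)
    for epoch, loss in enumerate([3.0, 2.5, 2.7]):
        sched.step(epoch, val_loss=loss)
    print(sched.state_dict())  # {'best': 2.5}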
|
flash_metaseq-main
|
metaseq/optim/lr_scheduler/base_lr_scheduler.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from metaseq.dataclass import MetaseqDataclass
from metaseq.optim.lr_scheduler import BaseLRScheduler, register_lr_scheduler
@dataclass
class CosineLRScheduleConfig(MetaseqDataclass):
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = field(
default=II("optimization.lr"),
metadata={"help": "max learning rate, must be more than cfg.min_lr"},
)
min_lr: float = field(default=0.0, metadata={"help": "min learning rate"})
t_mult: float = field(
default=1.0, metadata={"help": "factor to grow the length of each period"}
)
lr_period_updates: float = field(
default=-1, metadata={"help": "initial number of updates per period"}
)
lr_shrink: float = field(
default=0.1, metadata={"help": "shrink factor for annealing"}
)
# This is not required, but is for convenience in inferring lr_period_updates
max_update: int = II("optimization.max_update")
@register_lr_scheduler("cosine", dataclass=CosineLRScheduleConfig)
class CosineLRSchedule(BaseLRScheduler):
"""Assign LR based on a cyclical schedule that follows the cosine function.
See https://arxiv.org/pdf/1608.03983.pdf for details.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
max learning rate (``--lr``).
During warmup::
lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
lr = lrs[update_num]
After warmup::
      lr = cfg.min_lr + 0.5*(cfg.lr - cfg.min_lr)*(1 + cos(pi * t_curr / t_i))
    where ``t_curr`` is the number of updates completed within the current period
    and ``t_i`` is the length of the current period, which is scaled by ``t_mult``
    after every restart.
"""
def __init__(self, cfg: CosineLRScheduleConfig, metaseq_optimizer):
super().__init__(cfg, metaseq_optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with cosine."
f" Consider --lr-scheduler=fixed instead. ({cfg.lr})"
)
self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
assert (
self.max_lr > cfg.min_lr
), f"max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})"
warmup_end_lr = self.max_lr
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = cfg.min_lr
self.t_mult = cfg.t_mult
self.period = cfg.lr_period_updates
if self.period <= 0:
assert (
cfg.max_update > 0
), "Either --max_update or --lr-period-updates must be set"
self.period = cfg.max_update - cfg.warmup_updates
if cfg.warmup_updates > 0:
# linearly warmup for the first cfg.warmup_updates
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
else:
self.lr_step = 1
self.warmup_updates = cfg.warmup_updates
self.lr_shrink = cfg.lr_shrink
# initial learning rate
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.cfg.warmup_updates:
self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
else:
curr_updates = num_updates - self.cfg.warmup_updates
if self.t_mult != 1:
i = math.floor(
math.log(
1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult
)
)
t_i = self.t_mult**i * self.period
t_curr = (
curr_updates
- (1 - self.t_mult**i) / (1 - self.t_mult) * self.period
)
else:
i = math.floor(curr_updates / self.period)
t_i = self.period
t_curr = curr_updates - (self.period * i)
lr_shrink = self.lr_shrink**i
min_lr = self.cfg.min_lr * lr_shrink
max_lr = self.max_lr * lr_shrink
self.lr = min_lr + 0.5 * (max_lr - min_lr) * (
1 + math.cos(math.pi * t_curr / t_i)
)
self.optimizer.set_lr(self.lr)
return self.lr
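# Illustrative sketch (not part of the metaseq API): single-period cosine annealing
# matching the docstring formula above for the special case t_mult=1 and lr_shrink=1;
# the period, min_lr and max_lr values are illustrative assumptions, not config defaults.
def _demo_cosine_lr(curr_updates, period=10000, min_lr=1e-5, max_lr=5e-4):
    t_curr = curr_updates % period  # position within the current period
    return min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / period))

if __name__ == "__main__":
    print([round(_demo_cosine_lr(t), 6) for t in (0, 2500, 5000, 7500)])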
|
flash_metaseq-main
|
metaseq/optim/lr_scheduler/cosine_lr_scheduler.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
import torch
from torch import Tensor
@torch.jit.script
def script_skip_tensor_list(x: List[Tensor], mask):
res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x]
outputs = []
for i, t in enumerate(res):
if t.numel() != 0:
outputs.append(t)
else:
outputs.append(x[i])
return outputs
@torch.jit.script
def script_skip_tensor(x: Tensor, mask):
# None case
if x.size(0) == 0:
return x
res = x[mask] if x.size(0) == mask.size(0) else x[:, mask]
if res.numel() == 0:
return x
else:
return res
@torch.jit.script
def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int):
"""
Expand 2D/3D tensor on dim=1
"""
if x is None:
return None
assert x.dim() == 2 or x.dim() == 3
assert trg_dim >= x.size(1), (trg_dim, x.size())
if trg_dim == x.size(1):
return x
dims = [x.size(0), trg_dim - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1)
return x
@torch.jit.script
def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor:
return x if x is not None else y
@torch.jit.script
def fill_tensors(
x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int
) -> Optional[Tensor]:
"""
Filling tensor x with y at masked positions (dim=0).
"""
if x is None or x.size()[0] == 0 or y is None:
return x
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
if n_selected == 0:
return x
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = torch.tensor(padding_idx).type_as(x)
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
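# Illustrative sketch (not part of the metaseq API): minimal usage of the helpers above.
# Pad a 2D tensor to a longer target width, then overwrite the masked rows with new
# values; padding_idx=1 and the small shapes are arbitrary illustrative choices.
if __name__ == "__main__":
    x = torch.tensor([[5, 6], [7, 8]])
    x = expand_2d_or_3d_tensor(x, 4, 1)   # -> [[5, 6, 1, 1], [7, 8, 1, 1]]
    mask = torch.tensor([True, False])
    y = torch.tensor([[9, 9, 9, 9]])
    print(fill_tensors(x, mask, y, 1))    # row 0 replaced by y, row 1 unchanged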
|
flash_metaseq-main
|
metaseq/models/model_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from metaseq.dataclass import MetaseqDataclass
from metaseq.dataclass.utils import merge_with_parent, populate_dataclass
from hydra.core.config_store import ConfigStore
from .distributed_model import DistributedModel
from .base_decoder import BaseDecoder
from .base_encoder import BaseEncoder
from .incremental_decoder import IncrementalDecoder
from .base_model import (
BaseModel,
EncoderDecoderModel,
LanguageModel,
)
MODEL_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
ARCH_MODEL_INV_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
__all__ = [
"BaseModel",
"DistributedModel",
"BaseDecoder",
"BaseEncoder",
"EncoderDecoderModel",
"IncrementalDecoder",
"LanguageModel",
"register_model",
]
def build_model(cfg: MetaseqDataclass, task):
model = None
model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None)
if not model_type and len(cfg) == 1:
# this is hit if config object is nested in directory that is named after model type
model_type = next(iter(cfg))
if model_type in MODEL_DATACLASS_REGISTRY:
cfg = cfg[model_type]
else:
raise Exception(
"Could not infer model type from directory. Please add _name field to indicate model type. "
"Available models: "
+ str(MODEL_DATACLASS_REGISTRY.keys())
+ " Requested model type: "
+ model_type
)
if model_type in ARCH_MODEL_REGISTRY:
# case 1: legacy models
model = ARCH_MODEL_REGISTRY[model_type]
elif model_type in MODEL_DATACLASS_REGISTRY:
# case 2: config-driven models
model = MODEL_REGISTRY[model_type]
if model_type in MODEL_DATACLASS_REGISTRY:
# set defaults from dataclass. note that arch name and model name can be the same
dc = MODEL_DATACLASS_REGISTRY[model_type]
if isinstance(cfg, argparse.Namespace):
cfg = populate_dataclass(dc(), cfg)
else:
cfg = merge_with_parent(dc(), cfg)
assert model is not None, (
f"Could not infer model type from {cfg}. "
f"Available models: "
+ str(MODEL_DATACLASS_REGISTRY.keys())
+ " Requested model type: "
+ model_type
)
return model.build_model(cfg, task)
def register_model(name, dataclass=None):
"""
New model types can be added to metaseq with the :func:`register_model`
function decorator.
For example::
@register_model('lstm')
class LSTM(EncoderDecoderModel):
(...)
.. note:: All models must implement the :class:`BaseModel` interface.
Typically you will extend :class:`EncoderDecoderModel` for
sequence-to-sequence tasks or :class:`LanguageModel` for
language modeling tasks.
Args:
name (str): the name of the model
"""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError("Cannot register duplicate model ({})".format(name))
if not issubclass(cls, BaseModel):
raise ValueError(
"Model ({}: {}) must extend BaseModel".format(name, cls.__name__)
)
MODEL_REGISTRY[name] = cls
if dataclass is not None and not issubclass(dataclass, MetaseqDataclass):
raise ValueError(
"Dataclass {} must extend MetaseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
MODEL_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="model", node=node, provider="metaseq")
@register_model_architecture(name, name)
def noop(_):
pass
return cls
return register_model_cls
def register_model_architecture(model_name, arch_name):
"""
New model architectures can be added to metaseq with the
:func:`register_model_architecture` function decorator. After registration,
model architectures can be selected with the ``--arch`` command-line
argument.
For example::
@register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
def lstm_luong_wmt_en_de(cfg):
            cfg.encoder_embed_dim = getattr(cfg, 'encoder_embed_dim', 1000)
(...)
    The decorated function should take a single argument *cfg*, which is a
    :class:`omegaconf.DictConfig`. The decorated function should modify this
    config in-place to match the desired architecture.
Args:
model_name (str): the name of the Model (Model must already be
registered)
arch_name (str): the name of the model architecture (``--arch``)
"""
def register_model_arch_fn(fn):
if model_name not in MODEL_REGISTRY:
raise ValueError(
"Cannot register model architecture for unknown model type ({})".format(
model_name
)
)
if arch_name in ARCH_MODEL_REGISTRY:
raise ValueError(
"Cannot register duplicate model architecture ({})".format(arch_name)
)
if not callable(fn):
raise ValueError(
"Model architecture must be callable ({})".format(arch_name)
)
ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name
ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
ARCH_CONFIG_REGISTRY[arch_name] = fn
return fn
return register_model_arch_fn
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("metaseq.models." + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group("Additional command-line arguments")
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
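# Illustrative sketch (not part of the metaseq model registry): shows what the decorators
# above put into the registries, using hypothetical names ("toy_lm", "toy_lm_tiny") that
# do not exist in metaseq; guarded so nothing is registered on a normal package import.
if __name__ == "__main__":
    @register_model("toy_lm")
    class ToyModel(BaseModel):
        @classmethod
        def build_model(cls, args, task):
            return cls()

    @register_model_architecture("toy_lm", "toy_lm_tiny")
    def toy_lm_tiny(cfg):
        cfg.decoder_embed_dim = getattr(cfg, "decoder_embed_dim", 64)

    print("toy_lm" in MODEL_REGISTRY)         # True
    print(ARCH_MODEL_INV_REGISTRY["toy_lm"])  # ['toy_lm', 'toy_lm_tiny']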
|
flash_metaseq-main
|
metaseq/models/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Base classes for various metaseq models.
"""
import logging
from argparse import Namespace
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import DictConfig
from torch import Tensor
from metaseq.dataclass.utils import gen_parser_from_dataclass
from metaseq.models import BaseDecoder, BaseEncoder
logger = logging.getLogger(__name__)
def check_type(module, expected_type):
if hasattr(module, "unwrapped_module"):
assert isinstance(
module.unwrapped_module, expected_type
), f"{type(module.unwrapped_module)} != {expected_type}"
else:
assert isinstance(module, expected_type), f"{type(module)} != {expected_type}"
class BaseModel(nn.Module):
"""Base class for metaseq models."""
def __init__(self):
super().__init__()
self._is_generation_fast = False
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
# do not set defaults so that settings defaults from various architectures still works
gen_parser_from_dataclass(parser, dc(), delete_default=True)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
raise NotImplementedError("Model must implement the build_model method")
def get_targets(self, sample, net_output):
"""Get targets from either the sample or the net's output."""
return sample["target"]
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
    # TorchScript doesn't support super(), so a scriptable subclass can't access
    # the base class implementation in TorchScript.
    # The current workaround is to add a helper function with a different name and
    # call that helper from the scriptable subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Scriptable helper function for get_normalized_probs in ~BaseModel"""
if hasattr(self, "decoder"):
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
elif torch.is_tensor(net_output):
# syntactic sugar for simple models which don't have a decoder
# (e.g., the classification tutorial)
logits = net_output.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def extract_features(self, *args, **kwargs):
"""Similar to *forward* but only return features."""
return self(*args, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return None
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg: Optional[DictConfig] = None,
args: Optional[Namespace] = None,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
return super().load_state_dict(state_dict, strict)
def upgrade_state_dict(self, state_dict):
"""Upgrade old state dicts to work with newer code."""
self.upgrade_state_dict_named(state_dict, "")
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code.
Args:
state_dict (dict): state dictionary to upgrade, in place
name (str): the state dict key corresponding to the current module
"""
assert state_dict is not None
def do_upgrade(m, prefix):
if len(prefix) > 0:
prefix += "."
for n, c in m.named_children():
name = prefix + n
if hasattr(c, "upgrade_state_dict_named"):
c.upgrade_state_dict_named(state_dict, name)
elif hasattr(c, "upgrade_state_dict"):
c.upgrade_state_dict(state_dict)
do_upgrade(c, name)
do_upgrade(self, name)
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
for m in self.modules():
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
def prepare_for_inference_(self, cfg: DictConfig):
"""Prepare model for inference."""
kwargs = {}
kwargs["beamable_mm_beam_size"] = (
None
if getattr(cfg.generation, "no_beamable_mm", False)
else getattr(cfg.generation, "beam", 5)
)
kwargs["need_attn"] = False
self.make_generation_fast_(**kwargs)
def make_generation_fast_(self, **kwargs):
"""
Legacy entry point to optimize model for faster generation.
Prefer prepare_for_inference_.
"""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except (AttributeError, ValueError): # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module, prefix):
if len(prefix) > 0:
prefix += "."
base_func = BaseModel.make_generation_fast_
for n, m in module.named_modules():
if (
m != self
and hasattr(m, "make_generation_fast_")
# don't call this implementation again, e.g., if
# children modules also inherit from BaseModel
and m.make_generation_fast_.__func__ is not base_func
):
name = prefix + n
m.make_generation_fast_(name=name, **kwargs)
apply_make_generation_fast_(self, "")
def train(mode=True):
if mode:
raise RuntimeError("cannot train after make_generation_fast")
# this model should no longer be used for training
self.eval()
self.train = train
def prepare_for_onnx_export_(self, **kwargs):
"""Make model exportable via ONNX trace."""
seen = set()
def apply_prepare_for_onnx_export_(module):
if (
module != self
and hasattr(module, "prepare_for_onnx_export_")
and module not in seen
):
seen.add(module)
module.prepare_for_onnx_export_(**kwargs)
self.apply(apply_prepare_for_onnx_export_)
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
moe_disable_padding=True,
skip_prepare_for_inference=False,
**kwargs,
):
"""
Load from a pre-trained model file. Downloads and caches the pre-trained
model file if needed.
The base implementation returns a
:class:`~metaseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models.
Other models may override this to implement custom hub interfaces.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
from metaseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
logger.info(x["args"])
return hub_utils.GeneratorHubInterface(
x["args"],
x["task"],
x["models"],
moe_disable_padding=moe_disable_padding,
skip_prepare_for_inference=skip_prepare_for_inference,
)
@classmethod
def hub_models(cls):
return {}
class EncoderDecoderModel(BaseModel):
"""Base class for encoder-decoder models.
Args:
encoder (BaseEncoder): the encoder
decoder (BaseDecoder): the decoder
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
check_type(self.encoder, BaseEncoder)
check_type(self.decoder, BaseDecoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
features = self.decoder.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return features
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder.max_positions(), self.decoder.max_positions())
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
class LanguageModel(BaseModel):
"""Base class for decoder-only models.
Args:
decoder (BaseDecoder): the decoder
"""
def __init__(self, decoder):
super().__init__()
self.decoder = decoder
check_type(self.decoder, BaseDecoder)
def forward(self, src_tokens, **kwargs):
"""
Run the forward pass for a decoder-only model.
Feeds a batch of tokens through the decoder to predict the next tokens.
Args:
src_tokens (LongTensor): tokens on which to condition the decoder,
of shape `(batch, tgt_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
Returns:
tuple:
- the decoder's output of shape `(batch, seq_len, vocab)`
- a dictionary with any model-specific outputs
"""
return self.decoder(src_tokens, **kwargs)
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, seq_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
return self.decoder.extract_features(src_tokens, **kwargs)
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return self.decoder.max_positions()
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
@property
def supported_targets(self):
return {"future"}
|
flash_metaseq-main
|
metaseq/models/base_model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from torch import Tensor
from metaseq import utils
class BaseDecoder(nn.Module):
"""Base class for decoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
self.onnx_trace = False
def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
x = self.output_layer(x)
return x, extra
def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def output_layer(self, features, **kwargs):
"""
Project features to the default output size, e.g., vocabulary size.
Args:
features (Tensor): features returned by *extract_features*.
"""
raise NotImplementedError
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
    # TorchScript doesn't support super(), so a scriptable subclass can't access
    # the base class implementation in TorchScript.
    # The current workaround is to add a helper function with a different name and
    # call that helper from the scriptable subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
else:
return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
def max_positions(self):
"""Maximum input length supported by the decoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code."""
return state_dict
def prepare_for_onnx_export_(self):
self.onnx_trace = True
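# Illustrative sketch (not part of the metaseq API): a minimal BaseDecoder subclass that
# satisfies the extract_features/output_layer contract. The vocabulary size (100), the
# embedding width (16) and the dictionary=None shortcut are illustrative assumptions.
if __name__ == "__main__":
    import torch

    class TinyDecoder(BaseDecoder):
        def __init__(self):
            super().__init__(dictionary=None)
            self.embed = nn.Embedding(100, 16)
            self.proj = nn.Linear(16, 100)

        def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
            return self.embed(prev_output_tokens), {"attn": None}

        def output_layer(self, features, **kwargs):
            return self.proj(features)

    decoder = TinyDecoder()
    logits, _ = decoder(torch.randint(0, 100, (2, 4)))
    print(logits.shape)  # torch.Size([2, 4, 100])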
|
flash_metaseq-main
|
metaseq/models/base_decoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from metaseq import utils
from metaseq.distributed import utils as dist_utils, fsdp_wrap
from metaseq.models import BaseEncoder, IncrementalDecoder
from metaseq.modules import (
Dropout,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from metaseq.modules.checkpoint_activations import checkpoint_wrapper
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
class TransformerEncoder(BaseEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~metaseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = Dropout(args.dropout, module_name=self.__class__.__name__)
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if args.encoder_learned_pos
else None
)
self.layers = nn.ModuleList([])
for i in range(args.encoder_layers):
self.layers.append(self.build_encoder_layer(args))
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def build_encoder_layer(self, args):
layer = TransformerEncoderLayer(args)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
distribute_checkpointed_activations = getattr(
args, "distribute_checkpointed_activations", False
)
layer = checkpoint_wrapper(
layer,
offload_to_cpu=offload_to_cpu,
distribute_checkpointed_activations=distribute_checkpointed_activations,
)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint
else 0
)
layer = fsdp_wrap(
layer,
min_num_params=min_params_to_wrap,
process_group=dist_utils.get_data_parallel_group(),
)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
x = self.dropout_module(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(
src_tokens, src_lengths, return_all_hiddens, token_embeddings
)
    # TorchScript doesn't support super(), so a scriptable subclass can't access
    # the base class implementation in TorchScript.
    # The current workaround is to add a helper function with a different name and
    # call that helper from the scriptable subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = encoder_padding_mask.any()
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
l_aux = []
for layer in self.layers:
x, l_aux_i = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
l_aux.append(l_aux_i)
if self.layer_norm is not None:
x = self.layer_norm(x)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
        # `forward`, so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"l_aux": l_aux,
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of metaseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoderMultiLayerBlockModule(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = nn.ModuleList(layers)
def forward(self, x, **kwargs):
l_aux = []
inner_states = []
for layer in self.layers:
            x, layer_attn, _, l_aux_i = layer(x, **kwargs)
            inner_states.append(x)
            l_aux.append(l_aux_i)  # collect per-layer aux losses rather than discarding them
return x, layer_attn, inner_states, l_aux
def _log_weight_stats(tensor, name):
logger.debug(
f"{name}, mean: {tensor.mean():.5f}, std: {tensor.std():.5f}, min: {tensor.min():.5f}, max: {tensor.max():.5f}"
)
class TransformerDecoder(IncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~metaseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = Dropout(args.dropout, module_name=self.__class__.__name__)
if getattr(args, "no_emb_dropout", False):
self.dropout_module = None
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.use_alibi: bool = getattr(args, "alibi", False)
initialize_params_on_gpu = getattr(
args, "tensor_parallel_init_model_on_gpu", False
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
learned_sinusoidal=getattr(args, "decoder_learned_sinusoidal", False),
full_megatron_init=getattr(args, "full_megatron_init", False),
megatron_init_sigma=getattr(args, "megatron_init_sigma", 0.006),
)
if args.decoder_learned_pos and not self.use_alibi
else None
)
if initialize_params_on_gpu and self.embed_positions is not None:
self.embed_positions = utils.floating_point_precision_convertor(
self.embed_positions.cuda(),
fp16=getattr(args, "fp16", False),
memory_efficient_fp16=getattr(args, "memory_efficient_fp16", False),
bf16=getattr(args, "bf16", False),
)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.layers = nn.ModuleList([])
layers = []
for i in range(args.decoder_layers):
layers.append(
self.build_decoder_layer(
args,
no_encoder_attn=no_encoder_attn,
)
)
if getattr(self.args, "fsdp_checkpoint_wrap_layer_frequency", 1) > 1:
assert (
len(layers) % self.args.fsdp_checkpoint_wrap_layer_frequency == 0
), "num layers should be divisible by checkpoint wrap frequency"
for i in range(
0, len(layers), self.args.fsdp_checkpoint_wrap_layer_frequency
):
layer_block = TransformerDecoderMultiLayerBlockModule(
layers[i : i + self.args.fsdp_checkpoint_wrap_layer_frequency]
)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
distribute_checkpointed_activations = getattr(
args, "distribute_checkpointed_activations", False
)
layer_block = checkpoint_wrapper(
layer_block,
offload_to_cpu=offload_to_cpu,
distribute_checkpointed_activations=distribute_checkpointed_activations,
)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint
else 0
)
layer_block = fsdp_wrap(
layer_block,
min_num_params=min_params_to_wrap,
process_group=dist_utils.get_data_parallel_group(),
)
self.layers.append(layer_block)
else:
self.layers = nn.ModuleList(layers)
_log_weight_stats(self.embed_tokens.weight, "embed tokens")
self.num_layers = len(self.layers)
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
if initialize_params_on_gpu:
self.layer_norm = utils.floating_point_precision_convertor(
self.layer_norm.cuda(),
fp16=getattr(args, "fp16", False),
memory_efficient_fp16=getattr(args, "memory_efficient_fp16", False),
bf16=getattr(args, "bf16", False),
)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim
else None
)
self.output_projection = None
if self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim**-0.5
)
if self.use_alibi:
self.alibi = self._build_alibi_tensor(
self.max_positions(), args.decoder_attention_heads
)
@staticmethod
def _build_alibi_tensor(max_seq_len: int, n_attention_heads: int):
"""Returns tensor shaped (n_head, 1, max_seq_len)"""
def get_slopes(n):
# In the paper, we only train models that have 2^a heads for some a. This function has some good
# properties that only occur when the input is a power of 2. To maintain that even when the number of
# heads is not a power of 2, we use this workaround.
def get_slopes_power_of_2(n):
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
if math.log2(n).is_integer():
return get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
return (
get_slopes_power_of_2(closest_power_of_2)
+ get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
)
slopes = torch.Tensor(get_slopes(n_attention_heads))
# In the next line, the part after the * is what constructs the diagonal matrix (right matrix in Figure 3 in
# the paper).
# It doesn't exactly print out the same matrix as we have in Figure 3, but one where all rows are identical.
# This works because the softmax operation is invariant to translation, and our bias functions are always
# linear.
alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(max_seq_len).unsqueeze(
0
).unsqueeze(0).expand(n_attention_heads, -1, -1)
alibi = alibi.view(n_attention_heads, 1, max_seq_len)
return alibi
def build_base_decoder_layer(self, args, no_encoder_attn=False):
return TransformerDecoderLayer(args, no_encoder_attn=no_encoder_attn)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = self.build_base_decoder_layer(args, no_encoder_attn)
for name, param in layer.named_parameters():
_log_weight_stats(param, name)
if getattr(args, "fsdp_checkpoint_wrap_layer_frequency", 1) > 1:
return layer
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
distribute_checkpointed_activations = getattr(
args, "distribute_checkpointed_activations", False
)
layer = checkpoint_wrapper(
layer,
offload_to_cpu=offload_to_cpu,
distribute_checkpointed_activations=distribute_checkpointed_activations,
)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint
else 0
)
layer = fsdp_wrap(
layer,
min_num_params=min_params_to_wrap,
process_group=dist_utils.get_data_parallel_group(),
)
return layer
def forward_embedding(
self,
tokens,
token_embedding: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
# embed tokens and positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
tokens, incremental_state=incremental_state
)
# see IncrementalDecoder for important information about
# incremental state
if incremental_state:
tokens = tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
if token_embedding is None:
token_embedding = self.embed_tokens(tokens)
x = embed = self.embed_scale * token_embedding
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.dropout_module is not None:
x = self.dropout_module(x)
return x, embed, positions
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
self_attn_padding_mask (torch.Tensor, optional): precomputed padding
mask for self-attention (default None will recompute mask)
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
# see IncrementalDecoder for important information about
# incremental state
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
token_embeddings=token_embeddings,
self_attn_padding_mask=self_attn_padding_mask,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
token_embeddings: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
token_embeddings=token_embeddings,
self_attn_padding_mask=self_attn_padding_mask,
)
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
token_embeddings: Optional[Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
):
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy
of this function is made to be used in the subclass instead.
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# compute self-attention padding mask (involves device-to-host transfer,
# so put it at the top of the forward)
if self_attn_padding_mask is None and (
self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any()
):
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# embed tokens and positions
x, tok, pos = self.forward_embedding(
prev_output_tokens, token_embeddings, incremental_state
)
# see IncrementalDecoder for important information about
# incremental state. Note that it may be an empty dictionary.
if not incremental_state and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# decoder layers
attn: Optional[Tensor] = None
        # store other representations for instrumentation in VocabParallelCrossEntCrit
        # Note: we only store the embedding output and the output of the final transformer
        # block, instead of all inner representations, as that's the only thing being logged
        # and storing all intermediate representations causes OOM for large models during
        # validation.
inner_states: List[Optional[Tensor]] = [{"tok": tok, "pos": pos, "emb": x}]
if encoder_out is None:
l_aux = []
else:
l_aux = encoder_out["l_aux"] if "l_aux" in encoder_out else []
for idx, layer in enumerate(self.layers):
x, layer_attn, _, l_aux_i = layer(
x,
encoder_out=encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_padding_mask=encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state=incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
l_aux.append(l_aux_i)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
inner_states.append(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states, "l_aux": l_aux}
def output_layer(self, features):
"""Project features to the vocabulary size."""
return self.output_projection(features)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
batch_size, cur_seq_len = tensor.size(0), tensor.size(1)
max_seq_len = self.max_positions()
need_to_make_new_mask = (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(1) < max_seq_len
or (
self.use_alibi
and self._future_mask.size(0)
!= (batch_size * self.args.decoder_attention_heads)
)
)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if need_to_make_new_mask:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([max_seq_len, max_seq_len])), 1
)
if self.use_alibi:
                alibi = self.alibi.repeat(batch_size, 1, 1)  # (batch_size * n_heads, 1, max_seq_len)
self._future_mask = self._future_mask.unsqueeze(0) + alibi
self._future_mask = self._future_mask.to(tensor)
if self.use_alibi:
return self._future_mask[
: batch_size * self.args.decoder_attention_heads,
:cur_seq_len,
:cur_seq_len,
]
else:
return self._future_mask[:cur_seq_len, :cur_seq_len]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of metaseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Embedding(
num_embeddings, embedding_dim, padding_idx, initialize_params_on_gpu=False
):
# Passing weights initialized on GPU.
device = torch.cuda.current_device() if initialize_params_on_gpu else None
dtype = torch.half if initialize_params_on_gpu else torch.float
weight = torch.empty(num_embeddings, embedding_dim, device=device, dtype=dtype)
nn.init.normal_(weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(weight[padding_idx], 0)
m = nn.Embedding(
num_embeddings, embedding_dim, padding_idx=padding_idx, _weight=weight
)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
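# Illustrative sketch (not part of the metaseq API): the ALiBi geometry built by
# TransformerDecoder._build_alibi_tensor for a power-of-two head count; 8 heads and a
# 16-token context are illustrative assumptions.
if __name__ == "__main__":
    alibi = TransformerDecoder._build_alibi_tensor(max_seq_len=16, n_attention_heads=8)
    print(alibi.shape)     # torch.Size([8, 1, 16])
    print(alibi[:, 0, 1])  # per-head slopes: 1/2, 1/4, ..., 1/256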
|
flash_metaseq-main
|
metaseq/models/transformer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import II
from metaseq import utils
from metaseq.dataclass import ChoiceEnum, MetaseqDataclass
from metaseq.models import (
LanguageModel,
register_model,
register_model_architecture,
)
from metaseq.models.transformer import (
DEFAULT_MIN_PARAMS_TO_WRAP,
Embedding,
TransformerDecoder,
)
DEFAULT_MAX_TARGET_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class TransformerLanguageModelConfig(MetaseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
decoder_embed_dim: int = field(
default=512, metadata={"help": "decoder embedding dimension"}
)
decoder_output_dim: int = field(
default=512, metadata={"help": "decoder output dimension"}
)
decoder_input_dim: int = field(
default=512, metadata={"help": "decoder input dimension"}
)
decoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"})
decoder_attention_heads: int = field(
default=8, metadata={"help": "num decoder attention heads"}
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_learned_sinusoidal: bool = field(
default=False,
metadata={
"help": "use learned positional embeddings init with sinusoidal in the decoder"
},
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
use_stable_embedding: Optional[bool] = field(
default=False,
metadata={
"help": "Use bitsandbytes StableEmbeddingLayer which saves embedding state in fp32",
"argparse_alias": "--stable-emb",
},
)
# NormFormer
scale_fc: Optional[bool] = field(
default=False,
metadata={
"help": "Insert LayerNorm between fully connected layers",
},
)
scale_attn: Optional[bool] = field(
default=False, metadata={"help": "Insert LayerNorm after attention"}
)
scale_heads: Optional[bool] = field(
default=False,
metadata={"help": "Learn a scale coefficient for each attention head"},
)
# ALiBi
alibi: bool = field(
default=False,
metadata={
"help": "use the ALiBi position method instead of regular position embeddings"
},
)
fsdp_checkpoint_wrap_layer_frequency: int = field(
default=1,
metadata={
"help": "group transformer blocks and wrap the group in checkpoint and FSDP wrapper together"
},
)
distribute_checkpointed_activations: bool = field(
default=False,
metadata={
"help": "distribute offloaded checkpoints to tensor parallel gpus. "
"It adds extra within node all_reduce but reduces checkpointed activations significantly,"
"so a good way to trade speed for gpu memory."
},
)
tensor_parallel_init_model_on_gpu: bool = field(
default=False,
metadata={
"help": "initialize model directly on gpu and possibly fp16 for tensor parallel, shoudl be faster to init model."
},
)
full_megatron_init: bool = field(
default=False,
metadata={"help": "Exact same init as Megatron"},
)
megatron_init_sigma: float = field(
default=0.006,
metadata={"help": "Sigma for megatron initialization"},
)
sync_ln_variance: Optional[bool] = field(
default=False,
metadata={"help": "sync_ln_variance stats", "argparse_alias": "--sync-ln"},
)
no_emb_dropout: Optional[bool] = field(
default=False, metadata={"help": "Avoid emb dropout for decoder"}
)
# options from other parts of the config
add_bos_token: bool = II("task.add_bos_token")
tokens_per_sample: int = II("task.tokens_per_sample")
max_target_positions: Optional[int] = II("task.max_target_positions")
memory_efficient_fp16: bool = II("common.memory_efficient_fp16")
fp16: bool = II("common.fp16")
fp16_no_flatten_grads: bool = II("common.fp16_no_flatten_grads")
ddp_backend: str = II("distributed_training.ddp_backend")
world_size: int = II("distributed_training.distributed_world_size")
distributed_rank: int = II("distributed_training.distributed_rank")
batch_size: Optional[int] = II("dataset.batch_size")
batch_size_valid: Optional[int] = II("dataset.batch_size_valid")
model_parallel_size: int = II("common.model_parallel_size")
@register_model("transformer_lm", dataclass=TransformerLanguageModelConfig)
class TransformerLanguageModel(LanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
decoder = TransformerDecoder(
args,
task.target_dictionary,
embed_tokens,
no_encoder_attn=True,
)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
if getattr(args, "use_stable_embedding", False):
import bitsandbytes as bnb
if not args.no_scale_embedding:
logger.warning(
"It is recommended to pass --no-scale-embedding with --use-stable-embedding"
)
return bnb.nn.StableEmbedding(len(dictionary), embed_dim, dictionary.pad())
else:
return Embedding(
len(dictionary),
embed_dim,
dictionary.pad(),
initialize_params_on_gpu=getattr(
args, "tensor_parallel_init_model_on_gpu", False
),
)
def base_lm_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.decoder_learned_sinusoidal = getattr(args, "decoder_learned_sinusoidal", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.add_bos_token = getattr(args, "add_bos_token", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
@register_model_architecture("transformer_lm", "transformer_lm_gpt")
def transformer_lm_gpt(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny")
def transformer_lm_gpt2_tiny(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 64)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 64)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 1)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt2_bigger")
def transformer_lm_gpt2_bigger(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 2048)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 8192)
args.decoder_layers = getattr(args, "decoder_layers", 48)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
|
flash_metaseq-main
|
metaseq/models/transformer_lm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from metaseq.distributed import (
DistributedTimeoutWrapper,
ModuleProxyWrapper,
)
logger = logging.getLogger(__name__)
_GOSSIP_DISABLED = False
try:
import gossip # noqa: F401
except ImportError:
_GOSSIP_DISABLED = True
def DistributedModel(args, model, process_group, device):
"""
Wrap a *model* to support distributed data parallel training.
This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.
Args:
args (argparse.Namespace): metaseq args
model (BaseModel): model to wrap
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
device: device to move model to
"""
assert isinstance(model, nn.Module)
if args.ddp_backend in {"c10d", "pytorch_ddp"}:
wrapped_model = DistributedDataParallel(
module=model.to(device),
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
bucket_cap_mb=args.bucket_cap_mb,
process_group=process_group,
find_unused_parameters=args.find_unused_parameters,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend == "fully_sharded":
try:
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
except ImportError:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
assert isinstance(model, FSDP), "expected model to already be wrapped in FSDP"
wrapped_model = model
if args.memory_efficient_fp16:
if args.bf16:
wrapped_model = wrapped_model.bfloat16()
else:
wrapped_model = wrapped_model.half()
if not args.cpu_offload:
wrapped_model = wrapped_model.to(device=device)
else:
raise ValueError("Unknown --ddp-backend: " + args.ddp_backend)
# kill hung distributed jobs after a timeout
if getattr(args, "heartbeat_timeout", -1) > 0:
wrapped_model = DistributedTimeoutWrapper(
wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1)
)
return wrapped_model
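# A minimal sketch of how DistributedModel might be invoked with the
# pytorch_ddp backend. The argparse fields below mirror the ones this function
# reads and are assumptions; calling the sketch requires an initialized
# torch.distributed process group.
def _example_wrap_with_ddp(model, process_group, device_id=0):
    from argparse import Namespace
    import torch

    args = Namespace(
        ddp_backend="pytorch_ddp",
        device_id=device_id,
        broadcast_buffers=False,
        bucket_cap_mb=25,
        find_unused_parameters=False,
        heartbeat_timeout=-1,
    )
    device = torch.device("cuda", device_id)
    wrapped = DistributedModel(args, model, process_group, device)
    # missing attributes and state_dict/load_state_dict fall through to the
    # original module via ModuleProxyWrapper
    return wrapped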
|
flash_metaseq-main
|
metaseq/models/distributed_model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, Optional
from torch import Tensor
from metaseq.incremental_decoding_utils import with_incremental_state
from metaseq.models import BaseDecoder
logger = logging.getLogger(__name__)
@with_incremental_state
class IncrementalDecoder(BaseDecoder):
"""Base class for incremental decoders.
Incremental decoding is a special mode at inference time where the Model
only receives a single timestep of input corresponding to the previous
output token (for teacher forcing) and must produce the next output
*incrementally*. Thus the model must cache any long-term state that is
needed about the sequence, e.g., hidden states, convolutional states, etc.
Compared to the standard :class:`BaseDecoder` interface, the incremental
decoder interface allows :func:`forward` functions to take an extra keyword
argument (*incremental_state*) that can be used to cache state across
time-steps.
The :class:`IncrementalDecoder` interface also defines the
:func:`reorder_incremental_state` method, which is used during beam search
to select and reorder the incremental state based on the selection of beams.
To learn more about how incremental decoding works, refer to `this blog
<http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
Note that incremental_state will take different values depending on the
situation. At train and validation time, incremental_state will be None,
indicating that no incremental state is available and does not need to be
computed.
During generation, incremental_state will begin as an empty
dictionary, indicating no incremental_state is available, but SHOULD be
computed. This class modifies this dictionary inline via
    reorder_incremental_state. After the first step, incremental_state
will be full of model-specific state.
"""
def __init__(self, dictionary):
super().__init__(dictionary)
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict, optional): dictionary used for storing
state during :ref:`Incremental decoding`. Note that this
dictionary is modified inline iff incremental_state is not None.
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
pass
def reorder_incremental_state_scripting(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Main entry point for reordering the incremental state.
Due to limitations in TorchScript, we call this function in
:class:`metaseq.sequence_generator.SequenceGenerator` instead of
calling :func:`reorder_incremental_state` directly.
"""
for module in self.modules():
if hasattr(module, "reorder_incremental_state"):
result = module.reorder_incremental_state(incremental_state, new_order)
if result is not None:
incremental_state = result
def set_beam_size(self, beam_size):
"""Sets the beam size in the decoder and all children."""
if getattr(self, "_beam_size", -1) != beam_size:
seen = set()
def apply_set_beam_size(module):
if (
module != self
and hasattr(module, "set_beam_size")
and module not in seen
):
seen.add(module)
module.set_beam_size(beam_size)
self.apply(apply_set_beam_size)
self._beam_size = beam_size
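# A minimal sketch of the caching pattern described in the class docstring: a
# toy decoder that stores previously seen tokens in incremental_state, the same
# way real decoders cache attention keys/values. It assumes the
# get_incremental_state/set_incremental_state helpers provided by
# @with_incremental_state.
class _ToyIncrementalDecoder(IncrementalDecoder):
    def forward(
        self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
    ):
        import torch  # local import; the original module only imports Tensor

        if incremental_state is not None:
            buf = self.get_incremental_state(incremental_state, "toy_state") or {}
            prev = buf.get("prev_tokens", None)
            if prev is not None:
                # during generation only the newest timestep is passed in,
                # so prepend the cached tokens
                prev_output_tokens = torch.cat([prev, prev_output_tokens], dim=1)
            self.set_incremental_state(
                incremental_state, "toy_state", {"prev_tokens": prev_output_tokens}
            )
        # a real decoder returns (logits, extra); the toy just echoes the tokens
        return prev_output_tokens, {}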
|
flash_metaseq-main
|
metaseq/models/incremental_decoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from torch import Tensor
EncoderOut = NamedTuple(
"EncoderOut",
[
("encoder_out", Tensor), # T x B x C
("encoder_padding_mask", Optional[Tensor]), # B x T
("encoder_embedding", Optional[Tensor]), # B x T x C
("encoder_states", Optional[List[Tensor]]), # List[T x B x C]
("src_tokens", Optional[Tensor]), # B x T
("src_lengths", Optional[Tensor]), # B x 1
],
)
class BaseEncoder(nn.Module):
"""Base class for encoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths=None, **kwargs):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
"""
raise NotImplementedError
def forward_torchscript(self, net_input: Dict[str, Tensor]):
"""A TorchScript-compatible version of forward.
Encoders which use additional arguments may want to override
this method for TorchScript compatibility.
"""
if torch.jit.is_scripting():
return self.forward(
src_tokens=net_input["src_tokens"],
src_lengths=net_input["src_lengths"],
)
else:
return self.forward_non_torchscript(net_input)
@torch.jit.unused
def forward_non_torchscript(self, net_input: Dict[str, Tensor]):
encoder_input = {
k: v for k, v in net_input.items() if k != "prev_output_tokens"
}
return self.forward(**encoder_input)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
raise NotImplementedError
def max_positions(self):
"""Maximum input length supported by the encoder."""
        return 1e6  # an arbitrarily large number
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code."""
return state_dict
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
def _apply(m):
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
self.apply(_apply)
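# A minimal sketch of an encoder implementing the interface above: it embeds
# tokens, returns an EncoderOut, and supports reorder_encoder_out for beam
# search. It assumes `dictionary` exposes __len__ and pad() like the metaseq
# Dictionary; the embedding size is an arbitrary choice.
class _ToyEncoder(BaseEncoder):
    def __init__(self, dictionary, embed_dim=16):
        super().__init__(dictionary)
        self.embed = nn.Embedding(
            len(dictionary), embed_dim, padding_idx=dictionary.pad()
        )

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        x = self.embed(src_tokens).transpose(0, 1)  # T x B x C
        return EncoderOut(
            encoder_out=x,
            encoder_padding_mask=src_tokens.eq(self.embed.padding_idx),  # B x T
            encoder_embedding=None,
            encoder_states=None,
            src_tokens=None,
            src_lengths=src_lengths,
        )

    def reorder_encoder_out(self, encoder_out, new_order):
        mask = encoder_out.encoder_padding_mask
        return encoder_out._replace(
            encoder_out=encoder_out.encoder_out.index_select(1, new_order),
            encoder_padding_mask=None if mask is None else mask.index_select(0, new_order),
        )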
|
flash_metaseq-main
|
metaseq/models/base_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import functools
import torch.distributed as dist
from metaseq.dataclass.configs import MetaseqConfig
from metaseq.distributed import utils as distributed_utils
from metaseq.trainer import Trainer
try:
from megatron.mpu import (
get_cuda_rng_tracker,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class MegatronTrainer(Trainer):
"""Main class for model parallel with data parallel training."""
def __init__(self, cfg: MetaseqConfig, task, model, criterion, **kwargs):
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install megatron using the setup instructions!"
)
super().__init__(cfg, task, model, criterion, **kwargs)
def clip_grad_norm(
self, clip_norm, norm_type="l2", skip_gradient_update_on_clip_norm=False
):
def _aggregate_model_parallel_grad_norm(norm_type, total_norm):
norm_type2_reduce_op = {"l2": dist.ReduceOp.SUM, "inf": dist.ReduceOp.MAX}
reduce_op = norm_type2_reduce_op[norm_type]
if norm_type == "l2":
total_norm.pow_(2)
dist.all_reduce(
total_norm,
group=distributed_utils.get_model_parallel_group(),
op=reduce_op,
)
if norm_type == "l2":
total_norm.sqrt_()
return total_norm
return self.optimizer.clip_grad_norm(
clip_norm,
norm_type,
aggregate_norm_fn=functools.partial(
_aggregate_model_parallel_grad_norm, norm_type
),
skip_gradient_update_on_clip_norm=skip_gradient_update_on_clip_norm,
)
def save_checkpoint(self, filename, extra_state, **kwargs):
"""Save all training state in a checkpoint file."""
extra_state["rng_tracker_states"] = get_cuda_rng_tracker().get_states()
super().save_checkpoint(filename, extra_state, **kwargs)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
extra_state = super().load_checkpoint(
filename,
reset_optimizer=reset_optimizer,
reset_lr_scheduler=reset_lr_scheduler,
optimizer_overrides=optimizer_overrides,
reset_meters=reset_meters,
)
if extra_state is not None and "rng_tracker_states" in extra_state:
get_cuda_rng_tracker().set_states(extra_state["rng_tracker_states"])
return extra_state
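# A minimal sketch of the norm aggregation performed by
# _aggregate_model_parallel_grad_norm, written without torch.distributed so it
# can be checked locally: each "rank" holds the l2 norm of its own parameter
# shard, and the global l2 norm is sqrt(sum of squared per-rank norms), which
# is exactly what squaring, all_reduce(SUM), and sqrt compute.
def _example_l2_norm_aggregation():
    import torch

    shards = [torch.randn(10) for _ in range(4)]  # one shard per model-parallel rank
    per_rank_norms = torch.stack([s.norm(p=2) for s in shards])
    aggregated = per_rank_norms.pow(2).sum().sqrt()  # all_reduce(SUM) of squares, then sqrt
    reference = torch.cat(shards).norm(p=2)  # norm over the unsharded parameters
    assert torch.allclose(aggregated, reference)
    return aggregated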
|
flash_metaseq-main
|
metaseq/model_parallel/megatron_trainer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import criterions, models, modules # noqa
|
flash_metaseq-main
|
metaseq/model_parallel/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("metaseq.model_parallel.models." + model_name)
|
flash_metaseq-main
|
metaseq/model_parallel/models/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from metaseq.model_parallel.modules import (
ModelParallelTransformerDecoderLayer,
ModelParallelTransformerEncoderLayer,
)
from metaseq.models.transformer import TransformerDecoder, TransformerEncoder
try:
from megatron.mpu import (
copy_to_tensor_model_parallel_region,
gather_from_tensor_model_parallel_region,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
logger = logging.getLogger(__name__)
class ModelParallelTransformerEncoder(TransformerEncoder):
"""
Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerEncoderLayer`.
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if args.no_final_layer_norm:
self.layer_norm = None
def build_encoder_layer(self, args):
return ModelParallelTransformerEncoderLayer(args)
class ModelParallelTransformerDecoder(TransformerDecoder):
"""
Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerDecoderLayer`.
"""
def build_base_decoder_layer(self, args, no_encoder_attn=False, **kwargs):
return ModelParallelTransformerDecoderLayer(args, no_encoder_attn)
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if not self.share_input_output_embed:
raise NotImplementedError(
"Model parallel training currently requires --share-decoder-input-output-embed"
)
features = copy_to_tensor_model_parallel_region(features)
# project back to size of vocabulary
x = self.output_projection(features)
        # Gather output if the model is in inference mode (i.e. eval_lm or generation),
        # since neither is yet compatible with parallel vocab embeddings
if getattr(self.args, "criterion") != "vocab_parallel_cross_entropy" or getattr(
self, "inference", False
):
x = gather_from_tensor_model_parallel_region(x).contiguous()
return x
    # This hook is used as a proxy for tracking whether the model is in eval or generation mode.
def make_generation_fast_(self, **unused):
self.inference = True
|
flash_metaseq-main
|
metaseq/model_parallel/models/transformer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from metaseq.model_parallel.models.transformer import ModelParallelTransformerDecoder
from metaseq.models import register_model, register_model_architecture
from metaseq.models.transformer_lm import TransformerLanguageModel
try:
from megatron.mpu import VocabParallelEmbedding
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("model_parallel_transformer_lm")
class ModelParallelTransformerLanguageModel(TransformerLanguageModel):
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install megatron using the setup instructions!"
)
# make sure all arguments are present in older models
base_lm_architecture(args)
task.source_dictionary.pad_to_multiple_(8)
task.target_dictionary.pad_to_multiple_(8)
# task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
# task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
assert getattr(
args, "use_sharded_state", False
), "Use sharded state must be True for tensor parallel, otherwise model saving and loaded might be broken"
decoder = ModelParallelTransformerDecoder(
args,
task.target_dictionary,
embed_tokens,
no_encoder_attn=True,
)
return cls(decoder)
@staticmethod
def add_args(parser):
TransformerLanguageModel.add_args(parser)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=embed_dim**-0.5)
nn.init.constant_(tensor[1], 0)
def _vocab_init_megatron(tensor, **kwargs):
nn.init.normal_(
tensor, mean=0, std=getattr(args, "megatron_init_sigma", 0.006)
)
nn.init.constant_(tensor[1], 0)
if getattr(args, "memory_efficient_fp16", False):
dtype = torch.bfloat16 if getattr(args, "bf16", False) else torch.half
else:
dtype = torch.float32
embed_tokens = VocabParallelEmbedding(
len(dictionary),
embed_dim,
dictionary.pad(),
init_method=_vocab_init_megatron
if getattr(args, "full_megatron_init", False)
else _vocab_init,
use_cpu_initialization=not getattr(
args, "tensor_parallel_init_model_on_gpu", False
),
dtype=dtype,
)
return embed_tokens
def base_lm_architecture(args):
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
# Model training is not stable without this
args.decoder_normalize_before = True
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.decoder_learned_sinusoidal = getattr(args, "decoder_learned_sinusoidal", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.add_bos_token = getattr(args, "add_bos_token", False)
@register_model_architecture("model_parallel_transformer_lm", "transformer_lm_megatron")
def transformer_lm_megatron(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 4)
args.decoder_layers = getattr(args, "decoder_layers", 72)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
|
flash_metaseq-main
|
metaseq/model_parallel/models/transformer_lm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from functools import partial
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor, nn
from metaseq import utils
from metaseq.incremental_decoding_utils import with_incremental_state
from metaseq.modules.dropout import Dropout
try:
from megatron.mpu import (
get_cuda_rng_tracker,
get_tensor_model_parallel_world_size,
ColumnParallelLinear,
RowParallelLinear,
split_tensor_along_last_dim,
)
from megatron.model.fused_softmax import ScaledUpperTriangMaskedSoftmax
from megatron.model import utils as megatron_utils
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
import logging
logger = logging.getLogger(__name__)
@with_incremental_state
class ModelParallelMultiheadAttention(nn.Module):
"""Model parallel Multi-headed attention.
This performs the Multi-headed attention over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
self_attention=False,
encoder_decoder_attention=False,
use_cpu_initialization=True,
full_megatron_init=False,
megatron_init_sigma=None,
num_layers=None,
dtype=torch.float32,
):
super().__init__()
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install megatron using the setup instructions!"
)
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.model_parallel_size = get_tensor_model_parallel_world_size()
self.num_heads_partition = num_heads // self.model_parallel_size
assert (
self.num_heads_partition * self.model_parallel_size == num_heads
), "Number of heads must be divisible by model parallel size"
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert (
not self.self_attention or self.qkv_same_dim
), "Self-attention requires query, key and value to be of the same size"
self.combine_qkv_proj = True
if self.combine_qkv_proj:
def _init_method_weight_cpu(weight):
# Following is required to match gshard weight initialization
# because of how megatron splits initialized weights over model
# parallel workers.
model_parallel_matrix_splits = torch.split(
weight,
weight.size(0) // get_tensor_model_parallel_world_size(),
dim=0,
)
k_splits = []
v_splits = []
q_splits = []
for model_parallel_matrix_split in model_parallel_matrix_splits:
k_split, v_split, q_split = torch.split(
model_parallel_matrix_split,
model_parallel_matrix_split.size(0) // 3,
dim=0,
)
k_splits.append(k_split)
v_splits.append(v_split)
q_splits.append(q_split)
fan_in, fan_out = weight.size(0) // 3, weight.size(1)
std = 1 / math.sqrt(float(fan_in + fan_out))
a = (
math.sqrt(3.0) * std
) # Calculate uniform bounds from standard deviation
for k in k_splits:
nn.init._no_grad_uniform_(k, -a, a)
for v in v_splits:
nn.init._no_grad_uniform_(v, -a, a)
for q in q_splits:
nn.init._no_grad_uniform_(q, -a, a)
def _init_method_weight_gpu(weight):
k, v, q = torch.split(weight, weight.size(0) // 3, dim=0)
nn.init.xavier_uniform_(k, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(v, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(q, gain=1 / math.sqrt(2))
def _init_method_bias_cpu(fan_in, bias):
# Following is required to match gshard weight initialization
# because of how megatron splits initialized weights over model
# parallel workers.
model_parallel_bias_splits = torch.split(
bias, bias.size(0) // get_tensor_model_parallel_world_size(), dim=0
)
k_splits = []
v_splits = []
q_splits = []
for model_parallel_bias_split in model_parallel_bias_splits:
k_split, v_split, q_split = torch.split(
model_parallel_bias_split,
model_parallel_bias_split.size(0) // 3,
dim=0,
)
k_splits.append(k_split)
v_splits.append(v_split)
q_splits.append(q_split)
bound = 1 / math.sqrt(fan_in)
for k in k_splits:
nn.init.uniform_(k, -bound, bound)
for v in v_splits:
nn.init.uniform_(v, -bound, bound)
for q in q_splits:
nn.init.uniform_(q, -bound, bound)
def _init_method_bias_gpu(fan_in, bias):
k, v, q = split_tensor_along_last_dim(bias, 3)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(k, -bound, bound)
nn.init.uniform_(v, -bound, bound)
nn.init.uniform_(q, -bound, bound)
if full_megatron_init:
assert megatron_init_sigma is not None
init_method_weights = megatron_utils.init_method_normal(
megatron_init_sigma
)
init_method_bias = None
else:
init_method_weights = (
_init_method_weight_cpu
if use_cpu_initialization
else _init_method_weight_gpu
)
if use_cpu_initialization:
init_method_bias = partial(_init_method_bias_cpu, self.kdim)
else:
init_method_bias = partial(_init_method_bias_gpu, self.kdim)
self.qkv_proj = ColumnParallelLinear(
self.kdim,
3 * embed_dim,
bias=bias,
gather_output=False,
init_method=init_method_weights,
init_method_bias=init_method_bias,
use_cpu_initialization=use_cpu_initialization,
dtype=dtype,
)
else:
def _init_method_weight(weight):
nn.init.xavier_uniform_(weight, gain=1 / math.sqrt(2))
def _init_method_bias(fan_in, bias):
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(bias, -bound, bound)
self.k_proj = ColumnParallelLinear(
self.kdim,
embed_dim,
bias=bias,
gather_output=False,
init_method=_init_method_weight,
init_method_bias=None
if full_megatron_init
else partial(_init_method_bias, self.kdim),
use_cpu_initialization=use_cpu_initialization,
dtype=dtype,
)
self.v_proj = ColumnParallelLinear(
self.vdim,
embed_dim,
bias=bias,
gather_output=False,
init_method=_init_method_weight,
init_method_bias=None
if full_megatron_init
else partial(_init_method_bias, self.vdim),
use_cpu_initialization=use_cpu_initialization,
dtype=dtype,
)
self.q_proj = ColumnParallelLinear(
embed_dim,
embed_dim,
bias=bias,
gather_output=False,
init_method=_init_method_weight,
init_method_bias=None
if full_megatron_init
else partial(_init_method_bias, embed_dim),
use_cpu_initialization=use_cpu_initialization,
dtype=dtype,
)
def _init_method_weight(weight):
nn.init.xavier_uniform_(weight, gain=1)
init_method_weights = _init_method_weight
if full_megatron_init:
assert megatron_init_sigma is not None
assert num_layers is not None
init_method_weights = megatron_utils.scaled_init_method_normal(
megatron_init_sigma, num_layers
)
self.out_proj = RowParallelLinear(
embed_dim,
embed_dim,
bias=bias,
input_is_parallel=True,
init_method=init_method_weights,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
dtype=dtype,
)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
**unused_kwargs,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
# logger.info("query:" + str(query.float().norm().item()))
if self.self_attention:
if self.combine_qkv_proj:
kvq, _ = self.qkv_proj(query)
k, v, q = split_tensor_along_last_dim(
kvq, 3, contiguous_split_chunks=True
)
else:
q, _ = self.q_proj(query)
k, _ = self.k_proj(query)
v, _ = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q, _ = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
                k, _ = self.k_proj(key)
                v, _ = self.v_proj(key)
else:
assert key is not None and value is not None
q, _ = self.q_proj(query)
k, _ = self.k_proj(key)
v, _ = self.v_proj(value)
        # Megatron's fused kernel "ScaledUpperTriangMaskedSoftmax" seems to crash with odd shapes along the seq_len dimension.
        # This is fine for training, where seq_len is a nice power of 2, but during evaluation and generation
        # we encounter seq_lens that are not powers of 2.
CHANGES = not getattr(self, "inference", False)
if CHANGES:
output_size = (
q.size(1),
self.num_heads_partition,
q.size(0),
k.size(0),
)
q = q.view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
if k is not None:
k = k.view(-1, bsz * self.num_heads_partition, self.head_dim)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
matmul_result = torch.empty(
output_size[0] * output_size[1],
output_size[2],
output_size[3],
dtype=q.dtype,
device=torch.cuda.current_device(),
)
            # Scale q and k before the matmul for stability; see https://tinyurl.com/sudb9s96 for the math
matmul_result = torch.baddbmm(
matmul_result,
math.sqrt(self.scaling) * q.transpose(0, 1), # [b * np, sq, hn]
math.sqrt(self.scaling)
* k.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
)
# Replace any non-finite values with finite equivalents, since otherwise
# we may get NaN when adding attn_mask or computing softmax.
if attn_mask is not None:
matmul_result = torch.nan_to_num(matmul_result)
# attention_scores = matmul_result.view(*output_size)
attn_probs = ScaledUpperTriangMaskedSoftmax.apply(matmul_result, 1.0)
# attn_probs = self.scale_mask_softmax(attention_scores,
# attn_mask)
with get_cuda_rng_tracker().fork():
attn_probs = self.dropout_module(attn_probs)
else:
q *= self.scaling
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(
bsz * self.num_heads_partition, -1, self.head_dim
)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(
bsz * self.num_heads_partition, -1, self.head_dim
)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
saved_state["prev_key"] = k.view(
bsz, self.num_heads_partition, -1, self.head_dim
)
saved_state["prev_value"] = v.view(
bsz, self.num_heads_partition, -1, self.head_dim
)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(
incremental_state, saved_state
)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [
bsz * self.num_heads_partition,
tgt_len,
src_len,
]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(
bsz, self.num_heads_partition, tgt_len, src_len
)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(
bsz * self.num_heads_partition, tgt_len, src_len
)
attn_weights_float = utils.softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
with get_cuda_rng_tracker().fork():
attn_probs = self.dropout_module(attn_weights)
# logger.info("attn_probs:" + str(attn_probs.float().norm().item()))
assert v is not None
attn = torch.bmm(attn_probs, v)
# logger.info("attn:" + str(attn.float().norm().item()))
assert list(attn.size()) == [
bsz * self.num_heads_partition,
tgt_len,
self.head_dim,
]
embed_dim_partition = embed_dim // self.model_parallel_size
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition)
attn, attn_bias = self.out_proj(attn)
        # Return attn_weights as None to keep the return type the same as the single-gpu multihead attention.
        # This will be deprecated.
attn_weights: Optional[Tensor] = None
# logger.info("output:" + str(attn.float().norm().item()))
return (attn, attn_bias), attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
if input_buffer[k] is not None:
input_buffer[k] = input_buffer[k].index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
    # This hook is used as a proxy for tracking whether the model is in eval or generation mode.
def make_generation_fast_(self, **unused):
self.inference = True
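# A minimal sketch of the "split the softmax scale across q and k" trick used
# in the baddbmm above: multiplying both q and k by sqrt(scaling) yields the
# same attention scores as scaling the product once, while keeping the
# intermediate values smaller in low precision. Shapes below are arbitrary.
def _example_split_scaling(head_dim=8, seq_len=4, bsz_times_heads=2):
    q = torch.randn(bsz_times_heads, seq_len, head_dim)
    k = torch.randn(bsz_times_heads, seq_len, head_dim)
    scaling = head_dim**-0.5
    scores_once = scaling * torch.bmm(q, k.transpose(1, 2))
    scores_split = torch.bmm(
        math.sqrt(scaling) * q, math.sqrt(scaling) * k.transpose(1, 2)
    )
    assert torch.allclose(scores_once, scores_split, atol=1e-6)
    return scores_split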
|
flash_metaseq-main
|
metaseq/model_parallel/modules/multihead_attention.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .multihead_attention import ModelParallelMultiheadAttention
from .transformer_layer import (
ModelParallelTransformerEncoderLayer,
ModelParallelTransformerDecoderLayer,
)
__all__ = [
"ModelParallelMultiheadAttention",
"ModelParallelTransformerEncoderLayer",
"ModelParallelTransformerDecoderLayer",
]
|
flash_metaseq-main
|
metaseq/model_parallel/modules/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch import nn, Tensor
from metaseq.model_parallel.modules import ModelParallelMultiheadAttention
from metaseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
try:
from megatron.mpu import (
ColumnParallelLinear,
RowParallelLinear,
)
from megatron.model import utils as megatron_utils
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer):
"""Encoder layer block over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(self, input_dim, output_dim):
return ColumnParallelLinear(
input_dim, output_dim, gather_output=False, skip_bias_add=True
)
def build_fc2(self, input_dim, output_dim):
return RowParallelLinear(
input_dim, output_dim, input_is_parallel=True, skip_bias_add=True
)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
)
def _weight_init(weight):
return nn.init.kaiming_uniform_(weight, a=math.sqrt(5))
class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer):
"""Decoder layer block.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(
self,
input_dim,
output_dim,
initialize_params_on_gpu,
full_megatron_init,
megatron_init_sigma,
dtype,
):
def _init_method_bias(bias):
fan_in = input_dim
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(bias, -bound, bound)
if full_megatron_init:
            # Setting the bias init method to None initializes biases to zero.
init_method_weights = megatron_utils.init_method_normal(megatron_init_sigma)
init_method_bias = None
else:
init_method_weights = _weight_init
init_method_bias = _init_method_bias
return ColumnParallelLinear(
input_dim,
output_dim,
gather_output=False,
init_method=init_method_weights,
skip_bias_add=self.skip_bias_add,
init_method_bias=init_method_bias,
use_cpu_initialization=not initialize_params_on_gpu,
dtype=dtype,
)
def build_fc2(
self,
input_dim,
output_dim,
initialize_params_on_gpu,
full_megatron_init,
megatron_init_sigma,
num_layers,
dtype,
):
skip_bias_add = self.skip_bias_add
if full_megatron_init:
init_method_weights = megatron_utils.scaled_init_method_normal(
megatron_init_sigma, num_layers
)
else:
init_method_weights = _weight_init
fc2 = RowParallelLinear(
input_dim,
output_dim,
input_is_parallel=True,
init_method=init_method_weights,
skip_bias_add=skip_bias_add,
use_cpu_initialization=not initialize_params_on_gpu,
dtype=dtype,
)
if not full_megatron_init:
            # Copy nn.Linear initialization to get the same initialization as in the non-model-parallel case.
# fan_in, _ = nn.init._calculate_fan_in_and_fan_out(fc2.weight)
fan_in = input_dim
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(fc2.bias, -bound, bound)
return fc2
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=not getattr(args, "cross_self_attention", False),
use_cpu_initialization=not getattr(
args, "tensor_parallel_init_model_on_gpu", False
),
full_megatron_init=getattr(args, "full_megatron_init", False),
megatron_init_sigma=getattr(args, "megatron_init_sigma", 0.006),
num_layers=args.decoder_layers,
dtype=self._get_model_init_dtype(),
)
def build_encoder_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
full_megatron_init=getattr(args, "full_megatron_init", False),
megatron_init_sigma=getattr(args, "megatron_init_sigma", 0.006),
num_layers=args.decoder_layers,
dtype=self._get_model_init_dtype(),
)
def forward_attention(
self,
query,
key,
value,
residual,
key_padding_mask=None,
incremental_state=None,
need_weights=False,
attn_mask=None,
):
(attn_output, attn_bias), attn_weights = self.self_attn(
query=query,
key=key,
value=value,
key_padding_mask=key_padding_mask,
incremental_state=incremental_state,
need_weights=need_weights,
attn_mask=attn_mask,
)
if self.training:
bias_dropout_add_func = bias_dropout_add_fused_train
else:
bias_dropout_add_func = bias_dropout_add_fused_inference
if self.c_attn is not None:
# NormFormer Head Scaling Logic
tgt_len, bsz = attn_output.size(0), attn_output.size(1)
attn_output = attn_output.view(tgt_len, bsz, self.nh, self.head_dim)
attn_output = torch.einsum("tbhd,h->tbhd", attn_output, self.c_attn)
attn_output = attn_output.reshape(tgt_len, bsz, self.embed_dim)
if self.attn_ln is None:
x = bias_dropout_add_func(
attn_output, attn_bias.view(1, 1, -1), residual, self.args.dropout
)
else:
x = torch.nn.functional.dropout(
attn_output + attn_bias.view(1, 1, -1),
p=self.args.dropout,
training=self.training,
)
x = self.attn_ln(x)
x = residual + x
return x, attn_weights
def bias_dropout_add(x, bias, residual, prob, training):
# type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
out = residual + out
return out
def get_bias_dropout_add(training):
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
@torch.jit.script
def bias_dropout_add_fused_train(x, bias, residual, prob):
# type: (Tensor, Tensor, Tensor, float) -> Tensor
return bias_dropout_add(x, bias, residual, prob, True)
@torch.jit.script
def bias_dropout_add_fused_inference(x, bias, residual, prob):
# type: (Tensor, Tensor, Tensor, float) -> Tensor
return bias_dropout_add(x, bias, residual, prob, False)
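# A minimal sketch checking the fused helpers above: at inference time dropout
# is the identity, so bias_dropout_add_fused_inference reduces to a plain
# residual-plus-bias addition. Shapes are arbitrary.
def _example_bias_dropout_add_inference():
    x = torch.randn(4, 2, 8)
    bias = torch.randn(8)
    residual = torch.randn(4, 2, 8)
    out = bias_dropout_add_fused_inference(x, bias, residual, 0.1)
    assert torch.allclose(out, residual + x + bias)
    return out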
|
flash_metaseq-main
|
metaseq/model_parallel/modules/transformer_layer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from metaseq import metrics, utils
from metaseq.criterions import BaseCriterion, register_criterion
try:
from megatron.mpu.cross_entropy import (
vocab_parallel_cross_entropy,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@register_criterion("vocab_parallel_cross_entropy")
class VocabParallelCrossEntropyCriterion(BaseCriterion):
def __init__(self, task):
super().__init__(task)
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install megatron using the setup instructions!"
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
target = sample["target"]
has_pad = target.eq(self.padding_idx).any().item()
net_output = model(**sample["net_input"])
loss = vocab_parallel_cross_entropy(net_output[0].float(), target)
if has_pad:
loss = loss * (target != self.padding_idx)
loss = loss.sum()
# When using target loss only, use num tokens in target only as the sample_size
# See StreamingSrcTgtDataset
sample_size = (
sample["ntokens_target"]
if "ntokens_target" in sample
else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if "src_tokens" in sample["net_input"] and hasattr(self.task, "eod"):
logging_output["ndocseps"] = (sample["target"] == self.task.eod).sum()
if (
len(net_output) >= 2
and isinstance(net_output[1], dict)
and "inner_states" in net_output[1]
):
with torch.no_grad():
# yank out the inner states we wish to instrument
# see transformer.py TransformerDecoder.extract_features_scriptable
emb, *_, actv = net_output[1]["inner_states"]
assert isinstance(
emb, dict
), "Expecting the first inner state to be a dict of embedding representations"
emb["actv"] = actv # throw on final for code brevity
for key, value in emb.items():
if value is None:
# maybe future proofing relative positional embeddings
continue
value = emb[key]
logging_output[f"{key}_norm"] = value.norm(p=2, dim=-1).sum(
dtype=torch.float32
)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
for type_ in ("actv", "pos", "tok", "emb"):
key = f"{type_}_norm"
if any(key in log for log in logging_outputs):
actv_norm = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(key, actv_norm / ntokens, round=3)
if any("ndocseps" in log for log in logging_outputs):
# nsentences = batch size
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
# ndocseps = number of document separators we found
ndocseps = sum(log.get("ndocseps", 0) for log in logging_outputs)
            # so docs/example = 1 + ndocseps/nsentences = (ndocseps + nsentences) / nsentences
metrics.log_scalar("docsperex", (ndocseps + nsentences) / nsentences)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
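# A minimal sketch of the unit conversion done in reduce_metrics above: the
# summed cross-entropy is in nats, dividing by sample_size and ln(2) gives
# bits per token, and perplexity is 2 ** (bits per token), matching what
# utils.get_perplexity computes for base-2 losses. The numbers are arbitrary.
def _example_loss_to_ppl(loss_sum_nats=693.147, sample_size=1000):
    bits_per_token = loss_sum_nats / sample_size / math.log(2)
    ppl = 2**bits_per_token
    return bits_per_token, ppl  # ~1.0 bit/token -> perplexity ~2.0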
|
flash_metaseq-main
|
metaseq/model_parallel/criterions/vocab_parallel_cross_entropy.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("metaseq.model_parallel.criterions." + module)
|
flash_metaseq-main
|
metaseq/model_parallel/criterions/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
flash_metaseq-main
|
metaseq/scripts/__init__.py
|
#!/usr/bin/env python
"""
Script for backing out of the MP-resharded (reshard.pt) files and getting back
a non-flattened state dict.
Particularly useful for converting our models to other repositories.
Usage:
$ ls 125m
dict.txt
gpt2-merges.txt
gpt2-vocab.json
reshard-model_part-0.pt
reshard-model_part-1.pt
$ python -m metaseq.scripts.convert_to_singleton 125m
$ ls 125m
dict.txt
gpt2-merges.txt
gpt2-vocab.json
reshard-model_part-0.pt
reshard-model_part-1.pt
restored.pt
"""
import argparse
import glob
import logging
import os
import sys
import torch
from metaseq import options, tasks, checkpoint_utils, utils
from metaseq.dataclass.configs import MetaseqConfig
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
from metaseq.distributed import utils as dist_utils
from metaseq.distributed import fsdp_enable_wrap, fsdp_wrap
from metaseq.distributed.stitch_fsdp_ckpt import glue_megatron_parts
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("convert_to_singleton")
def worker_main(cfg: MetaseqConfig):
"""
Load up the model on all workers for Model Parallelism, then
unflatten, move to cpu, and save to "restored.pt".
"""
task = tasks.setup_task(cfg.task)
def _build_model(cfg, task):
        # hardcoded to fp16 on GPU
model = task.build_model(cfg.model).half().cuda()
return fsdp_wrap(model)
with fsdp_enable_wrap(
cfg.distributed_training,
use_sharded_state=cfg.distributed_training.use_sharded_state,
):
models, _model_args, _task = checkpoint_utils.load_model_ensemble_and_task(
utils.split_paths(cfg.common_eval.path),
arg_overrides=None,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=True,
num_shards=cfg.checkpoint.checkpoint_shard_count,
build_model_hook=_build_model,
)
model = models[0]
# consolidate everything on rank0
mp_size = dist_utils.get_model_parallel_world_size()
model_parts = [{} for _ in range(mp_size)]
with model.summon_full_params():
for name, p in model.named_parameters():
gathered = [torch.zeros_like(p) for _ in range(mp_size)]
torch.distributed.all_gather(
gathered, p, group=dist_utils.get_global_group()
)
for r, t in enumerate(gathered):
model_parts[r][name] = t.cpu()
glued = glue_megatron_parts(model_parts)
# glued['decoder.output_projection.weight'] = glued['decoder.embed_tokens.weight']
if "decoder.output_projection.weight" in glued:
del glued["decoder.output_projection.weight"]
output_sd = checkpoint_utils.load_checkpoint_to_cpu(
cfg.common_eval.path.replace("reshard.pt", "reshard-model_part-0.pt")
)
output_sd["model"] = utils.move_to_cpu(glued)
output_sd["cfg"]["model"].arch = "transformer_lm"
if dist_utils.get_global_rank() == 0:
with open(cfg.task.data + "/restored.pt", "wb") as f:
torch.save(output_sd, f)
def main():
# parser to be used like docstring shows
real_parser = argparse.ArgumentParser()
real_parser.add_argument("location")
args = real_parser.parse_args()
files = glob.glob(f"{args.location}/reshard*.pt")
MP = len(files)
BPE_MERGES = args.location + "/gpt2-merges.txt"
BPE_VOCAB = args.location + "/gpt2-vocab.json"
# Skeleton out all the annoying command line args we can infer
ARGS = [
"--model-parallel-size",
str(MP),
"--distributed-world-size",
str(MP),
"--task",
"language_modeling",
"--bpe-merges",
BPE_MERGES,
"--bpe-vocab",
BPE_VOCAB,
"--bpe",
"hf_byte_bpe",
"--path",
args.location + "/reshard.pt",
"--checkpoint-shard-count",
"1",
"--use-sharded-state",
args.location,
]
print(ARGS)
# build up the config file
parser = options.get_generation_parser()
# dumb defaults overriding
parser.set_defaults(lr_scheduler=None, criterion=None)
args = options.parse_args_and_arch(parser, input_args=ARGS)
cfg = convert_namespace_to_omegaconf(args)
cfg.distributed_training.distributed_world_size = MP
dist_utils.call_main(cfg, worker_main)
if __name__ == "__main__":
main()
|
flash_metaseq-main
|
metaseq/scripts/convert_to_singleton.py
|
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from metaseq.distributed.stitch_fsdp_ckpt import consolidate_fsdp_shards
import fire
if __name__ == "__main__":
# This is expected to be used before evaluation, not during training.
fire.Fire(consolidate_fsdp_shards)
|
flash_metaseq-main
|
metaseq/scripts/consolidate_fsdp_shards.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
from pathlib import Path
from typing import List
import torch
import torch.nn.functional as F
from fire import Fire
from tqdm import tqdm
from metaseq.checkpoint_utils import (
get_paths_to_load,
_merge_flat_fsdp_shards,
OPT_KEY,
is_singleton_tensor,
)
from metaseq.file_io import torch_load_cpu
logging.basicConfig(
format="%(asctime)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
stream=sys.stdout,
)
logger = logging.getLogger("mp_reshard")
def reshard_all_parts(
save_prefix, save_dir, mpart=16, target_ddp_size=512, no_pad=False
):
for i in range(mpart):
try:
reshard_mp(
save_prefix,
save_dir,
part=i,
target_ddp_size=target_ddp_size,
no_pad=no_pad,
)
except FileNotFoundError:
logger.info(f"Resharded {i} model parts")
return
def _save_shards_to_disk(
local_state_dicts,
dummy_model_state,
state,
save_dir,
middle,
local_opt_states=None,
target_ddp_size=512,
):
Path(save_dir).mkdir(exist_ok=True)
for i, local_state_dict in tqdm(
enumerate(local_state_dicts),
desc=f"Saving to {save_dir}/reshard-{middle}-shard[i].pt",
):
if target_ddp_size == 1:
save_path = f"{save_dir}/reshard-{middle}.pt"
else:
save_path = f"{save_dir}/reshard-{middle}-shard{i}.pt"
local_state_dict.update(dummy_model_state)
full_state = {"model": local_state_dict}
full_state.update(state)
if local_opt_states is not None:
full_state[OPT_KEY] = local_opt_states[i]
torch.save(full_state, save_path)
def reshard_mp(
save_prefix,
save_dir,
part=0,
target_ddp_size=512,
no_pad=False,
drop_optimizer_state=False,
):
middle = f"model_part-{part}"
do_pad = not no_pad
if not Path(f"{save_prefix}-{middle}-shard0.pt").exists():
raise FileNotFoundError(f"{save_prefix}-{middle}-shard0.pt")
paths_to_load = get_paths_to_load(
f"{save_prefix}-{middle}-shard0.pt", suffix="-shard"
)
logger.info(
f"Loading {len(paths_to_load)} paths for MP part{part}. Will shard into {target_ddp_size} files."
)
state = _merge_flat_fsdp_shards([torch_load_cpu(f) for f in paths_to_load])
model_state = state.pop("model")
dummy_model_state = {} # for decoder.version and other useless keys
local_state_dicts: List[dict] = [{} for _ in range(target_ddp_size)]
for k, v in model_state.items():
v = v.half()
if "flat_param" not in k:
dummy_model_state[k] = v
continue
chunks = list(torch.flatten(v).chunk(target_ddp_size))
assert len(chunks) == target_ddp_size
num_to_pad = chunks[0].numel() - chunks[-1].numel()
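        # e.g. (illustrative): a 10-element flat param chunked into 4 shards
        # gives sizes [3, 3, 3, 1], so num_to_pad = 3 - 1 = 2 zeros on the last shard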
# Same logic as https://tinyurl.com/fairscale but there is no padding allowed!
# Notes on padding: https://github.com/fairinternal/fairseq-py/issues/2894
for rank, param in enumerate(chunks):
# This clone is essential. Not sure why.
local_state_dicts[rank][k] = param.clone()
if num_to_pad > 0 and do_pad:
local_state_dicts[-1][k] = F.pad(local_state_dicts[-1][k], [0, num_to_pad])
logger.info(f"Padding {k} with {num_to_pad} zeros")
state.pop("shard_metadata") # TODO: update shard metadata to be accurate
# DO OPT STATE HERE
if drop_optimizer_state and OPT_KEY in state:
state.pop(OPT_KEY)
if OPT_KEY not in state:
_save_shards_to_disk(
local_state_dicts, dummy_model_state, state, save_dir, middle
)
return
merged_opt_state = state.pop(OPT_KEY)
local_opt_states: List[dict] = [{"state": {}} for _ in range(target_ddp_size)]
for k in merged_opt_state["state"].keys():
# 0,1,2,3... if each layer wrapped, else 0
for k2 in merged_opt_state["state"][k].keys():
for i in range(target_ddp_size):
if k not in local_opt_states[i]["state"]:
local_opt_states[i]["state"][k] = {}
catted = merged_opt_state["state"][k][k2]
if not torch.is_tensor(catted) or is_singleton_tensor(catted):
for i in range(target_ddp_size):
local_opt_states[i]["state"][k][k2] = catted
else:
chunks = list(torch.flatten(catted).chunk(target_ddp_size))
assert len(chunks) == target_ddp_size
num_to_pad = chunks[0].numel() - chunks[-1].numel()
for rank, param in enumerate(chunks):
# This clone is essential. Not sure why.
local_opt_states[rank]["state"][k][k2] = param.clone()
if num_to_pad > 0 and do_pad:
local_opt_states[-1]["state"][k][k2] = F.pad(
local_opt_states[-1]["state"][k][k2], [0, num_to_pad]
)
    # Update opt keys that aren't state
for k in merged_opt_state.keys():
if k == "state":
continue
for i in range(target_ddp_size):
local_opt_states[i][k] = merged_opt_state[k]
_save_shards_to_disk(
local_state_dicts,
dummy_model_state,
state,
save_dir,
middle,
local_opt_states=local_opt_states,
target_ddp_size=target_ddp_size,
)
"""
python scripts/reshard_mp.py $model_dir/checkpoint_last 125_mp_reshard --mpart 0
"""
if __name__ == "__main__":
Fire(reshard_mp)
|
flash_metaseq-main
|
metaseq/scripts/reshard_mp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from dataclasses import _MISSING_TYPE, dataclass, field
from typing import Any, List, Optional
import torch
from omegaconf import II, MISSING
from metaseq.dataclass.constants import (
DATASET_IMPL_CHOICES,
DDP_BACKEND_CHOICES,
LOG_FORMAT_CHOICES,
ZERO_SHARDING_CHOICES,
CLIP_GRAD_NORM_TYPE_CHOICES,
)
@dataclass
class MetaseqDataclass:
"""metaseq base dataclass that supported fetching attributes and metas"""
_name: Optional[str] = None
@staticmethod
def name():
return None
def positional_args(self):
return ["data"]
def _get_all_attributes(self) -> List[str]:
return [k for k in self.__dataclass_fields__.keys()]
def _get_meta(
self, attribute_name: str, meta: str, default: Optional[Any] = None
) -> Any:
return self.__dataclass_fields__[attribute_name].metadata.get(meta, default)
def _get_name(self, attribute_name: str) -> str:
return self.__dataclass_fields__[attribute_name].name
def _get_default(self, attribute_name: str) -> Any:
if hasattr(self, attribute_name):
if str(getattr(self, attribute_name)).startswith("${"):
return str(getattr(self, attribute_name))
elif str(self.__dataclass_fields__[attribute_name].default).startswith(
"${"
):
return str(self.__dataclass_fields__[attribute_name].default)
elif (
getattr(self, attribute_name)
!= self.__dataclass_fields__[attribute_name].default
):
return getattr(self, attribute_name)
f = self.__dataclass_fields__[attribute_name]
if not isinstance(f.default_factory, _MISSING_TYPE):
return f.default_factory()
return f.default
def _get_type(self, attribute_name: str) -> Any:
return self.__dataclass_fields__[attribute_name].type
def _get_help(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "help")
def _get_argparse_const(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "argparse_const")
def _get_argparse_alias(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "argparse_alias")
def _get_choices(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "choices")
@dataclass
class CommonConfig(MetaseqDataclass):
# This is the core dataclass including common parameters shared by all
# different jobs. Please append your params to other dataclasses if they
# were used for a particular purpose or task, such as those dedicated for
# `distributed training`, `optimization`, etc.
log_interval: int = field(
default=100,
metadata={
"help": "log progress every N batches (when progress bar is disabled)"
},
)
log_format: Optional[LOG_FORMAT_CHOICES] = field(
default=None, metadata={"help": "log format to use"}
)
log_file: Optional[str] = field(
default=None, metadata={"help": "log file to copy metrics to."}
)
tensorboard_logdir: Optional[str] = field(
default=None,
metadata={
"help": "path to save logs for tensorboard, should match --logdir "
"of running tensorboard (default: no tensorboard logging)"
},
)
wandb_project: Optional[str] = field(
default=None,
metadata={"help": "Weights and Biases project name to use for logging"},
)
azureml_logging: Optional[bool] = field(
default=False,
metadata={"help": "Log scalars to AzureML context"},
)
seed: int = field(
default=1, metadata={"help": "pseudo random number generator seed"}
)
cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"})
fp16: bool = field(default=False, metadata={"help": "use FP16"})
memory_efficient_fp16: bool = field(
default=False,
metadata={
"help": "use a memory-efficient version of FP16 training; implies --fp16"
},
)
bf16: bool = field(
default=False,
metadata={
"help": "use BF16 format"
" Currently --bf16 is an added argument with --fp16 for mixed precision bf16 training"
" or with --memory-efficient-fp16 for pure bf16 training."
},
)
fp16_no_flatten_grads: bool = field(
default=False, metadata={"help": "don't flatten FP16 grads tensor"}
)
fp16_init_scale: int = field(
default=4, metadata={"help": "default FP16 loss scale"}
)
fp16_scale_window: Optional[int] = field(
default=256,
metadata={"help": "number of updates before increasing loss scale"},
)
fp16_scale_tolerance: float = field(
default=0.0,
metadata={
"help": "pct of updates that can overflow before decreasing the loss scale"
},
)
min_loss_scale: float = field(
default=2**-5,
metadata={"help": "minimum FP16 loss scale, after which training is stopped"},
)
threshold_loss_scale: Optional[float] = field(
default=None, metadata={"help": "threshold FP16 loss scale from below"}
)
user_dir: Optional[str] = field(
default=None,
metadata={
"help": "path to a python module containing custom extensions (tasks and/or architectures)"
},
)
empty_cache_freq: int = field(
default=0,
metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"},
)
all_gather_list_size: int = field(
default=16384,
metadata={"help": "number of bytes reserved for gathering stats from workers"},
)
model_parallel_size: int = field(
default=1, metadata={"help": "total number of GPUs to parallelize model over"}
)
profile: bool = field(
default=False, metadata={"help": "enable autograd profiler emit_nvtx"}
)
use_plasma_view: bool = field(
default=False, metadata={"help": "Store indices and sizes in shared memory"}
)
plasma_path: Optional[str] = field(
default="/tmp/plasma",
metadata={
"help": "path to run plasma_store, defaults to /tmp/plasma. Paths outside /tmp tend to fail."
},
)
log_nvidia_smi: bool = field(
default=False, metadata={"help": "log output from nvidia-smi during training"}
)
new_profiler: bool = field(
default=False, metadata={"help": "use pytorch profiler (v2)"}
)
dont_log_param_and_grad_norm: Optional[bool] = field(
default=False,
metadata={
"help": "Don't log grad/param norms for each parameter.",
"argparse_alias": "--quiet",
},
)
@dataclass
class DistributedTrainingConfig(MetaseqDataclass):
distributed_world_size: int = field(
default=max(1, torch.cuda.device_count()),
metadata={
"help": "total number of GPUs across all nodes (default: all visible GPUs)"
},
)
distributed_rank: Optional[int] = field(
default=0, metadata={"help": "rank of the current worker"}
)
distributed_backend: str = field(
default="nccl", metadata={"help": "distributed backend"}
)
distributed_init_method: Optional[str] = field(
default=None,
metadata={
"help": "typically tcp://hostname:port that will be used to "
"establish initial connetion"
},
)
distributed_port: int = field(
default=-1,
metadata={
"help": "port number (not required if using --distributed-init-method)"
},
)
device_id: int = field(
default=0,
metadata={
"help": "which GPU to use (usually configured automatically)",
"argparse_alias": "--local_rank",
},
)
distributed_no_spawn: bool = field(
default=False,
metadata={
"help": "do not spawn multiple processes even if multiple GPUs are visible"
},
)
ddp_backend: DDP_BACKEND_CHOICES = field(
default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"}
)
bucket_cap_mb: int = field(
default=25, metadata={"help": "bucket size for reduction"}
)
fix_batches_to_gpus: bool = field(
default=False,
metadata={
"help": "don't shuffle batches between GPUs; this reduces overall "
"randomness and may affect precision but avoids the cost of re-reading the data"
},
)
find_unused_parameters: bool = field(
default=False,
metadata={
"help": "disable unused parameter detection (not applicable to "
"--ddp-backend=legacy_ddp)"
},
)
fast_stat_sync: bool = field(
default=False,
metadata={"help": "[deprecated] this is now defined per Criterion"},
)
heartbeat_timeout: int = field(
default=-1,
metadata={
"help": "kill the job if no progress is made in N seconds; "
"set to -1 to disable"
},
)
broadcast_buffers: bool = field(
default=False,
metadata={
"help": "Copy non-trainable parameters between GPUs, such as "
"batchnorm population statistics"
},
)
zero_sharding: ZERO_SHARDING_CHOICES = field(
default="none", metadata={"help": "ZeRO sharding"}
)
fp16: bool = II("common.fp16")
memory_efficient_fp16: bool = II("common.memory_efficient_fp16")
bf16: bool = II("common.bf16")
# configuration for --ddp-backend=fully_sharded
no_reshard_after_forward: bool = field(
default=False,
metadata={"help": "don't reshard parameters after forward pass"},
)
fp32_reduce_scatter: bool = field(
default=False,
metadata={"help": "reduce-scatter grads in FP32"},
)
cpu_offload: bool = field(
default=False, metadata={"help": "offload FP32 params to CPU"}
)
use_sharded_state: Optional[bool] = field(
default=False, metadata={"help": "load and save local state dict"}
)
gradient_predivide_factor: Optional[float] = field(
default=None,
metadata={"help": "factor to predivide gradients before reducee scatter"},
)
@dataclass
class DatasetConfig(MetaseqDataclass):
num_workers: int = field(
default=1, metadata={"help": "how many subprocesses to use for data loading"}
)
num_workers_valid: int = field(
default=0,
metadata={
"help": "how many subprocesses to use for data loading during validation"
},
)
skip_invalid_size_inputs_valid_test: bool = field(
default=False,
metadata={"help": "ignore too long or too short lines in valid and test set"},
)
max_tokens: Optional[int] = field(
default=None, metadata={"help": "maximum number of tokens in a batch"}
)
batch_size: Optional[int] = field(
default=None,
metadata={
"help": "number of examples in a batch",
"argparse_alias": "--max-sentences",
},
)
required_batch_size_multiple: int = field(
        default=8, metadata={"help": "batch size will be a multiple of this value"}
)
dataset_impl: Optional[DATASET_IMPL_CHOICES] = field(
default=None, metadata={"help": "output dataset implementation"}
)
data_buffer_size: int = field(
default=10, metadata={"help": "Number of batches to preload"}
)
train_subset: str = field(
default="train",
metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
)
valid_subset: str = field(
default="valid",
metadata={
"help": "comma separated list of data subsets to use for validation"
" (e.g. train, valid, test)"
},
)
combine_valid_subsets: Optional[bool] = field(
default=None,
metadata={
"help": "comma separated list of data subsets to use for validation"
" (e.g. train, valid, test)",
"argparse_alias": "--combine-val",
},
)
ignore_unused_valid_subsets: Optional[bool] = field(
default=False,
metadata={"help": "do not raise error if valid subsets are ignored"},
)
validate_interval: int = field(
default=1, metadata={"help": "validate every N epochs"}
)
validate_interval_updates: int = field(
default=0, metadata={"help": "validate every N updates"}
)
validate_after_updates: int = field(
        default=0, metadata={"help": "don't validate until reaching this many updates"}
)
fixed_validation_seed: Optional[int] = field(
default=None, metadata={"help": "specified random seed for validation"}
)
disable_validation: bool = field(
default=False, metadata={"help": "disable validation"}
)
max_tokens_valid: Optional[int] = field(
default=II("dataset.max_tokens"),
metadata={
"help": "maximum number of tokens in a validation batch"
" (defaults to --max-tokens)"
},
)
batch_size_valid: Optional[int] = field(
default=II("dataset.batch_size"),
metadata={
"help": "batch size of the validation batch (defaults to --batch-size)",
"argparse_alias": "--max-sentences-valid",
},
)
max_valid_steps: Optional[int] = field(
default=None,
metadata={"help": "How many batches to evaluate", "argparse_alias": "--nval"},
)
gen_subset: str = field(
default="test",
metadata={"help": "data subset to generate (train, valid, test)"},
)
num_shards: int = field(
default=1, metadata={"help": "shard generation over N shards"}
)
shard_id: int = field(
default=0, metadata={"help": "id of the shard to generate (id < num_shards)"}
)
@dataclass
class OptimizationConfig(MetaseqDataclass):
max_epoch: int = field(
default=0, metadata={"help": "force stop training at specified epoch"}
)
max_update: int = field(
default=0, metadata={"help": "force stop training at specified update"}
)
clip_norm: float = field(
default=0.0, metadata={"help": "clip threshold of gradients"}
)
clip_norm_type: Optional[CLIP_GRAD_NORM_TYPE_CHOICES] = field(
default="l2",
metadata={"help": "either 'l2' or 'inf' to clip by l2 norm or max abs grad"},
)
skip_gradient_update_on_clip_norm: bool = field(
default=False,
metadata={
"help": "Skip gradient update if gnorm is higher than --clip-norm value"
},
)
update_freq: List[int] = field(
default_factory=lambda: [1],
metadata={"help": "update parameters every N_i batches, when in epoch i"},
)
lr: List[float] = field(
default_factory=lambda: [0.25],
metadata={
"help": "learning rate for the first N epochs; all epochs >N using LR_N"
" (note: this may be interpreted differently depending on --lr-scheduler)"
},
)
train_with_epoch_remainder_batch: Optional[bool] = field(
default=False,
metadata={
"help": "if set, include the last (partial) batch of each epoch in training"
" (default is to skip it)."
},
)
@dataclass
class CheckpointConfig(MetaseqDataclass):
save_dir: str = field(
default="checkpoints", metadata={"help": "path to save checkpoints"}
)
restore_file: str = field(
default="checkpoint_last.pt",
metadata={
"help": "filename from which to load checkpoint "
"(default: <save-dir>/checkpoint_last.pt"
},
)
finetune_from_model: Optional[str] = field(
default=None,
metadata={
"help": "finetune from a pretrained model; note that meters and lr scheduler will be reset"
},
)
reset_dataloader: bool = field(
default=False,
metadata={
"help": "if set, does not reload dataloader state from the checkpoint"
},
)
reset_lr_scheduler: bool = field(
default=False,
metadata={
"help": "if set, does not load lr scheduler state from the checkpoint"
},
)
reset_meters: bool = field(
default=False,
metadata={"help": "if set, does not load meters from the checkpoint"},
)
reset_optimizer: bool = field(
default=False,
metadata={"help": "if set, does not load optimizer state from the checkpoint"},
)
optimizer_overrides: str = field(
default="{}",
metadata={
"help": "a dictionary used to override optimizer args when loading a checkpoint"
},
)
save_interval: int = field(
default=1, metadata={"help": "save a checkpoint every N epochs"}
)
save_interval_updates: int = field(
default=0, metadata={"help": "save a checkpoint (and validate) every N updates"}
)
keep_interval_updates: int = field(
default=-1,
metadata={
"help": "keep the last N checkpoints saved with --save-interval-updates"
},
)
keep_last_epochs: int = field(
default=-1, metadata={"help": "keep last N epoch checkpoints"}
)
keep_best_checkpoints: int = field(
default=-1, metadata={"help": "keep best N checkpoints based on scores"}
)
no_save: bool = field(
default=False, metadata={"help": "don't save models or checkpoints"}
)
no_epoch_checkpoints: bool = field(
default=False, metadata={"help": "only store last and best checkpoints"}
)
no_last_checkpoints: bool = field(
default=False, metadata={"help": "don't store last checkpoints"}
)
no_best_checkpoints: bool = field(
default=False, metadata={"help": "don't store best checkpoints"}
)
no_save_optimizer_state: bool = field(
default=False,
metadata={"help": "don't save optimizer-state as part of checkpoint"},
)
no_save_optimizer_state_on_training_finished: bool = field(
default=False,
metadata={
"help": "don't save optimizer-state as part of checkpoint when training is done"
},
)
best_checkpoint_metric: str = field(
default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'}
)
maximize_best_checkpoint_metric: bool = field(
default=False,
metadata={
"help": 'select the largest metric value for saving "best" checkpoints'
},
)
patience: int = field(
default=-1,
metadata={
"help": (
"early stop training if valid performance doesn't "
"improve for N consecutive validation runs; note "
"that this is influenced by --validate-interval"
)
},
)
checkpoint_suffix: str = field(
default="", metadata={"help": "suffix to add to the checkpoint file name"}
)
checkpoint_shard_count: int = field(
default=1,
metadata={
"help": "Number of shards containing the checkpoint - "
"if the checkpoint is over 300GB, it is preferable "
"to split it into shards to prevent OOM on CPU while loading "
"the checkpoint"
},
)
load_checkpoint_on_all_dp_ranks: bool = field(
default=False,
metadata={
"help": "load checkpoints on all data parallel devices "
"(default: only load on rank 0 and broadcast to other devices)"
},
)
write_checkpoints_asynchronously: bool = field(
default=False,
metadata={
"help": (
"Write checkpoints asynchronously in a separate "
"thread. NOTE: This feature is currently being tested."
),
"argparse_alias": "--save-async",
},
)
cloud_upload_path: Optional[str] = field(
default=None,
metadata={
"help": (
"Upload checkpoints asynchronously in a separate "
"thread to blob store. NOTE: This feature is currently being tested."
),
"argparse_alias": "--cloud-dir",
},
)
# TODO(susanz): After https://github.com/fairinternal/fairseq-big-internal/issues/22 is tackled, modify this
# to use ComputeEnvs constant
cluster_env: str = field(
default="fair",
metadata={"help": "cluster we are running on: azure/aws/fair/rsc"},
)
model_parallel_size: int = II("common.model_parallel_size")
@dataclass
class GenerationConfig(MetaseqDataclass):
beam: int = field(
default=5,
metadata={"help": "beam size"},
)
nbest: int = field(
default=1,
metadata={"help": "number of hypotheses to output"},
)
max_len_a: float = field(
default=0,
metadata={
"help": "generate sequences of maximum length ax + b, where x is the source length"
},
)
max_len_b: int = field(
default=200,
metadata={
"help": "generate sequences of maximum length ax + b, where x is the source length"
},
)
min_len: int = field(
default=1,
metadata={"help": "minimum generation length"},
)
sampling: bool = field(
default=False,
metadata={"help": "sample hypotheses instead of using beam search"},
)
sampling_topk: int = field(
default=-1,
metadata={"help": "sample from top K likely next words instead of all words"},
)
sampling_topp: float = field(
default=-1.0,
metadata={
"help": "sample from the smallest set whose cumulative probability mass exceeds p for next words"
},
)
temperature: float = field(
default=1.0,
metadata={"help": "temperature for generation"},
)
no_seed_provided: bool = field(
default=False,
metadata={"help": "if set, dont use seed for initializing random generators"},
)
# former interactive args
buffer_size: int = field(
default=0,
metadata={
"help": "read this many sentences into a buffer before processing them"
},
)
input: str = field(
default="-",
metadata={"help": "file to read from; use - for stdin"},
)
@dataclass
class CommonEvalConfig(MetaseqDataclass):
path: Optional[str] = field(
default=None,
metadata={"help": "path(s) to model file(s), colon separated"},
)
quiet: bool = field(default=False, metadata={"help": "only print final scores"})
model_overrides: str = field(
default="{}",
metadata={
"help": "a dictionary used to override model args at generation that were used during model training"
},
)
results_path: Optional[str] = field(
default=None,
metadata={
"help": "path to save eval results (optional)",
"argparse_alias": "--sp",
},
)
@dataclass
class ReshardConfig(MetaseqDataclass):
save_dir: Optional[str] = field(
default=None,
metadata={
"help": "where to save the resharded checkpoints",
"argparse_alias": "--dest-dir",
},
)
save_prefix: Optional[str] = field(
default="reshard", metadata={"help": "save to dest-dir/save-prefix-shard{i}.pt"}
)
target_world_size: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum number of GPUs you want to use to evaluate. "
"AssertionError if any FSDP module's number of parameters is not "
"divisible by this."
)
},
)
do_pad: Optional[bool] = field(
default=False,
metadata={
"help": (
"Add padding to make sure that running on target world size "
"works. This reduces flexibility for world sizes smaller than "
"target world size."
)
},
)
@dataclass
class EvalLMConfig(MetaseqDataclass):
# TODO(anj): Remove this since we want to set this by default when running eval.
score_sequences: bool = field(
default=False,
metadata={"help": "if set, uses the ScoreSequencer class for evaluating."},
)
output_word_probs: bool = field(
default=False,
metadata={
"help": "if set, outputs words and their predicted log probabilities to standard output"
},
)
output_word_stats: bool = field(
default=False,
metadata={
"help": "if set, outputs word statistics such as word count, average probability, etc"
},
)
context_window: int = field(
default=0,
metadata={
"help": "ensures that every evaluated token has access to a context of at least this size, if possible"
},
)
softmax_batch: int = field(
default=sys.maxsize,
metadata={
"help": (
"if BxT is more than this, will batch the softmax over vocab to "
"this amount of tokens, in order to fit into GPU memory"
)
},
)
max_valid_steps: Optional[int] = field(
default=None,
metadata={"help": "How many batches to evaluate", "argparse_alias": "--nval"},
)
@dataclass
class MetaseqConfig(MetaseqDataclass):
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
optimization: OptimizationConfig = OptimizationConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
generation: GenerationConfig = GenerationConfig()
eval_lm: EvalLMConfig = EvalLMConfig()
reshard: ReshardConfig = ReshardConfig()
model: Any = MISSING
task: Any = MISSING
criterion: Any = MISSING
optimizer: Any = MISSING
lr_scheduler: Any = MISSING
bpe: Any = MISSING
tokenizer: Any = None
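# Illustrative sketch (not part of the original module): MetaseqConfig is a
# structured dataclass config, so its defaults can be materialized directly
# with OmegaConf; the fields printed below were chosen arbitrarily.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    _cfg = OmegaConf.structured(MetaseqConfig)
    print(_cfg.common.seed)  # 1
    print(_cfg.checkpoint.save_dir)  # checkpoints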
|
flash_metaseq-main
|
metaseq/dataclass/configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import logging
from hydra.core.config_store import ConfigStore
from metaseq.dataclass.configs import MetaseqConfig
logger = logging.getLogger(__name__)
def hydra_init(cfg_name="base_config") -> None:
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=MetaseqConfig)
for k in MetaseqConfig.__dataclass_fields__:
v = MetaseqConfig.__dataclass_fields__[k].default
try:
cs.store(name=k, node=v)
except BaseException:
logger.error(f"{k} - {v}")
raise
|
flash_metaseq-main
|
metaseq/dataclass/initialize.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, EnumMeta
from typing import List
class StrEnumMeta(EnumMeta):
# this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see
# https://github.com/facebookresearch/hydra/issues/1156
@classmethod
def __instancecheck__(cls, other):
return "enum" in str(type(other))
class StrEnum(Enum, metaclass=StrEnumMeta):
def __str__(self):
return self.value
def __eq__(self, other: str):
return self.value == other
def __repr__(self):
return self.value
def __hash__(self):
return hash(str(self))
def ChoiceEnum(choices: List[str]):
"""return the Enum class used to enforce list of choices"""
return StrEnum("Choices", {k: k for k in choices})
LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none"])
DDP_BACKEND_CHOICES = ChoiceEnum(
[
"c10d", # alias for pytorch_ddp
"fully_sharded", # FullyShardedDataParallel from fairscale
"pytorch_ddp",
]
)
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta"])
ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
CLIP_GRAD_NORM_TYPE_CHOICES = ChoiceEnum(["l2", "inf"])
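# Illustrative sketch (not part of the original module): ChoiceEnum members
# compare equal to their raw string values via StrEnum.__eq__, so parsed
# command-line strings can be checked against them directly.
if __name__ == "__main__":
    assert LOG_FORMAT_CHOICES("json") == "json"
    assert str(LOG_FORMAT_CHOICES.json) == "json"
    assert CLIP_GRAD_NORM_TYPE_CHOICES("inf") == "inf"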
|
flash_metaseq-main
|
metaseq/dataclass/constants.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .configs import MetaseqDataclass
from .constants import ChoiceEnum
__all__ = [
"MetaseqDataclass",
"ChoiceEnum",
]
|
flash_metaseq-main
|
metaseq/dataclass/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import inspect
import logging
import os
import re
from argparse import ArgumentError, ArgumentParser, Namespace
from dataclasses import _MISSING_TYPE, MISSING
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
from hydra import compose, initialize
from hydra.core.global_hydra import GlobalHydra
from omegaconf import DictConfig, OmegaConf, open_dict
from omegaconf.errors import ConfigKeyError
from metaseq.dataclass import MetaseqDataclass
from metaseq.dataclass.configs import MetaseqConfig
logger = logging.getLogger(__name__)
def eval_str_list(x, x_type=float):
if x is None:
return None
if isinstance(x, str):
if len(x) == 0:
return []
x = ast.literal_eval(x)
try:
return list(map(x_type, x))
except TypeError:
return [x_type(x)]
def interpret_dc_type(field_type):
if isinstance(field_type, str):
raise RuntimeError("field should be a type")
if field_type == Any:
return str
typestring = str(field_type)
if re.match(
r"(typing.|^)Union\[(.*), NoneType\]$", typestring
) or typestring.startswith("typing.Optional"):
return field_type.__args__[0]
return field_type
def gen_parser_from_dataclass(
parser: ArgumentParser,
dataclass_instance: MetaseqDataclass,
delete_default: bool = False,
) -> None:
"""convert a dataclass instance to tailing parser arguments"""
def argparse_name(name: str):
if name in dataclass_instance.positional_args():
return name
if name == "_name":
# private member, skip
return None
return "--" + name.replace("_", "-")
def get_kwargs_from_dc(
dataclass_instance: MetaseqDataclass, k: str
) -> Dict[str, Any]:
"""k: dataclass attributes"""
kwargs = {}
field_type = dataclass_instance._get_type(k)
inter_type = interpret_dc_type(field_type)
field_default = dataclass_instance._get_default(k)
if isinstance(inter_type, type) and issubclass(inter_type, Enum):
field_choices = [t.value for t in list(inter_type)]
else:
field_choices = None
field_help = dataclass_instance._get_help(k)
field_const = dataclass_instance._get_argparse_const(k)
if isinstance(field_default, str) and field_default.startswith("${"):
kwargs["default"] = field_default
else:
if field_default is MISSING:
kwargs["required"] = True
if field_choices is not None:
kwargs["choices"] = field_choices
if (
isinstance(inter_type, type)
and (issubclass(inter_type, List) or issubclass(inter_type, Tuple))
) or ("List" in str(inter_type) or "Tuple" in str(inter_type)):
if "int" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, int)
elif "float" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, float)
elif "str" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, str)
else:
raise NotImplementedError(
"parsing of type " + str(inter_type) + " is not implemented"
)
if field_default is not MISSING:
kwargs["default"] = (
",".join(map(str, field_default))
if field_default is not None
else None
)
elif (
isinstance(inter_type, type) and issubclass(inter_type, Enum)
) or "Enum" in str(inter_type):
kwargs["type"] = str
if field_default is not MISSING:
if isinstance(field_default, Enum):
kwargs["default"] = field_default.value
else:
kwargs["default"] = field_default
elif inter_type is bool:
kwargs["action"] = (
"store_false" if field_default is True else "store_true"
)
kwargs["default"] = field_default
else:
kwargs["type"] = inter_type
if field_default is not MISSING:
kwargs["default"] = field_default
kwargs["help"] = field_help
if field_const is not None:
kwargs["const"] = field_const
kwargs["nargs"] = "?"
return kwargs
for k in dataclass_instance._get_all_attributes():
field_name = argparse_name(dataclass_instance._get_name(k))
field_type = dataclass_instance._get_type(k)
if field_name is None:
continue
elif inspect.isclass(field_type) and issubclass(field_type, MetaseqDataclass):
gen_parser_from_dataclass(parser, field_type(), delete_default)
continue
kwargs = get_kwargs_from_dc(dataclass_instance, k)
field_args = [field_name]
alias = dataclass_instance._get_argparse_alias(k)
if alias is not None:
field_args.append(alias)
if "default" in kwargs:
if isinstance(kwargs["default"], str) and kwargs["default"].startswith(
"${"
):
if kwargs["help"] is None:
# this is a field with a name that will be added elsewhere
continue
else:
del kwargs["default"]
if delete_default and "default" in kwargs:
del kwargs["default"]
try:
parser.add_argument(*field_args, **kwargs)
except ArgumentError:
pass
def _set_legacy_defaults(args, cls):
"""Helper to set default arguments based on *add_args*."""
if not hasattr(cls, "add_args"):
return
import argparse
parser = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS, allow_abbrev=False
)
cls.add_args(parser)
# copied from argparse.py:
defaults = argparse.Namespace()
for action in parser._actions:
if action.dest is not argparse.SUPPRESS:
if not hasattr(defaults, action.dest):
if action.default is not argparse.SUPPRESS:
setattr(defaults, action.dest, action.default)
for key, default_value in vars(defaults).items():
if not hasattr(args, key):
setattr(args, key, default_value)
def _override_attr(
sub_node: str, data_class: Type[MetaseqDataclass], args: Namespace
) -> List[str]:
overrides = []
if not inspect.isclass(data_class) or not issubclass(data_class, MetaseqDataclass):
return overrides
def get_default(f):
if not isinstance(f.default_factory, _MISSING_TYPE):
return f.default_factory()
return f.default
for k, v in data_class.__dataclass_fields__.items():
if k.startswith("_"):
# private member, skip
continue
val = get_default(v) if not hasattr(args, k) else getattr(args, k)
field_type = interpret_dc_type(v.type)
if (
isinstance(val, str)
and not val.startswith("${") # not interpolation
and field_type != str
and (
not inspect.isclass(field_type) or not issubclass(field_type, Enum)
) # not choices enum
):
# upgrade old models that stored complex parameters as string
val = ast.literal_eval(val)
if isinstance(val, tuple):
val = list(val)
v_type = getattr(v.type, "__origin__", None)
if (
(v_type is List or v_type is list or v_type is Optional)
# skip interpolation
and not (isinstance(val, str) and val.startswith("${"))
):
# if type is int but val is float, then we will crash later - try to convert here
if hasattr(v.type, "__args__"):
t_args = v.type.__args__
if len(t_args) == 1 and (t_args[0] is float or t_args[0] is int):
val = list(map(t_args[0], val))
elif val is not None and (
field_type is int or field_type is bool or field_type is float
):
try:
# Future reader, if you experience something other than a ValueError here,
# we used to simply ignore all excepts. Add it to the allowlist.
val = field_type(val)
except ValueError:
pass # ignore errors here, they are often from interpolation args
if val is None:
overrides.append("{}.{}=null".format(sub_node, k))
elif val == "":
overrides.append("{}.{}=''".format(sub_node, k))
elif isinstance(val, str):
val = val.replace("'", r"\'")
overrides.append("{}.{}='{}'".format(sub_node, k, val))
elif isinstance(val, MetaseqDataclass):
overrides += _override_attr(f"{sub_node}.{k}", type(val), args)
elif isinstance(val, Namespace):
sub_overrides, _ = override_module_args(val)
for so in sub_overrides:
overrides.append(f"{sub_node}.{k}.{so}")
else:
overrides.append("{}.{}={}".format(sub_node, k, val))
return overrides
def migrate_registry(
name, value, registry, args, overrides, deletes, use_name_as_val=False
):
if value in registry:
overrides.append("{}={}".format(name, value))
overrides.append("{}._name={}".format(name, value))
overrides.extend(_override_attr(name, registry[value], args))
elif use_name_as_val and value is not None:
overrides.append("{}={}".format(name, value))
else:
deletes.append(name)
def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]:
"""use the field in args to overrides those in cfg"""
overrides = []
deletes = []
for k in MetaseqConfig.__dataclass_fields__.keys():
overrides.extend(
_override_attr(k, MetaseqConfig.__dataclass_fields__[k].type, args)
)
if args is not None:
if hasattr(args, "task"):
from metaseq.tasks import TASK_DATACLASS_REGISTRY
migrate_registry(
"task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes
)
else:
deletes.append("task")
# these options will be set to "None" if they have not yet been migrated
# so we can populate them with the entire flat args
CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"}
from metaseq.registry import REGISTRIES
for k, v in REGISTRIES.items():
if hasattr(args, k):
migrate_registry(
k,
getattr(args, k),
v["dataclass_registry"],
args,
overrides,
deletes,
use_name_as_val=k not in CORE_REGISTRIES,
)
else:
deletes.append(k)
no_dc = True
if hasattr(args, "arch"):
from metaseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY
if args.arch in ARCH_MODEL_REGISTRY:
m_cls = ARCH_MODEL_REGISTRY[args.arch]
dc = getattr(m_cls, "__dataclass", None)
if dc is not None:
m_name = ARCH_MODEL_NAME_REGISTRY[args.arch]
overrides.append("model={}".format(m_name))
overrides.append("model._name={}".format(args.arch))
# override model params with those exist in args
overrides.extend(_override_attr("model", dc, args))
no_dc = False
if no_dc:
deletes.append("model")
return overrides, deletes
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
"""Convert a flat argparse.Namespace to a structured DictConfig."""
# Here we are using field values provided in args to override counterparts inside config object
overrides, deletes = override_module_args(args)
# configs will be in metaseq/config after installation
config_path = os.path.join("..", "config")
GlobalHydra.instance().clear()
with initialize(config_path=config_path):
try:
composed_cfg = compose("config", overrides=overrides)
except Exception:
logger.error("Error when composing. Overrides: " + str(overrides))
raise
for k in deletes:
composed_cfg[k] = None
cfg = OmegaConf.create(
OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True)
)
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = lambda _: True
if cfg.task is None and getattr(args, "task", None):
cfg.task = Namespace(**vars(args))
from metaseq.tasks import TASK_REGISTRY
_set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
cfg.task._name = args.task
if cfg.model is None and getattr(args, "arch", None):
cfg.model = Namespace(**vars(args))
from metaseq.models import ARCH_MODEL_REGISTRY
_set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
cfg.model._name = args.arch
if cfg.optimizer is None and getattr(args, "optimizer", None):
cfg.optimizer = Namespace(**vars(args))
from metaseq.optim import OPTIMIZER_REGISTRY
_set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
cfg.optimizer._name = args.optimizer
if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None):
cfg.lr_scheduler = Namespace(**vars(args))
from metaseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
if args.lr_scheduler == "fixed":
# hack since we don't want to call a "fixed" LR scheduler.
logger.info("Overriding lr_scheduler config from fixed to inverse_sqrt")
args.lr_scheduler = "inverse_sqrt"
_set_legacy_defaults(cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler])
cfg.lr_scheduler._name = args.lr_scheduler
if cfg.criterion is None and getattr(args, "criterion", None):
cfg.criterion = Namespace(**vars(args))
from metaseq.criterions import CRITERION_REGISTRY
_set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
cfg.criterion._name = args.criterion
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(cfg, True)
return cfg
def populate_dataclass(
dataclass: MetaseqDataclass,
args: Namespace,
) -> MetaseqDataclass:
for k in dataclass.__dataclass_fields__.keys():
if k.startswith("_"):
# private member, skip
continue
if hasattr(args, k):
setattr(dataclass, k, getattr(args, k))
return dataclass
def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]):
# this will be deprecated when we get rid of argparse and model_overrides logic
from metaseq.registry import REGISTRIES
with open_dict(cfg):
for k in cfg.keys():
# "k in cfg" will return false if its a "mandatory value (e.g. ???)"
if k in cfg and isinstance(cfg[k], DictConfig):
if k in overrides and isinstance(overrides[k], dict):
for ok, ov in overrides[k].items():
if isinstance(ov, dict) and cfg[k][ok] is not None:
overwrite_args_by_name(cfg[k][ok], ov)
else:
cfg[k][ok] = ov
else:
overwrite_args_by_name(cfg[k], overrides)
elif k in cfg and isinstance(cfg[k], Namespace):
for override_key, val in overrides.items():
setattr(cfg[k], override_key, val)
elif k in overrides:
if (
k in REGISTRIES
and overrides[k] in REGISTRIES[k]["dataclass_registry"]
):
cfg[k] = DictConfig(
REGISTRIES[k]["dataclass_registry"][overrides[k]]
)
overwrite_args_by_name(cfg[k], overrides)
cfg[k]._name = overrides[k]
else:
cfg[k] = overrides[k]
def merge_with_parent(dc: MetaseqDataclass, cfg: MetaseqDataclass):
try:
merged_cfg = OmegaConf.merge(dc, cfg)
except ConfigKeyError:
# Workaround for missing keys - reverse the merge direction, then
# merge back in to the flipped merge
# See https://github.com/fairinternal/fairseq-big-internal/issues/115#issuecomment-1073129691
flipped_merge = OmegaConf.merge(cfg, dc)
merged_cfg = OmegaConf.merge(flipped_merge, cfg)
# Logic from https://github.com/omry/omegaconf/issues/441#issuecomment-737558869 ?
merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"]
OmegaConf.set_struct(merged_cfg, True)
return merged_cfg
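# Illustrative sketch (not part of the original module): eval_str_list accepts
# either a python-literal string or an already-parsed sequence.
if __name__ == "__main__":
    assert eval_str_list("0.1,0.2", float) == [0.1, 0.2]
    assert eval_str_list("[1, 2, 3]", int) == [1, 2, 3]
    assert eval_str_list(None) is None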
|
flash_metaseq-main
|
metaseq/dataclass/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
from queue import PriorityQueue
import random
class KeyedPriorityQueueCollection:
"""
Create a collection of priority queues that are ordered by
a key. Used for grouping specific types of workers
"""
def __init__(self):
self.queues = {}
def put(self, key, item):
"""
:param key: key of the queue to put the item into
:param item: item to add to the queue
"""
if key not in self.queues:
self.queues[key] = PriorityQueue()
self.queues[key].put(item)
# TODO: this can be a max heap to avoid linear lookup
def get_largest_queue_key(self):
"""
### Returns the key of the queue with the most jobs
"""
if len(self.queues):
return max(self.queues, key=lambda key: self.queues[key].qsize())
else:
return None
def get_largest_queue(self):
"""
### Returns the queue with the most jobs
"""
key = self.get_largest_queue_key()
if key:
return self.queues[key]
else:
return None
class PriorityQueueRingShardKeyable:
"""
Interface for ensuring that the put method
has a method to invoke for getting a queue_key
"""
def queue_key(
self,
) -> str:
pass
class PriorityQueueRingShard:
"""
    Creates a hashed queue shard, with an optional deskewing factor for
    avoiding hot keys (e.g. default settings on generation). Without
    deskewing, the hashing uses a consistent modulo for bucketing; with
    deskewing, a random deskew offset is mixed into the key, which is not
    consistent across calls but still ensures an even distribution.
"""
@staticmethod
def key_from_dictionary(key_dict):
"""
:param key_dict: dictionary of keys and values to build shard key from
"""
return ":".join([f"{k}:{key_dict[k]}" for k in sorted(key_dict.keys())])
def __init__(self, num_shards=1, deskew_factor=1):
"""
:param num_shards: total number of shards to hash (i.e. number of workers)
:param deskew_factor: number of virtual keys per shard. Reduces key skewing.
"""
self.num_shards = num_shards
self.deskew_factor = deskew_factor
self.deskewing = deskew_factor > 1
self.queue_shards = [
KeyedPriorityQueueCollection() for i in range(self.num_shards)
]
def put(self, item: PriorityQueueRingShardKeyable):
"""
        :param item: item to add; its queue_key() determines which shard's
            queue receives it
"""
key = item.queue_key()
shard_index = self.get_shard_index_for_key(key)
self.queue_shards[shard_index].put(key, item)
def get_shard_index_for_key(self, key):
"""
### hashing is deterministic except when deskewing is enabled.
:param key: the key to be sharded.
"""
if self.deskewing:
deskew_offset = random.randint(0, self.deskew_factor * self.num_shards)
key = f"{deskew_offset}:{key}"
return int(hashlib.sha1(key.encode("utf-8")).hexdigest(), 16) % self.num_shards
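# Illustrative usage sketch (not part of the original module). _DemoItem is a
# hypothetical stand-in for the real work items in metaseq.service.workers; it
# only needs to be sortable and to expose queue_key().
if __name__ == "__main__":
    from dataclasses import dataclass, field

    @dataclass(order=True)
    class _DemoItem:
        cost: int
        key: str = field(compare=False)

        def queue_key(self) -> str:
            return self.key

    shards = PriorityQueueRingShard(num_shards=4)
    for cost, key in [(3, "t=1.0:p=0.9"), (1, "t=1.0:p=0.9"), (2, "t=0.7:p=0.9")]:
        shards.put(_DemoItem(cost, key))
    # items with the same key land in the same shard; lowest cost is served first
    collection = shards.queue_shards[shards.get_shard_index_for_key("t=1.0:p=0.9")]
    print(collection.get_largest_queue().get().cost)  # 1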
|
flash_metaseq-main
|
metaseq/service/queue.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
import time
from metaseq.service.constants import CHECKPOINT_LOCAL
class OAIResponse:
def __init__(self, results: list) -> None:
self.results = results
self.response_id = str(uuid.uuid4())
self.created = int(time.time())
def __dict__(self):
return {
"id": self.response_id,
"object": "text_completion",
"created": self.created,
"model": CHECKPOINT_LOCAL,
"choices": [
{
"text": result["text"],
"logprobs": {
"tokens": result["tokens"],
"token_logprobs": result["token_scores"],
"text_offset": result["text_offset"],
"top_logprobs": result["top_logprobs"],
"finish_reason": "length", # TODO: implement this
},
}
for result in self.results
],
}
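# Illustrative sketch (not part of the original module): each result dict is
# expected to carry the keys referenced above; the values below are made up.
if __name__ == "__main__":
    _demo_result = {
        "text": "hello world",
        "tokens": ["hello", " world"],
        "token_scores": [-0.1, -0.2],
        "text_offset": [0, 5],
        "top_logprobs": [None, None],
    }
    print(OAIResponse([_demo_result]).__dict__())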
|
flash_metaseq-main
|
metaseq/service/responses.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
MAX_SEQ_LEN = 2048
BATCH_SIZE = 2048 # silly high bc we dynamically batch by MAX_BATCH_TOKENS
MAX_BATCH_TOKENS = 3072
DEFAULT_PORT = 6010
MODEL_PARALLEL = 8
TOTAL_WORLD_SIZE = 8
try:
from metaseq_internal.constants import LOCAL_SSD, MODEL_SHARED_FOLDER
except ModuleNotFoundError:
# MODEL_SHARED_FOLDER should point to a shared drive (e.g. NFS) where the
# checkpoints from S3 are stored. As an example:
# MODEL_SHARED_FOLDER = "/example/175B/reshard_no_os"
# $ ls /example/175B/reshard_no_os
# reshard-model_part-0.pt
# reshard-model_part-1.pt
# reshard-model_part-2.pt
# reshard-model_part-3.pt
# reshard-model_part-4.pt
# reshard-model_part-5.pt
# reshard-model_part-6.pt
# reshard-model_part-7.pt
MODEL_SHARED_FOLDER = ""
    # LOCAL_SSD is optional, but it assumes you have some sort of local
    # disk where we can cache a copy of the weights for faster loading.
LOCAL_SSD = ""
if not LOCAL_SSD:
# don't use local cache
LOCAL_SSD = MODEL_SHARED_FOLDER
if not MODEL_SHARED_FOLDER:
raise RuntimeError(
"You must set the variables in metaseq.service.constants to launch the API."
)
# tokenizer files
BPE_MERGES = os.path.join(MODEL_SHARED_FOLDER, "gpt2-merges.txt")
BPE_VOCAB = os.path.join(MODEL_SHARED_FOLDER, "gpt2-vocab.json")
# where to find the raw files on nfs
CHECKPOINT_FOLDER = os.path.join(MODEL_SHARED_FOLDER, "175B", "reshard_no_os")
# where to store them on SSD for faster loading
CHECKPOINT_LOCAL = os.path.join(LOCAL_SSD, "175B", "reshard_no_os", "reshard.pt")
LAUNCH_ARGS = [
f"--model-parallel-size {MODEL_PARALLEL}",
f"--distributed-world-size {TOTAL_WORLD_SIZE}",
"--task language_modeling",
f"--bpe-merges {BPE_MERGES}",
f"--bpe-vocab {BPE_VOCAB}",
"--bpe hf_byte_bpe",
f"--merges-filename {BPE_MERGES}", # TODO(susanz): hack for getting interactive_hosted working on public repo
f"--vocab-filename {BPE_VOCAB}", # TODO(susanz): hack for getting interactive_hosted working on public repo
f"--path {CHECKPOINT_LOCAL}",
"--beam 1 --nbest 1",
"--distributed-port 13000",
"--checkpoint-shard-count 1",
"--use-sharded-state",
f"--batch-size {BATCH_SIZE}",
f"--buffer-size {BATCH_SIZE * MAX_SEQ_LEN}",
f"--max-tokens {BATCH_SIZE * MAX_SEQ_LEN}",
"/tmp", # required "data" argument.
]
|
flash_metaseq-main
|
metaseq/service/constants.py
|
# Copyright (c) Meta, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
flash_metaseq-main
|
metaseq/service/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import socket
import logging
import sys
import os
def normalize_newlines(s: str):
"""
normalizes new lines, i.e. '\r\n' to '\n'
"""
# note that web browsers send \r\n but our training data uses \n.
return s.replace("\r\n", "\n").replace("\r", "\n")
def get_my_ip():
"""
returns ip / hostname of current host
"""
return socket.gethostbyname(socket.gethostname())
def encode_fn(generator, x):
"""
encode a given value to list of bpe tokens
"""
assert generator.bpe is not None
return generator.bpe.bpe.encode(normalize_newlines(x)).ids
def build_logger():
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("metaseq_cli.interactive")
return logger
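# Illustrative sketch (not part of the original module): both Windows-style
# and bare carriage returns are normalized to '\n'.
if __name__ == "__main__":
    assert normalize_newlines("a\r\nb\rc") == "a\nb\nc"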
|
flash_metaseq-main
|
metaseq/service/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from metaseq.service.queue import PriorityQueueRingShard
from dataclasses import dataclass
from typing import Any
import queue
import math
@dataclass
class WorkItem:
"""
Sortable entry for the batching PriorityQueue.
"""
cost: int # lower is serviced first
uid: int # unique id to map back to multi-input requests
return_queue: queue.Queue
data: Any
# for sorting / priority queue
def __lt__(self, other: "WorkItem"):
return (self.cost, self.uid) < (other.cost, other.uid)
# for sorting / priority queue
def __eq__(self, other: "WorkItem"):
return (self.cost, self.uid) == (other.cost, other.uid)
def queue_key(self):
return PriorityQueueRingShard.key_from_dictionary(
{
"temperature": self.data["temperature"],
"top_p": self.data["top_p"],
"n": self.data["n"],
}
)
@staticmethod
def generate_worker(encoded_prompt, batch_queue, **generation_args):
request_object = {"input": encoded_prompt, **generation_args}
ret_queue = queue.Queue()
enc_len = len(encoded_prompt)
cost = enc_len + int(
math.ceil((enc_len / 10) ** 2)
) # account for the cost of both linear and attention layers
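        # e.g. (illustrative): enc_len=512 -> cost = 512 + ceil(51.2 ** 2) = 512 + 2622 = 3134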
batch_queue.put(WorkItem(cost, 0, ret_queue, request_object))
_, result = ret_queue.get()
return result
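# Illustrative sketch (not part of the original module): WorkItems sort by
# (cost, uid), so cheaper requests are pulled from the queue first.
if __name__ == "__main__":
    _q = queue.PriorityQueue()
    _q.put(WorkItem(cost=300, uid=1, return_queue=queue.Queue(), data={}))
    _q.put(WorkItem(cost=100, uid=2, return_queue=queue.Queue(), data={}))
    print(_q.get().uid)  # 2: the lower-cost item wins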
|
flash_metaseq-main
|
metaseq/service/workers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
from metaseq import utils
from metaseq.incremental_decoding_utils import with_incremental_state
from metaseq.modules.dropout import Dropout
from metaseq.modules.linear import Linear
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
initialize_params_on_gpu=False,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
random_state = torch.get_rng_state()
# random_state_cuda = torch.cuda.get_rng_state()
self.k_proj = Linear(
self.kdim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
)
self.v_proj = Linear(
self.vdim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
)
self.q_proj = Linear(
embed_dim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
)
self.out_proj = Linear(
embed_dim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
)
torch.set_rng_state(random_state)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
def _init_method_bias(weight, bias):
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(bias, -bound, bound)
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
_init_method_bias(self.k_proj.weight, self.k_proj.bias)
_init_method_bias(self.v_proj.weight, self.v_proj.bias)
_init_method_bias(self.q_proj.weight, self.q_proj.bias)
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
            assert (src_len, bsz) == value.shape[:2]
if (
not self.onnx_trace
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
# Replace any non-finite values with finite equivalents, since otherwise
# we may get NaN when adding attn_mask or computing softmax.
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
if src_len > prev_key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask.float()
elif key_padding_mask is not None:
if src_len > key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = key_padding_mask.float()
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
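# Quick CPU smoke test (illustrative sketch): with no incremental state and no
# static_kv, forward takes the fused F.multi_head_attention_forward path.
# Shapes follow the Time x Batch x Channel convention used in this module.
if __name__ == "__main__":
    torch.manual_seed(0)
    attn = MultiheadAttention(embed_dim=16, num_heads=4, dropout=0.0, self_attention=True)
    x = torch.randn(5, 2, 16)  # (tgt_len, bsz, embed_dim)
    out, weights = attn(query=x, key=x, value=x, need_weights=True)
    assert out.shape == (5, 2, 16)
    print("head-averaged attention weights:", tuple(weights.shape))  # (bsz, tgt_len, src_len)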
|
flash_metaseq-main
|
metaseq/modules/multihead_attention.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from metaseq import utils
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
# we cannot use incremental state here because we must be aware of
# padding.
if positions is None:
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
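# Illustrative sketch: position ids are offset past padding_idx, and padded
# tokens keep the all-zero embedding row that nn.Embedding reserves for
# padding_idx.
if __name__ == "__main__":
    import torch
    emb = LearnedPositionalEmbedding(num_embeddings=16, embedding_dim=8, padding_idx=1)
    tokens = torch.tensor([[5, 6, 7, 1]])  # the last token is padding
    out = emb(tokens)
    assert out.shape == (1, 4, 8)
    assert torch.all(out[0, -1] == 0)  # padded position stays zero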
|
flash_metaseq-main
|
metaseq/modules/learned_positional_embedding.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def checkpoint_wrapper(module, *args, **kwargs):
try:
from metaseq.modules.checkpoint_activation_wrapper.checkpoint_activations import (
checkpoint_wrapper as _checkpoint_wrapper,
)
except ImportError:
raise ImportError(
"Cannot find fairscale.nn.misc.checkpoint_activations. "
"Please install fairscale with: pip install fairscale"
)
module = _checkpoint_wrapper(module, *args, **kwargs)
if hasattr(module, "extra_repr"):
orig_extra_repr = module.extra_repr
else:
orig_extra_repr = None
def extra_repr():
return (
f"[checkpointed] {orig_extra_repr()}" if orig_extra_repr is not None else ""
)
module.extra_repr = extra_repr
return module
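# Illustrative sketch (assumes fairscale, and Megatron for the inner wrapper,
# are installed): wrapping a module prefixes its extra_repr, which makes the
# checkpointed layers visible when printing a model.
if __name__ == "__main__":
    import torch.nn as nn
    layer = checkpoint_wrapper(nn.Linear(4, 4))
    print(layer.extra_repr())  # "[checkpointed] in_features=4, out_features=4, bias=True"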
|
flash_metaseq-main
|
metaseq/modules/checkpoint_activations.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch import Tensor
from torch.nn import Module
from torch.nn import functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
class Linear(Module):
"""
Exact same as pytorch nn.Linear but with option to initialize weight and bias directly on GPU
"""
__constants__ = ["in_features", "out_features"]
in_features: int
out_features: int
weight: Tensor
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
initialize_params_on_gpu: bool = False,
) -> None:
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
device = torch.cuda.current_device() if initialize_params_on_gpu else None
dtype = torch.half if initialize_params_on_gpu else torch.float
self.weight = Parameter(
torch.empty(out_features, in_features, device=device, dtype=dtype)
)
if bias:
self.bias = Parameter(torch.empty(out_features, device=device, dtype=dtype))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return F.linear(input, self.weight, self.bias)
def extra_repr(self) -> str:
return "in_features={}, out_features={}, bias={}".format(
self.in_features, self.out_features, self.bias is not None
)
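# Illustrative sketch: with initialize_params_on_gpu=False (the default) this is
# a standard fp32 linear layer on CPU; the GPU path instead allocates fp16
# parameters directly on the current CUDA device.
if __name__ == "__main__":
    layer = Linear(8, 4)
    y = layer(torch.randn(2, 8))
    assert y.shape == (2, 4)
    assert layer.weight.dtype == torch.float32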
|
flash_metaseq-main
|
metaseq/modules/linear.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
the corresponding GitHub repo: https://github.com/hendrycks/GELUs
"""
import math
import torch
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
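# Illustrative sketch: gelu_accurate is the tanh approximation, while gelu uses
# PyTorch's exact erf-based implementation (computed in fp32 and cast back).
# The two agree to within roughly 1e-3 for moderate inputs.
if __name__ == "__main__":
    x = torch.linspace(-3.0, 3.0, steps=13)
    max_diff = torch.max(torch.abs(gelu(x) - gelu_accurate(x)))
    print("max |exact - tanh approx| =", float(max_diff))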
|
flash_metaseq-main
|
metaseq/modules/gelu.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .dropout import Dropout
from .gelu import gelu, gelu_accurate
from .layer_norm import Fp32LayerNorm, LayerNorm
from .learned_positional_embedding import LearnedPositionalEmbedding
from .multihead_attention import MultiheadAttention
from .positional_embedding import PositionalEmbedding
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer
__all__ = [
"Dropout",
"Fp32LayerNorm",
"gelu",
"gelu_accurate",
"LayerNorm",
"LearnedPositionalEmbedding",
"MultiheadAttention",
"PositionalEmbedding",
"SinusoidalPositionalEmbedding",
"TransformerDecoderLayer",
"TransformerEncoderLayer",
]
|
flash_metaseq-main
|
metaseq/modules/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import torch
logger = logging.getLogger(__name__)
try:
from megatron.model.fused_bias_gelu import bias_gelu_impl
has_fused_bias_gelu = True
except ImportError:
has_fused_bias_gelu = False
def load_megatron_fused_kernel():
"""Compile and load fused kernels from Megatron."""
if getattr(load_megatron_fused_kernel, "has_run", False):
return
load_megatron_fused_kernel.has_run = True
from megatron import fused_kernels
from argparse import Namespace
if not torch.distributed.is_initialized():
args = Namespace(rank=0, masked_softmax_fusion=True)
fused_kernels.load(args)
return
global_rank = torch.distributed.get_rank()
args = Namespace(rank=global_rank, masked_softmax_fusion=True)
# Always build on rank zero first.
if global_rank == 0:
build_dir = os.path.join(os.path.dirname(fused_kernels.__file__), "build")
logger.info(
"Compiling and loading fused kernels\n\n"
"NOTE: If this hangs here, your megatron fused kernels may be corrupted. "
"This can happen if a previous job is interrupted during a build. "
"In that case, delete the megatron build directory and relaunch training. "
f"The megatron build directory is located at: {build_dir}"
)
fused_kernels.load(args)
torch.distributed.barrier()
else:
torch.distributed.barrier()
fused_kernels.load(args)
# Simple barrier to make sure all ranks have passed the
# compilation phase successfully before moving on to the
# rest of the program. We think this might ensure that
# the lock is released.
torch.distributed.barrier()
logger.info("Done with compiling and loading fused kernels.")
def fused_bias_gelu(x, bias):
if not has_fused_bias_gelu:
raise ImportError(
"Cannot find fused Megatron kernels, please install Megatron from: "
"github.com/NVIDIA/Megatron-LM"
)
load_megatron_fused_kernel()
return bias_gelu_impl(x, bias)
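# Illustrative sketch: when Megatron is unavailable, the same quantity can be
# computed unfused as gelu(x + bias); the fused kernel is assumed to be a
# faster, numerically close equivalent of this composition.
if __name__ == "__main__":
    x = torch.randn(4, 8)
    bias = torch.randn(8)
    reference = torch.nn.functional.gelu(x + bias)
    print(reference.shape)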
|
flash_metaseq-main
|
metaseq/modules/fused_bias_gelu.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from metaseq import distributed_utils as dist_utils, utils
from metaseq.modules import gelu, MultiheadAttention
from metaseq.modules.dropout import Dropout
from metaseq.modules.fused_bias_gelu import (
fused_bias_gelu,
has_fused_bias_gelu,
load_megatron_fused_kernel,
)
from metaseq.modules.layer_norm import LayerNorm, SyncedModelParallelFusedLayerNorm
from metaseq.modules.linear import Linear
def _linear(x, weight, bias=None):
return F.linear(x, weight, bias)
def _ffn(x, fc1, activation_fn, fc2, dropout_module, ffn_ln=None):
x_shape = x.shape
x = x.reshape(-1, x.size(-1))
# apex fused bias gelu is not yet supported with megatron model parallel
# TODO [namangoyal]: Find better way to do this
model_parallel = not isinstance(fc1, nn.Linear) and not isinstance(fc1, Linear)
if model_parallel and activation_fn == gelu and has_fused_bias_gelu:
# here, we do the bias computation outside fc1 and fc2 to take advantage of fused_bias_gelu
assert fc1.skip_bias_add
x, bias_fc1 = fc1(x)
x = fused_bias_gelu(x, bias_fc1)
if ffn_ln is not None:
x = ffn_ln(x)
x, bias_fc2 = fc2(x)
x = x + bias_fc2
elif model_parallel:
# here, we do the bias computation inside fc1 and fc2 AND gather_output
x, _ = fc1(x)
x = activation_fn(x)
if ffn_ln is not None:
x = ffn_ln(x)
x, _ = fc2(x)
elif has_fused_bias_gelu and activation_fn == gelu:
x = _linear(x, fc1.weight)
x = fused_bias_gelu(x, fc1.bias)
if ffn_ln is not None:
x = ffn_ln(x)
x = _linear(x, fc2.weight, fc2.bias)
else:
x = fc1(x)
x = activation_fn(x)
if ffn_ln is not None:
x = ffn_ln(x)
x = fc2(x)
x = x.view(x_shape)
x = dropout_module(x)
return x
class FeedForwardNetwork(nn.Module):
"""
Feed Forward Network layer in the Transformer model
"""
def __init__(self, args, embed_dim, ffn_dim, dropout_module=None):
super().__init__()
self.embed_dim = embed_dim
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
self.fc1 = Linear(self.embed_dim, ffn_dim)
self.fc2 = Linear(ffn_dim, self.embed_dim)
self.dropout_module = (
Dropout(args.dropout, module_name=self.__class__.__name__)
if not dropout_module
else dropout_module
)
def forward(self, x):
return _ffn(
x,
fc1=self.fc1,
activation_fn=self.activation_fn,
fc2=self.fc2,
dropout_module=self.dropout_module,
)
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.args = args
self.embed_dim = args.encoder_embed_dim
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout_module = Dropout(args.dropout, module_name=self.__class__.__name__)
self.normalize_before = args.encoder_normalize_before
ffn_dim = args.encoder_ffn_embed_dim
self.attn_ln = (
LayerNorm(self.embed_dim) if getattr(args, "scale_attn", False) else None
)
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu") or "relu"
)
self.fc1 = Linear(self.embed_dim, ffn_dim)
self.fc2 = Linear(ffn_dim, self.embed_dim)
        self.ffn_layernorm = None  # the encoder layer has no extra FFN LayerNorm; forward passes this as ffn_ln
        self.final_layer_norm = LayerNorm(self.embed_dim)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(
self,
x,
encoder_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
need_weights=False,
attn_mask=attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = _ffn(
x,
self.fc1,
self.activation_fn,
self.fc2,
self.dropout_module,
ffn_ln=self.ffn_layernorm,
)
l_aux = None
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, l_aux
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
no_encoder_attn=False,
add_bias_kv=False,
add_zero_attn=False,
):
super().__init__()
load_megatron_fused_kernel()
self.args = args
self.embed_dim = args.decoder_embed_dim
self.dropout_module = Dropout(args.dropout, module_name=self.__class__.__name__)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.attn_ln = (
LayerNorm(self.embed_dim) if getattr(args, "scale_attn", False) else None
)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
initialize_params_on_gpu = getattr(
args, "tensor_parallel_init_model_on_gpu", False
)
if initialize_params_on_gpu and self.attn_ln is not None:
self.attn_ln = utils.floating_point_precision_convertor(
self.attn_ln.cuda(),
fp16=getattr(args, "fp16", False),
memory_efficient_fp16=getattr(args, "memory_efficient_fp16", False),
bf16=getattr(args, "bf16", False),
)
self.nh = args.decoder_attention_heads
self.head_dim = int(self.embed_dim / self.nh)
scale_heads = getattr(args, "scale_heads", False)
self.c_attn = None
if scale_heads:
if initialize_params_on_gpu:
self.c_attn = nn.Parameter(
torch.ones((self.nh,), dtype=torch.float16).cuda(),
requires_grad=True,
)
else:
self.c_attn = nn.Parameter(torch.ones((self.nh,)), requires_grad=True)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if initialize_params_on_gpu:
self.self_attn_layer_norm = utils.floating_point_precision_convertor(
self.self_attn_layer_norm.cuda(),
fp16=getattr(args, "fp16", False),
memory_efficient_fp16=getattr(args, "memory_efficient_fp16", False),
bf16=getattr(args, "bf16", False),
)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if initialize_params_on_gpu:
self.encoder_attn_layer_norm = utils.floating_point_precision_convertor(
self.encoder_attn_layer_norm.cuda(),
fp16=getattr(args, "fp16", False),
memory_efficient_fp16=getattr(args, "memory_efficient_fp16", False),
bf16=getattr(args, "bf16", False),
)
ffn_dim = args.decoder_ffn_embed_dim
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
        # optional LayerNorm over the FFN hidden states; sharded when args.model_parallel_size > 1
mp_rank = (
dist_utils.get_model_parallel_rank()
if torch.distributed.is_initialized()
else None
)
self.ffn_layernorm = None
if getattr(args, "scale_fc", False):
if args.model_parallel_size > 1:
if not getattr(args, "sync_ln_variance", False):
self.ffn_layernorm = LayerNorm(ffn_dim // args.model_parallel_size)
else:
self.ffn_layernorm = SyncedModelParallelFusedLayerNorm(
ffn_dim,
args.model_parallel_size,
mp_rank=mp_rank,
initialize_params_on_gpu=initialize_params_on_gpu,
)
else:
self.ffn_layernorm = LayerNorm(ffn_dim)
if initialize_params_on_gpu:
self.ffn_layernorm = utils.floating_point_precision_convertor(
self.ffn_layernorm.cuda(),
fp16=getattr(args, "fp16", False),
memory_efficient_fp16=getattr(
args, "memory_efficient_fp16", False
),
bf16=getattr(args, "bf16", False),
)
self.skip_bias_add = (self.activation_fn == gelu) and has_fused_bias_gelu
self.fc1 = self.build_fc1(
self.embed_dim,
ffn_dim,
initialize_params_on_gpu=initialize_params_on_gpu,
full_megatron_init=getattr(args, "full_megatron_init", False),
megatron_init_sigma=getattr(args, "megatron_init_sigma", 0.006),
dtype=self._get_model_init_dtype(),
)
self.fc2 = self.build_fc2(
ffn_dim,
self.embed_dim,
initialize_params_on_gpu=initialize_params_on_gpu,
full_megatron_init=getattr(args, "full_megatron_init", False),
megatron_init_sigma=getattr(args, "megatron_init_sigma", 0.006),
num_layers=args.decoder_layers,
dtype=self._get_model_init_dtype(),
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
if initialize_params_on_gpu:
self.final_layer_norm = utils.floating_point_precision_convertor(
self.final_layer_norm.cuda(),
fp16=getattr(args, "fp16", False),
memory_efficient_fp16=getattr(args, "memory_efficient_fp16", False),
bf16=getattr(args, "bf16", False),
)
self.need_attn = True
self.onnx_trace = False
self.args = args
def _get_model_init_dtype(self):
if getattr(self.args, "memory_efficient_fp16", False):
return torch.bfloat16 if getattr(self.args, "bf16", False) else torch.half
return torch.float32
def build_fc1(
self, input_dim, output_dim, initialize_params_on_gpu=False, **unused_args
):
return Linear(
input_dim, output_dim, initialize_params_on_gpu=initialize_params_on_gpu
)
def build_fc2(
self, input_dim, output_dim, initialize_params_on_gpu=False, **unused_args
):
return Linear(
input_dim, output_dim, initialize_params_on_gpu=initialize_params_on_gpu
)
def build_self_attention(
self,
embed_dim,
args,
add_bias_kv=False,
add_zero_attn=False,
):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
initialize_params_on_gpu=getattr(
args, "tensor_parallel_init_model_on_gpu", False
),
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
initialize_params_on_gpu=getattr(
args, "tensor_parallel_init_model_on_gpu", False
),
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward_attention(
self,
query,
key,
value,
residual,
key_padding_mask=None,
incremental_state=None,
need_weights=False,
attn_mask=None,
):
x, attn = self.self_attn(
query=query,
key=key,
value=value,
key_padding_mask=key_padding_mask,
incremental_state=incremental_state,
need_weights=need_weights,
attn_mask=attn_mask,
)
if self.c_attn is not None:
tgt_len, bsz = x.size(0), x.size(1)
x = x.view(tgt_len, bsz, self.nh, self.head_dim)
x = torch.einsum("tbhd,h->tbhd", x, self.c_attn)
x = x.reshape(tgt_len, bsz, self.embed_dim)
x = self.dropout_module(x)
if self.attn_ln is not None:
x = self.attn_ln(x)
return self.residual_connection(x, residual), attn
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.forward_attention(
query=x,
key=y,
value=y,
residual=residual,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = _ffn(
x,
fc1=self.fc1,
activation_fn=self.activation_fn,
ffn_ln=self.ffn_layernorm,
fc2=self.fc2,
dropout_module=self.dropout_module,
)
l_aux = None
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None, l_aux
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
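# Illustrative CPU sketch: FeedForwardNetwork exercises the plain branch of
# _ffn (no model parallelism, no fused bias-gelu) with ordinary Linear layers.
# The argparse Namespace below only carries the fields the constructor reads.
if __name__ == "__main__":
    from argparse import Namespace
    ffn = FeedForwardNetwork(Namespace(activation_fn="relu", dropout=0.0), embed_dim=16, ffn_dim=32)
    x = torch.randn(4, 2, 16)  # (seq_len, batch, embed_dim)
    assert ffn(x).shape == (4, 2, 16)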
|
flash_metaseq-main
|
metaseq/modules/transformer_layer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class Dropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def extra_repr(self) -> str:
return "p={}".format(self.p)
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
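# Illustrative sketch: this Dropout is a no-op at inference time unless
# apply_during_inference is explicitly enabled.
if __name__ == "__main__":
    import torch
    m = Dropout(p=0.5, module_name="demo")
    m.eval()
    x = torch.ones(4)
    assert torch.equal(m(x), x)  # inactive in eval mode by default
    m.apply_during_inference = True
    print(m(x))  # stochastic even in eval mode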
|
flash_metaseq-main
|
metaseq/modules/dropout.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from .learned_positional_embedding import LearnedPositionalEmbedding
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
learned_sinusoidal: bool = False,
full_megatron_init=False,
megatron_init_sigma=None,
):
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
if full_megatron_init:
nn.init.normal_(m.weight, mean=0, std=megatron_init_sigma)
else:
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
elif learned_sinusoidal:
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
with torch.no_grad():
m.weight.copy_(
SinusoidalPositionalEmbedding.get_embedding(
num_embeddings,
embedding_dim,
padding_idx,
)
)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim,
padding_idx,
init_size=num_embeddings + padding_idx + 1,
)
return m
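# Illustrative sketch: for the learned variant, the embedding table is enlarged
# by padding_idx + 1 rows so that real positions start after the reserved pad
# row, which is kept at zero.
if __name__ == "__main__":
    m = PositionalEmbedding(num_embeddings=128, embedding_dim=16, padding_idx=1, learned=True)
    assert m.weight.shape == (128 + 1 + 1, 16)
    assert torch.all(m.weight[1] == 0)  # pad row stays zero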
|
flash_metaseq-main
|
metaseq/modules/positional_embedding.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Optional
import torch
import torch.onnx.operators
from torch import Tensor, nn
from metaseq import utils
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx if padding_idx is not None else 0
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
self.onnx_trace = False
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.max_positions = int(1e5)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos, self.embedding_dim, self.padding_idx
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
if self.onnx_trace:
return (
self.weights.index_select(index=self.padding_idx + pos, dim=0)
.unsqueeze(1)
.repeat(bsz, 1, 1)
)
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
(bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
)
return embeddings
return (
self.weights.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
.detach()
)
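# Illustrative sketch: get_embedding concatenates the sine half and the cosine
# half along the feature dimension and zeroes the padding row, so every other
# row satisfies sum(sin^2 + cos^2) == embedding_dim / 2.
if __name__ == "__main__":
    emb = SinusoidalPositionalEmbedding.get_embedding(10, 8, padding_idx=1)
    assert emb.shape == (10, 8)
    assert torch.all(emb[1] == 0)  # padding row is zeroed
    norms = (emb[:, :4] ** 2 + emb[:, 4:] ** 2).sum(-1)
    assert torch.allclose(norms[0], torch.tensor(4.0))
    assert torch.allclose(norms[2:], torch.full_like(norms[2:], 4.0))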
|
flash_metaseq-main
|
metaseq/modules/sinusoidal_positional_embedding.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from metaseq import distributed_utils as dist_utils
from typing import Tuple
import torch.distributed as dist
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if torch.jit.is_scripting():
export = True
if not export and torch.cuda.is_available() and has_fused_layernorm:
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class SyncedModelParallelFusedLayerNorm(nn.Module):
def __init__(
self,
hidden_size,
mp_size,
mp_rank=0,
eps=1e-5,
initialize_params_on_gpu=False,
use_bias=True,
mean_center=True,
):
super().__init__()
assert hidden_size % mp_size == 0
partition_size = hidden_size // mp_size
self.use_bias = use_bias
self.mean_center = mean_center
self.weight = nn.Parameter(torch.ones(partition_size, dtype=torch.float32))
self.bias = (
nn.Parameter(torch.zeros(partition_size, dtype=torch.float32))
if self.use_bias
else None
)
self.variance_epsilon = eps
self.mp_world_size = float(dist_utils.get_model_parallel_world_size())
self.mp_rank = mp_rank
if initialize_params_on_gpu:
self.weight.cuda().half()
if self.bias is not None:
self.bias.cuda().half()
@staticmethod
def get_statistics_from_all_workers(stat, rank, world_size):
"""Retuns tensor shaped (world_size, *stat_size)"""
buffer_size: Tuple[int] = (int(world_size),) + tuple(stat.size())
assert isinstance(
buffer_size, tuple
), f"b{buffer_size} {world_size} {stat.size()}"
buffer = torch.zeros(buffer_size, dtype=stat.dtype, device=stat.device)
buffer[rank] = stat
dist.all_reduce(buffer, group=dist_utils.get_model_parallel_group())
return buffer
def forward(self, hidden_states):
hid_fp32 = hidden_states.float()
local_variance = torch.var(hid_fp32, -1, keepdim=True, unbiased=True)
local_mean = hid_fp32.mean(-1, keepdim=True)
vs = SyncedModelParallelFusedLayerNorm.get_statistics_from_all_workers(
local_variance, self.mp_rank, self.mp_world_size
)
ms = SyncedModelParallelFusedLayerNorm.get_statistics_from_all_workers(
local_mean, self.mp_rank, self.mp_world_size
)
variance, mean = variance_formula(
ms, vs, self.mp_world_size, hidden_states.size(-1)
)
denom = torch.rsqrt(variance + self.variance_epsilon).to(hidden_states.dtype)
mean = mean.to(hidden_states.dtype)
if self.mean_center:
hidden_states = (hidden_states - mean) * denom
else:
hidden_states = hidden_states * denom
if self.use_bias:
return (self.weight * hidden_states) + self.bias
else:
return self.weight * hidden_states
def variance_formula(means, vs, g, k) -> Tuple[torch.Tensor, torch.Tensor]:
    """Combine per-shard statistics into statistics over the full hidden dim.
    Expects unbiased (Bessel-corrected) per-shard variances `vs` and per-shard
    means `means` from `g` shards of width `k`, as computed in forward(), and
    returns the unbiased variance and the mean over all d = g * k elements.
    """
d = g * k
var_ej = means.var(0) # Need unbiased True here (at least in toy example)
summation = vs.sum(0)
inner_coeff: float = (k * (g - 1)) / (k - 1)
outer_coeff: float = (k - 1) / (d - 1)
out: torch.Tensor = outer_coeff * (summation + (inner_coeff * var_ej))
return out, means.mean(0)
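# Illustrative single-process CPU check: recombining unbiased per-shard means
# and variances with variance_formula reproduces the mean and the unbiased
# variance of the full hidden dimension, which is what the sharded LayerNorm
# above relies on across model-parallel workers.
if __name__ == "__main__":
    torch.manual_seed(0)
    g, k = 4, 8  # model-parallel world size, per-shard hidden width
    full = torch.randn(2, 3, g * k)
    shards = full.chunk(g, dim=-1)
    ms = torch.stack([s.mean(-1, keepdim=True) for s in shards])
    vs = torch.stack([s.var(-1, keepdim=True, unbiased=True) for s in shards])
    var, mean = variance_formula(ms, vs, g, k)
    assert torch.allclose(mean, full.mean(-1, keepdim=True), atol=1e-5)
    assert torch.allclose(var, full.var(-1, keepdim=True, unbiased=True), atol=1e-4)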
|
flash_metaseq-main
|
metaseq/modules/layer_norm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import threading
import weakref
from contextlib import contextmanager
from typing import Any, Dict, Generator, Optional, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint as torch_checkpoint
from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm
from fairscale.utils.containers import (
pack_kwargs,
split_non_tensors,
unpack_kwargs,
unpack_non_tensors,
)
from megatron import mpu
from torch import Tensor
from metaseq.distributed.utils import get_model_parallel_group
# https://docs.python.org/3/library/threading.html#thread-local-data
# Manage the checkpoint context with thread-local data.
class ThreadLocal(threading.local):
def __init__(self) -> None:
self.is_checkpointing = False
self.is_recomputing = False
self.is_checkpointing_disabled = False
thread_local = ThreadLocal()
@contextmanager
def disable_checkpointing() -> Generator[None, None, None]:
"""Makes :func:`is_checkpointing_disabled` return :data:`True` within a context."""
orig = thread_local.is_checkpointing_disabled
thread_local.is_checkpointing_disabled = True
try:
yield
finally:
thread_local.is_checkpointing_disabled = orig
@contextmanager
def enable_checkpointing() -> Generator[None, None, None]:
"""Makes :func:`is_checkpointing` return :data:`True` within a context."""
orig = thread_local.is_checkpointing
thread_local.is_checkpointing = True
try:
yield
finally:
thread_local.is_checkpointing = orig
@contextmanager
def enable_recomputing() -> Generator[None, None, None]:
"""Makes :func:`is_recomputing` return :data:`True` within a context."""
orig = thread_local.is_recomputing
thread_local.is_recomputing = True
try:
yield
finally:
thread_local.is_recomputing = orig
def is_checkpointing() -> bool:
"""Whether the current forward propagation is under checkpointing.
Returns:
bool: :data:`True` if it's under checkpointing.
"""
return thread_local.is_checkpointing
def is_recomputing() -> bool:
"""Whether the current forward propagation is under checkpoint
recomputation. Use this to prevent duplicated side-effects at forward
propagation::
class Counter(nn.Module):
def __init__(self):
super().__init__()
self.counter = 0
def forward(self, input):
if not is_recomputing():
self.counter += 1
return input
Returns:
bool: :data:`True` if it's under checkpoint recomputation.
"""
return thread_local.is_recomputing
def checkpoint_wrapper(
module: nn.Module,
offload_to_cpu: bool = False,
distribute_checkpointed_activations: bool = False,
) -> nn.Module:
"""
A friendlier wrapper for performing activation checkpointing.
Compared to the PyTorch version, this version:
- wraps an nn.Module, so that all subsequent calls will use checkpointing
- handles keyword arguments in the forward
- handles non-Tensor outputs from the forward
- supports offloading activations to CPU
Usage::
checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
To understand the benefits of checkpointing and the `offload_to_cpu` flag,
let's divide activations into 2 types: inner activations and outer
activations w.r.t. the checkpointed modules. The inner ones are saved
by activation checkpointing, the outer ones are saved by offload_to_cpu.
In terms of GPU memory savings:
- When inner ones are large in size and outer ones are small,
checkpointing helps a lot, offload_to_cpu may help a little.
- When inner ones are small and outer ones are large,
checkpointing helps little, offload_to_cpu helps a lot.
- When both inner and outer are large, both help and the
benefit is additive.
    .. note::
The first and last layers are not likely to benefit from the `offload_to_cpu` flag
because (1) there are typically other references to the first layer's input, so
the GPU memory won't be freed; (2) the input to the last layer is immediately
used by the backward pass and won't result in memory savings.
Args:
module (nn.Module):
The module to be wrapped
offload_to_cpu (bool):
Whether to offload activations to CPU.
Returns:
(nn.Module):
Wrapped module
"""
# Patch the batchnorm layers in case there are any in this module.
patch_batchnorm(module)
# The use of weakref here is to prevent creating a ref cycle: m -> m.forward -> m.
# When such cycle exists, gc won't collect the module when the module is freed.
# That causes GPU memory to be leaked. See the unit test for how we catch that.
#
# We prefer this over a class wrapper since the class wrapper would have to
# proxy a lot of fields and methods.
module.forward = functools.partial( # type: ignore
_checkpointed_forward,
type(module).forward,
weakref.ref(module),
offload_to_cpu,
distribute_checkpointed_activations,
)
return module
def _checkpointed_forward(
original_forward: Any,
weak_self: Any,
offload_to_cpu: bool,
distribute_checkpointed_activations: bool,
*args: Any,
**kwargs: Any
) -> Any:
module = weak_self()
# If gradients are disabled, just use original `.forward()` method directly.
if not torch.is_grad_enabled() or thread_local.is_checkpointing_disabled:
return original_forward(module, *args, **kwargs)
# Autograd Functions in PyTorch work best with positional args, since
# the backward must return gradients (or None) for every input argument.
# We can flatten keyword arguments to make this easier.
args = (module,) + args
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs)
parent_ctx_dict: Dict[str, Any] = {
"offload": offload_to_cpu,
"distribute_checkpointed_activations": distribute_checkpointed_activations,
}
# Dummy tensor with grad is used to ensure the backward pass is called. This is needed
    # when original_forward's inputs are non-tensors (e.g. a tuple). Using this dummy tensor
    # avoids requiring users to set their input tensors' requires_grad flag. In the case
# of tuple type inputs, setting the flag won't even trigger the backward pass.
#
# One implication of this is that since we always feed in a dummy tensor
# needing grad, then the output will always require grad, even if it originally
# wouldn't, such as if the module and original input both do not require grad.
# We get around this by saving the desired requires_grad value in output and
# detaching the output if needed.
output = CheckpointFunction.apply(
torch.tensor([], requires_grad=True),
original_forward,
parent_ctx_dict,
kwarg_keys,
*flat_args
)
output_requires_grad = parent_ctx_dict["output_requires_grad"]
if not isinstance(output, torch.Tensor):
# If output should not require grad, then detach it, since otherwise it will
# always have requires_grad = True due to our dummy tensor input above that
# requires_grad
output = [x.detach() if not output_requires_grad else x for x in output]
packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"]
if packed_non_tensor_outputs:
output = unpack_non_tensors(output, packed_non_tensor_outputs)
else:
# If output should not require grad, then detach it, since otherwise it will
# always have requires_grad = True due to our dummy tensor input above that
# requires_grad
if not output_requires_grad:
output = output.detach()
return output
def get_rng_state() -> Dict[str, Any]:
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state: Dict[str, Any]) -> None:
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
def is_autocast_enabled() -> bool:
"""Similar to torch.is_autocast_enabled, but compatible with torch 1.5.1"""
if hasattr(torch, "is_autocast_enabled"):
return torch.is_autocast_enabled()
return False
@contextmanager
def autocast(enabled: bool) -> Generator:
"""Similar to torch.cuda.amp.autocast, but compatible with torch 1.5.1"""
if enabled:
with torch.cuda.amp.autocast(enabled):
yield
else:
yield
class CheckpointFunction(torch.autograd.Function):
"""Similar to the torch version, but support non-Tensor outputs.
The caller is expected to provide a dict (*parent_ctx_dict*) that will hold
the non-Tensor outputs. These should be combined with the Tensor *outputs*
by calling :func:`unpack_non_tensors`.
"""
@staticmethod
def forward( # type: ignore
ctx: Any,
dummy_tensor_requires_grad: torch.Tensor,
run_function: Any,
parent_ctx_dict: Dict[str, Any],
kwarg_keys: Tuple[str, ...],
*args: Any,
**kwargs: Any
) -> Any:
torch_checkpoint.check_backward_validity(args)
ctx.run_function = run_function
ctx.kwarg_keys = kwarg_keys
ctx.fwd_rng_state = get_rng_state()
ctx.is_model_parallel = get_model_parallel_group() is not None
# Megatron's dropout random state
if ctx.is_model_parallel:
ctx.fwd_cuda_rng_state_tracker = (
mpu.random.get_cuda_rng_tracker().get_states()
)
ctx.had_autocast_in_fwd = is_autocast_enabled()
tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args)
if parent_ctx_dict["offload"]:
ctx.fwd_device = tuple(x.device for x in tensor_inputs)
ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs)
tensor_inputs = tuple(x.to("cpu", non_blocking=True) for x in tensor_inputs)
else:
ctx.fwd_device, ctx.grad_requirements = None, None
with torch.no_grad(), enable_checkpointing():
unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args)
outputs = run_function(*unpacked_args, **unpacked_kwargs)
the_module = unpacked_args[0]
ctx.distribute_checkpointed_activations = (
parent_ctx_dict["distribute_checkpointed_activations"]
and ctx.is_model_parallel
)
if ctx.distribute_checkpointed_activations:
            # HACK [TODO: naman] currently only distributing the first tensor.
            # The second tensor for typical decoder models is just the attention mask,
            # which is seq_len x seq_len and not large enough to be worth distributing.
ctx.tensor_input_0_shape = tensor_inputs[0].data.shape
tensor_inputs[0].data = tensor_inputs[0].data.contiguous()
tensor_inputs[0].data = mpu.random.split_tensor_into_1d_equal_chunks(
tensor_inputs[0].data, new_buffer=True
)
ctx.save_for_backward(*tensor_inputs)
ctx.packed_non_tensor_inputs = packed_non_tensor_inputs
# Because we run with torch.no_grad(), we can't actually access
# outputs.requires_grad. Instead, we manually compute it by
# checking if either the input or the module needs grads
parameters = list(the_module.parameters())
# If the module is wrapped by FlattenParamsWrapper, then the
# parameters would have been deleted. If so, we need to access
# the views into the flattened parameters.
if hasattr(the_module, "_unflattened_param_views"):
parameters += the_module._unflattened_param_views
output_requires_grad = any(param.requires_grad for param in parameters) or any(
x.requires_grad for x in tensor_inputs
)
parent_ctx_dict["output_requires_grad"] = output_requires_grad
if not isinstance(outputs, torch.Tensor):
# Autograd Functions don't like non-Tensor outputs. We can split the
# non-Tensor and Tensor outputs, returning the former by reference
# through *parent_ctx_dict* and returning the latter directly.
outputs, packed_non_tensor_outputs = split_non_tensors(outputs)
parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs
return outputs
@staticmethod
def backward(ctx: Any, *args: Any) -> Tuple[Optional[Tensor], ...]:
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad(), please use .backward() if possible"
)
tensor_inputs: Tuple = ctx.saved_tensors
if ctx.distribute_checkpointed_activations:
tensor_inputs[0].data = mpu.random.gather_split_1d_tensor(
tensor_inputs[0].data
)
tensor_inputs[0].data = tensor_inputs[0].data.view(ctx.tensor_input_0_shape)
tensor_inputs = torch_checkpoint.detach_variable(tensor_inputs)
if ctx.fwd_device is not None:
tensor_inputs = tuple(
t.to(ctx.fwd_device[i], non_blocking=True)
for i, t in enumerate(tensor_inputs)
)
for i, need_grad in enumerate(ctx.grad_requirements):
tensor_inputs[i].requires_grad = need_grad
inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs)
# Store the current states.
bwd_rng_state = get_rng_state()
if ctx.is_model_parallel:
bwd_cuda_rng_state_tracker = mpu.random.get_cuda_rng_tracker().get_states()
mpu.random.get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
        # Set the states to what they were before the forward pass.
set_rng_state(ctx.fwd_rng_state)
with torch.enable_grad(), enable_recomputing(), autocast(
ctx.had_autocast_in_fwd
):
unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs)
outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs)
tensor_outputs, _ = split_non_tensors(outputs)
        # Set the states back to what they were at the start of this function.
set_rng_state(bwd_rng_state)
if ctx.is_model_parallel:
mpu.random.get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
# Run backward() with only Tensors that require grad
outputs_with_grad = []
args_with_grad = []
for i in range(len(tensor_outputs)):
if tensor_outputs[i].requires_grad:
outputs_with_grad.append(tensor_outputs[i])
args_with_grad.append(args[i])
if len(outputs_with_grad) == 0:
raise RuntimeError(
"None of the outputs have requires_grad=True, "
"this checkpoint() is not necessary"
)
torch.autograd.backward(outputs_with_grad, args_with_grad)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs
)
return (None, None, None, None) + grads
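# --- Illustrative sketch (editor addition, not part of the original module) ---
# A minimal, self-contained demonstration of the dummy-tensor trick described in the
# comments above: feeding a single empty tensor with requires_grad=True into an
# autograd.Function guarantees that backward() fires even when the "real" inputs carry
# no gradient themselves. The names _DummyTriggered and _demo_dummy_tensor_trick are
# hypothetical and exist only for illustration.
class _DummyTriggered(torch.autograd.Function):
    @staticmethod
    def forward(ctx, dummy, x):
        # the dummy participates only so that autograd builds a backward graph
        return x * 2
    @staticmethod
    def backward(ctx, grad_output):
        # no gradient for the dummy; d(2x)/dx = 2
        return None, grad_output * 2
def _demo_dummy_tensor_trick():
    x = torch.ones(3)  # note: requires_grad is False
    out = _DummyTriggered.apply(torch.tensor([], requires_grad=True), x)
    assert out.requires_grad  # a backward graph exists thanks to the dummy input
    out.sum().backward()  # _DummyTriggered.backward runs even though x needs no grad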
|
flash_metaseq-main
|
metaseq/modules/checkpoint_activation_wrapper/checkpoint_activations.py
|
flash_metaseq-main
|
metaseq/modules/checkpoint_activation_wrapper/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import torch
from torch.utils.data.dataloader import default_collate
from . import BaseDataset
def _flatten(dico, prefix=None):
"""Flatten a nested dictionary."""
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = prefix + "." if prefix is not None else ""
for k, v in dico.items():
if v is None:
continue
new_dico.update(_flatten(v, prefix + k))
elif isinstance(dico, list):
for i, v in enumerate(dico):
new_dico.update(_flatten(v, prefix + ".[" + str(i) + "]"))
else:
new_dico = OrderedDict({prefix: dico})
return new_dico
def _unflatten(dico):
"""Unflatten a flattened dictionary into a nested dictionary."""
new_dico = OrderedDict()
for full_k, v in dico.items():
full_k = full_k.split(".")
node = new_dico
for k in full_k[:-1]:
if k.startswith("[") and k.endswith("]"):
k = int(k[1:-1])
if k not in node:
node[k] = OrderedDict()
node = node[k]
node[full_k[-1]] = v
return new_dico
class NestedDictionaryDataset(BaseDataset):
def __init__(self, defn, sizes=None):
super().__init__()
self.defn = _flatten(defn)
self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes
first = None
for v in self.defn.values():
if not isinstance(
v,
(
BaseDataset,
torch.utils.data.Dataset,
),
):
raise ValueError("Expected Dataset but found: {}".format(v.__class__))
first = first or v
if len(v) > 0:
assert len(v) == len(first), "dataset lengths must match"
self._len = len(first)
def __getitem__(self, index):
return OrderedDict((k, ds[index]) for k, ds in self.defn.items())
def __len__(self):
return self._len
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
if len(samples) == 0:
return {}
sample = OrderedDict()
for k, ds in self.defn.items():
try:
sample[k] = ds.collater([s[k] for s in samples])
except NotImplementedError:
sample[k] = default_collate([s[k] for s in samples])
return _unflatten(sample)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(s[index] for s in self.sizes)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if len(self.sizes) == 1:
return self.sizes[0][index]
else:
return (s[index] for s in self.sizes)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return any(ds.supports_prefetch for ds in self.defn.values())
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
for ds in self.defn.values():
if getattr(ds, "supports_prefetch", False):
ds.prefetch(indices)
def set_epoch(self, epoch):
# TODO(anj): Identify if we need this functionality for evals.
pass
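# --- Illustrative sketch (editor addition, not part of the original module) ---
# Round trip through _flatten/_unflatten on a toy nested structure. In real use the
# leaf values are datasets rather than tensors; tensors keep this example
# self-contained. _demo_flatten_roundtrip is a hypothetical helper for illustration.
def _demo_flatten_roundtrip():
    nested = OrderedDict(
        [
            ("net_input", OrderedDict([("src_tokens", torch.tensor([1, 2, 3]))])),
            ("target", torch.tensor([4, 5])),
        ]
    )
    flat = _flatten(nested)
    assert list(flat.keys()) == ["net_input.src_tokens", "target"]
    restored = _unflatten(flat)
    assert torch.equal(restored["net_input"]["src_tokens"], torch.tensor([1, 2, 3]))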
|
flash_metaseq-main
|
metaseq/data/nested_dictionary_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional
import numpy as np
import torch
class StreamingTokenBlockDataset(torch.utils.data.IterableDataset):
"""View an IterableDataset of tokens as a 1D tensor and chunk into blocks.
This dataset can only be iterated over once.
Args:
dataset (~torch.utils.data.IterableDataset): dataset to chunk
block_size (int): maximum block size
        break_mode (str, optional): Mode used for breaking tokens. Values can
            be one of:
            - 'none': break tokens into equally sized blocks (up to block_size)
            - 'complete': pack whole examples into each block, padding the remainder
            - 'eos_pad_8': one example per block, right-padded to a multiple of 8 (+1)
drop_last (bool, optional): drop the last item (default: False)
padding_idx (int, optional): index to use for padding symbols
(required if *drop_last* is ``False``)
shuffle_buffer_size (int, optional): buffer this many items and shuffle
using the provided *seed*; default value is 1, so no shuffling is
performed. This can be adjusted dynamically after initialization,
but only before iteration has begun.
seed (int, optional): seed for shuffling
"""
def __init__(
self,
dataset: torch.utils.data.IterableDataset,
block_size: int,
break_mode: str = "none",
drop_last: Optional[bool] = False,
padding_idx: Optional[int] = None,
shuffle_buffer_size: int = 1,
seed: Optional[int] = None,
):
super().__init__()
self.dataset = dataset
self.block_size = block_size
self.break_mode = break_mode
self.drop_last = drop_last
self.padding_idx = padding_idx
self.shuffle_buffer_size = shuffle_buffer_size
self.seed = seed
if break_mode == "none":
self.block_iterator = yield_token_blocks
elif break_mode == "eos_pad_8":
self.block_iterator = yield_single_sentences_pad_8
elif break_mode == "complete":
self.block_iterator = yield_doc_blocks
else:
raise ValueError(
f'Invalid value for break_mode = {break_mode}. Available options are "none", "eos_pad_8" or "complete".'
)
if not drop_last and padding_idx is None:
raise ValueError("padding_idx is required when drop_last is False")
assert shuffle_buffer_size >= 1
if shuffle_buffer_size > 1 and seed is None:
raise ValueError("seed is required when shuffle_buffer_size > 1")
# if break_mode != "none": raise NotImplementedError
self._started_iteration = False
def set_epoch(self, epoch):
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
def set_shuffle_buffer_size(self, new_shuffle_buffer_size):
assert not self._started_iteration
self.shuffle_buffer_size = new_shuffle_buffer_size
def __iter__(self):
assert not self._started_iteration
        self._started_iteration = True  # mark as started so the single-iteration guard can fire
block_itr = self.block_iterator(
self.dataset,
self.block_size,
self.drop_last,
self.padding_idx,
)
if self.seed is not None:
# add a random offset (2273) to the given seed to decouple this RNG
# from any other RNG instances elsewhere
rng = np.random.default_rng(2273 + self.seed)
else:
rng = None
buffer = []
def get_next_item_and_replace_in_buffer(replacement_item):
# return a random item from the buffer and replace with a new item
nonlocal rng
idx = rng.integers(len(buffer)) if rng is not None else 0
item = buffer[idx]
if replacement_item is not None:
buffer[idx] = replacement_item
else:
buffer.pop(idx)
return item
for block in block_itr:
if len(buffer) < self.shuffle_buffer_size:
# initially fill the buffer to the requested size
buffer.append(block)
else:
# return random block from the buffer and replace with new block
yield get_next_item_and_replace_in_buffer(block)
# clear buffer of any remaining items
while buffer:
yield get_next_item_and_replace_in_buffer(None)
def yield_single_sentences_pad_8(iterable, block_size, drop_last, padding_idx):
"""Mimics sample-break-mode eos i.e. 1 example per sequence without any packing.
When multiple examples are packed into a single sequence, example tokens would attend
to tokens in neighbouring examples, which may be undesirable. This mode can
avoid that. Since there is no packing, this mode is considerably slower.
We round up the example length to a multiple of 8, pad to this length and
return the example as is, without packing, truncating to block_size in cases of
very long examples.
"""
for idx, item in enumerate(iterable):
cur_block = []
cur_block_ids = []
if item.numel() > block_size:
# truncate right side
# TODO: Enable left side truncation
item = item[:block_size]
cur_block.append(item)
# We round up to a multiple of 8 + 1, because later on
# one element is removed for src/target tensor creation
# which brings it back to a multiple of 8. block_size is
# already passed with + 1 included.
cur_block_remain = min(int(math.ceil(item.numel() / 8)) * 8 + 1, block_size)
cur_block_remain -= item.numel()
padding = cur_block[-1].new_full((cur_block_remain,), padding_idx)
cur_block.append(padding)
cur_block_ids.append(idx)
yield {
"ids": torch.LongTensor(cur_block_ids),
"block": torch.cat(cur_block),
}
def yield_doc_blocks(iterable, block_size, drop_last, padding_idx):
"""Mimics sample-break-mode complete"""
cur_block = []
cur_block_ids = []
cur_block_remain = block_size
for idx, item in enumerate(iterable):
if item.numel() > block_size:
# truncate right side
item = item[:block_size]
if item.numel() > cur_block_remain:
padding = cur_block[-1].new_full((cur_block_remain,), padding_idx)
cur_block.append(padding)
block = torch.cat(cur_block)
yield {
"ids": torch.LongTensor(cur_block_ids),
"block": block,
}
cur_block = []
cur_block_ids = []
cur_block_remain = block_size
cur_block.append(item)
cur_block_ids.append(idx)
cur_block_remain -= item.numel()
assert cur_block_remain >= 0
if not drop_last and len(cur_block) > 0:
if cur_block_remain > 0:
padding = cur_block[-1].new_full((cur_block_remain,), padding_idx)
cur_block.append(padding)
block = torch.cat(cur_block)
assert block.numel() == block_size
yield {
"ids": torch.LongTensor(cur_block_ids),
"block": block,
}
def yield_token_blocks(iterable, block_size, drop_last, padding_idx):
"""Sample break mode = None. (Pre-Training default)."""
cur_block = []
cur_block_ids = []
cur_block_remain = block_size
for idx, item in enumerate(iterable):
cur_block_ids.append(idx)
while item.numel() > 0:
num_to_take = min(item.numel(), cur_block_remain)
cur_block.append(item[:num_to_take])
item = item[num_to_take:] # remainder
cur_block_remain -= num_to_take
assert cur_block_remain >= 0
if cur_block_remain == 0:
block = torch.cat(cur_block)
assert block.numel() == block_size
yield {
"ids": torch.LongTensor(cur_block_ids),
"block": block[:block_size],
}
cur_block = []
cur_block_ids = []
cur_block_remain = block_size
if not drop_last and len(cur_block) > 0:
if cur_block_remain > 0:
padding = cur_block[-1].new_full((cur_block_remain,), padding_idx)
cur_block.append(padding)
block = torch.cat(cur_block)
assert block.numel() == block_size
yield {
"ids": torch.LongTensor(cur_block_ids),
"block": block,
}
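# --- Illustrative sketch (editor addition, not part of the original module) ---
# Packing behaviour of break_mode="none": a stream of variable-length token tensors is
# chunked into fixed blocks of block_size tokens, and only the final block is padded.
# _demo_yield_token_blocks is a hypothetical helper for illustration only.
def _demo_yield_token_blocks():
    stream = [torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])]
    blocks = list(
        yield_token_blocks(stream, block_size=4, drop_last=False, padding_idx=0)
    )
    assert blocks[0]["block"].tolist() == [1, 2, 3, 4]  # packs across example boundaries
    assert blocks[1]["block"].tolist() == [5, 6, 0, 0]  # tail block is right-padded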
|
flash_metaseq-main
|
metaseq/data/streaming_token_block_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch.utils.data
from metaseq.data import data_utils
logger = logging.getLogger(__name__)
class EpochListening:
"""Mixin for receiving updates whenever the epoch increments."""
@property
def can_reuse_epoch_itr_across_epochs(self):
"""
        Whether we can reuse the :class:`metaseq.data.iterators.EpochBatchIterator`
        for this dataset across epochs.
        This needs to return ``False`` if the sample sizes can change across
        epochs, in which case we may need to regenerate batches at each epoch.
        If your dataset relies on ``set_epoch`` then you should consider setting
        this to ``False``.
"""
return True
def set_epoch(self, epoch):
"""Will receive the updated epoch number at the beginning of the epoch."""
pass
class BaseDataset(torch.utils.data.Dataset, EpochListening):
"""A dataset that provides helpers for batching."""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
raise NotImplementedError
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
raise NotImplementedError
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self), dtype=np.int64)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return False
def attr(self, attr: str, index: int):
return getattr(self, attr, None)
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
raise NotImplementedError
def get_batch_shapes(self):
"""
Return a list of valid batch shapes, for example::
[(8, 512), (16, 256), (32, 128)]
The first dimension of each tuple is the batch size and can be ``None``
to automatically infer the max batch size based on ``--max-tokens``.
The second dimension of each tuple is the max supported length as given
by :func:`metaseq.data.BaseDataset.num_tokens`.
This will be used by :func:`metaseq.data.BaseDataset.batch_by_size`
to restrict batch shapes. This is useful on TPUs to avoid too many
dynamic shapes (and recompilations).
"""
return None
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
"""
Given an ordered set of indices, return batches according to
*max_tokens*, *max_sentences* and *required_batch_size_multiple*.
"""
from metaseq.data import data_utils
fixed_shapes = self.get_batch_shapes()
if fixed_shapes is not None:
def adjust_bsz(bsz, num_tokens):
if bsz is None:
assert max_tokens is not None, "Must specify --max-tokens"
bsz = max_tokens // num_tokens
if max_sentences is not None:
bsz = min(bsz, max_sentences)
elif (
bsz >= required_batch_size_multiple
and bsz % required_batch_size_multiple != 0
):
bsz -= bsz % required_batch_size_multiple
return bsz
fixed_shapes = np.array(
[
[adjust_bsz(bsz, num_tokens), num_tokens]
for (bsz, num_tokens) in fixed_shapes
]
)
try:
num_tokens_vec = self.num_tokens_vec(indices).astype("int64")
except NotImplementedError:
num_tokens_vec = None
return data_utils.batch_by_size(
indices,
num_tokens_fn=self.num_tokens,
num_tokens_vec=num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
fixed_shapes=fixed_shapes,
)
def filter_indices_by_size(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
WARNING: don't update, override method in child classes
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif (
hasattr(self, "sizes")
and isinstance(self.sizes, list)
and len(self.sizes) == 1
):
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
return indices, ignored
def ordered_indices_per_dataset(self):
"""Return a list of ordered indices vectors for each underlying dataset
(with parent dataset indices)."""
return [self.ordered_indices()]
@property
def supports_fetch_outside_dataloader(self):
"""Whether this dataset supports fetching outside the workers of the dataloader."""
return True
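# --- Illustrative sketch (editor addition, not part of the original module) ---
# A minimal concrete BaseDataset implementing the hooks used during batching
# (__getitem__, __len__, collater, num_tokens, size). The class name _ToyTensorDataset
# is hypothetical and exists only to illustrate the interface.
class _ToyTensorDataset(BaseDataset):
    def __init__(self, items):
        super().__init__()
        self.items = items  # e.g. a list of equally sized 1D LongTensors
    def __getitem__(self, index):
        return self.items[index]
    def __len__(self):
        return len(self.items)
    def collater(self, samples):
        return torch.stack(samples)
    def num_tokens(self, index):
        return self.items[index].numel()
    def size(self, index):
        return self.items[index].numel()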
|
flash_metaseq-main
|
metaseq/data/base_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import numpy as np
from torch.utils.data.dataloader import default_collate
from . import BaseDataset
class ConcatDataset(BaseDataset):
@staticmethod
def cumsum(sequence, sample_ratios):
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
s += curr_len
return r
def __init__(self, datasets, sample_ratios=1):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
if isinstance(sample_ratios, int):
sample_ratios = [sample_ratios] * len(self.datasets)
self.sample_ratios = sample_ratios
self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
self.real_sizes = [len(d) for d in self.datasets]
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx][sample_idx]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
def collater(self, samples, **extra_args):
        # For now this only supports datasets whose underlying collater implementations match
if hasattr(self.datasets[0], "collater"):
return self.datasets[0].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
@property
def sizes(self):
_dataset_sizes = []
for ds, sr in zip(self.datasets, self.sample_ratios):
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(np.tile(ds.sizes, sr))
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(np.tile(ds.sizes[0], sr))
return np.concatenate(_dataset_sizes)
@property
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
"""
        Return indices sorted by length so that less padding is needed.
"""
if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1:
# special handling for concatenating lang_pair_datasets
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = (
sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
)
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(src_sizes[indices], kind="mergesort")]
else:
return np.argsort(self.sizes)
def prefetch(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, "supports_prefetch", False):
ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
def ordered_indices_per_dataset(self):
"""Return a list of ordered indices vectors for each underlying dataset
(with parent dataset indices)."""
ordered_indices_list = []
for i, dataset in enumerate(self.datasets):
start = 0 if i == 0 else self.cumulative_sizes[i - 1]
subdataset_indices_list = dataset.ordered_indices_per_dataset()
for indices in subdataset_indices_list:
ordered_indices_list.append(indices + start)
return ordered_indices_list
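# --- Illustrative sketch (editor addition, not part of the original module) ---
# How sample_ratios upsample: with ratios [2, 1] the first dataset is virtually repeated
# twice, so len(concat) == 2 * len(d1) + len(d2), and indices past the real length wrap
# around via the modulo in _get_dataset_and_sample_index. _ToyListDataset and
# _demo_sample_ratios are hypothetical helpers for illustration only.
class _ToyListDataset(BaseDataset):
    def __init__(self, items):
        super().__init__()
        self.items = items
    def __getitem__(self, index):
        return self.items[index]
    def __len__(self):
        return len(self.items)
def _demo_sample_ratios():
    d1, d2 = _ToyListDataset([10, 11]), _ToyListDataset([20])
    concat = ConcatDataset([d1, d2], sample_ratios=[2, 1])
    assert len(concat) == 5  # 2 * 2 + 1
    assert concat[2] == 10  # the second (virtual) pass over d1 wraps back to index 0
    assert concat[4] == 20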
|
flash_metaseq-main
|
metaseq/data/concat_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class PartitionedStreamingDataset(torch.utils.data.IterableDataset):
"""Partition an IterableDataset and iterate over a single shard.
If **drop_last** is ``False``, then the iterator will yield ``None`` for
shards that don't have data.
Args:
dataset (~torch.utils.data.IterableDataset): dataset to partition
num_shards (int): number of ways to partition the dataset
shard_id (int): shard index to iterate over
drop_last (bool, optional): drop the last item (default: False)
"""
def __init__(
self,
dataset: torch.utils.data.IterableDataset,
num_shards: int,
shard_id: int,
drop_last: bool = False,
):
super().__init__()
self.dataset = dataset
self.num_shards = num_shards
self.shard_id = shard_id
self.drop_last = drop_last
assert isinstance(dataset, torch.utils.data.IterableDataset)
assert num_shards > 0
assert shard_id >= 0 and shard_id < num_shards
def set_epoch(self, epoch):
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
def __iter__(self):
chunk = []
for item in self.dataset:
chunk.append(item)
if len(chunk) == self.num_shards:
yield chunk[self.shard_id]
chunk = []
if len(chunk) > 0 and not self.drop_last:
if self.shard_id < len(chunk):
yield chunk[self.shard_id]
else:
yield None
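# --- Illustrative sketch (editor addition, not part of the original module) ---
# Round-robin semantics: with num_shards=2, shard 0 sees items 0, 2, 4, ... and shard 1
# sees items 1, 3, 5, ...; when the tail chunk is short, shards beyond the leftover
# items receive None (unless drop_last=True). _RangeIterable and _demo_partitioning are
# hypothetical helpers for illustration only.
class _RangeIterable(torch.utils.data.IterableDataset):
    def __iter__(self):
        return iter(range(5))
def _demo_partitioning():
    shard0 = list(
        PartitionedStreamingDataset(_RangeIterable(), num_shards=2, shard_id=0)
    )
    shard1 = list(
        PartitionedStreamingDataset(_RangeIterable(), num_shards=2, shard_id=1)
    )
    assert shard0 == [0, 2, 4]
    assert shard1 == [1, 3, None]  # only one leftover item, so shard 1 yields None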
|
flash_metaseq-main
|
metaseq/data/partitioned_streaming_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import BaseDataset
class IdDataset(BaseDataset):
def __getitem__(self, index):
return index
def __len__(self):
return 0
def collater(self, samples):
return torch.tensor(samples)
|
flash_metaseq-main
|
metaseq/data/id_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import numpy as np
import torch
import math
class StreamingSrcTgtDataset(torch.utils.data.IterableDataset):
"""View an IterableDataset of tokens as a 1D tensor and chunk into blocks.
This dataset can only be iterated over once.
Args:
dataset (~torch.utils.data.IterableDataset): dataset to chunk
block_size (int): maximum block size
        break_mode (str, optional): Mode used for breaking tokens. Values can
            be one of:
            - 'none' / 'complete': pack multiple (src, tgt) examples into each block
            - 'eos_pad_8': one (src, tgt) example per block, right-padded to a multiple of 8 (+1)
drop_last (bool, optional): drop the last item (default: False)
padding_idx (int, optional): index to use for padding symbols
(required if *drop_last* is ``False``)
shuffle_buffer_size (int, optional): buffer this many items and shuffle
using the provided *seed*; default value is 1, so no shuffling is
performed. This can be adjusted dynamically after initialization,
but only before iteration has begun.
seed (int, optional): seed for shuffling
"""
def __init__(
self,
dataset: torch.utils.data.IterableDataset,
block_size: int,
break_mode: str = "none",
drop_last: Optional[bool] = False,
padding_idx: Optional[int] = None,
shuffle_buffer_size: int = 1,
seed: Optional[int] = None,
):
super().__init__()
self.dataset = dataset
self.block_size = block_size
self.break_mode = break_mode
self.drop_last = drop_last
self.padding_idx = padding_idx
self.shuffle_buffer_size = shuffle_buffer_size
self.seed = seed
if break_mode == "none" or break_mode == "complete":
self.block_iterator = yield_src_tgt_blocks
elif break_mode == "eos_pad_8": # Single example per sequence
self.block_iterator = yield_src_tgt_single_sentences_pad_8
else:
raise NotImplementedError(f"Unknown break mode: {break_mode}")
if not drop_last and padding_idx is None:
raise ValueError("padding_idx is required when drop_last is False")
assert shuffle_buffer_size >= 1
if shuffle_buffer_size > 1 and seed is None:
raise ValueError("seed is required when shuffle_buffer_size > 1")
self._started_iteration = False
def set_epoch(self, epoch):
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
def set_shuffle_buffer_size(self, new_shuffle_buffer_size):
assert not self._started_iteration
self.shuffle_buffer_size = new_shuffle_buffer_size
def __iter__(self):
assert not self._started_iteration
        self._started_iteration = True  # mark as started so the single-iteration guard can fire
block_itr = self.block_iterator(
self.dataset,
self.block_size,
self.drop_last,
self.padding_idx,
)
if self.seed is not None:
# add a random offset (2273) to the given seed to decouple this RNG
# from any other RNG instances elsewhere
self.rng = np.random.default_rng(2273 + self.seed)
else:
self.rng = None
buffer = []
def get_next_item_and_replace_in_buffer(replacement_item):
# return a random item from the buffer and replace with a new item
idx = self.rng.integers(len(buffer)) if self.rng is not None else 0
item = buffer[idx]
if replacement_item is not None:
buffer[idx] = replacement_item
else:
buffer.pop(idx)
return item
for block in block_itr:
if len(buffer) < self.shuffle_buffer_size:
# initially fill the buffer to the requested size
buffer.append(block)
else:
# return random block from the buffer and replace with new block
yield get_next_item_and_replace_in_buffer(block)
# clear buffer of any remaining items
while buffer:
yield get_next_item_and_replace_in_buffer(None)
def yield_src_tgt_blocks(iterable, block_size, drop_last, padding_idx):
"""Packs multiple examples together in a block"""
cur_src_block = []
cur_src_block_ids = []
cur_tgt_block = []
cur_block_remain = block_size
for idx, (src, tgt) in enumerate(iterable):
if src.numel() > block_size:
# truncate right side
            # TODO: Switch this to left truncation so that the target isn't ever truncated
src = src[:block_size]
tgt = tgt[:block_size]
if src.numel() > cur_block_remain:
padding = cur_src_block[-1].new_full((cur_block_remain,), padding_idx)
cur_src_block.append(padding)
cur_tgt_block.append(padding)
src_block = torch.cat(cur_src_block)
tgt_block = torch.cat(cur_tgt_block)
yield {
"ids": torch.LongTensor(cur_src_block_ids),
"src_block": src_block,
"tgt_block": tgt_block,
}
cur_src_block = []
cur_src_block_ids = []
cur_tgt_block = []
cur_block_remain = block_size
cur_src_block.append(src)
cur_src_block_ids.append(idx)
cur_tgt_block.append(tgt)
cur_block_remain -= src.numel()
assert cur_block_remain >= 0
if not drop_last and len(cur_src_block) > 0:
if cur_block_remain > 0:
padding = cur_src_block[-1].new_full((cur_block_remain,), padding_idx)
cur_src_block.append(padding)
cur_tgt_block.append(padding)
src_block = torch.cat(cur_src_block)
tgt_block = torch.cat(cur_tgt_block)
assert src_block.numel() == block_size
yield {
"ids": torch.LongTensor(cur_src_block_ids),
"src_block": src_block,
"tgt_block": tgt_block,
}
def yield_src_tgt_single_sentences_pad_8(iterable, block_size, drop_last, padding_idx):
"""Mimics sample-break-mode eos i.e. 1 example per sequence without any packing.
When multiple examples are packed into a single sequence, example tokens would attend
to tokens in neighbouring examples, which may be undesirable. This mode can
avoid that. Since there is no packing, this mode is considerably slower.
We round up the example length to a multiple of 8, pad to this length and
return the example as is, without packing, truncating to block_size in cases of
very long examples.
"""
for idx, (src, tgt) in enumerate(iterable):
cur_src_block = []
cur_src_block_ids = []
cur_tgt_block = []
if src.numel() > block_size:
# truncate right side
# TODO: Enable left side truncation
src = src[:block_size]
tgt = tgt[:block_size]
cur_src_block.append(src)
cur_src_block_ids.append(idx)
cur_tgt_block.append(tgt)
# We round up to a multiple of 8 + 1, because later on
# one element is removed for src/target tensor creation
# which brings it back to a multiple of 8. block_size is
# already passed with + 1 included.
# cur_block_remain = int(min(math.pow(2, math.ceil(math.log(src.numel(), 2))) + 1, block_size))
cur_block_remain = min(int(math.ceil(src.numel() / 8)) * 8 + 1, block_size)
cur_block_remain -= src.numel()
padding = cur_src_block[-1].new_full((cur_block_remain,), padding_idx)
cur_src_block.append(padding)
cur_tgt_block.append(padding)
yield {
"ids": torch.LongTensor(cur_src_block_ids),
"src_block": torch.cat(cur_src_block),
"tgt_block": torch.cat(cur_tgt_block),
}
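# --- Illustrative sketch (editor addition, not part of the original module) ---
# The pad-to-a-multiple-of-8 rule described in the docstring above: a (src, tgt) pair of
# length 5 is padded up to ceil(5 / 8) * 8 + 1 = 9 positions (block_size permitting).
# _demo_pad_8 is a hypothetical helper for illustration only.
def _demo_pad_8():
    src = torch.arange(1, 6)  # 5 tokens
    out = next(
        yield_src_tgt_single_sentences_pad_8(
            [(src, src)], block_size=12, drop_last=False, padding_idx=0
        )
    )
    assert out["src_block"].numel() == 9
    assert out["tgt_block"].numel() == 9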
|
flash_metaseq-main
|
metaseq/data/streaming_src_tgt_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from metaseq.data import data_utils
from . import BaseWrapperDataset
class TruncateDataset(BaseWrapperDataset):
"""Truncate a sequence by returning the first truncation_length tokens"""
def __init__(self, dataset, truncation_length):
super().__init__(dataset)
assert truncation_length is not None
self.truncation_length = truncation_length
self.dataset = dataset
def __getitem__(self, index):
item = self.dataset[index]
item_len = item.size(0)
if item_len > self.truncation_length:
item = item[: self.truncation_length]
return item
@property
def sizes(self):
return np.minimum(self.dataset.sizes, self.truncation_length)
def __len__(self):
return len(self.dataset)
class RandomCropDataset(TruncateDataset):
"""Truncate a sequence by returning a random crop of truncation_length tokens"""
def __init__(self, dataset, truncation_length, seed=1):
super().__init__(dataset, truncation_length)
self.seed = seed
self.epoch = 0
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index):
with data_utils.numpy_seed(self.seed, self.epoch, index):
item = self.dataset[index]
item_len = item.size(0)
excess = item_len - self.truncation_length
if excess > 0:
start_idx = np.random.randint(0, excess)
item = item[start_idx : start_idx + self.truncation_length]
return item
def maybe_shorten_dataset(
dataset,
split,
shorten_data_split_list,
shorten_method,
tokens_per_sample,
seed,
):
truncate_split = (
split in shorten_data_split_list.split(",") or len(shorten_data_split_list) == 0
)
if shorten_method == "truncate" and truncate_split:
dataset = TruncateDataset(dataset, tokens_per_sample)
elif shorten_method == "random_crop" and truncate_split:
dataset = RandomCropDataset(dataset, tokens_per_sample, seed)
return dataset
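# --- Illustrative sketch (editor addition, not part of the original module) ---
# TruncateDataset clips long items to truncation_length, leaves short items untouched,
# and caps the reported sizes accordingly. _ToyIndexedDataset and _demo_truncate are
# hypothetical, duck-typed helpers (only __getitem__, __len__ and .sizes are needed).
class _ToyIndexedDataset:
    def __init__(self, items):
        self.items = items
        self.sizes = np.array([t.numel() for t in items])
    def __getitem__(self, index):
        return self.items[index]
    def __len__(self):
        return len(self.items)
def _demo_truncate():
    import torch
    base = _ToyIndexedDataset([torch.arange(10), torch.arange(3)])
    truncated = TruncateDataset(base, truncation_length=5)
    assert truncated[0].tolist() == [0, 1, 2, 3, 4]  # long item is clipped
    assert truncated[1].tolist() == [0, 1, 2]  # short item is untouched
    assert truncated.sizes.tolist() == [5, 3]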
|
flash_metaseq-main
|
metaseq/data/shorten_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from . import BaseWrapperDataset
class SortDataset(BaseWrapperDataset):
def __init__(self, dataset, sort_order):
super().__init__(dataset)
if not isinstance(sort_order, (list, tuple)):
sort_order = [sort_order]
self.sort_order = sort_order
assert all(len(so) == len(dataset) for so in sort_order)
def ordered_indices(self):
return np.lexsort(self.sort_order)
|
flash_metaseq-main
|
metaseq/data/sort_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseDataset, data_utils
def collate(samples, pad_idx, eos_idx, fixed_pad_length=None, pad_to_bsz=None):
if len(samples) == 0:
return {}
def merge(key, is_list=False):
if is_list:
res = []
for i in range(len(samples[0][key])):
res.append(
data_utils.collate_tokens(
[s[key][i] for s in samples],
pad_idx,
eos_idx,
left_pad=False,
pad_to_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
)
)
return res
else:
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad=False,
pad_to_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
)
src_tokens = merge("source")
if samples[0]["target"] is not None:
is_target_list = isinstance(samples[0]["target"], list)
target = merge("target", is_target_list)
else:
target = src_tokens
return {
"id": torch.LongTensor([s["id"] for s in samples]),
"nsentences": len(samples),
"ntokens": sum(len(s["source"]) for s in samples),
"net_input": {
"src_tokens": src_tokens,
"src_lengths": torch.LongTensor([s["source"].numel() for s in samples]),
},
"target": target,
}
class MonolingualDataset(BaseDataset):
"""
A wrapper around torch.utils.data.Dataset for monolingual data.
"""
def __init__(
self,
dataset,
sizes,
src_vocab,
tgt_vocab=None,
add_eos_for_other_targets=False,
shuffle=False,
add_bos_token=False,
fixed_pad_length=None,
pad_to_bsz=None,
src_lang_idx=None,
tgt_lang_idx=None,
):
self.dataset = dataset
self.sizes = np.array(sizes)
self.vocab = src_vocab
self.tgt_vocab = tgt_vocab or src_vocab
self.add_eos_for_other_targets = add_eos_for_other_targets
self.shuffle = shuffle
self.add_bos_token = add_bos_token
self.fixed_pad_length = fixed_pad_length
self.pad_to_bsz = pad_to_bsz
self.src_lang_idx = src_lang_idx
self.tgt_lang_idx = tgt_lang_idx
def __getitem__(self, index):
# *future_target* is the original sentence
# *source* is shifted right by 1 (maybe left-padded with eos)
#
# Left-to-right language models should condition on *source* and
# predict *future_target*.
source, future_target, _ = self.dataset[index]
target = self._filter_vocab(future_target)
source, target = self._maybe_add_bos(source, target)
return {"id": index, "source": source, "target": target}
def __len__(self):
return len(self.dataset)
def _maybe_add_bos(self, source, target):
if self.add_bos_token:
# src_lang_idx and tgt_lang_idx are passed in for multilingual LM, with the
# first token being a lang_id token.
bos = self.src_lang_idx or self.vocab.bos()
source = torch.cat([source.new([bos]), source])
if target is not None:
tgt_bos = self.tgt_lang_idx or self.tgt_vocab.bos()
target = torch.cat([target.new([tgt_bos]), target])
return source, target
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
return self.sizes[indices]
def _filter_vocab(self, target):
if len(self.tgt_vocab) != len(self.vocab):
def _filter(target):
mask = target.ge(len(self.tgt_vocab))
if mask.any():
target[mask] = self.tgt_vocab.unk()
return target
if isinstance(target, list):
return [_filter(t) for t in target]
return _filter(target)
return target
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the right.
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the right.
"""
return collate(
samples,
self.vocab.pad(),
self.vocab.eos(),
self.fixed_pad_length,
self.pad_to_bsz,
)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(indices)
|
flash_metaseq-main
|
metaseq/data/monolingual_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data.dataloader import default_collate
from . import BaseDataset
class BaseWrapperDataset(BaseDataset):
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if hasattr(self.dataset, "collater"):
return self.dataset.collater(samples)
else:
return default_collate(samples)
@property
def sizes(self):
return self.dataset.sizes
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def attr(self, attr: str, index: int):
return self.dataset.attr(attr, index)
def prefetch(self, indices):
self.dataset.prefetch(indices)
def get_batch_shapes(self):
return self.dataset.get_batch_shapes()
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
return self.dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
def filter_indices_by_size(self, indices, max_sizes):
return self.dataset.filter_indices_by_size(indices, max_sizes)
def set_epoch(self, epoch):
super().set_epoch(epoch)
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
|
flash_metaseq-main
|
metaseq/data/base_wrapper_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class NumelDataset(BaseWrapperDataset):
def __init__(self, dataset, reduce=False):
super().__init__(dataset)
self.reduce = reduce
def __getitem__(self, index):
item = self.dataset[index]
if torch.is_tensor(item):
return torch.numel(item)
else:
return np.size(item)
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if self.reduce:
return sum(samples)
else:
return torch.tensor(samples)
|
flash_metaseq-main
|
metaseq/data/numel_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .dictionary import Dictionary, TruncatedDictionary
from .base_dataset import BaseDataset
from .base_wrapper_dataset import BaseWrapperDataset
from .append_token_dataset import AppendTokenDataset
from .concat_dataset import ConcatDataset
from .id_dataset import IdDataset
from .indexed_dataset import (
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
MMapIndexedDataset,
)
from .jsonl_dataset import JsonlDataset
from .list_dataset import ListDataset
from .lm_context_window_dataset import LMContextWindowDataset
from .monolingual_dataset import MonolingualDataset
from .nested_dictionary_dataset import NestedDictionaryDataset
from .numel_dataset import NumelDataset
from .pad_dataset import LeftPadDataset, PadDataset, RightPadDataset
from .partitioned_streaming_dataset import PartitionedStreamingDataset
from .prepend_token_dataset import PrependTokenDataset
from .resampling_dataset import ResamplingDataset
from .sort_dataset import SortDataset
from .streaming_shuffle_dataset import StreamingShuffleDataset
from .streaming_token_block_dataset import StreamingTokenBlockDataset
from .streaming_src_tgt_dataset import StreamingSrcTgtDataset
from .strip_token_dataset import StripTokenDataset
from .token_block_dataset import TokenBlockDataset
from .pad_dataset import MultiplePadDataset
from .shorten_dataset import TruncateDataset
from .iterators import (
CountingIterator,
EpochBatchIterator,
GroupedIterator,
ShardedIterator,
)
__all__ = [
"AppendTokenDataset",
"BaseWrapperDataset",
"ConcatDataset",
"CountingIterator",
"Dictionary",
"EpochBatchIterator",
"BaseDataset",
"GroupedIterator",
"IdDataset",
"IndexedCachedDataset",
"IndexedDataset",
"IndexedRawTextDataset",
"JsonlDataset",
"LeftPadDataset",
"ListDataset",
"LMContextWindowDataset",
"MMapIndexedDataset",
"MonolingualDataset",
"MultiplePadDataset",
"NestedDictionaryDataset",
"NumelDataset",
"PadDataset",
"PartitionedStreamingDataset",
"PrependTokenDataset",
"ResamplingDataset",
"RightPadDataset",
"ShardedIterator",
"SortDataset",
"StreamingShuffleDataset",
"StreamingTokenBlockDataset",
"StreamingSrcTgtDataset",
"StripTokenDataset",
"TokenBlockDataset",
"TruncateDataset",
"TruncatedDictionary",
]
|
flash_metaseq-main
|
metaseq/data/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from metaseq.data import data_utils
class StreamingShuffleDataset(torch.utils.data.IterableDataset):
"""Shuffle a dataset across epochs.
Note that :func:`set_epoch` must be called before the first iteration.
Args:
dataset (~torch.utils.data.Dataset): dataset to shuffle
seed (int): iterate over the underlying dataset in random order using
this random seed
"""
def __init__(self, dataset: torch.utils.data.Dataset, seed: int):
super().__init__()
self.dataset = dataset
self.seed = seed
assert len(dataset) > 0
self.indices = None
def set_epoch(self, epoch):
# shuffle the dataset according to the seed argument and epoch
seed = int(hash((self.seed, epoch)) % 1e6)
with data_utils.numpy_seed(seed):
self.indices = np.random.permutation(len(self.dataset))
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
def __iter__(self):
assert (
self.indices is not None
), "must call StreamingShuffleDataset.set_epoch before iteration"
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None and worker_info.num_workers > 1:
chunks = np.array_split(self.indices, worker_info.num_workers)
indices = chunks[worker_info.id]
else:
indices = self.indices
for idx in indices:
yield self.dataset[idx]
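# --- Illustrative sketch (editor addition, not part of the original module) ---
# set_epoch must be called before iterating; each epoch reshuffles deterministically
# from (seed, epoch), so the same items come back in an epoch-dependent order.
# _demo_shuffle_across_epochs is a hypothetical helper for illustration only.
def _demo_shuffle_across_epochs():
    data = list(range(8))  # any map-style dataset with __len__/__getitem__ works here
    ds = StreamingShuffleDataset(data, seed=1)
    ds.set_epoch(1)
    first = list(ds)
    ds.set_epoch(2)
    second = list(ds)
    assert sorted(first) == sorted(second) == data  # the shuffle is a permutation: same items every epoch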
|
flash_metaseq-main
|
metaseq/data/streaming_shuffle_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class StripTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, id_to_strip):
super().__init__(dataset)
self.id_to_strip = id_to_strip
def __getitem__(self, index):
item = self.dataset[index]
while len(item) > 0 and item[-1] == self.id_to_strip:
item = item[:-1]
while len(item) > 0 and item[0] == self.id_to_strip:
item = item[1:]
return item
|
flash_metaseq-main
|
metaseq/data/strip_token_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import os
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from metaseq import utils
from metaseq.file_io import PathManager
logger = logging.getLogger(__name__)
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
for filename in PathManager.ls(path):
parts = filename.split(".")
if len(parts) >= 3 and len(parts[1].split("-")) == 2:
return parts[1].split("-")
return src, dst
def collate_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
pad_to_bsz=None,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz)
res = values[0].new(batch_size, size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
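# --- Illustrative sketch (editor addition, not part of the original module) ---
# Right-padding two sequences of different lengths into a single 2D batch;
# _demo_collate_tokens is a hypothetical helper kept here only to illustrate the
# default behaviour of collate_tokens.
def _demo_collate_tokens():
    a, b = torch.tensor([1, 2, 3]), torch.tensor([4, 5])
    batch = collate_tokens([a, b], pad_idx=0)
    assert batch.tolist() == [[1, 2, 3], [4, 5, 0]]  # pads on the right by default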
def load_indexed_dataset(
path, dictionary=None, dataset_impl=None, combine=False, default="cached"
):
"""A helper function for loading indexed datasets.
Args:
path (str): path to indexed dataset (e.g., 'data-bin/train')
dictionary (~metaseq.data.Dictionary): data dictionary
dataset_impl (str, optional): which dataset implementation to use. If
not provided, it will be inferred automatically. For legacy indexed
data we use the 'cached' implementation by default.
combine (bool, optional): automatically load and combine multiple
datasets. For example, if *path* is 'data-bin/train', then we will
combine 'data-bin/train', 'data-bin/train1', ... and return a
single ConcatDataset instance.
"""
import metaseq.data.indexed_dataset as indexed_dataset
from metaseq.data.concat_dataset import ConcatDataset
datasets = []
for k in itertools.count():
path_k = path + (str(k) if k > 0 else "")
path_k = indexed_dataset.get_indexed_dataset_to_local(path_k)
dataset_impl_k = dataset_impl
if dataset_impl_k is None:
dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)
dataset = indexed_dataset.make_dataset(
path_k,
impl=dataset_impl_k or default,
fix_lua_indexing=True,
dictionary=dictionary,
)
if dataset is None:
break
logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k))
datasets.append(dataset)
if not combine:
break
if len(datasets) == 0:
return None
elif len(datasets) == 1:
return datasets[0]
else:
return ConcatDataset(datasets)
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
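# --- Illustrative sketch (editor addition, not part of the original module) ---
# numpy_seed makes draws inside the block reproducible and restores the global NumPy RNG
# state on exit; _demo_numpy_seed is a hypothetical helper for illustration only.
def _demo_numpy_seed():
    with numpy_seed(1234):
        first = np.random.randint(0, 100, size=3)
    with numpy_seed(1234):
        second = np.random.randint(0, 100, size=3)
    assert (first == second).all()  # identical seeds give identical draws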
def collect_filtered(function, iterable, filtered):
"""
Similar to :func:`filter` but collects filtered elements in ``filtered``.
Args:
function (callable): function that returns ``False`` for elements that
should be filtered
iterable (iterable): iterable to filter
filtered (list): list to store filtered elements
"""
for el in iterable:
if function(el):
yield el
else:
filtered.append(el)
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def compare_leq(a, b):
return a <= b if not isinstance(a, tuple) else max(a) <= b
def check_size(idx):
if isinstance(max_positions, float) or isinstance(max_positions, int):
return size_fn(idx) <= max_positions
elif isinstance(max_positions, dict):
idx_size = size_fn(idx)
assert isinstance(idx_size, dict)
intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
return all(
all(
a is None or b is None or a <= b
for a, b in zip(idx_size[key], max_positions[key])
)
for key in intersect_keys
)
else:
# For MultiCorpusSampledDataset, will generalize it later
if not isinstance(size_fn(idx), Iterable):
return all(size_fn(idx) <= b for b in max_positions)
return all(
a is None or b is None or a <= b
for a, b in zip(size_fn(idx), max_positions)
)
ignored = []
itr = collect_filtered(check_size, indices, ignored)
indices = np.fromiter(itr, dtype=np.int64, count=-1)
return indices, ignored
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
"""
[deprecated] Filter indices based on their size.
Use `BaseDataset::filter_indices_by_size` instead.
Args:
indices (List[int]): ordered list of dataset indices
dataset (BaseDataset): metaseq dataset instance
max_positions (tuple): filter elements larger than this size.
Comparisons are done component-wise.
raise_exception (bool, optional): if ``True``, raise an exception if
any elements are filtered (default: False).
"""
warnings.warn(
"data_utils.filter_by_size is deprecated. "
"Use `BaseDataset::filter_indices_by_size` instead.",
stacklevel=2,
)
if isinstance(max_positions, float) or isinstance(max_positions, int):
if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray):
ignored = indices[dataset.sizes[indices] > max_positions].tolist()
indices = indices[dataset.sizes[indices] <= max_positions]
elif (
hasattr(dataset, "sizes")
and isinstance(dataset.sizes, list)
and len(dataset.sizes) == 1
):
ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()
indices = indices[dataset.sizes[0][indices] <= max_positions]
else:
indices, ignored = _filter_by_size_dynamic(
indices, dataset.size, max_positions
)
else:
indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
if len(ignored) > 0 and raise_exception:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
if len(ignored) > 0:
logger.warning(
(
"{} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if max_sizes is None:
return indices, []
if type(max_sizes) in (int, float):
max_src_size, max_tgt_size = max_sizes, max_sizes
else:
max_src_size, max_tgt_size = max_sizes
if tgt_sizes is None:
ignored = indices[src_sizes[indices] > max_src_size]
else:
ignored = indices[
(src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size)
]
if len(ignored) > 0:
if tgt_sizes is None:
indices = indices[src_sizes[indices] <= max_src_size]
else:
indices = indices[
(src_sizes[indices] <= max_src_size)
& (tgt_sizes[indices] <= max_tgt_size)
]
return indices, ignored.tolist()
def batch_by_size(
indices,
num_tokens_fn,
num_tokens_vec=None,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
fixed_shapes=None,
):
"""
Yield mini-batches of indices bucketed by size. Batches may contain
sequences of different lengths.
Args:
indices (List[int]): ordered list of dataset indices
num_tokens_fn (callable): function that returns the number of tokens at
a given index
num_tokens_vec (List[int], optional): precomputed vector of the number
of tokens for each index in indices (to enable faster batch generation)
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
required_batch_size_multiple (int, optional): require batch size to
be less than N or a multiple of N (default: 1).
fixed_shapes (List[Tuple[int, int]], optional): if given, batches will
only be created with the given shapes. *max_sentences* and
*required_batch_size_multiple* will be ignored (default: None).
"""
try:
from metaseq.data.data_utils_fast import (
batch_by_size_fn,
batch_by_size_vec,
batch_fixed_shapes_fast,
)
except ImportError:
raise ImportError(
"Please build Cython components with: `pip install --editable .` "
"or `python setup.py build_ext --inplace`"
)
except ValueError:
raise ValueError(
"Please build (or rebuild) Cython components with: `pip install "
" --editable .` or `python setup.py build_ext --inplace`."
)
# added int() to avoid TypeError: an integer is required
max_tokens = int(max_tokens) if max_tokens is not None else -1
max_sentences = max_sentences if max_sentences is not None else -1
bsz_mult = required_batch_size_multiple
if not isinstance(indices, np.ndarray):
indices = np.fromiter(indices, dtype=np.int64, count=-1)
if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray):
num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1)
if fixed_shapes is None:
if num_tokens_vec is None:
return batch_by_size_fn(
indices,
num_tokens_fn,
max_tokens,
max_sentences,
bsz_mult,
)
else:
return batch_by_size_vec(
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
)
else:
fixed_shapes = np.array(fixed_shapes, dtype=np.int64)
sort_order = np.lexsort(
[
fixed_shapes[:, 1].argsort(), # length
fixed_shapes[:, 0].argsort(), # bsz
]
)
fixed_shapes_sorted = fixed_shapes[sort_order]
return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)
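# Illustrative sketch (not part of the original module); it assumes the Cython
# extensions referenced above have been built. Five toy sequences are grouped so
# that no batch exceeds six tokens.
def _example_batch_by_size():
    lengths = [2, 3, 1, 4, 2]
    batches = batch_by_size(
        indices=range(len(lengths)),
        num_tokens_fn=lambda i: lengths[i],
        max_tokens=6,
    )
    # `batches` is a list of index arrays, e.g. something like [[0, 1, 2], [3, 4]].
    return batches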
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol == "none":
pass
elif symbol is not None:
raise NotImplementedError(f"Unknown post_process option: {symbol}")
return sentence
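# Illustrative sketch (not part of the original module): undoing sentencepiece
# segmentation on a decoded string.
def _example_post_process():
    pieces = "\u2581Hello \u2581world"
    assert post_process(pieces, "sentencepiece") == "Hello world"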
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks; should be of size 2, where
            the first element is the batch size and the second is the number of timesteps
        padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob: probability for each token to be chosen as the start of a span to be masked. This will be
            multiplied by the number of timesteps divided by the mask span length, to mask approximately this
            percentage of all elements. However, due to overlaps, the actual number will be smaller
            (unless no_overlap is True).
        mask_length: length of each masked span (exact or expected, depending on mask_type)
        mask_type: how to compute mask lengths
            static = fixed size
            uniform = sample from uniform distribution [mask_other, mask_length*2]
            normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from Poisson distribution with lambda = mask_length
        mask_other: secondary parameter whose interpretation depends on mask_type (see above)
        min_masks: minimum number of masked spans
        no_overlap: if True, uses an alternative recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True; this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    np.int64,  # np.int was an alias for int and has been removed in recent NumPy
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
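# Illustrative sketch (not part of the original module): mask roughly 20% of 100
# timesteps with fixed-length spans of 5, for a batch of two sequences.
def _example_compute_mask_indices():
    import numpy as np
    np.random.seed(0)
    mask = compute_mask_indices(
        shape=(2, 100),
        padding_mask=None,
        mask_prob=0.2,
        mask_length=5,
    )
    assert mask.shape == (2, 100) and mask.dtype == bool
    return mask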
def get_mem_usage():
try:
import psutil
mb = 1024 * 1024
return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb"
except ImportError:
return "N/A"
def lengths_to_padding_mask(lens: torch.LongTensor) -> torch.BoolTensor:
bsz, max_lens = lens.size(0), torch.max(lens).item()
mask = torch.arange(max_lens).to(lens.device).view(1, max_lens)
mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens)
return mask
def lengths_to_mask(lens: torch.LongTensor) -> torch.BoolTensor:
return ~lengths_to_padding_mask(lens)
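# Illustrative sketch (not part of the original module): padding positions are True
# in the padding mask and False in its complement.
def _example_lengths_to_padding_mask():
    import torch
    lens = torch.LongTensor([3, 1])
    padding_mask = lengths_to_padding_mask(lens)
    expected = torch.tensor([[False, False, False], [False, True, True]])
    assert torch.equal(padding_mask, expected)
    assert torch.equal(lengths_to_mask(lens), ~expected)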
def _find_extra_valid_paths(dataset_path: str) -> set:
paths = utils.split_paths(dataset_path)
all_valid_paths = set()
for sub_dir in paths:
contents = PathManager.ls(sub_dir)
valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None]
all_valid_paths |= {os.path.basename(p) for p in valid_paths}
# Remove .bin, .idx etc
roots = {os.path.splitext(p)[0] for p in all_valid_paths}
return roots
def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None:
"""Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored."""
if (
train_cfg.dataset.ignore_unused_valid_subsets
or train_cfg.dataset.combine_valid_subsets
or train_cfg.dataset.disable_validation
or getattr(train_cfg.task, "data", None) is None
):
return
other_paths = _find_extra_valid_paths(train_cfg.task.data)
specified_subsets = train_cfg.dataset.valid_subset.split(",")
ignored_paths = [p for p in other_paths if p not in specified_subsets]
if ignored_paths:
advice = "Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them."
msg = f"Valid paths {ignored_paths} will be ignored. {advice}"
raise ValueError(msg)
|
flash_metaseq-main
|
metaseq/data/data_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class PrependTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
self.token = token
if token is not None:
self._sizes = np.array(dataset.sizes) + 1
else:
self._sizes = dataset.sizes
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
item = torch.cat([item.new([self.token]), item])
return item
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
n = self.dataset.num_tokens(index)
if self.token is not None:
n += 1
return n
def size(self, index):
n = self.dataset.size(index)
if self.token is not None:
n += 1
return n
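# Illustrative sketch (not part of the original module), using a minimal stand-in
# dataset: prepending a BOS id of 0 grows every item and every reported size by one.
def _example_prepend_token_dataset():
    class _ToyDataset(torch.utils.data.Dataset):
        sizes = np.array([2, 3])
        def __getitem__(self, idx):
            return torch.arange(1, self.sizes[idx] + 1)
        def __len__(self):
            return len(self.sizes)
        def num_tokens(self, idx):
            return int(self.sizes[idx])
        def size(self, idx):
            return int(self.sizes[idx])
    ds = PrependTokenDataset(_ToyDataset(), token=0)
    assert ds[0].tolist() == [0, 1, 2]
    assert ds.sizes.tolist() == [3, 4]
    assert ds.num_tokens(1) == 4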
|
flash_metaseq-main
|
metaseq/data/prepend_token_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class ListDataset(BaseWrapperDataset):
def __init__(self, dataset, sizes=None):
super().__init__(dataset)
self._sizes = sizes
def __iter__(self):
for x in self.dataset:
yield x
def collater(self, samples):
return samples
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
def set_epoch(self, epoch):
pass
|
flash_metaseq-main
|
metaseq/data/list_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
from typing import Callable, Optional
import numpy as np
import torch
from metaseq.data import data_utils
logger = logging.getLogger(__name__)
# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
``__len__``. This can be used to truncate *iterator*.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None):
self.iterable = iterable
self.itr = iter(self)
if start is None:
self.n = getattr(iterable, "n", 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
for x in self.iterable:
if self.n >= self.total:
raise RuntimeError(
"Mismatch between actual and expected iterable length. "
"This may be caused by resuming training from a checkpoint using "
"a different number of GPUs, in which case you can try the "
"--reset-dataloader option. Alternatively you may have a train or "
"validation set that is smaller than the number of GPUs. If none "
"of these apply, please report this to the metaseq developers."
)
self.n += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
# Only take after what we have already consumed (i.e. after restarting
# from checkpoint mid epoch, we have to subtract self.n which is the
# starting point)
#
        # This is to maintain the invariant self.total = self.n + len(iterable),
# before calling __next__ or __iter__
propagated_take = max(n - self.n, 0)
if hasattr(self.iterable, "take"):
self.iterable.take(propagated_take)
else:
self.iterable = itertools.islice(self.iterable, propagated_take)
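# Illustrative sketch (not part of the original module): the wrapper tracks how many
# elements have been consumed and can be truncated in flight with take().
def _example_counting_iterator():
    itr = CountingIterator(range(5))
    assert [next(itr), next(itr)] == [0, 1]
    assert itr.n == 2 and itr.has_next()
    itr.take(3)  # truncate to three elements in total
    assert len(itr) == 3 and next(itr) == 2 and not itr.has_next()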
class StreamingCountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable):
try:
import more_itertools
except ImportError:
raise ImportError(
"more_itertools is required for streaming iterators; "
"please install with: pip install more_itertools"
)
self._peekable_itr = more_itertools.peekable(iterable)
self._countable_itr = more_itertools.countable(self._peekable_itr)
def __iter__(self):
return self
def __next__(self):
return next(self._countable_itr)
def __len__(self):
return 0
def has_next(self):
return bool(self._peekable_itr) # whether peekable has items
@property
def n(self):
return self._countable_itr.items_seen
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus (bool, optional): ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
set_dataset_epoch (bool, optional): update the wrapped Dataset with
the new epoch number (default: True).
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
@property
def first_batch(self):
return "DUMMY"
class StreamingEpochBatchIterator(EpochBatchIterating):
"""A steaming-style iterator over a :class:`torch.utils.data.IterableDataset`.
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
batch_size (int): number of items in each batch
collate_fn (callable): merges a list of samples to form a mini-batch
drop_last (bool): whether to skip the last batch, in cases where it
would be incomplete (i.e., have fewer than *batch_size* items)
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
"""
def __init__(
self,
dataset: torch.utils.data.IterableDataset,
batch_size: int,
collate_fn: Callable,
drop_last: bool,
num_workers: int = 0,
epoch: int = 1,
):
super().__init__()
self.dataset = dataset
self.batch_size = batch_size
self.collate_fn = collate_fn
self.drop_last = drop_last
self.num_workers = num_workers
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
assert isinstance(dataset, torch.utils.data.IterableDataset)
self._itr: Optional[StreamingCountingIterator] = None
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, **kwargs):
"""
Return a new iterator over the dataset.
In case :func:`load_state_dict` has been called recently, this will
return the loaded iterator.
"""
self.epoch = self.next_epoch_idx
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(self.epoch)
if self._itr is None or self.end_of_epoch():
self._itr = self._get_iterator_for_epoch(self.epoch)
return self._itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._itr.has_next()
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
return self._itr.n
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
# small optimization: we advance the epoch before saving, so that
# when loading later we don't end up fast-forwarding the iterator
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
"epoch": epoch,
"iterations_in_epoch": iter_in_epoch,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict["epoch"]
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(self.epoch)
self._itr = self._get_iterator_for_epoch(self.epoch)
itr_pos = state_dict.get("iterations_in_epoch", 0)
if itr_pos > 0:
# fast-forward epoch iterator
logger.warning(f"Fast-forwarding dataloader by {itr_pos} batches...")
t0 = time.time()
next(itertools.islice(self._itr, itr_pos, itr_pos), None)
logger.warning(
f"done fast-forwarding dataloader in {time.time() - t0:.1f} seconds"
)
def _get_iterator_for_epoch(self, epoch, offset=0):
if self.num_workers > 0:
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
itr = torch.utils.data.DataLoader(
dataset=self.dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
pin_memory=True,
drop_last=self.drop_last,
worker_init_fn=getattr(self.dataset, "worker_init_fn", None),
)
itr = StreamingCountingIterator(itr)
return itr
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
A callable batch_sampler will be called for each epoch to enable per epoch dynamic
batch iterators defined by this callable batch_sampler.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
            queue. Helps speed up dataloading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative (default: ``0``).
disable_shuffling (bool, optional): force disable shuffling
(default: ``False``).
        skip_remainder_batch (bool, optional): if set, discard the last batch in an epoch
            for the sake of training stability, as the last batch is usually smaller than
            local_batch_size * distributed_world_size (default: ``False``).
"""
def __init__(
self,
dataset,
collate_fn,
batch_sampler,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
buffer_size=0,
timeout=0,
disable_shuffling=False,
skip_remainder_batch=False,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = (
tuple(batch_sampler) if not callable(batch_sampler) else None
)
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.disable_shuffling = disable_shuffling
self.skip_remainder_batch = skip_remainder_batch
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = not disable_shuffling
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, "supports_prefetch", False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
@property
def first_batch(self):
if len(self.frozen_batches) == 0:
raise Exception(
"The dataset is empty. This could indicate "
"that all elements in the dataset have been skipped. "
"Try increasing the max number of allowed tokens or using "
"a larger dataset."
)
if getattr(self.dataset, "supports_fetch_outside_dataloader", True):
return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]])
else:
return "DUMMY"
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus (bool, optional): ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
set_dataset_epoch (bool, optional): update the wrapped Dataset with
the new epoch number (default: True).
"""
if self.disable_shuffling:
shuffle = False
self.epoch = self.next_epoch_idx
if set_dataset_epoch and hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(self.epoch)
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler):
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle,
fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
"version": 2,
"epoch": epoch,
"iterations_in_epoch": iter_in_epoch,
"shuffle": self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict["epoch"]
itr_pos = state_dict.get("iterations_in_epoch", 0)
version = state_dict.get("version", 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get("shuffle", True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
"Cannot resume training due to dataloader mismatch, please "
"report this to the metaseq developers. You can relaunch "
"training with `--reset-dataloader` and it should work."
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(
self, epoch, shuffle, fix_batches_to_gpus=False, offset=0
):
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and not fix_batches_to_gpus:
batches = shuffle_batches(list(batches), self.seed + epoch)
batches = list(
ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
)
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
else:
batches = self.frozen_batches
batches = list(
ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
)
if offset > 0 and offset >= len(batches):
return None
if self.num_workers > 0:
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
# Create data loader
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
# Wrap with CountingIterator
itr = CountingIterator(itr, start=offset)
if self.skip_remainder_batch:
            # TODO: Below is a lazy implementation which discards the final batch
            # regardless of whether it is a full batch or not.
total_num_itrs = len(batches) - 1
itr.take(total_num_itrs)
logger.info(f"skip final residual batch, total_num_itrs = {total_num_itrs}")
return itr
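# Illustrative sketch (not part of the original module): a toy map-style dataset
# iterated in two fixed batches; shuffling is disabled so the order is stable.
def _example_epoch_batch_iterator():
    class _ToyDataset(torch.utils.data.Dataset):
        def __getitem__(self, index):
            return index
        def __len__(self):
            return 4
    epoch_itr = EpochBatchIterator(
        dataset=_ToyDataset(),
        collate_fn=lambda samples: samples,
        batch_sampler=[[0, 1], [2, 3]],
    )
    itr = epoch_itr.next_epoch_itr(shuffle=False)
    assert list(itr) == [[0, 1], [2, 3]]
    assert epoch_itr.end_of_epoch()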
class GroupedIterator(CountingIterator):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
        skip_remainder_batch (bool, optional): if set, discard the last grouped batch in
            each training epoch, as the last grouped batch is usually smaller than
            local_batch_size * distributed_world_size * chunk_size (default: ``False``).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, chunk_size, skip_remainder_batch=False):
if skip_remainder_batch:
total_num_itrs = int(math.floor(len(iterable) / float(chunk_size)))
logger.info(
f"skip final residual batch, grouped total_num_itrs = {total_num_itrs}"
)
else:
total_num_itrs = int(math.ceil(len(iterable) / float(chunk_size)))
logger.info(f"grouped total_num_itrs = {total_num_itrs}")
itr = _chunk_iterator(iterable, chunk_size, skip_remainder_batch)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, "n", 0) / float(chunk_size))),
total=total_num_itrs,
)
self.chunk_size = chunk_size
if skip_remainder_batch:
self.take(total_num_itrs)
# TODO: [Hack] Here the grouped iterator modifies the base iterator size so that
# training can move into the next epoch once the grouped iterator is exhausted.
# Double-check this implementation in case unexpected behavior occurs.
iterable.take(total_num_itrs * chunk_size)
def _chunk_iterator(itr, chunk_size, skip_remainder_batch=False):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if not skip_remainder_batch and len(chunk) > 0:
yield chunk
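# Illustrative sketch (not part of the original module): grouping a five-element
# iterator into chunks of two yields ceil(5 / 2) = 3 groups, the last one being the
# one-element remainder.
def _example_grouped_iterator():
    grouped = GroupedIterator(CountingIterator(range(5)), chunk_size=2)
    assert list(grouped) == [[0, 1], [2, 3], [4]]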
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterate over
        fill_value (Any, optional): padding value used when the iterable length
            is not evenly divisible by *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError("shard_id must be between 0 and num_shards")
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
itr = map(
operator.itemgetter(1),
itertools.zip_longest(
range(sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
),
)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))),
total=sharded_len,
)
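# Illustrative sketch (not part of the original module): with two shards, each shard
# sees every other element, and the shorter shard is padded with the fill value so
# both shards have equal length.
def _example_sharded_iterator():
    assert list(ShardedIterator([0, 1, 2, 3, 4], num_shards=2, shard_id=0, fill_value=-1)) == [0, 2, 4]
    assert list(ShardedIterator([0, 1, 2, 3, 4], num_shards=2, shard_id=1, fill_value=-1)) == [1, 3, -1]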
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
for item in self._source:
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self._consumer = None
self.start_time = time.time()
self.warning_time = None
self.total = len(iterable)
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.total,
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return self.total
def take(self, n):
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self._iterable, "take"):
self._iterable.take(n)
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):
if time.time() - self.start_time > 5 * 60:
if (
self.warning_time is None
or time.time() - self.warning_time > 15 * 60
):
logger.debug(
"Data loading buffer is empty or nearly empty. This may "
"indicate a data loading bottleneck, and increasing the "
"number of workers (--num-workers) may help."
)
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item
|
flash_metaseq-main
|
metaseq/data/iterators.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import json
import subprocess
import tempfile
from typing import Hashable
try:
import pyarrow.plasma as plasma
PYARROW_AVAILABLE = True
except ImportError:
plasma = None
PYARROW_AVAILABLE = False
class PlasmaArray:
"""
Wrapper around numpy arrays that automatically moves the data to shared
memory upon serialization. This is particularly helpful when passing numpy
arrays through multiprocessing, so that data is not unnecessarily
duplicated or pickled.
"""
def __init__(self, array):
super().__init__()
self.array = array
self.disable = array.nbytes < 134217728 # disable for arrays <128MB
self.object_id = None
self.path = None
# variables with underscores shouldn't be pickled
self._client = None
self._server = None
self._server_tmp = None
self._plasma = None
@property
def plasma(self):
if self._plasma is None and not self.disable:
self._plasma = plasma
return self._plasma
def start_server(self):
if self.plasma is None or self._server is not None:
return
assert self.object_id is None
assert self.path is None
self._server_tmp = tempfile.NamedTemporaryFile()
self.path = self._server_tmp.name
self._server = subprocess.Popen(
["plasma_store", "-m", str(int(1.05 * self.array.nbytes)), "-s", self.path]
)
@property
def client(self):
if self._client is None:
assert self.path is not None
self._client = self.plasma.connect(self.path, num_retries=200)
return self._client
def __getstate__(self):
"""Called on pickle load"""
if self.plasma is None:
return self.__dict__
if self.object_id is None:
self.start_server()
self.object_id = self.client.put(self.array)
state = self.__dict__.copy()
del state["array"]
state["_client"] = None
state["_server"] = None
state["_server_tmp"] = None
state["_plasma"] = None
return state
def __setstate__(self, state):
"""Called on pickle save"""
self.__dict__.update(state)
if self.plasma is None:
return
self.array = self.client.get(self.object_id)
def __del__(self):
if self._server is not None:
self._server.kill()
self._server = None
self._server_tmp.close()
self._server_tmp = None
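# Illustrative sketch (not part of the original module): arrays below the 128MB
# threshold bypass plasma and are pickled as-is, so a pickle round trip simply
# restores the data.
def _example_plasma_array_pickle():
    import pickle
    import numpy as np
    arr = PlasmaArray(np.arange(10))
    restored = pickle.loads(pickle.dumps(arr))
    assert np.array_equal(restored.array, np.arange(10))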
DEFAULT_PLASMA_PATH = "/tmp/plasma"
class PlasmaView:
"""Interface to write and read from shared memory. Whereas PlasmaArray writes to plasma on serialization,
PlasmaView writes to shared memory on instantiation."""
def __init__(self, array, split_path: str, hash_data: Hashable, plasma_path=None):
"""
Args:
array: numpy array to store. This can be read with ``PlasmaView().array``
split_path: the path whence the data was read, used for hashing
hash_data: other metadata about the array that can be used to create a unique key.
as of writing, the 3 callers in ``TokenBlockDataset`` use::
hash_data = ((block_size, document_sep_len, str(break_mode), len(dataset)), 0|1|2)
"""
assert PYARROW_AVAILABLE
assert split_path is not None
if plasma_path is None:
plasma_path = DEFAULT_PLASMA_PATH
self.path = plasma_path
self.split_path = split_path
self._client = None # Initialize lazily for pickle. plasma clients should not be deep copied or serialized.
self._n = None
self.object_id = self.get_object_id(self.split_path, hash_data)
try:
self.client.put(array, object_id=self.object_id)
except plasma.PlasmaObjectExists:
pass
@property
def client(self):
if self._client is None:
self._client = plasma.connect(self.path, num_retries=200)
return self._client
@property
def array(self):
"""Fetch a read only view of an np.array, stored in plasma."""
ret = self.client.get(self.object_id)
return ret
@staticmethod
def get_object_id(split_path: str, hash_data: Hashable):
"""Returns plasma.ObjectID from hashing split_path and object_num."""
hash = hashlib.blake2b(bytes(split_path, "utf-8"), digest_size=20)
harg = json.dumps(hash_data).encode("utf-8")
hash.update(harg)
return plasma.ObjectID(hash.digest())
def __getstate__(self):
"""Called on pickle save"""
self.disconnect()
state = self.__dict__.copy()
assert state["_client"] is None
assert "object_id" in state
return state
def __setstate__(self, state):
"""Called on pickle load"""
self.__dict__.update(state)
def __del__(self):
self.disconnect()
def disconnect(self):
if self._client is not None:
self._client.disconnect()
self._client = None
def __len__(self):
"""Save reads by caching len"""
if self._n is None:
self._n = len(self.array)
return self._n
GB100 = (1024**3) * 100
class PlasmaStore:
def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int = GB100):
self.server = self.start(path, nbytes)
def __del__(self):
self.server.kill()
@staticmethod
def start(path=DEFAULT_PLASMA_PATH, nbytes: int = GB100) -> subprocess.Popen:
if not PYARROW_AVAILABLE:
raise ImportError("please run pip install pyarrow to use --use_plasma_view")
# best practice is to allocate more space than we need. The limitation seems to be the size of /dev/shm
_server = subprocess.Popen(["plasma_store", "-m", str(nbytes), "-s", path])
plasma.connect(path, num_retries=200) # If we can't connect we fail immediately
return _server
|
flash_metaseq-main
|
metaseq/data/plasma_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
from metaseq.data import BaseWrapperDataset, plasma_utils
logger = logging.getLogger(__name__)
class ResamplingDataset(BaseWrapperDataset):
"""Randomly samples from a given dataset at each epoch.
Sampling is done with or without replacement, depending on the "replace"
parameter.
Optionally, the epoch size can be rescaled. This is potentially desirable
to increase per-epoch coverage of the base dataset (since sampling with
replacement means that many items in the dataset will be left out). In the
case of sampling without replacement, size_ratio should be strictly less
than 1.
Args:
dataset (~torch.utils.data.Dataset): dataset on which to sample.
weights (List[float]): list of probability weights
(default: None, which corresponds to uniform sampling).
replace (bool): sampling mode; True for "with replacement", or False
for "without replacement" (default: True)
size_ratio (float): the ratio to subsample to; must be positive
(default: 1.0).
batch_by_size (bool): whether or not to batch by sequence length
(default: True).
seed (int): RNG seed to use (default: 0).
epoch (int): starting epoch number (default: 1).
"""
def __init__(
self,
dataset,
weights=None,
replace=True,
size_ratio=1.0,
batch_by_size=True,
seed=0,
epoch=1,
):
super().__init__(dataset)
if weights is None:
self.weights = None
else:
assert len(weights) == len(dataset)
weights_arr = np.array(weights, dtype=np.float64)
weights_arr /= weights_arr.sum()
self.weights = plasma_utils.PlasmaArray(weights_arr)
self.replace = replace
assert size_ratio > 0.0
if not self.replace:
assert size_ratio <= 1.0
logger.info(f"size ratio = {size_ratio}; replace = {self.replace}")
self.size_ratio = float(size_ratio)
self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int)
self.batch_by_size = batch_by_size
self.seed = seed
self._cur_epoch = None
self._cur_indices = None
self.set_epoch(epoch)
def __getitem__(self, index):
return self.dataset[self._cur_indices.array[index]]
def __len__(self):
return self.actual_size
@property
def sizes(self):
if isinstance(self.dataset.sizes, list):
return [s[self._cur_indices.array] for s in self.dataset.sizes]
return self.dataset.sizes[self._cur_indices.array]
def num_tokens(self, index):
return self.dataset.num_tokens(self._cur_indices.array[index])
def size(self, index):
return self.dataset.size(self._cur_indices.array[index])
def ordered_indices(self):
if self.batch_by_size:
order = [
np.arange(len(self)),
self.sizes,
] # No need to handle `self.shuffle == True`
return np.lexsort(order)
else:
return np.arange(len(self))
def prefetch(self, indices):
self.dataset.prefetch(self._cur_indices.array[indices])
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch):
logger.debug("ResamplingDataset.set_epoch: {}".format(epoch))
super().set_epoch(epoch)
if epoch == self._cur_epoch:
return
self._cur_epoch = epoch
# Generate a weighted sample of indices as a function of the
# random seed and the current epoch.
rng = np.random.RandomState(
[
42, # magic number
self.seed % (2**32), # global seed
self._cur_epoch, # epoch index
]
)
self._cur_indices = plasma_utils.PlasmaArray(
rng.choice(
len(self.dataset),
self.actual_size,
replace=self.replace,
p=(None if self.weights is None else self.weights.array),
)
)
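# Illustrative sketch (not part of the original module), using a plain list as a
# stand-in dataset: each epoch deterministically re-draws half of the base items
# (without replacement here), driven by the seed and the epoch number.
def _example_resampling_dataset():
    ds = ResamplingDataset(
        list(range(10)), replace=False, size_ratio=0.5, seed=0, epoch=1
    )
    assert len(ds) == 5
    epoch1 = [ds[i] for i in range(len(ds))]
    ds.set_epoch(2)
    epoch2 = [ds[i] for i in range(len(ds))]
    return epoch1, epoch2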
|
flash_metaseq-main
|
metaseq/data/resampling_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class AppendTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
self.token = token
if token is not None:
self._sizes = np.array(dataset.sizes) + 1
else:
self._sizes = dataset.sizes
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
item = torch.cat([item, item.new([self.token])])
return item
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
n = self.dataset.num_tokens(index)
if self.token is not None:
n += 1
return n
def size(self, index):
n = self.dataset.size(index)
if self.token is not None:
n += 1
return n
|
flash_metaseq-main
|
metaseq/data/append_token_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from metaseq.data import data_utils
from . import BaseWrapperDataset
class PadDataset(BaseWrapperDataset):
def __init__(self, dataset, pad_idx, left_pad, pad_length=None):
super().__init__(dataset)
self.pad_idx = pad_idx
self.left_pad = left_pad
self.pad_length = pad_length
def collater(self, samples):
return data_utils.collate_tokens(
samples, self.pad_idx, left_pad=self.left_pad, pad_to_length=self.pad_length
)
class LeftPadDataset(PadDataset):
def __init__(self, dataset, pad_idx, pad_length=None):
super().__init__(dataset, pad_idx, left_pad=True, pad_length=pad_length)
class RightPadDataset(PadDataset):
def __init__(self, dataset, pad_idx, pad_length=None):
super().__init__(dataset, pad_idx, left_pad=False, pad_length=pad_length)
class MultiplePadDataset(BaseWrapperDataset):
"""
This class pads the given dataset to ensure that the padded size is a
multiple of the given `multiple`.
For instance,
MultiplePadDataset(
tgt_dataset, pad_idx=self.source_dictionary.pad(), multiple=8
)
would pad the tgt_dataset in multiples of 8.
"""
def __init__(self, dataset, pad_idx, multiple):
super().__init__(dataset)
self.pad_idx = pad_idx
self.multiple = multiple
def collater(self, samples):
max_len = max([s.size(0) for s in samples])
max_len_multiple = int(math.ceil(max_len / self.multiple)) * self.multiple
return data_utils.collate_tokens(
samples, self.pad_idx, left_pad=False, pad_to_length=max_len_multiple
)
    def __getitem__(self, index):
        item = self.dataset[index]
        length = item.numel()
        # pad the item up to the next multiple of self.multiple
        padded_length = int(math.ceil(length / self.multiple) * self.multiple)
        padding = item.new_full((padded_length - length,), self.pad_idx)
        return torch.cat([item, padding])
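# Illustrative sketch (not part of the original module), using a plain list as a
# stand-in dataset: a five-token sample is padded with pad_idx up to the next
# multiple of eight, both per item and in the collater.
def _example_multiple_pad_dataset():
    sample = torch.full((5,), 2, dtype=torch.long)
    ds = MultiplePadDataset([sample], pad_idx=1, multiple=8)
    assert ds[0].tolist() == [2, 2, 2, 2, 2, 1, 1, 1]
    assert ds.collater([sample, sample]).shape == (2, 8)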
|
flash_metaseq-main
|
metaseq/data/pad_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import shutil
import struct
from functools import lru_cache
from typing import Union
import numpy as np
import torch
from metaseq.dataclass.constants import DATASET_IMPL_CHOICES
from metaseq.file_io import PathManager
from . import BaseDataset
def best_fitting_int_dtype(
max_int_to_represent,
) -> Union[np.uint16, np.uint32, np.int64]:
if max_int_to_represent is None:
return np.uint32 # Safe guess
elif max_int_to_represent < 65500:
return np.uint16
elif max_int_to_represent < 4294967295:
return np.uint32
else:
return np.int64
# we avoid np.uint64 because it doesn't save space and its type promotion behaves unexpectedly
# https://github.com/numpy/numpy/issues/5745
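# Illustrative sketch (not part of the original module): a 50k-symbol vocabulary
# fits in uint16, while a 100k one needs uint32.
def _example_best_fitting_int_dtype():
    assert best_fitting_int_dtype(50_000) == np.uint16
    assert best_fitting_int_dtype(100_000) == np.uint32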
def get_available_dataset_impl():
return list(map(str, DATASET_IMPL_CHOICES))
def infer_dataset_impl(path):
if IndexedRawTextDataset.exists(path):
return "raw"
elif IndexedDataset.exists(path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return "cached"
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return "mmap"
else:
return None
else:
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == "mmap":
return MMapIndexedDatasetBuilder(
out_file, dtype=best_fitting_int_dtype(vocab_size)
)
elif impl == "fasta":
raise NotImplementedError
elif impl == "huffman":
raise ValueError(
"Use HuffmanCodeBuilder directly as it has a different interface."
)
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == "raw" and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
return None
def dataset_exists(path, impl):
if impl == "raw":
return IndexedRawTextDataset.exists(path)
elif impl == "mmap":
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
_code_to_dtype = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
    6: np.float64,  # np.float (an alias for float64) was removed in recent NumPy
7: np.double,
8: np.uint16,
9: np.uint32,
10: np.uint64,
}
def _dtype_header_code(dtype) -> int:
for k in _code_to_dtype.keys():
if _code_to_dtype[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
class IndexedDataset(BaseDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b"TNTIDX\x00\x00"
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = f.read(8)
assert struct.unpack("<Q", version) == (1,)
code, self.element_size = struct.unpack("<QQ", f.read(16))
self.dtype = _code_to_dtype[code]
self._len, self.s = struct.unpack("<QQ", f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), "rb", buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError("index out of range")
def __del__(self):
if self.data_file:
self.data_file.close()
@lru_cache(maxsize=8)
def __getitem__(self, i) -> torch.Tensor:
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
super().__init__(path, fix_lua_indexing=fix_lua_indexing)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx : ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx : ptx + a.size])
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
class IndexedRawTextDataset(BaseDataset):
"""Takes a text file as input and binarizes it in memory at instantiation.
Original lines are also kept in memory"""
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, "r", encoding="utf-8") as f:
for line in f:
self.lines.append(line.strip("\n"))
tokens = dictionary.encode_line(
line,
add_if_not_exist=False,
append_eos=self.append_eos,
reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError("index out of range")
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return PathManager.exists(path)
class IndexedDatasetBuilder:
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
        np.float64: 4,  # np.float (alias for float64) was removed in recent NumPy
np.double: 8,
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, "wb")
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
# +1 for Lua compatibility
bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), "rb") as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, "wb")
index.write(b"TNTIDX\x00\x00")
index.write(struct.pack("<Q", 1))
index.write(
struct.pack("<QQ", _dtype_header_code(self.dtype), self.element_size)
)
index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
def _warmup_mmap_file(path):
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index:
_HDR_MAGIC = b"MMIDIDX\x00\x00"
@classmethod
def writer(cls, path, dtype):
class _Writer:
def __enter__(self):
self._file = open(path, "wb")
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack("<Q", 1))
self._file.write(struct.pack("<B", _dtype_header_code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack("<Q", len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = _code_to_dtype[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode="r", order="C"
)
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
@lru_cache(maxsize=8)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
@property
def sizes(self):
return self._index.sizes
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
def get_indexed_dataset_to_local(path) -> str:
local_index_path = PathManager.get_local_path(index_file_path(path))
local_data_path = PathManager.get_local_path(data_file_path(path))
assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), (
"PathManager.get_local_path does not return files with expected patterns: "
f"{local_index_path} and {local_data_path}"
)
    local_path = local_data_path[:-4]  # stripping suffix ".bin"
    assert local_path == local_index_path[:-4]  # stripping suffix ".idx"
return local_path
class MMapIndexedDatasetBuilder:
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, "wb")
self._dtype = dtype
self._sizes = []
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order="C"))
self._sizes.append(np_array.size)
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), "rb") as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes)
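# Illustrative sketch (not part of the original module): write two tensors with
# MMapIndexedDatasetBuilder and read them back through MMapIndexedDataset. The
# temporary path is only for illustration.
def _example_mmap_indexed_dataset_round_trip():
    import os
    import tempfile
    prefix = os.path.join(tempfile.mkdtemp(), "toy")
    builder = MMapIndexedDatasetBuilder(data_file_path(prefix), dtype=np.int32)
    builder.add_item(torch.tensor([1, 2, 3]))
    builder.add_item(torch.tensor([4, 5]))
    builder.finalize(index_file_path(prefix))
    ds = MMapIndexedDataset(prefix)
    assert len(ds) == 2
    assert ds[0].tolist() == [1, 2, 3] and ds[1].tolist() == [4, 5]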
|
flash_metaseq-main
|
metaseq/data/indexed_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
from multiprocessing import Pool
import torch
from metaseq import utils
from metaseq.data import data_utils
from metaseq.file_chunker_utils import Chunker, find_offsets
from metaseq.file_io import PathManager
from metaseq.tokenizer import tokenize_line
class Dictionary:
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
):
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def get_count(self, idx):
return self.count[idx]
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(
self,
tensor,
bpe_symbol=None,
escape_unk=False,
extra_symbols_to_ignore=None,
unk_string=None,
include_eos=False,
):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return "\n".join(
self.string(
t,
bpe_symbol,
escape_unk,
extra_symbols_to_ignore,
include_eos=include_eos,
)
for t in tensor
)
extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])
extra_symbols_to_ignore.add(self.eos())
def token_string(i):
if i == self.unk():
if unk_string is not None:
return unk_string
else:
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, "bos_index"):
extra_symbols_to_ignore.add(self.bos())
sent = " ".join(
token_string(i)
for i in tensor
if utils.item(i) not in extra_symbols_to_ignore
)
return data_utils.post_process(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
self.pad_to_multiple_(padding_factor)
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
line, field = line.rstrip().rsplit(" ", 1)
if field == "#metaseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#metaseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(word)
)
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
f"Incorrect dictionary format, expected '<token> <cnt> [flags]': \"{line}\""
)
def _save(self, f, kv_iterator):
if isinstance(f, str):
PathManager.mkdirs(os.path.dirname(f))
with PathManager.open(f, "w", encoding="utf-8") as fd:
return self.save(fd)
for k, v in kv_iterator:
print("{} {}".format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(
self,
line,
line_tokenizer=tokenize_line,
add_if_not_exist=True,
consumer=None,
append_eos=True,
reverse_order=False,
) -> torch.IntTensor:
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
ids = []
for word in words:
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids.append(idx)
if append_eos:
ids.append(self.eos_index)
return torch.tensor(ids, dtype=torch.int32)
@staticmethod
def _add_file_to_dictionary_single_worker(
filename,
tokenize,
eos_word,
start_offset,
end_offset,
):
counter = Counter()
with Chunker(filename, start_offset, end_offset) as line_iterator:
for line in line_iterator:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
local_file = PathManager.get_local_path(filename)
offsets = find_offsets(local_file, num_workers)
if num_workers > 1:
chunks = zip(offsets, offsets[1:])
pool = Pool(processes=num_workers)
results = []
for (start_offset, end_offset) in chunks:
results.append(
pool.apply_async(
Dictionary._add_file_to_dictionary_single_worker,
(
local_file,
tokenize,
dict.eos_word,
start_offset,
end_offset,
),
)
)
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(
Dictionary._add_file_to_dictionary_single_worker(
local_file, tokenize, dict.eos_word, offsets[0], offsets[1]
)
)
class TruncatedDictionary(object):
def __init__(self, wrapped_dict, length):
self.__class__ = type(
wrapped_dict.__class__.__name__,
(self.__class__, wrapped_dict.__class__),
{},
)
self.__dict__ = wrapped_dict.__dict__
self.wrapped_dict = wrapped_dict
self.length = min(len(self.wrapped_dict), length)
def __len__(self):
return self.length
def __getitem__(self, i):
if i < self.length:
return self.wrapped_dict[i]
return self.wrapped_dict.unk()
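# Hedged usage sketch (not part of the original file): build a tiny Dictionary,
# encode a line without growing the vocabulary, and convert the ids back to a
# string. The example tokens are illustrative only.
if __name__ == "__main__":
    d = Dictionary()
    for tok in ("hello", "world"):
        d.add_symbol(tok)
    ids = d.encode_line("hello world", add_if_not_exist=False, append_eos=True)
    print(ids.tolist())   # vocabulary indices followed by the eos index
    print(d.string(ids))  # "hello world" (special symbols are stripped)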
|
flash_metaseq-main
|
metaseq/data/dictionary.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import mmap
import os
import sys
import threading
from pathlib import Path
from typing import Callable, Optional
import numpy as np
import torch
logger = logging.getLogger(__name__)
class JsonlDataset(torch.utils.data.Dataset):
"""
For loading JSONL data and encoding on-the-fly with a given tokenizer.
JSONL format is expected to roughly follow that of The Pile.
One-line-per-document of the form:
```
{
"text": "text goes here, with newlines",
"meta": {"pile_set_name": "name of corpus", "other": "metadata"}
}
```
Note that only the "text" key is used.
"""
def __init__(self, path: str, tokenizer: Optional[Callable] = None, recache=False):
self.path = path
self.tokenizer = tokenizer
self.threadlocal = threading.local()
# TODO(susan): Fix this fairseq reference. _build_index fails otherwise.
self.cache = Path(f"{path}.fairseq.idx.npy")
if self.cache.exists() and not recache:
self.offsets = np.load(self.cache)
else:
self.offsets = self._build_index(path)
np.save(self.cache, self.offsets)
# print(f'n offsets: {len(self.offsets)}')
def _get_mmap(self):
if not hasattr(self.threadlocal, "handles"):
f = open(self.path, "rb")
mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
self.threadlocal.handles = [f, mm]
if (
self.path.endswith(".gz")
or self.path.endswith(".bz")
or self.path.endswith(".bz2")
):
raise NotImplementedError(
"Compressed files are not supported because .seek() would require "
"rereading the entire file, making performance too slow."
)
return self.threadlocal.handles[-1]
def __getitem__(self, idx):
if idx < 0 or idx >= len(self):
raise IndexError
f = self._get_mmap()
f.seek(self.offsets[idx])
item = f.readline().decode("utf-8")
item = json.loads(item)
if self.tokenizer is not None:
item = self.tokenizer(item)
return item
def __len__(self):
return len(self.offsets)
def _build_index(self, path: str):
"""Build index of start positions of each line."""
logger.info(f"Building index for file: {path}")
f = self._get_mmap()
f.seek(0)
offsets = []
cur = 0
while True:
line = f.readline()
if line == b"":
break
offsets.append(cur)
cur += len(line)
return offsets
def __setstate__(self, state):
self.__dict__ = state
self.threadlocal = threading.local()
def __getstate__(self):
d = {}
for i, v in self.__dict__.items():
if i != "threadlocal":
d[i] = v
return d
def __del__(self):
if hasattr(self.threadlocal, "handles"):
# cleanup files we opened on initialization
while self.threadlocal.handles:
self.threadlocal.handles.pop().close()
@staticmethod
def exists(path):
return os.path.exists(path)
if __name__ == "__main__":
"""Usage:
python metaseq/data/jsonl_dataset.py "flan_streaming/valid/00/*.jsonl"
"""
parser = argparse.ArgumentParser(
description="Precompute index file from JSONL files"
)
parser.add_argument(
"pattern", help="glob to jsonl files, e.g. flan_streaming/valid/00/*.jsonl"
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
from glob import glob
from tqdm import tqdm
for f in tqdm(list(glob(args.pattern))):
JsonlDataset(f, recache=True)
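# Hedged usage sketch (not part of the original file); the path and tokenizer
# below are placeholders. Any callable works as the tokenizer, since
# __getitem__ passes it the full decoded JSON object:
#
#     ds = JsonlDataset("valid/00/shard0.jsonl",
#                       tokenizer=lambda doc: doc["text"].split())
#     print(len(ds))     # number of documents (lines) in the file
#     print(ds[0][:10])  # first ten whitespace tokens of the first document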
|
flash_metaseq-main
|
metaseq/data/jsonl_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict
import numpy as np
import torch
from metaseq.data.monolingual_dataset import MonolingualDataset
from . import BaseDataset
class LMContextWindowDataset(BaseDataset):
"""
Wraps a MonolingualDataset and provides more context for evaluation.
Each item in the new dataset will have a maximum size of
``tokens_per_sample + context_window``.
Args:
dataset: dataset to wrap
tokens_per_sample (int): the max number of tokens in each dataset item
context_window (int): the number of accumulated tokens to add to each
dataset item
pad_idx (int): padding symbol
"""
def __init__(
self,
dataset: MonolingualDataset,
tokens_per_sample: int,
context_window: int,
pad_idx: int,
):
assert context_window > 0
self.dataset = dataset
self.tokens_per_sample = tokens_per_sample
self.context_window = context_window
self.pad_idx = pad_idx
self.prev_tokens = np.empty([0])
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples) -> Dict:
sample = self.dataset.collater(samples)
pad = self.pad_idx
max_sample_len = self.tokens_per_sample + self.context_window
bsz, tsz = sample["net_input"]["src_tokens"].shape
start_idxs = [0] * bsz
toks = sample["net_input"]["src_tokens"]
lengths = sample["net_input"]["src_lengths"]
tgt = sample["target"]
new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64)
new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64)
sample_lens = toks.ne(pad).long().sum(dim=1).cpu()
for i in range(bsz):
sample_len = sample_lens[i]
extra = len(self.prev_tokens) + sample_len - max_sample_len
if extra > 0:
self.prev_tokens = self.prev_tokens[extra:]
pads = np.full(self.context_window - len(self.prev_tokens), pad)
new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads])
new_tgt[
i, len(self.prev_tokens) : len(self.prev_tokens) + len(tgt[i])
] = tgt[i]
start_idxs[i] = len(self.prev_tokens)
lengths[i] += len(self.prev_tokens)
self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window :]
sample["net_input"]["src_tokens"] = torch.from_numpy(new_toks)
sample["target"] = torch.from_numpy(new_tgt)
sample["start_indices"] = start_idxs
return sample
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
# NOTE we don't shuffle the data to retain access to the previous dataset elements
return np.arange(len(self.dataset))
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
|
flash_metaseq-main
|
metaseq/data/lm_context_window_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import numpy as np
import torch
from metaseq.data import BaseDataset, plasma_utils
from metaseq.data.indexed_dataset import best_fitting_int_dtype
class TokenBlockDataset(BaseDataset):
"""Break a Dataset of tokens into blocks.
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes (List[int]): sentence lengths (required for 'complete' and 'eos')
block_size (int, optional): maximum block size (ignored in 'eos' break mode)
break_mode (str, optional): Mode used for breaking tokens. Values can
be one of:
- 'none': break tokens into equally sized blocks (up to block_size)
- 'complete': break tokens into blocks (up to block_size) such that
blocks contains complete sentences, although block_size may be
exceeded if some sentences exceed block_size
- 'complete_doc': similar to 'complete' mode, but do not
cross document boundaries
- 'eos': each block contains one sentence (block_size is ignored)
include_targets (bool, optional): return next tokens as targets
(default: False).
document_sep_len (int, optional): document separator size (required for
'complete_doc' break mode). Typically 1 if the sentences have eos
and 0 otherwise.
"""
def __init__(
self,
dataset,
sizes,
block_size,
pad,
eos,
break_mode=None,
include_targets=False,
document_sep_len=1,
use_plasma_view=False,
split_path=None,
plasma_path=None,
):
super().__init__()
self.dataset = dataset
self.pad = pad
self.eos = eos
self.include_targets = include_targets
assert len(dataset) > 0
assert len(dataset) == len(sizes)
_sizes, block_to_dataset_index, slice_indices = self._build_slice_indices(
sizes, break_mode, document_sep_len, block_size
)
if use_plasma_view:
plasma_id = (block_size, document_sep_len, str(break_mode), len(dataset))
self._slice_indices = plasma_utils.PlasmaView(
slice_indices, split_path, (plasma_id, 0), plasma_path=plasma_path
)
self._sizes = plasma_utils.PlasmaView(
_sizes, split_path, (plasma_id, 1), plasma_path=plasma_path
)
self._block_to_dataset_index = plasma_utils.PlasmaView(
block_to_dataset_index,
split_path,
(plasma_id, 2),
plasma_path=plasma_path,
)
else:
self._slice_indices = plasma_utils.PlasmaArray(slice_indices)
self._sizes = plasma_utils.PlasmaArray(_sizes)
self._block_to_dataset_index = plasma_utils.PlasmaArray(
block_to_dataset_index
)
@staticmethod
def _build_slice_indices(
sizes, break_mode, document_sep_len, block_size
) -> Tuple[np.ndarray]:
"""Use token_block_utils_fast to build arrays for indexing into self.dataset"""
try:
from metaseq.data.token_block_utils_fast import (
_get_slice_indices_fast,
_get_block_to_dataset_index_fast,
)
except ImportError:
raise ImportError(
"Please build Cython components with: `pip install --editable .` "
"or `python setup.py build_ext --inplace`"
)
if isinstance(sizes, list):
sizes = np.array(sizes, dtype=np.int64)
else:
if torch.is_tensor(sizes):
sizes = sizes.numpy()
sizes = sizes.astype(np.int64)
break_mode = break_mode if break_mode is not None else "none"
# For "eos" break-mode, block_size is not required parameters.
if break_mode == "eos" and block_size is None:
block_size = 0
slice_indices = _get_slice_indices_fast(
sizes, str(break_mode), block_size, document_sep_len
)
_sizes = slice_indices[:, 1] - slice_indices[:, 0]
# build index mapping block indices to the underlying dataset indices
if break_mode == "eos":
# much faster version for eos break mode
block_to_dataset_index = np.stack(
[
np.arange(len(sizes)), # starting index in dataset
np.zeros(
len(sizes), dtype=np.compat.long
), # starting offset within starting index
np.arange(len(sizes)), # ending index in dataset
],
1,
)
else:
block_to_dataset_index = _get_block_to_dataset_index_fast(
sizes,
slice_indices,
)
size_dtype = np.uint16 if block_size < 65535 else np.uint32
num_tokens = slice_indices[-1].max()
slice_indices_dtype = best_fitting_int_dtype(num_tokens)
slice_indices = slice_indices.astype(slice_indices_dtype)
_sizes = _sizes.astype(size_dtype)
block_to_dataset_index = block_to_dataset_index.astype(slice_indices_dtype)
return _sizes, block_to_dataset_index, slice_indices
@property
def slice_indices(self):
return self._slice_indices.array
@property
def sizes(self):
return self._sizes.array
@property
def block_to_dataset_index(self):
return self._block_to_dataset_index.array
def attr(self, attr: str, index: int):
start_ds_idx, _, _ = self.block_to_dataset_index[index]
return self.dataset.attr(attr, start_ds_idx)
def __getitem__(self, index):
start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index]
buffer = torch.cat(
[self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
)
slice_s, slice_e = self.slice_indices[index]
length = slice_e - slice_s
s, e = start_offset, start_offset + length
item = buffer[s:e]
if self.include_targets:
# *target* is the original sentence (=item)
# *source* is shifted right by 1 (maybe left-padded with eos)
# *past_target* is shifted right by 2 (left-padded as needed)
if s == 0:
source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]])
past_target = torch.cat(
[item.new([self.pad, self.eos]), buffer[0 : e - 2]]
)
else:
source = buffer[s - 1 : e - 1]
if s == 1:
past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]])
else:
past_target = buffer[s - 2 : e - 2]
return source, item, past_target
return item
def __len__(self):
return len(self.slice_indices)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(
{
ds_idx
for index in indices
for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]]
for ds_idx in range(start_ds_idx, end_ds_idx + 1)
}
)
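# Hedged usage sketch (not part of the original file): chunk three short
# "sentences" into blocks of at most four tokens. Requires the Cython
# extension (token_block_utils_fast) to be built; the pad/eos ids are illustrative.
#
#     data = [torch.tensor([5, 6, 2]), torch.tensor([7, 2]), torch.tensor([8, 9, 10, 2])]
#     sizes = [len(t) for t in data]
#     ds = TokenBlockDataset(data, sizes, block_size=4, pad=1, eos=2, break_mode="none")
#     print(len(ds), ds[0])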
|
flash_metaseq-main
|
metaseq/data/token_block_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from metaseq import file_utils
from metaseq.data.encoders import register_bpe
from metaseq.dataclass import MetaseqDataclass
from .gpt2_bpe_utils import get_encoder
DEFAULT_ENCODER_JSON = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json"
DEFAULT_VOCAB_BPE = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe"
@dataclass
class GPT2BPEConfig(MetaseqDataclass):
gpt2_encoder_json: str = field(
default=DEFAULT_ENCODER_JSON, metadata={"help": "path to encoder.json"}
)
gpt2_vocab_bpe: str = field(
default=DEFAULT_VOCAB_BPE, metadata={"help": "path to vocab.bpe"}
)
@register_bpe("gpt2", dataclass=GPT2BPEConfig)
class GPT2BPE(object):
def __init__(self, cfg):
encoder_json = file_utils.cached_path(cfg.gpt2_encoder_json)
vocab_bpe = file_utils.cached_path(cfg.gpt2_vocab_bpe)
self.bpe = get_encoder(encoder_json, vocab_bpe)
def encode(self, x: str) -> str:
return " ".join(map(str, self.bpe.encode(x)))
def decode(self, x: str) -> str:
return self.bpe.decode(
[int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()]
)
def is_beginning_of_word(self, x: str) -> bool:
return self.decode(x).startswith(" ")
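# Hedged usage sketch (not part of the original file): instantiate the encoder
# from the default vocab URLs (downloaded on first use via file_utils.cached_path)
# and round-trip a string. The printed ids depend on the GPT-2 vocabulary.
if __name__ == "__main__":
    bpe = GPT2BPE(GPT2BPEConfig())
    ids = bpe.encode("Hello world")
    print(ids)              # space-separated GPT-2 token ids
    print(bpe.decode(ids))  # "Hello world"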
|
flash_metaseq-main
|
metaseq/data/encoders/gpt2_bpe.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from metaseq import file_utils
from metaseq.data.encoders import register_bpe
from metaseq.dataclass import MetaseqDataclass
@dataclass
class HuggingFaceByteLevelBPEConfig(MetaseqDataclass):
bpe_merges: str = field(default="???", metadata={"help": "path to merges.txt"})
bpe_vocab: str = field(default="???", metadata={"help": "path to vocab.json"})
bpe_add_prefix_space: bool = field(
default=False, metadata={"help": "add prefix space before encoding"}
)
@register_bpe("hf_byte_bpe", dataclass=HuggingFaceByteLevelBPEConfig)
class HuggingFaceByteLevelBPE(object):
def __init__(self, cfg):
try:
from tokenizers import ByteLevelBPETokenizer
except ImportError:
raise ImportError(
"Please install huggingface/tokenizers with: " "pip install tokenizers"
)
bpe_vocab = file_utils.cached_path(cfg.bpe_vocab)
bpe_merges = file_utils.cached_path(cfg.bpe_merges)
self.bpe = ByteLevelBPETokenizer(
bpe_vocab,
bpe_merges,
add_prefix_space=cfg.bpe_add_prefix_space,
)
def encode(self, x: str) -> str:
return " ".join(map(str, self.bpe.encode(x).ids))
def decode(self, x: str) -> str:
return self.bpe.decode(
[int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()]
)
def is_beginning_of_word(self, x: str) -> bool:
return self.decode(x).startswith(" ")
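# Hedged usage sketch (not part of the original file); vocab.json/merges.txt are
# placeholders for a downloaded byte-level BPE vocabulary, and the example
# requires `pip install tokenizers`:
#
#     cfg = HuggingFaceByteLevelBPEConfig(bpe_vocab="vocab.json", bpe_merges="merges.txt")
#     bpe = HuggingFaceByteLevelBPE(cfg)
#     print(bpe.decode(bpe.encode("Hello world")))  # "Hello world"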
|
flash_metaseq-main
|
metaseq/data/encoders/hf_byte_bpe.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from metaseq import registry
build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry(
"--tokenizer",
default=None,
)
build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry(
"--bpe",
default=None,
)
# automatically import any Python files in the encoders/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("metaseq.data.encoders." + module)
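# Hedged note (not part of the original file): after the auto-import loop above
# runs, each @register_bpe class is assumed to be reachable by name, e.g.
# BPE_REGISTRY["gpt2"] mapping to the GPT2BPE class registered in gpt2_bpe.py.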
|
flash_metaseq-main
|
metaseq/data/encoders/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Byte pair encoding utilities from GPT-2.
Original source: https://github.com/openai/gpt-2/blob/master/src/encoder.py
Original license: MIT
"""
import json
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns a mapping from utf-8 bytes to unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
It also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class Encoder:
def __init__(self, encoder, bpe_merges, errors="replace"):
self.encoder = encoder
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
try:
import regex as re
self.re = re
except ImportError:
raise ImportError("Please install regex with: pip install regex")
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = self.re.compile(
r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in self.re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder.get(token, token) for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode(
"utf-8", errors=self.errors
)
return text
def get_encoder(encoder_json_path, vocab_bpe_path):
with open(encoder_json_path, "r") as f:
encoder = json.load(f)
with open(vocab_bpe_path, "r", encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
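# Hedged usage sketch (not part of the original file): the byte<->unicode table
# is a bijection over all 256 byte values, and get_pairs enumerates the adjacent
# symbol pairs that bpe() ranks to pick the next merge.
if __name__ == "__main__":
    table = bytes_to_unicode()
    assert len(table) == 256 and len(set(table.values())) == 256
    print(get_pairs(("l", "o", "w", "er")))  # {('l', 'o'), ('o', 'w'), ('w', 'er')}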
|
flash_metaseq-main
|
metaseq/data/encoders/gpt2_bpe_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A standalone module for aggregating metrics.
Metrics can be logged from anywhere using the `log_*` functions defined
in this module. The logged values will be aggregated dynamically based
on the aggregation context in which the logging occurs. See the
:func:`aggregate` context manager for more details.
"""
import contextlib
import logging
import subprocess
import uuid
from collections import defaultdict
from typing import Callable, List, Optional, Dict
from .meters import (
OrderedDict,
MetersDict,
AverageMeter,
TimeMeter,
StopwatchMeter,
Meter,
)
# Aggregation contexts are considered "active" when inside the scope
# created by the :func:`aggregate` context manager.
_aggregators = OrderedDict()
_active_aggregators = OrderedDict()
_active_aggregators_cnt = defaultdict(lambda: 0)
def reset() -> None:
"""Reset all metrics aggregators."""
_aggregators.clear()
_active_aggregators.clear()
_active_aggregators_cnt.clear()
# The "default" aggregator observes all logged values.
_aggregators["default"] = MetersDict()
_active_aggregators["default"] = _aggregators["default"]
_active_aggregators_cnt["default"] = 1
reset()
@contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
"""Context manager to aggregate metrics under a given name.
Aggregations can be nested. If *new_root* is ``False``, then logged
metrics will be recorded along the entire stack of nested
aggregators, including a global "default" aggregator. If *new_root*
is ``True``, then this aggregator will be the root of a new
aggregation stack, thus bypassing any parent aggregators.
Note that aggregation contexts are uniquely identified by their
*name* (e.g., train, valid). Creating a context with an existing
name will reuse the corresponding :class:`MetersDict` instance.
If no name is given, then a temporary aggregator will be created.
Usage::
with metrics.aggregate("train"):
for step, batch in enumerate(epoch):
with metrics.aggregate("train_inner") as agg:
metrics.log_scalar("loss", get_loss(batch))
if step % log_interval == 0:
print(agg.get_smoothed_value("loss"))
agg.reset()
print(metrics.get_smoothed_values("train")["loss"])
Args:
name (str): name of the aggregation. Defaults to a
random/temporary name if not given explicitly.
new_root (bool): make this aggregation the root of a new
aggregation stack.
"""
if name is None:
# generate a temporary name
name = str(uuid.uuid4())
assert name not in _aggregators
agg = MetersDict()
else:
assert name != "default"
agg = _aggregators.setdefault(name, MetersDict())
if new_root:
backup_aggregators = _active_aggregators.copy()
_active_aggregators.clear()
backup_aggregators_cnt = _active_aggregators_cnt.copy()
_active_aggregators_cnt.clear()
_active_aggregators[name] = agg
_active_aggregators_cnt[name] += 1
yield agg
_active_aggregators_cnt[name] -= 1
if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
del _active_aggregators[name]
if new_root:
_active_aggregators.clear()
_active_aggregators.update(backup_aggregators)
_active_aggregators_cnt.clear()
_active_aggregators_cnt.update(backup_aggregators_cnt)
def get_active_aggregators() -> List[MetersDict]:
return list(_active_aggregators.values())
def log_scalar(
key: str,
value: float,
weight: float = 1,
priority: int = 10,
round: Optional[int] = None,
):
"""Log a scalar value.
Args:
key (str): name of the field to log
value (float): value to log
weight (float): weight that this value contributes to the average.
A weight of 0 will always log the latest value.
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, AverageMeter(round=round), priority)
agg[key].update(value, weight)
def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20):
"""Log a scalar value derived from other meters.
Args:
key (str): name of the field to log
fn (Callable[[MetersDict], float]): function that takes a single
argument *meters* and returns the derived value
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
def log_speed(
key: str,
value: float,
priority: int = 30,
round: Optional[int] = None,
):
"""Log the rate of some quantity per second.
Args:
key (str): name of the field to log
value (float): value to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, TimeMeter(round=round), priority)
agg[key].reset() # reset meter on the first call
else:
agg[key].update(value)
def log_start_time(key: str, priority: int = 40, round: Optional[int] = None):
"""Log the duration of some event in seconds.
The duration will be computed once :func:`log_stop_time` is called.
Args:
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, StopwatchMeter(round=round), priority)
agg[key].start()
def log_stop_time(key: str, weight: float = 0.0, prehook=None):
"""Log the duration of some event in seconds.
The duration will be computed since :func:`log_start_time` was called.
Set weight > 0 to report the average time instead of the sum.
Args:
key (str): name of the field to log
weight (float): weight that this time contributes to the average
prehook (function, no arguments): will be called before the timer
is stopped. For example, use prehook=torch.cuda.synchronize to
make sure all gpu operations are done before timer is stopped.
"""
for agg in get_active_aggregators():
if key in agg:
agg[key].stop(weight, prehook)
def log_custom(
new_meter_fn: Callable[[], Meter],
key: str,
*args,
priority: int = 50,
**kwargs,
):
"""Log using a custom Meter.
Any extra *args* or *kwargs* will be passed through to the Meter's
*update* method.
Args:
new_meter_fn (Callable[[], Meter]): function that returns a new
Meter instance
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, new_meter_fn(), priority)
agg[key].update(*args, **kwargs)
def reset_meter(name: str, key: str) -> None:
"""Reset Meter instance aggregated under a given *name* and *key*."""
meter = get_meter(name, key)
if meter is not None:
meter.reset()
def reset_meters(name: str) -> None:
"""Reset Meter instances aggregated under a given *name*."""
meters = get_meters(name)
if meters is not None:
meters.reset()
def get_meter(name: str, key: str) -> Meter:
"""Get a single Meter instance aggregated under *name* and *key*.
Returns:
Meter or None if no metrics have been logged under *name* and *key*.
"""
if name not in _aggregators:
return None
return _aggregators[name].get(key, None)
def get_meters(name: str) -> MetersDict:
"""Get Meter instances aggregated under a given *name*.
Returns:
MetersDict or None if no metrics have been logged under *name*.
"""
return _aggregators.get(name, None)
def get_smoothed_value(name: str, key: str) -> float:
"""Get a single smoothed value.
Raises:
KeyError: if no metrics have been logged under *name* and *key*.
"""
return _aggregators[name].get_smoothed_value(key)
def get_smoothed_values(name: str) -> Dict[str, float]:
"""Get smoothed values aggregated under a given *name*.
Raises:
KeyError: if no metrics have been logged under *name*.
"""
return _aggregators[name].get_smoothed_values()
def state_dict():
return OrderedDict([(name, agg.state_dict()) for name, agg in _aggregators.items()])
def load_state_dict(state_dict):
for name, agg_state in state_dict.items():
_aggregators[name] = MetersDict()
_aggregators[name].load_state_dict(agg_state)
def nvidia_smi_gpu_memory_stats():
"""
Parse the nvidia-smi output and extract the memory used stats.
"""
out_dict = {}
try:
sp = subprocess.Popen(
["nvidia-smi", "--query-gpu=index,memory.used", "--format=csv,noheader"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
out_str = sp.communicate()
out_list = out_str[0].decode("utf-8").split("\n")
out_dict = {}
for item in out_list:
if " MiB" in item:
gpu_idx, mem_used = item.split(",")
gpu_key = f"gpu_{gpu_idx}_mem_used_gb"
out_dict[gpu_key] = int(mem_used.strip().split(" ")[0]) / 1024
except FileNotFoundError:
logging.error(
"Failed to find the 'nvidia-smi' executable for printing GPU stats"
)
except subprocess.CalledProcessError as e:
logging.error(f"nvidia-smi returned non zero error code: {e.returncode}")
return out_dict
def get_nvidia_smi_gpu_memory_stats_str():
return "nvidia-smi stats: {}".format(nvidia_smi_gpu_memory_stats())
|
flash_metaseq-main
|
metaseq/logging/metrics.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import time
from collections import OrderedDict
from typing import Dict, Optional
try:
import torch
def type_as(a, b):
if torch.is_tensor(a) and torch.is_tensor(b):
return a.to(b)
else:
return a
except ImportError:
torch = None
def type_as(a, b):
return a
try:
import numpy as np
except ImportError:
np = None
class Meter(object):
"""Base class for Meters."""
def __init__(self):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
def reset(self):
raise NotImplementedError
@property
def smoothed_value(self) -> float:
"""Smoothed value used for logging."""
raise NotImplementedError
def safe_round(number, ndigits):
if hasattr(number, "__round__"):
return round(number, ndigits)
elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
return safe_round(number.item(), ndigits)
elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"):
return safe_round(number.item(), ndigits)
else:
return number
class AverageMeter(Meter):
"""Computes and stores the average and current value"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.reset()
def reset(self):
self.val = None # most recent update
self.sum = 0 # sum from all updates
self.count = 0 # total n from all updates
def update(self, val, n=1):
if val is not None:
self.val = val
if n > 0:
self.sum = type_as(self.sum, val) + (val * n)
self.count = type_as(self.count, n) + n
def state_dict(self):
return {
"val": self.val,
"sum": self.sum,
"count": self.count,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.val = state_dict["val"]
self.sum = state_dict["sum"]
self.count = state_dict["count"]
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.sum / self.count if self.count > 0 else self.val
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class TimeMeter(Meter):
"""Computes the average occurrence of some event per second"""
def __init__(
self,
init: int = 0,
n: int = 0,
round: Optional[int] = None,
):
self.round = round
self.reset(init, n)
def reset(self, init=0, n=0):
self.init = init
self.start = time.perf_counter()
self.n = n
self.i = 0
def update(self, val=1):
self.n = type_as(self.n, val) + val
self.i += 1
def state_dict(self):
return {
"init": self.elapsed_time,
"n": self.n,
"round": self.round,
}
def load_state_dict(self, state_dict):
if "start" in state_dict:
# backwards compatibility for old state_dicts
self.reset(init=state_dict["init"])
else:
self.reset(init=state_dict["init"], n=state_dict["n"])
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.perf_counter() - self.start)
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class StopwatchMeter(Meter):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.sum = 0
self.n = 0
self.start_time = None
def start(self):
self.start_time = time.perf_counter()
def stop(self, n=1, prehook=None):
if self.start_time is not None:
if prehook is not None:
prehook()
delta = time.perf_counter() - self.start_time
self.sum = self.sum + delta
self.n = type_as(self.n, n) + n
def reset(self):
self.sum = 0 # cumulative time during which stopwatch was active
self.n = 0 # total n across all start/stop
self.start()
def state_dict(self):
return {
"sum": self.sum,
"n": self.n,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.sum = state_dict["sum"]
self.n = state_dict["n"]
self.start_time = None
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.sum / self.n if self.n > 0 else self.sum
@property
def elapsed_time(self):
if self.start_time is None:
return 0.0
return time.perf_counter() - self.start_time
@property
def smoothed_value(self) -> float:
val = self.avg if self.sum > 0 else self.elapsed_time
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class MetersDict(OrderedDict):
"""A sorted dictionary of :class:`Meters`.
Meters are sorted according to a priority that is given when the
meter is first added to the dictionary.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.priorities = []
def __setitem__(self, key, value):
assert key not in self, "MetersDict doesn't support reassignment"
priority, value = value
bisect.insort(self.priorities, (priority, len(self.priorities), key))
super().__setitem__(key, value)
for _, _, key in self.priorities: # reorder dict to match priorities
self.move_to_end(key)
def add_meter(self, key, meter, priority):
self.__setitem__(key, (priority, meter))
def state_dict(self):
return [
(pri, key, self[key].__class__.__name__, self[key].state_dict())
for pri, _, key in self.priorities
# can't serialize DerivedMeter instances
if not isinstance(self[key], MetersDict._DerivedMeter)
]
def load_state_dict(self, state_dict):
self.clear()
self.priorities.clear()
for pri, key, meter_cls, meter_state in state_dict:
meter = globals()[meter_cls]()
meter.load_state_dict(meter_state)
self.add_meter(key, meter, pri)
def get_smoothed_value(self, key: str) -> float:
"""Get a single smoothed value."""
meter = self[key]
if isinstance(meter, MetersDict._DerivedMeter):
return meter.fn(self)
else:
return meter.smoothed_value
def get_smoothed_values(self) -> Dict[str, float]:
"""Get all smoothed values."""
return OrderedDict(
[
(key, self.get_smoothed_value(key))
for key in self.keys()
if not key.startswith("_")
]
)
def reset(self):
"""Reset Meter instances."""
for meter in self.values():
if isinstance(meter, MetersDict._DerivedMeter):
continue
meter.reset()
class _DerivedMeter(Meter):
"""A Meter whose values are derived from other Meters."""
def __init__(self, fn):
self.fn = fn
def reset(self):
pass
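# Hedged usage sketch (not part of the original file): meters are kept in
# priority order, and smoothed values honor each meter's `round` setting.
if __name__ == "__main__":
    md = MetersDict()
    md.add_meter("wps", TimeMeter(round=1), priority=30)
    md.add_meter("loss", AverageMeter(round=3), priority=10)
    md["loss"].update(0.123456)
    print(list(md.keys()))                # ['loss', 'wps'] -- ordered by priority
    print(md.get_smoothed_value("loss"))  # 0.123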
|
flash_metaseq-main
|
metaseq/logging/meters.py
|
flash_metaseq-main
|
metaseq/logging/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., json).
"""
import logging
from collections import OrderedDict
from numbers import Number
import torch
from metaseq.logging.meters import AverageMeter, TimeMeter, StopwatchMeter
logger = logging.getLogger(__name__)
class BaseProgressBar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.n = getattr(iterable, "n", 0)
self.epoch = epoch
self.prefix = ""
if epoch is not None:
self.prefix += "epoch {:03d}".format(epoch)
if prefix is not None:
self.prefix += (" | " if self.prefix != "" else "") + prefix
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
raise NotImplementedError
def update_config(self, config):
"""Log latest configuration."""
pass
def _str_commas(self, stats):
return ", ".join(key + "=" + stats[key].strip() for key in stats.keys())
def _str_pipes(self, stats):
return " | ".join(key + " " + stats[key].strip() for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
def format_stat(stat):
if isinstance(stat, Number):
stat = "{:g}".format(stat)
elif isinstance(stat, AverageMeter):
stat = "{:.3f}".format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = "{:g}".format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = "{:g}".format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
if isinstance(stat, float):
stat = f'{float(f"{stat:.3g}"):g}' # 3 significant figures
return stat
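# Hedged usage sketch (not part of the original file): format_stat normalizes
# meters, tensors, and plain numbers into short printable strings.
if __name__ == "__main__":
    m = AverageMeter()
    m.update(0.123456)
    print(format_stat(m))                      # '0.123' (meters report .avg)
    print(format_stat(torch.tensor(2.71828)))  # '2.72' (floats keep 3 significant figures)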
|
flash_metaseq-main
|
metaseq/logging/progress_bar/base_progress_bar.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
from metaseq.logging.progress_bar.base_progress_bar import logger
from metaseq.logging.progress_bar.json_progress_bar import JsonProgressBar
from metaseq.logging.progress_bar.tensorboard_progress_bar import (
TensorboardProgressBarWrapper,
)
from metaseq.logging.progress_bar.wandb_progress_bar import WandBProgressBarWrapper
def get_progress_bar(
iterator,
log_format: str = "json",
log_interval: int = 100,
log_file: Optional[str] = None,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
wandb_project: Optional[str] = None,
wandb_run_name: Optional[str] = None,
):
if log_file is not None:
handler = logging.FileHandler(filename=log_file)
logger.addHandler(handler)
if log_format == "json":
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
else:
raise ValueError("Unknown log format: {}".format(log_format))
if tensorboard_logdir:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
if wandb_project:
bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
return bar
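# Hedged usage sketch (not part of the original file), assuming JsonProgressBar
# accepts plain dict stats as in the upstream fairseq implementation:
#
#     bar = get_progress_bar(range(100), log_format="json", log_interval=10, epoch=1)
#     for i in bar:
#         bar.log({"loss": 1.0 / (i + 1)}, step=i)
#     bar.print({"done": 1})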
|
flash_metaseq-main
|
metaseq/logging/progress_bar/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import string
import sys
from numbers import Number
import atexit
import torch
from metaseq.logging.meters import AverageMeter
from metaseq.logging.progress_bar.base_progress_bar import BaseProgressBar, logger
_tensorboard_writers = {}
SummaryWriter = None
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
try:
from tensorboardX import SummaryWriter
except ImportError:
pass
def _close_writers():
for w in _tensorboard_writers.values():
w.close()
atexit.register(_close_writers)
class TensorboardProgressBarWrapper(BaseProgressBar):
"""Log to tensorboard."""
def __init__(self, wrapped_bar, tensorboard_logdir):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
if SummaryWriter is None:
logger.warning(
"tensorboard not found, please install with: pip install tensorboard"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
# tensorboard doesn't play well when we clobber it with reruns
# find an acceptable suffix
for suffix in [""] + list(string.ascii_uppercase):
logdir = os.path.join(self.tensorboard_logdir + suffix, key)
if not os.path.exists(logdir):
logger.info(f"Setting tensorboard directory to {logdir}")
break
else:
# wow we have cycled through a lot of these
raise RuntimeError(
f"Tensorboard logdir {logdir} already exists. "
"Ran out of possible suffixes."
)
_writers[key] = SummaryWriter(logdir)
_writers[key].add_text("sys.argv", " ".join(sys.argv))
return _writers[key]
def __len__(self):
return len(self.wrapped_bar)
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
# TODO add hparams to Tensorboard
self.wrapped_bar.update_config(config)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or "")
if writer is None:
return
if step is None:
step = stats["num_updates"]
for key in stats.keys() - {"num_updates"}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
elif torch.is_tensor(stats[key]) and stats[key].numel() == 1:
writer.add_scalar(key, stats[key].item(), step)
writer.flush()
|
flash_metaseq-main
|
metaseq/logging/progress_bar/tensorboard_progress_bar.py
|