python_code | repo_name | file_path
---|---|---|
from .components import LinearActivation, Activation, Normalization, DropoutNd
|
hyena-dna-main
|
src/models/nn/__init__.py
|
""" Utility wrappers around modules to let them handle Args and extra arguments """
import inspect
from functools import wraps
import torch
from torch import nn
def wrap_kwargs(f):
"""
Given a callable f that can consume some named arguments,
    wrap it to accept arbitrary **kwargs and pass back any unused ones.
EXAMPLES
--------
Basic usage:
def foo(x, y=None):
return x
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
--------
The wrapped function can return its own argument dictionary,
which gets merged with the new kwargs.
def foo(x, y=None):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
def foo(x, y=None):
return x, {"y": y, "z": None}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'y': 1, 'z': 2})
--------
The wrapped function can have its own kwargs parameter:
def foo(x, y=None, **kw_args):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {})
--------
Partial functions and modules work automatically:
class Module:
def forward(self, x, y=0):
return x, {"y": y+1}
m = Module()
wrap_kwargs(m.forward)(0, y=1, z=2) == (0, {'y': 2, 'z': 2})
"""
sig = inspect.signature(f)
# Check if f already has kwargs
has_kwargs = any([
param.kind == inspect.Parameter.VAR_KEYWORD
for param in sig.parameters.values()
])
if has_kwargs:
@wraps(f)
def f_kwargs(*args, **kwargs):
y = f(*args, **kwargs)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return y
else:
return y, {}
else:
param_kwargs = inspect.Parameter("kwargs", kind=inspect.Parameter.VAR_KEYWORD)
sig_kwargs = inspect.Signature(parameters=list(sig.parameters.values())+[param_kwargs])
@wraps(f)
def f_kwargs(*args, **kwargs):
bound = sig_kwargs.bind(*args, **kwargs)
if "kwargs" in bound.arguments:
kwargs = bound.arguments.pop("kwargs")
else:
kwargs = {}
y = f(**bound.arguments)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return *y[:-1], {**y[-1], **kwargs}
else:
return y, kwargs
return f_kwargs
def discard_kwargs(f):
if f is None: return None
f_kwargs = wrap_kwargs(f)
@wraps(f)
def f_(*args, **kwargs):
return f_kwargs(*args, **kwargs)[0]
return f_
def PassthroughSequential(*modules):
"""Special Sequential module that chains kwargs.
Semantics are the same as nn.Sequential, with extra convenience features:
- Discard None modules
- Flatten inner Sequential modules
- In case with 0 or 1 Module, rename the class for ease of inspection
"""
def flatten(module):
if isinstance(module, nn.Sequential):
return sum([flatten(m) for m in module], [])
else:
return [module]
modules = flatten(nn.Sequential(*modules))
    modules = [module for module in modules if module is not None]
class Sequential(nn.Sequential):
def forward(self, x, **kwargs):
for layer in self:
x, kwargs = wrap_kwargs(layer.forward)(x, **kwargs)
return x, kwargs
def step(self, x, **kwargs):
for layer in self:
fn = getattr(layer, "step", layer.forward)
x, kwargs = wrap_kwargs(fn)(x, **kwargs)
return x, kwargs
if len(modules) == 0:
Sequential.__name__ = "Identity"
elif len(modules) == 1:
Sequential.__name__ = type(modules[0]).__name__
return Sequential(*modules)
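# Added usage sketch (not part of the original file): PassthroughSequential threads
# unused kwargs through layers that don't consume them. `Scale` below is a hypothetical
# module that accepts an extra `scale` keyword and returns its own (empty) kwargs dict.
def _demo_passthrough_sequential():
    class Scale(nn.Module):
        def forward(self, x, scale=1.0):
            return x * scale, {}  # consume `scale`, pass nothing extra back
    seq = PassthroughSequential(nn.Linear(4, 4), Scale(), None, nn.Linear(4, 4))
    y, kwargs = seq(torch.randn(2, 4), scale=2.0)
    print(y.shape, kwargs)  # torch.Size([2, 4]) {}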
|
hyena-dna-main
|
src/models/nn/utils.py
|
""" Defines flexible gating mechanisms based on ideas from LSSL paper and UR-LSTM paper https://arxiv.org/abs/1910.09890 """
import torch
import torch.nn as nn
class Gate(nn.Module):
""" Implements gating mechanisms. TODO update this with more detailed description with reference to LSSL paper when it's on arxiv
Mechanisms:
N - No gate
G - Standard sigmoid gate
UR - Uniform refine gates
R - Refine gate
FS - Forward discretization, Sigmoid activation [equivalent to G]
BE - Backward discretization, Exp activation [equivalent to G]
BR - Backward discretization, Relu activation
TE - Trapezoid discretization, Exp activation
TR - Trapezoid discretization, Relu activation
TS - Trapezoid discretization, Sigmoid activation (0 to 2)
"""
def __init__(self, size, preact_ctor, preact_args, mechanism='N'):
super().__init__()
self.size = size
self.mechanism = mechanism
if self.mechanism == 'N':
pass
elif self.mechanism in ['G', 'FS', 'BE', 'BR', 'TE', 'TR', 'TS', 'ZE', 'ZR', 'ZS']:
self.W_g = preact_ctor(*preact_args)
elif self.mechanism in ['U', 'UT']:
self.W_g = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'UR':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'R':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
elif self.mechanism in ['GT']:
self.W_g = preact_ctor(*preact_args)
else:
assert False, f'Gating type {self.mechanism} is not supported.'
def forward(self, *inputs):
if self.mechanism == 'N':
return 1.0
if self.mechanism == 'G':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
        elif self.mechanism == 'U':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
elif self.mechanism == 'UR':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'R':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'UT':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'GT':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
else:
g_preact = self.W_g(*inputs)
# if self.mechanism[1] == 'S':
# g = torch.sigmoid(g_preact)
# elif self.mechanism[1] == 'E':
# g = torch.exp(g_preact)
# elif self.mechanism[1] == 'R':
# g = torch.relu(g_preact)
if self.mechanism == 'FS':
g = torch.sigmoid(g_preact)
g = self.forward_diff(g)
elif self.mechanism == 'BE':
g = torch.exp(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'BR':
g = torch.relu(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'TS':
g = 2 * torch.sigmoid(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TE':
g = torch.exp(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TR':
g = torch.relu(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'ZE':
g = torch.exp(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZR':
g = torch.relu(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZS':
g = torch.sigmoid(g_preact)
g = self.zoh(g)
return g
def forward_diff(self, x):
return x
def backward_diff(self, x):
return x / (1+x)
def trapezoid(self, x):
return x / (1 + x/2)
def zoh(self, x):
return 1 - torch.exp(-x)
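# Added usage sketch (not part of the original file): a 'UR' (uniform refine) gate,
# with nn.Linear standing in as a hypothetical pre-activation constructor mapping
# 32 input features to the gate size of 16.
def _demo_gate():
    gate = Gate(16, preact_ctor=nn.Linear, preact_args=(32, 16), mechanism='UR')
    x = torch.randn(4, 32)
    g = gate(x)
    print(g.shape, float(g.min()), float(g.max()))  # gate values stay in (0, 1)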
|
hyena-dna-main
|
src/models/nn/gate.py
|
"""Implementations of several types of Discrete Sin/Cosine Transforms with various reductions to FFT.
Currently not used by S4
"""
import torch
import torch.nn as nn
import numpy as np
import scipy.fft
from einops import rearrange, repeat
class DCT(nn.Module):
""" Reductions adapted from https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft """
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = scipy.fft.dct(np.eye(N), norm=norm, type=2).T
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
""" Baseline DCT type II - matmul by DCT matrix """
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
""" DCT type II - reduction to FFT size 4N """
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
z = torch.zeros_like(x)
x = torch.stack([z, x], dim=-1)
x = x.view(x.shape[:-2] + (-1,))
y = torch.fft.fft(x)
y = y[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
""" DCT type II - reduction to FFT size 2N mirrored
The reduction from the DSP forum is not quite correct in the complex input case.
halfshift(FFT[a, b, c, d, d, c, b, a]) -> [A, B, C, D, 0, -D, -C, -B]
In the case of real input, the intermediate step after FFT has form [A, B, C, D, 0, D*, C*, B*]
"""
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
y = torch.fft.fft(x)[..., :self.N]
y = y * self.Q
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
""" DCT type II - reduction to size N """
assert self.N == x.shape[-1]
x = torch.cat([x[..., 0::2], x[..., 1::2].flip(-1)], dim=-1)
y = torch.fft.fft(x)
y = y * 2 * self.Q
if torch.is_complex(x):
y = torch.cat([y[..., :1], (y[..., 1:] + 1j * y[..., 1:].flip(-1)) / 2], dim=-1) # TODO in-place sum
else:
y = torch.real(y)
return y
class IDCT(nn.Module):
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = np.linalg.inv(scipy.fft.dct(np.eye(N), norm=norm, type=2).T)
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(2*self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
""" Baseline DCT type II - matmul by DCT matrix """
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
""" DCT type II - reduction to FFT size 4N """
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x.flip(-1), -x[..., 1:], z, x[..., 1:].flip(-1)], dim=-1)
y = torch.fft.ifft(x)
y = y[..., 1:2*self.N:2]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
""" DCT type II - reduction to FFT size 2N mirrored """
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x[..., 1:].flip(-1)], dim=-1)
x = x / self.Q
y = torch.fft.ifft(x)[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
""" DCT type II - reduction to size N """
assert self.N == x.shape[-1]
raise NotImplementedError # Straightforward by inverting operations of DCT-II reduction
def test_dct_ii():
N = 8
dct = DCT(N)
baseline = dct.forward_dense
methods = [dct.forward_4n, dct.forward_2n, dct.forward_n]
# Real case
print("DCT-II Real input")
x = torch.randn(1, N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
# Complex case
print("DCT-II Complex input")
x = torch.randn(N) + 1j * torch.randn(N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
def test_dct_iii():
N = 8
dct = IDCT(N)
baseline = dct.forward_dense
methods = [dct.forward_4n, dct.forward_2n]
# Real case
print("DCT-III Real input")
x = torch.randn(1, N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
# Complex case
print("DCT-III Complex input")
# x = torch.randn(N) + 1j * torch.randn(N)
x = 1j * torch.ones(N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
|
hyena-dna-main
|
src/models/nn/dxt.py
|
""" Utility nn components, in particular handling activations, initializations, and normalization layers """
from functools import partial
import math
from typing import ForwardRef
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from opt_einsum import contract
def stochastic_depth(input: torch.Tensor, p: float, mode: str, training: bool = True):
"""
Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
<https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
branches of residual architectures.
Args:
        input (Tensor[N, ...]): The input tensor of arbitrary dimensions with the first one
being its batch i.e. a batch with ``N`` rows.
p (float): probability of the input to be zeroed.
mode (str): ``"batch"`` or ``"row"``.
``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
randomly selected rows from the batch.
training: apply stochastic depth if is ``True``. Default: ``True``
Returns:
Tensor[N, ...]: The randomly zeroed tensor.
"""
if p < 0.0 or p > 1.0:
raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
if mode not in ["batch", "row"]:
raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
if not training or p == 0.0:
return input
survival_rate = 1.0 - p
if mode == "row":
size = [input.shape[0]] + [1] * (input.ndim - 1)
else:
size = [1] * input.ndim
noise = torch.empty(size, dtype=input.dtype, device=input.device)
noise = noise.bernoulli_(survival_rate).div_(survival_rate)
return input * noise
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
# TODO(karan): need to upgrade to torchvision==0.11.0 to use StochasticDepth directly
# from torchvision.ops import StochasticDepth
super().__init__()
self.p = p
self.mode = mode
def forward(self, input):
return stochastic_depth(input, self.p, self.mode, self.training)
def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + '('
tmpstr += 'p=' + str(self.p)
tmpstr += ', mode=' + str(self.mode)
tmpstr += ')'
return tmpstr
class DropoutNd(nn.Module):
def __init__(self, p: float = 0.5, tie=True, transposed=True):
"""
tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
"""
super().__init__()
if p < 0 or p >= 1:
raise ValueError("dropout probability has to be in [0, 1), " "but got {}".format(p))
self.p = p
self.tie = tie
self.transposed = transposed
self.binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
def forward(self, X):
""" X: (batch, dim, lengths...) """
if self.training:
if not self.transposed: X = rearrange(X, 'b d ... -> b ... d')
# binomial = torch.distributions.binomial.Binomial(probs=1-self.p) # This is incredibly slow
mask_shape = X.shape[:2] + (1,)*(X.ndim-2) if self.tie else X.shape
# mask = self.binomial.sample(mask_shape)
mask = torch.rand(*mask_shape, device=X.device) < 1.-self.p
X = X * mask * (1.0/(1-self.p))
if not self.transposed: X = rearrange(X, 'b ... d -> b d ...')
return X
return X
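# Added usage sketch (not part of the original file): with tie=True the mask is shared
# across the length dimension, so each (batch, channel) lane is dropped or kept whole.
def _demo_dropout_nd():
    drop = DropoutNd(p=0.5, tie=True, transposed=True)
    drop.train()  # dropout is only applied in training mode
    y = drop(torch.ones(2, 4, 8))  # (batch, dim, length)
    print(y)  # each (batch, channel) row is either all 0 or all 2.0 (the 1/(1-p) rescale)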
def Activation(activation=None, size=None, dim=-1):
if activation in [ None, 'id', 'identity', 'linear' ]:
return nn.Identity()
elif activation == 'tanh':
return nn.Tanh()
elif activation == 'relu':
return nn.ReLU()
elif activation == 'gelu':
return nn.GELU()
elif activation in ['swish', 'silu']:
return nn.SiLU()
elif activation == 'glu':
return nn.GLU(dim=dim)
elif activation == 'sigmoid':
return nn.Sigmoid()
elif activation == 'softplus':
return nn.Softplus()
elif activation in ['sqrelu', 'relu2']:
return SquaredReLU()
elif activation == 'laplace':
return Laplace()
elif activation == 'ln':
return TransposedLN(dim)
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
def get_initializer(name, activation=None):
if activation in [ None, 'id', 'identity', 'linear' ]:
nonlinearity = 'linear'
elif activation in ['relu', 'tanh', 'sigmoid']:
nonlinearity = activation
elif activation in ['gelu', 'swish', 'silu']:
nonlinearity = 'relu' # Close to ReLU so approximate with ReLU's gain
else:
raise NotImplementedError(f"get_initializer: activation {activation} not supported")
if name == 'uniform':
initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
elif name == 'normal':
initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
elif name == 'xavier':
initializer = torch.nn.init.xavier_normal_
elif name == 'zero':
initializer = partial(torch.nn.init.constant_, val=0)
elif name == 'one':
initializer = partial(torch.nn.init.constant_, val=1)
else:
raise NotImplementedError(f"get_initializer: initializer type {name} not supported")
return initializer
def LinearActivation(
d_input, d_output, bias=True,
zero_bias_init=False,
transposed=False,
initializer=None,
activation=None,
activate=False, # Apply activation as part of this module
weight_norm=False,
**kwargs,
):
""" Returns a linear nn.Module with control over axes order, initialization, and activation """
# Construct core module
# linear_cls = partial(nn.Conv1d, kernel_size=1) if transposed else nn.Linear
linear_cls = TransposedLinear if transposed else nn.Linear
if activation == 'glu': d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
# Initialize weight
if initializer is not None:
get_initializer(initializer, activation)(linear.weight)
# Initialize bias
if bias and zero_bias_init:
nn.init.zeros_(linear.bias)
# Weight norm
if weight_norm:
linear = nn.utils.weight_norm(linear)
if activate and activation is not None:
activation = Activation(activation, d_output, dim=1 if transposed else -1)
linear = nn.Sequential(linear, activation)
return linear
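# Added usage sketch (not part of the original file): with activation='glu' the inner
# width is doubled and nn.GLU halves it back, so the output size is d_output as requested.
def _demo_linear_activation():
    layer = LinearActivation(16, 32, transposed=True, activation='glu', activate=True)
    y = layer(torch.randn(2, 16, 10))  # (B, D, L) since transposed=True
    print(y.shape)  # torch.Size([2, 32, 10])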
class SquaredReLU(nn.Module):
def forward(self, x):
# return F.relu(x)**2
return torch.square(F.relu(x)) # Could this be faster?
def laplace(x, mu=0.707107, sigma=0.282095):
x = (x - mu).div(sigma * math.sqrt(2.0))
return 0.5 * (1.0 + torch.erf(x))
class Laplace(nn.Module):
def __init__(self, mu=0.707107, sigma=0.282095):
super().__init__()
self.mu = mu
self.sigma = sigma
def forward(self, x):
return laplace(x, mu=self.mu, sigma=self.sigma)
class TransposedLinear(nn.Module):
""" Linear module on the second-to-last dimension
Assumes shape (B, D, L), where L can be 1 or more axis
"""
def __init__(self, d_input, d_output, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.empty(d_output, d_input))
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) # nn.Linear default init
# nn.init.kaiming_uniform_(self.weight, nonlinearity='linear') # should be equivalent
if bias:
self.bias = nn.Parameter(torch.empty(d_output))
bound = 1 / math.sqrt(d_input)
nn.init.uniform_(self.bias, -bound, bound)
setattr(self.bias, "_optim", {"weight_decay": 0.0})
else:
self.bias = 0.0
def forward(self, x):
num_axis = len(x.shape[2:]) # num_axis in L, for broadcasting bias
y = contract('b u ..., v u -> b v ...', x, self.weight) + self.bias.view(-1, *[1]*num_axis)
return y
class TransposedLN(nn.Module):
""" LayerNorm module over second dimension
Assumes shape (B, D, L), where L can be 1 or more axis
    This is slow and a dedicated CUDA/Triton implementation should provide substantial end-to-end speedup
"""
def __init__(self, d, scalar=True):
super().__init__()
self.scalar = scalar
if self.scalar:
self.m = nn.Parameter(torch.zeros(1))
self.s = nn.Parameter(torch.ones(1))
setattr(self.m, "_optim", {"weight_decay": 0.0})
setattr(self.s, "_optim", {"weight_decay": 0.0})
else:
self.ln = nn.LayerNorm(d)
def forward(self, x):
if self.scalar:
# calc. stats over D dim / channels
s, m = torch.std_mean(x, dim=1, unbiased=False, keepdim=True)
y = (self.s/s) * (x-m+self.m)
else:
# move channel to last axis, apply layer_norm, then move channel back to second axis
_x = self.ln(rearrange(x, 'b d ... -> b ... d'))
y = rearrange(_x, 'b ... d -> b d ...')
return y
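# Added usage sketch (not part of the original file): TransposedLN(d, scalar=False)
# matches nn.LayerNorm applied to a channels-last view of the same (B, D, L) tensor
# (both modules are freshly initialized here, so their affine parameters agree).
def _demo_transposed_ln():
    d = 8
    x = torch.randn(2, d, 5)
    tln = TransposedLN(d, scalar=False)
    ref = nn.LayerNorm(d)(rearrange(x, 'b d l -> b l d'))
    print(torch.allclose(tln(x), rearrange(ref, 'b l d -> b d l'), atol=1e-6))  # True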
class Normalization(nn.Module):
def __init__(
self,
d,
transposed=False, # Length dimension is -1 or -2
_name_='layer',
**kwargs
):
super().__init__()
self.transposed = transposed
self._name_ = _name_
if _name_ == 'layer':
self.channel = True # Normalize over channel dimension
if self.transposed:
self.norm = TransposedLN(d, **kwargs)
else:
self.norm = nn.LayerNorm(d, **kwargs)
elif _name_ == 'instance':
self.channel = False
norm_args = {'affine': False, 'track_running_stats': False}
norm_args.update(kwargs)
self.norm = nn.InstanceNorm1d(d, **norm_args) # (True, True) performs very poorly
elif _name_ == 'batch':
self.channel = False
norm_args = {'affine': True, 'track_running_stats': True}
norm_args.update(kwargs)
self.norm = nn.BatchNorm1d(d, **norm_args)
elif _name_ == 'group':
self.channel = False
            self.norm = nn.GroupNorm(1, d, **kwargs)
elif _name_ == 'none':
self.channel = True
self.norm = nn.Identity()
else: raise NotImplementedError
def forward(self, x):
# Handle higher dimension logic
shape = x.shape
if self.transposed:
x = rearrange(x, 'b d ... -> b d (...)')
else:
            x = rearrange(x, 'b ... d -> b (...) d')
# The cases of LayerNorm / no normalization are automatically handled in all cases
# Instance/Batch Norm work automatically with transposed axes
if self.channel or self.transposed:
x = self.norm(x)
else:
x = x.transpose(-1, -2)
x = self.norm(x)
x = x.transpose(-1, -2)
x = x.view(shape)
return x
def step(self, x, **kwargs):
assert self._name_ in ["layer", "none"]
if self.transposed: x = x.unsqueeze(-1)
x = self.forward(x)
if self.transposed: x = x.squeeze(-1)
return x
class TSNormalization(nn.Module):
def __init__(self, method, horizon):
super().__init__()
self.method = method
self.horizon = horizon
def forward(self, x):
# x must be BLD
if self.method == 'mean':
self.scale = x.abs()[:, :-self.horizon].mean(dim=1)[:, None, :]
return x / self.scale
elif self.method == 'last':
self.scale = x.abs()[:, -self.horizon-1][:, None, :]
return x / self.scale
return x
class TSInverseNormalization(nn.Module):
def __init__(self, method, normalizer):
super().__init__()
self.method = method
self.normalizer = normalizer
def forward(self, x):
if self.method == 'mean' or self.method == 'last':
return x * self.normalizer.scale
return x
class ReversibleInstanceNorm1dInput(nn.Module):
def __init__(self, d, transposed=False):
super().__init__()
        # BLD if transposed is False, otherwise BDL
self.transposed = transposed
self.norm = nn.InstanceNorm1d(d, affine=True, track_running_stats=False)
def forward(self, x):
# Means, stds
if not self.transposed:
x = x.transpose(-1, -2)
self.s, self.m = torch.std_mean(x, dim=-1, unbiased=False, keepdim=True)
self.s += 1e-4
x = (x - self.m) / self.s
# x = self.norm.weight.unsqueeze(-1) * x + self.norm.bias.unsqueeze(-1)
if not self.transposed:
return x.transpose(-1, -2)
return x
class ReversibleInstanceNorm1dOutput(nn.Module):
def __init__(self, norm_input):
super().__init__()
self.transposed = norm_input.transposed
self.weight = norm_input.norm.weight
self.bias = norm_input.norm.bias
self.norm_input = norm_input
def forward(self, x):
if not self.transposed:
x = x.transpose(-1, -2)
# x = (x - self.bias.unsqueeze(-1))/self.weight.unsqueeze(-1)
x = x * self.norm_input.s + self.norm_input.m
if not self.transposed:
return x.transpose(-1, -2)
return x
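# Added usage sketch (not part of the original file): the input/output pair inverts the
# normalization exactly (up to float round-off), since the output reuses the stored
# per-channel mean and (shifted) std from the input module.
def _demo_reversible_instance_norm():
    x = torch.randn(2, 16, 8)  # (B, L, D) with transposed=False
    norm_in = ReversibleInstanceNorm1dInput(8, transposed=False)
    norm_out = ReversibleInstanceNorm1dOutput(norm_in)
    y = norm_out(norm_in(x))
    print(torch.allclose(y, x, atol=1e-5))  # True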
|
hyena-dna-main
|
src/models/nn/components.py
|
# Copyright (c) 2023, Tri Dao, Dan Fu.
# Simplified, mostly standalone version of LongConvLM for synthetics.
import math
from functools import partial
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import StochasticDepth
from einops import rearrange
from src.utils import instantiate
import src.utils.registry as registry
class LinearResidual(nn.Linear):
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDense.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return super().forward(input), input
class SelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, causal=None, key_padding_mask=None):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
causal: if passed, will override self.causal
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
False means to mask out. (B, S)
"""
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
causal = self.causal if causal is None else causal
q, k, v = qkv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if key_padding_mask is not None:
padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype,
device=scores.device)
padding_mask.masked_fill_(key_padding_mask, 0.0)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
if causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
class MHA(nn.Module):
"""Multi-head self-attention and cross-attention
"""
def __init__(self, embed_dim, num_heads, bias=True, dropout=0.0,
softmax_scale=None, causal=False, layer_idx=None, dwconv=False,return_residual=False,device=None, dtype=None) -> None:
"""
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.layer_idx = layer_idx
self.dwconv = dwconv
self.return_residual = return_residual
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
linear_cls = nn.Linear
linear_resid_cls = LinearResidual
inner_attn_cls = SelfAttention
if not self.return_residual:
self.Wqkv = linear_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
else:
self.Wqkv = linear_resid_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_qkv = nn.Conv1d(3 * embed_dim, 3 * embed_dim, kernel_size=3, padding=2,
groups=3 * embed_dim)
self.inner_attn = inner_attn_cls(causal=causal, softmax_scale=softmax_scale,
attention_dropout=dropout)
        # output projection always has the bias (for now)
self.out_proj = linear_cls(embed_dim, embed_dim, **factory_kwargs)
def forward(self, x, key_padding_mask=None, **kwargs):
"""
Arguments:
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
                is the sum of the sequence lengths in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into x. Only applicable when using
FlashAttention.
max_seqlen: int. Maximum sequence length in the batch.
key_padding_mask: boolean mask, True means to keep, False means to mask out.
(batch, seqlen). Only applicable when not using FlashAttention.
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
inference_params: for generation. Adapted from Megatron-LM (and Apex)
https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
"""
kwargs = ({'key_padding_mask': key_padding_mask, **kwargs})
if not self.return_residual:
qkv = self.Wqkv(x)
else:
qkv, x = self.Wqkv(x)
if self.dwconv:
qkv = rearrange(self.dwconv_qkv(rearrange(qkv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
qkv = rearrange(qkv, '... (three h d) -> ... three h d', three=3, d=self.head_dim)
context = self.inner_attn(qkv, **kwargs)
out = self.out_proj(rearrange(context, '... h d -> ... (h d)'))
return out if not self.return_residual else (out, x)
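# Added usage sketch (not part of the original file): causal multi-head self-attention
# over a toy batch, using the default (non-fused) projections defined above.
def _demo_mha():
    mha = MHA(embed_dim=64, num_heads=4, causal=True)
    out = mha(torch.randn(2, 16, 64))  # (batch, seqlen, hidden)
    print(out.shape)  # torch.Size([2, 16, 64])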
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None,
word_embed_proj_dim=None, device=None, dtype=None):
"""
If max_position_embeddings <= 0, there's no position embeddings
            If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension
                then project up to embed_dim
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if word_embed_proj_dim is None:
self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx,
**factory_kwargs)
self.project_in = None
else:
self.word_embeddings = nn.Embedding(vocab_size, word_embed_proj_dim,
padding_idx=padding_idx, **factory_kwargs)
self.project_in = nn.Linear(word_embed_proj_dim, embed_dim, bias=False,
**factory_kwargs)
self.max_position_embeddings = max_position_embeddings
if self.max_position_embeddings > 0:
self.position_embeddings = nn.Embedding(max_position_embeddings, embed_dim,
**factory_kwargs)
def forward(self, input_ids, position_ids=None):
"""
input_ids: (batch, seqlen)
position_ids: (batch, seqlen)
"""
batch_size, seqlen = input_ids.shape
embeddings = self.word_embeddings(input_ids)
if self.project_in is not None:
embeddings = self.project_in(embeddings)
if self.max_position_embeddings > 0:
if position_ids is None:
position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, activation=F.gelu,
return_residual=False, device=None, dtype=None):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/mlp.py
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.return_residual = return_residual
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.activation = activation
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
def forward(self, x):
y = self.fc1(x)
y = self.activation(y)
y = self.fc2(y)
return y if not self.return_residual else (y, x)
class Block(nn.Module):
def __init__(self, dim, mixer_cls=None, mlp_cls=None, norm_cls=nn.LayerNorm,
dropout_cls=nn.Dropout, prenorm=True, resid_dropout1=0., resid_dropout2=0.,
drop_path1=0., drop_path2=0.,
return_residual=False,
residual_in_fp32=False):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/block.py
For prenorm=True, this Block has a slightly different structure compared to a regular
prenorm Transformer block.
The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
[Ref: https://arxiv.org/abs/2002.04745]
Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
the hidden_states (output of the MLP) and the residual.
This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
The residual needs to be provided (except for the very first block).
For prenorm=False, this Block has the same structure as a regular postnorm Transformer
block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
This is for performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
super().__init__()
self.prenorm = prenorm
self.return_residual = return_residual
self.residual_in_fp32 = residual_in_fp32
if self.residual_in_fp32:
assert self.prenorm, 'residual_in_fp32 is only compatible with prenorm=True'
if mixer_cls is None:
mixer_cls = partial(MHA, num_heads=dim // 64)
if mlp_cls is None:
mlp_cls = partial(Mlp, hidden_features=4 * dim)
self.mixer = mixer_cls(dim)
self.dropout1 = dropout_cls(resid_dropout1)
self.drop_path1 = StochasticDepth(drop_path1, mode='row')
self.norm1 = norm_cls(dim)
self.mlp = mlp_cls(dim)
if not isinstance(self.mlp, nn.Identity):
self.dropout2 = dropout_cls(resid_dropout2)
self.drop_path2 = StochasticDepth(drop_path2, mode='row')
self.norm2 = norm_cls(dim)
def forward(self, hidden_states, residual = None,
mixer_subset=None, mixer_kwargs=None):
r"""Pass the input through the encoder layer.
Args:
hidden_states: the sequence to the encoder layer (required).
            residual: if postnorm, residual=None; if prenorm, hidden_states = Attn/MLP(LN(residual))
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
"""
if self.prenorm:
dropped = self.drop_path1(self.dropout1(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
if mixer_kwargs is None:
mixer_kwargs = {}
if mixer_subset is not None:
mixer_kwargs['mixer_subset'] = mixer_subset
hidden_states = self.mixer(hidden_states, **mixer_kwargs)
if mixer_subset is not None:
residual = residual[:, mixer_subset]
if not isinstance(self.mlp, nn.Identity):
dropped = self.drop_path2(self.dropout2(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
else:
assert residual is None
mixer_out = self.mixer(
hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
)
if self.return_residual: # mixer out is actually a pair here
mixer_out, hidden_states = mixer_out
hidden_states = self.norm1((self.drop_path1(self.dropout1(mixer_out))
+ hidden_states).to(dtype=self.norm1.weight.dtype))
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if self.return_residual: # mlp out is actually a pair here
mlp_out, hidden_states = mlp_out
hidden_states = self.norm2((self.drop_path2(self.dropout2(mlp_out))
+ hidden_states).to(dtype=self.norm2.weight.dtype))
return hidden_states
def create_mixer_cls(layer=None,
attn_layer_idx=None, attn_cfg=None, layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop('causal', True)
mha_cls = MHA
mixer_cls = partial(mha_cls, causal=causal, layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),**factory_kwargs)
else:
mixer_cls = instantiate(registry.layer, layer, partial=True, layer_idx=layer_idx, **factory_kwargs)
return mixer_cls
def create_mlp_cls(d_model, d_inner=None, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
mlp_cls = partial(Mlp, hidden_features=inner_dim,
activation=partial(F.gelu, approximate='tanh'), **factory_kwargs)
return mlp_cls
def create_block(d_model, d_inner=None,
layer=None, attn_layer_idx=None,
attn_cfg=None, layer_norm_epsilon=1e-5,
resid_dropout1=0.0, resid_dropout2=0.0, residual_in_fp32=False,
layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
mixer_cls = create_mixer_cls(layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_idx=layer_idx,
**factory_kwargs)
mlp_cls = create_mlp_cls(d_model, d_inner=d_inner,
**factory_kwargs)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(d_model, mixer_cls, mlp_cls, norm_cls=norm_cls,
prenorm=True, resid_dropout1=resid_dropout1, resid_dropout2=resid_dropout2,residual_in_fp32=residual_in_fp32)
block.layer_idx = layer_idx
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True,
glu_act=False):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(p[:out_features // 2], mean=0.0, std=initializer_range / math.sqrt(2 * n_layer) * 2)
class LMBackbone(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.process_group = process_group
self.residual_in_fp32 = residual_in_fp32
self.embeddings = GPT2Embeddings(d_model, vocab_size, max_position_embeddings,
**factory_kwargs)
self.layers = nn.ModuleList([create_block(
d_model, d_inner=d_inner,
layer=layer, attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout, residual_in_fp32=residual_in_fp32,layer_idx=i,
**factory_kwargs,
) for i in range(n_layer)])
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
def forward(self, input_ids, position_ids=None):
hidden_states = self.embeddings(input_ids, position_ids=position_ids,)
residual = None
for layer in self.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
return hidden_states
class SimpleLMHeadModel(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
pad_vocab_size_multiple: int = 1,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, residual_in_fp32=residual_in_fp32,
**factory_kwargs, **kwargs
)
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
def forward(self, input_ids, position_ids=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids)
lm_logits = self.lm_head(hidden_states)
CausalLMOutput = namedtuple('CausalLMOutput', ['logits'])
return CausalLMOutput(logits=lm_logits), None
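# Added usage sketch (not part of the original file): a tiny attention-only LM. Putting
# every layer index in attn_layer_idx keeps this self-contained (no registry lookup);
# it still assumes the module-level imports (torchvision, src.utils) resolve.
def _demo_simple_lm():
    model = SimpleLMHeadModel(d_model=64, n_layer=2, d_inner=256, vocab_size=100,
                              attn_layer_idx=[0, 1], attn_cfg={'num_heads': 4})
    input_ids = torch.randint(0, 100, (2, 16))
    output, _ = model(input_ids)
    print(output.logits.shape)  # torch.Size([2, 16, 100])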
|
hyena-dna-main
|
src/models/sequence/simple_lm.py
|
""" Implementation of FFN block in the style of Transformers """
from functools import partial
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import LinearActivation, DropoutNd
class FF(SequenceModule):
def __init__(self, d_input, expand=2, d_output=None, transposed=False, activation='gelu', initializer=None, dropout=0.0, tie_dropout=False):
super().__init__()
self.d_output = d_input if d_output is None else d_output
self.transposed = transposed
d_inner = expand * d_input
linear1 = LinearActivation(
d_input, d_inner,
transposed=transposed,
activation=activation,
initializer=initializer,
activate=True,
)
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
# dropout_cls = nn.Dropout2d if self.transposed else nn.Dropout
drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
linear2 = LinearActivation(
d_inner, self.d_output,
transposed=transposed,
activation=None,
initializer=initializer,
activate=False,
)
self.ff = nn.Sequential(
linear1,
drop,
linear2,
)
def forward(self, x, *args, **kwargs):
return self.ff(x), None
def step(self, x, state, **kwargs):
# x: [batch, d_input]
if self.transposed:
# expects: [batch, d_input, seq_len]
return self.ff(x.unsqueeze(-1)).squeeze(-1), state
else:
return self.ff(x), state
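# Added usage sketch (not part of the original file): the FF block preserves the input
# shape and follows the (output, state) convention of SequenceModule.
def _demo_ff():
    import torch  # not imported at module level in this file
    ff = FF(d_input=32, expand=2, dropout=0.1)
    y, _ = ff(torch.randn(4, 100, 32))  # (batch, length, d_input)
    print(y.shape)  # torch.Size([4, 100, 32])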
|
hyena-dna-main
|
src/models/sequence/ff.py
|
'''PyTorch version of the block FFT convolution as described in the H3 paper.'''
import torch
from einops import rearrange
import math
from torch import nn
from src.models.nn import Activation
from src.utils.train import OptimModule
def ref_dft_matrix(N, H=1):
"""Compute the DFT matrix of size N x N.
This is where we could add extra compute for free."""
# n = torch.arange(N)
n = torch.arange(N).cuda()
k = n.view(-1, 1)
M = torch.exp(-2j * torch.pi * n * k / N)
return torch.view_as_real(M.repeat(H, 1, 1))
def compute_twiddle_factors(n, m):
"""Compute the twiddle factors of size n x m"""
# n_a = torch.arange(n).view(-1, 1)
# m_a = torch.arange(m)
n_a = torch.arange(n).cuda().view(-1, 1)
m_a = torch.arange(m).cuda()
N = n * m
M = torch.exp(-2j * torch.pi * n_a * m_a / N)
return torch.view_as_real(M)
def _cooley_tukey(
k, n, m,
dft_matrix=ref_dft_matrix,
max_m=16,
activation=None,
):
'''
Compute the FFT using the general Cooley-Tukey algorithm:
* Reshape to (m, n)
* Do n m-length FFTs along the rows
* Transpose to (n, m), multiply by twiddle factors
* Do m n-length FFTs along the rows
This function assumes that m <= 16 and recurses on n.
The base case is n <= 16 (we are simulating tensor cores of 16x16 mm).
The dft_matrix function is overwriteable
so that we can replace it with learnable parameters in a model.
'''
assert m <= max_m
if activation is not None:
act_fn = Activation(activation)
k = rearrange(k, '... (m n) -> ... m n', m=m, n=n) # (m, n)
# do n m-length FFTs
if activation is None:
mat = torch.view_as_complex(dft_matrix(m))
k_f = torch.einsum('... m o, ... o n -> ... m n', mat, k) # (..., m, n)
else:
mat = torch.view_as_complex(dft_matrix(m))
k_f = torch.view_as_complex(act_fn(
torch.view_as_real(torch.einsum('... m o, ... o n -> ... m n', mat, k))
)) # (..., m, n)
# multiply by twiddle factors
twi = torch.view_as_complex(compute_twiddle_factors(n, m)) # (n, m)
k_f = torch.einsum('n m, ... m n -> ... n m', twi, k_f) # (..., n, m)
if n <= max_m:
# do m n-length FFTs
if activation is None:
mat = torch.view_as_complex(dft_matrix(n))
k_f = torch.einsum('... n o, ... o m -> ... n m', mat, k_f) # (.., n, m)
else:
mat = torch.view_as_complex(dft_matrix(n))
k_f = torch.view_as_complex(act_fn(
torch.view_as_real(torch.einsum('... n o, ... o m -> ... n m', mat, k_f))
)) # (.., n, m)
else:
# recurse
k_f = rearrange(k_f, '... h n m -> ... m h n')
k_f = _cooley_tukey(k_f, n // max_m, max_m, dft_matrix, max_m, activation)
k_f = rearrange(k_f, '... m h n -> ... h n m')
# reshape for the output
k_f = rearrange(k_f, '... n m -> ... (n m)') # (..., n*m)
return k_f
def block_fft(
k, N,
dft_matrix=ref_dft_matrix,
max_m=16,
**kwargs,
):
'''
    Compute the FFT of size N of the vector k, using the _cooley_tukey reduction above.
The dft_matrix function is overwriteable
so that we can replace it with learnable parameters in a model.
'''
if not math.log(N, 2).is_integer():
N = int(2 ** math.ceil(math.log(N, 2)))
# pad k with zeros if necessary (e.g. for causality)
if k.shape[-1] != N:
k = nn.ConstantPad1d((0, N - k.shape[-1]), 0)(k)
if N <= max_m:
        mat = torch.view_as_complex(dft_matrix(N))
        return torch.einsum('... n o, ... o -> ... n', mat, k) # (..., N)
n = N // max_m
m = max_m
return _cooley_tukey(k, n, m, dft_matrix, max_m, **kwargs)
class BlockFFT(OptimModule):
'''
Learnable Block FFT module.
Args:
        learn_dft_matrices (bool): If True, learn a different DFT matrix for lengths 2, 4, 8, and 16. If False, this module computes a normal FFT.
'''
def __init__(self, learn_dft_matrices=True, H=1, max_m=16, dft_lr=0.001, dropout=0, learn_additive=False, **block_fft_args):
super().__init__()
self.learn_dft_matrices = learn_dft_matrices
self.block_fft_args = block_fft_args
self.max_m=max_m
self.drop = torch.nn.Dropout(p=dropout)
self.learn_additive=learn_additive
# get the powers of 2 up to max_m
assert math.log(max_m, 2).is_integer(), 'max_m must be a power of 2'
self.powers = [ 2 ** (i + 1) for i in range(int(math.log(max_m, 2))) ]
if learn_dft_matrices:
assert dft_lr>0,"If learn_dft_matrices=True dft_lr must be positive"
self.dft_matrices = nn.ParameterList()
for n in self.powers:
setattr(self,f"mat_{n}",nn.Parameter(
0.01 * torch.randn(H, n, n, 2) if self.learn_additive
else ref_dft_matrix(n, H=H),
requires_grad=True))
self.register(f"mat_{n}",getattr(self,f"mat_{n}"),dft_lr)
self.dft_matrices.append(getattr(self,"mat_{}".format(n)))
def compute_dft_matrix(self, n):
if not self.learn_dft_matrices:
return ref_dft_matrix(n)
else:
assert n in self.powers
if self.learn_additive:
mat = ref_dft_matrix(n)
return mat + self.drop(self.dft_matrices[int(math.log(n, 2) - 1)])
else:
return self.drop(self.dft_matrices[int(math.log(n, 2) - 1)])
def forward(self, x, N,forward=True):
'''Compute an FFT (forward=True) or iFFT (forward=False) of length N over x.'''
if forward:
return block_fft(x, N, dft_matrix=self.compute_dft_matrix, **self.block_fft_args)
else:
return (1/(N))*torch.conj(block_fft(torch.conj(x), N, dft_matrix=self.compute_dft_matrix, **self.block_fft_args))
if __name__ == "__main__":
B = 128
H = 29
N = 8192
n = 2
m = 8
    k = torch.randn(B, H, N).to(torch.complex64).cuda()  # ref_dft_matrix and the twiddle factors are built on CUDA, so k must live there too
print(f'(B, H, N) = ({B}, {H}, {N})')
# test FFT
k_f = block_fft(k, N)
k_f_ref = torch.fft.fft(k, N)
print('L-inf error in FFT: ', torch.max(torch.abs(k_f - k_f_ref)).item())
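    # Added check (not part of the original file): forward/inverse round trip through the
    # BlockFFT module should recover the input (assumes a CUDA device, like the test above).
    blk = BlockFFT(learn_dft_matrices=False)
    k_rt = blk(blk(k, N, forward=True), N, forward=False)
    print('L-inf error in iFFT round trip: ', torch.max(torch.abs(k_rt - k)).item())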
|
hyena-dna-main
|
src/models/sequence/block_fft.py
|
from .base import SequenceModule, TransposedModule
from .model import SequenceModel
from .ff import FF
|
hyena-dna-main
|
src/models/sequence/__init__.py
|
from functools import partial
import torch
import torch.nn as nn
from flash_attn.utils.generation import GenerationMixin
from flash_attn.utils.distributed import sync_shared_params
try:
from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
ColumnParallelLinear = None
# grab all functions / modules from long_conv_lm.py
from src.models.sequence.long_conv_lm import LMBackbone
from src.models.sequence.long_conv_lm import _init_weights
class DNAEmbeddingModel(nn.Module, GenerationMixin):
"""DNA Embedding Model, which is the same as ConvLMHeadModel (in long_conv_lm.py), except no decoder head, we just pass back the hidden states for downstream tasks."""
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1, dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,
fused_mlp=False, fused_dropout_add_ln=False, residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, sequence_parallel=True,
device=None, dtype=None, return_hidden_state=False, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model # for decoder
self.process_group = process_group
self.return_hidden_state = return_hidden_state
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
process_group=process_group,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
dropout_cls=dropout_cls, layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln, residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
**factory_kwargs, **kwargs
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError('fused_dense_lib is not installed')
self.lm_head = ColumnParallelLinear(
d_model, vocab_size, process_group, bias=False,
sequence_parallel=sequence_parallel, **factory_kwargs
)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids,
inference_params=inference_params)
# we only need the last hidden state for embeddings (decoder head will predict classification task)
return hidden_states, None
@property
def d_output(self):
"""Model /embedding dimension, used for decoder mapping.
"""
if getattr(self, "d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_output")
return self.d_model
def load_backbone(model, state_dict, freeze_backbone=False, ignore_head=True):
"""
    Modifies state dict loading with a custom function. This is necessary because the head of
    an LM outputs logits for the vocab, but we just need the embeddings for downstream tasks.
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
return:
state_dict: dict, update with inflated weights
"""
# consumes prefix from pretrained model, if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, "model."
)
model_new_params_dict = model.state_dict()
updated_model_state_dict = {}
# loop through scratch model keys (pretrained may have extra stuff)
for key in sorted(model_new_params_dict.keys()):
loaded_params = state_dict.get(key, None)
# make sure key is in the loaded params first, if not, then print it out
if loaded_params is None:
# This should never happen, it should be there!
print("Missing key in pretrained model!", key)
raise Exception
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, load from scratch", key)
# using scratch by default, nothing needed
used_params = model_new_params_dict[key]
elif "decoder" in key:
print("found decoder key / parameter, load from scratch", key)
used_params = model_new_params_dict[key]
else:
print('key: shape MATCH, loading', key) # load matched weights
used_params = loaded_params
        # we need to pass back a state dict with the 'model.' prefix
key_with_prefix = 'model.' + key
updated_model_state_dict[key_with_prefix] = used_params
if freeze_backbone:
print("freezing model backbone params!")
# note, decoder not included in backbone
for name, param in model.named_parameters():
param.requires_grad = False
# we have updated the new model state dict with pretrained now
return updated_model_state_dict
|
hyena-dna-main
|
src/models/sequence/dna_embedding.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import opt_einsum as oe
optimized = True
if optimized:
contract = oe.contract
else:
contract = torch.einsum
from src.models.nn import LinearActivation, Activation, DropoutNd
from src.models.sequence.block_fft import BlockFFT
from src.models.sequence.long_conv_kernel import LongConvKernel
class LongConv(nn.Module):
def __init__(
self,
d_model,
l_max=1024,
channels=1,
bidirectional=False,
# Arguments for position-wise feedforward components
activation='gelu', # activation between conv and FF
postact='glu', # activation after FF
initializer=None, # initializer on FF
weight_norm=False, # weight normalization on FF
dropout=0.0, tie_dropout=False,
transposed=True, # axis ordering (B, L, D) or (B, D, L)
verbose=False,
block_fft_conv=False, # replace the FFT conv with Monarch blocks
block_fft_conv_args={},
# SSM Kernel arguments
**kernel_args,
):
"""
d_state: the dimension of the state, also denoted by N
l_max: the maximum kernel length, also denoted by L
channels: can be interpreted as a number of "heads"; the SSM is a map from a 1-dim to C-dim sequence. It's not recommended to change this unless desperate for things to tune; instead, increase d_model for larger models
bidirectional: if True, convolution kernel will be two-sided
Position-wise feedforward components:
--------------------
activation: activation in between SS and FF
postact: activation after FF ('id' for no activation, None to remove FF layer)
initializer: initializer on FF
weight_norm: weight normalization on FF
dropout: standard dropout argument. tie_dropout=True ties the dropout mask across the sequence length, emulating nn.Dropout1d
Other arguments:
--------------------
transposed: choose backbone axis ordering of (B, L, H) (if False) or (B, H, L) (if True) [B=batch size, L=sequence length, H=hidden dimension]
"""
super().__init__()
if verbose:
import src.utils.train
log = src.utils.train.get_logger(__name__)
log.info(f"Constructing Long Conv (H, L) = ({d_model}, {l_max})")
self.d_model = d_model
self.H = d_model
self.L = l_max
self.bidirectional = bidirectional
self.channels = channels
self.transposed = transposed
self.block_fft_conv = block_fft_conv
self.block_fft_conv_args = block_fft_conv_args
self.D = nn.Parameter(torch.randn(channels, self.H))
if self.bidirectional:
channels *= 2
# SSM Kernel
self.kernel = LongConvKernel(self.H, L=self.L, channels=channels, verbose=verbose, **kernel_args)
        if self.block_fft_conv:
            # Whether to also use the learnable BlockFFT for the inverse transform;
            # referenced in forward() but never set in the original, so default to False.
            self.learn_ifft = self.block_fft_conv_args.pop('learn_ifft', False)
            self.block_fft_u = BlockFFT(**self.block_fft_conv_args)
            self.block_fft_k = BlockFFT(**self.block_fft_conv_args)
# Pointwise
self.activation = Activation(activation)
# dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout # Broken in torch==1.11
dropout_fn = DropoutNd if tie_dropout else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
if postact is None:
self.output_linear = nn.Identity()
else:
self.output_linear = LinearActivation(
self.d_model * self.channels,
self.d_model,
# self.H*self.channels,
# self.d_model*(1 if self.gate is None else self.gate),
transposed=self.transposed,
initializer=initializer,
activation=postact,
activate=True,
weight_norm=weight_norm,
)
def forward(self, u, state=None, rate=1.0, lengths=None, **kwargs): # absorbs return_output and transformer src mask
"""
u: (B H L) if self.transposed else (B L H)
state: (H N) never needed, remnant from state spaces repo
Returns: same shape as u
"""
if not self.transposed: u = u.transpose(-1, -2)
L = u.size(-1)
# Mask out padding tokens
# TODO handle option for mask - instead of lengths, which assumes suffix padding
if isinstance(lengths, int):
if lengths != L:
lengths = torch.tensor(lengths, dtype=torch.long, device=u.device)
else:
lengths = None
if lengths is not None:
assert isinstance(lengths, torch.Tensor) and lengths.ndim == 1 and lengths.size(0) in [1, u.size(0)]
mask = torch.where(torch.arange(L, device=lengths.device) < lengths[:, None, None], 1., 0.)
u = u * mask
# Compute SS Kernel
L_kernel = L if self.L is None else min(L, round(self.L / rate))
k, _ = self.kernel(L=L_kernel, rate=rate, state=state) # (C H L) (B C H L)
# Convolution
if self.bidirectional:
k0, k1 = rearrange(k, '(s c) h l -> s c h l', s=2)
k = F.pad(k0, (0, L)) \
+ F.pad(k1.flip(-1), (L, 0))
if self.block_fft_conv:
k_f = self.block_fft_k(k.to(torch.complex64), N=L_kernel+L) # (C H L)
u_f = self.block_fft_u(u.to(torch.complex64), N=L_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', u_f, k_f)
            if self.block_fft_conv_args.get("learn_ifft", False):  # use the learned BlockFFT for the inverse transform
y = self.block_fft_u(y_f, N=L_kernel+L,forward=False).real[..., :L]
else:
y = torch.fft.ifft(y_f, n=L_kernel+L, dim=-1).real[..., :L] # (B C H L)
else:
k_f = torch.fft.rfft(k, n=L_kernel+L) # (C H L)
u_f = torch.fft.rfft(u, n=L_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', u_f, k_f)
y = torch.fft.irfft(y_f, n=L_kernel+L)[..., :L] # (B C H L)
# Compute skip connection
y = y + contract('bhl,ch->bchl', u, self.D)
# Reshape to flatten channels
y = rearrange(y, '... c h l -> ... (c h) l')
if not self.transposed: y = y.transpose(-1, -2)
y = self.activation(y)
y = self.dropout(y)
y = self.output_linear(y)
return y, None
@property
def d_state(self):
return self.H
@property
def d_output(self):
return self.d_model
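if __name__ == "__main__":
    # Minimal smoke-test sketch (assumes the repo's src.* modules and their
    # dependencies, e.g. opt_einsum, are importable). Shapes follow the docstring
    # above: with transposed=True the layer maps (B, H, L) -> (B, H, L).
    layer = LongConv(d_model=64, l_max=128, transposed=True)
    u = torch.randn(2, 64, 128)   # (B, H, L)
    y, _ = layer(u)
    print(y.shape)                # expected: torch.Size([2, 64, 128])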
|
hyena-dna-main
|
src/models/sequence/long_conv.py
|
import copy
import math
import re
from functools import partial
from collections import namedtuple, OrderedDict
from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from einops import rearrange
from flash_attn.modules.mha import MHA, ParallelMHA
from flash_attn.modules.mlp import Mlp, FusedMLP, ParallelFusedMLP
from flash_attn.modules.block import Block
from flash_attn.modules.embedding import GPT2Embeddings, ParallelGPT2Embeddings
from flash_attn.utils.generation import GenerationMixin
from flash_attn.utils.distributed import sync_shared_params, all_gather_raw
try:
from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
ColumnParallelLinear = None
try:
from flash_attn.ops.layer_norm import dropout_add_layer_norm
except ImportError:
dropout_add_layer_norm = None
from src.utils import instantiate
import src.utils.registry as registry
class CheckpointedModule(torch.nn.Module):
def __init__(self, layer):
super().__init__()
self.layer = layer
def forward(self, x):
return checkpoint(self.layer, x)
def create_mixer_cls(
layer=None,
process_group=None,
attn_layer_idx=None,
attn_cfg=None,
layer_idx=None,
sequence_parallel=True,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
parallel_kwargs = (
{"process_group": process_group, "sequence_parallel": sequence_parallel}
if process_group is not None
else {}
)
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop("causal", True)
fused_bias_fc = (
False if attn_cfg is None else attn_cfg.get("fused_bias_fc", False)
)
if not fused_bias_fc:
assert process_group is None, "TensorParallel MHA requires fused_bias_fc"
mha_cls = MHA if process_group is None else ParallelMHA
# ParallelMHA doesn't take 'fused_bias_fc', it is assumed that we fuse matmul + bias
if process_group is not None:
attn_cfg = copy.deepcopy(attn_cfg) # Don't modify the original cfg
attn_cfg.pop("fused_bias_fc", None)
mixer_cls = partial(
mha_cls,
causal=causal,
layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),
**parallel_kwargs,
**factory_kwargs,
)
else:
fused_bias_fc = False if layer is None else layer.get("fused_bias_fc", False)
if process_group is not None:
assert fused_bias_fc, "TensorParallel SSM requires fused_bias_fc"
mixer_cls = instantiate(
registry.layer,
layer,
partial=True,
layer_idx=layer_idx,
**factory_kwargs,
**parallel_kwargs,
)
# mixer_cls = partial(ssm_cls, layer_idx=layer_idx,
# **(ssm_cfg if ssm_cfg is not None else {}),
# **parallel_kwargs, **factory_kwargs)
return mixer_cls
def create_mlp_cls(
d_model,
d_inner=None,
process_group=None,
fused_mlp=False,
sequence_parallel=True,
identity_mlp=False,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
if process_group is not None:
assert fused_mlp, "Tensor Parallel is only implemented for FusedMLP"
if not fused_mlp and not identity_mlp:
mlp_cls = partial(
Mlp,
hidden_features=inner_dim,
activation=partial(F.gelu, approximate="tanh"),
**factory_kwargs,
)
elif fused_mlp:
mlp_cls = FusedMLP if process_group is None else ParallelFusedMLP
parallel_kwargs = (
{"process_group": process_group, "sequence_parallel": sequence_parallel}
if process_group is not None
else {}
)
mlp_cls = partial(
mlp_cls, hidden_features=inner_dim, **parallel_kwargs, **factory_kwargs
)
else:
mlp_cls = nn.Identity
return mlp_cls
def create_block(
d_model,
d_inner=None,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
layer_norm_epsilon=1e-5,
resid_dropout1=0.0,
resid_dropout2=0.0,
residual_in_fp32=False,
fused_mlp=False,
identity_mlp=False,
fused_dropout_add_ln=False,
layer_idx=None,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
mixer_cls = create_mixer_cls(
layer=layer,
process_group=process_group,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
layer_idx=layer_idx,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
mlp_cls = create_mlp_cls(
d_model,
d_inner=d_inner,
process_group=process_group,
fused_mlp=fused_mlp,
identity_mlp=identity_mlp,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(
d_model,
mixer_cls,
mlp_cls,
norm_cls=norm_cls,
prenorm=True,
resid_dropout1=resid_dropout1,
resid_dropout2=resid_dropout2,
fused_dropout_add_ln=fused_dropout_add_ln,
residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel and process_group is not None,
mark_shared_params=process_group is not None,
)
block.layer_idx = layer_idx
if checkpoint_mlp:
block.mlp = CheckpointedModule(block.mlp)
if checkpoint_mixer:
block.mixer = CheckpointedModule(block.mixer)
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(
module,
n_layer,
initializer_range=0.02,
rescale_prenorm_residual=True,
glu_act=False,
):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(
p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer)
)
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(
p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer)
)
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(
p[: out_features // 2],
mean=0.0,
std=initializer_range / math.sqrt(2 * n_layer) * 2,
)
class LMBackbone(nn.Module):
def __init__(
self,
d_model: int,
n_layer: int,
d_inner: int,
vocab_size: int,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
max_position_embeddings=0,
resid_dropout: float = 0.0,
embed_dropout: float = 0.1,
dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5,
initializer_cfg=None,
fused_mlp=False,
identity_mlp=False,
fused_dropout_add_ln=False,
residual_in_fp32=False,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
**kwargs,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.process_group = process_group
self.sequence_parallel = sequence_parallel
self.residual_in_fp32 = residual_in_fp32
if process_group is None:
self.embeddings = GPT2Embeddings(
d_model, vocab_size, max_position_embeddings, **factory_kwargs
)
else:
self.embeddings = ParallelGPT2Embeddings(
d_model,
vocab_size,
max_position_embeddings,
process_group=process_group,
sequence_parallel=self.sequence_parallel,
**factory_kwargs,
)
# We change the order of dropout, residual and layer norm:
# Instead of LN -> Attn / MLP -> Dropout -> Add, we do:
# Dropout -> Add -> LN -> Attn / MLP, returning both the residual branch (output of Add) and
# the main branch (output of MLP). The model definition is unchanged, but the mapping of the
# nn.Dropout probabilities are changed.
# This is for performance reason: we can fuse dropout + add + layer_norm.
self.fused_dropout_add_ln = fused_dropout_add_ln
if self.fused_dropout_add_ln and dropout_add_layer_norm is None:
raise ImportError("dropout_add_layer_norm is not installed")
self.layers = nn.ModuleList(
[
create_block(
d_model,
d_inner=d_inner,
process_group=process_group,
layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout,
residual_in_fp32=residual_in_fp32,
fused_mlp=fused_mlp,
identity_mlp=identity_mlp,
fused_dropout_add_ln=fused_dropout_add_ln,
layer_idx=i,
sequence_parallel=self.sequence_parallel,
checkpoint_mlp=checkpoint_mlp,
checkpoint_mixer=checkpoint_mixer,
**factory_kwargs,
)
for i in range(n_layer)
]
)
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
if process_group is not None:
for p in self.ln_f.parameters():
# Mark the norm parameters as "shared_params" so that we sync their values at init.
p._shared_params = True
# Mark the norm params as "sequence_parallel" so we run all-reduce on their grads.
if self.sequence_parallel:
p._sequence_parallel = True
self.apply(
partial(
_init_weights,
n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {}),
)
)
self.tie_weights()
def tie_weights(self):
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None):
# If using Tensor Parallel with sequence parallel, we combine the batch and the seqlen
# dimensions so that we can split on it easily, in case of small batch size.
# Only the attention/SSM layers need to know the seqlen.
embedding_kwargs = (
{"combine_batch_seqlen_dim": True}
if self.process_group is not None and self.sequence_parallel
else {}
)
hidden_states = self.embeddings(
input_ids, position_ids=position_ids, **embedding_kwargs
)
residual = None
mixer_kwargs = (
{"seqlen": input_ids.shape[1]}
if self.process_group is not None and self.sequence_parallel
else {}
)
if inference_params is not None:
mixer_kwargs["inference_params"] = inference_params
for layer in self.layers:
hidden_states, residual = layer(
hidden_states, residual, mixer_kwargs=mixer_kwargs
)
if not self.fused_dropout_add_ln:
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
else:
# Set prenorm=False here since we don't need the residual
hidden_states = dropout_add_layer_norm(
hidden_states,
residual,
self.ln_f.weight,
self.ln_f.bias,
self.drop_f.p if self.training else 0.0,
self.ln_f.eps,
prenorm=False,
residual_in_fp32=self.residual_in_fp32,
)
return hidden_states
class ConvLMHeadModel(nn.Module, GenerationMixin):
def __init__(
self,
d_model: int,
n_layer: int,
d_inner: int,
vocab_size: int,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
max_position_embeddings=0,
resid_dropout: float = 0.0,
embed_dropout: float = 0.1,
dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5,
initializer_cfg=None,
fused_mlp=False,
fused_dropout_add_ln=False,
residual_in_fp32=False,
pad_vocab_size_multiple: int = 1,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
**kwargs,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.process_group = process_group
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (
vocab_size % pad_vocab_size_multiple
)
self.backbone = LMBackbone(
d_model=d_model,
n_layer=n_layer,
d_inner=d_inner,
vocab_size=vocab_size,
process_group=process_group,
layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout,
embed_dropout=embed_dropout,
dropout_cls=dropout_cls,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg,
fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln,
residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
checkpoint_mlp=checkpoint_mlp,
checkpoint_mixer=checkpoint_mixer,
**factory_kwargs,
**kwargs,
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError("fused_dense_lib is not installed")
self.lm_head = ColumnParallelLinear(
d_model,
vocab_size,
process_group,
bias=False,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
# Initialize weights and apply final processing
self.apply(
partial(
_init_weights,
n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {}),
)
)
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(
self, input_ids, position_ids=None, inference_params=None, state=None
): # state for the repo interface
hidden_states = self.backbone(
input_ids, position_ids=position_ids, inference_params=inference_params
)
lm_logits = self.lm_head(hidden_states)
# During inference, we want the full logit for sampling
if ColumnParallelLinear is not None and inference_params is not None:
if isinstance(self.lm_head, ColumnParallelLinear):
lm_logits, _ = all_gather_raw(lm_logits, self.lm_head.process_group)
lm_logits = rearrange(
lm_logits, "(n b) s d -> b s (n d)", b=hidden_states.shape[0]
)
CausalLMOutput = namedtuple("CausalLMOutput", ["logits"])
return CausalLMOutput(logits=lm_logits), None
class DNAEmbeddingModel(nn.Module, GenerationMixin):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1, dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,
fused_mlp=False, fused_dropout_add_ln=False, residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, sequence_parallel=True,
device=None, dtype=None, return_hidden_state=False, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model # for decoder
self.process_group = process_group
self.return_hidden_state = return_hidden_state
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
process_group=process_group,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
dropout_cls=dropout_cls, layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln, residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
**factory_kwargs, **kwargs
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError('fused_dense_lib is not installed')
self.lm_head = ColumnParallelLinear(
d_model, vocab_size, process_group, bias=False,
sequence_parallel=sequence_parallel, **factory_kwargs
)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids,
inference_params=inference_params)
# we only need the last hidden state for embeddings (decoder head will predict classification task)
return hidden_states, None
@property
def d_output(self):
"""Model /embedding dimension, used for decoder mapping.
"""
if getattr(self, "d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_output")
return self.d_model
def load_backbone(model, state_dict, freeze_backbone=False, ignore_head=True):
"""
Modifies state dict loading with custom function. Every layer in new model will be
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
return:
state_dict: dict, update with inflated weights
"""
# consumes prefix from pretrained model, if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, "model."
)
model_new_params_dict = model.state_dict()
updated_model_state_dict = {}
# loop through scratch model keys (pretrained may have extra stuff)
for key in sorted(model_new_params_dict.keys()):
loaded_params = state_dict.get(key, None)
# make sure key is in the loaded params first, if not, then print it out
        if loaded_params is None:
            # this should never happen: every key of the scratch model should exist in the pretrained checkpoint
            print("Missing key in pretrained model!", key)
            raise KeyError(f"missing key '{key}' in the pretrained state dict")
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, load from scratch", key)
# using scratch by default, nothing needed
used_params = model_new_params_dict[key]
elif "decoder" in key:
print("found decoder key / parameter, load from scratch", key)
used_params = model_new_params_dict[key]
else:
print('key: shape MATCH, loading', key) # load matched weights
used_params = loaded_params
        # we need to pass back a state dict with the 'model.' prefix!
key_with_prefix = 'model.' + key
updated_model_state_dict[key_with_prefix] = used_params
if freeze_backbone:
print("freezing model backbone params!")
# note, decoder not included in backbone
for name, param in model.named_parameters():
param.requires_grad = False
# we have updated the new model state dict with pretrained now
return updated_model_state_dict
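# Usage note (sketch): the returned dict re-keys every parameter under a 'model.'
# prefix (e.g. 'backbone.ln_f.weight' -> 'model.backbone.ln_f.weight'), matching
# the training wrapper that stores the network under a `model` attribute. With
# freeze_backbone=True, requires_grad is disabled on all parameters of the scratch
# model; the downstream decoder is a separate module and stays trainable.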
def shard_state_dict_tp(state_dict, world_size, rank, pad_vocab_size_multiple=1):
"""Convert the state_dict of a standard SSM model to the state_dict of a SSM model
with tensor parallel.
"""
layer_idx_match = [
re.search(r"backbone\.layers\.(\d+)\.", k) for k in state_dict.keys()
]
num_hidden_layers = len(set(m.group(1) for m in layer_idx_match if m is not None))
vocab_size = state_dict["backbone.embeddings.word_embeddings.weight"].shape[0]
inner_dim, hidden_size = state_dict["backbone.layers.0.mlp.fc1.weight"].shape
vocab_size = (
math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple
)
assert vocab_size % world_size == 0
assert hidden_size % world_size == 0
assert inner_dim % world_size == 0
def shard_dim(state_dict, key, dim=0):
x = state_dict[key]
dimension = x.shape[dim] // world_size
state_dict[key] = x.narrow(dim, rank * dimension, dimension)
def shard_qkv_headdim(state_dict, key):
x = rearrange(state_dict[key], "(three d) ... -> three d ...", three=3)
dim = x.shape[1] // world_size
state_dict[key] = rearrange(
x[:, rank * dim : (rank + 1) * dim], "three d ... -> (three d) ..."
)
shard_dim(state_dict, "backbone.embeddings.word_embeddings.weight", 0)
if "lm_head.weight" in state_dict:
shard_dim(state_dict, "lm_head.weight", 0)
if "backbone.embeddings.position_embeddings.weight" in state_dict:
shard_dim(state_dict, "backbone.embeddings.position_embeddings.weight", -1)
for i in range(num_hidden_layers):
shard_qkv_headdim(state_dict, f"backbone.layers.{i}.mixer.Wqkv.weight")
shard_qkv_headdim(state_dict, f"backbone.layers.{i}.mixer.Wqkv.bias")
shard_dim(state_dict, f"backbone.layers.{i}.mixer.out_proj.weight", -1)
if rank != 0:
state_dict.pop(f"backbone.layers.{i}.mixer.out_proj.bias")
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc1.weight", 0)
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc1.bias", 0)
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc2.weight", -1)
if rank != 0:
state_dict.pop(f"backbone.layers.{i}.mlp.fc2.bias")
if f"backbone.layers.{i}.mixer.kernel.kernel.B" in state_dict:
for name in [
"D",
"ssm_k_D",
"kernel.kernel.B",
"kernel.kernel.inv_A_real",
"kernel.kernel.A_imag",
"ssm_k_kernel.kernel.B",
"kernel.kernel.log_dt",
]:
if f"backbone.layers.{i}.mixer.{name}" in state_dict:
shard_dim(state_dict, f"backbone.layers.{i}.mixer.{name}", 0)
for name in ["kernel.kernel.C", "ssm_k_kernel.kernel.C"]:
if f"backbone.layers.{i}.mixer.{name}" in state_dict:
shard_dim(state_dict, f"backbone.layers.{i}.mixer.{name}", 1)
return state_dict
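if __name__ == "__main__":
    # Minimal smoke-test sketch for shard_state_dict_tp: shard a toy state dict
    # across world_size=2 tensor-parallel ranks. Only keys the function touches
    # are included; the shapes are arbitrary but divisible by the world size.
    _sd = {
        "backbone.embeddings.word_embeddings.weight": torch.randn(8, 4),
        "backbone.layers.0.mixer.Wqkv.weight": torch.randn(3 * 4, 4),
        "backbone.layers.0.mixer.Wqkv.bias": torch.randn(3 * 4),
        "backbone.layers.0.mixer.out_proj.weight": torch.randn(4, 4),
        "backbone.layers.0.mixer.out_proj.bias": torch.randn(4),
        "backbone.layers.0.mlp.fc1.weight": torch.randn(16, 4),
        "backbone.layers.0.mlp.fc1.bias": torch.randn(16),
        "backbone.layers.0.mlp.fc2.weight": torch.randn(4, 16),
        "backbone.layers.0.mlp.fc2.bias": torch.randn(4),
    }
    _shard = shard_state_dict_tp(dict(_sd), world_size=2, rank=0)
    # the embedding rows are split in half along dim 0:
    print(_shard["backbone.embeddings.word_embeddings.weight"].shape)  # (4, 4)
    # Wqkv is split per projection (q, k, v separately), halving each one:
    print(_shard["backbone.layers.0.mixer.Wqkv.weight"].shape)         # (6, 4)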
|
hyena-dna-main
|
src/models/sequence/long_conv_lm.py
|
""" Isotropic deep sequence model backbone, in the style of ResNets / Transformers.
The SequenceModel class implements a generic (batch, length, d_input) -> (batch, length, d_output) transformation
"""
from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from src.utils.config import to_list, to_dict
from src.models.sequence.block import SequenceResidualBlock
from src.models.sequence.base import SequenceModule
from src.models.nn.components import Normalization, DropoutNd
class SequenceModel(SequenceModule):
def __init__(
self,
d_model, # Resize input (useful for deep models with residuals)
n_layers=1, # Number of layers
transposed=False, # Transpose inputs so each layer receives (batch, dim, length)
dropout=0.0, # Dropout parameter applied on every residual and every layer
tie_dropout=False, # Tie dropout mask across sequence like nn.Dropout1d/nn.Dropout2d
prenorm=True, # Pre-norm vs. post-norm
n_repeat=1, # Each layer is repeated n times per stage before applying pooling
layer=None, # Layer config, must be specified
residual=None, # Residual config
norm=None, # Normalization config (e.g. layer vs batch)
pool=None, # Config for pooling layer per stage
track_norms=True, # Log norms of each layer output
dropinp=0.0, # Input dropout
):
super().__init__()
# Save arguments needed for forward pass
self.d_model = d_model
self.transposed = transposed
self.track_norms = track_norms
# Input dropout (not really used)
dropout_fn = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropinp) if dropinp > 0.0 else nn.Identity()
layer = to_list(layer, recursive=False)
# Some special arguments are passed into each layer
for _layer in layer:
# If layers don't specify dropout, add it
if _layer.get('dropout', None) is None:
_layer['dropout'] = dropout
# Ensure all layers are shaped the same way
_layer['transposed'] = transposed
# Duplicate layers
layers = layer * n_layers * n_repeat
# Instantiate layers
_layers = []
d = d_model
for l, layer in enumerate(layers):
# Pool at the end of every n_repeat blocks
pool_cfg = pool if (l+1) % n_repeat == 0 else None
block = SequenceResidualBlock(d, l+1, prenorm=prenorm, dropout=dropout, tie_dropout=tie_dropout, transposed=transposed, layer=layer, residual=residual, norm=norm, pool=pool_cfg)
_layers.append(block)
d = block.d_output
self.d_output = d
self.layers = nn.ModuleList(_layers)
if prenorm:
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(self.d_output, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(self.d_output, transposed=self.transposed, **norm)
else:
self.norm = nn.Identity()
def forward(self, inputs, *args, state=None, **kwargs):
""" Inputs assumed to be (batch, sequence, dim) """
if self.transposed: inputs = rearrange(inputs, 'b ... d -> b d ...')
inputs = self.drop(inputs)
# Track norms
if self.track_norms: output_norms = [torch.mean(inputs.detach() ** 2)]
# Apply layers
outputs = inputs
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
outputs, state = layer(outputs, *args, state=prev_state, **kwargs)
next_states.append(state)
if self.track_norms: output_norms.append(torch.mean(outputs.detach() ** 2))
if self.norm is not None: outputs = self.norm(outputs)
if self.transposed: outputs = rearrange(outputs, 'b d ... -> b ... d')
if self.track_norms:
metrics = to_dict(output_norms, recursive=False)
self.metrics = {f'norm/{i}': v for i, v in metrics.items()}
return outputs, next_states
@property
def d_state(self):
d_states = [layer.d_state for layer in self.layers]
return sum([d for d in d_states if d is not None])
@property
def state_to_tensor(self):
# Slightly hacky way to implement this in a curried manner (so that the function can be extracted from an instance)
# Somewhat more sound may be to turn this into a @staticmethod and grab subclasses using hydra.utils.get_class
def fn(state):
x = [_layer.state_to_tensor(_state) for (_layer, _state) in zip(self.layers, state)]
x = [_x for _x in x if _x is not None]
return torch.cat( x, dim=-1)
return fn
def default_state(self, *batch_shape, device=None):
return [layer.default_state(*batch_shape, device=device) for layer in self.layers]
def step(self, x, state, **kwargs):
# Apply layers
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
x, state = layer.step(x, state=prev_state, **kwargs)
next_states.append(state)
x = self.norm(x)
return x, next_states
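# Usage note (sketch; the layer name below is an assumption about the repo's
# registry): `layer`, `residual`, `norm`, and `pool` are config dicts resolved
# through src.utils.registry, e.g. layer={'_name_': 'hyena', 'l_max': 1024}.
# The model threads one state entry per block, so the `next_states` returned by
# forward()/step() can be passed back in as `state` for recurrent or chunked use.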
|
hyena-dna-main
|
src/models/sequence/model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat
from src.utils.train import OptimModule
class LongConvKernel(OptimModule):
def __init__(
self,
H,
L,
channels=1,
learning_rate=None,
lam=0.1,
causal=True,
kernel_dropout=0,
weight_init="random",
use_ma_smoothing = False,
ma_window_len = 7,
smooth_freq = False,
**kwargs
):
super().__init__()
self.drop = torch.nn.Dropout(p=kernel_dropout)
self.H = H
self.weight_init = weight_init
self.causal = causal
self.L = L*2 if not causal else L
self.channels = channels
self.lam = lam
self.kernel = torch.nn.Parameter(self._parameter_initialization()) #(c,H,L)
self.register("kernel", self.kernel, learning_rate)
self.use_ma_smoothing=use_ma_smoothing
self.smooth_freq = smooth_freq
self.ma_window_len = ma_window_len
if self.use_ma_smoothing:
if smooth_freq:
weight = torch.arange(ma_window_len, dtype = self.kernel.dtype)
weight = torch.exp(-0.5 * torch.abs(weight - ma_window_len // 2) ** 2)
weight = repeat(weight, 'l -> h1 h2 l', h1 = self.H, h2 = 1)
weight = weight.type(torch.fft.rfft(self.kernel).dtype)
self.smooth_weight = weight
else:
self.ma_window_len = ma_window_len
assert self.ma_window_len%2!=0, "window size must be odd"
padding = (self.ma_window_len//2)
self.smooth = torch.nn.AvgPool1d(kernel_size=self.ma_window_len,stride=1,padding=padding)
def _parameter_initialization(self):
if self.weight_init=="random":
return torch.randn(self.channels, self.H, self.L) * 0.002
elif self.weight_init=="double_exp":
K = torch.randn(self.channels, self.H, self.L,dtype=torch.float32) * 0.02
double_exp = torch.zeros((self.H,self.L),dtype=torch.float32)
for i in range(self.H):
for j in range(self.L):
double_exp[i,j] = torch.exp(-(j/self.L)*torch.pow(torch.tensor(int(self.H/2)),torch.tensor(i/self.H)))
K = torch.einsum("c h l, h l -> c h l",K,double_exp)
return K
else: raise NotImplementedError(f"{self.weight_init} is not valid")
def forward(self, **kwargs):
k = self.kernel
if self.use_ma_smoothing:
if self.smooth_freq:
k_f = torch.fft.rfft(k, dim=-1)
k_f = F.conv1d(k_f, self.smooth_weight.to(k_f.device), padding='same', groups=self.H)
k = torch.fft.irfft(k_f, dim=-1)
else:
k = self.smooth(k)
k = F.relu(torch.abs(k)-self.lam)*torch.sign(k)
k = self.drop(k)
return k, None
@property
def d_output(self):
return self.H
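if __name__ == "__main__":
    # Minimal smoke-test sketch (assumes src.utils.train.OptimModule is
    # importable). The kernel module returns a (channels, H, L) tensor of
    # convolution weights after optional smoothing and the soft-threshold above.
    kernel = LongConvKernel(H=8, L=32, channels=2)
    weights, _ = kernel()
    print(weights.shape)   # expected: torch.Size([2, 8, 32])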
|
hyena-dna-main
|
src/models/sequence/long_conv_kernel.py
|
import math
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from einops import rearrange, repeat
try:
from src.ops.fftconv import fftconv_ref, fftconv_func, fftconv_heads_ref
except ImportError:
fftconv_func = None
try:
from flash_attn.ops.fused_dense import FusedDense
except ImportError:
FusedDense = None
import src.utils.registry as registry
from src.utils.train import OptimModule
from src.utils.config import instantiate, auto_assign_attrs
from src.models.nn import Activation
class FFTConvFuncv2(torch.autograd.Function):
@staticmethod
def forward(ctx, u, k):
seqlen = u.shape[-1]
if len(u.shape) > 3:
k = k.unsqueeze(1)
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
ctx.save_for_backward(u_f, k_f)
return y
@staticmethod
def backward(ctx, dout):
u_f, k_f = ctx.saved_tensors
seqlen = dout.shape[-1]
fft_size = 2 * seqlen
dout_f = torch.fft.rfft(dout, n=fft_size)
du = torch.fft.irfft(dout_f * k_f.conj(), n=fft_size, norm="forward")[
..., :seqlen
]
dk = torch.fft.irfft(dout_f * u_f.conj(), n=fft_size, norm="forward")[
..., :seqlen
]
return du, dk.squeeze()
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3:
k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
out = y + u * D.unsqueeze(-1)
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, "b H -> b H 1")).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
@torch.jit.script
def mul_sum(q, y):
return (q * y).sum(dim=1)
class Sin(nn.Module):
def __init__(self, dim, w=10, train_freq=True):
super().__init__()
self.freq = (
nn.Parameter(w * torch.ones(1, dim))
if train_freq
else w * torch.ones(1, dim)
)
def forward(self, x):
return torch.sin(self.freq * x)
class PositionalEmbedding(OptimModule):
def __init__(self, emb_dim: int, seq_len: int, lr_pos_emb: float = 1e-5, **kwargs):
"""Complex exponential positional embeddings for Hyena filters."""
super().__init__()
self.seq_len = seq_len
        # The time embedding fed to the filters is normalized so that t_f = 1
t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
self.register("z", z, lr=lr_pos_emb)
self.register("t", t, lr=0.0)
def forward(self, L):
return self.z[:, :L], self.t[:, :L]
class ExponentialModulation(OptimModule):
def __init__(
self,
d_model,
fast_decay_pct=0.3,
slow_decay_pct=1.5,
target=1e-2,
modulation_lr=0.0,
shift: float = 0.0,
**kwargs,
):
super().__init__()
self.shift = shift
max_decay = math.log(target) / fast_decay_pct
min_decay = math.log(target) / slow_decay_pct
deltas = torch.linspace(min_decay, max_decay, d_model)[None, None]
self.register("deltas", deltas, lr=modulation_lr)
def forward(self, t, x):
decay = torch.exp(-t * self.deltas.abs())
x = x * (decay + self.shift)
return x
class HyenaFilter(OptimModule):
def __init__(
self,
d_model,
emb_dim=3, # dim of input to MLP, augments with positional encoding
order=16, # width of the implicit MLP
fused_fft_conv=False,
seq_len=1024,
lr=1e-3,
lr_pos_emb=1e-5,
dropout=0.0,
w=1, # frequency of periodic activations
wd=0, # weight decay of kernel parameters
bias=True,
num_inner_mlps=2,
linear_mixer=False,
modulate: bool = True,
normalized=False,
**kwargs,
):
"""
Implicit long filter with modulation.
Args:
d_model: number of channels in the input
            emb_dim: dimension of the positional encoding; (`emb_dim` - 1) // 2 is the number of frequency bands
order: width of the FFN
num_inner_mlps: number of inner linear layers inside filter MLP
Note:
filter_dropout is not implemented
"""
super().__init__()
auto_assign_attrs(
self, d_model=d_model, emb_dim=emb_dim, seq_len=seq_len, modulate=modulate
)
self.use_bias = bias
self.fused_fft_conv = fused_fft_conv
self.bias = nn.Parameter(torch.randn(self.d_model))
self.dropout = nn.Dropout(dropout)
act = Sin(dim=order, w=w)
assert (
emb_dim % 2 != 0 and emb_dim >= 3
), "emb_dim must be odd and greater or equal to 3 (time, sine and cosine)"
self.pos_emb = PositionalEmbedding(emb_dim, seq_len, lr_pos_emb)
# uses a variable number of inner linear layers
if linear_mixer is False:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter.append(nn.Linear(order, order))
self.implicit_filter.append(act)
# final linear layer
self.implicit_filter.append(nn.Linear(order, d_model, bias=False))
else:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, d_model, bias=False),
)
self.modulation = ExponentialModulation(d_model, **kwargs)
self.normalized = normalized
for c in self.implicit_filter.children():
for name, v in c.state_dict().items():
optim = {"weight_decay": wd, "lr": lr}
setattr(getattr(c, name), "_optim", optim)
def filter(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter(z)
if self.modulate:
h = self.modulation(t, h)
if self.normalized:
h = h / torch.norm(h, dim=-1, p=1, keepdim=True)
return h
def forward(self, x, L, k=None, bias=None, *args, **kwargs):
if k is None:
k = self.filter(L)
# Ensure compatibility with filters that return a tuple
k = k[0] if type(k) is tuple else k
if bias is None:
bias = self.bias
bias = bias if self.use_bias else 0 * bias
if self.fused_fft_conv:
bias = bias.to(dtype=torch.float32)
y = fftconv_func(
x,
k,
bias,
dropout_mask=None,
gelu=False,
force_fp16_output=torch.is_autocast_enabled(),
)
else:
y = fftconv_ref(x, k, bias, dropout_mask=None, gelu=False)
# y = (
# FFTConvFuncv2.apply(x, k.to(dtype=torch.float32))
# + bias.unsqueeze(-1) * x
# )
return y.to(dtype=x.dtype)
class HyenaOperator(nn.Module):
def __init__(
self,
d_model,
l_max,
order=2,
filter_order=64,
num_heads=1,
inner_factor=1,
num_blocks=1,
fused_bias_fc=False,
outer_mixing=False,
dropout=0.0,
filter_dropout=0.0,
filter_cls="hyena-filter",
post_order_ffn=False,
jit_filter=False,
short_filter_order=3,
activation="id",
return_state=False,
**filter_args,
):
r"""
Hyena operator described in the paper https://arxiv.org/pdf/2302.10866.pdf
Args:
d_model (int): Dimension of the input and output embeddings (width of the layer)
            l_max: (int): Maximum input sequence length (required)
order: (int): Depth of the Hyena recurrence. Defaults to 2
filter_order: (int): Width of the FFN parametrizing the implicit filter. Defaults to 64
num_heads: (int): Number of heads. Defaults to 1
inner_factor: (int): Width multiplier. Defaults to 1
num_blocks: (int): Number of blocks in sequence length. Defaults to 1
fused_bias_fc: (bool): Whether to use fused bias FC. Defaults to False
dropout: (float): Dropout probability. Defaults to 0.0
filter_dropout: (float): Dropout probability for the filter. Defaults to 0.0
post_order_ffn: (bool): Apply a dense layer between steps of the recurrence. Defaults to False
jit_filter: (bool): Whether JIT the implicit filter function. Defaults to False
short_filter_order: (int): Length of the explicit input convolutional filter. Defaults to 3
activation: (str): type of act between kernel output and FF (default identity)
return_state: (bool): whether to return a state
"""
super().__init__()
assert (
d_model % num_heads == 0
), f"Model dimension {d_model} must be divisible by num heads {num_heads}"
assert (
l_max % num_blocks == 0
), f"Maximum signal length {l_max} must be divisible by block dimension {num_blocks}"
block_dim = l_max // num_blocks
head_dim = d_model // num_heads
auto_assign_attrs(
self,
d_model=d_model,
order=order,
l_max=l_max,
num_heads=num_heads,
inner_factor=inner_factor,
block_dim=block_dim,
head_dim=head_dim,
filter_order=filter_order,
post_order_ffn=post_order_ffn,
short_filter_order=short_filter_order,
num_blocks=num_blocks,
filter_dropout=filter_dropout,
jit_filter=jit_filter,
outer_mixing=outer_mixing,
activation=activation,
return_state=return_state,
)
self.activation = Activation(activation)
self.dropout = nn.Dropout(dropout)
self.setup_projections(fused_bias_fc, inner_factor)
self.setup_filters(filter_cls, filter_args)
def setup_projections(self, fused_bias_fc, inner_factor):
"Initializes input and output projections (over the width dimension)"
if fused_bias_fc and FusedDense is None:
raise ImportError("fused_dense is not installed")
linear_cls = nn.Linear if not fused_bias_fc else FusedDense
self.out_proj = linear_cls(self.d_model * inner_factor, self.d_model)
self.in_proj = linear_cls(self.d_model, (self.order + 1) * self.d_model)
if self.post_order_ffn:
self.ord_proj_w = nn.Parameter(
torch.randn(self.order, self.num_heads, self.num_heads)
/ math.sqrt(self.head_dim)
)
def setup_filters(self, filter_cls, filter_args):
"Initializes the explicit and implicit filters"
assert self.order >= 2, f"Order must be at least 2, (got {self.order})"
total_width = self.d_model * self.inner_factor * (self.order + 1)
self.short_filter = nn.Conv1d(
in_channels=total_width,
out_channels=total_width,
kernel_size=self.short_filter_order,
groups=total_width,
padding=self.short_filter_order - 1,
)
filter_cls = instantiate(registry.layer, filter_cls, partial=True)
self.filter_fn = filter_cls(
self.head_dim * self.inner_factor * (self.order - 1),
order=self.filter_order,
seq_len=self.l_max,
channels=1,
dropout=self.filter_dropout,
**filter_args,
)
if self.jit_filter:
            self.filter_fn = torch.jit.script(self.filter_fn)
def recurrence(self, u, state):
"Fast inference mode via distilled recurrence"
raise NotImplementedError("Working on it!")
def forward(self, u, *args, **kwargs):
l = u.size(-2)
l_filter = min(l, self.l_max)
u = self.in_proj(u)
u = rearrange(u, "b l d -> b d l")
uc = self.short_filter(u)[..., :l_filter]
uc = rearrange(
uc,
"b (ho v) (z l) -> b ho v z l",
z=self.num_blocks,
ho=self.num_heads,
v=self.head_dim * (self.order + 1),
)
*x, v = uc.split(self.d_model, dim=2)
k = self.filter_fn.filter(l_filter)
# `c` is always 1 by default
k = rearrange(k, "c l (v o) -> c o v l", v=self.head_dim, o=self.order - 1)[0]
bias = rearrange(
self.filter_fn.bias, "(v o) -> o v", v=self.head_dim, o=self.order - 1
)
for o, x_i in enumerate(reversed(x[1:])):
if self.outer_mixing:
v = rearrange(v, "b h v z l -> b h 1 v z l")
v = self.dropout(v * rearrange(x_i, "b h v z l -> b h v 1 z l"))
v = v.sum(dim=2)
else:
v = self.dropout(v * x_i)
# the bias term is broadcasted. Last dimension (l) is handled by fftconv
v = self.filter_fn(v, l_filter, k=k[o], bias=bias[o, None, :, None])
if self.post_order_ffn:
w = self.ord_proj_w[o]
v = mul_sum(
rearrange(w, "h1 h2 -> 1 h1 h2 1 1 1"),
rearrange(v, "b h v z l -> b h 1 v z l"),
)
y = self.activation(
rearrange(
v * x[0],
"b h v z l -> b (z l) (h v)",
z=self.num_blocks,
h=self.num_heads,
)
)
y = self.out_proj(y)
if self.return_state:
return y, None
return y
@property
def d_output(self):
return self.d_model
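if __name__ == "__main__":
    # Minimal smoke-test sketch. Assumes the layer registry exposes the default
    # 'hyena-filter' entry used by setup_filters above and that the repo's
    # dependencies are importable. Input/output layout is (B, L, D).
    op = HyenaOperator(d_model=64, l_max=128)
    u = torch.randn(2, 128, 64)   # (B, L, D)
    y = op(u)                     # return_state=False by default
    print(y.shape)                # expected: torch.Size([2, 128, 64])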
|
hyena-dna-main
|
src/models/sequence/hyena.py
|
""" Implements a full residual block around a black box layer
Configurable options include:
normalization position: prenorm or postnorm
normalization type: batchnorm, layernorm etc.
subsampling/pooling
residual options: feedforward, residual, affine scalars, depth-dependent scaling, etc.
"""
from torch import nn
from functools import partial
import src.utils as utils
from src.models.nn.components import Normalization, StochasticDepth, DropoutNd
from src.models.sequence import SequenceModule
from src.models.sequence.pool import registry as pool_registry
from src.models.nn.residual import registry as residual_registry
import src.utils.registry as registry
class SequenceResidualBlock(SequenceModule):
def __init__(
self,
d_input,
i_layer=None, # Only needs to be passed into certain residuals like Decay
prenorm=True,
dropout=0.0,
tie_dropout=False,
transposed=False,
layer=None, # Config for black box module
residual=None, # Config for residual function
norm=None, # Config for normalization layer
pool=None,
drop_path=0.,
):
super().__init__()
self.i_layer = i_layer
self.d_input = d_input
self.layer = utils.instantiate(registry.layer, layer, d_input)
self.prenorm = prenorm
self.transposed = transposed
# Residual
# d_residual is the output dimension after residual
if residual is None:
self.residual = None
self.d_residual = self.layer.d_output
else:
self.residual = utils.instantiate(residual_registry, residual, i_layer, d_input, self.layer.d_output)
self.d_residual = self.residual.d_output
# Normalization
d_norm = d_input if self.prenorm else self.d_residual
# We don't use config to directly instantiate since Normalization has some special cases
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(d_norm, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(d_norm, transposed=self.transposed, **norm)
# Pool
self.pool = utils.instantiate(pool_registry, pool, self.d_residual, transposed=self.transposed)
# Dropout
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
# Stochastic depth
self.drop_path = StochasticDepth(drop_path, mode='row') if drop_path > 0.0 else nn.Identity()
@property
def d_output(self):
return self.pool.d_output if self.pool is not None else self.d_residual
@property
def d_state(self):
return self.layer.d_state
@property
def state_to_tensor(self):
return self.layer.state_to_tensor
def default_state(self, *args, **kwargs):
return self.layer.default_state(*args, **kwargs)
def forward(self, x, state=None, **kwargs):
y = x
# Pre-norm
if self.norm is not None and self.prenorm: y = self.norm(y)
# Black box layer
y, state = self.layer(y, state=state, **kwargs)
# Residual
if self.residual is not None: y = self.residual(x, self.drop_path(self.drop(y)), self.transposed)
# Post-norm
if self.norm is not None and not self.prenorm: y = self.norm(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
def step(self, x, state, **kwargs):
y = x
# Pre-norm
if self.norm is not None and self.prenorm:
y = self.norm.step(y)
# Black box layer
y, state = self.layer.step(y, state, **kwargs)
# Residual
if self.residual is not None: y = self.residual(x, y, transposed=False) # NOTE this would not work with concat residual function (catformer)
# Post-norm
if self.norm is not None and not self.prenorm:
y = self.norm.step(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
|
hyena-dna-main
|
src/models/sequence/block.py
|
"""Implements downsampling and upsampling on sequences."""
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.nn import LinearActivation
""" Simple pooling functions that just downsample or repeat
stride: Subsample on the layer dimension
expand: Repeat on the feature dimension
"""
class DownSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.stride > 1:
assert x.ndim == 3, "Downsampling with higher-dimensional inputs is currently not supported. It is recommended to use average or spectral pooling instead."
if self.transposed:
x = x[..., 0::self.stride]
else:
x = x[..., 0::self.stride, :]
if self.expand > 1:
if self.transposed:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
else:
x = repeat(x, 'b ... d -> b ... (d e)', e=self.expand)
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class DownAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=None, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
if self.expand is not None:
self.linear = LinearActivation(
d_input,
d_input * expand,
transposed=transposed,
)
def forward(self, x):
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
if self.stride > 1:
# einops appears slower than F
if x.ndim == 3:
x = F.avg_pool1d(x, self.stride, self.stride)
elif x.ndim == 4:
x = F.avg_pool2d(x, self.stride, self.stride)
else:
# Reduction string e.g. "b d (l1 2) (l2 2) -> b d l1 l2"
reduce_str = "b d " + " ".join([f"(l{i} {self.stride})" for i in range(x.ndim-2)]) \
+ " -> b d " + " ".join([f"l{i}" for i in range(x.ndim-2)])
x = reduce(x, reduce_str, 'mean')
# if self.expand > 1:
# x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
if self.expand is not None:
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
        if self.stride > 1 or (self.expand is not None and self.expand > 1):
raise NotImplementedError
return x, state
@property
def d_output(self):
if self.expand is None:
return self.d_input
else:
return self.d_input * self.expand
class DownSpectralPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
"""
x: (B, L..., D)
"""
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
shape = x.shape[2:]
x_f = torch.fft.ifftn(x, s=shape)
for axis, l in enumerate(shape):
assert l % self.stride == 0, 'input length must be divisible by stride'
new_l = l // self.stride
idx = torch.cat([torch.arange(0, new_l-new_l//2), l+torch.arange(-new_l//2, 0)]).to(x_f.device)
x_f = torch.index_select(x_f, 2+axis, idx)
x = torch.fft.ifftn(x_f, s=[l//self.stride for l in shape])
x = x.real
if self.expand > 1:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class UpSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.expand > 1:
if self.transposed:
x = reduce(x, '... (d e) l -> ... d l', 'mean', e=self.expand)
else:
x = reduce(x, '... (d e) -> ... d', 'mean', e=self.expand)
if self.stride > 1:
if self.transposed:
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class UpAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
assert d_input % expand == 0
self.d_input = d_input
self.stride = stride
self.expand = expand
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_input // expand,
transposed=transposed,
)
def forward(self, x):
# TODO only works for 1D right now
if x is None: return None
x = self.linear(x)
if self.stride > 1:
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class DownLinearPool(SequenceModule):
def __init__(self, d_model, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
self.d_model = d_model
self.stride = stride
self.expand = expand
self.transposed = transposed
self.linear = LinearActivation(
d_model * stride,
d_model * expand,
transposed=transposed,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
# if self.stride > 1 or self.expand > 1:
# raise NotImplementedError
# return x, state
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
@property
def d_output(self):
        return self.d_model * self.expand
class UpLinearPool(SequenceModule):
def __init__(self, d, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
# self.d_model = d * expand
# self.d_output = d
assert d % expand == 0
self.d_model = d
self.d_output = d // expand
# self._d_output = d_output
self.stride = stride
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
self.d_model,
self.d_output * stride,
transposed=transposed,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
# @property
# def d_output(self): return self._d_output
""" Pooling functions with trainable parameters """ # TODO make d_output expand instead
class DownPool2d(SequenceModule):
def __init__(self, d_input, d_output, stride=1, transposed=True, weight_norm=True):
super().__init__()
self.linear = LinearActivation(
d_input,
d_output,
transposed=transposed,
weight_norm=weight_norm,
)
        self.transposed = transposed
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride)
    def forward(self, x):
        if self.transposed:
            x = self.pool(x)
        return x, None
# TODO DownPool/UpPool are currently used by unet/sashimi backbones
# DownLinearPool is used by the registry (for isotropic backbone)
# DownPool is essentially the same as DownLinearPool. These should be consolidated
class DownPool(SequenceModule):
def __init__(self, d_input, d_output=None, expand=None, stride=1, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
assert (d_output is None) + (expand is None) == 1
if d_output is None: d_output = d_input * expand
self.d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input * stride,
d_output,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
class UpPool(SequenceModule):
def __init__(self, d_input, d_output, stride, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
self.d_input = d_input
self._d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_output * stride,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
@property
def d_output(self): return self._d_output
registry = {
'sample': DownSample,
'pool': DownAvgPool,
'avg': DownAvgPool,
'linear': DownLinearPool,
'spectral': DownSpectralPool,
}
up_registry = {
# 'sample': UpSample,
'pool': UpAvgPool,
'avg': UpAvgPool,
'linear': UpLinearPool,
# 'spectral': UpSpectralPool, # Not implemented and no way to make this causal
}
|
hyena-dna-main
|
src/models/sequence/pool.py
|
from torch import nn
import functools
class SequenceModule(nn.Module):
"""Abstract sequence model class. All models must adhere to this interface
A SequenceModule is generally a model that transforms an input of shape
(n_batch, l_sequence, d_model) to (n_batch, l_sequence, d_output)
REQUIRED methods and attributes
forward, d_model, d_output: controls standard forward pass, a sequence-to-sequence transformation
__init__ should also satisfy the following interface; see SequenceIdentity for an example
def __init__(self, d_model, transposed=False, **kwargs)
OPTIONAL methods
default_state, step: allows stepping the model recurrently with a hidden state
state_to_tensor, d_state: allows decoding from hidden state
"""
@property
def d_model(self):
"""Model dimension (generally same as input dimension).
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, encoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_model")
return self._d_model
@d_model.setter
def d_model(self, d):
self._d_model = d
@property
def d_output(self):
"""Output dimension of model.
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, decoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_output", None) is None:
raise NotImplementedError("SequenceModule instantiation must specify d_output for decoder")
return self._d_output
@d_output.setter
def d_output(self, d):
self._d_output = d
def forward(self, x, state=None, **kwargs):
"""Forward pass of sequence model, a sequence-to-sequence transformation with an optional state.
Generally, this should map a tensor of shape (batch, length, self.d_model) to (batch, length, self.d_output)
Additionally, it returns a "state" which can be any additional information
For example, RNN and SSM layers may return their hidden state,
while some types of transformer layers (e.g. Transformer-XL) may want to pass a state as well
"""
return x, None
@property
def state_to_tensor(self):
"""Returns a function mapping a state to a single tensor.
This method should be implemented if one wants to use the hidden state instead of the output sequence for final prediction.
Currently only used with the StateDecoder.
"""
return lambda _: None
@property
def d_state(self):
""" Returns dimension of output of self.state_to_tensor """
return None
def default_state(self, *batch_shape, device=None):
"""Create initial state for a batch of inputs."""
return None
def step(self, x, state=None, **kwargs):
"""Step the model recurrently for one step of the input sequence.
For example, this should correspond to unrolling an RNN for one step.
If the forward pass has signature (B, L, H1) -> (B, L, H2),
this method should generally have signature (B, H1) -> (B, H2) with an optional recurrent state.
"""
raise NotImplementedError
def TransposedModule(module):
"""Wrap a SequenceModule class to accept transposed parameter, handle state, absorb kwargs"""
# https://stackoverflow.com/a/65470430/1980685
@functools.wraps(module, updated=())
class TransposedModule(module):
def __init__(self, *args, transposed=False, **kwargs):
super().__init__(*args, **kwargs)
self.transposed = transposed
def forward(self, x, state=None, **kwargs):
if self.transposed: x = x.transpose(-1, -2)
x, next_state = super().forward(x, state) # Don't use kwarg because nn.LSTM
next_state = None if state is None else next_state
if self.transposed: x = x.transpose(-1,-2)
return x, next_state
# https://stackoverflow.com/questions/5352781/how-to-set-class-names-dynamically
# TransposedModule.__name__ = module.__name__ # functools wraps is better solution
return TransposedModule
@TransposedModule
class SequenceIdentity(SequenceModule):
"""Simple SequenceModule for testing purposes"""
def __init__(self, d_model, dropout=0.0, **kwargs):
"""Default interface for SequenceModule
d_model: input dimension (sometimes denoted H for hidden dimension)
        transposed: if True, inputs have axis ordering (B, H, L) instead of (B, L, H)
"""
super().__init__()
self.d_model = d_model
self.d_output = d_model
def forward(self, x, state=None):
return x, state
def default_state(self, *batch_shape, device=None):
return None
def step(self, x, state=None, **kwargs):
return x, state
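# Illustrative example of the interface above (not used elsewhere in the codebase):
# a SequenceModule that applies a per-step linear projection. The nn.Linear layer
# is an arbitrary stand-in chosen for this sketch.
class _SequenceLinearExample(SequenceModule):
    def __init__(self, d_model, d_output=None, **kwargs):
        super().__init__()
        self.d_model = d_model
        self.d_output = d_output if d_output is not None else d_model
        self.proj = nn.Linear(self.d_model, self.d_output)
    def forward(self, x, state=None, **kwargs):
        # (B, L, d_model) -> (B, L, d_output); no recurrent state is kept
        return self.proj(x), None
    def step(self, x, state=None, **kwargs):
        # recurrent view: (B, d_model) -> (B, d_output)
        return self.proj(x), state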
|
hyena-dna-main
|
src/models/sequence/base.py
|
""" Wrapper around nn.MultiheadAttention to adhere to SequenceModule interface. """
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from src.models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
from einops import rearrange
@TransposedModule
class MultiheadAttention(SequenceModule):
""" Simple wrapper for MultiheadAttention """
def __init__(self, d_model, n_heads, *args, causal=True, **kwargs):
super().__init__()
self.d_model = d_model
self.d_output = d_model
self.mha = nn.MultiheadAttention(d_model, n_heads, *args, batch_first=True, **kwargs)
self.causal = causal
def forward(self, src, attn_mask=None, key_padding_mask=None, state=None, **kwargs):
""" state should represent a mask and key padding mask """
if self.causal and attn_mask is None:
attn_mask = torch.triu(torch.ones(src.size(-2), src.size(-2),
dtype=torch.bool, device=src.device),
diagonal=1)
# attn_mask, key_padding_mask = state
# Note that this returns None for the second argument
y, _ = self.mha(src, src, src, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
return y, None
def step(self, x, state):
# TODO proper cached inference
# x: (B, D)
pass
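# Small sanity check for the boolean mask built in forward() above. True entries
# are *blocked* positions, so each query may only attend to itself and earlier
# keys. For L = 4 the mask is:
#   [[F, T, T, T],
#    [F, F, T, T],
#    [F, F, F, T],
#    [F, F, F, F]]
def _causal_mask_sketch(L=4):
    mask = torch.triu(torch.ones(L, L, dtype=torch.bool), diagonal=1)
    assert bool(mask[1, 2]) and not bool(mask[2, 1])  # 1 cannot see 2; 2 can see 1
    return mask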
class VitAttention(SequenceModule):
"""Copied from implementation for ViT: only used for ViT model
This attention class makes several simplifying assumptions (commonly satisfied in vision
applications):
1. q = k = v
2. No masks: no attention mask, no key padding mask
3. Embed dimension = Input dimension, i.e. projection matrices are square.
"""
@property
def d_output(self):
return self.dim
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
# proj_drop=0.,
packed_linear=True,
linear_cfg=None,
**kwargs,
):
"""packed_linear: whether to pack all 3 q_proj, k_proj, v_proj into 2 matrix.
This option is to be compatible with T2T-ViT pretrained weights, where there's only one
projection weight matrix.
"""
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if linear_cfg is not None:
packed_linear = False
self.packed_linear = packed_linear
if packed_linear:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
else:
if linear_cfg is None:
linear_cfg = {'_target_': 'torch.nn.Linear'}
self.q_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.k_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.v_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
# Removing this dropout because we do this in SequenceResidualBlock
# self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, state=None):
B, N, C = x.shape
if self.packed_linear:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
else:
q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)
q, k, v = [rearrange(x, 'b n (h d) -> b h n d', h=self.num_heads) for x in (q, k, v)]
# attn = (q @ k.transpose(-2, -1) * self.scale)
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = q.size()
_, _, k_seq_len, _ = k.size()
q = rearrange(q, 'b h t d -> (b h) t d')
k = rearrange(k, 'b h s d -> (b h) d s')
# Preallocate attn_weights for `baddbmm`
attn = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=q.dtype, device=q.device)
attn = rearrange(torch.baddbmm(attn, q, k, beta=0, alpha=self.scale),
'(b h) t s -> b h t s', h = self.num_heads)
attn = F.softmax(attn, dim=-1, dtype=v.dtype)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
# x = self.proj_drop(x)
return x, None
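# Small sanity check (illustrative only): with beta=0, torch.baddbmm ignores the
# preallocated (uninitialized) buffer, so the call in forward() above is simply
# alpha * (q @ k), i.e. the usual scaled dot-product scores. Toy shapes only.
def _baddbmm_scores_sketch():
    bh, t, s, d, scale = 6, 3, 3, 8, 0.125
    q = torch.randn(bh, t, d)
    k = torch.randn(bh, d, s)
    buf = torch.empty(bh, t, s)
    scores = torch.baddbmm(buf, q, k, beta=0, alpha=scale)
    assert torch.allclose(scores, scale * (q @ k), atol=1e-6)
    return scores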
|
hyena-dna-main
|
src/models/sequence/mha.py
|
import math
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
@torch.jit.script
def _mul_sum(y, q):
return (y * q).sum(dim=1)
# reference convolution with residual connection
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3: k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, 'b H -> b H 1')).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
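# Small sanity check (illustrative only): the zero-padded FFT product used above
# implements an ordinary causal (linear) convolution. Compare against a direct
# O(L^2) sum on tiny random inputs (shapes are arbitrary).
def _fftconv_vs_direct_sketch(L=8):
    u = torch.randn(L)
    k = torch.randn(L)
    fft_size = 2 * L
    y_fft = torch.fft.irfft(torch.fft.rfft(u, n=fft_size) * torch.fft.rfft(k, n=fft_size),
                            n=fft_size)[..., :L]
    y_direct = torch.stack([sum(u[j] * k[i - j] for j in range(i + 1)) for i in range(L)])
    assert torch.allclose(y_fft, y_direct, atol=1e-4)
    return y_fft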
# reference H3 forward pass
def fftconv_h3_ref(k, ssm_kernel, D, q, v, head_dim=1, ssm_kernel_rev=None):
seqlen = k.shape[-1]
fft_size = 2 * seqlen
kv = (rearrange(k, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
* rearrange(v, 'b (h d2) l -> b 1 d2 h l', d2=head_dim)) # b d1 d2 h l
kv_f = torch.fft.rfft(kv.to(dtype=ssm_kernel.dtype), n=fft_size) / fft_size
ssm_kernel_f = torch.fft.rfft(ssm_kernel, n=fft_size) # h L+1
if ssm_kernel_rev is not None:
ssm_kernel_rev_f = torch.fft.rfft(ssm_kernel_rev, n=fft_size) # h L+1
ssm_kernel_f = ssm_kernel_f + ssm_kernel_rev_f.conj()
y = torch.fft.irfft(kv_f * ssm_kernel_f, n=fft_size, norm='forward')[..., :seqlen] # b d1 d2 h l
out = y + kv * D.unsqueeze(-1) # b d1 d2 h l
q = rearrange(q, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
if head_dim > 1:
out = _mul_sum(out, q)
return rearrange(out, 'b d2 h l -> b (h d2) l').to(dtype=k.dtype)
else:
return rearrange(out * q, 'b 1 1 h l -> b h l').to(dtype=k.dtype)
class FFTConvFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
k_f = torch.fft.rfft(k, n=fft_size)
if k_rev is not None:
k_f = k_f + torch.fft.rfft(k_rev, n=fft_size).conj()
if u.stride(-1) != 1:
u = u.contiguous()
k_f = k_f.contiguous()
D = D.contiguous()
if v is not None and v.stride(-1) != 1:
v = v.contiguous()
if q is not None and q.stride(-1) != 1:
q = q.contiguous()
if dropout_mask is not None:
dropout_mask = dropout_mask.contiguous()
ctx.save_for_backward(u, k_f, D, dropout_mask, v, q)
ctx.output_hbl_layout = output_hbl_layout
ctx.head_dim = head_dim
ctx.gelu = gelu
ctx.fftfp16 = fftfp16
ctx.has_k_rev = k_rev is not None
out = fftconv_fwd(u, k_f, D, v, head_dim, q, dropout_mask, gelu, False, False, fft_size, force_fp16_output, output_hbl_layout, fftfp16)
return out
@staticmethod
def backward(ctx, dout):
if ctx.output_hbl_layout:
dout = rearrange(rearrange(dout, 'b h l -> h b l').contiguous(), 'h b l -> b h l')
else:
dout = dout.contiguous()
u, k_f, D, dropout_mask, v, q = ctx.saved_tensors
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
du, dk_f, dD, dv, dq = fftconv_bwd(dout, u, k_f, D, v, ctx.head_dim, q, dropout_mask, ctx.gelu, False, False, fft_size,
ctx.output_hbl_layout, ctx.fftfp16)
dk = torch.fft.irfft(dk_f, n=fft_size, norm='forward')[..., :seqlen]
dk_rev = (None if not ctx.has_k_rev
else torch.fft.irfft(dk_f.conj(), n=fft_size, norm='forward')[..., :seqlen])
if v is not None:
dv = dv.to(dtype=v.dtype) # We do atomicAdd in fp32 so might need to convert to fp16
return du, dk, dD, None, None, None, None, dv if v is not None else None, None, dq if q is not None else None, None, dk_rev
def fftconv_func(u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
return FFTConvFunc.apply(u, k, D, dropout_mask, gelu, force_fp16_output,
output_hbl_layout, v, head_dim, q, fftfp16, k_rev)
|
hyena-dna-main
|
src/ops/fftconv.py
|
"""pykeops implementations of the Vandermonde matrix multiplication kernel used in the S4D kernel."""
import math
import torch
from einops import rearrange, repeat
from opt_einsum import contract
import os
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except ImportError:
pass
try:
from cauchy_mult import vand_log_mult_sym_fwd, vand_log_mult_sym_bwd
except ImportError:
vand_log_mult_sym_fwd, vand_log_mult_sym_bwd = None, None
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def _c2r(x): return torch.view_as_real(x)
def _r2c(x): return torch.view_as_complex(x)
def vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
if conj:
x = _conj(x)
v = _conj(v)
vandermonde_matrix = x.unsqueeze(-1) ** torch.arange(L).to(x) # (... N L)
vandermonde_prod = torch.sum(v.unsqueeze(-1) * vandermonde_matrix, dim=-2) # (... L)
return vandermonde_prod
def log_vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... n, ... n l -> ... l', v, vandermonde_matrix) # (... L)
if conj:
return 2*vandermonde_prod.real
else:
return vandermonde_prod
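# Toy check (illustrative only): for small inputs the kernel above is just
# y[l] = 2 * Re( sum_n v[n] * exp(x[n] * l) ); compare against an explicit loop.
# The mode values are arbitrary (decaying real parts keep the terms bounded).
def _log_vandermonde_sketch(N=3, L=5):
    v = torch.randn(N, dtype=torch.cfloat)
    x = -torch.rand(N) + 1j * torch.randn(N)
    y = log_vandermonde_naive(v, x, L)
    y_loop = torch.stack([2 * sum(v[n] * torch.exp(x[n] * l) for n in range(N)).real
                          for l in range(L)])
    assert torch.allclose(y, y_loop, atol=1e-4)
    return y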
def log_vandermonde_lazy(v, x, L, conj=True):
if conj:
v = _conj(v)
x = _conj(x)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v_l = LazyTensor(rearrange(v, '... N -> ... N 1 1'))
x_l = LazyTensor(rearrange(x, '... N -> ... N 1 1'))
l_l = LazyTensor(rearrange(l, '... L -> ... 1 L 1'))
# exp
vand = (x_l * l_l).exp()
s = (v_l*vand).sum(dim=len(v_l.shape)-2)
return s.squeeze(-1)
def log_vandermonde(v, x, L, conj=True):
expr = 'ComplexMult(v, ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'v = Vj(2)',
'x = Vj(2)',
'l = Vi(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(v, x, l, backend='GPU')
if conj:
return 2*_r2c(r).real
else:
return _r2c(r)
def log_vandermonde_transpose_naive(u, v, x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
    vandermonde_prod = contract('... l, ... n, ... n l -> ... n', u.to(x), v.to(x), vandermonde_matrix) # (... N)
return vandermonde_prod
def log_vandermonde_transpose(u, v, x, L):
"""
u: ... H L
v: ... H N
x: ... H N
Returns: ... H N
V = Vandermonde(a, L) : (H N L)
contract_L(V * u * v)
"""
expr = 'ComplexMult(ComplexMult(v, u), ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'u = Vj(2)',
'v = Vi(2)',
'x = Vi(2)',
'l = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
u, v, x, l = _broadcast_dims(u, v, x, l)
u = _c2r(u)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(u, v, x, l, backend='GPU')
return _r2c(r)
def _log_vandermonde_matmul(x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
return vandermonde_matrix
def log_vandermonde_matmul(v, K):
prod = contract('...n, ...nl -> ...l', v, K)
return 2*prod.real
class LogVandMultiplySymmetric(torch.autograd.Function):
@staticmethod
def forward(ctx, v, x, L):
batch, N = v.shape
supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        if N not in supported_N_values:
raise NotImplementedError(f'Only support N values in {supported_N_values}')
max_L_value = 32 * 1024 * 64 * 1024
if L > max_L_value:
raise NotImplementedError(f'Only support L values <= {max_L_value}')
        if not (v.is_cuda and x.is_cuda):
            raise NotImplementedError('Only support CUDA tensors')
ctx.save_for_backward(v, x)
return vand_log_mult_sym_fwd(v, x, L)
@staticmethod
def backward(ctx, dout):
v, x = ctx.saved_tensors
dv, dx = vand_log_mult_sym_bwd(v, x, dout)
return dv, dx, None
if vand_log_mult_sym_fwd is not None and vand_log_mult_sym_bwd is not None:
log_vandermonde_fast = LogVandMultiplySymmetric.apply
else:
log_vandermonde_fast = None
|
hyena-dna-main
|
src/ops/vandermonde.py
|
""" Old utilities for parallel scan implementation of Linear RNNs. """
# TODO this file could use much cleanup
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from src.models.functional.toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
from src.utils.permutations import bitreversal_po2, bitreversal_permutation
### Utilities
def shift_up(a, s=None, drop=True, dim=0):
assert dim == 0
if s is None:
s = torch.zeros_like(a[0, ...])
s = s.unsqueeze(dim)
if drop:
a = a[:-1, ...]
return torch.cat((s, a), dim=dim)
def interleave(a, b, uneven=False, dim=0):
""" Interleave two tensors of same shape """
# assert(a.shape == b.shape)
assert dim == 0 # TODO temporary to make handling uneven case easier
if dim < 0:
        dim = len(a.shape) + dim
if uneven:
a_ = a[-1:, ...]
a = a[:-1, ...]
c = torch.stack((a, b), dim+1)
out_shape = list(a.shape)
out_shape[dim] *= 2
c = c.view(out_shape)
if uneven:
c = torch.cat((c, a_), dim=dim)
return c
def batch_mult(A, u, has_batch=None):
""" Matrix mult A @ u with special case to save memory if u has additional batch dim
The batch dimension is assumed to be the second dimension
A : (L, ..., N, N)
u : (L, [B], ..., N)
has_batch: True, False, or None. If None, determined automatically
Output:
x : (L, [B], ..., N)
A @ u broadcasted appropriately
"""
if has_batch is None:
has_batch = len(u.shape) >= len(A.shape)
if has_batch:
u = u.permute([0] + list(range(2, len(u.shape))) + [1])
else:
u = u.unsqueeze(-1)
v = (A @ u)
if has_batch:
v = v.permute([0] + [len(u.shape)-1] + list(range(1, len(u.shape)-1)))
else:
v = v[..., 0]
return v
### Main unrolling functions
def unroll(A, u):
"""
A : (..., N, N) # TODO I think this can't take batch dimension?
u : (L, ..., N)
    output : x (L, ..., N)
x[i, ...] = A^{i} @ u[0, ...] + ... + A @ u[i-1, ...] + u[i, ...]
"""
m = u.new_zeros(u.shape[1:])
outputs = []
for u_ in torch.unbind(u, dim=0):
m = F.linear(m, A) + u_
outputs.append(m)
output = torch.stack(outputs, dim=0)
return output
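# Toy check (illustrative only): unroll() realizes the linear recurrence
# x[0] = u[0], x[i] = A @ x[i-1] + u[i]; compare against an explicit loop.
def _unroll_sketch(L=6, N=4):
    A = torch.randn(N, N) * 0.3
    u = torch.randn(L, N)
    x = unroll(A, u)
    m = torch.zeros(N)
    for i in range(L):
        m = A @ m + u[i]
        assert torch.allclose(x[i], m, atol=1e-5)
    return x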
def parallel_unroll_recursive(A, u):
""" Bottom-up divide-and-conquer version of unroll. """
# Main recursive function
def parallel_unroll_recursive_(A, u):
if u.shape[0] == 1:
return u
u_evens = u[0::2, ...]
u_odds = u[1::2, ...]
# u2 = F.linear(u_evens, A) + u_odds
u2 = (A @ u_evens.unsqueeze(-1)).squeeze(-1) + u_odds
A2 = A @ A
x_odds = parallel_unroll_recursive_(A2, u2)
# x_evens = F.linear(shift_up(x_odds), A) + u_evens
x_evens = (A @ shift_up(x_odds).unsqueeze(-1)).squeeze(-1) + u_evens
x = interleave(x_evens, x_odds, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
return parallel_unroll_recursive_(A, u)[:n, ...]
def parallel_unroll_recursive_br(A, u):
""" Same as parallel_unroll_recursive but uses bit reversal for locality. """
# Main recursive function
def parallel_unroll_recursive_br_(A, u):
n = u.shape[0]
if n == 1:
return u
m = n//2
u_0 = u[:m, ...]
u_1 = u[m:, ...]
u2 = F.linear(u_0, A) + u_1
A2 = A @ A
x_1 = parallel_unroll_recursive_br_(A2, u2)
x_0 = F.linear(shift_up(x_1), A) + u_0
# x = torch.cat((x_0, x_1), dim=0) # is there a way to do this with cat?
x = interleave(x_0, x_1, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
x = parallel_unroll_recursive_br_(A, u)
return x[:n, ...]
def parallel_unroll_iterative(A, u):
""" Bottom-up divide-and-conquer version of unroll, implemented iteratively """
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
# Main recursive loop, flattened
us = [] # stores the u_0 terms in the recursive version
N_ = N
As = [] # stores the A matrices
for l in range(m):
N_ = N_ // 2
As.append(A)
u_0 = u[:N_, ...]
us.append(u_0)
u = F.linear(u_0, A) + u[N_:, ...]
A = A @ A
x_0 = []
x = u # x_1
for l in range(m-1, -1, -1):
x_0 = F.linear(shift_up(x), As[l]) + us[l]
x = interleave(x_0, x, dim=0)
return x[:n, ...]
def variable_unroll_sequential(A, u, s=None, variable=True):
""" Unroll with variable (in time/length) transitions A.
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
    output : x (L, [B], ..., N)
x[i, ...] = A[i]..A[0] @ s + A[i..1] @ u[0] + ... + A[i] @ u[i-1] + u[i]
"""
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
has_batch = len(u.shape) >= len(A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
# s = F.linear(s, A_) + u_
s = batch_mult(A_.unsqueeze(0), s.unsqueeze(0), has_batch)[0]
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll(A, u, s=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll. """
if u.shape[0] <= recurse_limit:
return variable_unroll_sequential(A, u, s, variable)
if s is None:
s = torch.zeros_like(u[0])
uneven = u.shape[0] % 2 == 1
has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = A_1 @ A_0_
# Recursive call
x_1 = variable_unroll(A_10, u_10, s, variable, recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
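# Toy check (illustrative only): on small random inputs the divide-and-conquer
# version above agrees with the sequential reference (same recurrence, different
# order of operations); recurse_limit=2 forces actual recursion at this size.
def _variable_unroll_sketch(L=9, N=3):
    A = torch.randn(L, N, N) * 0.3
    u = torch.randn(L, N)
    y_seq = variable_unroll_sequential(A, u)
    y_par = variable_unroll(A, u, recurse_limit=2)
    assert torch.allclose(y_seq, y_par, atol=1e-4)
    return y_par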
def variable_unroll_general_sequential(A, u, s, op, variable=True):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
    output : x (L, [B], ..., N)
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
if not variable:
A = A.expand((u.shape[0],) + A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
s = op(A_, s)
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll_matrix_sequential(A, u, s=None, variable=True):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
return variable_unroll_general_sequential(A, u, s, op, variable=True)
def variable_unroll_toeplitz_sequential(A, u, s=None, variable=True, pad=False):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
ret = variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply_padded, variable=True)
ret = ret[..., :n]
return ret
return variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply, variable=True)
### General parallel scan functions with generic binary composition operators
def variable_unroll_general(A, u, s, op, compose_op=None, sequential_op=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll.
compose is an optional function that defines how to compose A without multiplying by a leaf u
"""
if u.shape[0] <= recurse_limit:
if sequential_op is None:
sequential_op = op
return variable_unroll_general_sequential(A, u, s, sequential_op, variable)
if compose_op is None:
compose_op = op
uneven = u.shape[0] % 2 == 1
# has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = op(A_1, u_0_) # batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = compose_op(A_1, A_0_)
# Recursive call
x_1 = variable_unroll_general(A_10, u_10, s, op, compose_op, sequential_op, variable=variable, recurse_limit=recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = op(A_0, x_0) # batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
def variable_unroll_matrix(A, u, s=None, variable=True, recurse_limit=16):
if s is None:
s = torch.zeros_like(u[0])
has_batch = len(u.shape) >= len(A.shape)
op = lambda x, y: batch_mult(x, y, has_batch)
sequential_op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
matmul = lambda x, y: x @ y
return variable_unroll_general(A, u, s, op, compose_op=matmul, sequential_op=sequential_op, variable=variable, recurse_limit=recurse_limit)
def variable_unroll_toeplitz(A, u, s=None, variable=True, recurse_limit=8, pad=False):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (L, [B], ..., N) same shape as u
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
# Add the batch dimension to A if necessary
A_batch_dims = len(A.shape) - int(variable)
u_batch_dims = len(u.shape)-1
if u_batch_dims > A_batch_dims:
# assert u_batch_dims == A_batch_dims + 1
if variable:
while len(A.shape) < len(u.shape):
A = A.unsqueeze(1)
# else:
# A = A.unsqueeze(0)
if s is None:
s = torch.zeros_like(u[0])
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
op = triangular_toeplitz_multiply_padded
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
ret = ret[..., :n]
return ret
op = triangular_toeplitz_multiply
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
return ret
|
hyena-dna-main
|
src/ops/unroll.py
|
""" Compute a Krylov function efficiently. (S4 renames the Krylov function to a "state space kernel")
A : (N, N)
b : (N,)
c : (N,)
Return: [c^T A^i b for i in [L]]
"""
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from src.ops.toeplitz import causal_convolution
def krylov_sequential(L, A, b, c=None):
""" Constant matrix A
A : (..., N, N)
b : (..., N)
c : (..., N)
Returns
if c:
x : (..., L)
x[i, l] = c[i] @ A^l @ b[i]
else:
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
# Check which of dim b and c is smaller to save memory
if c is not None and c.numel() < b.numel():
return krylov_sequential(L, A.transpose(-1, -2), c, b)
b_ = b
x = []
for _ in range(L):
if c is not None:
x_ = torch.sum(c*b_, dim=-1) # (...) # could be faster with matmul or einsum?
else:
x_ = b_
x.append(x_)
b_ = (A @ b_.unsqueeze(-1)).squeeze(-1)
x = torch.stack(x, dim=-1)
return x
def krylov(L, A, b, c=None, return_power=False):
"""
Compute the Krylov matrix (b, Ab, A^2b, ...) using the squaring trick.
If return_power=True, return A^{L-1} as well
"""
# TODO There is an edge case if L=1 where output doesn't get broadcasted, which might be an issue if caller is expecting broadcasting semantics... can deal with it if it arises
x = b.unsqueeze(-1) # (..., N, 1)
A_ = A
AL = None
if return_power:
AL = torch.eye(A.shape[-1], dtype=A.dtype, device=A.device)
_L = L-1
done = L == 1
# loop invariant: _L represents how many indices left to compute
while not done:
if return_power:
if _L % 2 == 1: AL = A_ @ AL
_L //= 2
# Save memory on last iteration
l = x.shape[-1]
if L - l <= l:
done = True
_x = x[..., :L-l]
else: _x = x
_x = A_ @ _x
x = torch.cat([x, _x], dim=-1) # there might be a more efficient way of ordering axes
if not done: A_ = A_ @ A_
assert x.shape[-1] == L
if c is not None:
x = torch.einsum('...nl, ...n -> ...l', x, c)
x = x.contiguous() # WOW!!
if return_power:
return x, AL
else:
return x
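# Toy check (illustrative only): the squaring-trick krylov() above agrees with
# the O(L) sequential version and with an explicit c^T A^l b loop on tiny inputs.
def _krylov_sketch(L=7, N=4):
    A = torch.randn(N, N) * 0.3
    b = torch.randn(N)
    c = torch.randn(N)
    k_fast = krylov(L, A, b, c)
    k_seq = krylov_sequential(L, A, b, c)
    k_loop = torch.stack([c @ torch.matrix_power(A, l) @ b for l in range(L)])
    assert torch.allclose(k_fast, k_seq, atol=1e-5)
    assert torch.allclose(k_fast, k_loop, atol=1e-5)
    return k_fast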
@torch.no_grad()
def power(L, A, v=None):
""" Compute A^L and the scan sum_i A^i v_i
A: (..., N, N)
v: (..., N, L)
"""
I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
powers = [A]
l = 1
while True:
if L % 2 == 1: I = powers[-1] @ I
L //= 2
if L == 0: break
l *= 2
if v is None:
powers = [powers[-1] @ powers[-1]]
else:
powers.append(powers[-1] @ powers[-1])
if v is None: return I
# Invariants:
# powers[-1] := A^l
# l := largest po2 at most L
# Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
# We do this reverse divide-and-conquer for efficiency reasons:
# 1) it involves fewer padding steps for non-po2 L
# 2) it involves more contiguous arrays
# Take care of edge case for non-po2 arrays
# Note that this initial step is a no-op for the case of power of 2 (l == L)
k = v.size(-1) - l
v_ = powers.pop() @ v[..., l:]
v = v[..., :l]
v[..., :k] = v[..., :k] + v_
# Handle reduction for power of 2
while v.size(-1) > 1:
v = rearrange(v, '... (z l) -> ... z l', z=2)
v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
return I, v.squeeze(-1)
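# Toy check (illustrative only): power(L, A, v) returns (A^L, sum_i A^i v[..., i])
# for i = 0..L-1. Note that power() can modify v in place, so a clone is passed.
def _power_sketch(L=5, N=3):
    A = torch.randn(N, N) * 0.5
    v = torch.randn(N, L)
    AL, scan = power(L, A, v.clone())
    assert torch.allclose(AL, torch.matrix_power(A, L), atol=1e-5)
    scan_loop = sum(torch.matrix_power(A, i) @ v[:, i] for i in range(L))
    assert torch.allclose(scan, scan_loop, atol=1e-5)
    return AL, scan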
def krylov_toeplitz(L, A, b, c=None):
""" Specializes to lower triangular Toeplitz matrix A represented by its diagonals
A : (..., N)
b : (..., N)
c : (..., N)
Returns
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
x = b.unsqueeze(0) # (1, ..., N)
A_ = A
while x.shape[0] < L:
xx = causal_convolution(A_, x)
x = torch.cat([x, xx], dim=0) # there might be a more efficient way of ordering axes
A_ = causal_convolution(A_, A_)
x = x[:L, ...] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
def krylov_toeplitz_(L, A, b, c=None):
""" Padded version of krylov_toeplitz that saves some fft's
TODO currently not faster than original version, not sure why
"""
N = A.shape[-1]
x = b.unsqueeze(0) # (1, ..., N)
x = F.pad(x, (0, N))
A = F.pad(A, (0, N))
done = L == 1
while not done:
l = x.shape[0]
# Save memory on last iteration
if L - l <= l:
done = True
_x = x[:L-l]
else: _x = x
Af = torch.fft.rfft(A, n=2*N, dim=-1)
xf = torch.fft.rfft(_x, n=2*N, dim=-1)
xf_ = Af * xf
x_ = torch.fft.irfft(xf_, n=2*N, dim=-1)
x_[..., N:] = 0
x = torch.cat([x, x_], dim=0) # there might be a more efficient way of ordering axes
if not done:
A = torch.fft.irfft(Af*Af, n=2*N, dim=-1)
A[..., N:] = 0
x = x[:L, ..., :N] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
|
hyena-dna-main
|
src/ops/krylov.py
|
""" Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def construct_toeplitz(v, f=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A = Z_f. This uses vectorized indexing and cumprod so it's much
faster than using the Krylov function.
Parameters:
v: the starting vector of size n or (rank, n).
f: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
n = v.shape[-1]
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, None] + b[None]
K = v[..., indices]
K[..., indices < 0] *= f
return K
def triangular_toeplitz_multiply_(u, v, sum=None):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
uv_f = u_f * v_f
if sum is not None:
uv_f = uv_f.sum(dim=sum)
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
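# Toy check of the equivalent views listed at the top of this file: multiplying
# by the lower-triangular Toeplitz matrix built from u equals the FFT-based
# causal convolution computed above. Illustrative sizes only.
def _toeplitz_equivalence_sketch(N=6):
    u = torch.randn(N)
    v = torch.randn(N)
    T = construct_toeplitz(u)                      # (N, N), zeros above the diagonal (f=0)
    y_mat = T @ v                                  # matrix-multiplication view
    y_fft = triangular_toeplitz_multiply_(u, v)    # convolution view
    assert torch.allclose(y_mat, y_fft, atol=1e-4)
    return y_fft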
def triangular_toeplitz_multiply_padded_(u, v):
""" Same as triangular_toeplitz_multiply but inputs and output assume to be 0-padded already. """
n = u.shape[-1]
assert n % 2 == 0
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n:] = 0
return output
class TriangularToeplitzMult(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
return triangular_toeplitz_multiply_(u, v)
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultFast(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad.flip(-1), (0, n))
g_f = torch.fft.rfft(g_expand, n=2*n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=2*n, dim=-1)[..., :n]
d_v = torch.fft.irfft(gu_f, n=2*n, dim=-1)[..., :n]
d_u = d_u.flip(-1)
d_v = d_v.flip(-1)
return d_u, d_v
class TriangularToeplitzMultPadded(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
output = triangular_toeplitz_multiply_(u, v)
return output
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_padded_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_padded_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultPaddedFast(torch.autograd.Function):
""" Trade off speed (20-25% faster) for more memory (20-25%) """
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n//2:].zero_()
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad[..., :n//2].flip(-1), (0, n//2))
g_f = torch.fft.rfft(g_expand, n=n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=n, dim=-1)
d_v = torch.fft.irfft(gu_f, n=n, dim=-1)
d_u[..., n//2:].zero_()
d_v[..., n//2:].zero_()
d_u[..., :n//2] = d_u[..., :n//2].flip(-1) # TODO
d_v[..., :n//2] = d_v[..., :n//2].flip(-1) # TODO
return d_u, d_v
# triangular_toeplitz_multiply = triangular_toeplitz_multiply_
triangular_toeplitz_multiply = TriangularToeplitzMult.apply
triangular_toeplitz_multiply_fast = TriangularToeplitzMultFast.apply
triangular_toeplitz_multiply_padded = TriangularToeplitzMultPadded.apply
triangular_toeplitz_multiply_padded_fast = TriangularToeplitzMultPaddedFast.apply
def causal_convolution(u, v, fast=True, pad=False):
if not pad and not fast:
return triangular_toeplitz_multiply(u, v)
if not pad and fast:
return triangular_toeplitz_multiply_fast(u, v)
if pad and not fast:
return triangular_toeplitz_multiply_padded(u, v)
if pad and fast:
return triangular_toeplitz_multiply_padded_fast(u, v)
|
hyena-dna-main
|
src/ops/toeplitz.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess  # used by write_version_py below
import sys
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for metaseq.")
def write_version_py():
with open(os.path.join("metaseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to metaseq/version.py
with open(os.path.join("metaseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
NumpyExtension(
"metaseq.data.data_utils_fast",
sources=["metaseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"metaseq.data.token_block_utils_fast",
sources=["metaseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f metaseq/*.so metaseq/**/*.so metaseq/*.pyd metaseq/**/*.pyd"],
shell=True,
)
def do_setup(package_data):
setup(
name="metaseq",
version=version,
description="MetaSeq, a framework for large language models, from Meta",
url="https://github.com/facebookresearch/metaseq",
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
# protobuf version pinned due to tensorboard not pinning a version.
# https://github.com/protocolbuffers/protobuf/issues/10076
"protobuf==3.20.1",
"azure-storage-blob",
"boto3",
"black==22.1.0",
"click==8.0.4",
"cython",
'dataclasses; python_version<"3.7"',
"editdistance",
"fire",
"flask==2.1.1", # for api
"hydra-core>=1.1.0,<1.2",
"iopath",
"ipdb",
"ipython",
"Jinja2==3.1.1", # for evals
"markupsafe", # for evals
"more_itertools",
"mypy",
"ninja",
'numpy; python_version>="3.7"',
"omegaconf<=2.1.1",
"pre-commit",
"pytest",
"regex",
"sklearn", # for evals
"sacrebleu", # for evals
"tensorboard==2.8.0",
"timeout-decorator",
"tokenizers",
"torch",
"tqdm",
"typing_extensions",
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"scripts",
"scripts.*",
"tests",
"tests.*",
]
),
extras_require={
"dev": [
"flake8==3.9.2",
"black==22.1.0",
# test deps
"iopath",
"transformers",
"pyarrow",
"boto3",
]
},
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"metaseq-train = metaseq_cli.train:cli_main",
"metaseq-validate = metaseq_cli.validate:cli_main",
"opt-baselines = metaseq.launcher.opt_baselines:cli_main",
"metaseq-api-local = metaseq_cli.interactive_hosted:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="metaseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
package_data = {"metaseq": (get_files(os.path.join("metaseq", "config")))}
do_setup(package_data)
|
flash_metaseq-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utils for gpu_tests.
"""
import os
import unittest
def is_this_circleci():
"""
Return if we are currently running in CircleCI.
"""
return bool(os.environ.get("CIRCLECI"))
def skipIfCircleCI(testfn, reason="Test disabled in CircleCI"):
"""
Decorate a test to skip if running on CircleCI.
"""
return unittest.skipIf(is_this_circleci(), reason)(testfn)
|
flash_metaseq-main
|
gpu_tests/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import unittest
import torch
from omegaconf import OmegaConf
from metaseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScaling(unittest.TestCase):
def setUp(self):
self.x = torch.tensor([2.0]).cuda().half()
weight = 3.0
bias = 5.0
self.error = 1.0
self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half()
self.loss_fn = torch.nn.L1Loss()
self.model = torch.nn.Linear(1, 1)
self.model.weight.data = torch.tensor([[weight]])
self.model.bias.data = torch.tensor([bias])
self.model.cuda().half()
self.params = list(self.model.parameters())
self.cfg_dls = OmegaConf.create(
{
"optimization": {
"lr": [0.1],
},
"optimizer": {
"_name": "adam",
"lr": [0.1],
"adam_betas": "(0.9, 0.999)",
"adam_eps": 1e-8,
"weight_decay": 0.0,
},
"common": {
"bf16": False,
"fp16_init_scale": 1,
"fp16_scale_window": 1,
"fp16_scale_tolerance": 1,
"threshold_loss_scale": 1,
"min_loss_scale": 1e-4,
},
}
)
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def run_iter(self, model, params, optimizer):
optimizer.zero_grad()
y = model(self.x)
loss = self.loss_fn(y, self.target)
optimizer.backward(loss)
self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))
grad_norm = optimizer.clip_grad_norm(0)
self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
optimizer.step()
self.assertEqual(
model.weight,
torch.tensor(
[[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(
model.bias,
torch.tensor(
[5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(optimizer.scaler.loss_scale, 2.0)
def test_mixed_precision(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
self.assertTrue(
all(
torch.all(
fp32_params.eq(
torch.tensor(
[3.1000, 5.1000], device="cuda:0", requires_grad=True
)
)
)
for fp32_params in optimizer.fp32_params.values()
)
)
def test_memory_efficient(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestBF16(unittest.TestCase):
def setUp(self):
self.x = torch.tensor([2.0]).cuda().bfloat16()
weight = 3.0
bias = 5.0
self.error = 1.0
self.target = (
torch.tensor([self.x * weight + bias + self.error]).cuda().bfloat16()
)
self.loss_fn = torch.nn.L1Loss()
self.model = torch.nn.Linear(1, 1)
self.model.weight.data = torch.tensor([[weight]])
self.model.bias.data = torch.tensor([bias])
self.model.cuda().bfloat16()
self.params = list(self.model.parameters())
self.cfg_dls = OmegaConf.create(
{
"distributed_training": {
"distributed_world_size": 1,
},
"optimization": {
"lr": [0.1],
"update_freq": [1],
},
"optimizer": {
"_name": "adam",
"lr": [0.1],
"adam_betas": "(0.9, 0.999)",
"adam_eps": 1e-8,
"weight_decay": 0.0,
},
"common": {
"model_parallel_size": 1,
"bf16": True,
"fp16": True,
},
}
)
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def run_iter(self, model, params, optimizer):
optimizer.zero_grad()
y = model(self.x)
loss = self.loss_fn(y, self.target)
optimizer.backward(loss)
self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.bfloat16))
grad_norm = optimizer.clip_grad_norm(0)
self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
optimizer.step()
self.assertEqual(
model.weight,
torch.tensor(
[[3.0938]], device="cuda:0", dtype=torch.bfloat16, requires_grad=True
),
)
self.assertEqual(
model.bias,
torch.tensor(
[5.1016], device="cuda:0", dtype=torch.bfloat16, requires_grad=True
),
)
self.assertIsNone(optimizer.scaler)
def test_mixed_precision(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
self.assertTrue(
all(
torch.all(
fp32_params.eq(
torch.tensor(
[3.1000, 5.1000], device="cuda:0", requires_grad=True
)
)
)
for fp32_params in optimizer.fp32_params.values()
)
)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
gpu_tests/test_fp16_optimizer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from metaseq.modules.checkpoint_activations import checkpoint_wrapper
class Model(nn.Module):
def __init__(
self, use_pytorch_checkpoint=False, use_metaseq_checkpoint=False, **kwargs
):
super().__init__()
torch.manual_seed(0)
self.use_pytorch_checkpoint = use_pytorch_checkpoint
self.ffn = nn.Sequential(
nn.Linear(32, 128),
# add a Dropout layer to test RNG save/restore
nn.Dropout(p=0.5),
nn.Linear(128, 32),
)
if use_metaseq_checkpoint:
self.ffn = checkpoint_wrapper(self.ffn, **kwargs)
self.out = nn.Linear(32, 1)
def forward(self, x):
if self.use_pytorch_checkpoint:
x = checkpoint(self.ffn, x)
else:
x = self.ffn(x)
return self.out(x)
class TestActivationCheckpointing(unittest.TestCase):
def _test_checkpoint_wrapper(self, device, log_memory_usage=False):
def get_loss_and_gnorm(model):
torch.manual_seed(1)
input = torch.rand(2, 16, 32).requires_grad_(True).to(device)
model.zero_grad()
loss = model(input).sum()
loss.backward()
gnorm = torch.norm(
torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()])
)
return {"loss": loss, "gnorm": gnorm}
model = Model().to(device)
no_cpt = get_loss_and_gnorm(model)
model = Model(use_pytorch_checkpoint=True).to(device)
pyt_cpt = get_loss_and_gnorm(model)
torch.testing.assert_allclose(no_cpt["loss"], pyt_cpt["loss"])
torch.testing.assert_allclose(no_cpt["gnorm"], pyt_cpt["gnorm"])
model = Model(use_metaseq_checkpoint=True).to(device)
metaseq_cpt = get_loss_and_gnorm(model)
torch.testing.assert_allclose(no_cpt["loss"], metaseq_cpt["loss"])
torch.testing.assert_allclose(no_cpt["gnorm"], metaseq_cpt["gnorm"])
model = Model(use_metaseq_checkpoint=True, offload_to_cpu=True).to(device)
metaseq_cpt_offload = get_loss_and_gnorm(model)
torch.testing.assert_allclose(no_cpt["loss"], metaseq_cpt_offload["loss"])
torch.testing.assert_allclose(no_cpt["gnorm"], metaseq_cpt_offload["gnorm"])
def test_checkpoint_wrapper_cpu(self):
self._test_checkpoint_wrapper(device=torch.device("cpu"))
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_checkpoint_wrapper_cuda(self):
self._test_checkpoint_wrapper(device=torch.device("cuda"))
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
gpu_tests/test_activation_checkpointing.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import unittest
import torch
from omegaconf import OmegaConf
from metaseq.optim.adam import MetaseqAdam
from metaseq.optim.lr_scheduler.polynomial_decay_schedule import (
PolynomialDecayLRSchedule,
)
class TestPolynomialLRScheduler(unittest.TestCase):
def setUp(self):
self.x = torch.tensor([2.0])
weight = 3.0
bias = 5.0
error = 1.0
self.target = torch.tensor([self.x * weight + bias + error])
self.loss_fn = torch.nn.L1Loss()
self.model = torch.nn.Linear(1, 1)
self.model.weight.data = torch.tensor([[weight]])
self.model.bias.data = torch.tensor([bias])
self.params = list(self.model.parameters())
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def _get_adam(self, starting_lr):
return MetaseqAdam(
cfg=OmegaConf.create(
vars(
argparse.Namespace(
adam_betas="(0.9, 0.999)",
adam_eps=1e-8,
weight_decay=0.0,
lr=[starting_lr],
)
)
),
params=self.params,
)
@staticmethod
def _get_polynomial_lr_schedule(
warmup_updates,
power,
total_updates,
starting_lr,
end_lr,
zero_lr_warmup_steps,
optimizer,
):
return PolynomialDecayLRSchedule(
cfg=OmegaConf.create(
vars(
argparse.Namespace(
warmup_updates=warmup_updates,
end_learning_rate=end_lr,
power=power,
total_num_update=total_updates,
lr=[starting_lr],
zero_lr_warmup_steps=zero_lr_warmup_steps,
)
)
),
optimizer=optimizer,
)
def test_polynomial_decay_no_adam_warmup(self):
starting_lr = 0.1
total_updates = 50
warmup_updates = 20
adam_warmup = 0
power = 1
adam_optim = self._get_adam(starting_lr)
# Test setting end_lr, adam_warmup = 0
end_lr = starting_lr * 0.1
lr_sched = self._get_polynomial_lr_schedule(
warmup_updates,
power,
total_updates,
starting_lr,
end_lr,
adam_warmup,
adam_optim,
)
# Init warmup period, halfway mark
self.assertAlmostEqual(
lr_sched.step_update(warmup_updates // 2), starting_lr * 0.5
)
# Done warming up
self.assertAlmostEqual(lr_sched.step_update(warmup_updates), starting_lr)
# Linear decay, halfway mark
halfway = warmup_updates + (total_updates - warmup_updates) // 2
self.assertAlmostEqual(
lr_sched.step_update(halfway), end_lr + (starting_lr - end_lr) * 0.5
)
# End of decay
self.assertAlmostEqual(lr_sched.step_update(total_updates), end_lr)
# Test power == 2
power = 2
end_lr = 0
lr_sched = self._get_polynomial_lr_schedule(
warmup_updates,
power,
total_updates,
starting_lr,
end_lr,
adam_warmup,
adam_optim,
)
# Init warmup period, halfway mark
self.assertAlmostEqual(
lr_sched.step_update(warmup_updates // 2), starting_lr * 0.5
)
# Done warming up
self.assertAlmostEqual(lr_sched.step_update(warmup_updates), starting_lr)
# Polynomial power == 2 decay, halfway mark
self.assertAlmostEqual(
lr_sched.step_update(halfway), end_lr + (starting_lr - end_lr) * 0.5**2
)
def test_polynomial_decay_with_adam_warmup(self):
starting_lr = 0.1
total_updates = 50
warmup_updates = 20
adam_warmup = 10
power = 1
adam_optim = self._get_adam(starting_lr)
end_lr = starting_lr * 0.1
lr_sched = self._get_polynomial_lr_schedule(
warmup_updates,
power,
total_updates,
starting_lr,
end_lr,
adam_warmup,
adam_optim,
)
# Init warmup period, during adam warmup
self.assertEqual(lr_sched.step_update(adam_warmup // 2), 0)
# Init warmup period, past adam warmup
self.assertAlmostEqual(
lr_sched.step_update(warmup_updates // 2 + adam_warmup), starting_lr * 0.5
)
# Done warming up
total_warmup = adam_warmup + warmup_updates
self.assertAlmostEqual(lr_sched.step_update(total_warmup), starting_lr)
# Linear decay, halfway mark
halfway = total_warmup + (total_updates - total_warmup) // 2
self.assertAlmostEqual(
lr_sched.step_update(halfway), end_lr + (starting_lr - end_lr) * 0.5
)
# End of decay
self.assertAlmostEqual(lr_sched.step_update(total_updates), end_lr)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/test_polynomial_lr.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import unittest
import numpy as np
from metaseq.data import ListDataset, ResamplingDataset
class TestResamplingDataset(unittest.TestCase):
def setUp(self):
self.strings = ["ab", "c", "def", "ghij"]
self.weights = [4.0, 2.0, 7.0, 1.5]
self.size_ratio = 2
self.dataset = ListDataset(
self.strings, np.array([len(s) for s in self.strings])
)
def _test_common(self, resampling_dataset, iters):
assert len(self.dataset) == len(self.strings) == len(self.weights)
assert len(resampling_dataset) == self.size_ratio * len(self.strings)
results = {"ordered_by_size": True, "max_distribution_diff": 0.0}
totalfreqs = 0
freqs = collections.defaultdict(int)
for epoch_num in range(iters):
resampling_dataset.set_epoch(epoch_num)
indices = resampling_dataset.ordered_indices()
assert len(indices) == len(resampling_dataset)
prev_size = -1
for i in indices:
cur_size = resampling_dataset.size(i)
# Make sure indices map to same sequences within an epoch
assert resampling_dataset[i] == resampling_dataset[i]
# Make sure length of sequence is correct
assert cur_size == len(resampling_dataset[i])
freqs[resampling_dataset[i]] += 1
totalfreqs += 1
if prev_size > cur_size:
results["ordered_by_size"] = False
prev_size = cur_size
assert set(freqs.keys()) == set(self.strings)
for s, weight in zip(self.strings, self.weights):
freq = freqs[s] / totalfreqs
expected_freq = weight / sum(self.weights)
results["max_distribution_diff"] = max(
results["max_distribution_diff"], abs(expected_freq - freq)
)
return results
def test_resampling_dataset_batch_by_size_false(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=False,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = False, the batches should be returned in
# arbitrary order of size.
assert not results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
def test_resampling_dataset_batch_by_size_true(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=True,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = True, the batches should be returned in
# increasing order of size.
assert results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/test_resampling_dataset.py
|
flash_metaseq-main
|
tests/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import random
import string
import tempfile
import unittest
import torch
from tests.utils import train_language_model
try:
import tokenizers # noqa
has_hf_tokenizers = True
except ImportError:
has_hf_tokenizers = False
def write_one_jsonl_(jsonl_path, num_lines=5, text_len_min=5, text_len_max=50):
data = []
with open(jsonl_path, "w") as h:
for _ in range(num_lines):
text_len = random.choice(range(text_len_min, text_len_max))
data.append(
{"text": "".join(random.choices(string.ascii_letters, k=text_len))}
)
print(json.dumps(data[-1]), file=h)
return
def write_dummy_jsonl_data_dir_(data_dir, num_lines=500):
for subset in ["train", "valid"]:
for shard in range(2):
shard_dir = os.path.join(data_dir, subset, f"{shard:02}")
os.makedirs(shard_dir)
for dataset in ["a", "b"]:
write_one_jsonl_(
os.path.join(shard_dir, f"dataset_{dataset}.jsonl"),
num_lines=num_lines,
)
def write_dummy_bpe_(data_dir):
from tokenizers import ByteLevelBPETokenizer
tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
tokenizer.train(
[],
vocab_size=500,
special_tokens=["<s>", "<pad>", "</s>", "<unk>"],
show_progress=False,
)
vocab, merges = tokenizer.save_model(data_dir)
return vocab, merges
class TestReproducibility(unittest.TestCase):
@unittest.skipIf(not has_hf_tokenizers, "skip test if tokenizers is missing")
def _test_reproducibility(
self,
name,
extra_flags=None,
delta=0.0001,
resume_checkpoint="checkpoint1.pt",
max_epoch=3,
):
def get_last_log_stats_containing_string(log_records, search_string):
for log_record in log_records[::-1]:
if isinstance(log_record.msg, str) and search_string in log_record.msg:
return json.loads(log_record.msg)
if extra_flags is None:
extra_flags = []
with tempfile.TemporaryDirectory(name) as data_dir:
write_dummy_jsonl_data_dir_(data_dir)
vocab, merges = write_dummy_bpe_(data_dir)
# train epochs 1 and 2 together
with self.assertLogs() as logs:
train_language_model(
data_dir=data_dir,
arch="transformer_lm_gpt2_tiny",
extra_flags=[
"--vocab-filename",
vocab,
"--merges-filename",
merges,
"--dropout",
"0.0",
"--log-format",
"json",
"--log-interval",
"1",
"--max-epoch",
str(max_epoch),
"--batch-size",
"2",
]
+ extra_flags,
task="streaming_language_modeling",
max_tokens=None,
)
train_log = get_last_log_stats_containing_string(logs.records, "train_loss")
valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss")
# train epoch 2, resuming from previous checkpoint 1
os.rename(
os.path.join(data_dir, resume_checkpoint),
os.path.join(data_dir, "checkpoint_last.pt"),
)
with self.assertLogs() as logs:
train_language_model(
data_dir=data_dir,
arch="transformer_lm_gpt2_tiny",
extra_flags=[
"--vocab-filename",
vocab,
"--merges-filename",
merges,
"--dropout",
"0.0",
"--log-format",
"json",
"--log-interval",
"1",
"--max-epoch",
str(max_epoch),
"--batch-size",
"2",
]
+ extra_flags,
task="streaming_language_modeling",
max_tokens=None,
)
train_res_log = get_last_log_stats_containing_string(
logs.records, "train_loss"
)
valid_res_log = get_last_log_stats_containing_string(
logs.records, "valid_loss"
)
for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]:
self.assertAlmostEqual(
float(train_log[k]), float(train_res_log[k]), delta=delta
)
for k in [
"valid_loss",
"valid_ppl",
"valid_num_updates",
"valid_best_loss",
]:
self.assertAlmostEqual(
float(valid_log[k]), float(valid_res_log[k]), delta=delta
)
def test_reproducibility(self):
self._test_reproducibility("test_reproducibility")
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_reproducibility_fp16(self):
self._test_reproducibility(
"test_reproducibility_fp16",
[
"--fp16",
"--fp16-init-scale",
"4096",
],
delta=0.011,
)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_reproducibility_memory_efficient_fp16(self):
self._test_reproducibility(
"test_reproducibility_memory_efficient_fp16",
[
"--memory-efficient-fp16",
"--fp16-init-scale",
"4096",
],
)
def test_mid_epoch_reproducibility(self):
self._test_reproducibility(
"test_mid_epoch_reproducibility",
["--save-interval-updates", "3"],
resume_checkpoint="checkpoint_1_3.pt",
max_epoch=1,
)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/test_streaming_language_modeling_task.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import random
from typing import Optional
import numpy as np
import torch
import torch.nn.functional as F
import metaseq.distributed.utils as distributed_utils
from metaseq import options, utils
from metaseq.data import Dictionary, data_utils
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
from metaseq.models import (
BaseEncoder,
EncoderDecoderModel,
IncrementalDecoder,
)
from metaseq.models.base_encoder import EncoderOut
from metaseq.tasks import LegacyTask
from metaseq_cli import train, validate
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
pad_to_multiple=1,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
def check_alignment(alignment, src_len, tgt_len):
if alignment is None or len(alignment) == 0:
return False
if (
alignment[:, 0].max().item() >= src_len - 1
or alignment[:, 1].max().item() >= tgt_len - 1
):
return False
return True
def compute_alignment_weights(alignments):
"""
Given a tensor of shape [:, 2] containing the source-target indices
corresponding to the alignments, a weight vector containing the
inverse frequency of each target index is computed.
For example, if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
a tensor containing [1., 0.5, 0.5, 1.] should be returned, since target
index 3 is repeated twice (see the standalone sketch after collate()).
"""
align_tgt = alignments[:, 1]
_, align_tgt_i, align_tgt_c = torch.unique(
align_tgt, return_inverse=True, return_counts=True
)
align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
return 1.0 / align_weights.float()
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor(
[s["source"].ne(pad_idx).long().sum() for s in samples]
)
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
tgt_lengths = torch.LongTensor(
[s["target"].ne(pad_idx).long().sum() for s in samples]
).index_select(0, sort_order)
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge(
"prev_output_tokens",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
elif input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
else:
ntokens = src_lengths.sum().item()
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
0, sort_order
)
if samples[0].get("alignment", None) is not None:
bsz, tgt_sz = batch["target"].shape
src_sz = batch["net_input"]["src_tokens"].shape[1]
offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
if left_pad_source:
offsets[:, 0] += src_sz - src_lengths
if left_pad_target:
offsets[:, 1] += tgt_sz - tgt_lengths
alignments = [
alignment + offset
for align_idx, offset, src_len, tgt_len in zip(
sort_order, offsets, src_lengths, tgt_lengths
)
for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
if check_alignment(alignment, src_len, tgt_len)
]
if len(alignments) > 0:
alignments = torch.cat(alignments, dim=0)
align_weights = compute_alignment_weights(alignments)
batch["alignments"] = alignments
batch["align_weights"] = align_weights
if samples[0].get("constraints", None) is not None:
# Collate the packed constraints across the samples, padding to
# the length of the longest sample.
lens = [sample.get("constraints").size(0) for sample in samples]
constraints = torch.zeros((len(samples), max(lens))).long()
for i, sample in enumerate(samples):
constraints[i, 0 : lens[i]] = samples[i].get("constraints")
batch["constraints"] = constraints.index_select(0, sort_order)
return batch
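# Standalone sketch (comments only, not executed) of the inverse-frequency
# weighting used by compute_alignment_weights above, using the hypothetical
# alignments from its docstring:
# >>> align = torch.LongTensor([[5, 7], [2, 3], [1, 3], [4, 2]])
# >>> tgt = align[:, 1]                                 # tensor([7, 3, 3, 2])
# >>> _, inv, cnt = torch.unique(tgt, return_inverse=True, return_counts=True)
# >>> (1.0 / cnt[inv].float()).tolist()
# [1.0, 0.5, 0.5, 1.0]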
def dummy_dictionary(vocab_size, prefix="token_"):
d = Dictionary()
for i in range(vocab_size):
token = prefix + str(i)
d.add_symbol(token)
d.finalize(padding_factor=1) # don't add extra padding symbols
return d
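# Usage note (illustrative): the Dictionary reserves low indices for special
# symbols (pad=1, eos=2, unk=3, matching the assertions in
# tests/test_sequence_generator.py), so new symbols start at index 4, e.g.
# >>> d = dummy_dictionary(vocab_size=2)
# >>> d.index("token_0"), d.index("token_1")
# (4, 5)
# This is why sequence_generator_setup() below uses w1 = 4 and w2 = 5.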
def dummy_dataloader(
samples,
padding_idx=1,
eos_idx=2,
batch_size=None,
):
if batch_size is None:
batch_size = len(samples)
# add any missing data to samples
for i, sample in enumerate(samples):
if "id" not in sample:
sample["id"] = i
# create dataloader
dataset = TestDataset(samples)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)),
)
return iter(dataloader)
def sequence_generator_setup():
# construct dummy dictionary
d = dummy_dictionary(vocab_size=2)
eos = d.eos()
w1 = 4
w2 = 5
# construct source data
src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0)
[0.0, unk, 0.9, 0.1], # w2: 0.1
# sentence 2:
[0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25)
[0.00, unk, 0.10, 0.9], # w2: 0.3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9
[
0.6,
unk,
0.2,
0.2,
], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6)
# sentence 2:
[
0.60,
unk,
0.4,
0.00,
], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6)
[0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[
1.0,
unk,
0.0,
0.0,
], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0)
[
1.0,
unk,
0.0,
0.0,
], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0)
# sentence 2:
[
0.1,
unk,
0.5,
0.4,
], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1)
[
1.0,
unk,
0.0,
0.0,
], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0)
]
),
]
task = TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
tgt_dict = task.target_dictionary
return tgt_dict, w1, w2, src_tokens, src_lengths, model
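# Worked example (derived by hand from the beam_probs above): for sentence 1
# the best hypothesis is [w1, eos] with step probabilities 0.9 and 1.0, so its
# length-normalized score is (log 0.9 + log 1.0) / 2 ≈ -0.053. These are the
# positional scores asserted in tests/test_sequence_generator.py.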
def create_dummy_data(
data_dir, num_examples=100, maxlen=20, alignment=False, languages=None
):
def _create_dummy_data(dir, filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(dir, filename), "w") as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = " ".join(map(chr, data[offset : offset + ex_len]))
print(ex_str, file=h)
offset += ex_len
def _create_dummy_alignment_data(filename_src, filename_tgt, filename):
with open(os.path.join(data_dir, filename_src), "r") as src_f, open(
os.path.join(data_dir, filename_tgt), "r"
) as tgt_f, open(os.path.join(data_dir, filename), "w") as h:
for src, tgt in zip(src_f, tgt_f):
src_len = len(src.split())
tgt_len = len(tgt.split())
avg_len = (src_len + tgt_len) // 2
num_alignments = random.randint(avg_len // 2, 2 * avg_len)
src_indices = torch.floor(torch.rand(num_alignments) * src_len).int()
tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int()
ex_str = " ".join(
[
"{}-{}".format(src, tgt)
for src, tgt in zip(src_indices, tgt_indices)
]
)
print(ex_str, file=h)
files_to_write = [
"train.in",
"train.out",
"valid.in",
"valid.out",
"test.in",
"test.out",
]
if languages is None: # En only dummy dataset
for f in files_to_write:
_create_dummy_data(data_dir, f)
else:
for lang in languages:
lang_dir = os.path.join(data_dir, lang)
os.makedirs(lang_dir, exist_ok=True)
for f in files_to_write:
_create_dummy_data(lang_dir, f)
if alignment:
_create_dummy_alignment_data("train.in", "train.out", "train.align")
_create_dummy_alignment_data("valid.in", "valid.out", "valid.align")
_create_dummy_alignment_data("test.in", "test.out", "test.align")
class TestDataset(torch.utils.data.Dataset):
def __init__(self, data):
super().__init__()
self.data = data
self.sizes = None
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class TestTranslationTask(LegacyTask):
def __init__(self, args, src_dict, tgt_dict, model):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.model = model
@classmethod
def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
return cls(args, src_dict, tgt_dict, model)
def build_model(self, args):
return TestModel.build_model(args, self)
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.tgt_dict
class TestModel(EncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestEncoder(BaseEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
return EncoderOut(
encoder_out=src_tokens,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestIncrementalDecoder(IncrementalDecoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
assert hasattr(args, "beam_probs") or hasattr(args, "probs")
args.max_decoder_positions = getattr(args, "max_decoder_positions", 100)
self.args = args
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bbsz = prev_output_tokens.size(0)
vocab = len(self.dictionary)
src_len = encoder_out.encoder_out.size(1)
tgt_len = prev_output_tokens.size(1)
# determine number of steps
if incremental_state is not None:
# cache step number
step = utils.get_incremental_state(self, incremental_state, "step")
if step is None:
step = 0
utils.set_incremental_state(self, incremental_state, "step", step + 1)
steps = [step]
else:
steps = list(range(tgt_len))
# define output in terms of raw probs
if hasattr(self.args, "probs"):
assert (
self.args.probs.dim() == 3
), "expected probs to have size bsz*steps*vocab"
probs = self.args.probs.index_select(1, torch.LongTensor(steps))
else:
probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
for i, step in enumerate(steps):
# args.beam_probs gives the probability for every vocab element,
# starting with eos, then unknown, and then the rest of the vocab
if step < len(self.args.beam_probs):
probs[:, i, self.dictionary.eos() :] = self.args.beam_probs[step]
else:
probs[:, i, self.dictionary.eos()] = 1.0
# random attention
attn = torch.rand(bbsz, tgt_len, src_len)
dev = prev_output_tokens.device
return probs.to(dev), {"attn": [attn.to(dev)]}
def get_normalized_probs(self, net_output, log_probs, _):
# the decoder returns probabilities directly
probs = net_output[0]
if log_probs:
return probs.log()
else:
return probs
def max_positions(self):
return self.args.max_decoder_positions
class TestReshapingEncoder(BaseEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
b_sz, t_sz = src_tokens.shape
padding_needed = t_sz % 2
x = src_tokens
if padding_needed > 0:
padding_needed = 2 - padding_needed
x = F.pad(x, (0, padding_needed))
return EncoderOut(
encoder_out=x.view(b_sz, -1, 2),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestReshapingModel(EncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestReshapingEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestAdditionalInputEncoder(BaseEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
assert "fancy_other_input" in kwargs
assert kwargs["fancy_other_input"] is not None
return EncoderOut(
encoder_out=src_tokens,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestAdditionalInputModel(EncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestAdditionalInputEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
def train_language_model(
data_dir,
arch,
extra_flags=None,
run_validation=False,
extra_valid_flags=None,
task="language_modeling",
world_size=1,
max_tokens: Optional[int] = 500,
):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
task,
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--distributed-world-size",
str(world_size),
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (["--max-tokens", str(max_tokens)] if max_tokens is not None else [])
+ (extra_flags or []),
)
cfg = convert_namespace_to_omegaconf(train_args)
distributed_utils.call_main(cfg, train.main)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
"--task",
task,
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--valid-subset",
"valid",
"--max-tokens",
"500",
"--num-workers",
"0",
]
+ (extra_valid_flags or []),
)
validate.main(validate_args)
|
flash_metaseq-main
|
tests/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import unittest
import torch
from omegaconf import OmegaConf
from metaseq.optim.adam import MetaseqAdam
from metaseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestMemoryEfficientFP16(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_load_state_dict(self):
# define simple FP16 model
model = torch.nn.Linear(5, 5).cuda().half()
params = list(model.parameters())
# initialize memory efficient FP16 optimizer
# with pseudo DictConfigs
optimizer = MetaseqAdam(
cfg=OmegaConf.create(
vars(
argparse.Namespace(
adam_betas="(0.9, 0.999)",
adam_eps=1e-8,
weight_decay=0.0,
lr=[0.00001],
)
)
),
params=params,
)
me_optimizer = MemoryEfficientFP16Optimizer(
cfg=OmegaConf.create(
{
"common": vars(
argparse.Namespace(
fp16_init_scale=1,
fp16_scale_window=1,
fp16_scale_tolerance=1,
threshold_loss_scale=1,
min_loss_scale=1e-4,
)
)
}
),
params=params,
optimizer=optimizer,
)
# optimizer state is created in the first step
loss = model(torch.rand(5).cuda().half()).sum()
me_optimizer.backward(loss)
me_optimizer.step()
# reload state
state = me_optimizer.state_dict()
me_optimizer.load_state_dict(state)
for k, v in me_optimizer.optimizer.state.items():
self.assertTrue(k.dtype == torch.float16)
for v_i in v.values():
if torch.is_tensor(v_i):
self.assertTrue(v_i.dtype == torch.float32)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/test_memory_efficient_fp16.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import torch
import tests.utils as test_utils
from metaseq import search
from metaseq.data.dictionary import Dictionary
from metaseq.models.transformer_lm import TransformerLanguageModel
from metaseq.sequence_generator import SequenceGenerator
from metaseq.tasks.base_task import LegacyTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), n=1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
To build a model, we need a dummy parser and task. This function creates
a dummy task and parser to facilitate model/criterion tests.
Note: DummyTask (defined above) is used as the dummy task; you can swap in
another task by providing a different setup function.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
class TestJitSequenceGeneratorBase(unittest.TestCase):
def setUp(self):
self.task, self.parser = get_dummy_task_and_parser()
eos = self.task.tgt_dict.eos()
src_tokens = torch.randint(3, 50, (2, 10)).long()
src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1)
src_lengths = torch.LongTensor([2, 10])
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
TransformerLanguageModel.add_args(self.parser)
args = self.parser.parse_args([])
args.decoder_layers = 1
self.transformer_model = TransformerLanguageModel.build_model(args, self.task)
def assertOutputEqual(self, hypo, pos_probs):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores)
self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel())
def assertTensorSizeEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
def assertHypoEqual(self, h1, h2):
"Check two hypos are equal"
self.assertTensorEqual(h1["tokens"], h2["tokens"])
self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"])
self.assertLess(abs(h1["score"] - h2["score"]), 1e-6)
self.assertAlmostEqual(h1["attention"], h2["attention"])
def _test_save_and_load(self, scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
JIT_MSG = "Targeting OSS scriptability for the 1.6 release"
@unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG)
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase):
def test_export_transformer(self):
model = self.transformer_model
torch.jit.script(model)
def test_sequence_generator(self):
model = self.transformer_model
generator = SequenceGenerator(
[model],
self.task.tgt_dict,
beam_size=2,
max_len_b=10,
)
scripted_model = torch.jit.script(generator)
self._test_save_and_load(scripted_model)
class TestExportSearch(unittest.TestCase):
def setUp(self):
task, _ = get_dummy_task_and_parser()
self.tgt_dict = task.tgt_dict
self.min_top1_prob = 0.4
def test_export_sampling(self):
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
torch.jit.script(search_strategy)
class TestSequenceGeneratorBase(unittest.TestCase):
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
class TestSequenceGenerator(TestSequenceGeneratorBase):
def setUp(self):
(
self.tgt_dict,
self.w1,
self.w2,
src_tokens,
src_lengths,
self.model,
) = test_utils.sequence_generator_setup()
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
def test_with_normalization(self):
generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])
def test_without_normalization(self):
# Sentence 1: unchanged from the normalized case
# Sentence 2: beams swap order
generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)
def test_maxlen(self):
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, max_len_b=2
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
]
)
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
# The minimal probability of top 2 tokens.
self.min_top2_prob = 0.75
# The minimal probability of the top 1 token.
self.min_top1_prob = 0.4
w1_prob = self.min_top1_prob
w2_prob = self.min_top2_prob - self.min_top1_prob
eos_prob = 1 - self.min_top2_prob
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
]
),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_topp_sampling_search_low_prob(self):
# With a low enough sampling_topp, only the top-1 token can be sampled,
# which always results in the same output (see the illustrative sketch
# after this class).
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1 = self.eos, self.w1
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])
def test_topp_sampling_search_high_prob(self):
# With a high enough sampling_topp, either of the top-2 tokens can be
# sampled, so the outputs may differ between runs.
high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=high_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertTrue(
self.hypoTokens(hypos[0][0], [w1, w1, eos])
or self.hypoTokens(hypos[0][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])
)
# sentence 1, beam 2
self.assertTrue(
self.hypoTokens(hypos[0][1], [w1, w1, eos])
or self.hypoTokens(hypos[0][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])
)
# sentence 2, beam 1
self.assertTrue(
self.hypoTokens(hypos[1][0], [w1, w1, eos])
or self.hypoTokens(hypos[1][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])
)
# sentence 2, beam 2
self.assertTrue(
self.hypoTokens(hypos[1][1], [w1, w1, eos])
or self.hypoTokens(hypos[1][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])
)
def hypoTokens(self, hypo, tokens):
return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def hypoScore(self, hypo, pos_probs, normalized=True):
pos_scores = torch.FloatTensor(pos_probs).log()
if not self.almostEqual(hypo["positional_scores"], pos_scores):
return False
if pos_scores.numel() != hypo["tokens"].numel():
return False
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()
return abs(score - hypo["score"]) < 1e-6
def almostEqual(self, t1, t2):
return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4
def tensorEqual(self, t1, t2):
return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0
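# Illustrative sketch (not used by the tests above, and not metaseq's actual
# implementation): how a top-p / nucleus cutoff selects the candidate set for
# the step-1 probabilities built in TestTopPSamplingSearch.setUp
# (w1=0.4, w2=0.35, eos=0.25). The helper name is hypothetical.
def _illustrate_topp_candidates(probs, topp):
    # Keep the smallest prefix of tokens, sorted by probability descending,
    # whose cumulative probability reaches topp.
    sorted_probs, sorted_idx = torch.sort(torch.tensor(probs), descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=0)
    last = int((cumulative < topp).sum().item())
    return set(sorted_idx[: last + 1].tolist())
# With topp = 0.2 only the top-1 token survives, so sampling is deterministic;
# with topp = 0.575 both w1 and w2 survive, so outputs can differ:
# _illustrate_topp_candidates([0.4, 0.35, 0.25], 0.2) == {0}
# _illustrate_topp_candidates([0.4, 0.35, 0.25], 0.575) == {0, 1}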
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/test_sequence_generator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
try:
import boto3
from metaseq.s3_utils import S3PathHandler
except ImportError:
boto3 = None
S3PathHandler = None
# Hack to make the test cases ordered.
# https://stackoverflow.com/questions/4005695/changing-order-of-unit-tests-in-python
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: (x > y) - (x < y)
@unittest.skipIf(not boto3, "Requires boto3 install")
class TestsS3(unittest.TestCase):
s3_auth = False
skip_s3_auth_required_tests_message = (
"Provide an s3 project and bucket you are"
+ "authorised against, then set the s3_auth flag to True"
)
#############################################
# Shared
#############################################
@classmethod
def setUpClass(cls):
# NOTE: user can change these locations.
cls.s3_bucket = "fairusersglobal"
cls.s3_rel_path = os.path.expandvars(
"users/$USER/private/home/$USER/.metaseq/test_s3_pathhandler"
)
cls.s3_full_path = "s3://" + cls.s3_bucket + "/" + cls.s3_rel_path
cls.s3_pathhandler = S3PathHandler()
@classmethod
def tearDownClass(cls, _s3_auth=s3_auth):
if not _s3_auth:
return
# Recursive deletion is not implemented,
# so let's delete each file and directory.
# Delete all files
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir1", "f1_write_string"]))
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir1", "f2_write_bytes"]))
cls.s3_pathhandler._rm(
"/".join([cls.s3_full_path, "dir2", "f1_write_string_from_local"])
)
cls.s3_pathhandler._rm(
"/".join([cls.s3_full_path, "dir2", "f2_write_bytes_from_local"])
)
cls.s3_pathhandler._rm(
"/".join([cls.s3_full_path, "dir2", "f3_write_string_from_local"])
)
cls.s3_pathhandler._rm(
"/".join([cls.s3_full_path, "dir2", "f4_write_bytes_from_local"])
)
# Delete all directories.
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir3", "dir4/"]))
for i in (1, 2, 3):
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, f"dir{i}/"]))
assert cls.s3_pathhandler._ls(cls.s3_full_path) == []
#############################################
# Up here, test class attributes,
# and helpers that don't require S3 access.
#############################################
def test_00_supported_prefixes(self):
supported_prefixes = self.s3_pathhandler._get_supported_prefixes()
self.assertEqual(supported_prefixes, ["s3://"])
# # Require S3 Authentication ====>
#############################################
# Organization of s3 setup
# dir1/
# f1 <- small (via open)
# f2 <- large checkpoint file (via open)
# dir2/
# f3 <- small (via copy(), from dir1)
# f4 <- large checkpoint file (via copy_from_local)
# dir3/
# dir4/
#############################################
#############################################
# auth
# Just check that client loads properly
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_01_add_client_to_handler(self):
self.s3_pathhandler._get_client(
"/".join([self.s3_full_path, "path", "file.txt"])
)
# self.assertTrue(isinstance(self.s3_pathhandler.client, botocore.client.S3)) # TODO
# TODO: make sure that the error message displays properly if authentication is messed up.
#############################################
# mkdirs
# Set up the dirs
# (in BASE)
# +dir1
# +dir2
# +dir3
# +dir4
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_02_mkdirs_must_end_with_slash(self):
with self.assertRaises(AssertionError):
self.s3_pathhandler._mkdirs("/".join([self.s3_full_path, "fail"]))
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_03_mkdirs(self):
# dir{1,2,3} in BASE
for i in (1, 2, 3):
self.s3_pathhandler._mkdirs("/".join([self.s3_full_path, f"dir{i}/"]))
# Make a nested directory in dir3
self.s3_pathhandler._mkdirs("/".join([self.s3_full_path, "dir3/dir4/"]))
#############################################
# open (w/wb)
# +f1
# +f2
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_04_open_write_mode(self):
with self.s3_pathhandler._open(
"/".join([self.s3_full_path, "dir1", "f1_write_string"]), "w"
) as f:
f.write("This is a test of writing a string.")
with self.s3_pathhandler._open(
"/".join([self.s3_full_path, "dir1", "f2_write_bytes"]), "wb"
) as f:
f.write(b"This is a test of writing bytes.")
#############################################
# open (r/rb)
# read f1
# read f2
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_05_open_read_mode(self):
with self.s3_pathhandler._open(
"/".join([self.s3_full_path, "dir1", "f1_write_string"]), "r"
) as f:
self.assertEqual(f.read(), "This is a test of writing a string.")
with self.s3_pathhandler._open(
"/".join([self.s3_full_path, "dir1", "f2_write_bytes"]), "rb"
) as f:
self.assertEqual(f.read(), b"This is a test of writing bytes.")
#############################################
# isdir / isfile / exists
# test dir{1,2,3,4}
# test f{1,2}
# test nonexistants
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_06_exists(self):
# Path does not exist (if file)
self.assertFalse(
self.s3_pathhandler._exists("/".join([self.s3_full_path, "dir1", "FAIL"]))
)
# Path does not exist (if dir)
self.assertFalse(
self.s3_pathhandler._exists("/".join([self.s3_full_path, "FAIL/"]))
)
# Path exists (is file)
self.assertTrue(
self.s3_pathhandler._exists(
"/".join([self.s3_full_path, "dir1", "f1_write_string"])
)
)
# Path exists (is dir)
self.assertTrue(
self.s3_pathhandler._exists("/".join([self.s3_full_path, "dir1/"]))
)
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_07_isdir(self):
# Path does not exist (if file)
self.assertFalse(
self.s3_pathhandler._isdir("/".join([self.s3_full_path, "dir1", "FAIL"]))
)
# Path does not exist (if dir)
self.assertFalse(
self.s3_pathhandler._isdir("/".join([self.s3_full_path, "FAIL/"]))
)
# Path exists (is file)
self.assertFalse(
self.s3_pathhandler._isdir(
"/".join([self.s3_full_path, "dir1", "f1_write_string"])
)
)
# Path exists (is dir)
self.assertTrue(
self.s3_pathhandler._isdir("/".join([self.s3_full_path, "dir1/"]))
)
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_08_isfile(self):
# Path does not exist (if file)
self.assertFalse(
self.s3_pathhandler._isfile("/".join([self.s3_full_path, "dir1", "FAIL"]))
)
# Path does not exist (if dir)
self.assertFalse(
self.s3_pathhandler._isfile("/".join([self.s3_full_path, "FAIL/"]))
)
# Path exists (is file)
self.assertTrue(
self.s3_pathhandler._isfile(
"/".join([self.s3_full_path, "dir1", "f1_write_string"])
)
)
# Path exists (is dir)
self.assertFalse(
self.s3_pathhandler._isfile("/".join([self.s3_full_path, "dir1/"]))
)
#############################################
# copy
# copy f1 -> f3
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_09_copy(self):
self.assertTrue(
self.s3_pathhandler._copy(
"/".join([self.s3_full_path, "dir1", "f1_write_string"]),
"/".join([self.s3_full_path, "dir2", "f3_write_string"]),
)
)
#############################################
# ls
# ls dir{1,2,3,4}
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_10_ls(self):
# Path does not exist (if file)
self.assertEqual(
[], self.s3_pathhandler._ls("/".join([self.s3_full_path, "dir1", "FAIL"]))
)
# Path does not exist (if dir)
self.assertEqual(
[], self.s3_pathhandler._ls("/".join([self.s3_full_path, "FAIL/"]))
)
# Path exists (is file)
self.assertEqual(
["/".join([self.s3_rel_path, "dir1", "f1_write_string"])],
self.s3_pathhandler._ls(
"/".join([self.s3_full_path, "dir1", "f1_write_string"])
),
)
# Path exists (is dir)
self.assertEqual(
{
"/".join(
[self.s3_rel_path, "dir1/"]
), # TODO: should the trailing slash be included here?
"/".join([self.s3_rel_path, "dir1", "f1_write_string"]),
"/".join([self.s3_rel_path, "dir1", "f2_write_bytes"]),
},
set(self.s3_pathhandler._ls("/".join([self.s3_full_path, "dir1/"]))),
)
#############################################
# rm
# rm f3
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_11_rm(self):
path = "/".join([self.s3_full_path, "dir2", "f3_write_string"])
self.assertTrue(self.s3_pathhandler._exists(path))
self.assertTrue(self.s3_pathhandler._isfile(path))
self.assertFalse(self.s3_pathhandler._isdir(path))
self.s3_pathhandler._rm(path)
self.assertFalse(self.s3_pathhandler._exists(path))
self.assertFalse(self.s3_pathhandler._isfile(path))
self.assertFalse(self.s3_pathhandler._isdir(path))
#############################################
# get_local_path
# Retrieve f{1,2}
# Check file contents.
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_12_get_local_path(self):
s3_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
s3_path_f2 = "/".join([self.s3_full_path, "dir1", "f2_write_bytes"])
local_path_f1 = self.s3_pathhandler._get_local_path(s3_path_f1)
local_path_f2 = self.s3_pathhandler._get_local_path(s3_path_f2)
with open(local_path_f1, "r") as f:
self.assertEqual(f.read(), "This is a test of writing a string.")
with open(local_path_f2, "rb") as f:
self.assertEqual(f.read(), b"This is a test of writing bytes.")
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_13_get_local_path_idempotent(self):
"""
Call _get_local_path multiple times.
Check that we keep returning the same cached copy instead of redownloading.
"""
s3_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
REPEATS = 3
local_paths = [
self.s3_pathhandler._get_local_path(s3_path_f1) for _ in range(REPEATS)
]
for local_path in local_paths[1:]:
self.assertEqual(local_path, local_paths[0])
with open(local_paths[0], "r") as f:
self.assertEqual(f.read(), "This is a test of writing a string.")
# TODO: make sure it fails if asked for a directory
# TODO: make sure that the returned path is appropriately placed.
##############################################
# copy_from_local
# Upload local copies of f1, f2 -> f3, f4.
# Check contents via open(), and via another get_local_path
##############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_14_copy_from_local(self):
s3_src_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
s3_src_path_f2 = "/".join([self.s3_full_path, "dir1", "f2_write_bytes"])
local_path_f1 = self.s3_pathhandler._get_local_path(s3_src_path_f1)
local_path_f2 = self.s3_pathhandler._get_local_path(s3_src_path_f2)
s3_dst_path_f1 = "/".join(
[self.s3_full_path, "dir2", "f1_write_string_from_local"]
)
s3_dst_path_f2 = "/".join(
[self.s3_full_path, "dir2", "f2_write_bytes_from_local"]
)
self.assertTrue(
self.s3_pathhandler._copy_from_local(local_path_f1, s3_dst_path_f1)
)
self.assertTrue(
self.s3_pathhandler._copy_from_local(local_path_f2, s3_dst_path_f2)
)
#############################################
# symlink
# should fail
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_15_symlink(self):
s3_src_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
s3_dst_path_f1 = "/".join(
[self.s3_full_path, "dir2", "f1_write_string_symlink"]
)
with self.assertRaises(NotImplementedError):
self.s3_pathhandler._symlink(s3_src_path_f1, s3_dst_path_f1)
|
flash_metaseq-main
|
tests/test_s3_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from metaseq.data import MonolingualDataset
from metaseq.tasks.language_modeling import LanguageModelingTask, LanguageModelingConfig
from tests import utils as test_utils
class TestLMContextWindow(unittest.TestCase):
def test_eval_dataloader(self):
dictionary = test_utils.dummy_dictionary(10)
assert len(dictionary) == 14 # 4 extra special symbols
assert dictionary.pad() == 1
dataset = test_utils.TestDataset(
[
torch.tensor([4, 5, 6, 7], dtype=torch.long),
torch.tensor([8, 9, 10, 11], dtype=torch.long),
torch.tensor([12, 13], dtype=torch.long),
]
)
dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)
config = LanguageModelingConfig(tokens_per_sample=4)
task = LanguageModelingTask(config, dictionary)
eval_dataloader = task.eval_lm_dataloader(
dataset=dataset,
batch_size=1,
context_window=2,
)
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1]
assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1]
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11]
assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11]
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13]
assert batch["target"][0].tolist() == [1, 1, 12, 13]
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/test_lm_context_window.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import sys
import tempfile
import unittest
from typing import Optional
from unittest.mock import MagicMock
class TestFileIO(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile: Optional[str] = None
_tmpfile_contents = "Hello, World"
@classmethod
def setUpClass(cls) -> None:
cls._tmpdir = tempfile.mkdtemp()
with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
cls._tmpfile = f.name
f.write(cls._tmpfile_contents)
f.flush()
@classmethod
def tearDownClass(cls) -> None:
# Cleanup temp working dir.
if cls._tmpdir is not None:
shutil.rmtree(cls._tmpdir) # type: ignore
def test_file_io(self):
from metaseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_oss(self):
# Mock fvcore to simulate oss environment.
sys.modules["fvcore"] = MagicMock()
from metaseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_async(self):
# ioPath `PathManager` is initialized after the first `opena` call.
try:
from metaseq.file_io import IOPathPathManager, PathManager
self.assertIsNone(IOPathPathManager)
_asyncfile = os.path.join(self._tmpdir, "async.txt")
f = PathManager.opena(_asyncfile, "wb")
f.close()
from metaseq.file_io import IOPathPathManager
self.assertIsNotNone(IOPathPathManager)
finally:
self.assertTrue(PathManager.async_close())
|
flash_metaseq-main
|
tests/test_file_io.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import torch
from metaseq.data.dictionary import Dictionary
from metaseq.modules import multihead_attention, sinusoidal_positional_embedding
from metaseq.tasks.base_task import LegacyTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), 1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
Return a dummy task and argument parser, which can be used to
create a model/criterion.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
def _test_save_and_load(scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
class TestExportModels(unittest.TestCase):
def test_export_multihead_attention(self):
module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
def test_incremental_state_multihead_attention(self):
module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module1 = torch.jit.script(module1)
module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module2 = torch.jit.script(module2)
state = {}
state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])})
state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])})
v1 = module1.get_incremental_state(state, "key")["a"]
v2 = module2.get_incremental_state(state, "key")["a"]
self.assertEqual(v1, 1)
self.assertEqual(v2, 2)
def test_positional_embedding(self):
module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding(
embedding_dim=8, padding_idx=1
)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/test_export.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import sys
import unittest
import torch
from metaseq.distributed import utils as dist_utils
from .utils import objects_are_equal, spawn_and_init
class DistributedTest(unittest.TestCase):
def setUp(self):
if not torch.cuda.is_available():
raise unittest.SkipTest("CUDA not available, skipping test")
if sys.platform == "win32":
raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
if torch.cuda.device_count() < 2:
raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")
class TestBroadcastObject(DistributedTest):
def test_str(self):
spawn_and_init(
functools.partial(
TestBroadcastObject._test_broadcast_object, "hello world"
),
world_size=2,
)
def test_tensor(self):
spawn_and_init(
functools.partial(
TestBroadcastObject._test_broadcast_object,
torch.rand(5),
),
world_size=2,
)
def test_complex(self):
spawn_and_init(
functools.partial(
TestBroadcastObject._test_broadcast_object,
{
"a": "1",
"b": [2, torch.rand(2, 3), 3],
"c": (torch.rand(2, 3), 4),
"d": {5, torch.rand(5)},
"e": torch.rand(5),
"f": torch.rand(5).int().cuda(),
},
),
world_size=2,
)
@staticmethod
def _test_broadcast_object(ref_obj, rank, group):
obj = dist_utils.broadcast_object(
ref_obj if rank == 0 else None, src_rank=0, group=group
)
assert objects_are_equal(ref_obj, obj)
class TestAllGatherList(DistributedTest):
def test_str_equality(self):
spawn_and_init(
functools.partial(
TestAllGatherList._test_all_gather_list_equality,
"hello world",
),
world_size=2,
)
def test_tensor_equality(self):
spawn_and_init(
functools.partial(
TestAllGatherList._test_all_gather_list_equality,
torch.rand(5),
),
world_size=2,
)
def test_complex_equality(self):
spawn_and_init(
functools.partial(
TestAllGatherList._test_all_gather_list_equality,
{
"a": "1",
"b": [2, torch.rand(2, 3), 3],
"c": (torch.rand(2, 3), 4),
"d": {5, torch.rand(5)},
"e": torch.rand(5),
"f": torch.rand(5).int(),
},
),
world_size=2,
)
@staticmethod
def _test_all_gather_list_equality(ref_obj, rank, group):
objs = dist_utils.all_gather_list(ref_obj, group)
for obj in objs:
assert objects_are_equal(ref_obj, obj)
def test_rank_tensor(self):
spawn_and_init(
TestAllGatherList._test_all_gather_list_rank_tensor, world_size=2
)
@staticmethod
def _test_all_gather_list_rank_tensor(rank, group):
obj = torch.tensor([rank])
objs = dist_utils.all_gather_list(obj, group)
for i, obj in enumerate(objs):
assert obj.item() == i
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
tests/distributed/test_utils.py
|
flash_metaseq-main
|
tests/distributed/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import tempfile
import torch
def spawn_and_init(fn, world_size, args=None):
if args is None:
args = ()
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
torch.multiprocessing.spawn(
fn=functools.partial(init_and_run, fn, args),
args=(
world_size,
tmp_file.name,
),
nprocs=world_size,
join=True,
)
def distributed_init(rank, world_size, tmp_file):
torch.distributed.init_process_group(
backend="nccl",
init_method="file://{}".format(tmp_file),
world_size=world_size,
rank=rank,
)
torch.cuda.set_device(rank)
def init_and_run(fn, args, rank, world_size, tmp_file):
distributed_init(rank, world_size, tmp_file)
group = torch.distributed.new_group()
fn(rank, group, *args)
def objects_are_equal(a, b) -> bool:
if type(a) is not type(b):
return False
if isinstance(a, dict):
if set(a.keys()) != set(b.keys()):
return False
for k in a.keys():
if not objects_are_equal(a[k], b[k]):
return False
return True
elif isinstance(a, (list, tuple, set)):
if len(a) != len(b):
return False
return all(objects_are_equal(x, y) for x, y in zip(a, b))
elif torch.is_tensor(a):
return (
a.size() == b.size()
and a.dtype == b.dtype
and a.device == b.device
and torch.all(a == b)
)
else:
return a == b
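# Quick illustration (comments only) of the recursive comparison above:
# >>> objects_are_equal({"a": torch.ones(2)}, {"a": torch.ones(2)})
# True
# >>> objects_are_equal((1, 2), [1, 2])
# False        # container types differ, so the objects are not equal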
|
flash_metaseq-main
|
tests/distributed/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import string
import tempfile
import unittest
import torch
from metaseq import tokenizer
from metaseq.data import Dictionary
class TestDictionary(unittest.TestCase):
def test_finalize(self):
txt = [
"A B C D",
"B C D",
"C D",
"D",
]
ref_ids1 = list(
map(
torch.IntTensor,
[
[4, 5, 6, 7, 2],
[5, 6, 7, 2],
[6, 7, 2],
[7, 2],
],
)
)
ref_ids2 = list(
map(
torch.IntTensor,
[
[7, 6, 5, 4, 2],
[6, 5, 4, 2],
[5, 4, 2],
[4, 2],
],
)
)
# build dictionary
d = Dictionary()
for line in txt:
d.encode_line(line, add_if_not_exist=True)
def get_ids(dictionary):
ids = []
for line in txt:
ids.append(dictionary.encode_line(line, add_if_not_exist=False))
return ids
def assertMatch(ids, ref_ids):
for toks, ref_toks in zip(ids, ref_ids):
self.assertEqual(toks.size(), ref_toks.size())
self.assertEqual(0, (toks != ref_toks).sum().item())
ids = get_ids(d)
assertMatch(ids, ref_ids1)
# check finalized dictionary
d.finalize()
finalized_ids = get_ids(d)
assertMatch(finalized_ids, ref_ids2)
# write to disk and reload
with tempfile.NamedTemporaryFile(mode="w") as tmp_dict:
d.save(tmp_dict.name)
d = Dictionary.load(tmp_dict.name)
reload_ids = get_ids(d)
assertMatch(reload_ids, ref_ids2)
assertMatch(finalized_ids, reload_ids)
def test_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999 #metaseq:overwrite\n"
"<s> 999 #metaseq:overwrite\n"
"</s> 999 #metaseq:overwrite\n"
", 999\n"
"▁de 999\n"
)
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index("<pad>"), 1)
self.assertEqual(d.index("foo"), 3)
self.assertEqual(d.index("<unk>"), 4)
self.assertEqual(d.index("<s>"), 5)
self.assertEqual(d.index("</s>"), 6)
self.assertEqual(d.index(","), 7)
self.assertEqual(d.index("▁de"), 8)
def test_no_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n"
)
d = Dictionary()
with self.assertRaisesRegex(RuntimeError, "Duplicate"):
d.add_from_file(dict_file)
def test_space(self):
# for example, character models treat space as a symbol
dict_file = io.StringIO(" 999\n" "a 999\n" "b 999\n")
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index(" "), 4)
self.assertEqual(d.index("a"), 5)
self.assertEqual(d.index("b"), 6)
def test_add_file_to_dict(self):
counts = {}
num_lines = 100
per_line = 10
with tempfile.TemporaryDirectory("test_sampling") as data_dir:
filename = os.path.join(data_dir, "dummy.txt")
with open(filename, "w", encoding="utf-8") as data:
for c in string.ascii_letters:
line = f"{c} " * per_line
for _ in range(num_lines):
data.write(f"{line}\n")
counts[c] = per_line * num_lines
per_line += 5
dict = Dictionary()
Dictionary.add_file_to_dictionary(
filename, dict, tokenizer.tokenize_line, 10
)
dict.finalize(threshold=0, nwords=-1, padding_factor=8)
for c in string.ascii_letters:
count = dict.get_count(dict.index(c))
self.assertEqual(
counts[c], count, f"{c} count is {count} but should be {counts[c]}"
)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_dictionary.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from metaseq import utils
class TestUtils(unittest.TestCase):
def test_make_positions(self):
pad = 1
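        # Clarifying note: make_positions numbers real tokens consecutively
        # starting at pad + 1 (here 2) and leaves padding positions at the pad
        # index itself, as the expected outputs below illustrate.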
left_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[1, 9, 9, 9, 9],
[1, 1, 1, 9, 9],
]
)
left_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 2, 3, 4, 5],
[1, 1, 1, 2, 3],
]
)
right_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 1],
[9, 9, 1, 1, 1],
]
)
right_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[2, 3, 4, 5, 1],
[2, 3, 1, 1, 1],
]
)
self.assertAlmostEqual(
left_pad_output,
utils.make_positions(left_pad_input, pad),
)
self.assertAlmostEqual(
right_pad_output,
utils.make_positions(right_pad_input, pad),
)
def test_clip_grad_norm_(self):
params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, 0.0)
params = [torch.nn.Parameter(torch.zeros(5)) for _ in range(3)]
for p in params:
p.grad = torch.arange(1.0, 6.0)
grad_norm = utils.clip_grad_norm_(params, 1.0, "l2")
exp_grad_norm = torch.arange(1.0, 6.0).repeat(3).norm()
self.assertTrue(torch.is_tensor(grad_norm))
self.assertAlmostEqual(grad_norm, exp_grad_norm)
grad_norm = utils.clip_grad_norm_(params, 1.0, "l2")
self.assertAlmostEqual(grad_norm, torch.tensor(1.0))
for p in params:
p.grad = torch.arange(1.0, 6.0)
grad_norm = utils.clip_grad_norm_(params, 1.0, "inf")
exp_grad_norm = torch.arange(1.0, 6.0).max()
self.assertEqual(grad_norm, exp_grad_norm)
def test_resolve_max_positions_with_tuple(self):
resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
self.assertEqual(resolved, (2000, 100, 2000))
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import uuid
from metaseq import metrics
class TestMetrics(unittest.TestCase):
def test_nesting(self):
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate() as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1.5)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_new_root(self):
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_nested_new_root(self):
with metrics.aggregate() as layer1:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as layer2:
metrics.log_scalar("loss", 2)
with metrics.aggregate() as layer3:
metrics.log_scalar("loss", 3)
with metrics.aggregate(new_root=True) as layer4:
metrics.log_scalar("loss", 4)
metrics.log_scalar("loss", 1.5)
self.assertEqual(layer4.get_smoothed_values()["loss"], 4)
self.assertEqual(layer3.get_smoothed_values()["loss"], 3)
self.assertEqual(layer2.get_smoothed_values()["loss"], 2.5)
self.assertEqual(layer1.get_smoothed_values()["loss"], 1.25)
def test_named(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
metrics.log_scalar("loss", 3)
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 1.5)
def test_nested_duplicate_names(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
with metrics.aggregate() as other:
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
metrics.log_scalar("loss", 6)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 3)
self.assertEqual(other.get_smoothed_values()["loss"], 2)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_metrics.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import tests.utils as test_utils
import torch
from metaseq.sequence_scorer import SequenceScorer
class TestSequenceScorer(unittest.TestCase):
def test_sequence_scorer(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
eos = d.eos()
w1 = 4
w2 = 5
# construct dataloader
data = [
{
"source": torch.LongTensor([w1, w2, eos]),
"target": torch.LongTensor([w1, w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, eos]),
},
]
data_itr = test_utils.dummy_dataloader(data)
# specify expected output probabilities
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.6, 0.4], # sentence 1
[0.0, unk, 0.4, 0.6], # sentence 2
[0.0, unk, 0.7, 0.3], # sentence 3
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.2, 0.7], # sentence 1
[0.0, unk, 0.8, 0.2], # sentence 2
[0.7, unk, 0.1, 0.2], # sentence 3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[0.10, unk, 0.50, 0.4], # sentence 1
[0.15, unk, 0.15, 0.7], # sentence 2
[0.00, unk, 0.00, 0.0], # sentence 3
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2
[0.9, unk, 0.05, 0.05], # sentence 1
[0.0, unk, 0.00, 0.0], # sentence 2
[0.0, unk, 0.00, 0.0], # sentence 3
]
),
]
expected_scores = [
[0.6, 0.7, 0.5, 0.9], # sentence 1
[0.6, 0.8, 0.15], # sentence 2
[0.3, 0.7], # sentence 3
]
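        # Clarifying note: each expected score is the probability assigned to
        # the actual target token at that step in beam_probs above, e.g.
        # sentence 1 (targets w1, w2, w1, eos) reads off 0.6, 0.7, 0.5, 0.9.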
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
scorer = SequenceScorer(task.target_dictionary)
for sample in data_itr:
hypos = task.inference_step(scorer, [model], sample)
for id, hypos_id in zip(sample["id"].tolist(), hypos):
self.assertHypoTokens(hypos_id[0], data[id]["target"])
self.assertHypoScore(hypos_id[0], expected_scores[id])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_sequence_scorer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from metaseq.modules.multihead_attention import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
def test_append_prev_key_padding_mask(self):
bsz = 1
src_len = 4
cases = [
# no padding mask
(None, None, None),
# current padding mask only
(
torch.tensor([[1]]).bool(),
None,
torch.tensor([[0, 0, 0, 1]]).bool(),
),
# previous padding mask only
(
None,
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 0]]).bool(),
),
# both padding masks
(
torch.tensor([[1]]).bool(),
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 1]]).bool(),
),
]
for c in cases:
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
c[0],
c[1],
batch_size=bsz,
src_len=src_len,
static_kv=False,
)
if key_padding_mask is not None:
self.assertTrue(
torch.all(torch.eq(key_padding_mask, c[2])),
f"Unexpected resultant key padding mask: {key_padding_mask}"
f" given current: {c[0]} and previous: {c[1]}",
)
self.assertEqual(key_padding_mask.size(0), bsz)
self.assertEqual(key_padding_mask.size(1), src_len)
else:
self.assertIsNone(c[2])
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_multihead_attention.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
from metaseq.data.data_utils_fast import batch_by_size_fn
from metaseq.data.data_utils_fast import batch_by_size_vec
class TestBatchBySize(unittest.TestCase):
@classmethod
def batch_by_size_baseline(
cls,
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
):
"""Simple, reliable and slow implementation of batch by size"""
batches = []
start = 0
while start < len(indices):
for end in range(start + 1, len(indices) + 1):
max_val = max(num_tokens_vec[pos] for pos in range(start, end))
sent_count = end - start
num_tokens = max_val * sent_count
overflow = num_tokens > max_tokens > 0 or sent_count > max_sentences > 0
terminate = overflow or end == len(indices)
if overflow:
sent_count -= 1
if terminate:
if sent_count > bsz_mult:
sent_count = sent_count - sent_count % bsz_mult
batches.append(indices[start : start + sent_count])
start = start + sent_count
break
return batches
@classmethod
def _get_error_message(
cls, max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results
):
return f"""Reference batch_by_size implementation should produce
same output as the baseline method.
Params:
max_sentences={max_sentences},
max_tokens={max_tokens},
bsz_mult={bsz_mult},
num_tokens_vec={num_tokens_vec},
expected_batches={validation},
returned_batches={results}"""
def _compare_results(
self,
indices_len,
batch_by_size_impl,
max_sentences,
max_tokens,
bsz_mult,
num_tokens_vec,
):
indices = np.array(list(range(indices_len)))
validation = self.batch_by_size_baseline(
indices,
num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
bsz_mult=bsz_mult,
)
results = batch_by_size_impl(
indices,
num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
bsz_mult=bsz_mult,
)
error_msg = self._get_error_message(
max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results
)
self.assertEqual(len(validation), len(results), error_msg)
for first, second in zip(validation, results):
self.assertTrue(np.array_equal(first, second), error_msg)
def _run_compare_with_baseline_sweep(self, batch_by_size_impl):
"""Compare reference batch_by_size implementation with batch_by_size_baseline
across a dense grid of hyperparam values"""
MAX_MAX_TOKENS = 10
NUM_TOKENS_VECS_COUNT = 5
for indices_len in [10, 11]: # try odd and even len of indices
for max_sentences in range(0, indices_len + 2):
for max_tokens in range(0, MAX_MAX_TOKENS):
for bsz_mult in range(1, max(MAX_MAX_TOKENS, indices_len) + 2):
for _ in range(NUM_TOKENS_VECS_COUNT):
num_tokens_vec = np.random.randint(
0, max_tokens + 1, size=indices_len
)
self._compare_results(
indices_len,
batch_by_size_impl,
max_sentences,
max_tokens,
bsz_mult,
num_tokens_vec,
)
class TestBatchBySizeVec(TestBatchBySize):
def test_compare_with_baseline(self):
self._run_compare_with_baseline_sweep(batch_by_size_vec)
class TestBatchBySizeFn(TestBatchBySize):
def test_compare_with_baseline(self):
def batch_by_size_fn_wrapper(
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
):
def num_tokens_fn(idx):
return num_tokens_vec[idx]
return batch_by_size_fn(
indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult
)
self._run_compare_with_baseline_sweep(batch_by_size_fn_wrapper)
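# Worked example (illustrative sketch added for clarity, not an original test):
# four sentences of 2 tokens each under a 4-token budget, with no sentence
# limit and bsz_mult=1, are grouped by the baseline into two batches of two.
if __name__ == "__main__":
    _demo = TestBatchBySize.batch_by_size_baseline(
        np.array([0, 1, 2, 3]),
        np.array([2, 2, 2, 2]),
        max_tokens=4,
        max_sentences=0,
        bsz_mult=1,
    )
    assert [b.tolist() for b in _demo] == [[0, 1], [2, 3]]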
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_data_utils.py
|
flash_metaseq-main
|
cpu_tests/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from metaseq.data import StreamingTokenBlockDataset
class TensorListDataset(torch.utils.data.Dataset):
def __init__(self, tensor_list):
self.tensor_list = tensor_list
def __getitem__(self, index):
return self.tensor_list[index]
def __len__(self):
return len(self.tensor_list)
def get_simple_dataset():
return TensorListDataset(
[
torch.LongTensor([0]),
torch.LongTensor([1, 2, 3]),
torch.LongTensor([4]),
torch.LongTensor([5]),
torch.LongTensor([6, 7, 8]),
torch.LongTensor([9, 10]),
]
)
class TestStreamingTokenBlockDataset(unittest.TestCase):
def test_drop_last_True(self):
self._test_simple(drop_last=True)
def test_drop_last_False(self):
self._test_simple(drop_last=False)
def test_buffer_drop_last_True(self):
self._test_buffer(drop_last=True)
def test_buffer_drop_last_False(self):
self._test_buffer(drop_last=False)
def test_very_large_buffer_drop_last_True(self):
self._test_very_large_buffer(drop_last=True)
def test_very_large_buffer_drop_last_False(self):
self._test_very_large_buffer(drop_last=False)
def _test_simple(self, drop_last):
dataset = get_simple_dataset()
token_block_ds = StreamingTokenBlockDataset(
dataset,
block_size=2,
drop_last=drop_last,
padding_idx=-1,
)
dataloader = iter(token_block_ds)
assert next(dataloader)["block"].tolist() == [0, 1]
assert next(dataloader)["block"].tolist() == [2, 3]
assert next(dataloader)["block"].tolist() == [4, 5]
assert next(dataloader)["block"].tolist() == [6, 7]
assert next(dataloader)["block"].tolist() == [8, 9]
if not drop_last:
assert next(dataloader)["block"].tolist() == [10, -1]
with self.assertRaises(StopIteration):
next(dataloader)
def _test_buffer(self, drop_last, seed=42):
# maintain shadow rng to ensure iteration order matches expectations
shadow_rng = np.random.default_rng(2273 + seed)
dataset = get_simple_dataset()
token_block_ds = StreamingTokenBlockDataset(
dataset,
block_size=2,
drop_last=drop_last,
padding_idx=-1,
shuffle_buffer_size=3,
seed=seed,
)
dataloader = iter(token_block_ds)
# we expect token_block_ds to buffer the first three blocks,
# then return random blocks and replace them thereafter
expected_buffer = [
[0, 1],
[2, 3],
[4, 5],
]
next_idx = shadow_rng.integers(3)
assert next(dataloader)["block"].tolist() == expected_buffer[next_idx]
expected_buffer[next_idx] = [6, 7]
next_idx = shadow_rng.integers(3)
assert next(dataloader)["block"].tolist() == expected_buffer[next_idx]
expected_buffer[next_idx] = [8, 9]
next_idx = shadow_rng.integers(3)
assert next(dataloader)["block"].tolist() == expected_buffer[next_idx]
if not drop_last:
expected_buffer[next_idx] = [10, -1]
else:
expected_buffer.pop(next_idx)
while expected_buffer:
next_idx = shadow_rng.integers(len(expected_buffer))
assert next(dataloader)["block"].tolist() == expected_buffer[next_idx]
expected_buffer.pop(next_idx)
with self.assertRaises(StopIteration):
next(dataloader)
def _test_very_large_buffer(self, drop_last, seed=42):
# maintain shadow rng to ensure iteration order matches expectations
shadow_rng = np.random.default_rng(2273 + seed)
dataset = get_simple_dataset()
token_block_ds = StreamingTokenBlockDataset(
dataset,
block_size=2,
drop_last=drop_last,
padding_idx=-1,
shuffle_buffer_size=100, # bigger than full dataset
seed=seed,
)
dataloader = iter(token_block_ds)
expected_buffer = [
[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
]
if not drop_last:
expected_buffer.append([10, -1])
while expected_buffer:
next_idx = shadow_rng.integers(len(expected_buffer))
assert next(dataloader)["block"].tolist() == expected_buffer[next_idx]
expected_buffer.pop(next_idx)
with self.assertRaises(StopIteration):
next(dataloader)
def _test_break_mode_eos_pad_8(self):
dataset = TensorListDataset(
[
torch.LongTensor([0]),
torch.LongTensor([1, 2, 3]),
torch.LongTensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
]
)
token_block_ds = StreamingTokenBlockDataset(
dataset,
block_size=10,
drop_last=False,
padding_idx=-1,
break_mode="eos_pad_8",
)
expected_buffer = [
[0, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 2, 3, -1, -1, -1, -1, -1, -1],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
]
dataloader = iter(token_block_ds)
assert (
next(dataloader)["block"].tolist() == expected_buffer[0]
) # padding to multiple of 8 + 1
assert (
next(dataloader)["block"].tolist() == expected_buffer[1]
) # padding to multiple of 8 + 1
assert (
next(dataloader)["block"].tolist() == expected_buffer[2]
) # padding to block size
with self.assertRaises(StopIteration):
next(dataloader)
|
flash_metaseq-main
|
cpu_tests/test_streaming_token_block_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from metaseq.data import PartitionedStreamingDataset
class TensorListIterableDataset(torch.utils.data.IterableDataset):
def __init__(self, tensor_list):
self.tensor_list = tensor_list
def __iter__(self):
for tensor in self.tensor_list:
yield tensor
def get_simple_dataset():
return TensorListIterableDataset(
[
torch.LongTensor([0, 1]),
torch.LongTensor([2, 3]),
torch.LongTensor([4, 5]),
torch.LongTensor([6, 7]),
torch.LongTensor([8, 9]),
]
)
class TestPartitionedStreamingDataset(unittest.TestCase):
def test_drop_last_True_shard_0(self):
self._test_simple(drop_last=True, shard_id=0)
def test_drop_last_True_shard_1(self):
self._test_simple(drop_last=True, shard_id=1)
def test_drop_last_False_shard_0(self):
self._test_simple(drop_last=False, shard_id=0)
def test_drop_last_False_shard_1(self):
self._test_simple(drop_last=False, shard_id=1)
def _test_simple(self, drop_last, shard_id):
dataset = get_simple_dataset()
partitioned_ds = PartitionedStreamingDataset(
dataset,
num_shards=2,
shard_id=shard_id,
drop_last=drop_last,
)
dataloader = iter(partitioned_ds)
if shard_id == 0:
assert next(dataloader).tolist() == [0, 1]
assert next(dataloader).tolist() == [4, 5]
if not drop_last:
assert next(dataloader).tolist() == [8, 9]
else:
assert shard_id == 1
assert next(dataloader).tolist() == [2, 3]
assert next(dataloader).tolist() == [6, 7]
if not drop_last:
assert next(dataloader) is None
with self.assertRaises(StopIteration):
next(dataloader)
|
flash_metaseq-main
|
cpu_tests/test_partitioned_streaming_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import tests.utils as test_utils
import torch
from metaseq.data import TokenBlockDataset
class TestTokenBlockDataset(unittest.TestCase):
def _build_dataset(self, data, **kwargs):
sizes = [len(x) for x in data]
underlying_ds = test_utils.TestDataset(data)
return TokenBlockDataset(underlying_ds, sizes, **kwargs)
def test_eos_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [1])
self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
self.assertEqual(ds[2].tolist(), [1])
def test_block_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none")
self.assertEqual(ds[0].tolist(), [5, 4, 3])
self.assertEqual(ds[1].tolist(), [2, 1, 8])
self.assertEqual(ds[2].tolist(), [7, 6, 1])
self.assertEqual(ds[3].tolist(), [9, 1])
def test_complete_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=6, pad=0, eos=1, break_mode="complete"
)
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
data = [
torch.tensor([4, 3, 2, 1], dtype=torch.long),
torch.tensor([5, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([6, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=3, pad=0, eos=1, break_mode="complete"
)
self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [5, 1, 1])
self.assertEqual(ds[2].tolist(), [6, 1])
def test_4billion_tokens(self):
"""Regression test for numpy type promotion issue https://github.com/numpy/numpy/issues/5745"""
data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000
ds = self._build_dataset(
data, block_size=6, pad=0, eos=1, break_mode="complete"
)
ds[-1] # __getitem__ works
start, end = ds.slice_indices[-1]
assert end > 4294967295 # data must be sufficiently large to overflow uint32
assert not isinstance(
end + 1, float
) # this would also raise, since np.uint64(1) + 1 => 2.0
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_token_block_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import tempfile
import unittest
from typing import Optional
class TestFileChunker(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile: Optional[str] = None
_line_content = "Hello, World\n"
_num_bytes = None
_num_lines = 200
_num_splits = 20
@classmethod
def setUpClass(cls) -> None:
cls._num_bytes = len(cls._line_content.encode("utf-8"))
cls._tmpdir = tempfile.mkdtemp()
with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
cls._tmpfile = f.name
for _i in range(cls._num_lines):
f.write(cls._line_content)
f.flush()
@classmethod
def tearDownClass(cls) -> None:
# Cleanup temp working dir.
if cls._tmpdir is not None:
shutil.rmtree(cls._tmpdir) # type: ignore
def test_find_offsets(self):
from metaseq.file_chunker_utils import find_offsets
offsets = find_offsets(self._tmpfile, self._num_splits)
self.assertEqual(len(offsets), self._num_splits + 1)
(zero, *real_offsets, last) = offsets
self.assertEqual(zero, 0)
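        # Clarifying note (inferred from the setup above): the file holds 200
        # copies of the 13-byte line "Hello, World\n" (2600 bytes), so each of
        # the 20 nominal chunks is 130 bytes; each boundary is then advanced to
        # the end of the line it falls on, which is where the extra
        # self._num_bytes term in the expected offsets below comes from.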
for i, o in enumerate(real_offsets):
self.assertEqual(
o,
self._num_bytes
+ ((i + 1) * self._num_bytes * self._num_lines / self._num_splits),
)
self.assertEqual(last, self._num_bytes * self._num_lines)
def test_readchunks(self):
from metaseq.file_chunker_utils import Chunker, find_offsets
offsets = find_offsets(self._tmpfile, self._num_splits)
for start, end in zip(offsets, offsets[1:]):
with Chunker(self._tmpfile, start, end) as lines:
all_lines = list(lines)
num_lines = self._num_lines / self._num_splits
self.assertAlmostEqual(
len(all_lines), num_lines, delta=1
                ) # because we split on the bytes, we might end up with one more/less line in a chunk
self.assertListEqual(
all_lines, [self._line_content for _ in range(len(all_lines))]
)
|
flash_metaseq-main
|
cpu_tests/test_file_chunker_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from metaseq.data import StreamingShuffleDataset
class TensorListDataset(torch.utils.data.Dataset):
def __init__(self, tensor_list):
self.tensor_list = tensor_list
def __getitem__(self, index):
return self.tensor_list[index]
def __len__(self):
return len(self.tensor_list)
def get_simple_dataset():
return TensorListDataset(
[
torch.LongTensor([0]),
torch.LongTensor([1, 2, 3]),
torch.LongTensor([4]),
torch.LongTensor([5]),
torch.LongTensor([6, 7, 8]),
torch.LongTensor([9, 10]),
]
)
class TestStreamingShuffleDataset(unittest.TestCase):
def test_set_epoch(self):
dataset = get_simple_dataset()
shuffle_ds = StreamingShuffleDataset(dataset, seed=0)
shuffle_ds.set_epoch(1)
ref_epoch1 = list(shuffle_ds)
shuffle_ds.set_epoch(2)
ref_epoch2 = list(shuffle_ds)
self.assertTrue(
torch.cat(ref_epoch1).tolist() == torch.cat(ref_epoch1).tolist()
)
self.assertFalse(
torch.cat(ref_epoch1).tolist() == torch.cat(ref_epoch2).tolist()
)
shuffle_ds.set_epoch(1)
self._compare(ref_epoch1, shuffle_ds)
shuffle_ds.set_epoch(2)
self._compare(ref_epoch2, shuffle_ds)
shuffle_ds.set_epoch(2)
self._compare(ref_epoch2, shuffle_ds)
shuffle_ds.set_epoch(1)
self._compare(ref_epoch1, shuffle_ds)
def _compare(self, reference, dataset):
ref_itr = iter(reference)
ds_itr = iter(dataset)
for ref, ds in zip(ref_itr, ds_itr):
self.assertEqual(ref.tolist(), ds.tolist())
with self.assertRaises(StopIteration):
next(ref_itr)
with self.assertRaises(StopIteration):
next(ds_itr)
|
flash_metaseq-main
|
cpu_tests/test_streaming_shuffle_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from metaseq.data import iterators
class TensorListIterableDataset(torch.utils.data.IterableDataset):
def __init__(self, tensor_list):
self.tensor_list = tensor_list
def __iter__(self):
for tensor in self.tensor_list:
yield tensor
def get_simple_dataset():
return TensorListIterableDataset(
[
torch.LongTensor([0, 1]),
torch.LongTensor([2, 3]),
torch.LongTensor([4, 5]),
torch.LongTensor([6, 7]),
torch.LongTensor([8, 9]),
]
)
class TestStreamingIterators(unittest.TestCase):
def test_streaming_counting_iterator(self):
ref = list(range(10))
itr = iterators.StreamingCountingIterator(ref)
for i, ref_i in enumerate(ref):
self.assertTrue(itr.has_next())
self.assertEqual(itr.n, i)
self.assertEqual(next(itr), ref_i)
self.assertEqual(itr.n, len(ref))
self.assertFalse(itr.has_next())
with self.assertRaises(StopIteration):
next(itr)
def test_streaming_epoch_batch_iterator_drop_last_True(self):
self._test_streaming_epoch_batch_iterator(drop_last=True)
def test_streaming_epoch_batch_iterator_drop_last_False(self):
self._test_streaming_epoch_batch_iterator(drop_last=False)
def test_streaming_epoch_batch_iterator_state_dict(self):
def hook_fn(epoch_batch_itr, itr):
new_epoch_batch_itr = iterators.StreamingEpochBatchIterator(
# recreate the dataset
dataset=get_simple_dataset(),
batch_size=epoch_batch_itr.batch_size,
collate_fn=epoch_batch_itr.collate_fn,
drop_last=epoch_batch_itr.drop_last,
)
new_epoch_batch_itr.load_state_dict(epoch_batch_itr.state_dict())
return new_epoch_batch_itr, new_epoch_batch_itr.next_epoch_itr()
self._test_streaming_epoch_batch_iterator(drop_last=True, hook_fn=hook_fn)
self._test_streaming_epoch_batch_iterator(drop_last=False, hook_fn=hook_fn)
def _test_streaming_epoch_batch_iterator(self, drop_last, hook_fn=None):
dataset = get_simple_dataset()
epoch_batch_itr = iterators.StreamingEpochBatchIterator(
dataset,
batch_size=2,
collate_fn=torch.cat,
drop_last=drop_last,
)
assert epoch_batch_itr.next_epoch_idx == 1
itr = epoch_batch_itr.next_epoch_itr()
assert epoch_batch_itr.iterations_in_epoch == 0
assert not epoch_batch_itr.end_of_epoch()
if hook_fn is not None:
epoch_batch_itr, itr = hook_fn(epoch_batch_itr, itr)
assert next(itr).tolist() == [0, 1, 2, 3]
assert epoch_batch_itr.iterations_in_epoch == 1
assert not epoch_batch_itr.end_of_epoch()
if hook_fn is not None:
epoch_batch_itr, itr = hook_fn(epoch_batch_itr, itr)
assert next(itr).tolist() == [4, 5, 6, 7]
assert epoch_batch_itr.iterations_in_epoch == 2
if not drop_last:
if hook_fn is not None:
epoch_batch_itr, itr = hook_fn(epoch_batch_itr, itr)
assert next(itr).tolist() == [8, 9]
assert epoch_batch_itr.iterations_in_epoch == 3
assert epoch_batch_itr.end_of_epoch()
with self.assertRaises(StopIteration):
next(itr)
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/test_streaming_iterators.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import random
import string
import tempfile
import unittest
from unittest.mock import MagicMock
from metaseq.data import JsonlDataset
def write_one_jsonl_(jsonl_path, num_lines=5, text_len_min=5, text_len_max=50):
data = []
with open(jsonl_path, "w") as h:
for _ in range(num_lines):
text_len = random.choice(range(text_len_min, text_len_max))
data.append(
{"text": "".join(random.choices(string.ascii_letters, k=text_len))}
)
print(json.dumps(data[-1]), file=h)
return data
class TestJsonlDataset(unittest.TestCase):
def test_one_line(self):
self._test_jsonl_dataset(num_lines=1)
def test_multiple_lines(self):
self._test_jsonl_dataset(num_lines=5)
def test_bad_cache(self):
with tempfile.NamedTemporaryFile() as jsonl_file:
write_one_jsonl_(jsonl_file.name, num_lines=3)
dataset = JsonlDataset(jsonl_file.name)
assert len(dataset) == 3
write_one_jsonl_(jsonl_file.name, num_lines=5)
dataset = JsonlDataset(jsonl_file.name)
assert len(dataset) == 3 # it's still 3 because of the cache
os.remove(dataset.cache)
dataset = JsonlDataset(jsonl_file.name)
assert len(dataset) == 5 # it's now 5 because the cache is recreated
def test_tokenizer(self, num_lines=5):
def tokenizer_fn(jsonl):
return list(jsonl["text"])
tokenizer = MagicMock(wraps=tokenizer_fn)
with tempfile.NamedTemporaryFile() as jsonl_file:
orig_data = write_one_jsonl_(jsonl_file.name, num_lines=num_lines)
assert len(orig_data) == num_lines
dataset = JsonlDataset(jsonl_file.name, tokenizer=tokenizer)
assert tokenizer.call_count == 0
foo = dataset[1]
assert foo == list(orig_data[1]["text"])
assert tokenizer.call_count == 1
foo = dataset[1]
assert tokenizer.call_count == 2
foo = dataset[4]
assert foo == list(orig_data[4]["text"])
assert tokenizer.call_count == 3
def _test_jsonl_dataset(self, num_lines, tokenizer=None):
with tempfile.NamedTemporaryFile() as jsonl_file:
orig_data = write_one_jsonl_(jsonl_file.name, num_lines=num_lines)
assert len(orig_data) == num_lines
dataset = JsonlDataset(jsonl_file.name, tokenizer=None)
assert len(dataset) == len(orig_data)
for orig_json, read_json in zip(orig_data, dataset):
assert orig_json.keys() == read_json.keys()
for k in orig_json.keys():
assert orig_json[k] == read_json[k]
|
flash_metaseq-main
|
cpu_tests/test_jsonl_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch import nn
from metaseq.distributed import ModuleProxyWrapper
from tests.distributed.utils import objects_are_equal
class MockDDPWrapper(nn.Module):
"""A simple wrapper with an interface similar to DistributedDataParallel."""
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, x):
return self.module(x)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(5, 10)
self.xyz = "hello"
def forward(self, x):
return self.linear(x)
def get_xyz(self):
return self.xyz
class TestModuleProxyWrapper(unittest.TestCase):
def _get_module(self):
module = Model()
wrapped_module = MockDDPWrapper(module)
wrapped_module = ModuleProxyWrapper(wrapped_module)
return wrapped_module, module
def test_getattr_forwarding(self):
wrapped_module, module = self._get_module()
assert module.xyz == "hello"
assert module.get_xyz() == "hello"
assert wrapped_module.xyz == "hello"
wrapped_module.xyz = "world"
assert wrapped_module.xyz == "world"
assert module.get_xyz() == "hello"
def test_state_dict(self):
wrapped_module, module = self._get_module()
assert objects_are_equal(wrapped_module.state_dict(), module.state_dict())
def test_load_state_dict(self):
wrapped_module, module = self._get_module()
wrapped_module.load_state_dict(module.state_dict())
input = torch.rand(4, 5)
torch.testing.assert_allclose(wrapped_module(input), module(input))
def test_forward(self):
wrapped_module, module = self._get_module()
input = torch.rand(4, 5)
torch.testing.assert_allclose(wrapped_module(input), module(input))
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/distributed/test_module_proxy_wrapper.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import signal
import time
import unittest
import torch
from torch import nn
from metaseq.distributed import DistributedTimeoutWrapper
class ModuleWithDelay(nn.Module):
def __init__(self, delay):
super().__init__()
self.delay = delay
def forward(self, x):
time.sleep(self.delay)
return x
class TestDistributedTimeoutWrapper(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_no_timeout(self):
module = DistributedTimeoutWrapper(ModuleWithDelay(1), 0, signal.SIGINT)
module(torch.rand(5))
module.stop_timeout()
def test_timeout_safe(self):
module = DistributedTimeoutWrapper(ModuleWithDelay(1), 10, signal.SIGINT)
module(torch.rand(5))
module.stop_timeout()
def test_timeout_killed(self):
with self.assertRaises(KeyboardInterrupt):
module = DistributedTimeoutWrapper(ModuleWithDelay(5), 1, signal.SIGINT)
module(torch.rand(5))
module.stop_timeout()
if __name__ == "__main__":
unittest.main()
|
flash_metaseq-main
|
cpu_tests/distributed/test_distributed_timeout_wrapper.py
|
flash_metaseq-main
|
cpu_tests/distributed/__init__.py
|
|
#!/usr/bin/env python3 -u
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Host the demo.
Launch with `python -m metaseq_cli.interactive_hosted` to run locally.
See docs/api.md for more information.
"""
import os
import queue
import pkg_resources
import random
import shutil
import threading
import torch
from flask import Flask, request
from metaseq import options
from metaseq.dataclass.configs import MetaseqConfig
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
from metaseq.distributed import utils as dist_utils
from metaseq.hub_utils import GeneratorInterface
from metaseq.service.queue import PriorityQueueRingShard
from metaseq.service.workers import WorkItem
from metaseq.service.constants import (
MAX_SEQ_LEN,
MAX_BATCH_TOKENS,
DEFAULT_PORT,
TOTAL_WORLD_SIZE,
CHECKPOINT_LOCAL,
CHECKPOINT_FOLDER,
LAUNCH_ARGS,
)
from metaseq.service.utils import get_my_ip, encode_fn, build_logger
from metaseq.service.responses import OAIResponse
app = Flask(__name__)
# global state (mutable!)
cfg = None
port = DEFAULT_PORT
BATCH_QUEUE = PriorityQueueRingShard()
logger = build_logger()
def batching_loop(timeout=100, max_tokens=MAX_BATCH_TOKENS):
"""
batching_loop is an infinite loop responsible for executing generations.
GPUs benefit from batching requests, but we expect workloads to come
in non-uniformly. This loop groups requests together (via BATCH_QUEUE)
and executes them in one batch. In order to keep latency low, unfilled
batches are executed within a window of :timeout: milliseconds.
    batching_loop also performs dynamic batching, grouping like-sized workloads
    together to minimize the amount of padding. As a result, the batching loop
    gives preferential treatment to smaller workloads. At the moment there is
    no TTL logic to enforce a maximum wait time.
For a rough overview of dynamic batching, see
https://parl.ai/docs/tutorial_worlds.html#dynamic-batching.
:param timeout: The max queue time before a non-full batch is launched.
    :param max_tokens: the maximum number of tokens that can be processed
        concurrently. This is model-specific and set empirically.
"""
# TODO(roller):
# - group by generation type, topp etc, as we cannot share these
# - modify timeout logic to be cumulative
global BATCH_QUEUE
batch = []
while True:
try:
# for now, we only have 1 worker, so can always index to shard 0
target_queue = BATCH_QUEUE.queue_shards[0].get_largest_queue()
if not target_queue:
continue
# dynamic batching: group like-sized items to reduce the cost
# of padding. See PR#20 for additional context.
item = target_queue.get(timeout=timeout / 1000)
# accumulate the batch until it gets too big
longest = max([item] + batch).cost
batch_cost = longest * (len(batch) + 1)
if batch and batch_cost > max_tokens:
# we're over budget, put it back in the queue
target_queue.put(item)
raise queue.Empty
else:
# batch is empty or under budget
batch.append(item)
except queue.Empty:
if batch:
request_object = {
"inputs": [],
"min_tokens": [],
"max_tokens": [],
}
for work_item in batch:
ro = work_item.data
request_object["inputs"].append(ro["input"])
request_object["min_tokens"].append(ro.get("min_tokens", 0))
request_object["max_tokens"].append(
ro.get("max_tokens", MAX_SEQ_LEN)
)
# assumption: everyone has the same remaining args
for key in [
"temperature",
"top_p",
"n",
"best_of",
"echo",
"logprobs",
"stop",
]:
if key in ro:
request_object[key] = ro[key]
# do the actual generations
request_object["seed"] = random.randint(1, 20000)
dist_utils.broadcast_object(
request_object, src_rank=0, group=dist_utils.get_global_group()
)
generations = generator.generate(**request_object)
# broadcast them back
for work_item, gen in zip(batch, generations):
work_item.return_queue.put((work_item.uid, gen))
batch.clear()
else:
# back to the loop
continue
def worker_main(cfg1: MetaseqConfig, namespace_args=None):
# disable multithreading in tokenizers and torch, as different Flask threads
# may then fight for resources.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.set_num_threads(1)
global generator
global MODE
# make sure generations are stochastic since we have many workers
torch.manual_seed(random.randint(1, 20000))
torch.cuda.manual_seed(random.randint(1, 20000))
MODE = "worker"
cfg = cfg1
generator = GeneratorInterface(cfg)
models = generator.load_model() # noqa: F841
logger.info(f"loaded model {cfg.distributed_training.distributed_rank}")
request_object = dist_utils.broadcast_object(
None, src_rank=0, group=dist_utils.get_global_group()
)
if torch.distributed.get_rank() == 0:
logger.info(f"Worker engaged! {get_my_ip()}:{port}")
thread = threading.Thread(target=batching_loop, daemon=True)
thread.start()
app.run(host="0.0.0.0", port=port, threaded=True)
else:
# useful in FSDP setting
logger.info(f"Looping engaged! {get_my_ip()}:{port}")
while True:
request_object = dist_utils.broadcast_object(
None, src_rank=0, group=dist_utils.get_global_group()
)
_ = generator.generate(**request_object)
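# Control-flow note (clarifying comment): rank 0 serves the Flask app and runs
# batching_loop, while every other rank blocks on broadcast_object() and simply
# mirrors the generate() calls, which keeps the sharded workers in lockstep
# with rank 0 (the code above notes this is useful in the FSDP setting).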
@app.route("/completions", methods=["POST"])
@app.route("/v1/engines/<engine>/completions", methods=["POST"])
@app.route("/v2/engines/<engine>/completions", methods=["POST"])
@app.route("/engines/<engine>/completions", methods=["POST"])
def completions(engine=None):
# prompt can be 4 types:
# - str. Basic case. Return one generation.
# - list of ints. Pretokenized. Return one generation
# - list of str. Multiple generations, one per prompt
# - list of list of ints. Pretokenized multiple generations.
# our approach is to turn everything into the last case
prompts = request.json["prompt"]
del request.json["prompt"]
generation_args = request.json
if isinstance(prompts, str):
# single string. tokenize and turn it to the single pre-tokenized case
prompts = [encode_fn(generator, prompts)]
assert isinstance(prompts, list)
assert len(prompts) > 0
if isinstance(prompts[0], str):
# multi string
prompts = [encode_fn(generator, p) for p in prompts]
elif isinstance(prompts[0], int):
# single pre-tokenized
prompts = [prompts]
assert isinstance(prompts[0], list)
# final case: multi pre-tokenized
assert len(prompts[0]) > 0
if "min_tokens" in generation_args:
generation_args["min_tokens"] = int(generation_args["min_tokens"])
if "max_tokens" in generation_args:
generation_args["max_tokens"] = int(generation_args["max_tokens"])
if "stop" in generation_args:
stop = generation_args["stop"]
if stop is None:
pass
elif isinstance(stop, str):
stop = [encode_fn(generator, stop)[0]]
else:
stop = [encode_fn(generator, s)[0] for s in stop]
generation_args["stop"] = stop
if "temperature" in generation_args:
generation_args["temperature"] = round(float(generation_args["temperature"]), 1)
else:
generation_args["temperature"] = 1.0
if "top_p" in generation_args:
generation_args["top_p"] = round(float(generation_args["top_p"]), 1)
else:
generation_args["top_p"] = 1.0
# beam search top n
if "n" in generation_args:
generation_args["n"] = int(generation_args["n"])
else:
generation_args["n"] = 1
ret_queue = queue.Queue()
for i, prompt in enumerate(prompts):
request_object = {"input": prompt, **generation_args}
max_len = generation_args.get("max_tokens", 0)
BATCH_QUEUE.put(WorkItem(len(prompt) + max_len, i, ret_queue, request_object))
unordered_results = []
for _ in prompts:
unordered_results.append(ret_queue.get())
# resort results by the original ordering
# weirdly, openai returns to you a flat list if you gave multiple prompts
reordered = sorted(unordered_results, key=lambda x: x[0])
results = []
for prompt, (_, generations) in zip(prompts, reordered):
results += generations
# transform the result into the openai format
return OAIResponse(results).__dict__()
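# Illustrative request sketch (hedged example, not from the original file; the
# actual port comes from DEFAULT_PORT in metaseq.service.constants):
#   curl http://localhost:<DEFAULT_PORT>/completions \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "hello world", "max_tokens": 32, "temperature": 0.7, "top_p": 0.9}'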
@app.route("/")
def index():
# TODO(roller): decouple demopage.html
fn = pkg_resources.resource_filename("metaseq", "service/index.html")
with open(fn) as f:
return f.read()
def _copy_checkpoint_cache():
if CHECKPOINT_LOCAL == CHECKPOINT_FOLDER:
# user didn't have a local SSD
return
if os.path.exists(os.path.dirname(CHECKPOINT_LOCAL)):
logger.info("Local checkpoint copy already exists, skipping copy")
else:
logger.info(
f"Making a local copy of the checkpoint. {CHECKPOINT_FOLDER} -> {CHECKPOINT_LOCAL}"
)
shutil.copytree(CHECKPOINT_FOLDER, os.path.dirname(CHECKPOINT_LOCAL))
def cli_main():
"""
Hosted version of the web UI for generation.
"""
_copy_checkpoint_cache()
global port, MODE, cfg
parser = options.get_generation_parser()
# dumb defaults overriding
parser.set_defaults(lr_scheduler=None, criterion=None)
flat_launch_args = []
for s in LAUNCH_ARGS:
flat_launch_args += s.split()
args = options.parse_args_and_arch(parser, input_args=flat_launch_args)
args.data = os.path.dirname(args.path) # hardcode the data arg
port = DEFAULT_PORT
cfg = convert_namespace_to_omegaconf(args)
cfg.distributed_training.distributed_world_size = TOTAL_WORLD_SIZE
dist_utils.call_main(cfg, worker_main, namespace_args=args)
if __name__ == "__main__":
cli_main()
|
flash_metaseq-main
|
metaseq_cli/interactive_hosted.py
|
flash_metaseq-main
|
metaseq_cli/__init__.py
|
|
#!/usr/bin/env python3 -u
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from argparse import Namespace
from itertools import chain
import torch
from omegaconf import DictConfig
from metaseq import checkpoint_utils, distributed_utils, options, utils
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
from metaseq.logging import metrics, progress_bar
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("metaseq_cli.validate")
def main(cfg: DictConfig, override_args=None):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_world_size > 1:
data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
data_parallel_rank = distributed_utils.get_data_parallel_rank()
else:
data_parallel_world_size = 1
data_parallel_rank = 0
if override_args is not None:
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, "model_overrides", "{}")))
else:
overrides = None
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=overrides,
suffix=cfg.checkpoint.checkpoint_suffix,
)
model = models[0]
# Move models to GPU
for model in models:
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(saved_cfg)
# Build criterion
criterion = task.build_criterion(saved_cfg.criterion)
criterion.eval()
for subset in cfg.dataset.valid_subset.split(","):
try:
task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task)
dataset = task.dataset(subset)
except KeyError:
raise Exception("Cannot find dataset: " + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=data_parallel_world_size,
shard_id=data_parallel_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.get_progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
prefix=f"valid on '{subset}' subset",
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
if data_parallel_world_size > 1:
log_outputs = distributed_utils.all_gather_list(
log_outputs,
max_size=cfg.common.all_gather_list_size,
group=distributed_utils.get_data_parallel_group(),
)
log_outputs = list(chain.from_iterable(log_outputs))
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
distributed_utils.call_main(
convert_namespace_to_omegaconf(args), main, override_args=override_args
)
if __name__ == "__main__":
cli_main()
|
flash_metaseq-main
|
metaseq_cli/validate.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import functools
import logging
import math
import os
import subprocess
import sys
import time
from typing import Dict, Optional, Any, List, Tuple, Callable
import numpy as np
import torch
import torch.profiler as profiler
from omegaconf import DictConfig, OmegaConf
from metaseq import (
checkpoint_utils,
options,
tasks,
utils,
)
from metaseq.data import iterators, data_utils
from metaseq.data.plasma_utils import PlasmaStore
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
from metaseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils
from metaseq.file_io import PathManager
from metaseq.logging import meters, metrics, progress_bar
from metaseq.model_parallel.megatron_trainer import MegatronTrainer
from metaseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logging.Formatter.converter = time.gmtime # Enforce UTC timestamps
logger = logging.getLogger("metaseq_cli.train")
def main(cfg: DictConfig) -> None:
utils.import_user_module(cfg.common)
if (
distributed_utils.is_master(cfg.distributed_training)
and "job_logging_cfg" in cfg
):
        # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
if cfg.common.log_file is not None:
handler = logging.FileHandler(filename=cfg.common.log_file)
logger.addHandler(handler)
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)
# Print nvidia smi stats
logger.info(metrics.get_nvidia_smi_gpu_memory_stats_str())
# Print args
logger.info(cfg)
if cfg.checkpoint.write_checkpoints_asynchronously:
try:
import iopath # noqa: F401
except ImportError:
logging.exception(
"Asynchronous checkpoint writing is specified but iopath is "
"not installed: `pip install iopath`"
)
return
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(cfg.task)
assert cfg.criterion, "Please specify criterion to train a model"
# Build model and criterion
if cfg.distributed_training.ddp_backend == "fully_sharded":
extra = {
"use_sharded_state": cfg.distributed_training.use_sharded_state,
}
with fsdp_enable_wrap(cfg.distributed_training, **extra):
model = fsdp_wrap(
task.build_model(cfg.model),
process_group=distributed_utils.get_data_parallel_group(),
)
else:
model = task.build_model(cfg.model)
criterion = task.build_criterion(cfg.criterion)
logger.info(model)
logger.info("task: {}".format(task.__class__.__name__))
logger.info("model: {}".format(model.__class__.__name__))
logger.info("criterion: {}".format(criterion.__class__.__name__))
logger.info(
"num. model params: {:,} (num. trained: {:,})".format(
sum(getattr(p, "_orig_size", p).numel() for p in model.parameters()),
sum(
getattr(p, "_orig_size", p).numel()
for p in model.parameters()
if p.requires_grad
),
)
)
logger.info(metrics.get_nvidia_smi_gpu_memory_stats_str())
# Load valid dataset (we load training data below, based on the latest checkpoint)
# We load the valid dataset AFTER building the model
data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg)
if cfg.dataset.combine_valid_subsets:
task.load_dataset("valid", combine=True, epoch=1)
else:
for valid_sub_split in cfg.dataset.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build trainer
if cfg.common.model_parallel_size == 1:
trainer = Trainer(cfg, task, model, criterion)
else:
trainer = MegatronTrainer(cfg, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(
cfg.distributed_training.distributed_world_size
)
)
logger.info(
"max tokens per GPU = {} and batch size per GPU = {}".format(
cfg.dataset.max_tokens,
cfg.dataset.batch_size,
)
)
logger.info(metrics.get_nvidia_smi_gpu_memory_stats_str())
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
cfg.checkpoint,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=True,
)
max_epoch = cfg.optimization.max_epoch or math.inf
train_meter = meters.StopwatchMeter()
train_meter.start()
while epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=True,
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
# ioPath implementation to wait for all asynchronous file writes to complete.
if cfg.checkpoint.write_checkpoints_asynchronously:
logger.info(
"ioPath PathManager waiting for all asynchronous checkpoint "
"writes to finish."
)
PathManager.async_close()
logger.info("ioPath PathManager finished waiting.")
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if cfg.checkpoint.patience <= 0:
return False
def is_better(a, b):
return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= cfg.checkpoint.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
cfg.checkpoint.patience
)
)
return True
else:
return False
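# Worked illustration (clarifying comment): with patience=3 and a loss that is
# minimized, the valid-loss sequence 2.0, 1.8, 1.9, 1.9, 1.9 updates best to
# 1.8 on the second call, then counts three non-improving validations, so the
# fifth call returns True and training stops early.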
@metrics.aggregate("train")
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.BaseTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=True,
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
if update_freq > 1:
itr = iterators.GroupedIterator(
itr,
update_freq,
skip_remainder_batch=(
not cfg.optimization.train_with_epoch_remainder_batch
),
)
progress = progress_bar.get_progress_bar(
itr,
log_format=cfg.common.log_format,
log_file=cfg.common.log_file,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
)
progress.update_config(_flatten_config(cfg))
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
logger.info("Start iterating over samples")
def train(
i,
samples,
):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
if update_freq == 1:
samples = [samples]
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg,
trainer,
task,
epoch_itr,
valid_subsets,
end_of_epoch,
log_output is not None,
)
return valid_losses, should_stop
for i, samples in enumerate(progress):
if (
distributed_utils.get_global_rank() == 0
and cfg.common.new_profiler
and i == 5
):
logger.info("STARTING PROFILER")
with profiler.profile() as prof:
valid_losses, should_stop = train(i, samples)
torch.cuda.synchronize()
prof.export_chrome_trace(
os.path.join(cfg.checkpoint.save_dir, "profiler_trace.json")
)
else:
valid_losses, should_stop = train(i, samples)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def _flatten_config(cfg: DictConfig):
config = OmegaConf.to_container(cfg)
# remove any legacy Namespaces and replace with a single "args"
namespace = None
for k, v in list(config.items()):
if isinstance(v, argparse.Namespace):
namespace = v
del config[k]
if namespace is not None:
config["args"] = vars(namespace)
return config
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.BaseTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
was_successful_step: bool,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
# was_successful_step is necessary since we don't increment step counters
# on OOM or overflow. Thus if we get multiple bad steps right after
# loading a checkpoint (when step counter is exactly when we would step)
# then we will start overwriting! omg!
# Stopping conditions (and an additional one based on validation loss later
# on)
should_stop = False
if num_updates >= max_update:
should_stop = True
logger.info(
f"Stopping training due to "
f"num_updates: {num_updates} >= max_update: {max_update}"
)
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
or should_stop
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
and was_successful_step
)
)
do_validate = (
(
not end_of_epoch and do_save and not cfg.checkpoint.no_best_checkpoints
) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
or should_stop
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
and was_successful_step
)
) and not cfg.dataset.disable_validation
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
should_stop |= should_stop_early(cfg, valid_losses[0])
# Save checkpoint
if do_save or should_stop:
checkpoint_utils.save_checkpoint(
cfg.checkpoint,
trainer,
epoch_itr,
valid_losses[0],
training_finished=should_stop,
async_callback_fn=functools.partial(post_checkpoint_callback, cfg)
if cfg.checkpoint.cloud_upload_path
else None,
)
trainer.reset_dummy_batch(epoch_itr.first_batch)
return valid_losses, should_stop
def post_checkpoint_callback(cfg, filename):
if cfg.checkpoint.cloud_upload_path is not None:
if "blob.core.windows.net" in cfg.checkpoint.cloud_upload_path:
azcopy_logs = filename + "_azcopy_logs"
os.environ["AZCOPY_CONCURRENCY_VALUE"] = "10"
os.environ["AZCOPY_LOG_LOCATION"] = azcopy_logs
os.makedirs(azcopy_logs, exist_ok=True)
logger.info(
f"preparing to azcopy {filename} to {cfg.checkpoint.cloud_upload_path}; logs in {azcopy_logs}"
)
cmd = [
"azcopy", # TODO(susanz): require azcopy to be installed.
"copy",
"--cap-mbps",
"96.0",
filename,
cfg.checkpoint.cloud_upload_path,
]
res = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.returncode != 0:
print("Error: {}, azcopy failed".format(res.returncode))
print("Azcopy stdout = {}".format(res.stdout))
sys.exit(1)
# Delete original checkpoint on local storage
# TODO make this configurable
logger.info(
f"Successfully copied {filename} to {cfg.checkpoint.cloud_upload_path}"
)
os.remove(filename)
else:
try:
# PathManager only supports writing to S3, but this function call
# can be replaced with other APIs for copying checkpoints.
PathManager.copy_from_local(
filename,
os.path.join(
cfg.checkpoint.cloud_upload_path, os.path.basename(filename)
),
overwrite=True,
)
except (FileNotFoundError, AssertionError) as e:
logger.info(f"could not upload {filename}: {e}")
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(
cfg: DictConfig,
trainer: Trainer,
task: tasks.BaseTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
if cfg.dataset.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(cfg.dataset.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
with metrics.aggregate(new_root=True) as combined_agg:
for subset in subsets:
logger.info(
'begin validation on "{}" subset on rank {}'.format(
subset, distributed_utils.get_global_rank()
)
)
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(
shuffle=False, set_dataset_epoch=False # use a fixed valid set
)
logger.info(
'got valid iterator on "{}" subset on rank {}'.format(
subset, distributed_utils.get_global_rank()
)
)
progress = progress_bar.get_progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
)
logger.info(
'Begin looping over validation "{}" subset with length "{}"'.format(
subset, len(progress)
)
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate() as agg:
for i, sample in enumerate(progress):
if (
cfg.dataset.max_valid_steps is not None
and i > cfg.dataset.max_valid_steps
):
break
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
stats = get_valid_stats(cfg, trainer, combined_agg.get_smoothed_values())
progress.print(stats, tag="valid/combined", step=trainer.get_num_updates())
return valid_losses
def get_valid_stats(
cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric)
best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[cfg.checkpoint.best_checkpoint_metric],
)
return stats
def cli_main(
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
# For training - this is where arg parsing happens.
cfg = convert_namespace_to_omegaconf(args)
if cfg.common.use_plasma_view:
server = PlasmaStore(path=cfg.common.plasma_path)
logger.info(
f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}"
)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
if __name__ == "__main__":
cli_main()
|
flash_metaseq-main
|
metaseq_cli/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Command-line interactive demo.
Launch with `python -m metaseq_cli.interactive_cli` to run locally.
See docs/api.md for more information.
"""
import os
import random
import sys
import logging
import torch
from metaseq import options
from metaseq.dataclass.configs import MetaseqConfig
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
from metaseq.distributed import utils as dist_utils
from metaseq.hub_utils import GeneratorInterface
from metaseq.service.constants import (
TOTAL_WORLD_SIZE,
LAUNCH_ARGS,
)
from metaseq.service.utils import encode_fn, build_logger
logger = build_logger()
def input_loop():
inp = []
while True:
try:
# green display, bold user prompt
display = (
"\033[32mPrompt (ctrl-D to end input, ctrl-C to quit):\n\033[0;1m"
if not inp
else ""
)
data = input(display)
inp.append(data)
except KeyboardInterrupt:
# reset the formatting
sys.stdout.write("\033[0m")
raise
except EOFError:
break
# reset the formatting
sys.stdout.write("\033[0m")
logger.debug(f"Input: {inp}")
return "\n".join(inp)
def worker_main(cfg: MetaseqConfig, namespace_args=None):
global generator
# make sure generations are stochastic since we have many workers
torch.manual_seed(random.randint(1, 20000))
torch.cuda.manual_seed(random.randint(1, 20000))
generator = GeneratorInterface(cfg)
models = generator.load_model() # noqa: F841
# quiet some of the stuff for visual aspects
logging.getLogger("metaseq.hub_utils").setLevel(logging.WARNING)
logger.info(f"loaded model {cfg.distributed_training.distributed_rank}")
request_object = dist_utils.broadcast_object(
None, src_rank=0, group=dist_utils.get_global_group()
)
if torch.distributed.get_rank() == 0:
while True:
prompt = input_loop()
tokens = encode_fn(generator, prompt)
request_object = {
"inputs": [tokens],
"max_tokens": [128],
}
dist_utils.broadcast_object(
request_object, src_rank=0, group=dist_utils.get_global_group()
)
generations = generator.generate(**request_object)
print(generations[0][0]["text"])
else:
# useful in FSDP setting
while True:
request_object = dist_utils.broadcast_object(
None, src_rank=0, group=dist_utils.get_global_group()
)
_ = generator.generate(**request_object)
def cli_main():
"""
Command line interactive.
"""
parser = options.get_generation_parser()
# dumb defaults overriding
parser.set_defaults(lr_scheduler=None, criterion=None)
flat_launch_args = []
for s in LAUNCH_ARGS:
flat_launch_args += s.split()
args = options.parse_args_and_arch(parser, input_args=flat_launch_args)
args.data = os.path.dirname(args.path) # hardcode the data arg
cfg = convert_namespace_to_omegaconf(args)
cfg.distributed_training.distributed_world_size = TOTAL_WORLD_SIZE
dist_utils.call_main(cfg, worker_main, namespace_args=args)
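# Illustrative sketch: worker_main() above coordinates ranks with a simple
# broadcast protocol -- rank 0 reads a prompt and broadcasts a request dict,
# while every other rank blocks on the same broadcast and runs the identical
# generate() call so model-parallel/FSDP shards stay in lockstep. A minimal
# single round trip of that protocol, assuming a distributed group is already
# initialized and `generator` is a loaded GeneratorInterface (the helper name
# and max_tokens value are placeholders):
def _broadcast_generate_sketch(generator, prompt_tokens):
    if torch.distributed.get_rank() == 0:
        request = {"inputs": [prompt_tokens], "max_tokens": [128]}
        dist_utils.broadcast_object(
            request, src_rank=0, group=dist_utils.get_global_group()
        )
        return generator.generate(**request)
    # non-zero ranks: receive whatever rank 0 sent and run the same step
    request = dist_utils.broadcast_object(
        None, src_rank=0, group=dist_utils.get_global_group()
    )
    return generator.generate(**request)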
if __name__ == "__main__":
cli_main()
|
flash_metaseq-main
|
metaseq_cli/interactive_cli.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from metaseq import utils
from metaseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
OptimizationConfig,
ReshardConfig,
)
from metaseq.dataclass.utils import gen_parser_from_dataclass
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
add_checkpoint_args(parser)
return parser
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_reshard_parser(task="language_modeling"):
parser = get_eval_lm_parser(default_task=task)
add_reshard_args(parser)
return parser
def add_reshard_args(parser):
group = parser.add_argument_group("reshard")
gen_parser_from_dataclass(group, ReshardConfig())
return group
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from metaseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
if hasattr(args, "task"):
from metaseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
# Add *-specific args to parser.
from metaseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
# hack since we don't want to call "fixed" LR scheduler
if choice == "fixed":
choice = "inverse_sqrt"
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from metaseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under metaseq/tasks/
from metaseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationConfig())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointConfig())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, GenerationConfig())
return group
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under metaseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from metaseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
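# Illustrative sketch: the typical call pattern for these helpers, mirroring
# metaseq_cli/train.py -- build a training parser, parse a full command line
# (data path, --arch, --task, ...), then convert the Namespace to an omegaconf
# config. `argv` is assumed to be such a command line; the helper name is a
# placeholder.
def _options_usage_sketch(argv):
    from metaseq.dataclass.utils import convert_namespace_to_omegaconf
    parser = get_training_parser()
    args = parse_args_and_arch(parser, input_args=argv)
    return convert_namespace_to_omegaconf(args)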
|
flash_metaseq-main
|
metaseq/options.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger = logging.getLogger(__name__)
class NanDetector:
"""
    Detects the first NaN or Inf in the forward and/or backward pass and logs it, together with the module name
"""
def __init__(self, model, forward=True, backward=True):
self.bhooks = []
self.fhooks = []
self.forward = forward
self.backward = backward
self.named_parameters = list(model.named_parameters())
self.reset()
for name, mod in model.named_modules():
mod.__module_name = name
self.add_hooks(mod)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
# Dump out all model gnorms to enable better debugging
norm = {}
gradients = {}
for name, param in self.named_parameters:
if param.grad is not None:
grad_norm = torch.norm(param.grad.data, p=2, dtype=torch.float32)
norm[name] = grad_norm.item()
if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any():
gradients[name] = param.grad.data
if len(gradients) > 0:
logger.info("Detected nan/inf grad norm, dumping norms...")
logger.info(f"norms: {norm}")
logger.info(f"gradients: {gradients}")
self.close()
def add_hooks(self, module):
if self.forward:
self.fhooks.append(module.register_forward_hook(self.fhook_fn))
if self.backward:
self.bhooks.append(module.register_backward_hook(self.bhook_fn))
def reset(self):
self.has_printed_f = False
self.has_printed_b = False
def _detect(self, tensor, name, backward):
err = None
if (
torch.is_floating_point(tensor)
# single value tensors (like the loss) will not provide much info
and tensor.numel() >= 2
):
with torch.no_grad():
if torch.isnan(tensor).any():
err = "NaN"
elif torch.isinf(tensor).any():
err = "Inf"
if err is not None:
err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}"
return err
def _apply(self, module, inp, x, backward):
if torch.is_tensor(x):
if isinstance(inp, tuple) and len(inp) > 0:
inp = inp[0]
err = self._detect(x, module.__module_name, backward)
if err is not None:
if torch.is_tensor(inp) and not backward:
err += (
f" input max: {inp.max().item()}, input min: {inp.min().item()}"
)
has_printed_attr = "has_printed_b" if backward else "has_printed_f"
logger.warning(err)
setattr(self, has_printed_attr, True)
elif isinstance(x, dict):
for v in x.values():
self._apply(module, inp, v, backward)
elif isinstance(x, list) or isinstance(x, tuple):
for v in x:
self._apply(module, inp, v, backward)
def fhook_fn(self, module, inp, output):
if not self.has_printed_f:
self._apply(module, inp, output, backward=False)
def bhook_fn(self, module, inp, output):
if not self.has_printed_b:
self._apply(module, inp, output, backward=True)
def close(self):
for hook in self.fhooks + self.bhooks:
hook.remove()
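# Illustrative sketch: NanDetector is used as a context manager wrapping a
# forward (and optionally backward) pass; hooks are removed on exit. The tiny
# linear model and random batch below are placeholders for illustration only.
def _nan_detector_usage_sketch():
    model = torch.nn.Linear(8, 4)
    batch = torch.randn(2, 8)
    with NanDetector(model, forward=True, backward=True):
        loss = model(batch).sum()
        loss.backward()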
|
flash_metaseq-main
|
metaseq/nan_detector.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
from typing import Union
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig
from metaseq.dataclass import MetaseqDataclass
from metaseq.dataclass.utils import populate_dataclass, merge_with_parent
REGISTRIES = {}
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(DATACLASS_REGISTRY[choice](), cfg)
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, MetaseqDataclass):
raise ValueError(
"Dataclass {} must extend MetaseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group=registry_name, node=node, provider="metaseq")
REGISTRY[name] = cls
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
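# Illustrative sketch: setup_registry() returns a builder, a registration
# decorator, and the underlying registries. A hypothetical component registry
# ("--example-component", "my_component", ExampleComponent are all placeholder
# names) would be created and used like this:
def _registry_usage_sketch():
    build_component, register_component, _, _ = setup_registry(
        "--example-component", default=None
    )
    @register_component("my_component")
    class ExampleComponent:
        def __init__(self, cfg):
            self.cfg = cfg
    return build_component("my_component")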
|
flash_metaseq-main
|
metaseq/registry.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import typing as tp
def _safe_readline(fd) -> str:
pos = fd.tell()
while True:
try:
return fd.readline()
except UnicodeDecodeError:
pos -= 1
fd.seek(pos) # search where this character begins
def find_offsets(filename: str, num_chunks: int) -> tp.List[int]:
"""
    given a file and a number of chunks, find the offsets in the file
to be able to chunk around full lines.
"""
with open(filename, "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
_safe_readline(f)
offsets[i] = f.tell()
offsets[-1] = size
return offsets
class ChunkLineIterator:
"""
    Iterator to properly iterate over lines of a file chunk.
"""
def __init__(self, fd, start_offset: int, end_offset: int):
self._fd = fd
self._start_offset = start_offset
self._end_offset = end_offset
def __iter__(self) -> tp.Iterable[str]:
self._fd.seek(self._start_offset)
# next(f) breaks f.tell(), hence readline() must be used
line = _safe_readline(self._fd)
while line:
pos = self._fd.tell()
# f.tell() does not always give the byte position in the file
# sometimes it skips to a very large number
# it is unlikely that through a normal read we go from
# end bytes to end + 2**32 bytes (4 GB) and this makes it unlikely
            # that the procedure breaks due to the nondeterministic behavior of
# f.tell()
if (
self._end_offset > 0
and pos > self._end_offset
and pos < self._end_offset + 2**32
):
break
yield line
line = self._fd.readline()
class Chunker:
"""
    Context manager to read a chunk of a file line by line.
"""
def __init__(self, path: str, start_offset: int, end_offset: int):
self.path = path
self.start_offset = start_offset
self.end_offset = end_offset
def __enter__(self) -> ChunkLineIterator:
self.fd = open(self.path, "r", encoding="utf-8")
return ChunkLineIterator(self.fd, self.start_offset, self.end_offset)
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.fd.close()
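# Illustrative sketch: the typical pattern for processing a large text file in
# parallel-friendly pieces -- compute byte offsets that fall on line
# boundaries, then hand each (start, end) pair to a Chunker. The path and
# chunk count below are placeholders.
def _chunker_usage_sketch(path="corpus.txt", num_chunks=4):
    offsets = find_offsets(path, num_chunks)
    line_counts = []
    for start, end in zip(offsets[:-1], offsets[1:]):
        with Chunker(path, start, end) as lines:
            line_counts.append(sum(1 for _ in lines))
    return line_counts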
|
flash_metaseq-main
|
metaseq/file_chunker_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import os
import sys
try:
from .version import __version__ # noqa
except ImportError:
version_txt = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_txt) as f:
__version__ = f.read().strip()
__all__ = ["pdb"]
# backwards compatibility to support `from metaseq.X import Y`
from metaseq.distributed import utils as distributed_utils
from metaseq.logging import meters, metrics # noqa
sys.modules["metaseq.distributed_utils"] = distributed_utils
sys.modules["metaseq.meters"] = meters
sys.modules["metaseq.metrics"] = metrics
# initialize hydra
from metaseq.dataclass.initialize import hydra_init # noqa: E402
hydra_init()
import metaseq.criterions # noqa
import metaseq.distributed # noqa
import metaseq.models # noqa
import metaseq.modules # noqa
import metaseq.optim # noqa
import metaseq.optim.lr_scheduler # noqa
import metaseq.pdb # noqa
import metaseq.tasks # noqa
import metaseq.benchmark # noqa
import metaseq.model_parallel # noqa
|
flash_metaseq-main
|
metaseq/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from metaseq import search
logger = logging.getLogger(__name__)
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size: int = 1,
max_len_a: int = 0,
max_len_b: int = 200,
min_len: int = 1,
temperature: float = 1.0,
search_strategy=None,
need_logprobs: bool = False,
stop: Optional[List[int]] = None,
profile=False,
):
"""Generates translations of a given source sentence.
Args:
models: ensemble of models
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
stop: An optional list of other tokens that can cause early termination.
need_logprobs (bool): Return additional log-prob distributions for
every timestep of the search.
"""
super().__init__()
self.model = models[0]
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.need_logprobs = need_logprobs
self.stop = stop if stop is not None else []
self.temperature = temperature
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
self.model.eval()
self.profile = profile
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations."""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs):
"""Generate translations. Match the api of other metaseq generators."""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
incremental_states = torch.jit.annotate(
Dict[str, Dict[str, Optional[Tensor]]], {}
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text being the character length except EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception("expected src_tokens or source in net input")
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
max_len = min(self.model.max_decoder_positions() - 1, self.max_len_b or 1e99)
min_len = min(max_len - 1, self.min_len or 0)
assert (
min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
finished = [
False for i in range(bsz)
] # a boolean array indicating if the sentence at the index is finished or not
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
# notes:
# - scores \in FloatTensor(bsz * beam_size, max_len + 1)
# - tokens \in LongTensor(bsz * beam_size, max_len + 2)
# - src_tokens \in LongTensor(bsz, prompt_len)
# - all_lprobs \in FloatTensor(bsz * beam_size, max_len + 1, vocab_size)
# is the next word distribution at every timestep
if self.need_logprobs:
# lprobs are costly for memory, so only compute them if we have to
all_lprobs = (
torch.zeros(bsz * beam_size, max_len + 1, self.vocab_size)
.to(src_tokens)
.float()
)
# first forward through all the fixed tokens with forced decoding we'll
# need to handle normalization and prep for bookkeeping of incremental
# decoding
start_step = src_tokens.shape[1]
# set all the forced tokens
tokens[:, :start_step] = src_tokens
# compute the model predictions
model_out = self.model.decoder(
tokens[:, :start_step],
incremental_state=incremental_states,
)
# normalize
        model_out[0].div_(self.temperature)
# lprobs is the log probability of each possible token in every position
# lprobs \in FloatTensor(bsz * beam_size, prompt_len, vocab_size)
lprobs = self.model.get_normalized_probs(model_out, log_probs=True, sample=None)
# don't allow generation of eos/pad
model_out[0][:, :, self.eos] = -math.inf
model_out[0][:, :, self.pad] = -math.inf
for stop_token in self.stop:
model_out[0][:, :, stop_token] = -math.inf
if self.need_logprobs:
all_lprobs[:, :start_step] = lprobs.type_as(all_lprobs)
else:
all_lprobs = None
# find and store the logprobs of each prompt token, cutting out the
# rest of the vocab. Note the shift of 1 here b/c autoregressive.
prompt_tokens = tokens[:, 1:start_step].unsqueeze(-1)
# look up a specific vocab logprob, and broadcast it into scores
toscores = torch.gather(lprobs, -1, prompt_tokens).squeeze(-1)
scores[:, : start_step - 1] = toscores.type_as(scores)
# reset scores after the last point of forced decoding and gather the
# probabilities of the most recent token prediction, as search
# decisions are only over the most recent token.
lprobs_cut = []
for i in range(src_tokens.shape[0]):
prompt_len = src_lengths[i]
scores[i * beam_size : (i + 1) * beam_size, prompt_len:] = 0.0 # reset
lprobs_cut.append(lprobs[i * beam_size : (i + 1) * beam_size, prompt_len])
lprobs = torch.cat(lprobs_cut, dim=0)
# finally, scores is actually stored as the cumulative NLL, but we have
# individual NLL scores right now
scores = scores.cumsum(dim=1)
# start from previous timestep because we still have to do beam search
# bookkeeping (i.e. finalize the hypothesis if it's the final token)
for step in range(start_step - 1, max_len + 1):
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
# note we may be decreasing the size of the incremental state or
# all_lprobs here, as beams hit EOS.
self.model.decoder.reorder_incremental_state_scripting(
incremental_states, reorder_state
)
if self.need_logprobs:
all_lprobs = all_lprobs.index_select(0, reorder_state)
# the first step is already computed via forced decoding above, but
# we still need to do the beam search aspects, so only compute the
# forward pass if we're past the prompt tokens
if step != start_step - 1:
model_out = self.model.decoder(
tokens[:, : step + 1],
incremental_state=incremental_states,
)
model_out[0].div_(self.temperature)
lprobs = self.model.get_normalized_probs(
model_out, log_probs=True, sample=None
)
# search decisions are only over the most recent token
lprobs = lprobs[:, -1, :]
if self.need_logprobs:
all_lprobs[:, step] = lprobs
if step < min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
for stop_token in self.stop:
lprobs[:, stop_token] = -math.inf
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
# underlying search indexes from first token being generated,
# so we need to account for the size of the prompt.
step - start_step + 1,
lprobs.view(bsz, -1, self.vocab_size),
scores[:, start_step - 1 : step].view(bsz, beam_size, -1),
tokens[:, start_step - 1 : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos)
for stop_token in self.stop:
# if there are other early stopping tokens, allow those to trigger stop
eos_mask = eos_mask | cand_indices.eq(stop_token)
eos_mask = eos_mask & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
max_len,
all_lprobs,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses. note that we cut off
# the prompt tokens here for ease of bookkeeping.
# Set the tokens for each beam (can select the same row more than once)
tokens[:, start_step : step + 1] = torch.index_select(
tokens[:, start_step : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > start_step:
scores[:, start_step:step] = torch.index_select(
scores[:, start_step:step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
max_len: int,
all_lprobs: Optional[Tensor],
):
"""
Finalize hypothesis, store finalized information in `finalized`, and
change `finished` accordingly. A sentence is finalized when {beam_size}
finished items have been collected for it.
Returns number of sentences (not beam items) being finalized. These
will be removed from the batch and not processed further.
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
# The keys here are of the form "{sent}_{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# set() is not supported in script export
sents_seen: Dict[str, Optional[Tensor]] = {}
# For every finished beam item
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
# sentence index in the current (possibly reduced) batch
unfin_idx = torch.div(idx, beam_size, rounding_mode="trunc")
# sentence index in the original (unreduced) batch
sent = unfin_idx + cum_unfin[unfin_idx]
# Cannot create dict for key type '(int, int)' in torchscript.
# The workaround is to cast int to string
seen = str(sent.item()) + "_" + str(unfin_idx.item())
if seen not in sents_seen:
sents_seen[seen] = None
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent]) < beam_size:
fin = {
"tokens": tokens_clone[i],
"score": score,
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
if all_lprobs is not None:
fin["distributions"] = all_lprobs[i]
finalized[sent].append(fin)
newly_finished: List[int] = []
for seen in sents_seen.keys():
# check termination conditions for this sentence
sent: int = int(float(seen.split("_")[0]))
unfin_idx: int = int(float(seen.split("_")[1]))
if not finished[sent] and self.is_finished(
step, max_len, len(finalized[sent]), beam_size
):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
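# Illustrative sketch: SequenceGenerator takes an ensemble (here a single
# model), the target dictionary, and a batch shaped like
# {"net_input": {"src_tokens": <prompt LongTensor>}}. `model` and `tgt_dict`
# are assumed to come from a loaded metaseq checkpoint; the beam size,
# max_len_b and temperature values are placeholders.
def _sequence_generator_usage_sketch(model, tgt_dict, prompt_tokens):
    generator = SequenceGenerator(
        [model],
        tgt_dict,
        beam_size=1,
        max_len_b=64,
        temperature=0.7,
    )
    sample = {"net_input": {"src_tokens": prompt_tokens}}
    finalized = generator(sample)  # List[List[Dict[str, Tensor]]]
    # best-scoring hypothesis for the first sentence in the batch
    return finalized[0][0]["tokens"]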
|
flash_metaseq-main
|
metaseq/sequence_generator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ["set_trace"]
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
_stdin_fd = sys.stdin.fileno()
except Exception:
_stdin_fd = None
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from metaseq import pdb; pdb.set_trace()`
"""
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with _stdin_lock:
try:
if _stdin_fd is not None:
if not _stdin[0]:
_stdin[0] = os.fdopen(_stdin_fd)
sys.stdin = _stdin[0]
self.cmdloop()
finally:
sys.stdin = stdin_bak
def set_trace():
pdb = MultiprocessingPdb()
pdb.set_trace(sys._getframe().f_back)
def set_trace_rank0():
import metaseq.distributed.utils as dist_utils
if dist_utils.get_global_rank() == 0:
set_trace()
else:
while True:
pass
|
flash_metaseq-main
|
metaseq/pdb.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime as dt
import io
import logging
import os
import shutil
import types
from functools import partial
from typing import Any, Dict, IO, List, Optional, Tuple, Union
import boto3
import botocore
from boto3.s3.transfer import TransferConfig
from iopath.common.file_io import file_lock, get_cache_dir, PathHandler
logger = logging.getLogger(__name__)
# Override for close() on files to write to Amazon S3
def s3_close_and_upload(self, client, bucket, s3_path, transfer_config):
# Seek to start, for use by upload_fileobj.
self.seek(0)
# Reinstall the proper close.
self.close = self._close
# upload_fileobj needs bytes
    # NOTE: This is a very undesirable hack.
if isinstance(self, io.StringIO):
self = io.BytesIO(self.getvalue().encode("utf-8"))
# Upload
try:
client.upload_fileobj(
self,
bucket,
s3_path,
Config=transfer_config,
)
except botocore.exceptions.ClientError as e:
raise OSError(f"Error in file upload - {e}" f"{type(e).__name__}: {e}") from e
class S3PathHandler(PathHandler):
"""
Support for Amazon Simple Storage Service (S3)
    PathHandler methods, at a glance:
File --torch.load-> In --open(..., 'w')-> Amazon <- _exists,_isfile,_isdir,_ls,_rm ...
System <-torch.save-- Mem. <-open(..., 'r')-- S3
<----------------_copy_from_local-----------------
----------------_get_local_path ----------------->
Mem usage, for processing N bytes:
open(..., mode)
mode=='w': 2N, due to fully buffering user input,
*and doing naive conversion from StringIO -> BytesIO*,
before writing to S3
^ Potential for optimization.
mode=='wb': N, due to fully buffering user input, before writing to S3.
mode=='r': N, due to fully buffering file in memory
mode=='rb': N, due to fully buffering file in memory
_copy_from_local: ≈0. boto3 streams from file system directly to s3
        _get_local_path: ≈0. boto3 streams from s3 directly to the file system
"""
# Disable failures if not all args are specified.
_strict_kwargs_check = False
S3_PREFIX = "s3://"
CACHE_SUBDIR_NAME = "s3_cache"
def __init__(
self,
cache_dir: Optional[str] = None,
transfer_config_kwargs: Optional[Dict] = None,
):
"""
Args:
cache_dir (str): Local filesystem directory to use for caching. If None,
uses default from `file_io.get_cache_dir()`.
transfer_config_kwargs (dict): Settings for boto3.s3.transfer.TransferConfig.
Used to specify settings for multipart transfers.
See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3.html for details.
"""
self.cache_dir = cache_dir
self.transfer_config = TransferConfig(
**(transfer_config_kwargs if transfer_config_kwargs else {})
)
def _get_supported_prefixes(self) -> List[str]:
"""
Returns:
List[str]: the list of URI prefixes this PathHandler can support
"""
return [self.S3_PREFIX]
def _parse_uri(self, uri: str) -> Tuple[str, str]:
"""
Parses a "s3://bucket/path" URI into `bucket` and `path` strings.
Args:
uri (str): A s3:// URI.
Returns:
bucket (str): the s3 bucket.
path (str): the path on the s3 system.
"""
splits = uri.replace(self.S3_PREFIX, "").split("/")
bucket = splits[0]
path = "/".join(splits[1:])
return bucket, path
def _get_client(self, bucket: str):
# TODO: Consider pid-based cache: https://fburl.com/code/xsz3wrv6
if not hasattr(self, "client"):
try:
session = boto3.Session()
self.client = session.client("s3")
except botocore.exceptions.NoCredentialsError as e:
logger.error(
" See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html "
" for method of using environment variable to point to aws credentials, and the "
" order in which boto will search for said credentials. "
)
logger.error(
"Boto3 searches via the order below. If on FAIR Cluster, method 4 may be most convenient."
""
"The order in which Boto3 searches for credentials is:"
"1) [UNUSED] Passing credentials as parameters in the boto.client() method"
"2) [UNUSED] Passing credentials as parameters when creating a Session object"
"3) Environment variables"
" AWS_ACCESS_KEY_ID - The access key for your AWS account."
" AWS_SECRET_ACCESS_KEY - The secret key for your AWS account."
" AWS_SESSION_TOKEN - The session key for your AWS account."
" This is only needed when you are using temporary credentials. "
"4) Shared credential file (~/.aws/credentials)"
" default: ~/.aws/credentials"
" changed via: AWS_SHARED_CREDENTIALS_FILE"
" *for FAIR cluster usage: `export AWS_SHARED_CREDENTIALS_FILE=~/.fairusers_aws/credentials`"
"5) AWS config file (~/.aws/config)"
" default: ~/.aws/config"
" changed via: AWS_CONFIG_FILE"
"6) Assume Role provider"
"7) Boto2 config file (/etc/boto.cfg and ~/.boto)"
"8) Instance metadata service on an Amazon EC2 instance that has an IAM role configured."
)
raise OSError(
f"Error in making s3 client for bucket {bucket}"
f"{type(e).__name__}: {e}"
) from e
return self.client
def _local_cache_path(
self,
path: str,
):
"""
Helper that returns a local cache path for a given uri.
Args:
path (str): A URI supported by this PathHandler.
Returns:
local_cache_path (str): a file path which exists on the local file system,
in a cache directory.
"""
bucket, file_path = self._parse_uri(path)
return os.path.join(
get_cache_dir(self.cache_dir), self.CACHE_SUBDIR_NAME, file_path
)
def _get_local_path(self, path: str, **kwargs: Any) -> str:
"""
Get a filepath which is compatible with native Python I/O such as `open`
and `os.path`.
If URI points to a remote resource, this function may download and cache
the resource to local disk. In this case, the cache stays on filesystem
(under `file_io.get_cache_dir()`) and will be used by a different run.
Therefore this function is meant to be used with read-only resources.
Args:
path (str): A URI supported by this PathHandler
Returns:
local_path (str): a file path which exists on the local file system
"""
self._check_kwargs(kwargs)
# Cheap check first.
if path.endswith("/"):
raise NotImplementedError(
"S3PathHandler does not currently support downloading directories"
)
assert self._isfile(path)
local_path = self._local_cache_path(path)
with file_lock(local_path):
if os.path.exists(local_path):
# If local object's last modified time is *after* remote object's last modified
# time, do not use the cache. Instead, redownload.
response = self._head_object(path)
if response is not None:
remote_dt = response["LastModified"]
local_dt = dt.datetime.fromtimestamp(
os.path.getmtime(local_path)
).astimezone()
# NOTE: we may want to skip the cache even when the times are close, to avoid a race condition.
# Currently, a lengthy download of a very recent but stale file would have a late
# local last modified timestamp, and would be improperly used.
# Better fix: set last modified time via the remote object's last modified time,
# in download_file().
if (local_dt - remote_dt) > dt.timedelta(minutes=0):
logger.info(
"URL {} was already cached in {}".format(path, local_path)
)
return local_path
logger.info("Caching {} ...".format(path))
tmp = local_path + ".tmp"
# clean-up tmp if found, because if tmp exists, it must be a dirty
# result of a previous process that didn't clean up after itself.
if os.path.isfile(tmp):
os.unlink(tmp)
bucket, s3_path = self._parse_uri(path)
client = self._get_client(bucket)
try:
response = client.download_file(
bucket, s3_path, tmp, Config=self.transfer_config
)
# First download to tmp, then move it, because move is
# (almost?) atomic when src and dst are in the same file
# system. This will avoid partial cache state if the
# process is killed.
shutil.move(tmp, local_path)
finally:
try:
os.unlink(tmp)
except Exception:
pass
logger.info("URL {} cached in {}".format(path, local_path))
return local_path
def _copy_from_local(
self, local_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any
) -> bool:
"""
Copies a local file to the specified URI.
If the URI is another local path, this should be functionally identical
to copy.
Args:
local_path (str): a file path which exists on the local file system
dst_path (str): A URI supported by this PathHandler
overwrite (bool): Bool flag for forcing overwrite of existing URI
Returns:
status (bool): True on success
"""
self._check_kwargs(kwargs)
# Just checking this to avoid expensive API calls in self._isdir().
if local_path.endswith("/") or dst_path.endswith("/"):
raise NotImplementedError(
"S3PathHandler does not currently support uploading directories"
)
bucket, s3_path = self._parse_uri(dst_path)
client = self._get_client(bucket)
try:
client.upload_file(local_path, bucket, s3_path, Config=self.transfer_config)
return True
except botocore.exceptions.ClientError as e:
logger.error("Error in file upload - {}".format(str(e)))
return False
def _decorate_buf_with_s3_methods(
self,
buffer: Union[IO[str], IO[bytes]],
client: Any,
bucket: str,
s3_path: str,
transfer_config: Any,
):
# Save old close method.
buffer._close = buffer.close
# Add in our new close method.
fn = partial(
s3_close_and_upload,
client=client,
bucket=bucket,
s3_path=s3_path,
transfer_config=transfer_config,
)
buffer.close = types.MethodType(fn, buffer)
def _open(
self,
path: str,
mode: str = "r",
buffering: int = -1,
# The following three arguments are unused,
# But are included to avoid triggering WARNING
# messages from _check_kargs.
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
**kwargs: Any,
) -> Union[IO[str], IO[bytes]]:
"""
Open a stream to a URI, similar to the built-in `open`.
Args:
path (str): A URI supported by this PathHandler
mode (str): Specifies the mode in which the file is opened. It defaults
to 'r'.
buffering (int): An optional integer used to set the buffering policy.
Pass 0 to switch buffering off and an integer >= 1 to indicate the
size in bytes of a fixed-size chunk buffer. When no buffering
argument is given, the default buffering policy depends on the
underlying I/O implementation.
Returns:
file: a file-like object.
"""
self._check_kwargs(kwargs)
bucket, s3_path = self._parse_uri(path)
client = self._get_client(bucket)
# AWS methods download_fileobj() and upload_fileobj()
# both expect binary file-like objects.
if "r" in mode:
# 1. Download into io.BytesIO.
# (binary format is required by download_fileobj.)
buffer = io.BytesIO()
try:
# NOTE: Will download entire file! Further optimization to
# only read a portion of the file could be implemented here.
# NOTE: We download into an in-memory buffer. If downloading to
# filesystem is desirable, use _get_local_path().
client.download_fileobj(
bucket, s3_path, buffer, Config=self.transfer_config
)
except botocore.exceptions.ClientError as e:
raise OSError(
f"Error in making s3 client for bucekt {bucket}"
f"{type(e).__name__}: {e}"
) from e
# 2. Set file-pointer to beginning of file.
buffer.seek(0)
# 3. Use convenient wrapper to make object look like StringIO,
# if user wants non-binary.
if "b" not in mode:
buffer = io.TextIOWrapper(buffer, encoding="utf-8")
return buffer
elif "w" in mode:
# 1. For writing, we give the user io.BytesIO or io.StringIO.
if "b" in mode:
buffer = io.BytesIO()
else:
buffer = io.StringIO()
# 2. Decorate buffer so that we upload when it's closed by user.
# If StringIO, decorator does a simple+expensive conversion
# to bytesIO before uploading.
# (because upload_fileobj requires binary)
self._decorate_buf_with_s3_methods(
buffer, client, bucket, s3_path, self.transfer_config
)
return buffer
else:
raise OSError(f"Unsupported open mode {mode}")
def _copy(
self, src_path: str, dst_path: str, overwrite: bool = False, **kwargs: Any
) -> bool:
"""
Copies a source path to a destination path.
Args:
src_path (str): A URI supported by this PathHandler
dst_path (str): A URI supported by this PathHandler
overwrite (bool): Bool flag for forcing overwrite of existing file
Returns:
status (bool): True on success
"""
self._check_kwargs(kwargs)
src_bucket, src_s3_path = self._parse_uri(src_path)
dst_bucket, dst_s3_path = self._parse_uri(dst_path)
assert src_bucket == dst_bucket, "For now, can only _copy() within a bucket."
client = self._get_client(src_bucket)
try:
client.copy(
{
"Bucket": src_bucket,
"Key": src_s3_path,
},
dst_bucket,
dst_s3_path,
Config=self.transfer_config,
)
return True
except botocore.exceptions.ClientError as e:
logger.error("Error in file copy - {}".format(str(e)))
return False
def _head_object(self, path: str) -> Optional[Dict]:
bucket, s3_path = self._parse_uri(path)
client = self._get_client(bucket)
try:
# Raises exception if not exists, else it exists.
response = client.head_object(Bucket=bucket, Key=s3_path)
return response
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"] == "Bad Request":
raise OSError(
f"Error in checking s3 path {path} - " f"{type(e).__name__}: {e}"
) from e
return None
def _exists(self, path: str, **kwargs: Any) -> bool:
"""
Checks if there is a resource at the given URI.
Args:
path (str): A URI supported by this PathHandler
Returns:
bool: true if the path exists
"""
self._check_kwargs(kwargs)
return self._head_object(path) is not None
def _isfile(self, path: str, **kwargs: Any) -> bool:
"""
Checks if the resource at the given URI is a file.
Args:
path (str): A URI supported by this PathHandler
Returns:
bool: true if the path is a file
"""
self._check_kwargs(kwargs)
# NOTE: this incurs an API call.
return not path.endswith("/") and self._exists(path, **kwargs)
def _isdir(self, path: str, **kwargs: Any) -> bool:
"""
Checks if the resource at the given URI is a directory.
Args:
path (str): A URI supported by this PathHandler
Returns:
bool: true if the path is a directory
"""
self._check_kwargs(kwargs)
# NOTE: this incurs an API call.
return path.endswith("/") and self._exists(path, **kwargs)
def _ls(self, path: str, **kwargs: Any) -> List[str]:
"""
List the contents of the directory at the provided URI.
Args:
path (str): A URI supported by this PathHandler
Returns:
List[str]: list of contents in given path
"""
self._check_kwargs(kwargs)
bucket, s3_path = self._parse_uri(path)
client = self._get_client(bucket)
try:
# Pagination needed if >1000 entries.
paginator = client.get_paginator("list_objects_v2")
pages = paginator.paginate(
Bucket=bucket,
Prefix=s3_path,
)
return [obj["Key"] for page in pages for obj in page.get("Contents", [])]
except botocore.exceptions.ClientError as e:
raise OSError(
f"Error in ls path {path} - " f"{type(e).__name__}: {e}"
) from e
def _mkdirs(self, path: str, **kwargs: Any) -> None:
"""
Recursive directory creation function. Like mkdir(), but makes all
intermediate-level directories needed to contain the leaf directory.
Similar to the native `os.makedirs`.
Args:
path (str): A URI supported by this PathHandler
"""
self._check_kwargs(kwargs)
assert path.endswith("/"), path
bucket, s3_path = self._parse_uri(path)
client = self._get_client(bucket)
try:
client.put_object(Bucket=bucket, Key=s3_path)
except botocore.exceptions.ClientError as e:
raise OSError(
f"Error in mkdirs path {path} - " f"{type(e).__name__}: {e}"
) from e
def _rm(self, path: str, **kwargs: Any) -> None:
"""
Remove the file (not directory) at the provided URI.
Args:
path (str): A URI supported by this PathHandler
"""
self._check_kwargs(kwargs)
bucket, s3_path = self._parse_uri(path)
client = self._get_client(bucket)
try:
client.delete_object(Bucket=bucket, Key=s3_path)
except botocore.exceptions.ClientError as e:
raise OSError(
f"Error in rm path {path} - " f"{type(e).__name__}: {e}"
) from e
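# Minimal usage sketch of the URI handling above, mirroring _parse_uri/_head_object with
# plain boto3. The default bucket/key below are hypothetical and AWS credentials are
# assumed to already be configured (e.g. via the environment variables listed above).
def _example_s3_exists(uri: str = "s3://my-bucket/path/to/file.bin") -> bool:
    import boto3
    import botocore.exceptions

    # Same parsing rule as _parse_uri: strip the scheme, first segment is the bucket.
    splits = uri.replace("s3://", "").split("/")
    bucket, key = splits[0], "/".join(splits[1:])
    client = boto3.Session().client("s3")
    try:
        client.head_object(Bucket=bucket, Key=key)  # raises ClientError if missing
        return True
    except botocore.exceptions.ClientError:
        return False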
|
flash_metaseq-main
|
metaseq/s3_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
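# Minimal sketch of tokenize_line: runs of any whitespace collapse to single spaces
# before splitting, so tabs and newlines behave like ordinary spaces.
def _example_tokenize_line():
    assert tokenize_line("  hello \t world\n") == ["hello", "world"]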
|
flash_metaseq-main
|
metaseq/tokenizer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import copy
import logging
import os
import time
from argparse import Namespace
from typing import Any, Dict, Iterator, List, Optional
import numpy as np
import torch
from omegaconf import open_dict
from torch import nn
from metaseq import checkpoint_utils, tasks
from metaseq import utils
from metaseq.data import encoders
from metaseq.dataclass.configs import MetaseqConfig
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
from metaseq.distributed import fsdp_enable_wrap, fsdp_wrap
from metaseq.distributed.utils import (
get_data_parallel_rank,
get_data_parallel_world_size,
)
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
archive_map=None,
**kwargs,
):
post_build_model_hook = kwargs.get("post_build_model_hook", None)
from metaseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == "checkpoint_file":
checkpoint_file = v
elif (
k != "path"
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path["path"]
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith("."):
kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
"merges.txt": "bpe_merges",
"vocab.json": "bpe_vocab",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if "user_dir" in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))
def _build_fn(train_cfg, task):
if post_build_model_hook:
return post_build_model_hook(task.build_model(train_cfg.model), task)
else:
return task.build_model(train_cfg.model)
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
suffix=kwargs.get("suffix", ""),
build_model_hook=lambda cfg, task: _build_fn(cfg, task),
)
return {
"args": args,
"task": task,
"models": models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
lang_tokens = {}
langs = None
add_lang_bos_token = False
def to_lang_token(self, lang):
return f"<{lang}>"
def setup_task(self):
self.src_dict = self.task.source_dictionary
self.tgt_dict = self.task.target_dictionary
if "langs" in self.cfg.task:
self.langs = self.cfg.task.langs
lang_tokens = [
self.to_lang_token(x.strip()) for x in self.cfg.task.langs.split(",")
]
# for debug purpose
for lang_token in lang_tokens:
if lang_token not in self.src_dict:
self.src_dict.add_symbol(lang_token)
if lang_token not in self.tgt_dict:
self.tgt_dict.add_symbol(lang_token)
self.lang_tokens = set(lang_tokens)
if "add_bos_token" in self.cfg.task:
# self.add_lang_bos_token = True
self.add_lang_bos_token = self.cfg.task.add_bos_token
def __init__(
self,
cfg,
task,
models,
moe_disable_padding=True,
skip_prepare_for_inference=False,
):
super().__init__()
self.cfg = cfg
self.task = task
self.setup_task()
self.models = nn.ModuleList(models)
# optimize model for generation
if not skip_prepare_for_inference:
for model in self.models:
# For moe models and eval_lm
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(
getattr(cfg.generation, "replace_unk", None)
)
self.tokenizer = encoders.build_tokenizer(cfg.tokenizer)
self.bpe = encoders.build_bpe(cfg.bpe)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(
self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(
self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs
) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos]
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [
hypos[0]
for hypos in self.generate(
tokenized_sentences, score_reference=True, **kwargs
)
]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
batch_size=None,
**kwargs,
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0),
beam=beam,
verbose=verbose,
batch_size=batch_size,
**kwargs,
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.deepcopy(self.cfg.generation)
with open_dict(gen_args):
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
rank, world_size = get_data_parallel_rank(), get_data_parallel_world_size()
batches = self._build_batches(
tokenized_sentences,
skip_invalid_size_inputs,
rank=rank,
world_size=world_size,
batch_size=batch_size,
)
# To ensure even batch count across workers, some batches might be dummy batches. We shouldn't score these.
first_batch = None
for batch in batches:
is_dummy_batch = False
if not first_batch and "net_input" in batch:
first_batch = batch
if "net_input" not in batch:
if first_batch is not None:
batch = first_batch
is_dummy_batch = True
else:
continue
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
if is_dummy_batch: # Don't score it or add it to hypotheses
continue
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.cfg, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info("S\t{}".format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo["tokens"])
logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
logger.info(
"P\t{}".format(
" ".join(
map(
lambda x: "{:.4f}".format(x),
hypo["positional_scores"].tolist(),
)
)
)
)
if hypo["alignment"] is not None and getarg(
"print_alignment", False
):
logger.info(
"A\t{}".format(
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in hypo["alignment"]
]
)
)
)
return outputs
def get_sentence_and_language(self, sentence: str):
"""
If the sentence is prefixed with a language tag, the tag is stripped and both the language and the remaining sentence are returned.
input: '<lang>en-EN</lang>Some sentence here'
output: en-EN, 'Some sentence here'
"""
lang_begin = "<lang>"
lang_end = "</lang>"
lang = None
if sentence.startswith(lang_begin):
idx = sentence.find(lang_end)
if idx > 0:
lang = sentence[: idx + len(lang_end)]
lang = lang.replace(lang_begin, "").replace(lang_end, "")
sentence = sentence[idx + len(lang_end) :]
return lang, sentence
def add_language_to_sentence(self, sentence: str, lang_token):
lang_begin = "<lang>"
lang_end = "</lang>"
lang_prefix = lang_begin + lang_token + lang_end
sentence = lang_prefix + sentence
return sentence
def encode(self, sentence: str) -> torch.LongTensor:
lang, sentence = self.get_sentence_and_language(sentence)
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
if lang is not None:
sentence = f"{lang} {sentence}"
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
# Remove the lang token
sent_split = sentence.split(" ", 1)
lang_token = None
if sent_split[0] in self.lang_tokens:
lang_token = sent_split[0]
sentence = sent_split[1]
sentence = self.remove_bpe(sentence)
sentence = self.detokenize(sentence)
if lang_token is not None:
sentence = self.add_language_to_sentence(sentence, lang_token)
return sentence
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self,
tokens: List[torch.LongTensor],
skip_invalid_size_inputs: bool,
world_size=None,
rank=None,
batch_size=None,
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
if batch_size is None:
batch_size = self.cfg.dataset.batch_size
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=batch_size,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
num_shards=world_size,
shard_id=rank,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
class GeneratorInterface:
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, cfg: MetaseqConfig):
self.cfg = cfg
if isinstance(self.cfg, Namespace):
self.cfg = convert_namespace_to_omegaconf(self.cfg)
def load_model(self):
utils.import_user_module(self.cfg.common)
# Fix seed for stochastic decoding
if (
self.cfg.common.seed is not None
and not self.cfg.generation.no_seed_provided
):
np.random.seed(self.cfg.common.seed)
utils.set_torch_seed(self.cfg.common.seed)
# Setup task, e.g., translation
task = tasks.setup_task(self.cfg.task)
def _build_model(cfg, task):
model = task.build_model(cfg.model).half().cuda()
model.make_generation_fast_()
return fsdp_wrap(model)
# Load the model
overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
logger.info("loading model(s) from {}".format(self.cfg.common_eval.path))
with fsdp_enable_wrap(
self.cfg.distributed_training,
use_sharded_state=self.cfg.distributed_training.use_sharded_state,
):
models, _model_args, _task = checkpoint_utils.load_model_ensemble_and_task(
utils.split_paths(self.cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=self.cfg.checkpoint.checkpoint_suffix,
strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=self.cfg.checkpoint.checkpoint_shard_count,
build_model_hook=_build_model,
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Handle tokenization and BPE
bpe = task.build_bpe(self.cfg.bpe)
# Set state
self.bpe = bpe
self.task = task
self.models = models
self.src_dict = src_dict
self.tgt_dict = tgt_dict
return models
def generate(
self,
inputs: List[List[int]],
min_tokens: List[int] = None,
max_tokens: List[int] = None,
temperature: float = 1.0,
top_p: float = -1.0,
logprobs: int = 0,
n: int = 1,
best_of: Optional[int] = None,
echo: bool = False,
stop: Optional[List[int]] = None,
seed: Optional[int] = None,
use_cuda: bool = True,
):
"""
Generate from sequences.
Parameters match those of the OpenAI API.
https://beta.openai.com/docs/api-reference/completions/create
inputs: a list of pre-tokenized prompts
min_tokens: blocks EOS until at least this many tokens have been generated
max_tokens: forces EOS after this many tokens
temperature: softmax temperature
top_p: nucleus probability
logprobs: if > 0, also return this many top token log-probabilities per position
n: number of completions requested (as in the OpenAI API)
best_of: beam width used for search; must be >= n (defaults to n)
echo: if true, returned text/tokens/scores includes the prompt.
This is useful for getting PPL evaluations.
stop: a list of terminating tokens
seed: an integer if desired
use_cuda: should we use GPUs.
"""
if seed:
utils.set_torch_seed(seed)
start_time = time.time()
total_generation_time = 0
# Initialize generator
if not best_of:
best_of = n
assert best_of >= n
self.cfg.generation.sampling_topp = top_p if top_p > 0 else -1
self.cfg.generation.sampling = top_p > 0.0
self.cfg.generation.beam = best_of
if temperature > 0:
self.cfg.generation.temperature = temperature
else:
self.cfg.generation.temperature = 1.0
MAX_SEQ_LEN = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in self.models]
)
# TODO(roller): simplify
retval = []
tokens = [torch.LongTensor(t) for t in inputs]
lengths = [len(t) for t in inputs]
batches = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
).next_epoch_itr(shuffle=False)
for batch in batches:
src_tokens = batch["net_input"]["src_tokens"]
src_lengths = batch["net_input"]["src_lengths"]
batchsize = src_tokens.size(0)
# set generation args
# prevent us from ever generating past our max sequence length
if max_tokens is None:
max_tokens = [MAX_SEQ_LEN] * batchsize
if min_tokens is None:
min_tokens = [0] * batchsize
total_max_tokens = min(
MAX_SEQ_LEN, max(max_tokens) + src_lengths.max().item()
)
total_min_tokens = max(min_tokens) + src_lengths.max().item()
self.cfg.generation.min_len = total_min_tokens
self.cfg.generation.max_len_b = total_max_tokens
self.cfg.generation.max_len_a = 0
logger.info(f"Preparing generator with settings {self.cfg.generation}")
generator = self.task.build_generator(
self.models, self.cfg.generation, extra_gen_cls_kwargs={"stop": stop}
)
# okay actually generate
logger.info(f"Executing generation on input tensor size {src_tokens.shape}")
if use_cuda:
batch = utils.move_to_cuda(batch)
translate_start_time = time.time()
translations = self.task.inference_step(generator, self.models, batch)
translate_time = time.time() - translate_start_time
total_generation_time += translate_time
# possibly cut off any bsz padding we did
translations = translations[: len(inputs)]
# actually turn everything into strings
for i in range(len(translations)):
decoding = translations[i]
beams = []
for beam in decoding:
# first beam is always the highest scoring
tokens = beam["tokens"].tolist() # implicit move to cpu
scores = beam["positional_scores"].tolist()
if logprobs > 0:
distributions = beam["distributions"].cpu()
else:
distributions = None
tokens, scores, distributions = GeneratorInterface._filter_special(
tokens, scores, distributions
)
prompt_len = src_lengths[i]
if echo:
# don't cut off prompt
tokens = tokens[: prompt_len + max_tokens[i] - 1]
scores = scores[: prompt_len + max_tokens[i] - 1]
if logprobs > 0:
distributions = distributions[
: prompt_len + max_tokens[i] - 1
]
else:
# cut off prompt
tokens = tokens[prompt_len - 1 :][: max_tokens[i]]
scores = scores[prompt_len - 1 :][: max_tokens[i]]
if logprobs > 0:
distributions = distributions[prompt_len - 1 :][
: max_tokens[i]
]
# turn it into a string
text = self.bpe.bpe.decode(tokens)
# re-encode it so we get offsets
token_offsets = [s for s, e in self.bpe.bpe.encode(text).offsets]
result = {
"text": self.bpe.bpe.decode(tokens),
"tokens": [self.bpe.bpe.decode([t]) for t in tokens],
# text offset is useful for cutting off prompts or prefixes
# or evaluating PPL on just a subset of tokens
"text_offset": token_offsets,
"token_scores": scores,
}
if logprobs > 0:
# final result is a List[Dict[str, float]]
# where each item in the list corresponds to a token in the
# sequence, and the dict provides the probabilities of the
# top-k tokens at that timestep.
out_logprobs = []
# torch.topk returns (values, indices): scores first, then token ids.
all_top_scores, all_top_toks = distributions.topk(
k=logprobs, dim=-1
)
for top_scores, top_toks in zip(all_top_scores, all_top_toks):
lp = {
self.bpe.bpe.decode([t.item()]): s.item()
for t, s in zip(top_toks, top_scores)
}
out_logprobs.append(lp)
result["top_logprobs"] = out_logprobs
else:
result["top_logprobs"] = None
beams.append(result)
retval.append(beams)
logger.info(
"Total time: {:.3f} seconds; generation time: {:.3f}".format(
time.time() - start_time, total_generation_time
)
)
return retval
@staticmethod
def _filter_special(
tokens: List[int],
scores: List[float],
distributions,
pad_token: int = 1,
):
"""
Cut off the sequence at the first special token and drop padding tokens.
"""
# tokens is a 1D list of token IDs of length seqlen
# scores is a 1D list of log-probability scores for those tokens (length seqlen)
# distributions (optional) is a seqlen x vocab_size tensor corresponding to
# the full distribution of predictions at each timestep
output = []
mask = []
for t, s in zip(tokens, scores):
if t == pad_token:
# simply skip pads
mask.append(False)
continue
if t <= 3:
# special tokens (ids <= 3) end the sequence
break
mask.append(True)
output.append((t, s))
new_tokens, new_scores = zip(*output)
# cut off at stop and drop pads
if distributions is not None:
distributions = distributions[: len(mask)][mask]
distributions = distributions[: len(output)]
return new_tokens, new_scores, distributions
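# Minimal sketch of _filter_special on a toy sequence, assuming the default special
# token convention above (pad id 1 is skipped, any id <= 3 ends generation).
def _example_filter_special():
    toks, scores, _ = GeneratorInterface._filter_special(
        tokens=[10, 1, 11, 2, 12],
        scores=[-0.1, 0.0, -0.2, -0.3, -0.4],
        distributions=None,
    )
    assert list(toks) == [10, 11]
    assert list(scores) == [-0.1, -0.2]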
|
flash_metaseq-main
|
metaseq/hub_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import torch
from metaseq import utils
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(
self,
tgt_dict,
softmax_batch=None,
eos=None,
compute_vocab_dist=False,
):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos() if eos is None else eos
self.softmax_batch = softmax_batch or sys.maxsize
assert self.softmax_batch > 0
self.compute_vocab_dist = compute_vocab_dist
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample["target"]
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
avg_vocab_dist = None
for model in models:
model.eval()
decoder_out = model(**net_input)
attn = decoder_out[1] if len(decoder_out) > 1 else None
if type(attn) is dict:
attn = attn.get("attn", None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
vocab_dist = []
for bd, tgt, is_single in batched:
sample["target"] = tgt
curr_prob = model.get_normalized_probs(
bd, log_probs=len(models) == 1, sample=sample
).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
if self.compute_vocab_dist:
vocab_dist = curr_prob
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(
curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt
)
probs[idx:end] = tgt_probs.view(-1)
idx = end
if self.compute_vocab_dist:
vocab_dist.append(curr_prob.view(step, -1))
sample["target"] = orig_target
if self.compute_vocab_dist and type(vocab_dist) is list:
vocab_dist = torch.cat(vocab_dist, dim=0) # (bsz x tsz, vocab_size)
vocab_dist = vocab_dist.contiguous().view(
sample["target"].size(0), sample["target"].size(1), -1
)
probs = probs.view(sample["target"].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if self.compute_vocab_dist:
if avg_vocab_dist is None:
avg_vocab_dist = vocab_dist
else:
avg_vocab_dist.add_(vocab_dist)
if attn is not None:
if torch.is_tensor(attn):
attn = attn.data
else:
attn = attn[0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_vocab_dist is not None:
avg_vocab_dist.div_(len(models))
avg_vocab_dist.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = (
utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad)
if sample["target"] is not None
else None
)
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_vocab_dist is not None:
avg_vocab_dist_i = avg_vocab_dist[i][
start_idxs[i] : start_idxs[i] + tgt_len, :
].cpu() # off load
else:
avg_vocab_dist_i = None
id_i = sample["id"][i] if "id" in sample else None
if avg_attn is not None:
avg_attn_i = avg_attn[i]
else:
avg_attn_i = None
hypos.append(
[
{
"tokens": ref,
"score": score_i,
"attention": avg_attn_i,
"alignment": None,
"positional_scores": avg_probs_i,
"id": id_i,
"vocab_dist": avg_vocab_dist_i,
}
]
)
return hypos
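# Minimal sketch of the gather_target_probs step above: torch.gather along the vocab
# dimension selects, for each position, the probability the model assigned to the
# reference token. Shapes below are toy values.
def _example_gather_target_probs():
    probs = torch.full((2, 3, 5), 0.2)  # toy (bsz, tsz, vocab) distribution
    target = torch.tensor([[0, 1, 2], [3, 4, 0]])
    picked = probs.gather(dim=2, index=target.unsqueeze(-1))
    assert picked.shape == (2, 3, 1)
    assert torch.allclose(picked, torch.full((2, 3, 1), 0.2))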
|
flash_metaseq-main
|
metaseq/sequence_scorer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
from typing import Dict, Optional
from torch import Tensor
from typing_extensions import Protocol
class HasIncrementalState(Protocol):
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
class IncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def with_incremental_state(cls):
cls.__bases__ = (IncrementalState,) + tuple(
b for b in cls.__bases__ if b != IncrementalState
)
return cls
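# Minimal sketch of the incremental-state helpers: for simplicity this toy class
# inherits from IncrementalState directly instead of going through the
# with_incremental_state decorator; the "prev_key" cache name is arbitrary.
def _example_incremental_state():
    import torch

    class _ToyCache(IncrementalState):
        pass

    layer = _ToyCache()
    state: Dict[str, Dict[str, Optional[Tensor]]] = {}
    layer.set_incremental_state(state, "prev_key", {"k": torch.zeros(1, 4)})
    cached = layer.get_incremental_state(state, "prev_key")
    assert cached is not None and cached["k"].shape == (1, 4)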
|
flash_metaseq-main
|
metaseq/incremental_decoding_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import importlib
import logging
import os
import random
import sys
import warnings
from typing import Callable, Dict, List, Optional
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import Tensor
from metaseq.distributed import utils as distributed_utils
from metaseq.incremental_decoding_utils import HasIncrementalState
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
class FileContentsAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(FileContentsAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
from metaseq.file_io import PathManager
if PathManager.isfile(values):
with PathManager.open(values) as f:
argument = f.read().strip()
else:
argument = values
setattr(namespace, self.dest, argument)
def split_paths(paths: str) -> List[str]:
return (
paths.split(os.pathsep)
if "://" not in paths
else paths.split(MANIFOLD_PATH_SEP)
)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample, cast_to_fp32=True):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if cast_to_fp32 and tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
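# Minimal sketch of apply_to_sample on a nested toy batch: the transform hits every
# tensor leaf while dicts and lists keep their structure.
def _example_apply_to_sample():
    batch = {"net_input": {"src_tokens": torch.ones(2, 3)}, "ids": [torch.tensor([0, 1])]}
    doubled = apply_to_sample(lambda t: t * 2, batch)
    assert torch.equal(doubled["net_input"]["src_tokens"], torch.full((2, 3), 2.0))
    assert torch.equal(doubled["ids"][0], torch.tensor([0, 2]))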
def get_incremental_state(
module: HasIncrementalState,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: HasIncrementalState,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
norm_type2_reduce_op = {"l2": dist.ReduceOp.SUM, "inf": dist.ReduceOp.MAX}
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def post_process_prediction(
hypo_tokens,
alignment,
tgt_dict,
remove_bpe=None,
extra_symbols_to_ignore=None,
):
hypo_str = tgt_dict.string(
hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
)
if remove_bpe is not None:
# Convert back to tokens for evaluating without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
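# Minimal sketch of make_positions: with padding_idx=1, non-pad tokens are numbered
# from padding_idx + 1 onward and pad slots keep the padding index itself.
def _example_make_positions():
    toks = torch.tensor([[7, 8, 1], [9, 1, 1]])
    expected = torch.tensor([[2, 3, 1], [2, 1, 1]])
    assert torch.equal(make_positions(toks, padding_idx=1), expected)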
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_l2_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(
chunk_size, has_inf, [cur_device_grads], False
)
norms.append(norm[0].to(torch.cuda.current_device()))
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
@torch.no_grad()
def clip_grad_norm_(
params, max_norm, norm_type="l2", aggregate_norm_fn=None, device=None
) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
params = list(filter(grad_exists, params))
grads, sharded_grads = [], []
if device is None:
if torch.cuda.is_available():
# param/grads could be on CPU if using CPU offloading, but we want
# everything on GPU if possible
device = torch.device("cuda:{}".format(torch.cuda.current_device()))
elif len(params) > 0:
device = params[0].device # could be "xla"
else:
device = torch.device("cpu")
def norm(t, n_type):
if n_type == "l2":
return torch.norm(t, p=2, dtype=torch.float32)
elif n_type == "inf":
return torch.norm(t, p=float("inf"), dtype=torch.float32)
else:
raise ValueError(
f"Invalid clip_norm_type: {n_type}! Please pass either 'l2' or 'inf'!"
)
for p in params:
if hasattr(p, "_is_sharded"):
sharded_grads.append(p.grad.detach())
else:
grads.append(p.grad.detach())
if len(grads) == 0:
total_norm = torch.tensor(0.0, dtype=torch.float32, device=device)
elif len(grads) == 1:
total_norm = norm(grads[0], norm_type)
else:
if (
multi_tensor_l2norm_available
and norm_type == "l2"
and grads[0].dtype != torch.bfloat16
):
total_norm = multi_tensor_l2_total_norm(grads)
else:
if (
torch.cuda.is_available()
and norm_type == "l2"
and grads[0].dtype != torch.bfloat16
):
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
total_norm = norm(
torch.stack([norm(g, norm_type) for g in grads]), norm_type
)
# calculate split_norm and all_reduce with other workers
norms = [total_norm]
for split_grads in [sharded_grads]:
if len(split_grads) == 0:
continue
split_norm = norm(
torch.stack([norm(g, norm_type) for g in split_grads]), norm_type
)
if dist.is_initialized():
reduce_op = norm_type2_reduce_op[norm_type]
if norm_type == "l2":
split_norm.pow_(2)
dist.all_reduce(
split_norm,
group=distributed_utils.get_data_parallel_group(),
op=reduce_op,
)
if norm_type == "l2":
split_norm.sqrt_()
norms.append(split_norm)
if len(norms) > 1:
total_norm = norm(torch.stack(norms), norm_type)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + sharded_grads:
g.mul_(clip_coef)
return total_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
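# Minimal sketch of resolve_max_positions: scalar limits broadcast against tuple
# limits and the element-wise minimum wins; None entries are ignored.
def _example_resolve_max_positions():
    assert resolve_max_positions(1024, (512, 2048), None) == (512, 1024)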
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path) and not os.path.isfile(
os.path.dirname(module_path)
):
metaseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
if os.path.exists(metaseq_rel_path):
module_path = metaseq_rel_path
else:
metaseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(metaseq_rel_path):
module_path = metaseq_rel_path
else:
raise FileNotFoundError(module_path)
# ensure that user modules are only imported once
import_user_module.memo = getattr(import_user_module, "memo", set())
if module_path not in import_user_module.memo:
import_user_module.memo.add(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
else:
raise ImportError(
"Failed to import --user-dir={} because the corresponding module name "
"({}) is not globally unique. Please rename the directory to "
"something unique and try again.".format(module_path, module_name)
)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
from metaseq.logging.meters import safe_round
if loss is None:
return 0.0
try:
return safe_round(base**loss, round)
except OverflowError:
return float("inf")
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def relu_squared(x: torch.Tensor):
return F.relu(x).pow(2)
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
from metaseq.modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "relu_squared":
return relu_squared
elif activation == "gelu":
return gelu
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"relu_squared",
"gelu",
"gelu_accurate",
"tanh",
"linear",
]
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def get_rng_state():
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state):
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = get_rng_state()
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def __enter__(self):
return self
def __exit__(self, *exc):
set_rng_state(self.rng_state)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
Given a list of CudaEnvironments, pretty-print them.
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
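# Minimal sketch of eval_str_list: string inputs are parsed with eval and coerced
# element-wise, while a bare scalar falls back to a single-element list.
def _example_eval_str_list():
    assert eval_str_list("[0.5, 1]", type=float) == [0.5, 1.0]
    assert eval_str_list(10, type=float) == [10.0]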
def round_safe(x):
if torch.is_tensor(x):
return float(np.round(x.cpu().numpy(), 4))
else:
try:
return round(x, 4)
except Exception:
return x
def remove_prefix(text: str, prefix: str):
if text.startswith(prefix):
return text[len(prefix) :]
return text
def print_r0(x, file=None):
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
print(x, file=file, flush=True)
def get_random_port():
old_state = random.getstate()
random.seed()
port = random.randint(10000, 20000)
random.setstate(old_state)
return port
def floating_point_precision_convertor(
x, fp16: bool, memory_efficient_fp16: bool, bf16: bool
):
"""
Convert a tensor x into the desired dtype.
Also sanity checks combinations of options.
"""
if memory_efficient_fp16:
assert not bf16, "Do not combine bf16 with memory_efficient_fp16."
if bf16:
assert fp16, "Setting --bf16 requires also setting --fp16 for legacy reasons."
if not fp16 and not bf16:
return x
if not memory_efficient_fp16:
# original parameters stay in fp32 and are converted by fairscale
return x
elif bf16:
return x.bfloat16()
else:
return x.half()
def get_precise_epoch(epoch: Optional[int], count: int, iterator_size: int) -> float:
return (
epoch - 1 + (count + 1) / float(iterator_size)
if epoch is not None and iterator_size > 0
else None
)
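# Minimal sketch of get_precise_epoch: halfway through epoch 3 of a 100-batch
# iterator (count=49, i.e. the 50th batch) resolves to 2.5.
def _example_get_precise_epoch():
    assert get_precise_epoch(epoch=3, count=49, iterator_size=100) == 2.5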
|
flash_metaseq-main
|
metaseq/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import collections
import functools
import logging
import os
import re
import traceback
from glob import glob
from typing import Any, Dict, List, Optional
import torch
from omegaconf import OmegaConf
from metaseq.dataclass.configs import CheckpointConfig
from metaseq.dataclass.utils import overwrite_args_by_name
from metaseq.distributed import utils as dist_utils
from metaseq.file_io import PathManager, torch_load_cpu
from metaseq.launcher.opt_job_constants import ComputeEnvs
logger = logging.getLogger(__name__)
OPT_KEY = "last_optimizer_state"
def save_checkpoint(
cfg: CheckpointConfig,
trainer,
epoch_itr,
val_loss,
training_finished=False,
async_callback_fn=None,
):
from metaseq import meters
# only one worker should attempt to create the required dir
if trainer.data_parallel_rank == 0:
os.makedirs(cfg.save_dir, exist_ok=True)
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if cfg.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if cfg.no_save:
return
trainer.consolidate_optimizer() # TODO(SS): we don't need this if no_save_optimizer_state is set
if not trainer.should_save_checkpoint_on_current_rank:
return
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates")
def is_better(a, b):
return a >= b if cfg.maximize_best_checkpoint_metric else a <= b
suffix = trainer.checkpoint_suffix
checkpoint_conds = collections.OrderedDict()
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not end_of_epoch
and cfg.save_interval_updates > 0
and updates % cfg.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = (
val_loss is not None
and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
and not cfg.no_best_checkpoints
)
if (
val_loss is not None
and cfg.keep_best_checkpoints > 0
and not cfg.no_best_checkpoints
):
checkpoint_conds[
"checkpoint.best_{}_{:.2f}.pt".format(cfg.best_checkpoint_metric, val_loss)
] = not hasattr(save_checkpoint, "best") or is_better(
val_loss, save_checkpoint.best
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not cfg.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
checkpoints = [
os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
if PathManager.islink(checkpoints[0]):
PathManager.rm(checkpoints[0])
trainer.save_checkpoint(
checkpoints[0],
extra_state,
training_finished=training_finished,
async_callback_fn=async_callback_fn,
)
def _copy_if_not_async(src, dest):
if cfg.write_checkpoints_asynchronously:
pass # TODO[ioPath]: Need to implement a delayed asynchronous file copying/moving feature.
else:
assert PathManager.copy(
src, dest, overwrite=True
), f"Failed to copy {src} to {dest}"
for cp in checkpoints[1:]:
_copy_if_not_async(src=checkpoints[0], dest=cp)
write_timer.stop()
logger.info(
"Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
_delete_old_checkpoint_files(
cfg,
end_of_epoch,
suffix,
)
def _delete_old_checkpoint_files(
cfg: CheckpointConfig, end_of_epoch: bool, suffix: str
):
if not end_of_epoch and cfg.keep_interval_updates > 0:
suffixes = [suffix]
# remove old checkpoints; checkpoints are sorted in descending order
for one_suffix in suffixes:
checkpoints = _checkpoint_paths(
cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(one_suffix)
)
for old_chk in checkpoints[cfg.keep_interval_updates :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if cfg.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = _checkpoint_paths(
cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix)
)
for old_chk in checkpoints[cfg.keep_last_epochs :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if cfg.keep_best_checkpoints > 0:
# only keep the best N checkpoints according to validation metric
checkpoints = _checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
cfg.best_checkpoint_metric, suffix
),
)
if not cfg.maximize_best_checkpoint_metric:
checkpoints = checkpoints[::-1]
for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
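# Minimal sketch of the checkpoint-name patterns used above (checkpoint suffix assumed
# empty): the interval-update pattern matches "checkpoint_<epoch>_<updates>.pt" files
# only, while plain epoch checkpoints such as "checkpoint3.pt" match the second pattern.
def _example_checkpoint_patterns():
    update_pat = re.compile(r"checkpoint_\d+_(\d+)\.pt")
    epoch_pat = re.compile(r"checkpoint(\d+)\.pt")
    assert update_pat.fullmatch("checkpoint_3_20000.pt") is not None
    assert update_pat.fullmatch("checkpoint3.pt") is None
    assert epoch_pat.fullmatch("checkpoint3.pt") is not None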
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
reset_optimizer = cfg.reset_optimizer
reset_lr_scheduler = cfg.reset_lr_scheduler
optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
reset_meters = cfg.reset_meters
reset_dataloader = cfg.reset_dataloader
if cfg.finetune_from_model is not None and (
reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
):
raise ValueError(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
)
suffix = trainer.checkpoint_suffix
default_restore_file = "checkpoint_last.pt"
# default to loading from restore file.
if cfg.restore_file == default_restore_file:
checkpoint_path_to_load = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
first_launch = not PathManager.exists(checkpoint_path_to_load)
if cfg.finetune_from_model is not None and first_launch:
# if there is no last checkpoint to restore, start the finetune from pretrained model
# else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
reset_optimizer = True
reset_lr_scheduler = True
reset_meters = True
reset_dataloader = True
checkpoint_path_to_load = None
if PathManager.exists(cfg.finetune_from_model):
checkpoint_path_to_load = cfg.finetune_from_model
elif suffix is not None: # check for sharded version
sharded_path = cfg.finetune_from_model.replace(".pt", suffix + ".pt")
if PathManager.exists(sharded_path):
checkpoint_path_to_load = sharded_path
if checkpoint_path_to_load is None:
raise ValueError(
f"--finetune-from-model {cfg.finetune_from_model} does not exist either as is or sharded"
)
logger.info(
f"loading pretrained model from {checkpoint_path_to_load}: "
"optimizer, lr scheduler, meters, dataloader will be reset"
)
elif suffix is not None:
checkpoint_path_to_load = cfg.restore_file.replace(".pt", suffix + ".pt")
else:
checkpoint_path_to_load = cfg.restore_file
if cfg.restore_file != default_restore_file and cfg.finetune_from_model:
raise ValueError(
"--finetune-from-model and --restore-file (non-default value) "
"can not be specified together: " + str(cfg)
)
# Azure logic
try:
from metaseq_internal import azure_utils
has_metaseq_internal = True
except ImportError:
has_metaseq_internal = False
logger.warning(
"Proceeding without metaseq-internal installed! Please check if you need this!"
)
    # TODO(susanz): fix all of this spaghetti, split out logic by env
if (
cfg.cloud_upload_path
and cfg.cluster_env == ComputeEnvs.AZURE.value
and has_metaseq_internal
):
if (
# --restore-file was not passed, always download latest checkpoint
(
cfg.restore_file == default_restore_file
and cfg.finetune_from_model is None
)
# --restore-file was passed, but we requeued, so download latest checkpoint
or int(os.environ.get("SLURM_RESTART_COUNT", 0)) > 0
):
# download checkpoint into local save_dir
checkpoint_path_to_load = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
azure_utils.download_recent_ckpt(
cfg.cloud_upload_path, checkpoint_path_to_load, suffix + ".pt"
)
elif (
# --restore-file was passed and is a blob URL, download that checkpoint
cfg.restore_file != default_restore_file
and "windows.net" in cfg.restore_file
):
blob_url = cfg.restore_file.replace(".pt", suffix + ".pt")
# download checkpoint into local save_dir
checkpoint_path_to_load = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
azure_utils.download_specific_ckpt(blob_url, checkpoint_path_to_load)
else:
logger.info(
f"Using checkpoint {checkpoint_path_to_load} even while on Azure"
)
# RSC logic: --restore-file was passed, and we requeued
elif (
cfg.restore_file != default_restore_file
and int(os.environ.get("SLURM_RESTART_COUNT", 0)) > 0
):
# point checkpoint_path to the current checkpoint directory for loading, if it exists.
save_dir_last = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
if PathManager.isfile(save_dir_last):
checkpoint_path_to_load = save_dir_last
logger.info(f"attempting to load checkpoint from: {checkpoint_path_to_load}")
extra_state = trainer.load_checkpoint(
checkpoint_path_to_load,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
reset_meters=reset_meters,
)
if (
extra_state is not None
and "best" in extra_state
and not reset_optimizer
and not reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state["epoch"], **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(epoch=1, **passthrough_args)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr
def _is_checkpoint_sharded(checkpoint_files) -> bool:
"""Infer if state is sharded based on whether largest file is more than 10% larger than smallest."""
sizes = [os.path.getsize(p) for p in checkpoint_files]
size_ratio = max(sizes) / min(sizes)
if size_ratio >= 1.1:
return False
else:
return True
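# Illustrative sketch (not part of the original module): the same size heuristic as
# _is_checkpoint_sharded(), applied to plain byte counts so it can be sanity-checked
# without touching the filesystem. The helper name and sample sizes are made up.
def _example_shard_size_heuristic(sizes):
    # Files are treated as shards of a single checkpoint only when their sizes are
    # within ~10% of each other, e.g. [1000, 1020, 990] -> True, [1000, 2500] -> False.
    return max(sizes) / min(sizes) < 1.1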
def get_paths_to_load(local_path, suffix="rank-"):
checkpoint_files = glob(re.sub(f"{suffix}[0-9]+", f"{suffix}*", local_path))
if not _is_checkpoint_sharded(checkpoint_files):
return [local_path]
checkpoint_files_count = len(checkpoint_files)
world_size = dist_utils.get_data_parallel_world_size()
fnames = []
if world_size >= checkpoint_files_count:
return [local_path]
assert checkpoint_files_count % world_size == 0
n_local_files = int(checkpoint_files_count / world_size)
rank = dist_utils.get_data_parallel_rank()
    start_rank = n_local_files * rank  # index of the first shard owned by this rank
for rank_to_load in range(start_rank, start_rank + n_local_files):
fname = re.sub(
f"{suffix}[0-9]+",
f"{suffix}{rank_to_load}",
local_path,
)
fnames.append(fname)
logger.info(
f"Loading {checkpoint_files_count} on {world_size} DDP workers: {n_local_files} files per worker. "
)
return fnames
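# Illustrative sketch (not part of the original module): mirrors the filename rewriting
# performed by get_paths_to_load(). The helper name and the example path are made up;
# it only assumes the shard id is embedded in the filename as "<suffix><number>".
def _example_shard_paths_for_rank(local_path, rank, n_local_files, suffix="shard"):
    import re  # also imported at module level in the original file
    start = n_local_files * rank  # rank r owns shards [r * n, (r + 1) * n)
    # e.g. ("checkpoint_last-shard0.pt", rank=1, n_local_files=2)
    #   -> ["checkpoint_last-shard2.pt", "checkpoint_last-shard3.pt"]
    return [
        re.sub(f"{suffix}[0-9]+", f"{suffix}{i}", local_path)
        for i in range(start, start + n_local_files)
    ]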
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False) -> dict:
"""Loads a checkpoint to CPU (with upgrading for backward compatibility).
If doing single-GPU training or if the checkpoint is only being loaded by at
most one process on each node (current default behavior is for only rank 0
to read the checkpoint from disk), load_on_all_ranks should be False to
avoid errors from torch.distributed not having been initialized or
torch.distributed.barrier() hanging.
If all processes on each node may be loading the checkpoint
simultaneously, load_on_all_ranks should be set to True to avoid I/O
conflicts.
There's currently no support for > 1 but < all processes loading the
checkpoint on each node.
"""
local_path = PathManager.get_local_path(path)
# The locally cached file returned by get_local_path() may be stale for
# remote files that are periodically updated/overwritten (ex:
# checkpoint_last.pt) - so we remove the local copy, sync across processes
# (if needed), and then download a fresh copy.
if local_path != path and PathManager.path_requires_pathmanager(path):
try:
os.remove(local_path)
except FileNotFoundError:
# With potentially multiple processes removing the same file, the
# file being missing is benign (missing_ok isn't available until
# Python 3.8).
pass
if load_on_all_ranks:
torch.distributed.barrier()
local_path = PathManager.get_local_path(path)
    # paths to the checkpoint...-shard{N}.pt files
paths_to_load = get_paths_to_load(local_path, suffix="shard")
try:
if len(paths_to_load) > 1:
state = _merge_flat_fsdp_shards([torch_load_cpu(f) for f in paths_to_load])
else:
state = torch_load_cpu(local_path)
    except Exception:
        print(
            "got exception while trying to load",
            path,
            "with paths to load",
            paths_to_load,
        )
        raise
logger.info("Done reading from disk")
if "cfg" in state and state["cfg"] is not None:
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = lambda _: True
state["cfg"] = OmegaConf.create(state["cfg"])
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(state["cfg"], True)
if arg_overrides is not None:
overwrite_args_by_name(state["cfg"], arg_overrides)
state = _upgrade_state_dict(state)
return state
def load_model_ensemble_and_task(
filenames,
arg_overrides: Optional[Dict[str, Any]] = None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
build_model_hook=None,
):
assert state is None or len(filenames) == 1
from metaseq import tasks
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
cfg = None
for filename in filenames:
orig_filename = filename
assert num_shards > 0
for shard_idx in range(num_shards):
if num_shards == 1:
filename = filename.replace(".pt", suffix + ".pt")
else:
filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
if state is None:
state = load_checkpoint_to_cpu(filename, arg_overrides)
if "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
else:
raise RuntimeError(
f"!!! cfg does not exist in state keys = {state.keys()} !!!"
)
# Load 175B model trained on megatron (model parallel) branch
# "cfg.common.model_parallel_size == 1" checks if model parallel is
# enabled at load time. If it's not, fall back to non-MP
# transformer code path.
if (
getattr(cfg.model, "arch", None) == "transformer_lm_megatron"
and cfg.common.model_parallel_size == 1
):
cfg.model.arch = "transformer_lm_gpt"
cfg.model._name = "transformer_lm_gpt"
oproj_key = "decoder.output_projection.weight"
emb_key = "decoder.embed_tokens.weight"
if emb_key in state["model"] and oproj_key not in state["model"]:
state["model"][oproj_key] = state["model"][emb_key]
if task is None:
task = tasks.setup_task(cfg.task)
if "task_state" in state:
task.load_state_dict(state["task_state"])
if build_model_hook is not None:
model = build_model_hook(cfg, task)
else:
# build model for ensemble
model = task.build_model(cfg.model)
model.load_state_dict(state["model"], strict=strict, model_cfg=cfg.model)
logger.info("Done loading state dict")
# reset state so it gets loaded for the next model in ensemble
state = None
ensemble.append(model)
return ensemble, cfg, task
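# Illustrative usage sketch (not part of the original module): loading a single,
# unsharded checkpoint and returning the only member of the "ensemble". The helper
# name is hypothetical and `path` is a placeholder supplied by the caller.
def _example_load_single_model(path):
    models, cfg, task = load_model_ensemble_and_task([path])
    return models[0], cfg, task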
def _checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = float(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
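# Illustrative sketch (not part of the original module): the same "sort by the first
# regex group, descending" rule as _checkpoint_paths(), applied to in-memory filename
# strings instead of a directory listing. The helper name and example names are made up.
def _example_sort_by_first_group(filenames, pattern=r"checkpoint(\d+)\.pt"):
    import re  # also imported at module level in the original file
    regexp = re.compile(pattern)
    keyed = []
    for name in filenames:
        m = regexp.fullmatch(name)
        if m is not None:
            keyed.append((float(m.group(1)), name))
    # e.g. ["checkpoint3.pt", "checkpoint10.pt"] -> ["checkpoint10.pt", "checkpoint3.pt"]
    return [name for _, name in sorted(keyed, reverse=True)]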
def torch_persistent_save(
obj, filename: str, async_write: bool = False, async_callback_fn=None
):
assert (
async_callback_fn is None or async_write
), "async_callback_fn requires async_write=True (--save-async)"
if async_write and async_callback_fn is not None:
callback = functools.partial(async_callback_fn, filename)
else:
callback = None
if async_write:
with PathManager.opena(filename, "wb", callback_after_file_close=callback) as f:
_torch_persistent_save(obj, f)
else:
if PathManager.supports_rename(filename):
# do atomic save
with PathManager.open(filename + ".tmp", "wb") as f:
_torch_persistent_save(obj, f)
PathManager.rename(filename + ".tmp", filename)
else:
# fallback to non-atomic save
with PathManager.open(filename, "wb") as f:
_torch_persistent_save(obj, f)
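# Minimal sketch (not part of the original module) of the same "write to a temporary
# file, then rename" pattern used in the atomic branch above, assuming a local
# filesystem where os.replace() is atomic. The helper name is hypothetical.
def _example_atomic_save(obj, filename):
    import os
    import torch
    tmp = filename + ".tmp"
    with open(tmp, "wb") as f:
        torch.save(obj, f)
    os.replace(tmp, filename)  # readers never observe a partially written file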
def _torch_persistent_save(obj, f, num_retries=3):
if isinstance(f, str):
with PathManager.open(f, "wb") as h:
torch_persistent_save(obj, h)
return
for i in range(num_retries):
try:
return torch.save(obj, f)
except Exception:
if i == num_retries - 1:
logger.error(traceback.format_exc())
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
# add optimizer_history
if "optimizer_history" not in state:
state["optimizer_history"] = [
{"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
]
state["last_optimizer_state"] = state["optimizer"]
del state["optimizer"]
del state["best_loss"]
# move extra_state into sub-dictionary
if "epoch" in state and "extra_state" not in state:
state["extra_state"] = {
"epoch": state["epoch"],
"batch_offset": state["batch_offset"],
"val_loss": state["val_loss"],
}
del state["epoch"]
del state["batch_offset"]
del state["val_loss"]
# reduce optimizer history's memory usage (only keep the last state)
if "optimizer" in state["optimizer_history"][-1]:
state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
for optim_hist in state["optimizer_history"]:
del optim_hist["optimizer"]
# move best_loss into lr_scheduler_state
if "lr_scheduler_state" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["lr_scheduler_state"] = {
"best": state["optimizer_history"][-1]["best_loss"]
}
del state["optimizer_history"][-1]["best_loss"]
# keep track of number of updates
if "num_updates" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["num_updates"] = 0
# use stateful training data iterator
if "train_iterator" not in state["extra_state"]:
state["extra_state"]["train_iterator"] = {
"epoch": state["extra_state"]["epoch"],
"iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
}
return state
def verify_checkpoint_directory(save_dir: str) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
rank = dist_utils.get_global_rank()
temp_file_path = os.path.join(save_dir, f"dummy{rank}")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
logger.warning(
"Unable to access checkpoint save directory: {}".format(save_dir)
)
raise e
else:
try:
os.remove(temp_file_path)
except FileNotFoundError:
pass
def _merge_flat_fsdp_shards(shards_to_load: List[Dict], unpad=False) -> Dict:
"""
Concatenate tensor entries in a list of local_state_dicts into one
local_state_dict to allow resumption on a different world size.
"""
merged_state = {}
world_size = dist_utils.get_data_parallel_world_size()
for key in shards_to_load[0].keys():
merged_state[key] = shards_to_load[0][key]
pad_info = _get_pad_info(shards_to_load[-1])
dtype = torch.float16
for k in shards_to_load[0]["model"]:
dtype = shards_to_load[0]["model"][k].dtype
if "flat_param" in k:
pad_info_k = pad_info[k]
catted = torch.cat([x["model"][k] for x in shards_to_load])
if world_size == 1 and pad_info_k > 0:
catted = catted[:-pad_info_k]
elif world_size > 1 and pad_info_k > 0 and not unpad:
raise NotImplementedError(
f"Param {k} padded with {pad_info_k} extra elements. You must use the reshard_mp script."
)
merged_state["model"][k] = catted
if "decoder.version" not in merged_state["model"]:
merged_state["model"]["decoder.version"] = torch.tensor([3.0], dtype=dtype)
if OPT_KEY in merged_state:
merged_state[OPT_KEY] = _merge_flat_fsdp_opt_state(shards_to_load)
return merged_state
def _merge_flat_fsdp_opt_state(shards_to_load: List[Dict]) -> Dict:
"""Logic described here: https://tinyurl.com/2p86zffr"""
result = shards_to_load[0][OPT_KEY]
pad_info = _get_pad_info(shards_to_load[-1])
world_size = dist_utils.get_data_parallel_world_size()
os2model_key = dict(
zip(shards_to_load[0][OPT_KEY]["state"].keys(), pad_info.keys())
)
for k in shards_to_load[0][OPT_KEY]["state"].keys():
# 0,1,2,3... if each layer wrapped, else 0
for k2 in shards_to_load[0][OPT_KEY]["state"][k].keys():
# exp_avg, exp_avg_sq, step (for adam32 bit)
states = [x[OPT_KEY]["state"][k][k2] for x in shards_to_load]
if not torch.is_tensor(states[0]) or is_singleton_tensor(states[0]):
result["state"][k][k2] = states[0]
else:
catted = torch.cat(states)
if k in os2model_key:
opt_state_key = os2model_key[k]
pad_info_k = pad_info[opt_state_key]
if world_size == 1 and pad_info_k > 0: # unpad
catted = catted[:-pad_info_k]
result["state"][k][k2] = catted
return result
def is_singleton_tensor(x: Any) -> bool:
"""Is x a dimensionless tensor?"""
return torch.is_tensor(x) and x.dim() == 0
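# Hypothetical demo (not part of the original module) of the predicate above; it only
# assumes torch is importable, as it already is in this module.
def _example_is_singleton_tensor():
    import torch
    assert is_singleton_tensor(torch.tensor(3.0))        # 0-d "scalar" tensor
    assert not is_singleton_tensor(torch.tensor([3.0]))  # 1-d tensor of length one
    assert not is_singleton_tensor(3.0)                  # plain float, not a tensor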
def _get_pad_info(state_dict: Dict) -> Dict[str, int]:
if "shard_metadata" not in state_dict:
# Note: comment this out if you have sharded checkpoints that you think can be loaded
return collections.defaultdict(lambda: 0)
res = {}
for m in state_dict["shard_metadata"]["param_metadata"]:
fsdp_path = m["fsdp_path"]
for k, v in m["params"].items():
full_key = f"{fsdp_path}.{k}" if fsdp_path else k
assert full_key not in res, f"collision: {full_key} already in {res}"
res[full_key] = v["padding"]
return res
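# Hypothetical sketch (not part of the original module): the "shard_metadata" layout
# that _get_pad_info() expects, inferred from the accesses above. The concrete paths
# and padding value are made up for illustration.
def _example_pad_info_layout():
    state_dict = {
        "shard_metadata": {
            "param_metadata": [
                {
                    "fsdp_path": "decoder.layers.0",
                    "params": {"flat_param_0": {"padding": 7}},
                }
            ]
        }
    }
    return _get_pad_info(state_dict)  # -> {"decoder.layers.0.flat_param_0": 7}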
|
flash_metaseq-main
|
metaseq/checkpoint_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_.
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
import json
import logging
import os
import shutil
import tarfile
import tempfile
from functools import partial, wraps
from hashlib import sha256
from io import open
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path))
except (AttributeError, ImportError):
PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def load_archive_file(archive_file):
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=None)
except EnvironmentError:
logger.info(
"Archive name '{}' was not found in archive name list. "
"We assumed '{}' was a path or URL but couldn't find any file "
"associated to this path or URL.".format(
archive_file,
archive_file,
)
)
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info(
"loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file
)
)
# Extract archive to temp dir and replace .tar.bz2 if necessary
tempdir = None
if not os.path.isdir(resolved_archive_file):
tempdir = tempfile.mkdtemp()
logger.info(
"extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir
)
)
ext = os.path.splitext(archive_file)[1][1:]
with tarfile.open(resolved_archive_file, "r:" + ext) as archive:
top_dir = os.path.commonprefix(archive.getnames())
archive.extractall(tempdir)
os.remove(resolved_archive_file)
shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
shutil.rmtree(tempdir)
return resolved_archive_file
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the URL's, delimited
by a period.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
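# Hypothetical demo (not part of the original module) of the parsing above; the bucket
# and key are made up for illustration.
def _example_split_s3_path():
    bucket, key = split_s3_path("s3://my-bucket/models/checkpoint.pt")
    assert bucket == "my-bucket"
    assert key == "models/checkpoint.pt"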
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
from botocore.exceptions import ClientError
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def request_wrap_timeout(func, url):
import requests
for attempt, timeout in enumerate([10, 20, 40, 60, 60]):
try:
return func(timeout=timeout)
except requests.exceptions.Timeout as e:
logger.warning(
"Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs",
url,
attempt,
timeout,
exc_info=e,
)
continue
raise RuntimeError(f"Unable to fetch file {url}")
def http_get(url, temp_file):
import requests
from tqdm import tqdm
req = request_wrap_timeout(partial(requests.get, url, stream=True), url)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
import requests
response = request_wrap_timeout(
partial(requests.head, url, allow_redirects=True), url
)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except RuntimeError:
etag = None
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
output_string = json.dumps(meta)
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r", encoding="utf-8") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
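# Hypothetical demo (not part of the original module) of the helper above.
def _example_get_file_extension():
    assert get_file_extension("model.TAR.GZ") == ".gz"
    assert get_file_extension("model.pt", dot=False) == "pt"
    assert get_file_extension("archive.BZ2", lower=False) == ".BZ2"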
|
flash_metaseq-main
|
metaseq/file_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
class Search(nn.Module):
def __init__(self, tgt_dict):
super().__init__()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
self.stop_on_max_len = False
def step(
self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
prev_output_tokens: (bsz x step)
the previously generated output tokens
original_batch_idxs: (bsz)
the tensor with the batch indices, in the range [0, bsz)
                this is useful when a re-ordering has been applied
                and we need to know the original indices
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
"""
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.constraint_states = None
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
# Project back into relative indices and beams
beams_buf = torch.div(indices_buf, vocab_size, rounding_mode="trunc")
indices_buf = indices_buf.fmod(vocab_size)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
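# Illustrative sketch (not part of the original module): topk in BeamSearch.step() runs
# over the flattened (beam x vocab) axis, so a flat index maps back to a (beam id,
# token id) pair via integer division and modulo, exactly as beams_buf and indices_buf
# are computed above. The helper name and numbers are made up.
def _example_decompose_flat_index(flat_index, vocab_size):
    # e.g. _example_decompose_flat_index(100544, 50272) -> (2, 0)
    return flat_index // vocab_size, flat_index % vocab_size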
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
        Return: A tuple of (trimmed_probs, truncated_indices) where:
            trimmed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
        trim_mask = ~truncated_mask
        trimmed_probs = truncated_probs.masked_fill_(trim_mask, 0)
        return trimmed_probs, truncated_indices
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1),
beam_size,
replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
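# Hypothetical demo (not part of the original module) of the rule implemented by
# Sampling._sample_topp() above: keep the smallest prefix of the descending-sorted
# probabilities whose cumulative mass reaches p, and mask everything else out before
# sampling. The probabilities and threshold are made up for illustration.
def _example_top_p_mass(probs=(0.5, 0.3, 0.15, 0.05), p=0.9):
    kept, total = [], 0.0
    for prob in probs:
        kept.append(prob)
        total += prob
        if total >= p:
            break
    return kept  # -> [0.5, 0.3, 0.15], cumulative mass 0.95 >= 0.9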
|
flash_metaseq-main
|
metaseq/search.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import shutil
from typing import List, Optional
import torch
from omegaconf.dictconfig import DictConfig
logger = logging.getLogger(__file__)
from .s3_utils import S3PathHandler # noqa: E402
try:
from iopath.common.file_io import PathManager
IOPathPathManager = PathManager()
except ImportError:
IOPathPathManager = None
try:
IOPathPathManager.register_handler(S3PathHandler())
except KeyError:
pass
except Exception:
logging.exception("Failed to register S3 Path Handler. Try pip install boto3")
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
fvcore's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathPathManager:
return IOPathPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathPathManager:
return IOPathPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if IOPathPathManager:
return IOPathPathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if IOPathPathManager:
return IOPathPathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if IOPathPathManager:
return IOPathPathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def islink(path: str) -> Optional[bool]:
if not PathManager.path_requires_pathmanager(path):
return os.path.islink(path)
return None
@staticmethod
def ls(path: str) -> List[str]:
if IOPathPathManager:
return IOPathPathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if IOPathPathManager:
return IOPathPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if IOPathPathManager:
return IOPathPathManager.rm(path)
os.remove(path)
assert not os.path.exists(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if IOPathPathManager:
return IOPathPathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathPathManager:
return IOPathPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
@staticmethod
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathPathManager:
for p in IOPathPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
@staticmethod
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
@staticmethod
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
@staticmethod
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
callback_after_file_close=None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathPathManager
return IOPathPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
callback_after_file_close=callback_after_file_close,
)
@staticmethod
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathPathManager
if IOPathPathManager:
return IOPathPathManager.async_close()
return False
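# Hypothetical usage sketch (not part of the original module): writes opened with
# PathManager.opena() are asynchronous, so PathManager.async_close() must be called
# before the process exits to flush them, as the docstring above notes. `path` and
# `payload` are placeholders supplied by the caller.
def _example_async_checkpoint_write(path, payload):
    with PathManager.opena(path, "wb") as f:
        f.write(payload)
    return PathManager.async_close()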
def recursively_cast_dictconfigs(cfg):
if isinstance(cfg, DictConfig):
        cfg = eval(str(cfg))  # round-trip through its string form to get plain Python containers
assert not isinstance(cfg, DictConfig)
if isinstance(cfg, dict):
return {k2: recursively_cast_dictconfigs(v2) for k2, v2 in cfg.items()}
else:
# Easy to support List, Tuple if needed
return cfg
def torch_load_cpu(path):
state = torch.load(path, map_location=torch.device("cpu"))
    # If the model was trained with fp16, cast the loaded model weights back to fp16
if not isinstance(state, dict):
return state
if "cfg" in state:
state["cfg"] = recursively_cast_dictconfigs(state["cfg"])
if (
state["cfg"]["common"]["fp16"]
or state["cfg"]["common"]["memory_efficient_fp16"]
):
state["model"] = {k: v.half() for k, v in state["model"].items()}
return state
def save_json(content, path, indent=4):
with open(path, "w") as f:
json.dump(content, f, indent=indent)
def load_json(p):
return json.load(open(p))
def load_jsonl(path):
    with open(path) as f:
        jsonl_content = f.read()
    result = [json.loads(jline) for jline in jsonl_content.splitlines()]
    return result
def load_and_pop_last_optimizer_state(pth):
st = torch_load_cpu(pth)
st.pop("last_optimizer_state", None)
return st
|
flash_metaseq-main
|
metaseq/file_io.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
import logging
import re
import sys
import time
from itertools import chain
from typing import Any, Dict, List
import torch
from omegaconf import OmegaConf
from metaseq import checkpoint_utils, models, optim, utils
from metaseq.distributed import utils as distributed_utils
from metaseq.file_io import PathManager
from metaseq.logging import meters, metrics
from metaseq.nan_detector import NanDetector
from metaseq.optim import lr_scheduler
logger = logging.getLogger(__name__)
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, cfg, task, model, criterion):
self.cfg = cfg
self.task = task
# catalog shared parameters
shared_params = _catalog_shared_params(model)
self.cuda = torch.cuda.is_available() and not cfg.common.cpu
self.dont_log_param_and_grad_norm = getattr(
cfg.common, "dont_log_param_and_grad_norm", False
)
if self.cuda:
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
if self.is_fsdp:
import fairscale
if self.cfg.distributed_training.zero_sharding != "none":
raise ValueError(
"FullyShardedDataParallel is not compatible with --zero-sharding "
"option (it's already built in)"
)
if (
max(self.cfg.optimization.update_freq) > 1
and fairscale.__version__ < "0.4.0"
):
raise RuntimeError(
"Please update to fairscale 0.4.0 or newer when combining "
"--update-freq with FullyShardedDataParallel"
)
if self.cfg.optimizer == "adam8bit":
assert (
self.use_sharded_state
), "adam8bit + FSDP requires --use-sharded-state"
if self.use_sharded_state:
import fairscale
assert (
fairscale.__version__ >= "0.3.9"
), "--use-sharded-state requires newer fairscale. pip install -U fairscale"
else:
if self.cfg.distributed_training.cpu_offload:
raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded")
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if not self.is_fsdp:
if cfg.common.fp16:
self._criterion = self._criterion.half()
self._model = self._model.half()
if (
# the DistributedModel wrapper will handle moving to device,
# so only handle cases which don't use the wrapper
not self.use_distributed_wrapper
):
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
"detected shared parameter: {} <- {}".format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
logger.info(metrics.get_nvidia_smi_gpu_memory_stats_str())
self._dummy_batch = None # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(
self.cuda_env, group=distributed_utils.get_global_group()
)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_process_group(self):
return distributed_utils.get_data_parallel_group()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
@property
def is_data_parallel_master(self):
# NOTE: this returns true for all model parallel replicas with data
# parallel rank 0
return self.data_parallel_rank == 0
@property
def use_distributed_wrapper(self) -> bool:
return (self.data_parallel_world_size > 1) or (
self.is_fsdp and self.cfg.distributed_training.cpu_offload
)
@property
def should_save_checkpoint_on_current_rank(self) -> bool:
"""Indicates whether to save checkpoints on the current DDP rank."""
if self.is_fsdp:
return True
else:
return self.is_data_parallel_master
@property
def checkpoint_suffix(self) -> str:
"""Suffix to add to the checkpoint file name."""
if not self.use_sharded_state:
return self.cfg.checkpoint.checkpoint_suffix
elif self.is_fsdp:
return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format(
self.data_parallel_rank
)
else:
return self.cfg.checkpoint.checkpoint_suffix or ""
@property
def criterion(self):
if self._wrapped_criterion is None:
if utils.has_parameters(self._criterion) and self.use_distributed_wrapper:
self._wrapped_criterion = models.DistributedModel(
self.cfg.distributed_training,
self._criterion,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if self.use_distributed_wrapper or self.is_fsdp:
self._wrapped_model = models.DistributedModel(
self.cfg.distributed_training,
self._model,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if self.is_fsdp and self.cfg.common.fp16:
# FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper,
# mostly for the grad scaling. But if we don't have the
# --memory-efficient-fp16 flag set, then we're effectively doing
# regular --fp16 and can allow the use of optimizers that would
# otherwise be unsupported by MemoryEfficientFP16Optimizer.
allow_unsupported = not self.cfg.common.memory_efficient_fp16
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params, allow_unsupported=allow_unsupported
)
elif self.cfg.common.fp16:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16, "
"please switch to FP32 which is likely to be faster"
)
if self.cfg.common.memory_efficient_fp16:
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params
)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
logger.info("NOTE: your device may support faster training with --fp16")
self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)
if self.is_fsdp:
assert self._optimizer.supports_flat_params, (
"--ddp-backend=fully_sharded is only compatible with pointwise "
"optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). "
"However, the sharding will result in slightly different results when "
"using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)"
)
if self.cfg.distributed_training.zero_sharding == "os":
if (
self.cfg.common.fp16
and not self.cfg.common.memory_efficient_fp16
and not self.cfg.common.fp16_no_flatten_grads
):
raise ValueError(
"ZeRO is incompatible with fp16 and flattened grads. "
"Please use --fp16-no-flatten-grads"
)
else:
optim.shard_(self._optimizer, self.data_parallel_process_group)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(
self.cfg.lr_scheduler,
self.optimizer,
)
self._lr_scheduler.step_update(0)
@property
def is_fsdp(self):
return self.cfg.distributed_training.ddp_backend == "fully_sharded"
@property
def use_sharded_state(self):
return self.cfg.distributed_training.use_sharded_state
def consolidate_optimizer(self):
"""For OSS, we need to consolidate the state dict."""
self._gathered_optim_state = None
if self.cfg.checkpoint.no_save_optimizer_state:
return
if hasattr(self.optimizer.optimizer, "consolidate_state_dict"):
self.optimizer.optimizer.consolidate_state_dict()
elif self.is_fsdp and not self.use_sharded_state:
st = self.model.gather_full_optim_state_dict(
self.optimizer
) # only returns on rank 0
if st is None:
st = -1 # sentinel so that workers do not save optimizer.state_dict()
self._gathered_optim_state = st
assert self._gathered_optim_state is not None
def state_dict(self, filename, training_finished=False) -> Dict[str, Dict]:
model_state_dict = self.model.state_dict()
optim_state = None
if not self.cfg.checkpoint.no_save_optimizer_state:
optim_state = self._gathered_optim_state or self.optimizer.state_dict()
model_save_list = [
(
filename,
model_state_dict,
optim_state,
)
]
state_dicts = {}
# This is what gets saved to checkpoints.
for filename, model_state_dict, optimizer_state_dict in model_save_list:
state_dict = {
"cfg": OmegaConf.to_container(self.cfg)
if OmegaConf.is_config(self.cfg)
else self.cfg,
"model": model_state_dict,
"criterion": (
self.criterion.state_dict()
if utils.has_parameters(self.criterion)
else None
),
"optimizer_history": (self._optim_history or [])
+ [
{
"criterion_name": self.get_criterion().__class__.__name__,
"optimizer_name": self.optimizer.__class__.__name__,
"lr_scheduler_state": self.lr_scheduler.state_dict(),
"num_updates": self.get_num_updates(),
}
],
"task_state": self.task.state_dict() if self.task is not None else {},
"extra_state": {
"metrics": metrics.state_dict(),
"previous_training_time": self.cumulative_training_time(),
},
}
if not self.cfg.checkpoint.no_save_optimizer_state or (
self.cfg.checkpoint.no_save_optimizer_state_on_training_finished
and training_finished
):
state_dict["last_optimizer_state"] = optimizer_state_dict
if self.is_fsdp and self.use_sharded_state:
state_dict[
"shard_metadata"
] = (
self.model.local_metadata_dict()
) # save FSDP flattening and padding info
state_dicts[filename] = state_dict
return state_dicts
def save_checkpoint(
self, filename, extra_state, training_finished=False, async_callback_fn=None
):
"""Save all training state in a checkpoint file."""
# call state_dict on all ranks in case it needs internal communication
state_dicts = self.state_dict(filename, training_finished)
for filename, state_dict in state_dicts.items():
logger.info(f"Saving checkpoint to {filename}")
state_dict = utils.move_to_cpu(
state_dict,
# keep params in FP16 when training with --memory-efficient-fp16
cast_to_fp32=not self.cfg.common.memory_efficient_fp16,
)
state_dict["extra_state"].update(extra_state)
if self.should_save_checkpoint_on_current_rank:
checkpoint_utils.torch_persistent_save(
state_dict,
filename,
async_write=self.cfg.checkpoint.write_checkpoints_asynchronously,
async_callback_fn=async_callback_fn,
)
logger.info(f"Finished saving checkpoint to {filename}")
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""
Load all training state from a checkpoint file.
rank = 0 will load the checkpoint, and then broadcast it to all
other ranks.
"""
extra_state, self._optim_history, last_optim_state = None, [], None
is_distributed = self.data_parallel_world_size > 1
bexists = PathManager.isfile(filename)
if bexists:
logger.info(f"Preparing to load checkpoint {filename}")
load_on_all_ranks = (
self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks
# FSDP requires loading checkpoint shards on all ranks
or self.is_fsdp
)
if load_on_all_ranks or self.is_data_parallel_master:
state = checkpoint_utils.load_checkpoint_to_cpu(
filename,
load_on_all_ranks=load_on_all_ranks,
)
last_optim_state = state.get("last_optimizer_state", None)
if last_optim_state == -1:
master_path = re.sub("shard[0-9]+", "shard0", filename)
last_optim_state = torch.load(master_path, map_location="cpu")[
"last_optimizer_state"
]
logger.info(f"Loaded state for {filename}")
# If doing zero_sharding, do not broadcast global optimizer
# state. Later we will broadcast sharded states to each rank
# to avoid memory exploding.
if (
not load_on_all_ranks
and self.cfg.distributed_training.zero_sharding == "os"
and "last_optimizer_state" in state
and is_distributed
):
state["last_optimizer_state"] = "SHARDED"
else:
last_optim_state = None
state = None
if self.data_parallel_world_size > 1 and not load_on_all_ranks:
state = distributed_utils.broadcast_object(
state,
src_rank=0,
group=self.data_parallel_process_group,
dist_device=self.device,
)
if self.data_parallel_rank > 0:
last_optim_state = state.get("last_optimizer_state", None)
# load model parameters
try:
self.model.load_state_dict(
state["model"], strict=True, model_cfg=self.cfg.model
)
# save memory for later steps
del state["model"]
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
del state["criterion"]
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), (
f"Criterion does not match; please reset the optimizer "
f"(--reset-optimizer). {last_optim['criterion_name']} vs "
f"{self.get_criterion().__class__.__name__}"
)
assert last_optim["optimizer_name"] == self.optimizer.__class__.__name__, (
f"Optimizer does not match; please reset the optimizer "
f"(--reset-optimizer). {last_optim['optimizer_name']} vs "
f"{self.optimizer.__class__.__name__}"
)
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
if not load_on_all_ranks and is_distributed:
last_optim_state = self.optimizer.broadcast_global_state_dict(
last_optim_state
)
elif self.is_fsdp and not self.use_sharded_state:
last_optim_state = self.model.get_shard_from_optim_state_dict(
last_optim_state
)
logger.info(f"FSDP got shard from optim_state for {filename}")
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
logger.info(f"Loaded optim_state for {filename}")
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
itr_state = extra_state["train_iterator"]
epoch = itr_state["epoch"]
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if (
itr_state.get("version", 1) >= 2
and itr_state["iterations_in_epoch"] == 0
):
# reset meters at start of epoch
reset_meters = True
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
logger.info(
f"Loaded checkpoint {filename} (epoch {epoch} @ {self.get_num_updates()} updates)"
)
else:
logger.info("No existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
data_selector=None,
shard_batch_itr=True,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.cfg.dataset.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
)
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.train_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.cfg.dataset.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.cfg.dataset.num_workers,
epoch=epoch,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=(
not self.cfg.optimization.train_with_epoch_remainder_batch
),
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def get_valid_iterator(
self,
subset,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.cfg.dataset.max_tokens_valid,
max_sentences=self.cfg.dataset.batch_size_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers_valid,
# always pass a fixed "epoch" to keep validation data consistent
# across training epochs
epoch=1,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=False,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
self.lr_step_begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
def begin_valid_epoch(self, epoch):
"""Called at the beginning of each validation epoch."""
# task specific setup per validation epoch
self.task.begin_valid_epoch(epoch, self.get_model())
def reset_dummy_batch(self, batch):
self._dummy_batch = batch
@metrics.aggregate("train")
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=0)
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
for i, sample in enumerate(samples): # delayed update loop
sample, is_dummy_batch = self._prepare_sample(sample)
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
# The no_sync context manager results in increased memory
# usage with FSDP, since full-size gradients will be
# accumulated on each GPU. It's typically a better tradeoff
# to do the extra communication with FSDP.
and not self.is_fsdp
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.cfg.distributed_training.distributed_world_size == 1:
return None
else:
raise e
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
if self._sync_stats():
train_time = self._local_cumulative_training_time()
logging_outputs, (
sample_size,
ooms,
total_train_time,
) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch
)
self._cumulative_training_time = (
total_train_time / self.data_parallel_world_size
)
overflow = False
logger.debug(f"[{self.get_num_updates()}] done with fwd, bwd")
try:
with torch.autograd.profiler.record_function("reduce-grads"):
# reduce gradients across workers
self.optimizer.all_reduce_grads(self.model)
if utils.has_parameters(self.criterion):
self.optimizer.all_reduce_grads(self.criterion)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (data_parallel_size / sample_size) since
# DDP normalizes by the number of data parallel workers for
# improved fp16 precision.
# Thus we get (sum_of_gradients / sample_size) at the end.
# In case of fp16, this step also undoes loss scaling.
# (Debugging note: Some optimizers perform this scaling on the
# fly, so inspecting model.parameters() or optimizer.params may
# still show the original, unscaled gradients.)
numer = self.data_parallel_world_size if self._sync_stats() else 1
self.optimizer.multiply_grads(numer / (sample_size or 1.0))
# Note: (sample_size or 1.0) handles the case of a zero gradient, in a
# way that avoids CPU/device transfers in case sample_size is a GPU or
# TPU object. The assumption is that the gradient itself is also 0.
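                # Worked example (illustrative, not from the original source; ignoring
                # any fp16 loss-scale factor): with 8 data-parallel workers and an
                # aggregated sample_size of 4096 tokens, DDP has already divided the
                # summed gradients by 8, so multiplying by 8 / 4096 leaves exactly
                # (sum_of_gradients / 4096), i.e. an average gradient per token.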
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
grad_norm = self.clip_grad_norm(
self.cfg.optimization.clip_norm,
self.cfg.optimization.clip_norm_type,
self.cfg.optimization.skip_gradient_update_on_clip_norm,
)
# check that grad norms are consistent across workers
self._check_grad_norms(grad_norm)
if not torch.isfinite(grad_norm).all():
# check local gradnorm single GPU case, trigger NanDetector
raise FloatingPointError("gradients are Nan/Inf")
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.task.optimizer_step(
self.optimizer, model=self.model, update_num=self.get_num_updates()
)
logger.debug(f"[{self.get_num_updates()}] done with optimizer step")
except FloatingPointError:
# re-run the forward and backward pass with hooks attached to print
# out where it fails
self.zero_grad()
with NanDetector(self.get_model()):
for _, sample in enumerate(samples):
sample, _ = self._prepare_sample(sample)
self.task.train_step(
sample,
self.model,
self.criterion,
self.optimizer,
self.get_num_updates(),
ignore_grad=False,
)
raise
except OverflowError as e:
overflow = True
logger.info(
f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}"
)
grad_norm = torch.tensor(0.0).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
logging_output = None
if not overflow:
self.set_num_updates(self.get_num_updates() + 1)
if self.cuda and self.cuda_env is not None:
# log minimum free memory over the iteration
self._log_gpu_mem_stats()
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.cfg.common.empty_cache_freq > 0
and (
(self.get_num_updates() + self.cfg.common.empty_cache_freq - 1)
% self.cfg.common.empty_cache_freq
)
== 0
):
torch.cuda.empty_cache()
if self.cfg.common.fp16 and not self.cfg.common.bf16:
metrics.log_scalar(
"loss_scale",
self.optimizer.scaler.loss_scale,
priority=700,
round=4,
weight=0,
)
metrics.log_scalar(
"scale_window",
self.optimizer.scaler.scale_window,
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
@metrics.aggregate("valid")
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
with torch.no_grad():
self.model.eval()
self.criterion.eval()
self.zero_grad()
sample, is_dummy_batch = self._prepare_sample(sample)
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size,) = self._aggregate_logging_outputs(
logging_outputs,
sample_size,
ignore=is_dummy_batch,
)
# log validation stats
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_output
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step_begin_epoch(self, epoch):
"""Adjust the learning rate at the beginning of the epoch."""
self.lr_scheduler.step_begin_epoch(epoch)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
if isinstance(new_lr, dict):
for k, v in new_lr.items():
metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300)
new_lr = new_lr.get("default", next(iter(new_lr.values())))
else:
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from metaseq import meters
if "get_meter" not in self._warn_once:
self._warn_once.add("get_meter")
utils.deprecation_warning(
"Trainer.get_meter is deprecated. Please use metaseq.metrics instead."
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_") :]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(
self, clip_norm, clip_norm_type="l2", skip_gradient_update_on_clip_norm=False
):
return self.optimizer.clip_grad_norm(
clip_norm,
clip_norm_type,
aggregate_norm_fn=None,
skip_gradient_update_on_clip_norm=skip_gradient_update_on_clip_norm,
)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _prepare_sample(self, sample, is_dummy=False):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
assert (
self._dummy_batch is not None and len(self._dummy_batch) > 0
), "Invalid dummy batch: {}".format(self._dummy_batch)
sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True)
return sample, True
if self.cuda:
sample = utils.move_to_cuda(sample)
def lower_precision(t):
"""Converts a tensor to the desired dtype based on our cfg."""
if t.dtype is torch.float32:
if self.cfg.bf16:
return t.bfloat16()
return t.half()
return t
if self.cfg.common.fp16:
sample = utils.apply_to_sample(lower_precision, sample)
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
return sample, False
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.cfg.common.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
# Return True if it's using multiple GPUs and DDP
if self.data_parallel_world_size == 1:
return False
else:
return True
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
else:
return self._all_gather_list_sync(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
def _all_gather_list_sync(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. all_gather_list_sync is
suitable when logging outputs are complex types.
"""
if ignore:
logging_outputs = []
results = list(
zip(
*distributed_utils.all_gather_list(
[logging_outputs] + list(extra_stats_to_sum),
max_size=getattr(self.cfg.common, "all_gather_list_size", 16384),
group=self.data_parallel_process_group,
)
)
)
logging_outputs, extra_stats_to_sum = results[0], results[1:]
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return logging_outputs, extra_stats_to_sum
def _fast_stat_sync_sum(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data["extra_stats_" + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data["logging_outputs_" + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(
data, device=self.device, group=self.data_parallel_process_group
)
extra_stats_to_sum = [
data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
def _check_grad_norms(self, grad_norm):
"""Check that grad norms are consistent across workers."""
if self._grad_norm_buf is not None:
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(
self._grad_norm_buf, group=self.data_parallel_process_group
)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))
return (
torch.isfinite(tensor).all()
and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()
)
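            # In other words: every rank's grad norm must be finite and agree with
            # rank 0's norm to within roughly 1e-6 relative tolerance.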
if not is_consistent(self._grad_norm_buf):
pretty_detail = "\n".join(
"rank {:3d} = {:.8f}".format(r, n)
for r, n in enumerate(self._grad_norm_buf.tolist())
)
error_detail = "grad_norm across the workers:\n{}\n".format(
pretty_detail
)
# use FloatingPointError to trigger NanDetector
raise FloatingPointError(
"Fatal error: gradients are inconsistent between workers. "
"Try --ddp-backend=legacy_ddp. "
"Or are you mixing up different generation of GPUs in training?"
+ "\n"
+ "-" * 80
+ "\n{}\n".format(error_detail)
+ "-" * 80
)
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
# perform a bunch of arch-specific gradient metrics
for name, param in self.model.named_parameters():
if (not self.is_fsdp) or self.dont_log_param_and_grad_norm:
break
if param.grad is None:
continue
nice_name = name.replace("module._fsdp_wrapped_module._fpw_module.", "")
nice_name = nice_name.replace("_fsdp_wrapped_module._fpw_module.", "")
nice_name = nice_name.replace("._fsdp_wrapped_module.flat_param_0", "")
nice_name = nice_name.replace("decoder.layers.", "layer")
# threshold for near zeros
threshold = torch.finfo(param.grad.dtype).tiny * 2
with torch.no_grad():
g = param.grad
if hasattr(self.optimizer, "_multiply_factor"):
g = self.optimizer._multiply_factor * g
norm = g.norm(p=2, dim=-1, dtype=torch.float32)
max_ = g.max()
nz = ((g > -threshold) & (g < threshold)).sum() / g.numel()
# priorities for printing order
metrics.log_scalar(f"gnorm_{nice_name}", norm, priority=10)
metrics.log_scalar(f"gmax_{nice_name}", max_, priority=11)
metrics.log_scalar(f"gzero_{nice_name}", nz, priority=12)
with torch.no_grad():
norm = param.norm(p=2, dim=-1, dtype=torch.float32)
max_ = param.max()
nz = ((param > -threshold) & (param < threshold)).sum() / param.numel()
# priorities for printing order
metrics.log_scalar(f"pnorm_{nice_name}", norm, priority=13)
metrics.log_scalar(f"pmax_{nice_name}", max_, priority=14)
metrics.log_scalar(f"pzero_{nice_name}", nz, priority=15)
# standard code
if grad_norm is not None and (
not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm)
):
metrics.log_speed("ups", 1.0, priority=100, round=2)
metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
if self.cfg.optimization.clip_norm > 0:
metrics.log_scalar(
"clip",
torch.where(
grad_norm > self.cfg.optimization.clip_norm,
grad_norm.new_tensor(100),
grad_norm.new_tensor(0),
),
priority=500,
round=1,
)
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
# extra warning for criterions that don't properly log a loss value
if "loss" not in agg:
if "loss" not in self._warn_once:
self._warn_once.add("loss")
logger.warning(
"Criterion.reduce_metrics did not log a 'loss' value, "
"which may break some functionality"
)
metrics.log_scalar("loss", -1)
# support legacy interface
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
if key_to_delete in logging_output:
del logging_output[key_to_delete]
return logging_output
def _log_gpu_mem_stats(self):
# log minimum free memory over the iteration
cuda_gb_allocated = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
cuda_gb_reserved = torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024
torch.cuda.reset_peak_memory_stats()
cuda_gb_free = self.cuda_env.total_memory_in_GB - cuda_gb_allocated
metrics.log_scalar(
"cuda_gb_allocated", cuda_gb_allocated, priority=1500, round=1, weight=0
)
metrics.log_scalar(
"cuda_gb_reserved", cuda_gb_reserved, priority=1500, round=1, weight=0
)
metrics.log_scalar(
"cuda_gb_free", cuda_gb_free, priority=1500, round=1, weight=0
)
# log nvidia smi stats
if self.cfg.common.log_nvidia_smi:
nvidia_smi_stats = metrics.nvidia_smi_gpu_memory_stats()
for key, val in nvidia_smi_stats.items():
metrics.log_scalar(key, val, priority=1500, round=1, weight=0)
def _catalog_shared_params(module, memo=None, prefix=""):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
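        # Illustrative example (hypothetical parameter names): if the decoder ties its
        # input and output embeddings to one tensor, the result is
        # [["decoder.embed_tokens.weight", "decoder.output_projection.weight"]].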
return [x for x in memo.values() if len(x) > 1]
def _get_module_by_path(module, path):
path = path.split(".")
for name in path:
module = getattr(module, name)
return module
def _set_module_by_path(module, path, value):
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value)
|
flash_metaseq-main
|
metaseq/trainer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from omegaconf import II
from metaseq.data import Dictionary, BaseDataset
from metaseq.dataclass import MetaseqDataclass
from metaseq.tasks import BaseTask, register_task
logger = logging.getLogger(__name__)
@dataclass
class DummyLMConfig(MetaseqDataclass):
dict_size: int = 49996
dataset_size: int = 100000
tokens_per_sample: int = field(
default=512, metadata={"help": "max sequence length"}
)
add_bos_token: bool = False
batch_size: Optional[int] = II("dataset.batch_size")
max_tokens: Optional[int] = II("dataset.max_tokens")
max_target_positions: int = II("task.tokens_per_sample")
@register_task("dummy_lm", dataclass=DummyLMConfig)
class DummyLMTask(BaseTask):
def __init__(self, cfg: DummyLMConfig):
super().__init__(cfg)
# load dictionary
self.dictionary = Dictionary()
for i in range(cfg.dict_size):
self.dictionary.add_symbol("word{}".format(i))
self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8
logger.info("dictionary: {} types".format(len(self.dictionary)))
seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1
self.dummy_src = seq[:-1]
self.dummy_tgt = seq[1:]
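        # Illustrative example (assuming pad() == 1 and tokens_per_sample == 4):
        # seq == [2, 3, 4, 5, 6], so dummy_src == [2, 3, 4, 5] and dummy_tgt == [3, 4, 5, 6],
        # i.e. the target is the source shifted left by one token.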
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.cfg.batch_size is not None:
bsz = self.cfg.batch_size
else:
bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.cfg.tokens_per_sample, dtype=torch.long
),
},
"target": torch.stack([self.dummy_tgt for _ in range(bsz)]),
"nsentences": bsz,
"ntokens": bsz * self.cfg.tokens_per_sample,
},
num_items=self.cfg.dataset_size,
item_size=self.cfg.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(BaseDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
def set_epoch(self, epoch):
pass
|
flash_metaseq-main
|
metaseq/benchmark/dummy_lm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import dummy_lm # noqa
|
flash_metaseq-main
|
metaseq/benchmark/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum
@dataclass
class Size:
n_layers: int
emb_size: int
n_heads: int
d_head: int
batch_size: int
lr: float
model_parallel: int
@property
def ffn_size(self):
return 4 * self.emb_size
# from appendix b of https://arxiv.org/pdf/2005.14165.pdf
# see table 2.1 in https://arxiv.org/pdf/2005.14165.pdf
# assert all sizes make sense
TOTAL_TRAIN_TOKENS = 300e9
TOTAL_WARMUP_TOKENS = 375e6
M = 1024 * 1024 # 1 million
MODEL_SIZES = {
"8m": Size(4, 128, 2, 64, int(0.5 * M), 1.0e-3, 2), # tiny
"125m": Size(12, 768, 12, 64, int(0.5 * M), 6.0e-4, 2), # small
"350m": Size(24, 1024, 16, 64, int(0.5 * M), 3.0e-4, 2), # medium
"760m": Size(24, 1536, 16, 96, int(0.5 * M), 2.5e-4, 2), # large
"1.3b": Size(24, 2048, 32, 64, int(1.0 * M), 2.0e-4, 2), # xl
"2.7b": Size(32, 2560, 32, 80, int(1.0 * M), 1.6e-4, 4),
"6.7b": Size(32, 4096, 32, 128, int(2.0 * M), 1.2e-4, 2),
"13b": Size(40, 5120, 40, 128, int(4.0 * M), 1.0e-4, 2),
"30b": Size(48, 7168, 56, 128, int(4.0 * M), 1.0e-4, 2),
"66b": Size(64, 9216, 72, 128, int(2.0 * M), 8e-5, 8),
"175b": Size(96, 12288, 96, 128, int(0.25 * M), 3e-5, 8),
}
# from appendix b of https://arxiv.org/pdf/2005.14165.pdf
# see table 2.1 in https://arxiv.org/pdf/2005.14165.pdf
for name, size in MODEL_SIZES.items():
assert size.n_heads * size.d_head == size.emb_size, name
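# Worked check of the assertion above (illustrative): for "125m", 12 heads * 64 d_head
# == 768 == emb_size, and ffn_size == 4 * emb_size == 3072.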
class ComputeEnvs(Enum):
AWS = "aws"
AZURE = "azure"
FAIR = "fair"
DATA_LOCATIONS = {
ComputeEnvs.AZURE: "/data/opt",
}
VALID_SUBSETS = [
"BookCorpusFair",
"CommonCrawl",
"DM_Mathematics",
"Gutenberg_PG-19",
"HackerNews",
"OpenSubtitles",
"OpenWebText2",
"USPTO",
"Wikipedia_en",
"redditflattened",
"stories",
]
|
flash_metaseq-main
|
metaseq/launcher/opt_job_constants.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import fnmatch
import hashlib
import itertools
import os
import random
import shlex
import shutil
import subprocess
import textwrap
from collections import OrderedDict
from pathlib import Path
import metaseq
from metaseq.launcher.sweep import get_env_from_args
from metaseq.utils import get_random_port
def main(get_grid, postprocess_hyperparams, args):
def dry_run(msg):
if args.dry_run:
print(f"| dry-run: {msg}")
return args.dry_run
if args.local:
args.num_nodes = 1
# compute all possible hyperparameter configurations
grid = get_grid(args)
grid_product = list(itertools.product(*[hp.values for hp in grid]))
# randomly shuffle configurations
random.seed(args.seed)
random.shuffle(grid_product)
launch_train(args, grid, grid_product, dry_run, postprocess_hyperparams)
def copy_all_python_files(
source,
snapshot_main_dir,
code_snapshot_hash,
recurse_dirs="metaseq,metaseq_cli,scripts",
):
"""
    Copies the following files from source to destination:
    a) all *.py files directly at the source location.
    b) all *.py, *.so and *.yaml files under each comma-separated directory in
       recurse_dirs (default: "metaseq,metaseq_cli,scripts"), recursively.
"""
def include_patterns(*patterns):
"""Factory function that can be used with copytree() ignore parameter.
Arguments define a sequence of glob-style patterns
that are used to specify what files to NOT ignore.
Creates and returns a function that determines this for each directory
in the file hierarchy rooted at the source directory when used with
shutil.copytree().
from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern
"""
def _ignore_patterns(path, names):
keep = set(
name for pattern in patterns for name in fnmatch.filter(names, pattern)
)
ignore = set(
name
for name in names
if name not in keep and not os.path.isdir(os.path.join(path, name))
)
return ignore
return _ignore_patterns
def pys_but_no_dirs(path, names):
pys = set(fnmatch.filter(names, "*.py"))
return [name for name in names if name not in pys]
destination = os.path.join(snapshot_main_dir, code_snapshot_hash)
# copy root files:
shutil.copytree(source, destination, ignore=pys_but_no_dirs)
# copy folders
for d in recurse_dirs.split(","):
shutil.copytree(
os.path.join(source, d),
os.path.join(destination, d),
ignore=include_patterns("*.py", "*.so", "*.yaml"),
)
return destination
def run_setup(args, config, dry_run):
# compute save_dir
save_dir_key = ".".join(
filter(
lambda save_dir_key: save_dir_key is not None and len(save_dir_key) > 0,
[hp.get_save_dir_key() for hp in config.values()],
)
)
save_dir_key = save_dir_key.replace(",", "_")
num_total_gpus = args.num_nodes * args.num_gpus
save_dir = os.path.join(
args.checkpoints_dir, f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}"
)
# create save directory if it doesn't exist
if not os.path.exists(save_dir):
if not dry_run(f"create directory: {save_dir}"):
os.makedirs(save_dir)
return save_dir_key, save_dir
def is_job_valid(args, save_dir, dry_run):
if has_finished(save_dir):
return False
elif has_failed(save_dir):
if args.resume_failed:
dry_run(f"resume failed run: {save_dir}")
else:
print(f"skip failed run (override with --resume-failed): {save_dir}")
return False
elif has_started(save_dir):
print(f"skip in progress run: {save_dir}")
return False
return True
DEFAULT_NCCL_DEBUG = os.getenv("NCCL_DEBUG", "INFO")
DEFAULT_NCCL_DEBUG_LOCAL = os.getenv("NCCL_DEBUG", "")
def set_env(args, env, dry_run):
if "OMP_NUM_THREADS" not in env:
env["OMP_NUM_THREADS"] = "2"
if args.local:
if not dry_run("start training locally"):
if "CUDA_VISIBLE_DEVICES" not in env:
env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, range(args.num_gpus)))
env["NCCL_DEBUG"] = DEFAULT_NCCL_DEBUG_LOCAL
else:
if args.num_nodes > 1:
env["NCCL_SOCKET_IFNAME"] = "^docker0,lo"
env["NCCL_DEBUG"] = DEFAULT_NCCL_DEBUG
def gen_train_command(args, env, config, oss_destination, save_dir, save_dir_key):
# generate train command
train_cmd = [args.python, os.path.join(oss_destination, args.script)]
train_cmd.extend(["--distributed-world-size", str(args.num_nodes * args.num_gpus)])
if args.num_nodes > 1 or (args.num_gpus > 1 and not args.local):
train_cmd.extend(
[
"--distributed-port",
str(get_random_port()),
]
)
if args.data is not None:
train_cmd.extend([args.data])
if args.local_checkpoints_dir is None:
train_cmd.extend(["--save-dir", save_dir])
else:
num_total_gpus = args.num_nodes * args.num_gpus
local_save_dir = os.path.join(
args.local_checkpoints_dir,
f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}",
)
train_cmd.extend(["--save-dir", local_save_dir])
if getattr(args, "full_azure_upload_path", None) is not None:
if args.azure_folder_auto_name:
from urllib.parse import urlparse
o = urlparse(args.full_azure_upload_path)
o = o._replace(
path=os.path.join(
o.path, f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}"
)
+ "/"
)
train_cmd.extend(["--save-async", "--cloud-upload-path", o.geturl()])
else:
train_cmd.extend(
["--save-async", "--cloud-upload-path", args.full_azure_upload_path]
)
if not args.no_wandb:
try:
import wandb
except ImportError:
wandb = None
if wandb or ("WANDB_API_KEY" in env and "WANDB_BASE_URL" in env):
if "--wandb-project" not in config:
project = f"{args.prefix}.{save_dir_key}"
train_cmd.extend(["--wandb-project", project])
if "WANDB_RUN_GROUP" not in env:
env["WANDB_RUN_GROUP"] = args.prefix
if "WANDB_RUN_ID" not in env:
env["WANDB_RUN_ID"] = hashlib.md5(save_dir.encode("utf-8")).hexdigest()
if "WANDB_RESUME" not in env:
env["WANDB_RESUME"] = "allow"
if not args.no_tensorboard:
if args.tensorboard_logdir is None:
tensorboard_logdir = os.path.join(save_dir, "tb")
else:
tensorboard_logdir = os.path.join(
args.tensorboard_logdir,
f"{args.prefix}.{save_dir_key}.ngpu{str(args.num_nodes * args.num_gpus)}",
)
train_cmd.extend(["--tensorboard-logdir", tensorboard_logdir])
cluster_env = get_env_from_args(args)
train_cmd.extend(["--cluster-env", cluster_env.value])
for hp in config.values():
train_cmd.extend(map(str, hp.get_cli_args()))
return train_cmd
def gen_srun_command_and_str(args, save_dir_key, train_log, train_stderr, train_cmd):
base_srun_cmd = [
"srun",
"--job-name",
f"{args.prefix}.{save_dir_key}",
"--output",
train_log,
"--error",
train_stderr,
"--open-mode",
"append",
"--unbuffered",
]
if args.cpu_bind:
base_srun_cmd += [f"--cpu-bind={args.cpu_bind}"]
if args.salloc:
excluded_hosts = os.environ.get("EXCLUDED_HOSTS", None)
included_hosts = os.environ.get("INCLUDED_HOSTS", None)
base_srun_cmd += [
"--nodes",
str(args.num_nodes),
"--ntasks-per-node",
str(args.num_gpus),
"--ntasks",
str(args.num_gpus * args.num_nodes),
"--cpus-per-task",
args.cpus_per_task,
]
base_srun_cmd += ["-x", excluded_hosts] if excluded_hosts is not None else []
base_srun_cmd += ["-w", included_hosts] if included_hosts is not None else []
srun_cmd = base_srun_cmd + train_cmd
srun_cmd_str = " ".join(map(shlex.quote, srun_cmd))
if getattr(args, "requeue_on_fail", False):
        # sometimes we want the job to be requeued automatically if it exits with a
        # non-zero code, e.g. for very large models with long runtimes.
srun_cmd_str = f"( {srun_cmd_str} || scontrol requeue $SLURM_JOB_ID )"
return srun_cmd, srun_cmd_str
def gen_sbatch_command_and_str(
args, job_name, train_log, train_stderr, oss_destination, srun_cmd_str
):
excluded_hosts = os.environ.get("EXCLUDED_HOSTS", None)
included_hosts = os.environ.get("INCLUDED_HOSTS", None)
sbatch_cmd = [
"sbatch",
"--job-name",
job_name,
"--gpus-per-node",
str(args.num_gpus),
"--nodes",
str(args.num_nodes),
"--ntasks-per-node",
str(args.num_gpus),
"--cpus-per-task",
args.cpus_per_task,
"--output",
train_log,
"--error",
train_stderr,
"--open-mode",
"append",
# '--no-requeue',
"--signal",
"B:USR1@180",
]
if args.constraint:
sbatch_cmd += ["--constraint", args.constraint]
if args.partition:
sbatch_cmd += ["--partition", args.partition]
if args.reservation:
sbatch_cmd += ["--reservation", args.reservation]
if args.exclusive:
sbatch_cmd += ["--exclusive"]
comment = ""
if args.comment:
comment = args.comment
if args.snapshot_code:
comment += (
f", OSS Code Location: {oss_destination}"
if comment
else f"OSS Code Location: {oss_destination}"
)
sbatch_cmd += ["--comment", comment]
if args.time is not None:
sbatch_cmd.extend(["--time", args.time])
if args.mem is not None:
sbatch_cmd += ["--mem", args.mem]
else:
sbatch_cmd += ["--mem", "0"]
sbatch_cmd += ["-x", excluded_hosts] if excluded_hosts is not None else []
sbatch_cmd += ["-w", included_hosts] if included_hosts is not None else []
wrapped_cmd = requeue_support()
if args.azure:
wrapped_cmd += "\n" + azure_support()
wrapped_cmd += "\n" + srun_cmd_str + " \n wait $! \n sleep 610 & \n wait $!"
sbatch_cmd += ["--wrap", wrapped_cmd]
sbatch_cmd_str = " ".join(map(shlex.quote, sbatch_cmd))
return sbatch_cmd, sbatch_cmd_str
def local_run(args, env, train_cmd, dry_run):
assert args.num_nodes == 1, "distributed training cannot be combined with --local"
if not dry_run("start training locally"):
if "CUDA_VISIBLE_DEVICES" not in env:
env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, range(args.num_gpus)))
env["NCCL_DEBUG"] = DEFAULT_NCCL_DEBUG_LOCAL
train_proc = subprocess.Popen(train_cmd, env=env)
train_proc.wait()
def run_batch(env, sbatch_cmd_str, sbatch_cmd):
print(f"running command: {sbatch_cmd_str}\n")
with subprocess.Popen(sbatch_cmd, stdout=subprocess.PIPE, env=env) as train_proc:
stdout = train_proc.stdout.read().decode("utf-8")
try:
job_id = int(stdout.rstrip().split()[-1])
print(f"Launched job {job_id}")
except IndexError:
job_id = None
return job_id, stdout
def write_git_commit(train_log):
with open(train_log, "a") as train_log_h:
# log most recent git commit
git_commit = subprocess.check_output(
"git log | head -n 1", shell=True, encoding="utf-8"
)
print(git_commit.rstrip(), file=train_log_h)
def dry_run_batch(env, train_log, train_stderr, sbatch_cmd_str, sbatch_cmd, dry_run):
dry_run("start remote training")
dry_run(f"- log stdout to: {train_log}")
dry_run(f"- log stderr to: {train_stderr}")
dry_run(f"- run command: {sbatch_cmd_str}")
sbatch_cmd += ["--test-only"]
with subprocess.Popen(sbatch_cmd, stdout=subprocess.PIPE, env=env) as train_proc:
stdout = train_proc.stdout.read().decode("utf-8")
print(stdout)
def launch_train(args, grid, grid_product, dry_run, postprocess_hyperparams):
oss_destination = ""
if args.snapshot_code:
# Currently hash is just the current time in ISO format.
# Remove colons since they cannot be escaped in POSIX PATH env vars.
code_snapshot_hash = datetime.datetime.now().isoformat().replace(":", "_")
# Copy metaseq OSS code
metaseq_oss_path = str(Path(metaseq.__file__).parents[1])
oss_destination = copy_all_python_files(
metaseq_oss_path,
os.path.join(args.snapshot_root, "slurm_snapshot_code_oss"),
code_snapshot_hash,
args.snapshot_recurse_dirs_oss,
)
os.environ["PYTHONPATH"] = (
oss_destination + ":" + os.environ.get("PYTHONPATH", "")
)
# set environment
base_env = os.environ.copy()
set_env(args, base_env, dry_run)
# start training
for i, hp_values in enumerate(grid_product):
if i == args.num_trials:
break
config = OrderedDict()
for hp, value in zip(grid, hp_values):
config[hp.name] = hp
config[hp.name].current_value = value
# postprocess hyperparams
postprocess_hyperparams(args, config)
save_dir_key, save_dir = run_setup(args, config, dry_run)
# check if job failed, exists, finished
if not is_job_valid(args, save_dir, dry_run):
continue
# clone base env and update for this job, e.g., we set WANDB_RUN_ID
# based on the save_dir, which is based on the current hyperparam values
env = base_env.copy()
# generate train command
train_cmd = gen_train_command(
args, env, config, oss_destination, save_dir, save_dir_key
)
train_log = os.path.join(save_dir, "train.log")
train_stderr = os.path.join(save_dir, "train.stderr.%j") # %j = slurm job id
srun_cmd, srun_cmd_str = gen_srun_command_and_str(
args, save_dir_key, train_log, train_stderr, train_cmd
)
job_id = None
if args.dry_run:
train_cmd_str = " ".join(train_cmd)
dry_run(f"train command: {train_cmd_str}")
if args.local:
local_run(args, env, train_cmd, dry_run)
else:
srun_cmd_str = srun_cmd_str + " &"
# build command
if not args.salloc:
job_name = f"{args.prefix}.{save_dir_key}"
sbatch_cmd, sbatch_cmd_str = gen_sbatch_command_and_str(
args,
job_name,
train_log,
train_stderr,
oss_destination,
srun_cmd_str,
)
else:
sbatch_cmd = srun_cmd
sbatch_cmd_str = srun_cmd_str
if args.dry_run:
dry_run_batch(
env, train_log, train_stderr, sbatch_cmd_str, sbatch_cmd, dry_run
)
else:
write_git_commit(train_log)
with open(train_log, "a") as train_log_h:
job_id, stdout = run_batch(env, sbatch_cmd_str, sbatch_cmd)
print(stdout, file=train_log_h)
if job_id is not None:
print("Launched {}".format(job_id))
def has_finished(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
with open(train_log, "r") as h:
lines = h.readlines()
if len(lines) == 0:
return False
if "done training" in lines[-1]:
return True
return False
def has_failed(save_dir):
if not os.path.exists(save_dir):
return False
# find max job id
job_ids = []
for fn in os.listdir(save_dir):
if fn.startswith("train.stderr."):
job_ids.append(int(fn.split(".")[-1]))
if len(job_ids) == 0:
return False
max_job_id = max(job_ids)
def _has_failed(stderr_fn):
with open(stderr_fn, "r") as h:
for line in h:
if len(line.strip()) > 0:
# assume that any output in stderr indicates an error
return True
return False
return _has_failed(os.path.join(save_dir, f"train.stderr.{max_job_id}"))
def has_started(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
return True
def requeue_support():
return textwrap.dedent(
"""
trap_handler () {
echo "Caught signal: " $1
# SIGTERM must be bypassed
if [ "$1" = "TERM" ]; then
echo "bypass sigterm"
else
# Submit a new job to the queue
echo "Requeuing " $SLURM_JOB_ID
scontrol requeue $SLURM_JOB_ID
fi
}
# Install signal handler
trap 'trap_handler USR1' USR1
trap 'trap_handler TERM' TERM
"""
)
def azure_support():
return textwrap.dedent(
"""
export NCCL_TOPO_FILE=/opt/microsoft/ndv4-topo.xml
export NCCL_IB_PCI_RELAXED_ORDERING=1
export UCX_IB_PCI_RELAXED_ORDERING=on
export NCCL_SOCKET_IFNAME=eth0
export UCX_NET_DEVICES=eth0
export CUDA_DEVICE_ORDER=PCI_BUS_ID
export OMPI_MCA_COLL_HCOLL_ENABLE=0
if [ -e "/etc/profile.d/modules.sh" ]; then
. /etc/profile.d/modules.sh
module load mpi/hpcx
fi
"""
)
|
flash_metaseq-main
|
metaseq/launcher/slurm.py
|
flash_metaseq-main
|
metaseq/launcher/__init__.py
|
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import os
import subprocess
from typing import Optional, List, Callable, MutableMapping
from urllib.parse import urlparse
from metaseq.launcher.opt_job_constants import ComputeEnvs
class hyperparam(object):
"""Base class for defining hyperparameters."""
def __init__(
self,
name,
values=None,
binary_flag=False,
save_dir_key=None,
positional_arg=False,
):
"""
Arguments:
- name : the name of the hyperparameter (e.g., `--dropout`)
- values : the set of values to sweep over (e.g., `[0.0, 0.1, 0.2]`)
- binary_flag : whether the hyperparameter uses a boolean flag (e.g., `--no-save`)
- save_dir_key : function that takes the hyperparameter value and returns the "key"
to be appended to the output directory name
- positional_arg : whether the hyperparameter is a positional argument
"""
self.name = name
if values is None: # syntactic sugar for binary flags
self.values = [True]
self.binary_flag = True
else:
self.values = values if isinstance(values, list) else [values]
self.binary_flag = binary_flag
self.save_dir_key = save_dir_key
self.positional_arg = positional_arg
self.current_value = None
if positional_arg and name.startswith("-"):
raise ValueError(
f"positional arguments must not start with a dash ({name})"
)
if len(self.values) > 1 and self.save_dir_key is None:
raise ValueError(
f"{name} has more than one value but is missing a save_dir_key!"
)
def get_cli_args(self):
if self.binary_flag:
return [self.name] if self.current_value else []
elif self.positional_arg:
return [self.current_value]
else:
return [self.name, self.current_value]
def get_save_dir_key(self):
if self.save_dir_key is None:
return None
if self.binary_flag:
return self.save_dir_key(1) if self.current_value else None
return self.save_dir_key(self.current_value)
def get_env_from_args(args):
# Returns a ComputeEnvs enum.
if args.azure:
return ComputeEnvs.AZURE
elif args.aws:
return ComputeEnvs.AWS
elif args.fair:
return ComputeEnvs.FAIR
else:
raise NotImplementedError(
"Env not passed in! Please pass in one of: --azure, --aws, --fair"
)
def _get_args(add_extra_options_func=None, input_args: Optional[List[str]] = None):
"""
input_args (List[str]): strings to parse, defaults to sys.argv
"""
parser = argparse.ArgumentParser("Script for launching hyperparameter sweeps ")
parser.add_argument("--grid", help="grid function we used", default=None)
parser.add_argument("-d", "--data", help="path to data directory")
parser.add_argument(
"-p",
"--prefix",
required=True,
help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
)
parser.add_argument(
"-t",
"--num-trials",
default=-1,
type=int,
help="number of random hyperparam configurations to try (-1 for grid search)",
)
parser.add_argument(
"-g", "--num-gpus", type=int, required=True, help="number of GPUs per node"
)
parser.add_argument(
"-n",
"--num-nodes",
type=int,
default=1,
help="number of nodes for distributed training",
)
parser.add_argument(
"--update-freq",
type=int,
default=0,
help="update freq",
)
parser.add_argument("--seed", type=int, default=1234)
parser.add_argument(
"--resume-failed",
action="store_true",
help="resume any runs that failed",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="output only a list of actions to perform without performing them",
)
parser.add_argument("--local", action="store_true", help="run job locally")
parser.add_argument("--debug", action="store_true", help="debug")
parser.add_argument(
"--script", default="metaseq_cli/train.py", help="script to launch"
)
parser.add_argument(
"--python", default="python", help="path to nonstandard python binary"
)
# Slurm params
parser.add_argument(
"--salloc", action="store_true", help="run agaist current allocation"
)
parser.add_argument("--reservation", help="reservation to run on")
parser.add_argument(
"--exclusive", action="store_true", help="if set, get exclusive host"
)
parser.add_argument(
"--time", default="4320", help="expected job duration in minutes"
)
parser.add_argument("--mem", "--mem", help="memory to request")
parser.add_argument(
"--constraint",
metavar="CONSTRAINT",
help='gpu constraint, if any. e.g. "volta"',
)
parser.add_argument("--comment", help="comment string")
parser.add_argument(
"--snapshot-code",
action="store_true",
default=False,
help="Flag for creating a snapshot of training code while creating slurm job,"
' path is "./slurm_snapshot_code/<TIME_ISO_FORMAT/>:", '
"can find time from comment of slurm job.",
)
parser.add_argument(
"--snapshot-root",
type=str,
default=".",
help="root path for saving the snapshot code.",
)
parser.add_argument(
"--snapshot-recurse-dirs-oss",
default="metaseq,metaseq_cli",
help="comma-separated directories from where to recursively copy *.py, *.so and *.yaml files",
)
parser.add_argument(
"--no-tensorboard", action="store_true", help="disable tensorboard logging"
)
parser.add_argument("--no-wandb", action="store_true", help="disable WandB logging")
# Env flags
parser.add_argument("--azure", action="store_true", help="running on azure")
parser.add_argument("--aws", action="store_true", help="running on aws")
parser.add_argument("--fair", action="store_true", help="running on fair")
# Azure specific flag
parser.add_argument(
"--full-azure-upload-path",
default=None,
help="Azure blob storage SAS URL",
)
parser.add_argument(
"--azure-folder-auto-name",
action="store_true",
help="Automatically name azure folder",
)
# Following args have env specific defaults.
parser.add_argument(
"--partition",
help="slurm partition to run on",
)
parser.add_argument(
"--checkpoints-dir",
help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
)
parser.add_argument("--cpus-per-task", type=str)
parser.add_argument(
"--cpu-bind", help="configured to improve all-to-all perf, especially on A100s"
)
parser.add_argument(
"--local-checkpoints-dir",
help="node-local directory for saving checkpoints",
)
parser.add_argument(
"--tensorboard-logdir",
default=None, # None will default to save_dir/tb
help="save tensorboard logs in <tensorboard-logdir>/<prefix>.<save_dir_key>",
)
if add_extra_options_func is not None: # mutates parser
add_extra_options_func(parser)
args = parser.parse_args(input_args)
# Env check
assert (
sum([args.azure, args.aws, args.fair]) == 1
), "Must pass an env, and only one env (--azure, --aws, --fair)!"
# Set defaults based on env
env = get_env_from_args(args)
_modify_arg_defaults_based_on_env(env, args)
return args
def _modify_arg_defaults_based_on_env(env, args):
# TODO(susan): move all this default logic into separate config file
default_partition = None
if env == ComputeEnvs.FAIR:
default_partition = "learnfair"
default_prefix = ""
if env == ComputeEnvs.AZURE:
default_prefix = "/shared/home"
elif env == ComputeEnvs.AWS:
default_prefix = "/checkpoints"
elif env == ComputeEnvs.FAIR:
default_prefix = "/checkpoint"
if env == ComputeEnvs.FAIR:
default_checkpoint_dir = os.path.join(
default_prefix, os.environ["USER"], str(datetime.date.today())
)
else:
default_checkpoint_dir = os.path.join(
default_prefix,
os.environ["USER"],
"checkpoints",
str(datetime.date.today()),
)
default_cpu_per_task = None
if env == ComputeEnvs.AZURE or env == ComputeEnvs.AWS:
default_cpu_per_task = 12
elif env == ComputeEnvs.FAIR:
default_cpu_per_task = 10
default_cpu_bind = "none"
if env == ComputeEnvs.AZURE:
default_cpu_bind = (
"mask_cpu:ffffff000000,ffffff000000,ffffff,ffffff,"
"ffffff000000000000000000,ffffff000000000000000000,"
"ffffff000000000000,ffffff000000000000"
)
elif env == ComputeEnvs.AWS:
default_cpu_bind = (
"mask_cpu:000000ffffff000000ffffff,000000ffffff000000ffffff,"
"000000ffffff000000ffffff,000000ffffff000000ffffff,"
"ffffff000000ffffff000000,ffffff000000ffffff000000,"
"ffffff000000ffffff000000,ffffff000000ffffff000000"
)
elif env == ComputeEnvs.FAIR:
default_cpu_bind = "map_ldom:0,0,0,0,1,1,1,1"
default_local_checkpoints_dir = None
if env == ComputeEnvs.AZURE:
azure_upload_path = os.environ.get("AZURE_BLOB_SAS_URL", "")
if azure_upload_path != "":
# write checkpoints to local scratch storage on each node
default_local_checkpoints_dir = os.path.join(
"/mnt/scratch",
os.environ["USER"],
"checkpoints",
str(datetime.date.today()),
)
# then copy them to Azure blob storage
o = urlparse(azure_upload_path)
o = o._replace(
path=os.path.join(
o.path, os.environ["USER"], str(datetime.date.today())
)
)
azure_upload_path = o.geturl()
# set upload path if not specified
if args.full_azure_upload_path is None:
args.full_azure_upload_path = azure_upload_path
# if needed, create a container for this user on the Azure blob account
cmd = [
"azcopy", # TODO(susanz): requires azcopy to be installed.
"make",
o._replace(path=os.path.dirname(o.path)).geturl(),
]
subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# assign default slurm partition
if args.partition is None:
args.partition = default_partition
# assign default checkpoint directory
if args.checkpoints_dir is None:
args.checkpoints_dir = default_checkpoint_dir
# assign default # cpus per task
if args.cpus_per_task is None:
args.cpus_per_task = str(default_cpu_per_task)
# assign default cpu bind
if args.cpu_bind is None:
args.cpu_bind = default_cpu_bind
# assign default local checkpoint dir
if args.local_checkpoints_dir is None:
args.local_checkpoints_dir = default_local_checkpoints_dir
def main(
get_grid: Callable[[argparse.Namespace], List[hyperparam]],
postprocess_hyperparams: Callable[
[argparse.Namespace, MutableMapping[str, hyperparam]], None
],
add_extra_options_func: Optional[Callable[[argparse.ArgumentParser], None]] = None,
scheduler_args: Optional[List[str]] = None,
) -> None:
"""Do a grid search.
Parameters:
get_grid: A unary callable which returns the grid to search over.
The callable is passed the parsed sweep arguments including the extra
arguments defined by `add_extra_options_func`. See also `get_args`.
The returned list represents the dimensions of the grid. That is, a list of
length n represents a grid of dimension n. Let v_i denote the number of
possible values for dimension i. Then the total number of configurations
is given by v_1 * ... * v_n.
postprocess_hyperparams: A 2-ary callable to post-process hyperparameter
configurations before running the job. The first argument is the parsed
sweep arguments including the extra arguments defined by
`add_extra_options_func`. The second argument is a realized hyperparameter
configuration as a mutable mapping of hyperparameter name to `hyperparam`
instance with a `current_value` set.
add_extra_options_func: A unary callable which adds extra arguments to the
sweep CLI. It is passed the parser used to define the sweep script's CLI.
scheduler_args: A list of unprocessed arguments to parse. If None, then
`sys.argv[1:]`.
"""
args = _get_args(add_extra_options_func, scheduler_args)
from .slurm import main as backend_main
get_grid = get_grid[args.grid] if args.grid is not None else get_grid
backend_main(get_grid, postprocess_hyperparams, args)
|
flash_metaseq-main
|
metaseq/launcher/sweep.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This sweep script takes some additional optional arguments. See add_extra_options_func
for more details.
"""
import os
from metaseq.launcher.opt_job_constants import (
TOTAL_TRAIN_TOKENS,
TOTAL_WARMUP_TOKENS,
MODEL_SIZES,
DATA_LOCATIONS,
VALID_SUBSETS,
)
from metaseq.launcher.sweep import (
hyperparam,
get_env_from_args,
main as sweep_main,
)
# have to do this at the module level, unfortunately; unable to use args.<env>
for _cluster, _folder in DATA_LOCATIONS.items():
if os.path.exists(_folder):
try:
import metaseq_internal # noqa: F401
from metaseq_internal.fb_sweep.dependency_checks import * # noqa
except ImportError:
print("\n\nmetaseq_internal not installed! Proceeding...")
pass
break
def add_extra_options_func(parser):
# NOTE we shouldn't add new options here... track changes via git instead
parser.add_argument(
"--restore-file", help="load an existing checkpoint for continuing training"
)
parser.add_argument(
"--reset-dataloader",
action="store_true",
help="reset the dataloader to epoch 1",
)
parser.add_argument("--model-size", choices=MODEL_SIZES.keys(), required=True)
parser.add_argument(
"--no-save-dir", action="store_true", help="avoid saving with hparams"
)
# Args related to benchmarking and profiling
parser.add_argument(
"--benchmark",
action="store_true",
help="use synthetic data and only train for 50 steps (for benchmarking)",
)
parser.add_argument(
"--profile",
default=False,
action="store_true",
)
parser.add_argument("--max-update", "--mu", type=int, default=None)
def get_grid(args):
# Infer data path if not given
DATA_ROOT = ""
if args.data is None and not args.benchmark:
cluster_env = get_env_from_args(args)
args.data = os.path.join(
DATA_LOCATIONS[cluster_env], "corpus_dedup_10_10_1_0.05_exp29"
)
if os.path.exists(args.data):
DATA_ROOT = DATA_LOCATIONS[cluster_env]
else:
raise RuntimeError("Where are you running this?! Check DATA_LOCATIONS.")
SEQ_LEN = 2048
size = MODEL_SIZES[args.model_size]
# updates = 300B tokens / 2048 seq_len / 1024 batchsize
total_gpus = args.num_gpus * args.num_nodes
    # TODO: fix training to run with 1 gpu (see issue #176, "Enable sweep scripts to run with a single GPU")
if args.num_gpus < 2:
raise ValueError("Need at least two gpus to run model parallel code")
if total_gpus < size.model_parallel:
raise ValueError(
"Total gpus (num_gpus * num_nodes) must be great than model parallel factor"
)
if total_gpus % size.model_parallel != 0:
raise ValueError(
"Total gpus (num_gpus * num_nodes) must be divisible by model parallel factor"
)
total_gpus = (args.num_gpus * args.num_nodes) // size.model_parallel
ddp_bsz = (size.batch_size // total_gpus) // SEQ_LEN
total_updates = args.max_update
if total_updates is None:
total_updates = int(TOTAL_TRAIN_TOKENS) // size.batch_size
warmup_updates = int(TOTAL_WARMUP_TOKENS) // size.batch_size
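    # Worked example (illustrative): for "125m" (batch_size = 0.5M = 524288 tokens) on
    # 32 GPUs with model_parallel = 2 there are 16 data-parallel shards, so
    # ddp_bsz = (524288 // 16) // 2048 = 16 sequences per shard, while
    # total_updates = int(300e9) // 524288 = 572204 and
    # warmup_updates = int(375e6) // 524288 = 715 (when --max-update is not given).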
log_interval = 1
grid = []
# default streaming_lm task config
task_config = [
hyperparam("--task", "streaming_language_modeling"),
hyperparam(
"--sample-break-mode",
"none",
save_dir_key=lambda val: f"bm_{val}" if not no_save_params else "",
),
hyperparam(
"--vocab-filename",
os.path.join(DATA_ROOT, "tokenizers/gpt2-vocab.json"),
save_dir_key=lambda _: "gpt2" if not no_save_params else "",
),
hyperparam(
"--merges-filename", os.path.join(DATA_ROOT, "tokenizers/gpt2-merges.txt")
),
]
# separate task config for dummy_lm
if args.benchmark:
# Overrides for speed benchmarking
args.data = None
task_config = [
hyperparam("--task", "dummy_lm", save_dir_key=lambda val: val),
hyperparam(
"--dict-size", 51200 - 4
), # TODO(susan): what is this -4 sorcery? relic of more nmt things?
]
total_updates = 50
warmup_updates = 50
log_interval = 5
grid += task_config
if args.profile:
grid += [hyperparam("--new-profiler")]
no_save_params = args.no_save_dir
args.snapshot_code = True
grid += [
hyperparam("--train-subset", "train"),
hyperparam("--valid-subset", ",".join(f"valid/{ss}" for ss in VALID_SUBSETS)),
hyperparam("--ignore-unused-valid-subsets"),
hyperparam("--num-workers", 8),
hyperparam("--num-workers-valid", 1),
hyperparam("--validate-interval-updates", 2000),
hyperparam("--save-interval-updates", 2000),
hyperparam(
"--no-epoch-checkpoints"
), # only save checkpoints based on num steps
hyperparam("--no-best-checkpoints"), # don't save checkpoint_best.pt
hyperparam(
"--memory-efficient-fp16",
save_dir_key=lambda val: "me_fp16" if not no_save_params else "",
),
hyperparam("--fp16-init-scale", 4),
        # we set this for the main run but it's probably not needed here
# hyperparam("--threshold-loss-scale", 0.25, save_dir_key=lambda val: f"minscale{val}"),
hyperparam(
"--ddp-backend",
"fully_sharded",
save_dir_key=lambda val: "fsdp" if not no_save_params else "",
),
hyperparam("--no-reshard-after-forward", save_dir_key=lambda _: "zero2"),
hyperparam("--use-sharded-state"),
hyperparam("--checkpoint-activations"),
hyperparam("--model-parallel-size", size.model_parallel),
hyperparam("--criterion", "vocab_parallel_cross_entropy"),
hyperparam("--distribute-checkpointed-activations"),
hyperparam("--tensor-parallel-init-model-on-gpu"),
# Flags to match exact same initialization of Megatron code for exp 12.00
hyperparam("--full-megatron-init"),
hyperparam("--megatron-init-sigma", 0.006),
hyperparam(
"--activation-fn",
"relu",
save_dir_key=lambda x: x if not no_save_params else "",
),
hyperparam(
"--arch",
"transformer_lm_megatron",
save_dir_key=lambda val: val if not no_save_params else "",
),
hyperparam("--share-decoder-input-output-embed"),
hyperparam(
"--decoder-layers",
size.n_layers,
save_dir_key=lambda val: f"nlay{val}" if not no_save_params else "",
),
hyperparam(
"--decoder-embed-dim",
size.emb_size,
save_dir_key=lambda val: f"emb{val}" if not no_save_params else "",
),
hyperparam("--decoder-ffn-embed-dim", size.ffn_size),
hyperparam("--decoder-attention-heads", size.n_heads),
# Switch to learned position embeddings for exp 12.00, without scaling
hyperparam(
"--decoder-learned-pos",
save_dir_key=lambda _: "lrnpos" if not no_save_params else "",
),
hyperparam(
"--no-scale-embedding",
save_dir_key=lambda _: "0emb_scale" if not no_save_params else "",
),
hyperparam(
"--tokens-per-sample",
SEQ_LEN,
save_dir_key=lambda val: f"tps{val}" if not no_save_params else "",
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
# GPT-3 uses "(0.9, 0.95)"
hyperparam(
"--adam-betas",
f"(0.9, 0.95)",
save_dir_key=lambda val: "b2_{}".format(eval(val)[1])
if not no_save_params
else "",
),
# Sometimes lowering --adam-eps to 1e-6 can stabilize training
hyperparam(
"--adam-eps",
1e-8,
save_dir_key=lambda val: f"eps{val}" if not no_save_params else "",
),
# GPT-3 used --clip-norm=1.0
hyperparam(
"--clip-norm",
1.0,
save_dir_key=lambda val: f"cl{val}" if not no_save_params else "",
),
hyperparam("--clip-norm-type", "l2"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam(
"--lr",
size.lr,
save_dir_key=lambda val: f"lr{val:.3g}" if not no_save_params else "",
),
hyperparam(
"--end-learning-rate",
size.lr * 0.1,
save_dir_key=lambda val: f"endlr{val:.3g}" if not no_save_params else "",
),
hyperparam(
"--warmup-updates",
warmup_updates,
save_dir_key=lambda val: f"wu{val}" if not no_save_params else "",
),
hyperparam("--total-num-update", total_updates),
hyperparam(
"--dropout",
0.1,
save_dir_key=lambda val: f"dr{val}" if not no_save_params else "",
),
hyperparam(
"--attention-dropout",
0.1,
save_dir_key=lambda val: f"atdr{val}" if not no_save_params else "",
),
hyperparam(
"--no-emb-dropout",
save_dir_key=lambda _: "0emb_dr" if not no_save_params else "",
),
hyperparam(
"--weight-decay",
0.1,
save_dir_key=lambda val: f"wd{val}" if not no_save_params else "",
),
hyperparam(
"--batch-size",
ddp_bsz,
save_dir_key=lambda val: f"ms{val}" if not no_save_params else "",
),
hyperparam(
"--update-freq",
1,
save_dir_key=lambda val: f"uf{val}" if not no_save_params else "",
),
hyperparam(
"--max-update",
total_updates,
save_dir_key=lambda val: f"mu{val}" if not no_save_params else "",
),
hyperparam(
"--seed",
1,
save_dir_key=lambda val: f"s{val}" if not no_save_params else "",
),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", log_interval),
hyperparam("--required-batch-size-multiple", 1),
]
if args.restore_file:
grid += [hyperparam("--restore-file", args.restore_file)]
if args.reset_dataloader:
grid += [hyperparam("--reset-dataloader")]
return grid
def postprocess_hyperparams(args, config):
pass
def cli_main():
sweep_main(
get_grid, postprocess_hyperparams, add_extra_options_func=add_extra_options_func
)
if __name__ == "__main__":
cli_main()
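# Illustrative invocation (a sketch, not a documented command line). The
# --model-size / --benchmark / --restore-file flags are defined above in
# add_extra_options_func; the remaining flags are assumed to come from
# metaseq's sweep_main (-g/--num-gpus, -n/--num-nodes, -p/--prefix, --local)
# and may differ in your checkout, as may the available MODEL_SIZES keys:
#
#   python -m metaseq.launcher.opt_baselines \
#       --model-size 125m --benchmark -g 8 -n 1 -p test_v0 --local
#
# With --benchmark this would run a short 50-step synthetic-data run of the
# chosen config rather than a real training sweep.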
|
flash_metaseq-main
|
metaseq/launcher/opt_baselines.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Streaming Language Modeling task that loads corpora in src-tgt format and performs
on-the-fly tokenization.
"""
import logging
import os
from typing import Any, Dict, List
import torch
from metaseq.data import (
JsonlDataset,
StreamingShuffleDataset,
StreamingSrcTgtDataset,
data_utils,
)
from metaseq.tasks.streaming_language_modeling import (
StreamingLanguageModelingTask,
StreamingLanguageModelingConfig,
)
from metaseq.tasks import register_task
logger = logging.getLogger(__name__)
@register_task(
"streaming_finetune_language_modeling", dataclass=StreamingLanguageModelingConfig
)
class StreamingFinetuneLanguageModelingTask(StreamingLanguageModelingTask):
def _tokenize_src_tgt_json(self, json):
src = json["src"].rstrip(" ")
tgt = json["tgt"].rstrip()
full_tokens = torch.LongTensor(
self.tokenizer.encode(" ".join([src, tgt])).ids + [self.eod]
)
src_tokens_len = len(self.tokenizer.encode(src).ids)
tgt_tokens = torch.clone(full_tokens)
tgt_tokens[:src_tokens_len] = self.dictionary.pad_index
return (full_tokens, tgt_tokens)
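    # Illustrative sketch of what _tokenize_src_tgt_json produces (token ids are
    # hypothetical): for json = {"src": "2+2 =", "tgt": "4"}, suppose
    #   tokenizer.encode("2+2 = 4").ids == [17, 23, 41, 99]
    #   tokenizer.encode("2+2 =").ids   == [17, 23, 41]
    # Then full_tokens == [17, 23, 41, 99, eod] and tgt_tokens is a copy whose
    # first 3 (source) positions are overwritten with the pad index, so the
    # collated loss below only covers the target span.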
def load_dataset(self, split: str, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
The folder structure is assumed to look like:
/path/to/data/train/00/foo.jsonl
/path/to/data/train/00/bar.jsonl
/path/to/data/train/01/foo.jsonl
/path/to/data/train/01/bar.jsonl
/path/to/data/valid/00/foo.jsonl
/path/to/data/valid/00/bar.jsonl
In this example, we have two "shards" of training data, which will be
iterated over in epochs 1 and 2, respectively. Subsequent epochs will
cycle back over the same data. We also have two different data sources
in each shard (foo and bar), which will be combined and shuffled.
Each jsonl entry is a dict with "src" and "tgt" keys. Loss is computed
only on the tgt tokens.
Args:
split (str): name of the split (e.g., train, valid, valid1, test)
"""
# This function reads a bunch of jsonl files, concats them together,
# shuffles them, then chunks them into blocks of tokens (e.g., 2048).
        # determine number of shards for this split
        shards = {}
cur_shard_str = self.get_shard_str(epoch, split)
# concatenate any jsonl files that are part of the shard
datasets, corpora = [], []
for file in sorted(
os.listdir(os.path.join(self.args.data, split, cur_shard_str))
):
if not file.endswith(".jsonl"):
continue
datasets.append(
JsonlDataset(
path=os.path.join(self.args.data, split, cur_shard_str, file),
tokenizer=self._tokenize_src_tgt_json,
)
)
corpora.append(os.path.splitext(file)[0])
assert len(datasets) > 0
if (
self.args.multicorpus_sampling_alpha != 1
or self.args.multicorpus_sampling_maximum > 0
):
datasets = self._alpha_sampling(datasets, corpora, epoch)
dataset = torch.utils.data.ConcatDataset(datasets)
# shuffle order across epochs
dataset = StreamingShuffleDataset(dataset, seed=self.args.seed)
self.datasets[split] = StreamingSrcTgtDataset(
dataset,
# We generate blocks with one extra token, so that we have a target
# for the final input token. This results in slight data loss.
block_size=self.args.tokens_per_sample + 1,
break_mode=self.args.sample_break_mode,
# we drop the remainder block during training
drop_last=(split == "train"),
padding_idx=self.source_dictionary.pad(),
# 1284 is a randomly-generated offset to decouple the seed used here
# from the seed used above in StreamingShuffleDataset
# TODO: Track this seed to avoid collisions. See issue #65
seed=1284 + self.args.seed,
)
def _collate_fn(self, items: List[Dict[str, Any]]):
# StreamingTokenBlockDataset returns None as filler
if len([x for x in items if x is not None]) == 0:
return {}
src_tokens = data_utils.collate_tokens(
[x["src_block"] for x in items if x is not None],
pad_idx=self.source_dictionary.pad(),
pad_to_bsz=self.args.batch_size,
)
tgt_tokens = data_utils.collate_tokens(
[x["tgt_block"] for x in items if x is not None],
pad_idx=self.source_dictionary.pad(),
pad_to_bsz=self.args.batch_size,
)
# generate inputs and targets
input = src_tokens[:, :-1].contiguous()
target = tgt_tokens[:, 1:].contiguous()
ids = torch.cat([x["ids"] for x in items if x is not None])
if ids.numel() != torch.unique(ids).numel():
n_duplicate = ids.numel() - torch.unique(ids).numel()
logger.error(
f"found {n_duplicate}/{ids.numel()} duplicate document IDs in the same batch!"
)
# metaseq expects batches to have the following structure
return {
"id": ids,
"net_input": {
"src_tokens": input,
},
"target": target,
"nsentences": input.size(0),
"ntokens": input.ne(self.dictionary.pad()).sum(),
"ntokens_target": target.ne(self.dictionary.pad()).sum(),
}
|
flash_metaseq-main
|
metaseq/tasks/streaming_finetune_language_modeling.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import Optional, List
import numpy as np
import torch
from omegaconf import II
from metaseq import utils
from metaseq.data import (
AppendTokenDataset,
Dictionary,
IdDataset,
LMContextWindowDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
MultiplePadDataset,
data_utils,
)
from metaseq.data.indexed_dataset import get_available_dataset_impl
from metaseq.data.shorten_dataset import maybe_shorten_dataset
from metaseq.dataclass import ChoiceEnum, MetaseqDataclass
from metaseq.tasks import LegacyTask, register_task
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
logger = logging.getLogger(__name__)
try:
from tokenizers import ByteLevelBPETokenizer
has_hf_tokenizers = True
except ImportError:
has_hf_tokenizers = False
@dataclass
class LanguageModelingInferenceForModelsTrainedWithStreamingConfig(MetaseqDataclass):
data: Optional[str] = field(
default=None, metadata={"help": "path to data directory"}
)
vocab_filename: Optional[str] = field(
default="", metadata={"help": "path to bpe-vocab.json"}
)
merges_filename: Optional[str] = field(
default="", metadata={"help": "path to bpe-merges.json"}
)
end_of_document_symbol: Optional[str] = field(
default="</s>", metadata={"help": "symbol indicating an end-of-document"}
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="none",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
output_dictionary_size: int = field(
default=-1, metadata={"help": "limit the size of output dictionary"}
)
self_target: bool = field(default=False, metadata={"help": "include self target"})
future_target: bool = field(
default=False, metadata={"help": "include future target"}
)
past_target: bool = field(default=False, metadata={"help": "include past target"})
add_bos_token: bool = field(
default=False, metadata={"help": "prepend beginning of sentence token (<s>)"}
)
max_source_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the target sequence"}
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
pad_to_fixed_length: Optional[bool] = field(
default=False,
metadata={"help": "pad to fixed length"},
)
pad_to_fixed_bsz: Optional[bool] = field(
default=False,
metadata={"help": "boolean to pad to fixed batch size"},
)
# TODO common vars below add to parent
seed: int = II("common.seed")
batch_size: Optional[int] = II("dataset.batch_size")
batch_size_valid: Optional[int] = II("dataset.batch_size_valid")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
data_buffer_size: int = II("dataset.data_buffer_size")
tpu: bool = II("common.tpu")
use_plasma_view: bool = II("common.use_plasma_view")
plasma_path: str = II("common.plasma_path")
update_freq: List[int] = II("optimization.update_freq")
@register_task(
"language_modeling_inference_for_models_trained_with_streaming",
dataclass=LanguageModelingInferenceForModelsTrainedWithStreamingConfig,
)
class LanguageModelingInferenceForModelsTrainedWithStreamingTask(LegacyTask):
"""
This class is specially developed for inference of models trained
    with the new StreamingLanguageModelingTask, but closely follows the language_modeling implementation.
"""
def __init__(self, args):
super().__init__(args)
if not has_hf_tokenizers:
raise ImportError("Please install tokenizers with: pip install tokenizers")
self.tokenizer = ByteLevelBPETokenizer.from_file(
args.vocab_filename, args.merges_filename
)
self.eod = self.tokenizer.token_to_id(args.end_of_document_symbol)
if self.eod is None:
# fix for 7B_punctsplit_bpe_relu2_229k
# TODO(susanz): Figure out why this is different from StreamingLanguageModelingTask
self.eod = self.tokenizer.token_to_id("<endoftext|>")
assert (
self.eod is not None
), "Cannot find end-of-document symbol ({}) in tokenizer".format(
args.end_of_document_symbol
)
# construct a dummy metaseq Dictionary corresponding to the given tokenizer
self.dictionary = Dictionary()
for id in range(self.dictionary.nspecial, self.tokenizer.get_vocab_size()):
# This aims at producing a dict with first symbols being special
# (<s> <pad>) and the rest '4', '5' to be backward compatible with
# the current few_shot eval logic. See
# examples/few_shot/predictor.py
if id <= max(self.eod, 3):
self.dictionary.add_symbol(self.tokenizer.id_to_token(id))
else:
self.dictionary.add_symbol(str(id))
# confirm that metaseq dictionary and BPE have matching special symbols
assert self.dictionary.bos_index == 0
assert self.tokenizer.id_to_token(0) in {"<BOS>", "<s>"}
assert self.dictionary.pad_index == 1
assert self.tokenizer.id_to_token(1) in {"<PAD>", "<pad>"}
assert self.dictionary.eos_index == 2
assert self.tokenizer.id_to_token(2) in {"<EOS>", "</s>"}
assert self.dictionary.unk_index == 3
assert self.tokenizer.id_to_token(3) in {"<UNK>", "<unk>"}
self.dictionary.pad_to_multiple_(8)
self.output_dictionary = self.dictionary
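        # Illustrative layout of the dictionary built above: ids 0-3 are the
        # usual specials (<s>, <pad>, </s>, <unk>); ids up to max(eod, 3) keep
        # their real token text, and every later slot just stores its own id
        # spelled out as a string ("4", "5", ...), before the vocab is padded
        # to a multiple of 8 -- the shape the few_shot eval logic expects.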
@classmethod
def setup_task(cls, args, **kwargs):
# upgrade old checkpoints
if getattr(args, "exclude_self_target", False):
args.self_target = False
return cls(args)
def load_dataset(
self, split: str, epoch=1, combine=False, **kwargs
) -> MonolingualDataset:
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, valid1, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each process has its own copy of the raw data (likely to be an np.memmap)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
if dataset is None:
raise FileNotFoundError(f"Dataset not found: {split} ({split_path})")
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
use_plasma_view=self.args.use_plasma_view,
split_path=split_path,
plasma_path=self.args.plasma_path,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
fixed_pad_length = None
if self.args.pad_to_fixed_length:
fixed_pad_length = self.args.tokens_per_sample
pad_to_bsz = None
if self.args.pad_to_fixed_bsz:
pad_to_bsz = (
self.args.batch_size_valid if "valid" in split else self.args.batch_size
)
self.datasets[split] = MonolingualDataset(
dataset=dataset,
sizes=dataset.sizes,
src_vocab=self.dictionary,
tgt_vocab=self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=True,
add_bos_token=self.args.add_bos_token,
fixed_pad_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We prepend an eos token to src_tokens
(or bos if `--add-bos-token` is set) and we append a <pad> to target.
This is convenient both for generation with a prefix and LM scoring.
"""
dataset = StripTokenDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
# remove eos from (end of) target sequence
self.source_dictionary.eos(),
)
src_dataset = PrependTokenDataset(
dataset,
token=(
self.source_dictionary.bos()
if getattr(self.args, "add_bos_token", False)
else self.eod
                # In some models we accidentally replaced this with <endoftext/>
                # (id: 4), but it seems they work with eos as well, so we will
                # keep it this way in this experimental task until we figure out
                # a better, more flexible solution.
),
)
src_dataset = MultiplePadDataset(
src_dataset, pad_idx=self.source_dictionary.pad(), multiple=8
)
tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad())
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": src_dataset,
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": MultiplePadDataset(
tgt_dataset, pad_idx=self.source_dictionary.pad(), multiple=8
),
},
sizes=[np.array(src_lengths)],
)
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
# Generation will always be conditioned on bos_token
if getattr(self.args, "add_bos_token", False):
bos_token = self.source_dictionary.bos()
else:
bos_token = self.source_dictionary.eos()
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the language_modeling task is not supported"
)
# SequenceGenerator doesn't use src_tokens directly, we need to
# pass the `prefix_tokens` argument instead
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(bos_token).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
# ensures that every evaluated token has access to a context of at least
# this size, if possible
context_window: int = 0,
):
if context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=self.args.tokens_per_sample,
context_window=context_window,
pad_idx=self.source_dictionary.pad(),
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
)
@property
def source_dictionary(self):
"""Return the :class:`~metaseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~metaseq.data.Dictionary` for the language
model."""
return self.output_dictionary
|
flash_metaseq-main
|
metaseq/tasks/language_modeling_inference_for_models_trained_with_streaming.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from omegaconf import II
from metaseq import utils
from metaseq.data import (
AppendTokenDataset,
Dictionary,
IdDataset,
LMContextWindowDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
SortDataset,
TokenBlockDataset,
data_utils,
)
from metaseq.data.indexed_dataset import get_available_dataset_impl
from metaseq.data.shorten_dataset import maybe_shorten_dataset
from metaseq.dataclass import ChoiceEnum, MetaseqDataclass
from metaseq.tasks import LegacyTask, register_task
try:
from tokenizers import ByteLevelBPETokenizer
has_hf_tokenizers = True
except ImportError:
has_hf_tokenizers = False
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
logger = logging.getLogger(__name__)
@dataclass
class LanguageModelingConfig(MetaseqDataclass):
data: Optional[str] = field(
default=None, metadata={"help": "path to data directory"}
)
# Begin args from StreamingLanguageModelingConfig
vocab_filename: Optional[str] = field(
default="", metadata={"help": "path to bpe-vocab.json"}
)
merges_filename: Optional[str] = field(
default="", metadata={"help": "path to bpe-merges.txt"}
)
end_of_document_symbol: Optional[str] = field(
default="</s>", metadata={"help": "symbol indicating an end-of-document"}
)
final_vocab_size: Optional[int] = field(
default=None, metadata={"help": "force vocab size to this"}
) # End of args from StreamingLanguageModelingConfig
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="none",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
add_bos_token: bool = field(
default=False, metadata={"help": "prepend beginning of sentence token (<s>)"}
)
max_source_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the target sequence"}
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
pad_to_fixed_length: Optional[bool] = field(
default=False,
metadata={"help": "pad to fixed length"},
)
pad_to_fixed_bsz: Optional[bool] = field(
default=False,
metadata={"help": "boolean to pad to fixed batch size"},
)
shuffle_docs: bool = field(
default=False,
metadata={
"help": "Only for sample break mode EOS, first shuffle docs and then create blocks"
},
)
# TODO common vars below add to parent
seed: int = II("common.seed")
batch_size: Optional[int] = II("dataset.batch_size")
batch_size_valid: Optional[int] = II("dataset.batch_size_valid")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
data_buffer_size: int = II("dataset.data_buffer_size")
use_plasma_view: bool = II("common.use_plasma_view")
plasma_path: str = II("common.plasma_path")
# TODO(susanz): Deprecate this task. This pre-date StreamingLanguageModelingTask,
# when tokenization happened offline.
@register_task("language_modeling", dataclass=LanguageModelingConfig)
class LanguageModelingTask(LegacyTask):
"""
Train a language model. To be deprecated.
"""
def __init__(self, args):
super().__init__(args)
if not has_hf_tokenizers:
raise ImportError("Please install tokenizers with: pip install tokenizers")
self.tokenizer = ByteLevelBPETokenizer.from_file(
args.vocab_filename, args.merges_filename
)
self.eod = self.tokenizer.token_to_id(args.end_of_document_symbol)
if self.eod is None:
# This will be executed for old models that do not have the args.end_of_document_symbol explicitly set
            # and do not use </s> (the default) but <EOS>
self.eod = self.tokenizer.token_to_id("<EOS>")
assert (
self.eod is not None
), "Cannot find end-of-document symbol ({}) in tokenizer".format(
args.end_of_document_symbol
)
# construct a dummy metaseq Dictionary corresponding to the given tokenizer
self.dictionary = Dictionary()
tok_vocab_size = self.tokenizer.get_vocab_size()
for id in range(self.dictionary.nspecial, tok_vocab_size):
self.dictionary.add_symbol(self.tokenizer.id_to_token(id))
final_vocab_size = args.final_vocab_size
# final_vocab_size = 51200 for roberta dictionary
if final_vocab_size is not None:
if final_vocab_size < tok_vocab_size:
raise ValueError(
f"incompatible: {final_vocab_size}, tok_vocab_size: {tok_vocab_size}"
)
self.dictionary.pad_to_multiple_(final_vocab_size)
else:
self.dictionary.pad_to_multiple_(8)
# confirm that metaseq dictionary and BPE have matching special symbols
assert self.dictionary.bos_index == 0
assert self.tokenizer.id_to_token(0) in {"<BOS>", "<s>"}
assert self.dictionary.pad_index == 1
assert self.tokenizer.id_to_token(1) in {"<PAD>", "<pad>"}
assert self.dictionary.eos_index == 2
assert self.tokenizer.id_to_token(2) in {"<EOS>", "</s>"}
assert self.dictionary.unk_index == 3
assert self.tokenizer.id_to_token(3) in {"<UNK>", "<unk>"}
@classmethod
def setup_task(cls, args, **kwargs):
return cls(args)
def load_dataset(self, split: str, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, valid1, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each process has its own copy of the raw data (likely to be an np.memmap)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
if dataset is None:
raise FileNotFoundError(f"Dataset not found: {split} ({split_path})")
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
if self.args.shuffle_docs:
assert (
self.args.sample_break_mode == "none"
), "shuffle docs is only for sample break mode none"
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode="complete_doc",
include_targets=False,
use_plasma_view=self.args.use_plasma_view,
split_path=split_path,
plasma_path=self.args.plasma_path,
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
dataset = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
use_plasma_view=self.args.use_plasma_view,
split_path=split_path,
plasma_path=self.args.plasma_path,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
fixed_pad_length = None
if self.args.pad_to_fixed_length:
fixed_pad_length = self.args.tokens_per_sample
pad_to_bsz = None
if self.args.pad_to_fixed_bsz:
pad_to_bsz = (
self.args.batch_size_valid if "valid" in split else self.args.batch_size
)
self.datasets[split] = MonolingualDataset(
dataset=dataset,
sizes=dataset.sizes,
src_vocab=self.dictionary,
tgt_vocab=self.dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=True,
add_bos_token=self.args.add_bos_token,
fixed_pad_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We prepend an eos token to src_tokens
(or bos if `--add-bos-token` is set) and we append a <pad> to target.
This is convenient both for generation with a prefix and LM scoring.
"""
dataset = StripTokenDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
# remove eos from (end of) target sequence
self.source_dictionary.eos(),
)
src_dataset = PrependTokenDataset(
dataset,
token=(
self.source_dictionary.bos()
if getattr(self.args, "add_bos_token", False)
else self.source_dictionary.eos()
),
)
tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad())
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(
tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False
),
},
sizes=[np.array(src_lengths)],
)
def inference_step(self, generator, models, sample, prefix_tokens=None, **kwargs):
with torch.no_grad():
# Generation will always be conditioned on bos_token
if getattr(self.args, "add_bos_token", False):
bos_token = self.source_dictionary.bos()
else:
bos_token = self.source_dictionary.eos()
# SequenceGenerator doesn't use src_tokens directly, we need to
# pass the `prefix_tokens` argument instead
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(bos_token).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
bos_token=bos_token,
**kwargs,
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
# ensures that every evaluated token has access to a context of at least
# this size, if possible
context_window: int = 0,
):
if context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=self.args.tokens_per_sample,
context_window=context_window,
pad_idx=self.source_dictionary.pad(),
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
|
flash_metaseq-main
|
metaseq/tasks/language_modeling.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import warnings
from argparse import Namespace
from typing import Any, Callable, Dict, List
import torch
from omegaconf import DictConfig
from metaseq import metrics, search, tokenizer, utils
from metaseq.data import Dictionary, BaseDataset, data_utils, encoders, iterators
from metaseq.dataclass import MetaseqDataclass
from metaseq.dataclass.utils import gen_parser_from_dataclass
logger = logging.getLogger(__name__)
class StatefulContainer(object):
_state: Dict[str, Any] = dict()
_factories: Dict[str, Callable[[], Any]] = dict()
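    # NOTE: _state and _factories are class-level attributes, so they are shared
    # across StatefulContainer instances unless rebound per instance;
    # merge_state_dict() and lazy attribute access mutate these dicts in place.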
def add_factory(self, name, factory: Callable[[], Any]):
self._factories[name] = factory
def merge_state_dict(self, state_dict: Dict[str, Any]):
self._state.update(state_dict)
@property
def state_dict(self) -> Dict[str, Any]:
return self._state
def __getattr__(self, name):
if name not in self._state and name in self._factories:
self._state[name] = self._factories[name]()
if name in self._state:
return self._state[name]
raise AttributeError(f"Task state has no factory for attribute {name}")
class BaseTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
Tasks have limited statefulness. In particular, state that needs to be
saved to/loaded from checkpoints needs to be stored in the `self.state`
:class:`StatefulContainer` object. For example::
self.state.add_factory("dictionary", self.load_dictionary)
print(self.state.dictionary) # calls self.load_dictionary()
This is necessary so that when loading checkpoints, we can properly
recreate the task state after initializing the task instance.
"""
@classmethod
def add_args(cls, parser):
"""Add task-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@staticmethod
def logging_outputs_can_be_summed(criterion) -> bool:
"""
Whether the logging outputs returned by `train_step` and `valid_step` can
be summed across workers prior to calling `aggregate_logging_outputs`.
        Setting this to True will improve distributed training speed.
"""
return criterion.logging_outputs_can_be_summed()
cfg: MetaseqDataclass
datasets: Dict[str, BaseDataset]
dataset_to_epoch_iter: Dict[BaseDataset, Any]
state: StatefulContainer = None
def __init__(self, cfg: MetaseqDataclass, **kwargs):
self.cfg = cfg
self.datasets = dict()
self.dataset_to_epoch_iter = dict()
self.state = StatefulContainer()
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, cfg: DictConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (omegaconf.DictConfig): parsed command-line arguments
"""
return cls(cfg, **kwargs)
def load_dataset(
self,
split: str,
combine: bool = False,
task_cfg: MetaseqDataclass = None,
**kwargs,
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
combine (bool): combines a split segmented into pieces into one dataset
task_cfg (MetaseqDataclass): optional task configuration stored in the checkpoint that can be used
to load datasets
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~metaseq.data.BaseDataset` corresponding to *split*
"""
from metaseq.data import BaseDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], BaseDataset):
raise TypeError("Datasets are expected to be of type BaseDataset")
return self.datasets[split]
def filter_indices_by_size(
self, indices, dataset, max_positions=None, ignore_invalid_inputs=False
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~metaseq.data.BaseDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
if len(ignored) > 0:
if not ignore_invalid_inputs:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
logger.warning(
(
"{:,} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def get_batch_iterator(
self,
dataset: BaseDataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
batch_by_size=True,
skip_remainder_batch=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~metaseq.data.BaseDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator
(default: False).
batch_by_size (bool, optional):
batch sequences of similar length together to reduce padding.
If false, each batch will be of size max_sentences.
skip_remainder_batch (bool, optional): if set, discard the last
batch in each training epoch, as the last batch is often smaller than
                local_batch_size * distributed_world_size (default: ``False``).
Returns:
~metaseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
if not disable_iterator_cache and dataset in self.dataset_to_epoch_iter:
logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch))
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, BaseDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
if batch_by_size:
# create mini-batches with given size constraints
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
else:
assert (
max_sentences is not None
), "If batch_by_size=False, max_sentences must be passed. Got None"
starts = indices[::max_sentences]
batch_sampler = [indices[s : s + max_sentences] for s in starts]
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
skip_remainder_batch=skip_remainder_batch,
)
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
def build_model(self, cfg: MetaseqDataclass):
"""
Build the :class:`~metaseq.models.BaseModel` instance for this
task.
Args:
cfg (MetaseqDataclass): configuration object
Returns:
a :class:`~metaseq.models.BaseModel` instance
"""
from metaseq import models
model = models.build_model(cfg, self)
return model
def build_criterion(self, cfg: DictConfig):
"""
Build the :class:`~metaseq.criterions.BaseCriterion` instance for
this task.
Args:
            cfg (omegaconf.DictConfig): configuration object
Returns:
a :class:`~metaseq.criterions.BaseCriterion` instance
"""
from metaseq import criterions
return criterions.build_criterion(cfg, self)
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if getattr(args, "score_reference", False):
from metaseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_vocab_dist=getattr(args, "compute_vocab_dist", False),
)
from metaseq.sequence_generator import SequenceGenerator
# Choose search strategy.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
if seq_gen_cls is None:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
temperature=getattr(args, "temperature", 1.0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~metaseq.data.BaseDataset`.
model (~metaseq.models.BaseModel): the model
criterion (~metaseq.criterions.BaseCriterion): the criterion
optimizer (~metaseq.optim.BaseOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def optimizer_step(self, optimizer, model, update_num):
optimizer.step()
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
raise NotImplementedError
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
def begin_epoch(self, epoch, model):
"""Hook function called before the start of each epoch."""
pass
def begin_valid_epoch(self, epoch, model):
"""Hook function called before the start of each validation epoch."""
pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
"""[deprecated] Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
with metrics.aggregate() as agg:
self.reduce_metrics(logging_outputs, criterion)
return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = BaseTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs)
def state_dict(self):
if self.state is not None:
return self.state.state_dict
return {}
def load_state_dict(self, state_dict: Dict[str, Any]):
if self.state is not None:
self.state.merge_state_dict(state_dict)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~metaseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~metaseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
def build_tokenizer(self, args):
"""Build the pre-tokenizer for this task."""
return encoders.build_tokenizer(args)
def build_bpe(self, args):
"""Build the tokenizer for this task."""
return encoders.build_bpe(args)
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
tokens = [
self.source_dictionary.encode_line(
encode_fn(src_str), add_if_not_exist=False
).long()
for src_str in lines
]
lengths = [t.numel() for t in tokens]
return tokens, lengths
class LegacyTask(BaseTask):
def __init__(self, args: Namespace):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def setup_task(cls, args: Namespace, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def build_model(self, args: Namespace):
"""
Build the :class:`~metaseq.models.BaseModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~metaseq.models.BaseModel` instance
"""
from metaseq import models
model = models.build_model(args, self)
return model
def build_criterion(self, args: Namespace):
"""
Build the :class:`~metaseq.criterions.BaseCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~metaseq.criterions.BaseCriterion` instance
"""
from metaseq import criterions
return criterions.build_criterion(args, self)
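# Illustrative sketch (not part of metaseq): how the lazy state container above
# is meant to be used by a task. The factory runs on first attribute access and
# its result is cached in state_dict, which is what gets checkpointed.
#
#     from metaseq.tasks.base_task import StatefulContainer
#
#     state = StatefulContainer()
#     state.add_factory("dictionary", lambda: {"<pad>": 1})  # stand-in factory
#     state.dictionary    # invokes the factory once and caches the result
#     state.state_dict    # -> {"dictionary": {"<pad>": 1}}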
|
flash_metaseq-main
|
metaseq/tasks/base_task.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Streaming Language Modeling task that loads corpora in plaintext and performs
on-the-fly tokenization.
"""
import logging
import os
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
import numpy as np
import torch
from omegaconf import II
from metaseq.data import (
Dictionary,
JsonlDataset,
PartitionedStreamingDataset,
ResamplingDataset,
StreamingShuffleDataset,
StreamingTokenBlockDataset,
StreamingSrcTgtDataset,
data_utils,
iterators,
)
from metaseq.dataclass import MetaseqDataclass
from metaseq.tasks import LegacyTask, register_task
try:
from tokenizers import ByteLevelBPETokenizer
has_hf_tokenizers = True
except ImportError:
has_hf_tokenizers = False
logger = logging.getLogger(__name__)
DEFAULT_MULTICORPUS_MAX = -1
@dataclass
class StreamingLanguageModelingConfig(MetaseqDataclass):
data: Optional[str] = field(
default=None, metadata={"help": "path to data directory with JSONL files"}
)
vocab_filename: Optional[str] = field(
default="", metadata={"help": "path to bpe-vocab.json"}
)
merges_filename: Optional[str] = field(
default="", metadata={"help": "path to bpe-merges.txt"}
)
end_of_document_symbol: Optional[str] = field(
default="</s>", metadata={"help": "symbol indicating an end-of-document"}
)
sample_break_mode: Optional[str] = field(
default="none",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
max_source_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the target sequence"}
)
final_vocab_size: Optional[int] = field(
default=None, metadata={"help": "force vocab size to this"}
)
multicorpus_sampling_alpha: Optional[float] = field(
default=1.0,
metadata={
"help": "smoothing alpha for sample rations across multiple datasets"
},
)
multicorpus_sampling_maximum: Optional[float] = field(
default=DEFAULT_MULTICORPUS_MAX,
metadata={"help": "Maximum size for example proportional sampling"},
)
# TODO common vars below add to parent
seed: int = II("common.seed")
batch_size: Optional[int] = II("dataset.batch_size")
batch_size_valid: Optional[int] = II("dataset.batch_size_valid")
data_buffer_size: int = II("dataset.data_buffer_size")
update_freq: List[int] = II("optimization.update_freq")
@register_task("streaming_language_modeling", dataclass=StreamingLanguageModelingConfig)
class StreamingLanguageModelingTask(LegacyTask):
"""
Train a language model on a stream of data. Currently we assume the stream
is in JSONL format and we tokenize inputs on-the-fly.
Note that we append an end-of-document symbol to the end of each document.
Args:
tokenizer (tokenizers.ByteLevelBPETokenizer): the BPE tokenizer to use
"""
def __init__(self, args):
super().__init__(args)
if not has_hf_tokenizers:
raise ImportError("Please install tokenizers with: pip install tokenizers")
self.tokenizer = ByteLevelBPETokenizer.from_file(
args.vocab_filename, args.merges_filename
)
if max(args.update_freq) > 1:
raise NotImplementedError(
"--update-freq is not compatible with StreamingLanguageModelingTask"
)
self.eod = self.tokenizer.token_to_id(args.end_of_document_symbol)
if self.eod is None:
# This will be executed for old models that do not have the args.end_of_document_symbol explicitly set
            # and do not use </s> (the default) but <EOS>
self.eod = self.tokenizer.token_to_id("<EOS>")
assert (
self.eod is not None
), "Cannot find end-of-document symbol ({}) in tokenizer".format(
args.end_of_document_symbol
)
# construct a dummy metaseq Dictionary corresponding to the given tokenizer
self.dictionary = Dictionary()
tok_vocab_size = self.tokenizer.get_vocab_size()
for id in range(self.dictionary.nspecial, tok_vocab_size):
self.dictionary.add_symbol(self.tokenizer.id_to_token(id))
final_vocab_size = args.final_vocab_size
# final_vocab_size = 51200 for roberta dictionary
if final_vocab_size is not None:
if final_vocab_size < tok_vocab_size:
raise ValueError(
f"incompatible: {final_vocab_size}, tok_vocab_size: {tok_vocab_size}"
)
self.dictionary.pad_to_multiple_(final_vocab_size)
else:
self.dictionary.pad_to_multiple_(8)
# confirm that metaseq dictionary and BPE have matching special symbols
assert self.dictionary.bos_index == 0
assert self.tokenizer.id_to_token(0) in {"<BOS>", "<s>"}
assert self.dictionary.pad_index == 1
assert self.tokenizer.id_to_token(1) in {"<PAD>", "<pad>"}
assert self.dictionary.eos_index == 2
assert self.tokenizer.id_to_token(2) in {"<EOS>", "</s>"}
assert self.dictionary.unk_index == 3
assert self.tokenizer.id_to_token(3) in {"<UNK>", "<unk>"}
@classmethod
def setup_task(cls, args, **kwargs):
return cls(args)
def _tokenize_one_json(self, json):
text = json["text"]
return torch.LongTensor(
# append an end-of-document symbol after each document
self.tokenizer.encode(text.rstrip()).ids
+ [self.eod]
)
def _get_sample_prob(self, dataset_lens):
"""
        Get smoothed sampling probability by corpus. This helps small corpora by upsampling them.
"""
if self.args.multicorpus_sampling_maximum == DEFAULT_MULTICORPUS_MAX:
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob**self.args.multicorpus_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
else:
dataset_lens = [
min(l, self.args.multicorpus_sampling_maximum) for l in dataset_lens
]
            # cast back to an array so the element-wise division works
            smoothed_prob = np.array(dataset_lens) / sum(dataset_lens)
return smoothed_prob
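    # Worked example of the alpha branch above: with corpus sizes [100, 10000]
    # and multicorpus_sampling_alpha = 0.5, the raw proportions are roughly
    # [0.01, 0.99]; raising them to the 0.5 power and renormalising gives about
    # [0.09, 0.91], i.e. the small corpus is upsampled from ~1% to ~9% of samples.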
def _alpha_sampling(self, datasets, corpora, epoch=1):
"""
Up or down sample corpora with alpha sampling.
"""
dataset_lengths = np.array(
[len(d) for d in datasets],
dtype=float,
)
logger.info(f"loaded total {dataset_lengths.sum()} blocks for all corpora")
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info(
"Sample probability by corpus: %s",
{
corpus: "{0:.4f}".format(sample_probs[id])
for id, corpus in enumerate(corpora)
},
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
# TODO: add an option for shrinking all size ratios to below 1
# if self.args.multicorpus_sampling_alpha != 1:
# size_ratio /= size_ratio.max()
# Fix numeric errors in size ratio computation
# 0.999999999999999999 -> 1
# 1.000000000000000002 -> 1
for i in range(len(size_ratio)):
size_ratio[i] = round(size_ratio[i], 8)
logger.info(
"Up/Down Sampling ratio by corpus: %s",
{
corpus: "{0:.2f}".format(size_ratio[id])
for id, corpus in enumerate(corpora)
},
)
logger.info(
"Actual dataset size by corpus: %s",
{
corpus: "{0:.2f}".format(len(datasets[id]))
for id, corpus in enumerate(corpora)
},
)
resampled_datasets = [
ResamplingDataset(
datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] > 1.0,
)
for i, d in enumerate(datasets)
]
# TODO: estimate the actual steps or tokens seen in training before launching experiments.
logger.info(
"Resampled dataset size by corpus: %s",
{
corpus: "{0:.2f}".format(len(resampled_datasets[id]))
for id, corpus in enumerate(corpora)
},
)
return resampled_datasets
def get_shard_str(self, epoch, split):
shards = {}
for shard_id in os.listdir(os.path.join(self.args.data, split)):
assert (
int(shard_id) not in shards
), f"shard id: {shard_id} not in shards: {shards}"
shards[int(shard_id)] = shard_id
assert min(shards.keys()) == 0
assert max(shards.keys()) == len(shards) - 1
cur_shard_str = shards[(epoch - 1) % len(shards)]
return cur_shard_str
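    # Example of the shard rotation above: with shard directories
    # /path/to/data/train/00 and /path/to/data/train/01, epoch 1 maps to "00",
    # epoch 2 to "01", and epoch 3 wraps back to "00" ((epoch - 1) % 2).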
def load_dataset(self, split: str, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
The folder structure is assumed to look like:
/path/to/data/train/00/foo.jsonl
/path/to/data/train/00/bar.jsonl
/path/to/data/train/01/foo.jsonl
/path/to/data/train/01/bar.jsonl
/path/to/data/valid/00/foo.jsonl
/path/to/data/valid/00/bar.jsonl
In this example, we have two "shards" of training data, which will be
iterated over in epochs 1 and 2, respectively. Subsequent epochs will
cycle back over the same data. We also have two different data sources
in each shard (foo and bar), which will be combined and shuffled.
Args:
split (str): name of the split (e.g., train, valid, valid1, test)
"""
# This function reads a bunch of jsonl files, concats them together,
# shuffles them, then chunks them into blocks of tokens (e.g., 2048).
# determine number of shards for this split
cur_shard_str = self.get_shard_str(epoch, split)
# concatenate any jsonl files that are part of the shard
datasets, corpora = [], []
for file in sorted(
os.listdir(os.path.join(self.args.data, split, cur_shard_str))
):
if not file.endswith(".jsonl"):
continue
datasets.append(
JsonlDataset(
path=os.path.join(self.args.data, split, cur_shard_str, file),
tokenizer=self._tokenize_one_json,
)
)
corpora.append(os.path.splitext(file)[0])
assert len(datasets) > 0
if self.args.multicorpus_sampling_alpha != 1:
datasets = self._alpha_sampling(datasets, corpora, epoch)
dataset = torch.utils.data.ConcatDataset(datasets)
# shuffle order across epochs
dataset = StreamingShuffleDataset(dataset, seed=self.args.seed)
# chunk into blocks of tokens
self.datasets[split] = StreamingTokenBlockDataset(
dataset,
# We generate blocks with one extra token, so that we have a target
# for the final input token. This results in slight data loss.
block_size=self.args.tokens_per_sample + 1,
break_mode=self.args.sample_break_mode,
# we drop the remainder block during training
drop_last=(split == "train"),
padding_idx=self.source_dictionary.pad(),
# 1284 is a randomly-generated offset to decouple the seed used here
# from the seed used above in StreamingShuffleDataset
seed=1284 + self.args.seed,
)
def _collate_fn(self, items: List[Dict[str, Any]]):
# StreamingTokenBlockDataset returns None as filler
if len([x for x in items if x is not None]) == 0:
return {}
tokens = data_utils.collate_tokens(
[x["block"] for x in items if x is not None],
pad_idx=self.source_dictionary.pad(),
pad_to_bsz=self.args.batch_size,
)
# generate inputs and targets
input = tokens[:, :-1].contiguous()
target = tokens[:, 1:].contiguous()
ids = torch.cat([x["ids"] for x in items if x is not None])
if ids.numel() != torch.unique(ids).numel():
n_duplicate = ids.numel() - torch.unique(ids).numel()
logger.error(
f"found {n_duplicate}/{ids.numel()} duplicate document IDs in the same batch!"
)
# metaseq expects batches to have the following structure
return {
"id": ids,
"net_input": {
"src_tokens": input,
},
"target": target,
"nsentences": input.size(0),
"ntokens": input.ne(self.dictionary.pad()).sum(),
}
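    # Editorial sketch (not part of the original metaseq source): the one-token
    # shift in _collate_fn is standard next-token-prediction batching. Each
    # block of length tokens_per_sample + 1 yields an input and a target of
    # length tokens_per_sample, with target[t] == input[t + 1]:
    #
    #   >>> import torch
    #   >>> tokens = torch.tensor([[5, 7, 9, 2]])   # toy block of 4 tokens
    #   >>> tokens[:, :-1], tokens[:, 1:]
    #   (tensor([[5, 7, 9]]), tensor([[7, 9, 2]]))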
def dataset(self, split):
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
batch_by_size=True,
skip_remainder_batch=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (torch.utils.data.Dataset): dataset to batch
max_sentences (int, optional): max number of sentences in each
batch (default: None).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator
(default: False).
batch_by_size (bool, optional):
batch sequences of similar length together to reduce padding.
If false, each batch will be of size max_sentences.
skip_remainder_batch (bool, optional): if set, discard the last
batch in each training epoch, as the last batch is often smaller
                than local_batch_size * distributed_world_size (default: ``False``).
Returns:
~metaseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
assert max_tokens is None
# Up to this point, we have shuffled documents, flattened them into a 1D
# tensor, then chunked into token blocks. But if documents are long, then
# adjacent blocks may be from a single document, and naively distributed
# sequential blocks to GPUs may cause entire updates to be dominated by a
# handful of unique documents. Instead we have a readahead buffer that
# reads in 10 full batches of data and shuffles sequences across them,
# thus increasing randomness. This assumes that no single document spans
# 10 full batches, which is reasonable when batch sizes are in the
# millions and documents are on average much smaller.
assert isinstance(dataset, StreamingTokenBlockDataset) or isinstance(
dataset, StreamingSrcTgtDataset
)
shuffle_buffer_size = 10 * max_sentences * num_shards
logger.info(f"setting shuffle buffer size to {shuffle_buffer_size}")
dataset.set_shuffle_buffer_size(shuffle_buffer_size)
# partition dataset across data parallel workers
dataset = PartitionedStreamingDataset(
dataset,
num_shards=num_shards,
shard_id=shard_id,
drop_last=skip_remainder_batch,
)
# create a stateful/checkpointable iterator for the current data
# parallel worker
return iterators.StreamingEpochBatchIterator(
dataset=dataset,
batch_size=max_sentences,
collate_fn=self._collate_fn,
drop_last=skip_remainder_batch,
num_workers=num_workers,
epoch=epoch,
)
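    # Editorial sketch (not part of the original metaseq source): a worked
    # example of the readahead buffer sizing above. With max_sentences
    # (per-GPU batch size) = 4 and num_shards (data parallel workers) = 8,
    # sequences are shuffled across 10 * 4 * 8 = 320 blocks before being
    # partitioned across workers, so adjacent blocks from one long document
    # rarely land in the same update.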
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
|
flash_metaseq-main
|
metaseq/tasks/streaming_language_modeling.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from metaseq.dataclass import MetaseqDataclass
from metaseq.dataclass.utils import merge_with_parent, populate_dataclass
from hydra.core.config_store import ConfigStore
from .base_task import BaseTask, LegacyTask # noqa
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(cfg: MetaseqDataclass, **kwargs):
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
# legacy tasks
task = TASK_REGISTRY[task_name]
if task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = populate_dataclass(dc(), cfg)
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert (
task is not None
), f"Could not infer task type from {cfg}. Available tasks: {TASK_REGISTRY.keys()}"
return task.setup_task(cfg, **kwargs)
def register_task(name, dataclass=None):
"""
New tasks can be added to metaseq with the
:func:`~metaseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(BaseTask):
(...)
.. note::
All Tasks must implement the :class:`~metaseq.tasks.BaseTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError("Cannot register duplicate task ({})".format(name))
if not issubclass(cls, BaseTask):
raise ValueError(
"Task ({}: {}) must extend BaseTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError(
"Cannot register task with duplicate class name ({})".format(
cls.__name__
)
)
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
if dataclass is not None and not issubclass(dataclass, MetaseqDataclass):
raise ValueError(
"Dataclass {} must extend MetaseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
TASK_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="task", node=node, provider="metaseq")
return cls
return register_task_cls
def get_task(name):
return TASK_REGISTRY[name]
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("metaseq.tasks." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group("Additional command-line arguments")
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
|
flash_metaseq-main
|
metaseq/tasks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import signal
import threading
from torch import nn
logger = logging.getLogger(__name__)
class DistributedTimeoutWrapper(nn.Module):
"""
A wrapper that kills the process if no progress is made within a given
*timeout*. The timer is reset every time :func:`forward` is called.
Usage::
module = DistributedTimeoutWrapper(module, timeout=30)
x = module(input)
time.sleep(20) # safe
x = module(input)
time.sleep(45) # job will be killed before this returns
Args:
module (nn.Module): module to wrap
timeout (int): number of seconds before killing the process
(set to a value <= 0 to disable the timeout)
signal (Optional): signal to send once timeout is triggered
"""
def __init__(self, module: nn.Module, timeout: int, signal=signal.SIGINT):
super().__init__()
self.module = module
self.timeout = timeout
self.signal = signal
if timeout > 0:
self._heartbeat = threading.Event()
self._heartbeat_thread = threading.Thread(
target=self._check_heartbeat,
args=(os.getpid(),),
daemon=True,
)
self._heartbeat_thread.start()
self._terminated = False
else:
self._heartbeat = None
self._heartbeat_thread = None
def __del__(self):
self.stop_timeout()
def __getattr__(self, name):
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.module, name)
def stop_timeout(self):
if self._heartbeat_thread is not None:
self._terminated = True
self._heartbeat_thread.join()
def state_dict(self, *args, **kwargs):
return self.module.state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return self.module.load_state_dict(*args, **kwargs)
def forward(self, *args, **kwargs):
if self._heartbeat is not None:
self._heartbeat.set()
return self.module(*args, **kwargs)
def _check_heartbeat(self, parent_pid):
self._heartbeat.wait() # wait for the first forward pass
while True:
self._heartbeat.clear()
success = self._heartbeat.wait(timeout=self.timeout)
if self._terminated:
break
elif not success:
logger.error(
(
"Killing job for not making progress in {} seconds. "
"Set --heartbeat-timeout=-1 to disable this timeout."
).format(int(self.timeout))
)
os.kill(parent_pid, self.signal)
return
|
flash_metaseq-main
|
metaseq/distributed/distributed_timeout_wrapper.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
from typing import Optional
import torch
from metaseq.dataclass.configs import DistributedTrainingConfig
from metaseq.distributed import utils as dist_utils
logger = logging.getLogger(__name__)
try:
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.utils.testing import DummyProcessGroup
has_FSDP = True
except ImportError:
FSDP = torch.nn.Module
has_FSDP = False
class FullyShardedDataParallel(FSDP):
"""
A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some
metaseq-specific checkpoint saving/loading logic.
Args:
use_sharded_state (bool): if True, then ``state_dict`` will return
``FSDP.local_state_dict`` and ``load_state_dict`` will call
``FSDP.load_local_state_dict``. Otherwise, ``state_dict`` will
return the full model weights on data parallel rank 0 (empty on
other ranks) and ``load_state_dict`` will broadcast model weights
from rank 0 to other ranks.
"""
def __init__(self, *args, use_sharded_state: bool = False, **kwargs):
if not has_FSDP:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
super().__init__(*args, **kwargs)
self.use_sharded_state = use_sharded_state
@property
def unwrapped_module(self) -> torch.nn.Module:
if self.flatten_parameters:
return self.module.module
else:
return self.module
def state_dict(self, destination=None, prefix="", keep_vars=False):
if self.use_sharded_state:
return super().local_state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
else:
if self.rank == 0:
return super().state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
else:
# We must call state_dict() due to use of communication
# primitives. But we don't use the result.
super().state_dict()
return destination or {}
def load_state_dict(self, state_dict, strict=True, model_cfg=None):
if self.use_sharded_state:
return super().load_local_state_dict(state_dict, strict=strict)
else:
if not isinstance(self.process_group, DummyProcessGroup):
state_dict = dist_utils.broadcast_object(
state_dict, src_rank=0, group=self.process_group
)
return super().load_state_dict(state_dict, strict=strict)
@contextlib.contextmanager
def fsdp_enable_wrap(
cfg: DistributedTrainingConfig, use_sharded_state: bool = False, **kwargs
):
try:
from fairscale.nn import enable_wrap
except ImportError:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
if cfg.memory_efficient_fp16:
assert cfg.fp16 # memory_efficient_fp16 should imply fp16
group = dist_utils.get_data_parallel_group()
if group is None and cfg.distributed_world_size == 1:
group = DummyProcessGroup(rank=0, size=1)
if cfg.fp16:
compute_dtype = torch.bfloat16 if cfg.bf16 else torch.float16
else:
compute_dtype = torch.float32
fsdp_config = {
"process_group": group,
"reshard_after_forward": not cfg.no_reshard_after_forward,
"mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16,
"fp32_reduce_scatter": cfg.fp32_reduce_scatter,
"flatten_parameters": True,
"cpu_offload": cfg.cpu_offload and not cfg.memory_efficient_fp16,
"compute_dtype": compute_dtype,
"bucket_cap_mb": cfg.bucket_cap_mb,
"state_dict_device": torch.device("cpu"),
"gradient_predivide_factor": cfg.gradient_predivide_factor,
**kwargs,
}
with enable_wrap(
wrapper_cls=FullyShardedDataParallel,
use_sharded_state=use_sharded_state,
**fsdp_config,
):
yield
def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs):
"""
Helper to wrap layers/modules in FSDP. This falls back to a no-op if
fairscale is not available.
Args:
module (nn.Module): module to (maybe) wrap
min_num_params (int, Optional): minimum number of layer params to wrap
"""
try:
from fairscale.nn import wrap
if os.environ.get("RESHARD_OVERRIDE_PROCESS_GROUP", "False") == "True":
logger.info("Process group was None, overriding to DummyProcessGroup")
kwargs["process_group"] = DummyProcessGroup(rank=0, size=1)
if min_num_params is not None:
num_params = sum(p.numel() for p in module.parameters())
if num_params >= min_num_params:
return wrap(module, **kwargs)
else:
return module
else:
return wrap(module, **kwargs)
except ImportError:
return module
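# Editorial sketch (not part of the original metaseq source): minimal intended
# usage of the two helpers above. `cfg` is assumed to be a populated
# DistributedTrainingConfig and `build_layer` a user-supplied module
# constructor; both names are placeholders for illustration only.
def _example_fsdp_usage(cfg: DistributedTrainingConfig, build_layer):
    with fsdp_enable_wrap(cfg, use_sharded_state=False):
        # small layers are returned unwrapped; larger ones are sharded
        layer = fsdp_wrap(build_layer(), min_num_params=100_000_000)
    return layer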
|
flash_metaseq-main
|
metaseq/distributed/fully_sharded_data_parallel.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .distributed_timeout_wrapper import DistributedTimeoutWrapper
from .fully_sharded_data_parallel import (
fsdp_enable_wrap,
fsdp_wrap,
FullyShardedDataParallel,
)
from .module_proxy_wrapper import ModuleProxyWrapper
__all__ = [
"DistributedTimeoutWrapper",
"fsdp_enable_wrap",
"fsdp_wrap",
"FullyShardedDataParallel",
"ModuleProxyWrapper",
]
|
flash_metaseq-main
|
metaseq/distributed/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import logging
import os
import pickle
import random
import signal
import socket
import struct
import subprocess
import warnings
from argparse import Namespace
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional
import torch
import torch.distributed as dist
from omegaconf import open_dict
from metaseq.dataclass.configs import DistributedTrainingConfig, MetaseqConfig
# Flag to indicate if we're using Megatron
# NOTE: this is a temporary hack until we move away from Megatron's model parallel init
_USE_MEGATRON = False
logger = logging.getLogger(__name__)
def is_master(cfg: DistributedTrainingConfig):
return cfg.distributed_rank == 0
def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False):
if cfg.distributed_init_method is not None:
return
if cfg.distributed_port > 0:
# we can determine the init method automatically for Slurm
_infer_slurm_init(cfg)
elif all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
# support torch.distributed.launch
_infer_torch_distributed_launch_init(cfg)
elif cfg.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
_infer_single_node_init(cfg)
if not cfg.distributed_no_spawn:
with open_dict(cfg):
cfg.distributed_num_procs = min(
torch.cuda.device_count(), cfg.distributed_world_size
)
def _infer_torch_distributed_launch_init(cfg: DistributedTrainingConfig):
cfg.distributed_init_method = "env://"
cfg.distributed_world_size = int(os.environ["WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
def _infer_slurm_init(cfg: DistributedTrainingConfig):
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
host = hostnames.split()[0].decode("utf-8")
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
# if we're in a container, then maybe MASTER_ADDR is set
host = os.environ.get("MASTER_ADDR", None)
if host is None:
return
cfg.distributed_init_method = "tcp://{host}:{port}".format(
host=host, port=cfg.distributed_port
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get("SLURM_NODEID"))
cfg.distributed_rank = node_id * gpus_per_node
cfg.distributed_world_size = nnodes * gpus_per_node
else:
assert ntasks_per_node == torch.cuda.device_count()
cfg.distributed_world_size = ntasks_per_node * nnodes
cfg.distributed_no_spawn = True
cfg.distributed_rank = int(os.environ.get("SLURM_PROCID"))
cfg.device_id = int(os.environ.get("SLURM_LOCALID"))
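# Editorial sketch (not part of the original metaseq source): a worked example
# of the rank arithmetic above for the one-task-per-node case. With
# SLURM_NNODES=2, SLURM_NTASKS_PER_NODE=1 and 8 visible GPUs per node, the task
# on SLURM_NODEID=1 gets distributed_rank = 1 * 8 = 8 and
# distributed_world_size = 2 * 8 = 16; the processes spawned on that node then
# occupy ranks 8-15.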
def _infer_single_node_init(cfg: DistributedTrainingConfig):
assert (
cfg.distributed_world_size <= torch.cuda.device_count()
), f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices"
port = random.randint(10000, 20000)
cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port)
def distributed_init(cfg: MetaseqConfig):
if isinstance(cfg, Namespace):
from metaseq.dataclass.utils import convert_namespace_to_omegaconf
cfg = convert_namespace_to_omegaconf(cfg)
if torch.distributed.is_available() and torch.distributed.is_initialized():
warnings.warn("Distributed is already initialized, cannot initialize twice!")
else:
logger.info(
"distributed init (rank {}): {}".format(
cfg.distributed_training.distributed_rank,
cfg.distributed_training.distributed_init_method,
)
)
dist.init_process_group(
backend=cfg.distributed_training.distributed_backend,
init_method=cfg.distributed_training.distributed_init_method,
world_size=cfg.distributed_training.distributed_world_size,
rank=cfg.distributed_training.distributed_rank,
)
logger.info(
"initialized host {} as rank {}".format(
socket.gethostname(),
cfg.distributed_training.distributed_rank,
)
)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda())
cfg.distributed_training.distributed_rank = torch.distributed.get_rank()
if is_master(cfg.distributed_training):
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
if cfg.common.model_parallel_size > 1:
try:
from megatron.mpu import (
initialize_model_parallel,
model_parallel_cuda_manual_seed,
)
except ImportError:
raise ImportError(
"\n\nPlease install megatron using the setup instructions!"
)
global _USE_MEGATRON
_USE_MEGATRON = True
initialize_model_parallel(cfg.common.model_parallel_size)
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda(), group=get_model_parallel_group())
model_parallel_cuda_manual_seed(cfg.common.seed)
model_part_number = get_model_parallel_rank()
cfg.checkpoint.checkpoint_suffix += "-model_part-{0}".format(model_part_number)
return cfg.distributed_training.distributed_rank
def distributed_main(i, main, cfg: MetaseqConfig, kwargs):
if not cfg.distributed_training.distributed_no_spawn:
# if in local spawning, i is offset by -1 since torch.multiprocessing.spawn
# always starts at rank 0
i = i + 1
cfg.distributed_training.device_id = i
if torch.cuda.is_available() and not cfg.common.cpu:
torch.cuda.set_device(cfg.distributed_training.device_id)
        # This is a temporary way of keeping Microsoft Tutel happy, as it reads
        # the local rank from the env. To make this work more cleanly, we might
        # need to change their interfaces to be able to pass the local rank.
os.environ["LOCAL_RANK"] = str(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_rank is None:
# start_rank is the rank of gpu 0 on this machine.
cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i
cfg.distributed_training.distributed_rank = distributed_init(cfg)
after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None)
if after_distributed_init_fn:
cfg = after_distributed_init_fn(cfg)
main(cfg, **kwargs)
if torch.distributed.is_initialized():
torch.distributed.barrier(get_global_group())
def _spawn_helper(main, cfg, kwargs):
"""
Perform a fork() to many processes.
Intentionally runs the rank0 in the main process so that signals
can be more easily caught and we can cleanup processes.
"""
# Launch multiple subprocesses
spawncontext = torch.multiprocessing.start_processes(
distributed_main,
# need to give rank offset as 1 to cover the fact that the main
# process is rank 0, but that spawn() doesn't let you control rank:
# it always starts at 0
(main, cfg, kwargs),
nprocs=min(
torch.cuda.device_count(),
cfg.distributed_training.distributed_world_size - 1,
),
join=False,
start_method="spawn",
)
try:
# -1 because we offset by +1 inside distributed_main when using
# spawn_helper
retval = distributed_main(-1, main, cfg, kwargs)
spawncontext.join()
return retval
except (KeyboardInterrupt, Exception):
# weirdly KeyboardInterrupt is not an Exception
# propagate exceptions on the main node by killing workers
for p in spawncontext.processes:
if p.is_alive():
os.kill(p.pid, signal.SIGTERM)
raise
def call_main(cfg: MetaseqConfig, main, **kwargs):
if cfg.distributed_training.distributed_init_method is None:
infer_init_method(cfg.distributed_training)
if cfg.distributed_training.distributed_init_method is not None:
# distributed training
if not cfg.distributed_training.distributed_no_spawn:
start_rank = cfg.distributed_training.distributed_rank
cfg.distributed_training.distributed_rank = None # assign automatically
kwargs["start_rank"] = start_rank
return _spawn_helper(main, cfg, kwargs)
else:
return distributed_main(
cfg.distributed_training.device_id, main, cfg, kwargs
)
else:
# single GPU main
return main(cfg, **kwargs)
def new_groups(grouped_ranks: List[List[int]]):
groups = [dist.new_group(g) for g in grouped_ranks]
my_group_idx = _find_my_group_index(grouped_ranks)
return groups[my_group_idx]
def _find_my_group_index(grouped_ranks):
my_rank = get_global_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def _find_my_group(grouped_ranks):
index = _find_my_group_index(grouped_ranks)
return grouped_ranks[index]
def get_rank(group):
return dist.get_rank(group=group)
def get_world_size(group):
if torch.distributed.is_initialized():
return dist.get_world_size(group=group)
else:
return 1
def get_global_group():
if torch.distributed.is_initialized():
if not hasattr(get_global_group, "_global_group"):
# ideally we could use torch.distributed.group.WORLD, but it seems
# to cause random NCCL hangs in some cases
get_global_group._global_group = dist.new_group()
return get_global_group._global_group
else:
return None
def get_global_rank():
if torch.distributed.is_initialized():
return torch.distributed.get_rank()
else:
return 0
def get_global_world_size():
if torch.distributed.is_initialized():
return torch.distributed.get_world_size()
else:
return 1
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
global _USE_MEGATRON
if _USE_MEGATRON:
from megatron import mpu
return mpu.get_data_parallel_group()
else:
return get_global_group()
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
dp_group = get_data_parallel_group()
if dp_group is not None:
return get_rank(dp_group)
else:
return get_global_rank()
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
dp_group = get_data_parallel_group()
if dp_group is not None:
return get_world_size(dp_group)
else:
return get_global_world_size()
def get_model_parallel_group():
global _USE_MEGATRON
if _USE_MEGATRON:
from megatron import mpu
return mpu.get_tensor_model_parallel_group()
else:
return None
def get_model_parallel_rank():
"""Return my rank for the model parallel group."""
return get_rank(get_model_parallel_group())
def get_model_parallel_world_size():
"""Return world size for the model parallel group."""
return get_world_size(get_model_parallel_group())
def all_reduce(tensor, group, op="sum"):
if op == "sum":
op = dist.ReduceOp.SUM
elif op == "max":
op = dist.ReduceOp.MAX
else:
raise NotImplementedError
dist.all_reduce(tensor, op=op, group=group)
return tensor
def broadcast(tensor, src, group):
dist.broadcast(tensor, src=src, group=group)
def all_to_all(tensor, group):
"""Perform an all-to-all operation on a 1D Tensor."""
assert tensor.dim() == 1
split_count = get_world_size(group=group)
assert tensor.numel() % split_count == 0
output = torch.zeros_like(tensor)
dist.all_to_all_single(output, tensor, group=group)
return output
def all_gather(tensor, group, return_tensor=False):
"""Perform an all-gather operation."""
world_size = get_world_size(group=group)
rank = get_rank(group=group)
tensor_list = [
tensor if i == rank else torch.empty_like(tensor) for i in range(world_size)
]
dist.all_gather(tensor_list, tensor, group=group)
if return_tensor:
return torch.stack(tensor_list, dim=0)
else:
return tensor_list
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable and any CUDA tensors will be moved
to CPU and returned on CPU as well.
Args:
data (Any): data from the local worker to be gathered on other workers
group: group of the collective
max_size (int, optional): maximum size of the data to be gathered
across workers
"""
from metaseq import utils
if group is None:
group = get_global_group()
rank = get_rank(group=group)
world_size = get_world_size(group=group)
buffer_size = max_size * world_size
if (
not hasattr(all_gather_list, "_buffer")
or all_gather_list._buffer.numel() < buffer_size
):
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
data = utils.move_to_cpu(data)
enc = pickle.dumps(data)
enc_size = len(enc)
header_size = 4 # size of header that contains the length of the encoded data
size = header_size + enc_size
if size > max_size:
raise ValueError(
"encoded data size ({}) exceeds max_size ({})".format(size, max_size)
)
header = struct.pack(">I", enc_size)
cpu_buffer[:size] = torch.ByteTensor(list(header + enc))
start = rank * max_size
buffer[start : start + size].copy_(cpu_buffer[:size])
all_reduce(buffer, group=group)
buffer = buffer.cpu()
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size : (i + 1) * max_size]
(enc_size,) = struct.unpack(">I", bytes(out_buffer[:header_size].tolist()))
if enc_size > 0:
result.append(
pickle.loads(
bytes(out_buffer[header_size : header_size + enc_size].tolist())
)
)
return result
except pickle.UnpicklingError:
raise Exception(
"Unable to unpickle data from other workers. all_gather_list requires all "
"workers to enter the function together, so this error usually indicates "
"that the workers have fallen out of sync somehow. Workers can fall out of "
"sync if one of them runs out of memory, or if there are other conditions "
"in your training script that can cause one worker to finish an epoch "
"while other workers are still iterating over their portions of the data. "
"Try rerunning with --ddp-backend=legacy_ddp and see if that helps."
)
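# Editorial sketch (not part of the original metaseq source): gathering a small
# picklable payload (e.g. per-rank statistics) from every worker. Assumes the
# process group is already initialized; `stats` is a placeholder name.
def _example_gather_stats(stats: dict):
    # one entry per rank, ordered by rank
    all_stats = all_gather_list(stats, group=get_global_group())
    return {rank: s for rank, s in enumerate(all_stats)}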
def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]:
"""
AllReduce a dictionary of values across workers. We separately
reduce items that are already on the device and items on CPU for
better performance.
Args:
data (Mapping[str, Any]): dictionary of data to all-reduce, but
cannot be a nested dictionary
device (torch.device): device for the reduction
group: group of the collective
"""
data_keys = list(data.keys())
# We want to separately reduce items that are already on the
# device and items on CPU for performance reasons.
cpu_data = OrderedDict()
device_data = OrderedDict()
for k in data_keys:
t = data[k]
if not torch.is_tensor(t):
cpu_data[k] = torch.tensor(t, dtype=torch.double)
elif t.device.type != device.type:
cpu_data[k] = t.to(dtype=torch.double)
else:
device_data[k] = t.to(dtype=torch.double)
def _all_reduce_dict(data: OrderedDict):
if len(data) == 0:
return data
buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device)
all_reduce(buf, group=group)
split_buf = torch.split(buf, [t.numel() for t in data.values()])
reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())]
return OrderedDict(zip(data.keys(), reduced_data))
cpu_data = _all_reduce_dict(cpu_data)
device_data = _all_reduce_dict(device_data)
def get_from_stack(key):
if key in cpu_data:
return cpu_data[key]
elif key in device_data:
return device_data[key]
raise KeyError
return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
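# Editorial sketch (not part of the original metaseq source): reducing a flat
# dictionary of logging counters across data parallel workers. The keys are
# illustrative; any non-nested mapping of scalars/tensors works.
def _example_reduce_logging_outputs(ntokens: int, loss: torch.Tensor):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return all_reduce_dict(
        {"ntokens": ntokens, "loss": loss},
        device=device,
        group=get_data_parallel_group(),
    )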
def broadcast_tensors(
tensors: Optional[List[torch.Tensor]],
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
"""
Broadcasts a list of tensors without other (non-src) ranks needing to know
the dtypes/shapes of the tensors.
"""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
# share metadata first to simplify transfer
is_src_rank = get_rank(group) == src_rank
if is_src_rank:
# We only want to communicate device type ie (cpu vs cuda) and not the index of cuda.
metadata = [
{"size": t.size(), "dtype": t.dtype, "device": torch.device(t.device.type)}
for t in tensors
]
metadata = _broadcast_object_slow(metadata, src_rank, group, dist_device)
else:
metadata = _broadcast_object_slow(None, src_rank, group, dist_device)
out_tensors = []
for i, meta in enumerate(metadata):
if is_src_rank:
tensor = tensors[i]
broadcast(tensors[i].to(dist_device), src=src_rank, group=group)
else:
tensor = torch.zeros(
[meta["size"].numel()], dtype=meta["dtype"], device=dist_device
)
broadcast(tensor, src=src_rank, group=group)
tensor = tensor.view(meta["size"]).to(meta["device"])
out_tensors.append(tensor)
return out_tensors
def broadcast_object(
obj: Any,
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> Any:
"""Broadcast an arbitrary Python object to other workers."""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
if get_rank(group) == src_rank:
# split the tensors from the non-tensors so we can broadcast them
# directly, avoiding unnecessary serialization/deserialization
tensors = []
obj = _split_tensors_from_obj(obj, tensors)
obj = _broadcast_object_slow(obj, src_rank, group, dist_device)
tensors = broadcast_tensors(tensors, src_rank, group, dist_device)
else:
obj = _broadcast_object_slow(None, src_rank, group, dist_device)
tensors = broadcast_tensors(None, src_rank, group, dist_device)
return _put_tensors_in_obj(obj, tensors)
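# Editorial sketch (not part of the original metaseq source): broadcasting a
# checkpoint state dict from rank 0 to every other rank, mirroring how
# FullyShardedDataParallel.load_state_dict uses this helper. `state_dict` may
# be None on non-source ranks; tensors are sent separately from the pickled
# container, so non-source ranks never need to know their shapes up front.
def _example_broadcast_checkpoint(state_dict):
    return broadcast_object(state_dict, src_rank=0, group=get_global_group())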
def _broadcast_object_slow(
obj: Any,
src_rank: int,
group: object,
dist_device: torch.device,
) -> Any:
if get_rank(group) == src_rank:
# Emit data
buffer = io.BytesIO()
torch.save(obj, buffer)
buffer = torch.ByteTensor(buffer.getbuffer()).to(dist_device)
length = torch.LongTensor([len(buffer)]).to(dist_device)
broadcast(length, src=src_rank, group=group)
broadcast(buffer, src=src_rank, group=group)
else:
# Fetch from the source
length = torch.LongTensor([0]).to(dist_device)
broadcast(length, src=src_rank, group=group)
buffer = torch.ByteTensor(int(length.item())).to(dist_device)
broadcast(buffer, src=src_rank, group=group)
buffer = io.BytesIO(buffer.cpu().numpy())
obj = torch.load(buffer, map_location="cpu")
return obj
@dataclass(frozen=True)
class _TensorPlaceholder:
index: int
def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if torch.is_tensor(obj):
placeholder = _TensorPlaceholder(index=len(tensors))
tensors.append(obj)
return placeholder
elif isinstance(obj, dict):
return {k: _split_tensors_from_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_split_tensors_from_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_split_tensors_from_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_split_tensors_from_obj(v, tensors) for v in obj}
else:
return obj
def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if isinstance(obj, _TensorPlaceholder):
return tensors[obj.index]
elif isinstance(obj, dict):
return {k: _put_tensors_in_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_put_tensors_in_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_put_tensors_in_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_put_tensors_in_obj(v, tensors) for v in obj}
else:
return obj
|
flash_metaseq-main
|
metaseq/distributed/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import logging
import os
import re
import time
from collections import defaultdict, OrderedDict
from glob import glob
from pathlib import Path
import torch
from tqdm import tqdm
from metaseq.distributed.fully_sharded_data_parallel import FSDP as FSDP
from metaseq.file_io import load_and_pop_last_optimizer_state
logger = logging.getLogger(__name__)
def _get_shard_number(x) -> int:
match = re.search(r"shard(\d+).pt", x)
if match is None:
raise AssertionError(f"{x} did not match shard(\\d+).pt")
else:
return int(match.groups()[0])
def consolidate_fsdp_shards(
pth_prefix: str,
save_prefix=None,
strict=False,
new_arch_name=None,
no_stitch_megatron=False,
megatron_part=None,
) -> str:
if pth_prefix.endswith(".pt"):
pth_prefix = pth_prefix[:-3]
if save_prefix is None:
save_prefix = pth_prefix + "_consolidated" # .pt'
all_ckpt_files = list(
sorted(glob(f"{pth_prefix}*shard*.pt"), key=_get_shard_number)
)
if megatron_part is not None:
no_stitch_megatron = True
all_ckpt_files = [
x for x in all_ckpt_files if f"model_part-{megatron_part}" in x
]
assert all_ckpt_files, f"no paths matched {pth_prefix}*shard*.pt"
weights = []
metadata = []
expert_paths = []
expert_dest_paths = []
expert_ranks = []
names = []
dense = True
t0 = time.time()
for p in tqdm(all_ckpt_files):
names.append(Path(p).name)
if re.search(r"rank-(\d+)", os.path.basename(p)): # expert checkpoint
expert_paths.append(p)
r = re.search(r"rank-(\d+)", os.path.basename(p)).groups()[0]
assert r not in expert_ranks
expert_ranks.append(r)
expert_dest_paths.append(f"{save_prefix}-rank-{r}.pt")
else:
ckpt = load_and_pop_last_optimizer_state(p)
weights.append(ckpt["model"])
metadata.append(ckpt["shard_metadata"])
assert weights, f"all files were considered experts: {all_ckpt_files}"
do_consolidate = True
if "decoder.embed_tokens.weight" in weights[0].keys():
shape = weights[0]["decoder.embed_tokens.weight"].shape
logger.info(
f"This ckpt does not seem sharded. I see unflat params! like "
f"decoder.embed_tokens.weight shaped {shape}. Will just copy files "
f"and remove optim_state."
)
do_consolidate = False
if do_consolidate:
num_parts = find_num_parts(names)
if num_parts:
logger.info("consolidate_model_parallel")
consolidated_weights = consolidate_model_parallel(
metadata,
names,
strict,
weights,
parts=num_parts,
no_stitch_megatron=no_stitch_megatron,
)
else:
logger.info("FSDP.consolidate_shard_weights")
consolidated_weights = FSDP.consolidate_shard_weights(
shard_weights=weights, shard_metadata=metadata, strict=strict
)
del weights, metadata
gc.collect()
done_consolidate = time.time()
logger.info(f"Done consolidating after {done_consolidate-t0//60} minutes")
else:
consolidated_weights = weights[0]
if new_arch_name is not None:
ckpt["cfg"]["model"]._name = new_arch_name
if dense:
logger.info("dense")
def save_checkpoint(weights_to_save, prefix):
ckpt_consolidated = dict(
model=weights_to_save,
cfg=ckpt["cfg"],
extra_state=ckpt["extra_state"],
optimizer_history=ckpt["optimizer_history"],
args=ckpt.get("args"),
)
save_path = f"{prefix}.pt"
logger.info(f"Saving to {save_path} ...")
torch.save(ckpt_consolidated, save_path)
logger.info(f"Done after {time.time()-t0//60} minutes")
return save_path
if no_stitch_megatron:
saved_paths = []
for part_id, part_consolidated_weights in consolidated_weights.items():
saved_paths.append(
save_checkpoint(
part_consolidated_weights, f"{save_prefix}-model_part-{part_id}"
)
)
return saved_paths
return save_checkpoint(consolidated_weights, save_prefix)
ckpt_shared = dict(
model=consolidated_weights,
cfg=ckpt["cfg"],
extra_state=ckpt["extra_state"],
optimizer_history=ckpt["optimizer_history"],
args=ckpt["args"],
)
logger.info("saving..")
torch.save(ckpt_shared, f"{save_prefix}-shared.pt")
logger.info(f"Done saving. Total time: {time.time()-t0//60} minutes, ")
# Process experts
for src, dst in tqdm(
list(zip(expert_paths, expert_dest_paths)), desc="expert files"
):
ckpt = load_and_pop_last_optimizer_state(src)
if do_consolidate:
expert_wt = FSDP.consolidate_shard_weights(
shard_weights=[ckpt["model"]],
shard_metadata=[ckpt["shard_metadata"]],
strict=False,
)
ckpt = dict(
model=expert_wt,
cfg=ckpt["cfg"],
extra_state=ckpt["extra_state"],
optimizer_history=ckpt["optimizer_history"],
args=ckpt["args"],
)
torch.save(ckpt, dst)
logger.info(f"saved consolidated MoE with prefix {save_prefix}.pt")
return f"{save_prefix}.pt"
def consolidate_model_parallel(
metadata, names, strict, weights, parts=2, no_stitch_megatron=False
):
model_parts = defaultdict(list)
metadata_parts = defaultdict(list)
for i, n in enumerate(names):
for p in range(parts):
if f"part-{p}" in n:
model_parts[p].append(weights[i])
metadata_parts[p].append(metadata[i])
all_parts_consolidated = defaultdict(list)
for k, v in model_parts.items():
part_weights = FSDP.consolidate_shard_weights(
shard_weights=v, shard_metadata=metadata_parts[k], strict=strict
)
all_parts_consolidated[k] = part_weights
if no_stitch_megatron:
return all_parts_consolidated
model = glue_megatron_parts(all_parts_consolidated)
return model
def handle_qkv_proj(model_parts, key):
parts = [model_parts[part_id][key] for part_id in range(len(model_parts))]
ks, vs, qs = [], [], []
for p in parts:
k, v, q = torch.split(p, p.shape[0] // 3)
ks.append(k)
vs.append(v)
qs.append(q)
return torch.cat(ks, dim=0), torch.cat(vs, dim=0), torch.cat(qs, dim=0)
def _handle_one(parts, is_weight):
"""Make it look like a normal LayerNorm"""
n_parts = len(parts)
err_msg = f"Redundant ModelParallelFusedLayerNorm params have been updated."
if is_weight:
init = 1.0
assert not torch.logical_and(parts[0].ne(1), parts[1].ne(1)).any(), err_msg
else:
init = 0.0
assert not torch.logical_and(parts[0].ne(0), parts[1].ne(0)).any(), err_msg
ret_val = torch.cat([p.unsqueeze(-1) for p in parts], dim=1).sum(1) - (
init * (n_parts - 1)
)
return ret_val
def handle_legacy_ln_(glued_model, n_parts):
"""Consolidate ffn_layernorm.lns.weight.{part_id} -> ffn_layernorm.weight"""
if "decoder.layers.0.ffn_layernorm.lns.0.weight" not in glued_model:
return
n_layers = get_n_layers(glued_model)
for i in range(n_layers):
layer_weights = [
glued_model.pop(f"decoder.layers.{i}.ffn_layernorm.lns.{p}.weight")
for p in range(n_parts)
]
layer_biases = [
glued_model.pop(f"decoder.layers.{i}.ffn_layernorm.lns.{p}.bias")
for p in range(n_parts)
]
glued_model[f"decoder.layers.{i}.ffn_layernorm.weight"] = _handle_one(
layer_weights, True
)
glued_model[f"decoder.layers.{i}.ffn_layernorm.bias"] = _handle_one(
layer_biases, False
)
def get_n_layers(glued_model):
n_layers = 0
while True:
if f"decoder.layers.{n_layers}.fc1.weight" in glued_model:
n_layers += 1
else:
assert (
n_layers > 0
), f"found 0 layers bc no keys matching decoder.layers.0.fc1.weight"
return n_layers
def glue_megatron_parts(model_parts):
glued_model = OrderedDict()
def assert_all_close(key):
for part_id in range(len(model_parts)):
if not torch.allclose(model_parts[part_id][key], model_parts[0][key]):
err = (
(model_parts[part_id][key] - model_parts[0][key])
.float()
.abs()
.max()
.item()
)
logger.info(f"max discrepancy {key}: {err}")
for key in model_parts[0]:
if "qkv" in key:
# Bias of CP gets concatenated
if key.endswith("bias"):
k, v, q = handle_qkv_proj(model_parts, key)
else:
assert key.endswith("weight")
k, v, q = handle_qkv_proj(model_parts, key)
glued_model[key.replace("qkv", "k")] = k
glued_model[key.replace("qkv", "v")] = v
glued_model[key.replace("qkv", "q")] = q
elif "ffn_layernorm" in key:
glued_model[key] = torch.cat(
[model_parts[part_id][key] for part_id in range(len(model_parts))]
)
elif "layer_norm" in key:
assert_all_close(key)
glued_model[key] = model_parts[0][key]
elif "fc1" in key or "k_proj" in key or "q_proj" in key or "v_proj" in key:
# Bias of CP gets concatenated
if key.endswith("bias"):
glued_bias = torch.cat(
[model_parts[part_id][key] for part_id in range(len(model_parts))]
)
glued_model[key] = glued_bias
# weights of CP gets concatenated along dim 0
else:
assert key.endswith("weight")
glued_weight = torch.cat(
[model_parts[part_id][key] for part_id in range(len(model_parts))],
dim=0,
)
glued_model[key] = glued_weight
        # FC1 is CP (column-parallel)
        # FC2 is RP (row-parallel)
elif "fc2" in key or "out_proj" in key:
# Bias of RP gets replicated
if key.endswith("bias"):
assert_all_close(key)
glued_model[key] = model_parts[0][key]
# weights of RP gets concatenated along dim 1
else:
assert key.endswith("weight")
glued_weight = torch.cat(
[model_parts[part_id][key] for part_id in range(len(model_parts))],
dim=1,
)
glued_model[key] = glued_weight
elif "embed_tokens.weight" in key:
glued_weight = torch.cat(
[model_parts[part_id][key] for part_id in range(len(model_parts))],
dim=0,
)
glued_model[key] = glued_weight
elif "embed_positions" in key:
if "_float_tensor" in key:
                # Assume embed positions are non-learned, i.e. sinusoidal
glued_model[key] = torch.zeros([1])
else:
assert_all_close(key)
glued_model[key] = model_parts[0][key]
elif "version" in key:
glued_model[key] = model_parts[0][key]
else:
assert_all_close(key)
glued_model[key] = model_parts[0][key]
assert len(glued_model.keys()) >= len(model_parts[0].keys())
# Consolidate ffn_layernorm.lns.weight.{part_id} -> ffn_layernorm.weight
handle_legacy_ln_(glued_model, len(model_parts))
assert "decoder.layers.0.ffn_layernorm.lns.0.weight" not in glued_model
return glued_model
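# Editorial sketch (not part of the original metaseq source): the core stitching
# rule used above, on toy tensors. Column-parallel weights (fc1, q/k/v_proj) are
# split over output rows, so their shards are concatenated along dim 0;
# row-parallel weights (fc2, out_proj) are split over input columns, so their
# shards are concatenated along dim 1.
def _example_stitch_rule():
    full = torch.arange(16.0).reshape(4, 4)
    col_shards = torch.split(full, 2, dim=0)   # two column-parallel shards
    row_shards = torch.split(full, 2, dim=1)   # two row-parallel shards
    assert torch.equal(torch.cat(col_shards, dim=0), full)
    assert torch.equal(torch.cat(row_shards, dim=1), full)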
def find_num_parts(names) -> int:
parts = []
for n in names:
part = re.search(r"part-(\d+)-", n)
if part is not None:
parts.append(int(part.groups()[0]))
if parts:
return max(parts) + 1
else:
return 0
|
flash_metaseq-main
|
metaseq/distributed/stitch_fsdp_ckpt.py
|