python_code (stringlengths 0–4.04M) | repo_name (stringlengths 7–58) | file_path (stringlengths 5–147) |
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import importlib
import os
import sys
import tempfile
from collections import namedtuple
from dataclasses import fields
from typing import Any, Callable, Dict, Generator, List
Item = namedtuple("Item", ["constructor", "config"])
# credit: snippet used in ClassyVision (and probably other places)
def import_all_modules(root: str, base_module: str) -> List[str]:
modules: List[str] = []
for file in os.listdir(root):
if file.endswith((".py", ".pyc")) and not file.startswith("_"):
module = file[: file.find(".py")]
if module not in sys.modules:
module_name = ".".join([base_module, module])
importlib.import_module(module_name)
modules.append(module_name)
return modules
def get_registry_decorator(
class_registry, name_registry, reference_class, default_config
) -> Callable[[str, Any], Callable[[Any], Any]]:
def register_item(name: str, config: Any = default_config):
"""Registers a subclass.
This decorator allows xFormers to instantiate a given subclass
from a configuration file, even if the class itself is not part of the
xFormers library."""
def register_cls(cls):
if name in class_registry:
raise ValueError("Cannot register duplicate item ({})".format(name))
if not issubclass(cls, reference_class):
raise ValueError(
"Item ({}: {}) must extend the base class: {}".format(
name, cls.__name__, reference_class.__name__
)
)
if cls.__name__ in name_registry:
raise ValueError(
"Cannot register item with duplicate class name ({})".format(
cls.__name__
)
)
class_registry[name] = Item(constructor=cls, config=config)
name_registry.add(cls.__name__)
return cls
return register_cls
return register_item
def generate_matching_config(superset: Dict[str, Any], config_class: Any) -> Any:
"""Given a superset of the inputs and a reference config class,
return exactly the needed config"""
# Extract the required fields
field_names = list(map(lambda x: x.name, fields(config_class)))
subset = {k: v for k, v in superset.items() if k in field_names}
# The missing fields get Noned
for k in field_names:
if k not in subset.keys():
subset[k] = None
return config_class(**subset)
def rmf(filename: str) -> None:
"""Remove a file like rm -f."""
try:
os.remove(filename)
except FileNotFoundError:
pass
@contextlib.contextmanager
def temp_files_ctx(num: int) -> Generator:
""" A context to get tempfiles and ensure they are cleaned up. """
files = [tempfile.mkstemp()[1] for _ in range(num)]
yield tuple(files)
# temp files could have been removed, so we use rmf.
for name in files:
rmf(name)
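# Usage sketch: how get_registry_decorator and generate_matching_config are meant
# to be combined. The names below (MyBlockConfig, BaseBlock, BLOCK_REGISTRY, ...)
# are invented for illustration only and are not part of the library.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class MyBlockConfig:
        dim: int
        dropout: float

    class BaseBlock:
        pass

    BLOCK_REGISTRY: Dict[str, Any] = {}
    BLOCK_NAMES: set = set()
    register_block = get_registry_decorator(
        BLOCK_REGISTRY, BLOCK_NAMES, BaseBlock, MyBlockConfig
    )

    @register_block("my_block", MyBlockConfig)
    class MyBlock(BaseBlock):
        def __init__(self, config: MyBlockConfig):
            self.config = config

    # A superset of settings (e.g. parsed from a config file); only the fields
    # declared on MyBlockConfig are kept, the missing ones are set to None.
    settings = {"dim": 16, "dropout": 0.1, "unused_key": "ignored"}
    config = generate_matching_config(settings, MyBlockConfig)
    block = BLOCK_REGISTRY["my_block"].constructor(config)
    assert block.config.dim == 16

    # temp_files_ctx hands out scratch files and removes them on exit
    with temp_files_ctx(2) as (f1, f2):
        assert os.path.exists(f1) and os.path.exists(f2)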
| bart_ls-main | xformers/xformers/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Optional
import torch
import torch.nn as nn
from torch.cuda.amp import custom_bwd, custom_fwd
from xformers.components.activations import Activation
from xformers.triton.k_activations import (
get_triton_activation_bwd_kernel,
get_triton_activation_kernel,
)
from xformers.triton.k_fused_matmul_bw import fused_matmul_backward
from xformers.triton.k_fused_matmul_fw import fused_matmul
class _fused_linear_triton(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(
ctx,
x,
weight,
bias,
activation,
act_grad_kernel,
trainable_weight,
trainable_bias,
save_activation_inputs,
):
# Kick the fused Triton kernel, handling bias and activation in one go
y, activation_inputs = fused_matmul(
x, weight, bias, activation, save_activation_inputs
)
ctx.activation_grad_kernel = act_grad_kernel
ctx.trainable_weight = trainable_weight
ctx.trainable_bias = trainable_bias
# Micro-optimization: saving these is not always needed (?)
if x.requires_grad or ctx.trainable_weight or ctx.trainable_bias:
ctx.save_for_backward(weight, activation_inputs, x)
return y
@staticmethod
@custom_bwd
def backward(ctx: Any, grad_out: torch.Tensor) -> Any: # type: ignore
"""
Compute the derivative with respect to x, other tensors were not trainable inputs.
"""
(weight, activation_inputs, x) = ctx.saved_tensors
grad_input, grad_weight, grad_bias = fused_matmul_backward(
grad_out=grad_out,
inputs=x,
act_in=activation_inputs,
weight=weight,
trainable_weight=ctx.trainable_weight,
trainable_bias=ctx.trainable_bias,
activation_grad=ctx.activation_grad_kernel,
)
        return grad_input, grad_weight, grad_bias, None, None, None, None, None
class FusedLinear(nn.Module):
"""
Handle a linear transform, like torch.nn.Linear_, and a given activation, in a single kernel.
The whole transform: is :math:`y = activation(xA^T + b)`.
This is typically significantly faster than PyTorch while using fp16 and non-sigmoid activations,
as of September 2021.
.. _torch.nn.Linear: https://pytorch.org/docs/stable/generated/torch.nn.Linear.html
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
activation: Optional[Activation] = None,
**_,
):
super().__init__()
self.weight = nn.Parameter(
torch.empty(out_features, in_features), requires_grad=True
)
self.bias = (
nn.Parameter(torch.empty(out_features), requires_grad=True)
if bias
else None
)
self._activation_kernel = get_triton_activation_kernel(activation)
self._activation_grad_kernel = get_triton_activation_bwd_kernel(activation)
self.reset_parameters()
def reset_parameters(self) -> None:
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x):
return _fused_linear_triton.apply(
x,
self.weight,
self.bias,
self._activation_kernel,
self._activation_grad_kernel,
self.weight.requires_grad,
self.bias.requires_grad if self.bias is not None else False,
self.training and x.requires_grad,
)
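# Usage sketch for FusedLinear: a drop-in for nn.Linear + activation, assuming a
# CUDA device and a working Triton install. Shapes and the ReLU choice below are
# arbitrary, illustrative values.
if __name__ == "__main__":
    if torch.cuda.is_available():
        layer = FusedLinear(
            in_features=64, out_features=128, bias=True, activation=Activation.ReLU
        ).cuda()
        x = torch.randn(8, 32, 64, device="cuda", dtype=torch.float16, requires_grad=True)
        y = layer(x)        # y = relu(x @ W^T + b), computed by one fused Triton kernel
        y.sum().backward()  # gradients flow through the custom autograd Function above
        print(y.shape)      # torch.Size([8, 32, 128])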
| bart_ls-main | xformers/xformers/triton/fused_linear_layer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
from xformers.triton.k_sum import k_sum_0
def sum_2d_dim_0(x: torch.Tensor):
"""
Sum a 2D tensor across the first dimension
"""
out = torch.empty(x.shape[1], device=x.device, dtype=x.dtype)
assert (
x.ndim == 2
), "This is a very specific kernel, only for 2-dim tensors and summing along dim 0"
M, N = x.shape
# This kernel is not competitive for these sizes
if M > 2048 or M < 8:
return x.sum(dim=0)
    assert (
        M >= 4
    ), "This is a very specific kernel, requires the reduction dimension to be at least 4"
assert x.stride(1) == 1, (
"We're expecting x to be contiguous along dim 1, and non contiguous along dim 0.\n"
" You would probably be better served with torch.sum()"
)
BLOCK_M = min(triton.next_power_of_2(M), 2048)
BLOCK_N = 32
if BLOCK_M > 256:
BLOCK_N = 16
if BLOCK_M > 1024:
BLOCK_N = 8
def grid(meta):
return (triton.cdiv(N, meta["BLOCK_N"]),)
# fmt: off
k_sum_0[grid](
out, x,
x.stride(0),
M, N,
x.dtype == torch.float16,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
num_stages=4,
)
# fmt: on
return out
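# Usage sketch: the expected call pattern and the fallback behaviour. For reduction
# sizes outside [8, 2048] the function silently defers to torch.sum; exercising the
# Triton path assumes a CUDA device.
if __name__ == "__main__":
    if torch.cuda.is_available():
        x = torch.randn(512, 1024, device="cuda", dtype=torch.float16)
        y = sum_2d_dim_0(x)  # Triton kernel path (8 <= M <= 2048)
        torch.testing.assert_close(y, x.sum(dim=0), rtol=1e-2, atol=1e-2)

        x_small = torch.randn(4, 1024, device="cuda")
        torch.testing.assert_close(sum_2d_dim_0(x_small), x_small.sum(dim=0))  # fallback path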
| bart_ls-main | xformers/xformers/triton/sum_strided.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional
import triton
import triton.language as tl
from xformers.components import Activation
_kAlpha = math.sqrt(2.0 / math.pi)
def get_triton_activation_kernel(activation: Optional[Activation]):
return (
{
Activation.ReLU: relu,
Activation.LeakyReLU: leaky_relu,
Activation.GeLU: gelu,
Activation.SquaredReLU: squared_relu,
}[activation]
if activation
else None
)
def get_triton_activation_bwd_kernel(activation: Optional[Activation]):
return (
{
Activation.ReLU: relu_grad,
Activation.LeakyReLU: leaky_relu_grad,
Activation.GeLU: gelu_grad,
Activation.SquaredReLU: squared_relu_grad,
}[activation]
if activation
else None
)
@triton.jit
def tanh(x):
# Tanh is just a scaled sigmoid
return 2 * tl.sigmoid(2 * x) - 1
@triton.jit
def cosh(x):
exp_x = tl.exp(x)
return (exp_x + 1.0 / exp_x) * 0.5
# a Triton implementation of the most used activations
# See for instance http://arxiv.org/abs/1606.08415 for an overview
# ReLU
@triton.jit
def relu(x):
"""
ReLU_ activation function
.. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
"""
return tl.where(x >= 0, x, 0.0)
@triton.jit
def relu_grad(x):
return tl.where(x >= 0, 1.0, 0.0)
@triton.jit
def squared_relu(x):
"""
Squared ReLU activation, as proposed in the Primer_ paper.
.. _Primer: https://arxiv.org/abs/2109.08668
"""
x_ = relu(x)
return x_ * x_
@triton.jit
def squared_relu_grad(x):
return tl.where(x >= 0, 2.0 * x, 0.0)
# Leaky ReLU
@triton.jit
def leaky_relu(x):
"""
LeakyReLU_ activation
.. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
"""
scale = 0.01 + 0.0
return tl.where(x >= 0, x, scale * x)
@triton.jit
def leaky_relu_grad(x):
return tl.where(x >= 0, 1.0, 0.01)
@triton.jit
def gelu(x):
"""
GeLU_ activation - Gaussian error linear unit
.. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
"""
return 0.5 * x * (1 + tanh(_kAlpha * (x + 0.044715 * x * x * x)))
@triton.jit
def gelu_grad(x):
# Normal computation, just try to maximize reuse
x_3 = x * x * x
_a = 0.0356774 * x_3 + _kAlpha * x
# (hoping that a division is cheaper than an exponential..)
exp_a = tl.exp(_a)
exp_m_a = 1.0 / exp_a
_cos_h = exp_a + exp_m_a
_tan_h = (exp_a - exp_m_a) / _cos_h
_cos_h *= 0.5
return 0.5 + 0.5 * _tan_h + (0.0535161 * x_3 + 0.398942 * x) / (_cos_h * _cos_h)
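# Numerical sanity check (illustrative, not part of the library): the Triton kernels
# above only run on GPU, so this re-derives the squared ReLU gradient with plain
# PyTorch autograd and compares it to the closed form used in squared_relu_grad,
# i.e. d/dx relu(x)^2 = 2 * relu(x).
if __name__ == "__main__":
    import torch

    x = torch.linspace(-3.0, 3.0, steps=101, requires_grad=True)
    y = torch.relu(x) ** 2
    y.sum().backward()
    expected = torch.where(x >= 0, 2.0 * x, torch.zeros_like(x))
    torch.testing.assert_close(x.grad, expected.detach())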
| bart_ls-main | xformers/xformers/triton/activations.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional
import triton
import triton.language as tl
from xformers.components import Activation
_kAlpha = math.sqrt(2.0 / math.pi)
def get_triton_activation_kernel(activation: Optional[Activation]):
return (
{
Activation.ReLU: relu,
Activation.LeakyReLU: leaky_relu,
Activation.GeLU: gelu,
Activation.SquaredReLU: squared_relu,
}[activation]
if activation
else None
)
def get_triton_activation_bwd_kernel(activation: Optional[Activation]):
return (
{
Activation.ReLU: relu_grad,
Activation.LeakyReLU: leaky_relu_grad,
Activation.GeLU: gelu_grad,
Activation.SquaredReLU: squared_relu_grad,
}[activation]
if activation
else None
)
@triton.jit
def tanh(x):
# Tanh is just a scaled sigmoid
return 2 * tl.sigmoid(2 * x) - 1
@triton.jit
def cosh(x):
exp_x = tl.exp(x)
return (exp_x + 1.0 / exp_x) * 0.5
# a Triton implementation of the most used activations
# See for instance http://arxiv.org/abs/1606.08415 for an overview
# ReLU
@triton.jit
def relu(x):
"""
ReLU_ activation function
.. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
"""
zero = 0.0
return tl.where(x >= 0, x, zero.to(x.dtype))
@triton.jit
def relu_grad(x):
# ReLU is different from other activations
# in that it does not require the input to retrospectively compute its gradient
# here the input is the downstream gradient, and we return the upstream gradient directly
zero = 0.0
one = 1.0
return tl.where(x >= 0, one.to(x.dtype), zero.to(x.dtype))
@triton.jit
def squared_relu(x):
"""
Squared ReLU activation, as proposed in the Primer_ paper.
.. _Primer: https://arxiv.org/abs/2109.08668
"""
x_ = relu(x)
return (x_ * x_).to(x.dtype)
@triton.jit
def squared_relu_grad(x):
return tl.where(x >= 0, 2.0 * x, 0.0)
# Leaky ReLU
@triton.jit
def leaky_relu(x):
"""
LeakyReLU_ activation
.. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
"""
scale = 0.01 + 0.0
scale = scale.to(x.dtype)
return tl.where(x >= 0, x, scale * x)
@triton.jit
def leaky_relu_grad(x):
min_grad = 0.01
max_grad = 1
min_grad = min_grad.to(x.dtype)
max_grad = max_grad.to(x.dtype)
return tl.where(x >= 0, max_grad, min_grad)
@triton.jit
def gelu(x):
"""
GeLU_ activation - Gaussian error linear unit
.. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
"""
return 0.5 * x * (1 + tanh(_kAlpha * (x + 0.044715 * x * x * x)))
@triton.jit
def gelu_grad(x):
# CREDITS: Fast implementation proposed in
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30
tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))
return 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
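# Numerical sanity check (illustrative, not part of the library): gelu_grad above is
# the closed-form derivative of the tanh-based GELU approximation. Since the Triton
# kernel cannot run on CPU, this re-derives the same quantity with PyTorch autograd.
if __name__ == "__main__":
    import torch

    k_alpha = math.sqrt(2.0 / math.pi)
    x = torch.linspace(-4.0, 4.0, steps=201, dtype=torch.float64, requires_grad=True)
    y = 0.5 * x * (1.0 + torch.tanh(k_alpha * (x + 0.044715 * x ** 3)))
    y.sum().backward()

    tanh_out = torch.tanh(0.79788456 * x * (1.0 + 0.044715 * x * x))
    closed_form = 0.5 * x * (
        (1.0 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
    ) + 0.5 * (1.0 + tanh_out)
    torch.testing.assert_close(x.grad, closed_form.detach(), rtol=1e-4, atol=1e-6)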
| bart_ls-main | xformers/xformers/triton/k_activations.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import triton
import triton.language as tl
from xformers.triton.configs_matmul import kernel_config
# CREDITS: Initially inspired by the Triton tutorial on matrix multiplications
# fmt: off
@triton.autotune(
configs=kernel_config,
key=["M", "N", "K"],
)
@triton.jit
def kernel_fma(
# Pointers to matrices
D, A, W, C, In,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_db, stride_dm, stride_dn,
stride_ab, stride_am, stride_ak,
stride_wn, stride_wk,
stride_ib, stride_im, stride_in,
# Meta-parameters
**META,
):
# fmt: on
"""
Kernel for computing D = activation(A x W + C)
- A has shape (B, M, K)
- W has shape (K, N)
- C has shape (N,)
- D has shape (B, M, N)
- In (optional) has shape (B, M, N)
'In' optionally saves the A x W + C intermediate for backward computations
"""
# extract metaparameters
BLOCK_M, GROUP_M = META["BLOCK_ROW"], META["GROUP_M"]
BLOCK_N, BLOCK_K = META["BLOCK_COL"], META["BLOCK_K"]
# programs are grouped together to improve L2 hit rate
pid, batch_id = tl.program_id(axis=0), tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_M) # number of program ids along the M axis
    num_pid_n = tl.cdiv(N, BLOCK_N) # number of program ids along the N axis
num_pid_in_group = GROUP_M * num_pid_n # number of programs in group
group_id = pid // num_pid_in_group # id of the group this program is in
first_pid_m = group_id * GROUP_M # row-id of the first program in the group
GROUP_M = min(
num_pid_m - first_pid_m, GROUP_M
) # if `num_pid_m` isn't divisible by `GROUP_M`, the last group is smaller
# *within groups*, programs are ordered in a column-major order
# row-id of the program in the *launch grid*
pid_m = first_pid_m + (pid % GROUP_M)
# col-id of the program in the *launch grid*
pid_n = (pid % num_pid_in_group) // GROUP_M
# rm (resp. rn) denotes a range of indices
# for rows (resp. col) of C
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# rk denotes a range of indices for columns
# (resp. rows) of A (resp. B)
rk = tl.arange(0, BLOCK_K)
# the memory addresses of elements in the first block of
# A and W can be computed using numpy-style broadcasting
D += rm[:, None] * stride_dm + rn[None, :] * stride_dn + batch_id * stride_db
A += rm[:, None] * stride_am + rk[None, :] * stride_ak + batch_id * stride_ab
W += rk[:, None] * stride_wk + rn[None, :] * stride_wn
# initialize and iteratively update accumulator
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
if META["BIAS"]:
bias = tl.load(C + rn, mask=rn < N, other=0.0).to(tl.float32)
acc += bias[None, :]
for _ in range(K, 0, -BLOCK_K):
# block level matrix multiplication
a = tl.load(A)
w = tl.load(W)
acc += tl.dot(a, w).to(tl.float32)
# increment pointers so that the next blocks of A and B
# are loaded during the next iteration
A += BLOCK_K * stride_ak
W += BLOCK_K * stride_wk
# optional: save the activation inputs
if META["SAVE_ACT_INPUTS"]:
In += rm[:, None] * stride_im + rn[None, :] * stride_in + batch_id * stride_ib
tl.store(In, acc, mask=(rm[:, None] < M) & (rn[None, :] < N))
# optional: fused activation (while the data is in shared memory)
if META["ACTIVATION"]:
acc = META["ACTIVATION"](acc)
# write back result
tl.store(D, acc, mask=(rm[:, None] < M) & (rn[None, :] < N))
# Activation needs to be a triton kernel
def fused_matmul(
x: torch.Tensor,
weight: torch.Tensor,
bias: Optional[torch.Tensor],
activation=None,
save_inputs: bool = False
):
"""
Compute e = activation(x @ weight + bias).
This wrapper kicks the `kernel_fma` Triton kernel
"""
if x.ndim == 2:
x = x.unsqueeze(0)
_should_squeeze = True
else:
_should_squeeze = False
assert (
x.shape[2] == weight.shape[1]
), f"Incompatible dimensions in between inputs and weight, {x.shape} - {weight.shape}"
assert bias is None or bias.is_contiguous()
assert (
bias is None or bias.shape[0] == weight.shape[0]
), "Incompatible dimensions in between weight and bias"
B, M, K = x.shape
N, K = weight.shape
# FIXME: @lefaudeux
assert (
K % 32 == 0
), "We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_K"
outputs = torch.empty((B, M, N), device=x.device, dtype=x.dtype)
act_inputs = torch.empty_like(outputs) if save_inputs else outputs
# 1D launch kernel where each block gets its own program.
def grid(META):
return (
triton.cdiv(M, META["BLOCK_ROW"]) * triton.cdiv(N, META["BLOCK_COL"]),
B,
)
# fmt: off
kernel_fma[grid](
# data ptrs
outputs, x, weight,
bias if bias is not None else x, # auto skip bias if not present
act_inputs,
# shapes
M, N, K,
# strides
outputs.stride(0), outputs.stride(1), outputs.stride(2),
x.stride(0), x.stride(1), x.stride(2),
weight.stride(0), weight.stride(1),
act_inputs.stride(0), act_inputs.stride(1), act_inputs.stride(2),
# optional fused activation
ACTIVATION=activation,
# optional fused bias
BIAS=bias is not None,
# speed optimization: group the programs
# improve on data reuse in L2 cache
GROUP_M=8,
BLOCK_K=32,
SAVE_ACT_INPUTS=save_inputs
)
# fmt: on
if _should_squeeze:
outputs.squeeze_()
return (outputs, act_inputs) if save_inputs else (outputs, None)
# fmt: off
@triton.autotune(
configs=kernel_config,
key=["M", "N", "K"],
)
@triton.jit
def kernel_fma_grad_in(
# Pointers to all the tensors
GRAD_IN, GRAD_ACT, ACT_IN, GRAD_OUT, W,
# Tensor dimensions
M, N, K,
# strides for all the gradients
stride_gib, stride_gim, stride_gik,
stride_gab, stride_gam, stride_gan,
stride_gob, stride_gom, stride_gon,
# strides for the extra data
stride_aib, stride_aim, stride_ain,
stride_wn, stride_wk,
# Meta-parameters
**META,
):
# fmt: on
"""
Kernel for computing `grad_out = grad_in * activation_grad(inputs) @ W^T`
- grad_out has shape (B, M, N)
- W has shape (K, N)
- grad_in has shape (B, M, K)
- X has shape (B, M, K)
"""
# extract metaparameters
BLOCK_M, GROUP_M = META["BLOCK_ROW"], META["GROUP_M"]
BLOCK_N, BLOCK_K = META["BLOCK_N"], META["BLOCK_COL"]
# programs are grouped together to improve L2 hit rate
pid, batch_id = tl.program_id(axis=0), tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_M) # number of program ids along the M axis
    num_pid_k = tl.cdiv(K, BLOCK_K) # number of program ids along the K axis
num_pid_in_group = GROUP_M * num_pid_k # number of programs in group
group_id = pid // num_pid_in_group # id of the group this program is in
first_pid_m = group_id * GROUP_M # row-id of the first program in the group
GROUP_M = min(
num_pid_m - first_pid_m, GROUP_M
) # if `num_pid_m` isn't divisible by `GROUP_M`, the last group is smaller
# *within groups*, programs are ordered in a column-major order
pid_m = first_pid_m + (pid % GROUP_M) # row-id of the program in the *launch grid*
pid_k = (
pid % num_pid_in_group
) // GROUP_M # col-id of the program in the *launch grid*
# memory ranges
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rk = pid_k * BLOCK_K + tl.arange(0, BLOCK_K)
rn = tl.arange(0, BLOCK_N)
# memory blocks can be computed using numpy-style broadcasting
GRAD_OUT += rm[:, None] * stride_gom + rn[None, :] * stride_gon + batch_id * stride_gob
GRAD_ACT += rm[:, None] * stride_gam + rn[None, :] * stride_gan + batch_id * stride_gab
ACT_IN += rm[:, None] * stride_aim + rn[None, :] * stride_ain + batch_id * stride_aib
GRAD_IN += rm[:, None] * stride_gim + rk[None, :] * stride_gik + batch_id * stride_gib
W += rn[:, None] * stride_wn + rk[None, :] * stride_wk
# initialize and iteratively update accumulator
grad_in = tl.zeros((BLOCK_M, BLOCK_K), dtype=tl.float32)
act_grad_fn = META["ACTIVATION_GRAD"]
for _ in range(N, 0, -BLOCK_N):
grad_out = tl.load(GRAD_OUT) # BLOCK_M x BLOCK_N
w = tl.load(W) # BLOCK_N x BLOCK_K
# optional fused activation gradient (while the data is in shared memory)
if META["ACTIVATION_GRAD"]:
if META["ACTIVATION_GRAD_REQ_INPUTS"]:
# This activation requires its inputs
act_input = tl.load(ACT_IN)
grad_act = act_grad_fn(act_input)
ACT_IN += BLOCK_N * stride_ain
else:
# Save some time, we can reuse the outputs to know about the grad
grad_act = act_grad_fn(grad_out)
grad_out *= grad_act.to(grad_out.dtype)
# store grad_act as an intermediate, will be used for grad/weight and grad/bias
if META["SAVE_ACT_GRAD"]:
tl.store(GRAD_ACT, grad_out)
# gradient #1: input with respect to outputs
# grad_in is grad_out scaled by the (transposed) weight
grad_in += tl.dot(grad_out, w)
# increment pointers so that the next blocks of A and B are loaded during the next iteration
GRAD_OUT += BLOCK_N * stride_gon
GRAD_ACT += BLOCK_N * stride_gan
W += BLOCK_N * stride_wn
# write back result
tl.store(GRAD_IN, grad_in, mask=(rm[:, None] < M) & (rk[None, :] < K)) # type promotion or downgrade is automatic
# Activation needs to be a triton kernel
def fused_matmul_backward(
grad_out: torch.Tensor,
inputs: torch.Tensor,
weight: torch.Tensor,
trainable_weight: bool,
trainable_bias: bool,
activation_inputs: Optional[torch.Tensor],
activation_grad=None,
activation_grad_req_inputs: bool = False,
):
"""
Compute grad_in = activation^-1(grad_out) @ weight.transpose()
.. note: The weight buffer is transposed on the fly
"""
if grad_out.ndim == 2:
# Add the batch dimension
# This is inelegant and maybe slow, but never really used in the xformers context
grad_out = grad_out.unsqueeze(0)
_should_squeeze = True
else:
_should_squeeze = False
assert (
grad_out.shape[2] == weight.shape[0]
), "Incompatible dimensions in between grad_out and weight"
B, M, N = grad_out.shape
N, K = weight.shape
grad_in = torch.empty((B, M, K), device=grad_out.device, dtype=grad_out.dtype)
grad_act = torch.empty_like(grad_out)
# Compute the gradient for the inputs
def grid(META):
return (
triton.cdiv(M, META["BLOCK_ROW"]) * triton.cdiv(K, META["BLOCK_COL"]),
B,
)
if activation_inputs is None:
# place holder, this will not be used really
activation_inputs = grad_out
# fmt: off
kernel_fma_grad_in[grid](
# data ptrs
grad_in, grad_act, activation_inputs, grad_out, weight,
# shapes
M, N, K,
# strides
grad_in.stride(0), grad_in.stride(1), grad_in.stride(2),
grad_act.stride(0), grad_act.stride(1), grad_act.stride(2),
grad_out.stride(0), grad_out.stride(1), grad_out.stride(2),
activation_inputs.stride(0), activation_inputs.stride(1), activation_inputs.stride(2),
weight.stride(0), weight.stride(1),
# optional fused activation
ACTIVATION_GRAD=activation_grad,
# data reuse optimization
GROUP_M=16,
BLOCK_N=32,
ACTIVATION_GRAD_REQ_INPUTS=activation_grad_req_inputs,
SAVE_ACT_GRAD=trainable_weight or trainable_bias
)
# fmt: on
grad_bias = torch.sum(grad_act, dim=[0, 1]) if trainable_bias else None
# Reuse Triton optimized matmul
grad_weight = None
if trainable_weight:
grad_act_ = torch.reshape(grad_act, (grad_act.shape[0]*grad_act.shape[1], grad_act.shape[2])).transpose(1, 0)
if inputs.ndim == 2:
inputs = inputs.unsqueeze(0)
inputs_ = torch.reshape(inputs, (inputs.shape[0]*inputs.shape[1], inputs.shape[2]))
grad_weight = triton.ops.matmul(grad_act_, inputs_)
if _should_squeeze:
grad_in = grad_in.squeeze_()
del grad_act
return grad_in, grad_weight, grad_bias
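# Usage sketch for the fused forward/backward pair above, checked against plain
# PyTorch. Assumes a CUDA device and a Triton version contemporary with this code
# (the weight gradient relies on triton.ops.matmul). All sizes are multiples of the
# largest block configs so the unmasked loads in the kernels stay in bounds.
if __name__ == "__main__":
    if torch.cuda.is_available():
        B, M, K, N = 4, 256, 256, 256
        x = torch.randn(B, M, K, device="cuda", dtype=torch.float16)
        weight = torch.randn(N, K, device="cuda", dtype=torch.float16)
        bias = torch.randn(N, device="cuda", dtype=torch.float16)

        out, _ = fused_matmul(x, weight, bias, activation=None, save_inputs=False)
        ref = torch.nn.functional.linear(x, weight, bias)
        torch.testing.assert_close(out.float(), ref.float(), rtol=1e-2, atol=1e-2)

        grad_out = torch.randn_like(out)
        grad_in, grad_w, grad_b = fused_matmul_backward(
            grad_out, x, weight,
            trainable_weight=True, trainable_bias=True,
            activation_inputs=None,
        )
        # with no activation fused, grad_in is simply grad_out @ W
        torch.testing.assert_close(
            grad_in.float(), grad_out.float() @ weight.float(), rtol=1e-2, atol=1e-2
        )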
| bart_ls-main | xformers/xformers/triton/k_fused_matmul.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
_triton_available = torch.cuda.is_available()
if _triton_available:
try:
from .dropout import FusedDropoutBias, dropout # noqa
from .fused_linear_layer import FusedLinear # noqa
from .layer_norm import FusedLayerNorm, layer_norm # noqa
from .softmax import log_softmax, softmax # noqa
__all__ = [
"dropout",
"softmax",
"log_softmax",
"FusedDropoutBias",
"FusedLinear",
"FusedLayerNorm",
"layer_norm",
]
except ImportError:
__all__ = []
| bart_ls-main | xformers/xformers/triton/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This comes almost as-is from the Triton layer norm tutorial
# https://github.com/openai/triton/blob/master/python/tutorials/05-layer-norm.py
import logging
import torch
import triton
import triton.language as tl
from torch.cuda.amp import custom_bwd, custom_fwd
_triton_layernorm_fp16_enabled = False # NOTE: PyTorch keeps layernorm as fp32
_triton_registered_warnings = False
@triton.jit
def _affine(W, B, N, x, META):
cols = tl.arange(0, META["BLOCK_SIZE_N"])
w = tl.load(W + cols, mask=cols < N, other=1.0)
zero = 0.0
zero = zero.to(w.dtype) # Triton bug workarounds
w = tl.where(cols < N, w, zero)
b = tl.load(B + cols, mask=cols < N, other=0.0)
b = tl.where(cols < N, b, zero)
y = x * w + b
return y
@triton.jit
def _store(y, Y, stride, N, META):
row = tl.program_id(0)
cols = tl.arange(0, META["BLOCK_SIZE_N"])
y_ptrs = Y + row * stride + cols
tl.store(y_ptrs, y, mask=cols < N)
@triton.jit
def _layer_norm_non_affine(X, M, V, stride, N, eps, META):
# fmt: on
"""
Fused layernorm kernel over a 3d tensor.
The layer norm is applied over the last dimension.
Compute
y = (x - E(x))/(sqrt(var(x) + epsilon)) * gamma + beta
"""
row = tl.program_id(0)
cols = tl.arange(0, META["BLOCK_SIZE_N"])
# Move to this row
x_ptrs = X + row * stride + cols
x = tl.load(x_ptrs, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0) # Triton bug workarounds
# Compute variance
x_mean = tl.sum(x, axis=0) / N
x_zm = x - x_mean
x_zm = tl.where(cols < N, x_zm, 0.0) # Triton bug workaround
x_var = tl.sum(x_zm * x_zm, axis=0) / N
x_inv_sigma = 1.0 / tl.sqrt(x_var + eps)
# write-back per sample mean/rstd, used in the backward pass
tl.store(M + row, x_mean)
tl.store(V + row, x_inv_sigma)
return x_zm * x_inv_sigma
# fmt: off
@triton.jit
def _layer_norm_non_affine_fw(X, Y, M, V, stride, N, eps, **META):
_store(_layer_norm_non_affine(X, M, V, stride, N, eps, META), Y, stride, N, META)
# fmt: off
@triton.jit
def _layer_norm_fw(X, Y, W, B, M, V, stride, N, eps, **META):
# fmt: on
"""
Fused layernorm kernel over a 3d tensor.
The layer norm is applied over the last dimension.
Compute
y = (x - E(x))/(sqrt(var(x) + epsilon)) * gamma + beta
"""
y = _layer_norm_non_affine(X, M, V, stride, N, eps, META)
y = _affine(W, B, N, y, META)
_store(y, Y, stride, N, META)
# Backward pass (DX + partial DW + partial DB)
# fmt: off
@triton.jit
def _layer_norm_bwd_dx_fused(
DX, DY, DW, DB,
Y, W, B, V,
Lock, stride, N,
**META
):
# fmt: on
GROUP_SIZE_M = META["GROUP_SIZE_M"]
BLOCK_SIZE_N = META["BLOCK_SIZE_N"]
# position of elements processed by this program
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
# offset data pointers to start at the row of interest
y_ptrs = Y + row * stride + cols
dy_ptrs = DY + row * stride + cols
w_ptrs = W + cols
b_ptrs = B + cols
# offset locks and weight/bias gradient pointer
# each kernel instance accumulates partial sums for
# DW and DB into one of GROUP_SIZE_M independent buffers
# these buffers stay in the L2, which allow this kernel
# to be fast
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
# load data to SRAM
y = tl.load(y_ptrs, mask=cols < N, other=0).to(tl.float32)
dy = tl.load(dy_ptrs, mask=cols < N, other=0).to(tl.float32)
w = tl.load(w_ptrs, mask=cols < N, other=0).to(tl.float32)
b = tl.load(b_ptrs, mask=cols < N, other=0).to(tl.float32)
rstd = tl.load(V + row)
# compute dx
xhat = (y - b) / w
wdy = w * dy
xhat = tl.where(cols < N, xhat, 0.0)
wdy = tl.where(cols < N, wdy, 0.0)
mean1 = tl.sum(xhat * wdy, axis=0) / N
mean2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * mean1 + mean2)) * rstd
# write-back dx
_store(dx, DX, stride, N, META)
# accumulate partial sums for dw/db
partial_dw = (dy * xhat).to(w.dtype)
partial_db = dy.to(w.dtype)
# - wait for a lock on the accumulated dw/db
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
# - we got the lock, accumulate this kernel's results with
# the stored values.
dw_ptrs = DW + lock_id * N + cols
db_ptrs = DB + lock_id * N + cols
if count == 0:
# first store doesn't accumulate
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(dw_ptrs, mask=cols < N, other=0.)
partial_db += tl.load(db_ptrs, mask=cols < N, other=0.)
tl.store(dw_ptrs, partial_dw, mask=cols < N)
tl.store(db_ptrs, partial_db, mask=cols < N)
# release lock
tl.atomic_xchg(Lock, 0)
@triton.jit
def _layer_norm_no_affine_bwd(
DX, DY,
Y, V,
stride, N,
**META
):
# fmt: on
# position of elements processed by this program
row = tl.program_id(0)
cols = tl.arange(0, META["BLOCK_SIZE_N"])
# offset data pointers to start at the row of interest
y_ptrs = Y + row * stride + cols
dy_ptrs = DY + row * stride + cols
# load data to SRAM
y = tl.load(y_ptrs, mask=cols < N, other=0).to(tl.float32)
dy = tl.load(dy_ptrs, mask=cols < N, other=0).to(tl.float32)
rstd = tl.load(V + row)
# compute dx
xhat = tl.where(cols < N, y, 0.0)
wdy = tl.where(cols < N, dy, 0.0)
mean1 = tl.sum(xhat * wdy, axis=0) / N
mean2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * mean1 + mean2)) * rstd
# write-back dx
_store(dx, DX, stride, N, META)
# Backward pass (total DW + total DB)
# fmt: off
@triton.jit
def _layer_norm_bwd_dwdb(DW, DB, FINAL_DW, FINAL_DB, M, N, **meta):
# fmt: on
pid = tl.program_id(0)
BLOCK_SIZE_M = meta["BLOCK_SIZE_M"]
BLOCK_SIZE_N = meta["BLOCK_SIZE_N"]
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, meta["BLOCK_SIZE_M"])
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=(rows[:, None] < M) & (cols[None, :] < N), other=0.0)
db += tl.load(DB + offs, mask=(rows[:, None] < M) & (cols[None, :] < N), other=0.0)
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
tl.store(FINAL_DB + cols, sum_db, mask=cols < N)
# FIXME: @lefaudeux tensor shape changes are not well handled, see shape3
class _LayerNorm(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16 if _triton_layernorm_fp16_enabled else None)
def forward(ctx, x, weight, bias, eps):
# allocate output
y = torch.empty_like(x)
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
# allocate mean and std, they'll be used in the backward pass
mean = torch.empty((M,), dtype=torch.float32, device="cuda")
rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE_N:
raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
if not x_arg.is_contiguous() or not y.is_contiguous():
global _triton_registered_warnings
if not _triton_registered_warnings:
                logging.warning("Non-contiguous input tensor found. Making it contiguous,"
                                + " but this could have performance or training implications")
_triton_registered_warnings = True
x_arg = x_arg.contiguous()
y = y.contiguous()
# heuristics for number of warps.
num_warps = min(max(BLOCK_SIZE_N // 256, 1), 8)
# enqueue kernel
# fmt: off
if weight is None:
_layer_norm_non_affine_fw[(M,)](
x_arg, y, mean, rstd,
x_arg.stride(0),
N,
eps,
num_warps=num_warps,
BLOCK_SIZE_N=BLOCK_SIZE_N
)
else:
_layer_norm_fw[(M,)](
x_arg, y, weight, bias, mean, rstd,
x_arg.stride(0),
N,
eps,
num_warps=num_warps,
BLOCK_SIZE_N=BLOCK_SIZE_N
)
# fmt: on
ctx.save_for_backward(y, rstd, weight, bias)
ctx.BLOCK_SIZE_N = BLOCK_SIZE_N
ctx.num_warps = num_warps
ctx.eps = eps
ctx.N = N
return y.reshape_as(x)
@staticmethod
@custom_bwd
def backward(ctx, dy):
y, var, weight, bias = ctx.saved_tensors
# heuristics for amount of parallel reduction stream for DG/DB
N = y.size(-1)
GROUP_SIZE_M = 64
if N <= 8192:
GROUP_SIZE_M = 96
if N <= 4096:
GROUP_SIZE_M = 128
if N <= 1024:
GROUP_SIZE_M = 256
# flatten the batch dimension, if any.
# We're interested in 'samples' x norm_dimension
y = y.reshape(-1, y.size(-1))
M, N = y.size()
# allocate output
locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device="cuda")
t_args = {"dtype": y.dtype, "device": y.device}
_dw = torch.empty((GROUP_SIZE_M, y.size(-1)), **t_args)
_db = torch.empty((GROUP_SIZE_M, y.size(-1)), **t_args)
dw = torch.empty((y.size(-1),), **t_args)
db = torch.empty((y.size(-1),), **t_args)
dy = dy.contiguous()
dx = torch.empty_like(dy)
# Check the tensor shapes and layouts
# we suppose in the kernel that they have the same size and are contiguous
assert dx.numel() == y.numel(), \
"Something is wrong in the backward graph, possibly because of an inplace operation after the layernorm"
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
# fmt: off
meta = {"BLOCK_SIZE_N": ctx.BLOCK_SIZE_N,
"GROUP_SIZE_M": GROUP_SIZE_M,
"num_warps": ctx.num_warps}
if weight is None:
_layer_norm_no_affine_bwd[(M,)](dx, dy, y, var, y.stride(0), N, **meta)
return dx, None, None, None
_layer_norm_bwd_dx_fused[(M,)](
dx, dy, _dw, _db,
y, weight, bias, var,
locks,
y.stride(0),
N,
**meta
)
# fmt: on
def grid(meta):
return [triton.cdiv(N, meta["BLOCK_SIZE_N"])]
# accumulate partial sums in separate kernel
# fmt: off
_layer_norm_bwd_dwdb[grid](
_dw, _db, dw, db,
GROUP_SIZE_M,
N,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=128
)
# fmt: on
dx = dx.reshape_as(dy)
return dx, dw, db, None
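# Usage sketch (illustrative): calling the autograd Function directly and comparing
# against PyTorch's layer_norm. In the library this path is normally reached through
# the `layer_norm` / `FusedLayerNorm` wrappers; a CUDA device and Triton are assumed.
if __name__ == "__main__":
    if torch.cuda.is_available():
        B, S, D = 4, 128, 512
        x = torch.randn(B, S, D, device="cuda", requires_grad=True)
        weight = torch.ones(D, device="cuda", requires_grad=True)
        bias = torch.zeros(D, device="cuda", requires_grad=True)
        eps = 1e-5

        y = _LayerNorm.apply(x, weight, bias, eps)
        y_ref = torch.nn.functional.layer_norm(x, (D,), weight, bias, eps)
        torch.testing.assert_close(y, y_ref, rtol=1e-3, atol=1e-3)

        y.sum().backward()  # populates x.grad, weight.grad and bias.grad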
| bart_ls-main | xformers/xformers/triton/k_layer_norm.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import triton
import triton.language as tl
# fmt: off
@triton.jit
def k_sum_0(
Y, X,
stride_xm,
M, N,
is_fp16,
**meta,
):
    # fmt: on
"""
Sum a 2d tensor over the first (strided) dimension.
This extracts some speed through a parallel sum across the second dimension
"""
BLOCK_M = meta["BLOCK_M"]
BLOCK_N = meta["BLOCK_N"]
# partial row indices. We'll reduce over this dimension
m = tl.arange(0, BLOCK_M)
# To get some extra parallelization, we handle several columns in the same thread block
rn = tl.program_id(axis=0) * BLOCK_N + tl.arange(0, BLOCK_N)
# the memory address of all the elements that we want to load can be computed as follows
x_ptrs = X + m[:, None] * stride_xm + rn[None, :]
x_sum = tl.zeros((BLOCK_N,), dtype=tl.float32)
tiles = M // BLOCK_M
if M % BLOCK_M > 0:
tiles += 1
col_mask = (rn[None, :] < N)
for _ in range(tiles):
# load input data; pad out-of-bounds elements with 0
# NOTE: make sure to accumulate in fp32 to prevent a trivial overflow
mask = (m[:, None] < M) & col_mask
x = tl.load(x_ptrs, mask=mask, other=0.0)
x_sum += tl.sum(x, 0)
# move the load pointer
x_ptrs += BLOCK_M * stride_xm
m += BLOCK_M # update the mask check
tl.store(Y + rn, x_sum, mask=rn < N)
| bart_ls-main | xformers/xformers/triton/k_sum.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
import torch
_gpu_is_old: Optional[bool] = None
def gpu_capabilities_older_than_70() -> bool:
"""Return True if the GPU's compute capability is older than SM70."""
global _gpu_is_old
if _gpu_is_old is None:
for i in range(torch.cuda.device_count()):
major, _ = torch.cuda.get_device_capability(f"cuda:{i}")
if major < 7:
_gpu_is_old = True
if _gpu_is_old is None:
_gpu_is_old = False
return _gpu_is_old
SUPPORTED_CUDA_DEVICES = ["V100", "A100", "T4"]
def get_current_cuda_device():
current_device = str(torch.cuda.get_device_properties(torch.cuda.current_device()))
for device_str in SUPPORTED_CUDA_DEVICES:
if current_device.find(device_str) > 0:
return device_str
logging.warning("Unsupported device, Triton code generation may fail")
return "P100" # default to an old GPU
def assert_almost_equal(x, y, decimal=2, err_msg=""):
import numpy.testing as npt
if isinstance(x, torch.Tensor):
x = x.cpu().detach().numpy()
if isinstance(y, torch.Tensor):
y = y.cpu().detach().numpy()
npt.assert_array_almost_equal(x, y, err_msg=err_msg, decimal=decimal)
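# Usage sketch (illustrative): these helpers are mostly used by tests and benchmarks.
# assert_almost_equal only needs NumPy and also works on CPU tensors; the GPU queries
# assume at least one CUDA device.
if __name__ == "__main__":
    a = torch.tensor([1.000, 2.001])
    b = torch.tensor([1.001, 2.000])
    assert_almost_equal(a, b, decimal=2)  # passes, the entries agree to ~1e-2

    if torch.cuda.is_available():
        print("GPU older than SM70:", gpu_capabilities_older_than_70())
        print("matched device type:", get_current_cuda_device())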
| bart_ls-main | xformers/xformers/triton/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
from xformers.triton.utils import get_current_cuda_device
# CREDITS: Optimized defaults as suggested in the Triton documentation for matrix multiplications
# Handle different SM configurations for the older GPUs
# fmt: off
_configs_P100 = [
triton.Config({'BLOCK_ROW': 64 , 'BLOCK_COL': 32}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_ROW': 32 , 'BLOCK_COL': 64}, num_stages=5, num_warps=2)
]
_configs_V100 = _configs_P100 + [
triton.Config({'BLOCK_ROW': 128, 'BLOCK_COL': 256}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_ROW': 256, 'BLOCK_COL': 128}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_ROW': 256, 'BLOCK_COL': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_ROW': 64 , 'BLOCK_COL': 256}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_ROW': 128, 'BLOCK_COL': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_ROW': 128, 'BLOCK_COL': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_ROW': 64 , 'BLOCK_COL': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_ROW': 128, 'BLOCK_COL': 32}, num_stages=4, num_warps=4),
]
# fmt: on
# NOTE: Could be that different configs would be better
_configs_A100 = _configs_V100
kernel_config = (
{"P100": _configs_P100, "V100": _configs_V100, "A100": _configs_A100}[
get_current_cuda_device()
]
if torch.cuda.is_available()
else {}
)
| bart_ls-main | xformers/xformers/triton/configs_matmul.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import triton
import triton.language as tl
# CREDITS: Initially inspired by the Triton tutorial on matrix multiplications
# fmt: off
@triton.autotune(
configs=[
triton.Config({"BLOCK_ROW": 16, "BLOCK_COL": 16}, num_stages=5, num_warps=1),
triton.Config({"BLOCK_ROW": 32, "BLOCK_COL": 32}, num_stages=5, num_warps=1),
triton.Config({"BLOCK_ROW": 64, "BLOCK_COL": 32}, num_stages=5, num_warps=2),
triton.Config({"BLOCK_ROW": 32, "BLOCK_COL": 64}, num_stages=5, num_warps=2),
triton.Config({"BLOCK_ROW": 128, "BLOCK_COL": 64}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_ROW": 64, "BLOCK_COL": 128}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_ROW": 128, "BLOCK_COL": 128}, num_stages=4, num_warps=4),
# triton.Config({"BLOCK_ROW": 32, "BLOCK_COL": 256}, num_stages=3, num_warps=4),
# triton.Config({"BLOCK_ROW": 256, "BLOCK_COL": 32}, num_stages=3, num_warps=4),
# triton.Config({"BLOCK_ROW": 64, "BLOCK_COL": 256}, num_stages=3, num_warps=8),
# triton.Config({"BLOCK_ROW": 256, "BLOCK_COL": 64}, num_stages=3, num_warps=8),
],
key=["M", "N", "K"],
)
@triton.jit
def kernel_fma(
# Pointers to matrices
OUT, ACT_INPUTS, INPUT, WEIGHT, BIAS,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_om, stride_im,
stride_wn, stride_wk,
# Meta-parameters
**META,
):
# fmt: on
"""
Kernel for computing Out = activation(A x W + C)
- Input has shape (M, K)
- Weight has shape (K, N)
- Bias has shape (N,)
- Output has shape (M, N)
- ActInputs (optional) has shape (M, N)
'ActInputs' optionally saves the A x W + C intermediate for backward computations
This kernel will consolidate over K
"""
# extract metaparameters
BLOCK_M, GROUP_M = META["BLOCK_ROW"], META["GROUP_ROW"]
BLOCK_N, BLOCK_K = META["BLOCK_COL"], META["BLOCK_K"]
# programs are grouped together to improve L2 hit rate
# the logic is that we'll consolidate over K. If the programs were not grouped,
# then multiple cols/rows in the result would end up pulling in the same row and lines
# from the inputs. By grouping the computation we ensure some data reuse, which the hardware
# covers via the L2 cache
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_M) # number of program ids along the M axis
    num_pid_n = tl.cdiv(N, BLOCK_N) # number of program ids along the N axis
num_pid_in_group = GROUP_M * num_pid_n # number of programs in group
group_id = pid // num_pid_in_group # id of the group this program is in
first_pid_m = group_id * GROUP_M # row-id of the first program in the group
GROUP_M = min(
num_pid_m - first_pid_m, GROUP_M
) # if `num_pid_m` isn't divisible by `GROUP_M`, the last group is smaller
# *within groups*, programs are ordered in a column-major order
# row-id /col-id of the program in the *launch grid*
pid_m = first_pid_m + (pid % GROUP_M)
pid_n = (pid % num_pid_in_group) // GROUP_M
# now compute the block that each program will go through
# rm (resp. rn) denotes a range of indices
# for rows (resp. col) of C
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
rk = tl.arange(0, BLOCK_K)
# the memory addresses of elements can follow numpy broadcasting
input_ptrs = INPUT + rm[:, None] * stride_im + rk[None, :]
weight_ptrs = WEIGHT + rk[:, None] * stride_wk + rn[None, :] * stride_wn
# initialize and iteratively update accumulator
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
if META["BIAS"]:
bias = tl.load(BIAS + rn, mask=rn < N, other=0.0).to(tl.float32)
acc += bias[None, :]
# block level matrix multiplication.
# We fetch a block memory block from both inputs, matmul and accumulate, then repeat
for _ in range(K, 0, -BLOCK_K):
a = tl.load(input_ptrs, mask=((rk[None, :] < K) & (rm[:, None] < M)), other=0.0)
w = tl.load(weight_ptrs, mask=((rk[:, None] < K) & (rn[None, :] < N)), other=0.0)
acc += tl.dot(a, w).to(tl.float32)
input_ptrs += BLOCK_K
weight_ptrs += BLOCK_K * stride_wk
# optional: save the activation inputs
if META["SAVE_ACT_INPUTS"]:
act_in_ptrs = ACT_INPUTS + rm[:, None] * stride_om + rn[None, :]
tl.store(act_in_ptrs, acc, mask=(rm[:, None] < M) & (rn[None, :] < N))
# optional: fused activation (while the data is in shared memory)
if META["ACTIVATION"]:
acc = META["ACTIVATION"](acc)
# write back result
out_ptrs = OUT + rm[:, None] * stride_om + rn[None, :]
tl.store(out_ptrs, acc, mask=(rm[:, None] < M) & (rn[None, :] < N))
# Activation needs to be a triton kernel
def fused_matmul(
x: torch.Tensor,
weight: torch.Tensor,
bias: Optional[torch.Tensor],
activation=None,
save_act_inputs: bool = False
):
"""
Compute e = activation(x @ weight + bias).
This wrapper kicks the `kernel_fma` Triton kernel
"""
if not x.is_contiguous():
x = x.contiguous()
x_ = x if x.ndim == 2 else x.flatten(0, 1)
assert (
x_.shape[1] == weight.shape[1]
), f"Incompatible dimensions in between inputs and weight, {x_.shape} - {weight.shape}"
assert bias is None or bias.is_contiguous()
assert (
bias is None or bias.shape[0] == weight.shape[0]
), "Incompatible dimensions in between weight and bias"
M, K = x_.shape
N, K = weight.shape
outputs = torch.empty((M, N), device=x.device, dtype=x.dtype)
act_inputs = torch.empty_like(outputs) if save_act_inputs else x # will not be used in that case
# 1D launch kernel where each block gets its own program.
def grid(META):
return (
triton.cdiv(M, META["BLOCK_ROW"]) * triton.cdiv(N, META["BLOCK_COL"]),
)
# fmt: off
kernel_fma[grid](
# data ptrs
outputs, act_inputs, x_, weight,
bias if bias is not None else x, # auto skip bias if not present
# shapes
M, N, K,
# strides
outputs.stride(0), x_.stride(0),
weight.stride(0), weight.stride(1),
# optional fused activation
ACTIVATION=activation,
# optional fused bias
BIAS=bias is not None,
# speed optimization: group the programs
# improve on data reuse in L2 cache
GROUP_ROW=8,
BLOCK_K=32,
SAVE_ACT_INPUTS=save_act_inputs
)
# fmt: on
outputs = outputs if x.ndim == 2 else outputs.reshape(x.shape[0], -1, N)
return outputs, act_inputs if save_act_inputs else None
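# Usage sketch (illustrative): forward-only call pattern for this wrapper, checked
# against F.linear. A CUDA device and Triton are assumed; sizes are arbitrary except
# that K must be a multiple of 32 (see the assert above). With save_act_inputs=True
# the pre-activation x @ W^T + b is returned as a flattened (M, N) buffer.
if __name__ == "__main__":
    if torch.cuda.is_available():
        x = torch.randn(16, 128, 64, device="cuda", dtype=torch.float16)
        weight = torch.randn(256, 64, device="cuda", dtype=torch.float16)
        bias = torch.randn(256, device="cuda", dtype=torch.float16)

        out, act_in = fused_matmul(x, weight, bias, activation=None, save_act_inputs=True)
        ref = torch.nn.functional.linear(x, weight, bias)
        torch.testing.assert_close(out, ref, rtol=1e-2, atol=1e-2)
        # no activation was fused, so the saved activation inputs equal the outputs
        torch.testing.assert_close(act_in.view_as(out), out)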
| bart_ls-main | xformers/xformers/triton/k_fused_matmul_fw.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This comes almost as-is from the Triton dropout tutorial
# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py
from typing import Optional
import torch
import triton
from torch.cuda.amp import custom_bwd, custom_fwd
from xformers.components.activations import Activation, build_activation
from xformers.triton.k_activations import (
get_triton_activation_bwd_kernel,
get_triton_activation_kernel,
)
from xformers.triton.k_dropout import k_dropout_bw, k_dropout_fw
GROUP_M = 32
BLOCK_M = GROUP_M // 4
BLOCK_N = 128
# Helper to handle the SPMD launch grid and error cases
class _dropout(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, p, bias, activation, activation_grad, trainable_bias):
        # Soft-flatten a hypothetical 3rd dimension
x_ = x.reshape(-1, x.shape[-1]).contiguous()
y = torch.empty_like(x_)
M, N = x_.shape
assert bias is None or (bias.dtype == x.dtype and bias.shape[0] == N)
def grid(meta):
# NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for
# a given seed and offsets. "BLOCK_M" here describes the size of one of these blocks
# but we need to take this factor of 4 into account when scheduling all the kernels
return (
triton.cdiv(M, meta["BLOCK_M"] * 4),
triton.cdiv(N, meta["BLOCK_N"]),
)
N_BLOCK_N = triton.cdiv(N, BLOCK_N)
        # Generate one seed per column block
        # seeds are drawn in [0, 2**16), which comfortably fits in a positive int32
        seeds = torch.randint(65536, (N_BLOCK_N,), device=x.device).to(torch.int32)
# fmt: off
k_dropout_fw[grid](
y, x_,
bias if bias is not None else x_,
seeds,
y.stride(0),
M, N,
p,
x.dtype == torch.float16,
USE_BIAS=bias is not None,
ACTIVATION=activation,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
)
# fmt: on
if activation is not None:
ctx.save_for_backward(seeds, bias, x)
else:
ctx.save_for_backward(seeds, bias, None)
ctx.trainable_bias = bias is not None and trainable_bias
ctx.activation_grad = activation_grad
ctx.p = p
return y.reshape_as(x)
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
(seeds, bias, inputs) = ctx.saved_tensors
        # Soft-flatten a hypothetical 3rd dimension
grad_out_ = grad_out.reshape(-1, grad_out.shape[-1]).contiguous()
grad_in = torch.empty_like(grad_out_)
M, N = grad_out_.shape
# Optional inputs to compute the activation contribution to the gradient
assert inputs is not None or ctx.activation_grad is None
if inputs is None:
inputs = grad_out_
elif inputs.ndim > 2:
inputs = inputs.reshape(-1, N)
        # We split the problem in tiles:
        # - over M there will be a follow-up reduction
        # - over M, we go by 4 tiles at a time (a consequence of the random number generation)
        # - over N we compromise between using as much memory parallelism as possible
        #   (fill in the warps; there are 32 threads per warp, and 4 warps by default) and not going
        #   too big, because of register spilling
N_BLOCKS_M = triton.cdiv(M, GROUP_M)
if ctx.trainable_bias:
grad_bias = torch.empty(
(
N_BLOCKS_M,
N,
),
device=grad_in.device,
dtype=grad_in.dtype,
)
else:
grad_bias = grad_in # will not be used
def grid(meta):
# NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for
# a given seed and offsets. "BLOCK_M" here describes the size of one of these blocks
# but we need to take this factor of 4 into account when scheduling all the kernels
return (
triton.cdiv(M, meta["BLOCK_M"] * 4),
triton.cdiv(N, meta["BLOCK_N"]),
)
# fmt: off
k_dropout_bw[grid](
grad_in, grad_bias, grad_out_,
inputs, bias if bias is not None else inputs,
seeds,
grad_out_.stride(0), inputs.stride(0),
M, N,
ctx.p,
grad_in.dtype == torch.float16,
USE_BIAS=bias is not None,
ACTIVATION_GRAD=ctx.activation_grad,
TRAINABLE_BIAS=ctx.trainable_bias,
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
)
# fmt: on
return (
grad_in.reshape_as(grad_out),
None,
torch.sum(grad_bias, dim=0) if ctx.trainable_bias else None,
None,
None,
None,
)
def dropout(
x: torch.Tensor,
p: float,
bias: Optional[torch.Tensor] = None,
activation: Optional[Activation] = None,
):
"""
Apply dropout on the input tensor.
Optionally add a bias, the computation will be fused.
"""
# Micro optim, skip dropout
if p == 0.0 and activation is None:
return x + bias if bias is not None else x
act_kernel = get_triton_activation_kernel(activation)
act_grad_kernel = get_triton_activation_bwd_kernel(activation)
return _dropout.apply(
x,
p,
bias,
act_kernel,
act_grad_kernel,
bias is not None and bias.requires_grad,
)
class FusedDropoutBias(torch.nn.Module):
def __init__(
self,
p: float,
bias_shape: Optional[int],
activation: Optional[Activation] = None,
) -> None:
super().__init__()
self.p = p
self.activation_type = activation
self.bias = (
torch.zeros(bias_shape, requires_grad=True)
if bias_shape is not None
else None
)
self.activation = get_triton_activation_kernel(activation)
self.pytorch_activation = build_activation(self.activation_type)
self.activation_grad = get_triton_activation_bwd_kernel(activation)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Convenience, catch a possible type or device mismatch
if self.bias is not None: # type: ignore
self.bias = self.bias.to(dtype=x.dtype, device=x.device) # type: ignore
# Train/inference
p = self.p if self.training else 0.0
# This kernel is slower than pytorch for small buffers, bypassing it in that case
perf_check = x.shape[-1] > 512
# Catch a non-cuda setup, fallback to pytorch
if not x.is_cuda or not perf_check:
x = x + self.bias if self.bias is not None else x
x = self.pytorch_activation(x)
return torch.nn.functional.dropout(x, p)
# The normal, Triton-backed path
return _dropout.apply(
x, p, self.bias, self.activation, self.activation_grad, True
)
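# Usage sketch (illustrative): functional and module-style use of the fused
# dropout(+bias+activation). A CUDA device and Triton are assumed; note that
# FusedDropoutBias falls back to plain PyTorch on CPU or when the feature
# dimension is <= 512 (see the perf_check above).
if __name__ == "__main__":
    if torch.cuda.is_available():
        x = torch.randn(8, 256, 1024, device="cuda", dtype=torch.float16)
        bias = torch.zeros(1024, device="cuda", dtype=torch.float16)

        y = dropout(x, p=0.1, bias=bias, activation=Activation.GeLU)
        print(y.shape)  # same shape as x, ~10% of the activations dropped

        module = FusedDropoutBias(
            p=0.1, bias_shape=1024, activation=Activation.SquaredReLU
        )
        module.train()
        print(module(x).shape)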
| bart_ls-main | xformers/xformers/triton/dropout.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This comes almost as-is from the Triton dropout tutorial
# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py
import triton
import triton.language as tl
_configs = [
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
]
@triton.jit
def _get_4_bin_masks(seed, rand_offsets, p):
seed = tl.load(seed)
rand1, rand2, rand3, rand4 = tl.randint4x(seed, rand_offsets)
    # binarize masks, save registers
    # NOTE: We keep the random numbers as they are (int32 integers)
    # and convert the threshold instead, for speed.
    # The raw values are distributed over [-2**31, 2**31 - 1]
    # while our float threshold lies in [0, 1],
    # so the comparison point is `start_point + full_range * p`
threshold = (-2147483648.0 + 4294967295.0 * p).to(tl.int32)
rand_mask1 = rand1 > threshold
rand_mask2 = rand2 > threshold
rand_mask3 = rand3 > threshold
rand_mask4 = rand4 > threshold
return rand_mask1, rand_mask2, rand_mask3, rand_mask4
@triton.jit
def _random_prune_and_scale(x, rand_mask, p, p_scale):
zero = 0.0
if p > 0.0:
# generate all the random numbers for the block at once, then reshape
keep = tl.reshape(rand_mask, x.shape)
# prune and normalize in one go
x = tl.where(keep, (x * p_scale).to(x.dtype), zero.to(x.dtype))
return x
# fmt: off
@triton.heuristics({"SIZE_RAND_BLOCK": lambda *_, **meta: meta["BLOCK_N"] * meta["BLOCK_M"]})
@triton.autotune(
configs=_configs,
key=["M", "N", "is_fp16"],
)
@triton.jit
def k_dropout_fw(
Y, X, BIAS, SEEDS,
stride,
M, N,
p,
is_fp16, # autotune
**meta,
):
"""
Apply dropout on an input tensor
Y : Output (M, N)
X : Input (M, N)
BIAS (N,)
SEEDS (M,)
p : dropout probability
"""
# fmt: on
BLOCK_M = meta["BLOCK_M"]
BLOCK_N = meta["BLOCK_N"]
SIZE_RAND_BLOCK = meta["SIZE_RAND_BLOCK"]
row_id = tl.program_id(axis=0)
rows = row_id * BLOCK_M * 4 + tl.arange(0, BLOCK_M)
col_id = tl.program_id(axis=1)
cols = col_id * BLOCK_N + tl.arange(0, BLOCK_N)
seed = SEEDS + col_id
# pointers starting point
x_ptrs = X + rows[:, None] * stride + cols[None, :]
y_ptrs = Y + rows[:, None] * stride + cols[None, :]
# go over all the tiles, one by one
rand_offsets = tl.arange(0, SIZE_RAND_BLOCK) + row_id * BLOCK_M * 4
rand_mask1, rand_mask2, rand_mask3, rand_mask4 = _get_4_bin_masks(seed, rand_offsets, p)
col_mask = cols[None, :] < N
p_scale = 1 / (1 - p) if p < 1. else 1.
if meta["USE_BIAS"]:
b_ptrs = BIAS + cols[None, :]
bias = tl.load(b_ptrs, mask=cols[None, :] < N, other=0.)
for i in range(4):
# cycle through the binary masks (workaround / no indexing)
if i == 0:
rand_mask = rand_mask1
elif i == 1:
rand_mask = rand_mask2
elif i == 2:
rand_mask = rand_mask3
else:
rand_mask = rand_mask4
block_mask = (rows[:, None] < M) & col_mask
x = tl.load(x_ptrs, mask=block_mask, other=0.)
# optionally apply a fused bias
if meta["USE_BIAS"]:
x += bias
# optional: fused activation (while the data is in shared memory)
if meta["ACTIVATION"]:
x = meta["ACTIVATION"](x)
# randomly prune (and scale) the resulting buffer, possibly a no-op
output = _random_prune_and_scale(x, rand_mask, p, p_scale)
tl.store(y_ptrs, output, mask=block_mask)
# Update the pointers
rows += BLOCK_M # needs to be updated for the mask to be correct
x_ptrs += BLOCK_M * stride
y_ptrs += BLOCK_M * stride
# fmt: off
@triton.heuristics({"SIZE_RAND_BLOCK": lambda *_, **meta: meta["BLOCK_N"] * meta["BLOCK_M"]})
@triton.autotune(
configs=_configs,
key=["M", "N", "is_fp16"],
)
@triton.jit
def k_dropout_bw(
GRAD_IN, GRAD_BIAS, GRAD_OUT,
INPUTS, BIAS, SEEDS,
stride_grad, stride_inputs,
M, N,
p,
is_fp16, # autotune
**meta,
):
"""
Apply dropout on an input tensor
GRAD_OUT (M, N)
GRAD_BIAS (N,)
GRAD_IN (M, N)
BIAS (N,)
SEEDS (N,)
p : dropout probability
"""
# fmt: on
BLOCK_M = meta["BLOCK_M"]
BLOCK_N = meta["BLOCK_N"]
SIZE_RAND_BLOCK = meta["SIZE_RAND_BLOCK"]
TRAINABLE_BIAS = meta["TRAINABLE_BIAS"]
rows = tl.arange(0, BLOCK_M)
row_id = tl.program_id(axis=0)
rows = row_id * BLOCK_M * 4 + tl.arange(0, BLOCK_M)
col_id = tl.program_id(axis=1)
cols = col_id * BLOCK_N + tl.arange(0, BLOCK_N)
seed = SEEDS + col_id # FIXME index the seed properly
# pointers starting point
grad_out_ptrs = GRAD_OUT + rows[:, None] * stride_grad + cols[None, :]
grad_in_ptrs = GRAD_IN + rows[:, None] * stride_grad + cols[None, :]
input_ptrs = INPUTS + rows[:, None] * stride_inputs + cols[None, :]
# random binary masks, save registers
rand_offsets = tl.arange(0, SIZE_RAND_BLOCK) + row_id * BLOCK_M * 4
rand_mask1, rand_mask2, rand_mask3, rand_mask4 = _get_4_bin_masks(seed, rand_offsets, p)
# now go over the tiles
grad_bias = tl.zeros((BLOCK_N,), dtype=tl.float32)
col_mask = cols[None, :] < N
p_scale = 1 / (1 - p) if p < 1. else 1.
if meta["USE_BIAS"]:
b_ptrs = BIAS + cols[None, :]
bias = tl.load(b_ptrs, mask=col_mask, other=0.)
for i in range(4):
# cycle through the binary masks (workaround / no indexing)
if i == 0:
rand_mask = rand_mask1
elif i == 1:
rand_mask = rand_mask2
elif i == 2:
rand_mask = rand_mask3
else:
rand_mask = rand_mask4
block_mask = (rows[:, None] < M) & col_mask
grad_out = tl.load(grad_out_ptrs, mask=block_mask, other=0.)
# optional: fused activation (while the data is in shared memory)
if meta["ACTIVATION_GRAD"]:
inputs = tl.load(input_ptrs, mask=block_mask, other=0.)
# optionally apply a fused bias
if meta["USE_BIAS"]:
inputs += bias
act_grad = meta["ACTIVATION_GRAD"](inputs).to(grad_out.dtype)
grad_out *= act_grad
# randomly prune (and scale) the resulting buffer, possibly a no-op
# note that even if we did not save the mask from the FW pass, it is generated
# from the same seeds, so the same drop mask is applied here
output = _random_prune_and_scale(grad_out, rand_mask, p, p_scale)
# write-back
tl.store(grad_in_ptrs, output, mask=block_mask)
# optionally accumulate the bias gradient
if TRAINABLE_BIAS:
grad_bias += tl.sum(output, axis=0)
# Update the pointers
rows += BLOCK_M # needs to be updated for the mask to be correct
grad_out_ptrs += BLOCK_M * stride_grad
input_ptrs += BLOCK_M * stride_inputs
grad_in_ptrs += BLOCK_M * stride_grad
if TRAINABLE_BIAS:
grad_bias_ptr = GRAD_BIAS + row_id * N + cols
tl.store(grad_bias_ptr, grad_bias, mask=cols < N)
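# Illustrative reference: a minimal PyTorch sketch of what the fused forward kernel
# computes, assuming 0 <= p < 1, i.e. dropout(activation(x + bias)) with the kept
# values rescaled by 1 / (1 - p). The kernel draws its keep-mask from per-column
# seeds, so the dropped positions differ from this reference; only the semantics match.
def _dropout_fw_reference(x, bias=None, activation=None, p=0.1):
    import torch

    y = x if bias is None else x + bias
    if activation is not None:
        y = activation(y)
    keep = (torch.rand_like(y) > p).to(y.dtype)  # keep each element with probability 1 - p
    return y * keep / (1.0 - p)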
|
bart_ls-main
|
xformers/xformers/triton/k_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import Enum
from typing import Optional
import torch
import triton
from torch.cuda.amp import custom_bwd, custom_fwd
from xformers.triton.k_softmax import _softmax, _softmax_backward
# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
_triton_registered_overflow = False
_triton_registered_warnings = False
_triton_softmax_fp16_enabled = False # NOTE: PyTorch keeps softmax as fp32
class MaskType(str, Enum):
ADD = "add"
MUL = "mul"
# Helper to handle the SPMD launch grid and error cases
class _softmax_triton(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16 if _triton_softmax_fp16_enabled else None)
def forward(ctx, x, mask, log_outputs, causal):
"""
Fused softmax implementation, using the Triton programming model.
This only supports a reduction over the last dimension for now
"""
# Handle 2D/3D tensors
x_ = x.unsqueeze(0) if x.ndim == 2 else x
if not x_.is_contiguous():
x_ = x_.contiguous()
y = torch.empty_like(x_)
assert (
y.stride(2) == 1 and x_.stride(2) == 1
), f"{x.shape} - {x_.shape} - {x_.stride()}"
# SPMD launch grid
grid_2d = (
x_.shape[0],
x_.shape[1],
)
# enqueue GPU kernel
use_mask = True
if mask is None:
# placeholder, will not be used
mask = x_
use_mask = False
else:
# Make sure that the mask is binary
assert mask.dtype == x.dtype, "An additive mask is requested"
_softmax[grid_2d](
y,
x_,
mask,
y.stride(0),
y.stride(1),
x_.stride(0),
x_.stride(1),
mask.stride(0),
x_.shape[2],
log=log_outputs,
use_mask=use_mask,
causal=causal,
)
ctx.save_for_backward(y)
ctx.log_outputs = log_outputs
ctx.causal = causal
return y.reshape_as(x)
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
(out,) = ctx.saved_tensors
# Handle 2D/3D tensors
grad_out_ = grad_out.unsqueeze(0) if grad_out.ndim == 2 else grad_out
# SPMD launch grid
grid_2d = (
grad_out_.shape[0],
grad_out_.shape[1],
)
depth = triton.next_power_of_2(grad_out_.shape[2])
grad_in = torch.empty_like(
out
) # torch.zeros is measurably slower, we'll zero out in the kernel
# Make sure that the tensors are contiguous
grad_in, grad_out, out = map(lambda x: x.contiguous(), [grad_in, grad_out, out])
# fmt: off
_softmax_backward[grid_2d](
grad_in, grad_out_, out,
grad_in.stride(0), grad_in.stride(1),
grad_out_.stride(0), grad_out_.stride(1),
out.stride(0), out.stride(1),
out.shape[2],
depth=depth,
log=ctx.log_outputs,
causal=ctx.causal
)
# fmt: on
return grad_in.reshape_as(grad_out), None, None, None
def softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
) -> torch.Tensor:
r"""Applies the Softmax function to an 3-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
.. warning: softmax is computed on the last dimension of the input tensor.
Args:
x: input tensor.
mask: optional mask, its application will be fused to the softmax computation if triton is used
causal: optional performance optimization, if triton is used and the attention is causal
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1] and sum to 1
"""
return _softmax_dispatch(x, log=False, mask=mask, causal=causal)
def log_softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
) -> torch.Tensor:
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an 3-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
x: input tensor.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
"""
return _softmax_dispatch(x, log=True, mask=mask, causal=causal)
def _softmax_dispatch(
x: torch.Tensor, log: bool, mask: Optional[torch.Tensor], causal: bool = False
) -> torch.Tensor:
# Triton is used if
# - CUDA
# - there's enough data to make it faster than pytorch. This could change over time, Triton is improving
# - there was no previous failure
global _triton_registered_overflow
global _triton_registered_warnings
try:
if torch.cuda.is_available() and x.is_cuda and not _triton_registered_overflow:
return _softmax_triton.apply(x, mask, log, causal)
except (triton.code_gen.OutOfResources, RuntimeError) as e:
# Catch cases where the current GPU does not have enough registers to hold a full tensor line
# fallback to PyTorch's implementation, which streams the tensor in and out
_triton_registered_overflow = True
logging.warning(
"Triton softmax kernel register spillover or invalid image caught."
"Deactivating this kernel, please file an issue int the xFormers repository"
)
logging.warning(e)
if causal and not _triton_registered_warnings:
logging.warning(
"Triton softmax could not be used. \
The causal flags is being passed but it does not provide any benefit with PyTorch softmax."
)
_triton_registered_warnings = True
if mask is not None:
x = x + mask
if log:
return torch.log_softmax(x, dim=-1)
else:
return torch.softmax(x, dim=-1)
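# Illustrative usage: a minimal sketch, assuming triton is installed. On CPU, or if
# the fused kernel cannot be used, _softmax_dispatch transparently falls back to
# torch.softmax / torch.log_softmax.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.randn(8, 16, 128, device=device)
    additive_mask = torch.zeros(16, 128, device=device)  # 0 means "no change"
    probs = softmax(x, mask=additive_mask)                # sums to 1 over the last dim
    log_probs = log_softmax(x)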
|
bart_ls-main
|
xformers/xformers/triton/softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: the underlying kernel comes straight from the Triton tutorials
# see https://github.com/openai/triton/blob/master/python/tutorials/05-layer-norm.py
import logging
from typing import Optional
import torch
import torch.nn as nn
import triton
from xformers.triton.k_layer_norm import _LayerNorm
_triton_registered_warnings = False
class FusedLayerNorm(nn.Module):
"""
Handle a layer normalization, like torch.nn.LayerNorm_.
This implementation should be measurably faster than the default PyTorch layernorm (as of PyTorch 1.9),
both for training and inference workloads.
.. NOTE: Computations under Torch AMP are kept as float32 by default, one can change this to be float16
by setting the flag `xformers.triton.k_layer_norm._triton_layernorm_fp16_enabled = True`
.. _torch.nn.LayerNorm: https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html
"""
def __init__(self, normalized_shape, affine=True, eps=1e-05):
super().__init__()
if affine:
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
else:
self.weight = self.bias = None
self.epsilon = eps
def forward(self, x):
return layer_norm(x, self.weight, self.bias, self.epsilon)
def layer_norm(
x: torch.Tensor,
weight: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
eps: float = 1e-05,
) -> torch.Tensor:
r"""Applies normalization over a mini batch of inputs"""
global _triton_registered_warnings
try:
if (
not _triton_registered_warnings
and torch.cuda.is_available()
and x.is_cuda
and weight is not None
and bias is not None
):
return _LayerNorm.apply(x, weight, bias, eps)
except (triton.code_gen.OutOfResources, RuntimeError) as e:
# Catch cases where the current GPU does not have enough registers to hold a full tensor line
# fallback to PyTorch's implementation, which streams the tensor in and out
_triton_registered_warnings = True
logging.warning(
"Triton layernorm kernel register spillover or invalid image caught. "
"Deactivating this kernel, please file an issue int the xFormers repository"
)
logging.warning(e)
return torch.nn.functional.layer_norm(
x, [x.shape[-1]], weight=weight, bias=bias, eps=eps
)
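# Illustrative usage: a minimal sketch, assuming triton is installed. With a CPU
# tensor (or if the kernel fails), layer_norm falls back to torch.nn.functional.layer_norm.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.randn(4, 128, 256, device=device)
    ln = FusedLayerNorm(256).to(device)
    y = ln(x)  # same shape as x, normalized over the last dimension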
|
bart_ls-main
|
xformers/xformers/triton/layer_norm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import triton
import triton.language as tl
from xformers.triton.sum_strided import sum_2d_dim_0
# fmt: off
@triton.heuristics({
'EVEN_N': lambda *args, **meta: args[3] % (meta['BLOCK_COL']) == 0,
})
@triton.autotune(
configs=[
triton.Config({"BLOCK_COL": 32}, num_stages=5, num_warps=2),
triton.Config({"BLOCK_COL": 64}, num_stages=5, num_warps=2),
triton.Config({"BLOCK_COL": 128}, num_stages=3, num_warps=4),
triton.Config({"BLOCK_COL": 256}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_COL": 512}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_COL": 1024}, num_stages=3, num_warps=16),
],
key=["N"],
)
@triton.jit
def kernel_bw(
# Pointers to matrices
GRAD_ACT, GRAD_OUT, ACT_INPUTS,
# Matrix dimensions
N,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_gom, stride_aim,
# Meta-parameters
**META,
):
# fmt: on
"""
Go over all the activation inputs, compute the corresponding gradient
"""
# extract metaparameters
BLOCK_N = META["BLOCK_COL"]
# this kernel is relatively simple in terms of scheduling:
# - per row (pid_m)
# - each program handles a given chunk on the col axis,
#   since it's more effective memory- and occupancy-wise
pid_m, pid_n = tl.program_id(axis=0), tl.program_id(axis=1)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# the memory addresses of elements in the first block of
# A and W can be computed using numpy-style broadcasting
act_input_ptrs = ACT_INPUTS + pid_m * stride_aim + rn
# compute the gradient which is related to this activation
if META["EVEN_N"]:
act_in = tl.load(act_input_ptrs)
else:
act_in = tl.load(act_input_ptrs, mask=rn < N, other=0.0)
grad_act = META["ACTIVATION_GRAD"](act_in)
# now read the incoming gradient, the backpropagated one is the multiple of both
grad_out_ptrs = GRAD_OUT + pid_m * stride_gom + rn
if META["EVEN_N"]:
grad_out = tl.load(grad_out_ptrs)
else:
grad_out = tl.load(grad_out_ptrs, mask=rn < N)
grad_act *= grad_out
# write back result
grad_act_ptrs = GRAD_ACT + pid_m * stride_gom + rn
tl.store(grad_act_ptrs, grad_act, mask=rn < N)
def fused_matmul_backward(
grad_out: torch.Tensor,
inputs: torch.Tensor,
act_in: Optional[torch.Tensor],
weight: torch.Tensor,
trainable_weight: bool,
trainable_bias: bool,
activation_grad=None,
):
"""
Compute grad_in = activation^-1(grad_out) @ weight.transpose()
.. note: The weight buffer is transposed on the fly
.. note: Activation gradient needs to be a Triton kernel
"""
# Make sure that we don't have to handle the stride over cols
if not grad_out.is_contiguous():
grad_out = grad_out.contiguous()
grad_out_ = grad_out if grad_out.ndim == 2 else grad_out.flatten(0, 1)
inputs_ = inputs if inputs.ndim == 2 else inputs.flatten(0, 1)
assert grad_out_.shape[1] == weight.shape[0], "Incompatible dimensions in between grad_out and weight"
M, N = grad_out_.shape
N, _ = weight.shape
# Compute the gradient for the activation
if activation_grad is not None:
grad_act = torch.empty_like(grad_out_)
# Some activations do not require their inputs to
# compute their gradient, the downstream grad is enough
if act_in is None:
act_in = grad_out_
def grid(META):
return (
M,
triton.cdiv(N, META["BLOCK_COL"]),
)
# fmt: off
kernel_bw[grid](
# data ptrs
grad_act, grad_out_, act_in,
# shapes
N,
# strides
grad_act.stride(0), act_in.stride(0),
# optional fused activation
ACTIVATION_GRAD=activation_grad,
)
# Backpropagation going up, the reference gradient is now
# just before the activation
grad_out_ = grad_act
# The following ops can also be handled by triton
grad_in = grad_out_ @ weight
grad_weight = grad_out_.transpose(1, 0) @ inputs_ if trainable_weight else None
grad_bias = sum_2d_dim_0(grad_out_) if trainable_bias else None
return grad_in.reshape_as(inputs), grad_weight, grad_bias
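# Illustrative reference: a minimal PyTorch sketch of what fused_matmul_backward
# computes for 2D tensors, assuming the forward pass was
# y = activation(inputs @ weight.t() + bias) and act_grad is a plain Python callable
# (the real code uses a Triton kernel for the fused activation gradient).
def _fused_matmul_bw_reference(grad_out, inputs, act_in, weight, act_grad=None):
    grad_pre = grad_out if act_grad is None else grad_out * act_grad(act_in)
    grad_in = grad_pre @ weight                       # (M, K), gradient wrt the inputs
    grad_weight = grad_pre.transpose(1, 0) @ inputs   # (N, K), gradient wrt the weight
    grad_bias = grad_pre.sum(dim=0)                   # (N,),   gradient wrt the bias
    return grad_in, grad_weight, grad_bias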
|
bart_ls-main
|
xformers/xformers/triton/k_fused_matmul_bw.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
import triton.language as tl
# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
def get_depth(*args, **_):
return triton.next_power_of_2(args[-1])
# autotune: Triton will test out these configurations, and automatically pick the fastest one.
# heuristic: add arguments to the kernel call automatically given some heuristics. These arguments are passed in "meta"
# fmt: off
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["K"],
)
@triton.heuristics(values={"depth": get_depth , "is_fp16": lambda *args, **_: args[0].dtype == torch.float16})
@triton.jit
def _softmax(
Y, X, M,
stride_ym, stride_yn,
stride_xm, stride_xn,
stride_mn,
K,
**meta, # extra parameters which can be automatically filled in given some heuristics
):
# fmt: on
"""
Fused softmax kernel over a 3d tensor.
The softmax is applied over the last dimension, meaning that this is equivalent to torch.softmax(tensor, dim=-1)
Note, if the last dimension is large, say 128K elements, the kernel compile time can shoot up to many minutes when
the kernel is run for the first time.
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, meta["depth"])
# the memory address of all the elements that we want to load can be computed as follows
x_ptrs = X + m * stride_xm + n * stride_xn + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if meta["causal"]:
io_mask = io_mask & (k <= n)
x = tl.load(x_ptrs, mask=io_mask, other=float("-inf"))
# Causal - 2: enforce correctness over a couple of misloaded values
if meta["causal"]:
off = float("-inf")
off = off.to(x.dtype)
x = tl.where(k > n, off, x)
if meta["use_mask"]:
mask_ptrs = M + n * stride_mn + k
add_mask = tl.load(mask_ptrs, io_mask, other=float("-inf"))
x += add_mask
# compute numerically-stable softmax
z = x - tl.max(x, axis=0)
if meta["is_fp16"]:
# tl.exp() crashes on fp16 values
# See https://github.com/openai/triton/issues/241
z = z.to(tl.float32)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
if meta["log"]:
y = z - tl.log(denom)
else:
y = num / denom
# write back to Y.
# we only write once, hence the "fused" softmax naming
y_ptrs = Y + m * stride_ym + n * stride_yn + k
# technically we could write only the lower triangular matrix in the causal case
# but this is deemed too error prone
tl.store(y_ptrs, y, mask=k < K)
# fmt: off
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
],
key=["K"],
)
@triton.heuristics(values={"is_fp16": lambda *args, **_: args[0].dtype == torch.float16})
@triton.jit
def _softmax_backward(
GradIn, GradOut, Out,
stride_bm, stride_bn,
stride_gm, stride_gn,
stride_om, stride_on,
K,
**meta,
):
# fmt: on
"""
Compute the softmax gradients.
..Note: Not autotuning for now because this would lead to broken accumulated gradients
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, meta["depth"])
# the memory address of all the elements that we want to load can be computed as follows
grad_out_ptrs = GradOut + m * stride_gm + n * stride_gn + k
out_ptrs = Out + m * stride_om + n * stride_on + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if meta["causal"]:
io_mask = io_mask & (k <= n)
g = tl.load(grad_out_ptrs, mask=io_mask, other=float(0))
o = tl.load(out_ptrs, mask=io_mask, other=float(0))
# Causal - 2: enforce correctness over a couple of misloaded values
if meta["causal"]:
zero = float(0)
zero = zero.to(g.dtype)
g = tl.where(k > n, zero, g)
o = tl.where(k > n, zero, o)
if meta["log"]:
s = tl.sum(g, 0)
if meta["is_fp16"]:
o = o.to(tl.float32)
grad_in = g - tl.exp(o) * s
else:
# Step 1: Compute the intermediate sum used for the gradient
s = tl.sum(g * o, 0)
# Step 2: Compute the gradients
grad_in = o * (g - s)
# write back to the input gradients
# technically we could write only the lower triangular matrix in the causal case
# but this is deemed too error prone
grad_in_ptrs = GradIn + m * stride_bm + n * stride_bn + k
tl.store(grad_in_ptrs, grad_in, mask=k < K)
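# Illustrative reference: a minimal PyTorch sketch of the gradient formulas
# implemented by _softmax_backward, for a (log-)softmax output `out`.
def _softmax_bw_reference(grad_out, out, log=False):
    if log:
        # log-softmax: grad = g - exp(out) * sum(g)
        return grad_out - torch.exp(out) * grad_out.sum(dim=-1, keepdim=True)
    # softmax: grad = out * (g - sum(g * out))
    return out * (grad_out - (grad_out * out).sum(dim=-1, keepdim=True))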
|
bart_ls-main
|
xformers/xformers/triton/k_softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components import Activation, MultiHeadDispatch
from xformers.components.attention import LinformerAttention
from xformers.components.feedforward import MLP
from xformers.models.base import ModelConfig
@dataclass
class LinformerConfig(ModelConfig):
k: int # The dimension of the space on which the key and values are projected
class LinformerEncoderLayer(torch.nn.Module):
"""
An implementation of a Linformer_ encoder layer using the xFormers components
.. _Linformer: `Wang, S., Li, B. Z., Khabsa, M., Fang, H., & Ma, H. (2020).
Linformer: Self-Attention with Linear Complexity. ArXiv, 2048(2019).`
"""
def __init__(
self,
num_heads: int,
dim_sequence: int, # FIXME: should not be needed, make this dynamic
dim_embedding: int,
dim_feedforward: int,
activation: Activation,
attention_dropout: Optional[float],
ff_dropout: Optional[float],
final_dropout: Optional[float],
k: int,
*args,
**kwargs,
):
super().__init__()
if not attention_dropout:
attention_dropout = 0.0
if not ff_dropout:
ff_dropout = 0.0
if not final_dropout:
final_dropout = 0.0
self.attention = LinformerAttention(
dropout=attention_dropout, causal=False, seq_len=dim_sequence, k=k
)
self.multihead = MultiHeadDispatch(
dim_model=dim_embedding,
num_heads=num_heads,
residual_dropout=attention_dropout,
attention=self.attention,
)
self.norm1 = torch.nn.LayerNorm(dim_embedding)
self.feedforward = MLP(
dim_model=dim_feedforward,
dropout=ff_dropout,
activation=activation,
hidden_layer_multiplier=4,
)
self.norm2 = torch.nn.LayerNorm(dim_embedding)
self.dropout = nn.Dropout(p=final_dropout)
def forward(self, tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
# NOTE: this is just an example, pre/post layer norms are possible in practice
tensor = self.norm1(tensor + self.multihead(tensor, tensor, tensor))
tensor = self.norm2(tensor + self.feedforward(tensor))
tensor *= mask.unsqueeze(-1).type_as(tensor)
return tensor
@classmethod
def from_config(cls, config: LinformerConfig):
# pyre-fixme[16]: `LinformerConfig` has no attribute `as_patchy_dict`.
return cls(**config.as_patchy_dict())
class LinformerDecoderLayer(torch.nn.Module):
"""
An implementation of a Linformer_ decoder layer using the xFormers components
.. _Linformer: `Wang, S., Li, B. Z., Khabsa, M., Fang, H., & Ma, H. (2020).
Linformer: Self-Attention with Linear Complexity. ArXiv, 2048(2019).`
"""
def __init__(
self,
num_heads: int,
dim_sequence: int, # NOTE: should not be needed, this could just be dynamic
dim_embedding: int,
dim_feedforward: int,
activation: Activation,
attention_dropout: Optional[float],
ff_dropout: Optional[float],
final_dropout: Optional[float],
k: int,
*args,
**kwargs,
):
super().__init__()
if not attention_dropout:
attention_dropout = 0.0
if not ff_dropout:
ff_dropout = 0.0
if not final_dropout:
final_dropout = 0.0
self.multihead1 = MultiHeadDispatch(
dim_model=dim_embedding,
num_heads=num_heads,
residual_dropout=attention_dropout,
attention=LinformerAttention(
dropout=attention_dropout, causal=True, seq_len=dim_sequence, k=k
),
)
self.multihead2 = MultiHeadDispatch(
dim_model=dim_embedding,
num_heads=num_heads,
residual_dropout=attention_dropout,
attention=LinformerAttention(
dropout=attention_dropout, causal=False, seq_len=dim_sequence, k=k
),
)
self.feedforward = MLP(
dim_model=dim_feedforward,
dropout=ff_dropout,
activation=activation,
hidden_layer_multiplier=4,
)
self.norm1 = torch.nn.LayerNorm(dim_embedding)
self.norm2 = torch.nn.LayerNorm(dim_embedding)
self.norm3 = torch.nn.LayerNorm(dim_embedding)
self.dropout = nn.Dropout(p=final_dropout)
def forward(self, target: torch.Tensor, memory: torch.Tensor) -> torch.Tensor:
# FIXME: Handle automatically different layer norm positions
# Masked multi head attention
x = self.norm1(target + self.multihead1(target, target, target))
# Include the memory/Encoder results
x = self.norm2(x + self.multihead2(key=memory, value=memory, query=x))
# FF
x = self.norm3(x + self.feedforward(x))
return x
@classmethod
def from_config(cls, config: LinformerConfig):
# pyre-fixme[16]: `LinformerConfig` has no attribute `as_patchy_dict`.
return cls(**config.as_patchy_dict())
class LinFormer(torch.nn.Module):
def __init__(
self,
num_heads: int,
dim_sequence: int, # FIXME: should not be needed, make this dynamic
dim_embedding: int,
dim_feedforward: int,
activation: Activation,
num_encoder_layers: int,
num_decoder_layers: int,
attention_dropout: Optional[float],
ff_dropout: Optional[float],
final_dropout: Optional[float],
k: int,
):
super().__init__()
encoders = [
LinformerEncoderLayer(
num_heads,
dim_sequence,
dim_embedding,
dim_feedforward,
activation,
attention_dropout,
ff_dropout,
final_dropout,
k,
)
for _ in range(num_encoder_layers)
]
self.encoders = torch.nn.Sequential(*encoders) if encoders else None
self.decoders = nn.ModuleList(
[
LinformerDecoderLayer(
num_heads,
dim_sequence,
dim_embedding,
dim_feedforward,
activation,
attention_dropout,
ff_dropout,
final_dropout,
k,
)
for _ in range(num_decoder_layers)
]
)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
# Encode to latent space if encoder is present
latent = self.encoders(inputs) if self.encoders else None
# If decoder: either use the encoder output, or just decode, both options are possible
if self.decoders:
for decoder in self.decoders:
inputs = decoder(
target=inputs, memory=latent if latent is not None else inputs
)
return inputs
# There was no decoder, we're looking for encoded values
return latent
@classmethod
def from_config(cls, config: LinformerConfig):
# pyre-fixme[16]: `LinformerConfig` has no attribute `as_patchy_dict`.
return cls(**config.as_patchy_dict())
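# Illustrative usage: a minimal sketch of a single encoder layer; the sizes below are
# arbitrary assumptions. Note that dim_feedforward is forwarded to the MLP as its
# dim_model, so it has to match dim_embedding for the residual connections to line up.
if __name__ == "__main__":
    layer = LinformerEncoderLayer(
        num_heads=4,
        dim_sequence=128,  # Linformer needs the sequence length up front
        dim_embedding=64,
        dim_feedforward=64,
        activation=Activation.ReLU,
        attention_dropout=0.0,
        ff_dropout=0.0,
        final_dropout=0.0,
        k=32,
    )
    x = torch.randn(2, 128, 64)
    mask = torch.ones(2, 128)
    y = layer(x, mask)  # (2, 128, 64)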
|
bart_ls-main
|
xformers/xformers/models/linformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
from xformers.components import Activation
@dataclass
class ModelConfig:
num_heads: int
dim_sequence: int
dim_embedding: int
dim_feedforward: int
num_encoder_layers: int
num_decoder_layers: int
activation: Activation
attention_dropout: Optional[float]
relu_dropout: Optional[float]
dropout: Optional[float]
|
bart_ls-main
|
xformers/xformers/models/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn.init import (
_calculate_fan_in_and_fan_out,
_no_grad_uniform_,
constant_,
xavier_uniform_,
)
def small_init_(tensor: torch.Tensor, gain: float = 1.0) -> torch.Tensor:
r"""Fills the input `Tensor` with values according to the method
described in `Transformer Without Tears`_, using a uniform distribution.
This is a variation of the Xavier init. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + 4 * \text{fan\_out}}}
The Xavier init it derives from is also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
.. _`Transformer Without Tears`: https://doi.org/10.5281/zenodo.3525484
"""
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + 4 * fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return _no_grad_uniform_(tensor, -a, a)
@dataclass
class InProjParams:
in_features: int
out_features: int
bias: bool
small_init: bool = False
class InProjContainer(nn.Module):
"""
Handle all the input projections in one go, opportunistically fuse some operations.
CREDITS: Inspired by https://github.com/pytorch/text/blob/master/torchtext/nn/modules/multiheadattention.py
and the MultiHeadAttention implementation from PyTorch
"""
def __init__(
self,
query_proj_params: InProjParams,
key_proj_params: Optional[InProjParams],
value_proj_params: Optional[InProjParams],
):
super().__init__()
assert (
query_proj_params.in_features == query_proj_params.out_features
), "We assume in_features == out_features for queries, please provide your projection if this is not the case"
# If nothing is specified for key and value, use the same as query
if key_proj_params is None:
key_proj_params = query_proj_params
if value_proj_params is None:
value_proj_params = query_proj_params
# Catch a beneficial case, if Q,K,V dimensions are the same
self.same_dimensions = (
query_proj_params.in_features == key_proj_params.in_features
and value_proj_params.in_features == key_proj_params.in_features
)
self.out_features = query_proj_params.out_features
self.q_p_params = query_proj_params
self.k_p_params = key_proj_params
self.v_p_params = value_proj_params
# - handle all the weights
# save the requested init method
if self.same_dimensions:
# We can use a single weight and bias buffer, which will speed up self attention
self.in_proj_weight = nn.Parameter(
torch.empty((3 * self.out_features, self.out_features))
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
else:
# The dimensions are different, use separate buffers
self.q_proj_weight = nn.Parameter(
torch.empty((self.out_features, query_proj_params.in_features))
)
self.k_proj_weight = nn.Parameter(
torch.empty((self.out_features, key_proj_params.in_features))
)
self.v_proj_weight = nn.Parameter(
torch.empty((self.out_features, value_proj_params.in_features))
)
self.register_parameter("in_proj_weight", None)
# - handle all the inputs
if query_proj_params.bias:
self.in_proj_bias = nn.Parameter(torch.empty(3 * self.out_features))
else:
self.register_parameter("in_proj_bias", None)
# - multi-head attention specific init for the weights and biases
self._reset_parameters()
def _reset_parameters(self):
if self.in_proj_weight is not None:
# 1/sqrt(2) init is empirically beneficial in that case
self.in_proj_weight = self._init_weights(
self.q_p_params, self.in_proj_weight, gain=1.0 / math.sqrt(2)
)
else:
self.q_proj_weight = self._init_weights(
self.q_p_params, self.q_proj_weight, gain=1.0
)
self.k_proj_weight = self._init_weights(
self.k_p_params, self.k_proj_weight, gain=1.0
)
self.v_proj_weight = self._init_weights(
self.v_p_params, self.v_proj_weight, gain=1.0
)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
@staticmethod
def _init_weights(params: InProjParams, weights: torch.Tensor, gain: float):
if params.small_init:
return small_init_(weights, gain=gain)
else:
return xavier_uniform_(weights, gain=gain)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if self.in_proj_weight is not None:
if id(query) == id(key):
# Self attention, get all the projected values at once
# we compute everything transposed, so that q,k,v stay contiguous after splitting
qkv = query @ self.in_proj_weight.transpose(-2, -1)
if self.in_proj_bias is not None:
qkv += self.in_proj_bias
q, k, v = map(
lambda x: x.contiguous(),
qkv.split(self.out_features, dim=-1),
)
return q, k, v
else:
# Not self attention
# - bias free projection
projections = self.in_proj_weight.split(self.out_features, dim=0)
q, k, v = map(
lambda x, y: x @ y.transpose(1, 0), [query, key, value], projections
)
# - optionally add bias
if self.in_proj_bias is not None:
biases = self.in_proj_bias.split(self.out_features, dim=0)
q, k, v = map(lambda x, y: x + y, [q, k, v], biases)
return q, k, v
# We have a weight per input, but share a bigger bias buffer
assert (
self.q_proj_weight is not None
and self.k_proj_weight is not None
and self.v_proj_weight is not None
)
# - bias free projection
q, k, v = map(
lambda x, y: x @ y.transpose(1, 0),
[query, key, value],
[self.q_proj_weight, self.k_proj_weight, self.v_proj_weight],
)
# - optionally add bias
if self.in_proj_bias is not None:
biases = self.in_proj_bias.split(self.out_features, dim=0)
q, k, v = map(lambda x, y: x + y, [q, k, v], biases)
return q, k, v
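# Illustrative usage: a minimal sketch of the self-attention fast path, where a single
# fused buffer holds the Q/K/V projection weights. Sizes are arbitrary assumptions.
if __name__ == "__main__":
    params = InProjParams(in_features=64, out_features=64, bias=True)
    in_proj = InProjContainer(params, None, None)  # K/V default to the query params
    x = torch.randn(2, 16, 64)
    q, k, v = in_proj(x, x, x)  # each (2, 16, 64)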
|
bart_ls-main
|
xformers/xformers/components/in_proj_container.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import List, Union
import torch
import torch.nn as nn
from xformers import _is_triton_available
if _is_triton_available:
from xformers.triton.layer_norm import FusedLayerNorm
def _to_tensor_list(
inputs: Union[torch.Tensor, List[torch.Tensor]]
) -> List[torch.Tensor]:
if not isinstance(inputs, list):
inputs = [inputs]
return inputs
class LayerNormStyle(str, Enum):
"""Support different layer norm styles.
See "On Layer Normalization in the Transformer Architecture",
Xiong et al., https://arxiv.org/pdf/2002.04745v1.pdf
"""
Pre = "pre"
Post = "post"
# CREDITS: the following is inspired by FastAI's Transformer implementation
class Residual(nn.Module):
"""Object-oriented handling of the residual path"""
def __init__(self, layer: nn.Module):
super().__init__()
self.layer = layer
def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]], *args, **kwargs):
inputs = _to_tensor_list(inputs)
return inputs[0] + self.layer(*inputs, *args, **kwargs)
class PreNorm(nn.Module):
"""Adds LayerNorm before computing attention
..Note: If a list of inputs is passed, all of them get normalized"""
def __init__(self, d_model: int, sublayer: nn.Module, use_triton: bool = True):
super().__init__()
if _is_triton_available and use_triton:
self.norm: Union[nn.LayerNorm, FusedLayerNorm] = FusedLayerNorm(d_model)
else:
self.norm = nn.LayerNorm(d_model)
self.sublayer = sublayer
def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]], *args, **kwargs):
inputs = _to_tensor_list(inputs)
x_norm = [self.norm(x_) for x_ in inputs]
return self.sublayer(*x_norm, *args, **kwargs)
class PostNorm(nn.Module):
"""Adds LayerNorm after computing attention"""
def __init__(self, d_model: int, sublayer: nn.Module, use_triton: bool = True):
super().__init__()
if _is_triton_available and use_triton:
self.norm: Union[nn.LayerNorm, FusedLayerNorm] = FusedLayerNorm(d_model)
else:
self.norm = nn.LayerNorm(d_model)
self.sublayer = sublayer
def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]], *args, **kwargs):
inputs = _to_tensor_list(inputs)
x = self.sublayer(*inputs, *args, **kwargs)
return self.norm(x)
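# Illustrative usage: a minimal sketch of a pre-norm residual sublayer; use_triton=False
# keeps the plain PyTorch LayerNorm so the example also runs without a GPU.
if __name__ == "__main__":
    block = Residual(PreNorm(64, nn.Linear(64, 64), use_triton=False))
    x = torch.randn(2, 16, 64)
    y = block(x)  # x + linear(layer_norm(x))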
|
bart_ls-main
|
xformers/xformers/components/residual.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# CREDITS: Code adapted from
# https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
# https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py,
# https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
# pyre-fixme[13]: `cpu_state` is not initialized in the constructor.
class Deterministic(nn.Module):
def __init__(self, net: nn.Module):
super().__init__()
self.net = net
self.cpu_state: torch.Tensor = torch.get_rng_state()
self.cuda_in_fwd: bool = False
self.gpu_devices: List[int] = []
self.gpu_states: List[torch.Tensor] = []
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng: bool = False, set_rng: bool = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices: List[int] = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
class ReversibleBlock(nn.Module):
def __init__(self, f: nn.Module, g: nn.Module, split_dim: int = -1):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.split_dim = split_dim
def forward(self, x: torch.Tensor, f_args={}, g_args={}):
x1, x2 = torch.chunk(x, 2, dim=-1)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=self.split_dim)
def backward_pass(
self, y: torch.Tensor, dy: torch.Tensor, f_args={}, g_args={}
): # pragma: no cover # this is covered, but called directly from C++
y1, y2 = torch.chunk(y, 2, dim=self.split_dim)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=self.split_dim)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=self.split_dim)
dx = torch.cat([dx1, dx2], dim=self.split_dim)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(
ctx, dy
): # pragma: no cover # this is covered, but called directly from C++
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class ReversibleSequence(nn.Module):
def __init__(self, blocks: nn.ModuleList):
super().__init__()
# pyre-fixme[23]: Unable to unpack `torch.nn.Module` into 2 values.
self.blocks = nn.ModuleList([ReversibleBlock(f, g) for f, g in blocks])
def forward(self, x, arg_route=(True, False), **kwargs):
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {"f_args": f_args, "g_args": g_args}
return _ReversibleFunction.apply(x, self.blocks, block_kwargs)
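# Illustrative usage: a minimal sketch. Each reversible block wraps a pair of
# sub-networks (f, g) and the input is split in two halves along the last dimension,
# so the model width here is 2 * 64. Sizes are arbitrary assumptions.
if __name__ == "__main__":
    blocks = nn.ModuleList(
        [nn.ModuleList([nn.Linear(64, 64), nn.Linear(64, 64)]) for _ in range(2)]
    )
    model = ReversibleSequence(blocks)
    x = torch.randn(4, 16, 128)
    y = model(x)  # (4, 16, 128)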
|
bart_ls-main
|
xformers/xformers/components/reversible.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Optional
import torch
from torch import nn
class Activation(str, Enum):
SquaredReLU = "squared_relu"
GeLU = "gelu"
LeakyReLU = "leaky_relu"
ReLU = "relu"
# For unit testing / parity comparisons, probably not the fastest way
class SquaredReLU(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x_ = torch.nn.functional.relu(x)
return x_ * x_
class Passthrough(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x
def build_activation(activation: Optional[Activation]):
if not activation:
return Passthrough()
return {
Activation.ReLU: nn.ReLU,
Activation.GeLU: nn.GELU,
Activation.LeakyReLU: nn.LeakyReLU,
Activation.SquaredReLU: SquaredReLU,
}[activation]()
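# Illustrative usage: a minimal sketch of the builder; an Activation member maps to the
# corresponding nn.Module, while None yields a pass-through.
if __name__ == "__main__":
    act = build_activation(Activation.SquaredReLU)
    x = torch.randn(4, 8)
    assert torch.allclose(act(x), torch.relu(x) ** 2)
    assert build_activation(None)(x) is x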
|
bart_ls-main
|
xformers/xformers/components/activations.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import asdict, dataclass
from typing import Optional
import torch
import torch.nn as nn
from torch.nn.init import constant_
from xformers.components.attention import Attention
from xformers.components.in_proj_container import InProjContainer, InProjParams
from xformers.components.positional_embedding import RotaryEmbedding
@dataclass
class MultiHeadDispatchConfig:
dim_model: int
residual_dropout: float
num_heads: int
attention: Attention
bias: bool
dim_key: Optional[int]
dim_value: Optional[int]
in_proj_container: Optional[InProjContainer]
use_separate_proj_weight: Optional[bool]
use_rotary_embeddings: Optional[bool]
out_proj: Optional[nn.Module]
# Move head forward and fold into batch dim. dimensions become (B * nh, S, hs)
def _fold_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):
return t.view(B, S, H, Hs).transpose(1, 2).flatten(start_dim=0, end_dim=1)
def _split_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):
return t.view(B, S, H, Hs).transpose(1, 2)
class MultiHeadDispatch(nn.Module):
"""
A multi-head masked self-attention dispatch mechanism, with a projection at the end,
following the architecture proposed in `Attention is all you need`_, Vaswani et al.
The actual attention mechanism can vary, as well as the projections.
This can be used to wrap the proposed attention mechanisms and make them multi-head aware,
but it is optional.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762v5
"""
def __init__(
self,
dim_model: int,
residual_dropout: float,
num_heads: int,
attention: Attention,
bias: bool = True,
dim_key: Optional[int] = None,
dim_value: Optional[int] = None,
in_proj_container: Optional[InProjContainer] = None,
use_separate_proj_weight: Optional[bool] = False,
use_rotary_embeddings: Optional[bool] = False,
out_proj: Optional[nn.Module] = None,
*args,
**kwargs,
):
super().__init__()
assert (
dim_model % num_heads == 0
) # static preset for now, each head works on 1/d the embeddings, could be relaxed
assert num_heads > 0
# Popular default is that all latent dimensions are the same
dim_key, dim_value = map(lambda x: x if x else dim_model, (dim_key, dim_value))
self.num_heads = num_heads
self.dim_k = dim_key // num_heads
self.dim_value = dim_value
self.dim_model = dim_model
self.attention = attention
# key, query, value projections for all heads
# critical options are
# - are we sharing weights ?
# - are we adding biases, and if yes are they shared ?
if attention.requires_input_projection:
self.in_proj_container = (
in_proj_container
if in_proj_container is not None
else InProjContainer(
query_proj_params=InProjParams(dim_model, dim_key, bias=bias),
key_proj_params=InProjParams(dim_model, dim_key, bias=bias)
if use_separate_proj_weight
else None,
value_proj_params=InProjParams(dim_model, dim_value, bias=bias)
if use_separate_proj_weight
else None,
)
)
# Optional rotary embeddings
self.rotary_embeddings = (
RotaryEmbedding(self.dim_k) if use_rotary_embeddings else None
)
# Regularization
self.resid_drop = nn.Dropout(residual_dropout, inplace=False)
# Output projection
self.proj = out_proj if out_proj else nn.Linear(dim_model, dim_model, bias=bias)
if isinstance(self.proj, nn.Linear) and self.proj.bias is not None:
constant_(self.proj.bias, 0.0)
def _check(self, t, name):
assert (
t.shape[2] % self.dim_k == 0
), f"the {name} embeddings need to be divisible by the number of heads"
def forward(
self,
query: torch.Tensor,
key: Optional[torch.Tensor] = None,
value: Optional[torch.Tensor] = None,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Expected input dimensions are [batch size, sequence length, embed dim]
Output dimensions are [batch size, sequence length, embed dim]
"""
if key is None:
key = query
if value is None:
value = query
# Check the dimensions properly
self._check(query, "query")
self._check(value, "value")
self._check(key, "key")
max_batch = max((query.shape[0], key.shape[0], value.shape[0]))
query, key, value = map(
lambda x: x.expand(max_batch, -1, -1), [query, key, value]
)
B, S_Q, _ = query.size() # Batch x Sequence x Embedding (latent)
_, S_K, _ = key.size() # K, Q's sequence length could differ
# Catch different query and key length but a causal attention
if S_Q != S_K:
assert (
not self.attention.requires_same_k_q_dimensions
), "This attention mechanism requires query and key to have the same sequence (context) lengths"
if hasattr(self.attention, "causal"):
assert not self.attention.causal, (
"Causal attention is not supported when key and query have different sequence lengths.\n"
+ "In that case causality is ill-determined. Please pad your sequences accordingly"
)
if self.attention.requires_skip_multi_head:
return self.attention(
query, key, value, att_mask=att_mask, key_padding_mask=key_padding_mask
)
# Calculate query, key, values for all heads in batch
if self.attention.requires_input_projection:
q, k, v = self.in_proj_container(query=query, key=key, value=value)
else:
k, q, v = key, query, value
# Optional: rotary embedding, add relative positioning information
if self.rotary_embeddings:
# rotary requires the head dimension
q = _split_heads(q, B, S_Q, self.num_heads, self.dim_k)
k = _split_heads(k, B, S_K, self.num_heads, self.dim_k)
v = _split_heads(v, B, S_K, self.num_heads, self.dim_k)
q, k = self.rotary_embeddings(q=q, k=k)
if not self.attention.requires_head_dimension:
q, k, v = q.flatten(0, 1), k.flatten(0, 1), v.flatten(0, 1)
else:
# Reshape k/q/v to either expose the heads, or fold the head dimension into the batch
reshape_fn = (
_split_heads if self.attention.requires_head_dimension else _fold_heads
)
q = reshape_fn(q, B, S_Q, self.num_heads, self.dim_k)
k = reshape_fn(k, B, S_K, self.num_heads, self.dim_k)
v = reshape_fn(v, B, S_K, self.num_heads, self.dim_k)
# Self-attend
y = self.attention(
q=q, k=k, v=v, att_mask=att_mask, key_padding_mask=key_padding_mask
)
# Re-assemble all head outputs side by side
y = (
y.view(B, self.num_heads, S_Q, self.dim_k)
.transpose(1, 2)
.flatten(start_dim=2, end_dim=3)
)
# Output projection, dropout and good to go
y = self.resid_drop(self.proj(y))
# Return the same sequence size as the input
return y
@classmethod
def from_config(cls, config: MultiHeadDispatchConfig):
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
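# Illustrative usage: a minimal self-attention sketch. ScaledDotProduct is assumed to be
# available from xformers.components.attention; any Attention subclass would do.
if __name__ == "__main__":
    from xformers.components.attention import ScaledDotProduct

    mha = MultiHeadDispatch(
        dim_model=64,
        residual_dropout=0.0,
        num_heads=4,
        attention=ScaledDotProduct(dropout=0.0),
    )
    x = torch.randn(2, 16, 64)
    y = mha(query=x)  # keys and values default to the queries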
|
bart_ls-main
|
xformers/xformers/components/multi_head_dispatch.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import fields
from pathlib import Path
from typing import Any, Dict, Union
from xformers.utils import import_all_modules
from .activations import Activation, build_activation # noqa
from .attention import Attention, build_attention # noqa
from .in_proj_container import InProjContainer, InProjParams # noqa
from .multi_head_dispatch import MultiHeadDispatch # noqa
from .multi_head_dispatch import MultiHeadDispatchConfig
from .residual import LayerNormStyle, PostNorm, PreNorm, Residual # noqa
# automatically import any Python files in the directory
import_all_modules(str(Path(__file__).parent), "xformers.components")
def build_multi_head_attention(
multi_head_config: Union[MultiHeadDispatchConfig, Dict[str, Any]],
):
"""Builds a multihead attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it."""
if not isinstance(multi_head_config, MultiHeadDispatchConfig):
# Extract the required fields
field_names = list(map(lambda x: x.name, fields(MultiHeadDispatchConfig)))
# The missing fields get Noned
for k in field_names:
if k not in multi_head_config.keys():
multi_head_config[k] = None
# Could be that the attention needs to be instantiated
if not isinstance(multi_head_config["attention"], Attention):
# Convenience: fill in possible missing fields
if "num_heads" not in multi_head_config["attention"]:
multi_head_config["attention"]["num_heads"] = multi_head_config[
"num_heads"
]
if "dim_model" not in multi_head_config["attention"]:
multi_head_config["attention"]["dim_model"] = multi_head_config[
"dim_model"
]
if (
"dim_features" not in multi_head_config["attention"]
or multi_head_config["attention"]["dim_features"] is None
):
multi_head_config["attention"]["dim_features"] = (
multi_head_config["dim_model"] // multi_head_config["num_heads"]
)
multi_head_config["attention"] = build_attention(
multi_head_config["attention"]
)
multi_head_config = MultiHeadDispatchConfig(**multi_head_config)
return MultiHeadDispatch.from_config(multi_head_config)
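# Illustrative usage: a minimal sketch of building a multi-head attention block from a
# plain dict; "scaled_dot_product" is assumed to be a registered attention name.
if __name__ == "__main__":
    import torch

    mha = build_multi_head_attention(
        {
            "dim_model": 64,
            "num_heads": 4,
            "residual_dropout": 0.0,
            "attention": {"name": "scaled_dot_product", "dropout": 0.0, "causal": False},
        }
    )
    y = mha(query=torch.randn(2, 16, 64))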
|
bart_ls-main
|
xformers/xformers/components/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
maybe_sparsify,
register_attention,
sparsify,
)
from xformers.components.attention.attention_patterns import (
causal_1d_pattern,
global_token_pattern,
)
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class GlobalAttentionConfig(AttentionConfig):
attention_query_mask: torch.Tensor # Mark the queries which have global attention
causal: Optional[bool]
force_sparsity: Optional[bool]
@register_attention("global", GlobalAttentionConfig)
class GlobalAttention(Attention):
def __init__(
self,
dropout: float,
attention_query_mask: torch.Tensor,
causal: bool = False,
force_sparsity: bool = False,
*_,
**__,
):
r"""
Global attention, as proposed for instance in BigBird_ or Longformer_.
Global means, in that case, that the queries positively labelled in the ``attention_query_mask`` can attend
to all the other queries. The queries negatively labelled in the ``attention_query_mask`` cannot attend to
any other query.
This implementation is sparse-aware, meaning that the empty attention parts will not be represented in memory.
Args:
dropout (float): probability of an element to be zeroed
attention_query_mask (torch.Tensor): if true, this query can attend to all the others
"""
super().__init__()
assert attention_query_mask.dtype == torch.bool, "A boolean mask is expected"
assert (
attention_query_mask.shape[1] == 1
and attention_query_mask.shape[0] > attention_query_mask.shape[1]
), "A N x 1 query mask is expected"
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.attention_mask = global_token_pattern(attention_query_mask[:, 0])
self.force_sparsity = force_sparsity
if causal:
self.attention_mask &= causal_1d_pattern(attention_query_mask.shape[0])
self.attention_mask = (
sparsify(self.attention_mask)
if self.force_sparsity
else maybe_sparsify(self.attention_mask)
)
self.requires_same_k_q_dimensions = True
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
*_,
**__,
):
# Make sure that the mask is on the right device
if self.attention_mask.device != q.device:
self.attention_mask = self.attention_mask.to(q.device)
# Mask-aware attention
if att_mask is not None:
if att_mask.dtype == torch.bool and isinstance(
self.attention_mask, AttentionMask
):
mask = self.attention_mask + AttentionMask.from_bool(att_mask)
else:
mask = self.attention_mask & att_mask
else:
mask = self.attention_mask
# Handle q/k/v which would not fit the mask
seq_len = q.shape[-2]
q_, k_, v_ = map(lambda x: self._maybe_pad_sequence(x, mask), (q, k, v))
# Normal attention with the global tokens mask
att = scaled_dot_product_attention(
q=q_, k=k_, v=v_, att_mask=mask, dropout=self.attn_drop
)
att = self.attn_drop(att)
# Take into account a hypothetical padding
return att[:, :seq_len, :]
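# Illustrative usage: a minimal sketch. The boolean (seq_len, 1) mask marks which query
# positions get global attention; folding the heads into the batch dimension
# (B * num_heads, S, head_dim) is assumed to be handled by the caller, e.g. MultiHeadDispatch.
if __name__ == "__main__":
    seq_len, dim_head = 16, 32
    query_mask = torch.zeros(seq_len, 1, dtype=torch.bool)
    query_mask[::2] = True  # every other position gets global attention
    attn = GlobalAttention(dropout=0.0, attention_query_mask=query_mask)
    q = torch.randn(2, seq_len, dim_head)
    out = attn(q, q, q)  # (2, seq_len, dim_head)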
|
bart_ls-main
|
xformers/xformers/components/attention/global_tokens.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional
import torch
import torch.autograd.profiler as profiler
import torch.nn as nn
import torch.nn.functional as Fn
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.core import (
scaled_dot_product_attention,
scaled_query_key_softmax,
)
class LandmarkSelection(str, Enum):
Orthogonal = "orthogonal"
KMeans = "kmeans"
KMeans_Spherical = "kmeans_spherical"
Random = "random"
@dataclass
class OrthoformerAttentionConfig(AttentionConfig):
"""
num_landmarks Number of landmarks to use for softmax approximation.
subsample_fraction Percentage of q_samples matrix to sample per iteration
landmark_selection Landmark selection strategy
"""
num_landmarks: Optional[int]
subsample_fraction: Optional[float]
landmark_selection: Optional[LandmarkSelection]
@register_attention("orthoformer", OrthoformerAttentionConfig)
class OrthoFormerAttention(Attention):
def __init__(
self,
dropout: float,
num_landmarks: int = 32,
subsample_fraction: float = 1.0,
landmark_selection: LandmarkSelection = LandmarkSelection.Orthogonal,
*args,
**kwargs,
):
"""
Orthoformer_ attention mechanism.
::
"Keeping Your Eye on the Ball: Trajectory Attention in Video Transformers"
Patrick, M., Campbell, D., Asano, Y., Misra, I., Metze, F., Feichtenhofer,
C., Vedaldi, A., Henriques, J. (2021)
Reference codebase: https://github.com/facebookresearch/Motionformer
.. _Orthoformer: https://arxiv.org/abs/2106.05392
"""
super().__init__()
self.num_landmarks = num_landmarks
self.attn_drop = nn.Dropout(dropout)
self.subsample_fraction = subsample_fraction
self.landmark_selection = landmark_selection
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
*args,
**kwargs,
):
N = k.shape[1]
if self.num_landmarks == N:
# Default attention
x = scaled_dot_product_attention(q, k, v, att_mask)
else:
with torch.no_grad(), profiler.record_function("select landmarks"):
if self.landmark_selection == LandmarkSelection.Orthogonal:
landmarks = self._compute_orthogonal_landmarks(q)
elif self.landmark_selection == LandmarkSelection.Random:
half_L = self.num_landmarks // 2
landmarks_q = q[:, torch.randint(q.size(1), (half_L,)), :]
landmarks_k = k[:, torch.randint(k.size(1), (half_L,)), :]
landmarks = torch.cat((landmarks_q, landmarks_k), dim=-2)
elif self.landmark_selection == LandmarkSelection.KMeans:
landmarks = self._cluster_landmarks(q)
elif self.landmark_selection == LandmarkSelection.KMeans_Spherical:
landmarks = self._cluster_landmarks(q, spherical=True)
if att_mask is not None:
logging.warning(
"Orthoformer: attention mask passed alongside with using landmarks to reduce dimensions. \
The two are typically not compatible"
)
# FIXME: Should we still accept a mask in that case ?
att_mask = None
# pyre-ignore[61]: TODO(T103337542): `landmarks` mistakenly seems
# like it could be uninitialized.
kernel_1 = scaled_query_key_softmax(q, landmarks, att_mask)
# pyre-ignore[61]: TODO(T103337542): `landmarks` mistakenly seems
# like it could be uninitialized.
kernel_2 = scaled_query_key_softmax(landmarks, k, att_mask)
x = torch.matmul(kernel_1, torch.matmul(kernel_2, v))
x = self.attn_drop(x)
return x
def _cluster_landmarks(
self,
q: torch.Tensor,
spherical: bool = False,
num_iters: int = 6,
) -> torch.Tensor:
"""
Construct a set of landmarks by clustering a (possibly subsampled) set of queries
with k-means, optionally spherical k-means.
Returns landmarks with shape (B, M, D).
"""
num_landmarks = min(self.num_landmarks, q.shape[1])
if self.subsample_fraction < 1.0:
num_samples = max(
int(self.subsample_fraction * q.size(-2)), num_landmarks
) # Need at least M/2 samples of queries and keys
q_samples = q[:, torch.randint(q.size(-2), (num_samples,)), :] # (B, N, D)
else:
q_samples = q # (B, N, D)
if spherical:
q_samples_normalized = Fn.normalize(
q_samples, p=2, dim=-1
) # may need to change default eps to eps=1e-8 for mixed precision compatibility
landmarks = self._kmeans_spherical(
q_samples_normalized, num_landmarks, num_iters
)
else:
landmarks = self._kmeans(q_samples, num_landmarks, num_iters)
return landmarks # (B, M, D)
def _kmeans(self, x: torch.Tensor, K: int, num_iters: int = 10):
"""
Arguments:
x: (B, N, D)
K: number of clusters
num_iters: the number of kmeans updates
"""
B, N, D = x.size()
assert K <= N, f"{K} > {N}"
c = x[
:, torch.randperm(N, device=x.device)[:K], :
].clone() # initialisation for the centroids
with profiler.record_function("kmeans"):
x_i = x.view(B, N, 1, D)
c_j = c.view(B, 1, K, D)
counts = c.new_zeros(B, K)
ones = x.new_ones((B, N))
for _ in range(num_iters):
# E step: assign points to the nearest cluster
D_ij = ((x_i - c_j) ** 2).sum(-1) # (B, N, K) squared distances
cl = D_ij.argmin(
dim=-1, keepdim=True
).long() # (B, N, 1) index of point to nearest cluster
# M step: update the centroids
c.zero_()
c.scatter_add_(-2, cl.repeat(1, 1, D), x) # sum of points per cluster
counts.fill_(1e-6) # avoid div0
counts.scatter_add_(
-1, cl.squeeze(-1), ones
) # number of points per cluster
c.divide_(counts.unsqueeze(-1)) # compute the average
return c
def _kmeans_spherical(self, x: torch.Tensor, K: int, num_iters=10):
"""
Arguments:
x: (B, N, D)
K: number of clusters
num_iters: the number of kmeans updates
"""
B, N, D = x.size()
assert K <= N, f"{K} > {N}"
# initialisation for the centroids
c = x[:, torch.randperm(N, device=x.device)[:K], :].clone()
with profiler.record_function("kmeans_spherical"):
counts = c.new_zeros(B, K)
ones = x.new_ones((B, N))
for _ in range(num_iters):
# E step: assign points to the nearest cluster
D_ij = torch.matmul(
x, c.transpose(-2, -1)
) # (B, N, K) cosine similarity
cl = D_ij.argmax(
dim=-1, keepdim=True
).long() # (B, N, 1) index of point to nearest cluster
# M step: update the centroids
c.zero_()
c.scatter_add_(-2, cl.repeat(1, 1, D), x) # sum of points per cluster
counts.fill_(1e-6) # avoid div0
counts.scatter_add_(
-1, cl.squeeze(-1), ones
) # number of points per cluster
c.divide_(counts.unsqueeze(-1)) # compute the average
c = Fn.normalize(c, p=2, dim=-1) # renormalise
return c
def _compute_orthogonal_landmarks(self, q: torch.Tensor) -> torch.Tensor:
"""
Construct set of landmarks by recursively selecting new landmarks
that are maximally orthogonal to the existing set.
Returns near orthogonal landmarks with shape (B, M, D).
"""
if self.subsample_fraction < 1.0:
# Need at least M samples of queries
num_samples = max(
int(self.subsample_fraction * q.size(-2)), self.num_landmarks
)
q_samples = q[
:, torch.randint(q.size(-2), (num_samples,), device=q.device), :
]
else:
# (B, N, D)
q_samples = q
# may need to change default eps to eps=1e-8 for mixed precision compatibility
q_samples_normalized = Fn.normalize(q_samples, p=2, dim=-1)
B, N, D = q_samples_normalized.shape
selected_mask = torch.zeros((B, N, 1), device=q_samples_normalized.device)
landmark_mask = torch.ones(
(B, 1, 1), dtype=selected_mask.dtype, device=q_samples_normalized.device
)
# Get initial random landmark
random_idx = torch.randint(
q_samples_normalized.size(-2), (B, 1, 1), device=q_samples_normalized.device
)
selected_mask.scatter_(-2, random_idx, landmark_mask)
# Selected landmarks
selected_landmarks = torch.empty(
(B, self.num_landmarks, D),
device=q_samples_normalized.device,
dtype=q_samples_normalized.dtype,
)
selected_landmarks[:, 0, :] = q_samples_normalized[
torch.arange(q_samples_normalized.size(0)), random_idx.view(-1), :
].view(B, D)
# Store computed cosine similarities
cos_sims = torch.empty(
(B, N, self.num_landmarks),
device=q_samples_normalized.device,
dtype=q_samples_normalized.dtype,
)
for M in range(1, self.num_landmarks):
with profiler.record_function("find new landmark"):
# Calculate absolute cosine similarity between selected and unselected landmarks
# (B, N, D) * (B, D) -> (B, N)
cos_sims[:, :, M - 1] = torch.einsum(
"b n d, b d -> b n",
q_samples_normalized,
selected_landmarks[:, M - 1, :],
).abs()
# (B, N, M) cosine similarities of current set of landmarks wrt all queries and keys
cos_sim_set = cos_sims[:, :, :M]
# Get orthogonal landmark: landmark with smallest absolute cosine similarity:
# set cosine similarity for already selected landmarks to > 1
cos_sim_set.view(-1, M)[selected_mask.flatten().bool(), :] = 10
                # (B,) pick the landmark whose largest similarity to the already selected set is smallest
selected_landmark_idx = cos_sim_set.amax(-1).argmin(-1)
# Add most orthogonal landmark to selected landmarks:
selected_landmarks[:, M, :] = q_samples_normalized[
torch.arange(q_samples_normalized.size(0)), selected_landmark_idx, :
].view(B, D)
# Removed selected indices from non-selected mask:
selected_mask.scatter_(
-2, selected_landmark_idx.unsqueeze(-1).unsqueeze(-1), landmark_mask
)
# (B, M, D)
landmarks = torch.masked_select(q_samples, selected_mask.bool()).reshape(
B, -1, D
)
return landmarks # (B, M, D)
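# --- Illustrative sketch (not part of the original file) ---
# A minimal, self-contained restatement of the batched k-means update used by
# `_kmeans` above, run on toy data. The helper name `_toy_kmeans` is ours and
# only meant to show the E/M steps and the expected (B, K, D) centroid shape.
if __name__ == "__main__":
    def _toy_kmeans(x: torch.Tensor, K: int, num_iters: int = 5) -> torch.Tensor:
        B, N, D = x.size()
        c = x[:, torch.randperm(N)[:K], :].clone()  # (B, K, D) initial centroids
        counts = c.new_zeros(B, K)
        ones = x.new_ones((B, N))
        for _ in range(num_iters):
            # E step: squared distances (B, N, K), then nearest centroid per point
            D_ij = ((x.view(B, N, 1, D) - c.view(B, 1, K, D)) ** 2).sum(-1)
            cl = D_ij.argmin(dim=-1, keepdim=True)
            # M step: sum the points assigned to each centroid, then average
            c.zero_()
            c.scatter_add_(-2, cl.repeat(1, 1, D), x)
            counts.fill_(1e-6)
            counts.scatter_add_(-1, cl.squeeze(-1), ones)
            c.divide_(counts.unsqueeze(-1))
        return c
    landmarks = _toy_kmeans(torch.randn(2, 64, 16), K=8)
    print(landmarks.shape)  # torch.Size([2, 8, 16])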
|
bart_ls-main
|
xformers/xformers/components/attention/ortho.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass
from typing import Optional
import torch
from xformers import _is_triton_available
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.utils import bool_mask_to_additive
_mask_type_warning = True
if _is_triton_available:
from triton.ops.blocksparse import matmul as blocksparse_matmul
from triton.ops.blocksparse import softmax as blocksparse_softmax
from xformers.triton.softmax import MaskType
from xformers.triton.utils import gpu_capabilities_older_than_70
# Blocksparse requires Tensor cores
if gpu_capabilities_older_than_70():
logging.warning(
"Blocksparse is not available: the current GPU does not expose Tensor cores"
)
_is_triton_available = False
if _is_triton_available:
@dataclass
class BlockSparseAttentionConfig(AttentionConfig):
layout: torch.Tensor # The dimensions of the random features
block_size: int
dropout: float
num_heads: int
@register_attention("blocksparse", BlockSparseAttentionConfig)
class BlockSparseAttention(Attention):
r"""
        Thin wrapper over the Triton blocksparse computations. The sparsity pattern is determined through the layout.
.. warning: the layout is assumed to have the dimensions [heads, seq, seq].
If some dimensions are missing, we assume that the same layout is to be used across heads.
.. warning: for now, the sequence (context) length has to be a power of two. This constraint could
be relaxed in the future.
.. note: it is possible to pass a specific per batch mask in the forward call,
but this will not lead to any speed up.
Any constant sparsity pattern is better passed through the layout parameter.
"""
def __init__(
self,
layout: torch.Tensor,
block_size: int = 16,
dropout: float = 0.0,
num_heads: int = 1, # optional, used to adapt the layout if in need
*args,
**kwargs,
):
if layout.dim() == 2:
logging.warning(
"The layout passed is lacking a head dimension and a batch dimension"
)
logging.warning(
"Now assuming that the same layout is to be used across all heads"
)
layout = layout.unsqueeze(0).expand(num_heads, -1, -1)
logging.warning(f"New layout dimensions: {layout.shape}")
assert block_size >= 16, "Minimum block size is 16, for now at least"
super().__init__()
self.attn_drop = torch.nn.Dropout(dropout, inplace=False)
# Pure blocksparse data
self.layout = layout
self.block_size = block_size
# blocksparse operators
self.sparse_dot_sdd = blocksparse_matmul(
self.layout,
self.block_size,
"sdd",
trans_a=False,
trans_b=True,
)
self.sparse_dot_dsd = blocksparse_matmul(
self.layout,
self.block_size,
"dsd",
trans_a=False,
trans_b=False,
)
self.sparse_softmax = blocksparse_softmax(self.layout, self.block_size)
# make sure that the head dimension is not folded down with the batch
self.requires_head_dimension = True
# key padding mask and attention mask must be passed in separately
self.requires_separate_masks = True
self.requires_same_k_q_dimensions = True
        def update_mask_type(self, mask: torch.Tensor) -> torch.Tensor:
            global _mask_type_warning
            if _mask_type_warning:
                logging.warning(
                    "Mask has to be additive. Fixing that but this slows things down"
                )
                _mask_type_warning = False
            # Return the converted mask so that callers actually use the additive version
            return bool_mask_to_additive(mask)
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
scale: float = 1.0,
*args,
**kwargs,
) -> torch.Tensor:
r"""
att_mask A 2D attention mask. The dtype must be the same as q. An additive mask is expected,
meaning float values using "-inf" to mask values.
key_padding_mask A mask with size (batch size x sequence length). The dtype must be the same as q.
An additive mask is expected, meaning float values using "-inf" to mask values
"""
# NOTE:
# The attention mask will be taken into account when computing the softmax
# meaning that non-masked values which are present in the initial blocksparse layout will be computed.
# If blocks are to be constantly masked, better perf would thus be reached by signalling them out in the
# initial attention setup
            if att_mask is not None and att_mask.dtype == torch.bool:
                att_mask = self.update_mask_type(att_mask)
            if key_padding_mask is not None and key_padding_mask.dtype == torch.bool:
                key_padding_mask = self.update_mask_type(key_padding_mask)
assert (
att_mask is None or att_mask.dim() == 2
), "The attention mask is constant across heads, expected dimensions are [seq x seq]"
assert (
q.shape[-2] == k.shape[-2]
), "Blocksparse requires the same dimensions for K and Q for now"
assert (
q.shape[-2] == self.layout.shape[-2] * self.block_size
), "Actual sequence size and layout are inconsistent"
assert (
k.shape[-2] == self.layout.shape[-2] * self.block_size
), "Actual sequence size and layout are inconsistent"
assert math.log(
q.shape[-2], 2
).is_integer(), (
"For now blocksparse only works on power-of-two sequence lengths"
)
# Blocksparse only works on fp16
q_dtype = q.dtype
q, k, v = q.half(), k.half(), v.half()
if att_mask is not None:
att_mask = att_mask.half()
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.half()
# Self-attend: (B, nh, S, hs) x (B, nh, hs, S) -> (B, nh, S, S)
# When the computations are block sparse, the matrix types change along the way:
# - (sparse) attention matrix = (dense) Kt * (dense) Q
q = q / math.sqrt(q.size(-1))
sparse_att_mat = self.sparse_dot_sdd(q, k)
# - softmax on the sparse attention matrix
sparse_att_mat = self.sparse_softmax(
sparse_att_mat,
scale=scale,
key_padding_mask=key_padding_mask,
attn_mask=att_mask,
key_padding_mask_mode=MaskType.ADD,
attn_mask_mode=MaskType.ADD,
)
sparse_att_mat = self.attn_drop(sparse_att_mat)
# - then (dense) attention is (sparse) attention matrix * dense (value)
a = self.sparse_dot_dsd(sparse_att_mat, v)
return a.to(q_dtype)
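# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of BlockSparseAttention, assuming a CUDA GPU with Tensor cores
# and Triton available. The all-ones layout keeps every block and is only meant
# to show the expected shapes; a real pattern would zero out blocks to skip.
if __name__ == "__main__" and _is_triton_available and torch.cuda.is_available():
    HEADS, SEQ, HEAD_DIM, BLOCK = 2, 256, 64, 16
    layout = torch.ones(HEADS, SEQ // BLOCK, SEQ // BLOCK, dtype=torch.long)
    attn = BlockSparseAttention(layout=layout, block_size=BLOCK, num_heads=HEADS)
    q = torch.randn(1, HEADS, SEQ, HEAD_DIM, device="cuda", dtype=torch.float16)
    out = attn(q, q, q)
    print(out.shape)  # torch.Size([1, 2, 256, 64])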
|
bart_ls-main
|
xformers/xformers/components/attention/blocksparse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.attention import (
Attention,
AttentionConfig,
maybe_sparsify,
register_attention,
sparsify,
)
from xformers.components.attention.attention_patterns import (
causal_1d_pattern,
local_1d_pattern,
)
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class LocalAttentionConfig(AttentionConfig):
causal: Optional[bool] = None
window_size: Optional[int] = None
force_sparsity: Optional[bool] = None
@register_attention("local", LocalAttentionConfig)
class LocalAttention(Attention):
def __init__(
self,
dropout: float = 0.0,
causal: bool = False,
window_size: int = 5,
force_sparsity: bool = False,
*args,
**kwargs,
):
r"""
An implementation of a sliding window attention, as proposed in RoutingTransformer_, LongFormer_ or BigBird_
Args:
dropout (float): the probability of an output to be randomly dropped at training time
causal (bool): apply a causal mask, in that the attention cannot be applied to the future
window_size (int): the overall window size for local attention.
Odd number is expected if the mask is not causal, as the window size will be evenly
distributed on both sides of each query
.. _RoutingTransformer: https://arxiv.org/pdf/2003.05997.pdf
.. _BigBird: https://arxiv.org/pdf/2007.14062.pdf
.. _Longformer: https://arxiv.org/pdf/2004.05150.pdf
"""
super().__init__()
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.causal = causal
self.force_sparsity = force_sparsity
if not self.causal:
assert (
window_size % 2 == 1
), "The window size is assumed to be odd (counts self-attention + 2 wings)"
self.window_size = window_size
self.attention_mask: Optional[torch.Tensor] = None
self.requires_same_k_q_dimensions = True
def _get_local_mask(self, shape: torch.Size) -> torch.Tensor:
window_size = self.window_size * 2 + 1 if self.causal else self.window_size
mask = local_1d_pattern(shape[1], window_size)
if self.causal:
mask &= causal_1d_pattern(shape[1])
mask = sparsify(mask) if self.force_sparsity else maybe_sparsify(mask)
return mask
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
*args,
**kwargs,
):
# Local window attention masking
if self.attention_mask is None or self.attention_mask.shape[1] != q.shape[1]:
self.attention_mask = self._get_local_mask(q.shape).to(q.device)
# Take into account the optional user mask
mask = (
self.attention_mask if att_mask is None else self.attention_mask & att_mask
)
return scaled_dot_product_attention(
q=q, k=k, v=v, att_mask=mask, dropout=self.attn_drop
)
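# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of LocalAttention with heads folded into the batch dimension,
# i.e. inputs of shape (B * nh, S, hs). The window is deliberately large
# relative to the sequence so the pattern stays dense and the example also runs
# without the optional sparse kernels.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    attn = LocalAttention(dropout=0.0, causal=False, window_size=15)
    q = torch.randn(2, 32, 16, device=device)  # (B * nh, S, hs)
    out = attn(q, q, q)
    print(out.shape)  # torch.Size([2, 32, 16])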
|
bart_ls-main
|
xformers/xformers/components/attention/local.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Credits: this is heavily inspired by the official implementation, present in
# https://github.com/sarthmit/Compositional-Attention
# Original author: Sarthak Mittal
# This is a simplified version, for the sake of clarity, and because some features could be exposed later
# via the library directly.
# In particular, code paths for TPUs, quantization and gumbel softmax have been removed
# We're also following the same dimension ordering as in the rest of the xformers library
# which is to say [Batch, Sequence, Embedding] wherever possible
import math
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
register_attention,
)
from xformers.components.attention.core import _softmax
from xformers.components.in_proj_container import InProjContainer, InProjParams
def _either_or(a: Optional[int], b: int) -> int:
return a if a is not None else b
@dataclass
class CompositionalAttentionConfig(AttentionConfig):
dim_model: int
num_heads: int
dim_attn: Optional[int] = None
num_rules: Optional[int] = None
dim_key: Optional[int] = None
dim_value: Optional[int] = None
dim_selection: Optional[int] = None
dropout: float
qk_rule: bool = False
nonlinear: bool = False
q_compose: bool = False
bias: bool = True
causal: Optional[bool] = False
in_proj_container: Optional[InProjContainer] = None
use_separate_proj_weight: Optional[bool] = False
@register_attention("compositional", CompositionalAttentionConfig)
class CompositionalAttention(Attention):
"""Compositional Attention, as proposed in
"Compositional Attention: Disentangling search and retrieval"_, S. Mittal et al.
A key insight from this proposal is that the attention mechanism can be conceived as two steps:
a search and a retrieval operation. When queried, the model can search for the most relevant information
(Softmax(QKt)), then retrieve information given the Value.
Contrary to the original attention proposal, which does not consider interactions in between heads,
    the compositional attention will consider all possible interactions and softmax over that dimension,
so that the information retrieved covers the most relevant dimensions. The number of heads and rules to
use is thus typically smaller than for a comparable traditional Transformer, and asking for the same number of heads
may not fit in memory.
Args:
dim_model: dimension of the incoming latent space
num_heads: number of heads *for the search operation*
dim_attn: dimension (embedding) of the attention
num_rules: number of rules to consider *for the retrieval operation*
dim_selection: dimension of the scoring/selection space for the retrievals
dim_key, dim_value: dimensions of K and V, if different from Q
dropout: attention dropout probability
qk_rule: QK product will drive the retrieval process
nonlinear: use a non linear method to score the retrievals
bias: use bias in the initial projection step
causal: causal computations (attend to the past only)
_"Compositional Attention: Disentangling search and retrieval": https://arxiv.org/pdf/2110.09419v1.pdf
"""
def __init__(
self,
dim_model: int,
num_heads: int,
dim_attn: Optional[int] = None,
num_rules: Optional[int] = None,
dim_selection: Optional[int] = None,
dim_key: Optional[int] = None,
dim_value: Optional[int] = None,
dropout=0.0,
qk_rule=False,
nonlinear=False,
q_compose=False,
in_proj_container: Optional[InProjContainer] = None,
use_separate_proj_weight: Optional[bool] = False,
bias=True,
causal=False,
*_,
**__,
):
super().__init__()
# Define the inherited flags
self.requires_skip_multi_head = (
True # This attention owns the multi-head mechanism
)
# Handle defaults / undefined values
self.dim_model = dim_model
num_rules = _either_or(num_rules, num_heads)
dim_selection = _either_or(dim_selection, dim_model // num_heads)
# All the initial definition plumbing
dim_attn = _either_or(dim_attn, dim_model)
dim_key = _either_or(dim_key, dim_model)
dim_value = _either_or(dim_value, dim_model)
self.in_proj_container = (
in_proj_container
if in_proj_container is not None
else InProjContainer(
query_proj_params=InProjParams(dim_model, dim_key, bias=bias),
key_proj_params=InProjParams(dim_model, dim_key, bias=bias)
if use_separate_proj_weight
else None,
value_proj_params=InProjParams(dim_model, dim_value, bias=bias)
if use_separate_proj_weight
else None,
)
)
self.num_heads = num_heads
self.num_rules = num_rules
self.qk_rule = qk_rule
self.dim_selection = dim_selection
self.nonlinear = nonlinear
self.q_compose = q_compose
self.dropout_module = nn.Dropout(dropout)
self.dim_head = dim_model // num_heads
self.value_dim = dim_attn // num_rules
assert (
self.value_dim * num_rules == dim_attn
), "value_dim must be divisible by num_rules"
self.scaling = self.dim_head ** -0.5
self.scaling_values = self.dim_selection ** -0.5
self.out_proj = nn.Linear(self.num_heads * self.value_dim, dim_model, bias=bias)
if self.qk_rule:
self.value_k = nn.Linear(self.value_dim, self.dim_selection, bias=bias)
if self.q_compose:
self.value_q = nn.Linear(self.dim_head, self.dim_selection, bias=bias)
else:
self.value_q = nn.Linear(
dim_model, self.dim_selection * self.num_heads, bias=bias
)
else:
if self.q_compose:
self.value_q = nn.Linear(self.dim_head, self.dim_selection, bias=bias)
else:
self.value_q = nn.Linear(
dim_model, self.dim_selection * self.num_heads, bias=bias
)
if self.nonlinear:
self.score_network: nn.Module = nn.Sequential(
nn.Linear(
self.dim_selection + self.value_dim,
self.dim_selection,
bias=bias,
),
nn.ReLU(),
nn.Linear(self.dim_selection, 1, bias=bias),
)
else:
self.score_network = nn.Linear(
self.dim_selection + self.value_dim, 1, bias=bias
)
        self.causal = causal
        # Lazily built in the forward pass, once the context length is known
        self._causal_mask: Optional[AttentionMask] = None
self._reset_parameters()
def _reset_parameters(self):
# NOTE: in_proj_container is already initialized
if self.qk_rule:
nn.init.xavier_uniform_(self.value_k.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.value_q.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.value_q.weight)
if self.nonlinear:
nn.init.xavier_uniform_(self.score_network[0].weight)
nn.init.xavier_uniform_(self.score_network[2].weight)
else:
nn.init.xavier_uniform_(self.score_network.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
q: Tensor,
k: Tensor,
v: Tensor,
att_mask: Optional[Tensor] = None,
*args,
**kwargs,
) -> Tensor:
"""
        Input shape: Batch x Time x Channel
        Args:
            att_mask (Tensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
B, Sq, E = q.shape
_, Sk, _ = k.shape
assert E == self.dim_model
# First define projected query/key/values
# We keep the projected and original tensors in flight,
# depending on the options the original values could be reused
q_unprojected = q
q, k, v = self.in_proj_container(query=q, key=k, value=v)
q *= self.scaling
# Init causal mask if needed, now that we know the context length
if self.causal and (
self._causal_mask is None or self._causal_mask.shape[0] != Sk
):
self._causal_mask = AttentionMask.make_causal(Sq, Sq, device=q.device)
# Convenience, create an attention mask if a tensor was passed
# This sanitizes different mask types being passed, from now on it's additive
if isinstance(att_mask, torch.Tensor):
# By default we don't know of the causality, and a check would be expensive
att_mask_additive: Optional[AttentionMask] = (
AttentionMask.from_bool(att_mask)
if att_mask.dtype == torch.bool
else AttentionMask(att_mask, is_causal=False)
)
else:
att_mask_additive = None
# Handle the attention and key padding masks
if self._causal_mask is not None:
# Optionally add the causal mask
if att_mask_additive is not None:
att_mask_additive += self._causal_mask
else:
att_mask_additive = self._causal_mask
# Flatten the heads or the rules
q = (
q.view(B, Sq, self.num_heads, self.dim_head)
.movedim(2, 1)
.flatten(0, 1) # [B * num_heads, Sq, dim_head]
)
k = (
k.view(B, Sk, self.num_heads, self.dim_head).movedim(2, 1).flatten(0, 1)
) # [B * num_heads, Sk, dim_head]
v = v.view(B, -1, self.num_rules, self.value_dim).movedim(2, 1).flatten(0, 1)
# Compute the search: Softmax(QKt)
attn_weights = torch.bmm(q, k.transpose(1, 2)) # [B * self.num_heads, Sq, Sk]
if att_mask_additive is not None:
attn_weights += att_mask_additive.values
attn_weights = _softmax(attn_weights, causal=self.causal)
attn_weights = attn_weights.view(B, self.num_heads, Sq, Sk)
attn_probs = self.dropout_module(attn_weights)
# Now compute the information retrieval
# keep all the heads in flight, we'll score the different possibilities
# - compute all the possible retrievals
v = v.view(B, 1, self.num_rules, Sk, self.value_dim)
attn_probs = attn_probs.unsqueeze(2)
attn = torch.matmul(attn_probs, v).view(
B, self.num_heads, self.num_rules, Sq, self.value_dim
)
attn = attn.movedim(3, 1) # [B, Sq, H, Rules, Values]
# - search the most appropriate retrieval among all the values
if self.q_compose:
v_q = self.value_q(q.transpose(0, 1)).view(
B, Sq, self.num_heads, 1, self.dim_selection
)
else:
v_q = self.value_q(q_unprojected).view(
B, Sq, self.num_heads, 1, self.dim_selection
)
if self.qk_rule:
v_q *= self.scaling_values
v_k = (
self.value_k(attn)
.view(B, Sq, self.num_heads, self.num_rules, self.dim_selection)
.transpose(4, 3)
.contiguous()
)
v_score = torch.matmul(v_q, v_k).view(
B, Sq, self.num_heads, self.num_rules, 1
)
else:
v_q = v_q.expand(-1, -1, -1, self.num_rules, -1)
v_in = torch.cat([attn, v_q], dim=-1)
v_score = self.score_network(v_in).view(
B, Sq, self.num_heads, self.num_rules, 1
)
v_score = F.softmax(v_score, dim=3)
# - extracted values are the original attention (inc. all the values) weighted by value score
attn = (attn * v_score).sum(dim=3).view(B, Sq, self.num_heads * self.value_dim)
# Final attention projection, same as other mechanisms
attn = self.out_proj(attn)
return attn
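# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of CompositionalAttention. Unlike most attentions in the
# library it owns its multi-head machinery, so inputs are (B, S, dim_model)
# and the output has the same shape. The sizes below are arbitrary.
if __name__ == "__main__":
    attn = CompositionalAttention(dim_model=64, num_heads=4, num_rules=2, dropout=0.0)
    x = torch.randn(2, 16, 64)  # (B, S, dim_model)
    out = attn(x, x, x)
    print(out.shape)  # torch.Size([2, 16, 64])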
|
bart_ls-main
|
xformers/xformers/components/attention/compositional.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
from typing import Union
from functools import lru_cache
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from xformers.components.attention import Attention, AttentionConfig, register_attention
@dataclass
class LongformerConfig(AttentionConfig):
block_size: int
num_heads: int
dim_model: int
@register_attention("longformer", LongformerConfig)
class LongformerAttention(Attention):
def __init__(
self,
dropout: float,
num_heads: int,
dim_model: int,
block_size: int = 512,
*args, **kwargs
):
super().__init__()
self.drop_attn = nn.Dropout(dropout)
self.num_head = num_heads
self.head_dim = dim_model // num_heads
self.dim = dim_model
self.window_size = block_size
self.embed_dim = dim_model
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
*args, **kwargs
):
assert key_padding_mask is not None
attention_mask = key_padding_mask.to(q)
# attention_mask[:,0] = -1
# input q size (bsz*heads, seq_len, head_dim)
def _pad_to_window_size(x, window_size, pad):
seq_len = x.size(1)
pad_len = (window_size - seq_len % window_size) % window_size
assert len(x.shape) <= 3
if len(x.shape) == 3:
return F.pad(x, (0,0,0,pad_len), value=pad), pad_len
else:
return F.pad(x, (0,pad_len), value=pad), pad_len
orig_seq_len = q.size(1)
q, _ = _pad_to_window_size(q, self.window_size * 2, 0)
k, _ = _pad_to_window_size(k, self.window_size * 2, 0)
v, _ = _pad_to_window_size(v, self.window_size * 2, 0)
attention_mask, _ = _pad_to_window_size(attention_mask, self.window_size * 2, 1)
attention_mask = (attention_mask * -1e8).type_as(q)
key_padding_mask = attention_mask < 0
extra_attention_mask = attention_mask > 0
remove_from_windowed_attention_mask = attention_mask != 0
# num of global tokens
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
max_num_extra_indices_per_batch = num_extra_indices_per_batch.max()
if max_num_extra_indices_per_batch <= 0:
extra_attention_mask = None
else:
extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True)
zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device)
# mask indicating which values are actually going to be padding
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1)
# 2) location of the non-padding values in the selected global attention
selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True)
# 3) location of the padding values in the selected global attention
selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True)
seq_len = q.size(1)
bsz = q.size(0) // self.num_head
q = q.view(bsz, self.num_head, seq_len, self.head_dim).transpose(1,2).mul(1./math.sqrt(self.head_dim))
k = k.view(bsz, self.num_head, seq_len, self.head_dim).transpose(1,2)
v = v.view(bsz, self.num_head, seq_len, self.head_dim).transpose(1,2)
attn_weights = sliding_chunks_matmul_qk(q, k, self.window_size, padding_value=0) # bsz, seq_len, num_heads, 2 * w + 1
if remove_from_windowed_attention_mask is not None:
# This implementation is fast and takes very little memory because num_heads x hidden_size = 1
# from (bsz x seq_len) to (bsz x seq_len x num_heads x hidden_size)
remove_from_windowed_attention_mask = remove_from_windowed_attention_mask.unsqueeze(dim=-1).unsqueeze(dim=-1)
# cast to float/half then replace 1's with -inf
float_mask = remove_from_windowed_attention_mask.type_as(q).masked_fill(remove_from_windowed_attention_mask, -10000.0)
# repeat_size = 1 if isinstance(self.attention_dilation, int) else len(self.attention_dilation)
repeat_size = 1
float_mask = float_mask.repeat(1, 1, repeat_size, 1)
ones = float_mask.new_ones(size=float_mask.size()) # tensor of ones
# diagonal mask with zeros everywhere and -inf inplace of padding
d_mask = sliding_chunks_matmul_qk(ones, float_mask, self.window_size, padding_value=0)
attn_weights += d_mask
assert list(attn_weights.size()) == [bsz, seq_len, self.num_head, self.window_size * 2 + 1]
# the extra attention
if extra_attention_mask is not None:
selected_k = k.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
selected_k[selection_padding_mask_nonzeros] = k[extra_attention_mask_nonzeros]
# (bsz, seq_len, num_heads, max_num_extra_indices_per_batch)
selected_attn_weights = torch.einsum('blhd,bshd->blhs', (q, selected_k))
selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000
# concat to attn_weights
# (bsz, seq_len, num_heads, extra attention count + 2*window+1)
attn_weights = torch.cat((selected_attn_weights, attn_weights), dim=-1)
attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) # use fp32 for numerical stability
if key_padding_mask is not None:
# softmax sometimes inserts NaN if all positions are masked, replace them with 0
attn_weights_float = torch.masked_fill(attn_weights_float, key_padding_mask.unsqueeze(-1).unsqueeze(-1),
0.0)
attn_weights = attn_weights_float.type_as(attn_weights)
#attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
attn_probs = self.drop_attn(attn_weights)
attn = 0
if extra_attention_mask is not None:
selected_attn_probs = attn_probs.narrow(-1, 0, max_num_extra_indices_per_batch)
selected_v = v.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
selected_v[selection_padding_mask_nonzeros] = v[extra_attention_mask_nonzeros]
# use `matmul` because `einsum` crashes sometimes with fp16
# attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
attn += torch.matmul(selected_attn_probs.transpose(1, 2), selected_v.transpose(1, 2).type_as(selected_attn_probs)).transpose(1, 2)
attn_probs = attn_probs.narrow(-1, max_num_extra_indices_per_batch, attn_probs.size(-1) - max_num_extra_indices_per_batch).contiguous()
attn += sliding_chunks_matmul_pv(attn_probs, v, self.window_size)
attn = attn.type_as(q)
assert list(attn.size()) == [bsz, seq_len, self.num_head, self.head_dim]
attn = attn.transpose(0, 1).reshape(seq_len, bsz, self.embed_dim).contiguous()
if extra_attention_mask is not None:
# global tokens attend all other tokens
global_q = q.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
global_q[selection_padding_mask_nonzeros] = q[extra_attention_mask_nonzeros]
# k # (bsz, seq_len, self.num_head, self.head_dim)
g2all_attn_weights = global_q.transpose(1,2).matmul(k.permute(0, 2, 3, 1))
attn_weights = g2all_attn_weights
assert list(attn_weights.size()) == [bsz, self.num_head, max_num_extra_indices_per_batch, seq_len]
attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000.0
if key_padding_mask is not None:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
-10000.0,
)
attn_weights = attn_weights.view(bsz * self.num_head, max_num_extra_indices_per_batch, seq_len)
attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32)
attn_weights = attn_weights_float.type_as(attn_weights) # use fp32 for numerical stability
# attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
attn_probs = self.drop_attn(attn_weights)
v = v.transpose(1,2).view(-1, seq_len, self.head_dim)
selected_attn = torch.bmm(attn_probs, v)
assert list(selected_attn.size()) == [bsz * self.num_head, max_num_extra_indices_per_batch, self.head_dim]
selected_attn_4d = selected_attn.view(bsz, self.num_head, max_num_extra_indices_per_batch, self.head_dim)
nonzero_selected_attn = selected_attn_4d[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]]
attn[extra_attention_mask_nonzeros[::-1]] = nonzero_selected_attn.view(len(selection_padding_mask_nonzeros[0]), -1).type_as(q)
out = attn[:orig_seq_len, :, :].view(-1, bsz, self.num_head, self.head_dim).permute(1, 2, 0, 3).view(bsz*self.num_head, orig_seq_len, self.head_dim)
return out
def sliding_chunks_matmul_qk(q: torch.Tensor, k: torch.Tensor, w: int, padding_value: float):
    '''Matrix multiplication of query x key tensors using a sliding window attention pattern.
This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer)
with an overlap of size w'''
bsz, seqlen, num_heads, head_dim = q.size()
assert seqlen % (w * 2) == 0
assert q.size() == k.size()
chunks_count = seqlen // w - 1
# group bsz and num_heads dimensions into one, then chunk seqlen into chunks of size w * 2
q = q.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim)
k = k.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim)
chunk_q = _chunk(q, w) # (B*H, num_of_overlapping_windows, head_dim)
chunk_k = _chunk(k, w)
    # matrix multiplication
# bcxd: bsz*num_heads x chunks x 2w x head_dim
# bcyd: bsz*num_heads x chunks x 2w x head_dim
# bcxy: bsz*num_heads x chunks x 2w x 2w
chunk_attn = torch.einsum('bcxd,bcyd->bcxy', (chunk_q, chunk_k)) # multiply
# convert diagonals into columns
diagonal_chunk_attn = _skew(chunk_attn, direction=(0, 0, 0, 1), padding_value=padding_value)
    # allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (w * 2 + 1) columns. The first (w) columns are the w lower triangles (attention from a word to
# w previous words). The following column is attention score from each word to itself, then
# followed by w columns for the upper triangle.
diagonal_attn = diagonal_chunk_attn.new_empty((bsz * num_heads, chunks_count + 1, w, w * 2 + 1))
    # copy parts from diagonal_chunk_attn into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attn[:, :-1, :, w:] = diagonal_chunk_attn[:, :, :w, :w + 1]
diagonal_attn[:, -1, :, w:] = diagonal_chunk_attn[:, -1, w:, :w + 1] # Potential BUG: invalid attn weights
# - copying the lower triangle
diagonal_attn[:, 1:, :, :w] = diagonal_chunk_attn[:, :, - (w + 1):-1, w + 1:]
diagonal_attn[:, 0, 1:w, 1:w] = diagonal_chunk_attn[:, 0, :w - 1, 1 - w:]
# separate bsz and num_heads dimensions again
diagonal_attn = diagonal_attn.view(bsz, num_heads, seqlen, 2 * w + 1).transpose(2, 1)
mask_invalid_locations(diagonal_attn, w, 1, False)
return diagonal_attn
def sliding_chunks_matmul_pv(prob: torch.Tensor, v: torch.Tensor, w: int):
    '''Same as sliding_chunks_matmul_qk but for prob and value tensors. It expects the same output
    format as produced by sliding_chunks_matmul_qk'''
bsz, seqlen, num_heads, head_dim = v.size()
assert seqlen % (w * 2) == 0
assert prob.size()[:3] == v.size()[:3]
assert prob.size(3) == 2 * w + 1
chunks_count = seqlen // w - 1
# group bsz and num_heads dimensions into one, then chunk seqlen into chunks of size 2w
chunk_prob = prob.transpose(1, 2).reshape(bsz * num_heads, seqlen // w, w, 2 * w + 1)
# group bsz and num_heads dimensions into one
v = v.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim)
# pad seqlen with w at the beginning of the sequence and another w at the end
padded_v = F.pad(v, (0, 0, w, w), value=-1)
# chunk padded_v into chunks of size 3w and an overlap of size w
chunk_v_size = (bsz * num_heads, chunks_count + 1, 3 * w, head_dim)
chunk_v_stride = padded_v.stride()
chunk_v_stride = chunk_v_stride[0], w * chunk_v_stride[1], chunk_v_stride[1], chunk_v_stride[2]
chunk_v = padded_v.as_strided(size=chunk_v_size, stride=chunk_v_stride)
skewed_prob = _skew2(chunk_prob, padding_value=0)
context = torch.einsum('bcwd,bcdh->bcwh', (skewed_prob, chunk_v))
return context.view(bsz, num_heads, seqlen, head_dim).transpose(1, 2)
def _chunk(x, w):
    '''Convert into overlapping chunks. Chunk size = 2w, overlap size = w'''
# non-overlapping chunks of size = 2w
x = x.view(x.size(0), x.size(1) // (w * 2), w * 2, x.size(2))
# use `as_strided` to make the chunks overlap with an overlap size = w
chunk_size = list(x.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(x.stride())
# chunk_stride[1]: 512 x dim
chunk_stride[1] = chunk_stride[1] // 2
return x.as_strided(size=chunk_size, stride=chunk_stride)
def _skew2(x, padding_value):
'''shift every row 1 step to right converting columns into diagonals'''
# X = B x C x M x L
B, C, M, L = x.size()
x = F.pad(x, (0, M + 1), value=padding_value) # B x C x M x (L+M+1)
x = x.view(B, C, -1) # B x C x ML+MM+M
x = x[:, :, :-M] # B x C x ML+MM
x = x.view(B, C, M, M + L) # B x C, M x L+M
x = x[:, :, :, :-1]
return x
def mask_invalid_locations(input_tensor: torch.Tensor, w: int, d: Union[torch.Tensor, int], autoregressive: bool) -> None:
# d: dilation?
affected_seq_len, beginning_mask, ending_mask = _get_invalid_locations_mask(w, d, autoregressive, input_tensor.device)
seq_len = input_tensor.size(1)
beginning_input = input_tensor[:, :affected_seq_len, :, :w+1]
beginning_mask = beginning_mask[:, :seq_len].expand(beginning_input.size())
beginning_input.masked_fill_(beginning_mask, -float('inf'))
if not autoregressive:
ending_input = input_tensor[:, -affected_seq_len:, :, -(w+1):]
ending_mask = ending_mask[:, -seq_len:].expand(ending_input.size())
ending_input.masked_fill_(ending_mask, -float('inf'))
def _skew(x, direction, padding_value):
    '''Convert diagonals into columns (or columns into diagonals, depending on `direction`)'''
x_padded = F.pad(x, direction, value=padding_value) # bsz*num_heads x chunks x (2w+1) x 2w
x_padded = x_padded.view(*x_padded.size()[:-2], x_padded.size(-1), x_padded.size(-2))
return x_padded
@lru_cache()
def _get_invalid_locations_mask(w: int, d: Union[torch.Tensor,int], autoregressive: bool, device: str):
if isinstance(d, int):
affected_seq_len = w * d
mask = _get_invalid_locations_mask_fixed_dilation(affected_seq_len, w, d)
mask = mask[None, :, None, :]
else:
affected_seq_len = w * d.max()
head_masks = []
d_list = d.cpu().numpy().tolist()
for d in d_list:
one_head_mask = _get_invalid_locations_mask_fixed_dilation(affected_seq_len, w, d)
head_masks.append(one_head_mask)
mask = torch.stack(head_masks, dim=-2)
mask = mask[None, :, :, :]
ending_mask = None if autoregressive else mask.flip(dims=(1, 3)).bool().to(device)
return affected_seq_len, mask.bool().to(device), ending_mask
def _get_invalid_locations_mask_fixed_dilation(seq_len: int, w: int, d: int):
diagonals_list = []
for j in range(-d * w, d, d):
diagonal_mask = torch.zeros(seq_len, device='cpu', dtype=torch.uint8)
diagonal_mask[:-j] = 1
diagonals_list.append(diagonal_mask)
return torch.stack(diagonals_list, dim=-1)
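# --- Illustrative sketch (not part of the original file) ---
# Small demonstration of the overlapping chunking used by the sliding-window
# helpers above: a (B*H, seqlen, dim) tensor is viewed as chunks of size 2w
# that overlap by w, without copying memory.
if __name__ == "__main__":
    w = 4
    x = torch.arange(2 * 16, dtype=torch.float32).view(2, 16, 1)
    chunks = _chunk(x, w)
    print(chunks.shape)        # (2, 3, 8, 1): seqlen // w - 1 chunks of size 2w
    print(chunks[0, 0, :, 0])  # tokens 0..7
    print(chunks[0, 1, :, 0])  # tokens 4..11, overlapping the previous chunk by w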
|
bart_ls-main
|
xformers/xformers/components/attention/longformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from typing import Any, Callable, Dict, Set, Union
import torch
from xformers.utils import (
generate_matching_config,
get_registry_decorator,
import_all_modules,
)
from ._sputnik_sparse import SparseCS
from .attention_mask import AttentionMask
from .base import Attention, AttentionConfig # noqa
# CREDITS: Classy Vision registry mechanism
ATTENTION_REGISTRY: Dict[str, Any] = {}
ATTENTION_CLASS_NAMES: Set[str] = set()
# Arbitrary threshold for now,
# in between dense and sparse matrix algorithms for the attention mechanism
_DENSITY_THRESHOLD = 0.30  # noqa  # from the sputnik paper
_USE_SPUTNIK = True
def build_attention(config: Union[Dict[str, Any], AttentionConfig]):
"""Builds an attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it."""
if not isinstance(config, AttentionConfig):
try:
config_instance = generate_matching_config(
config, ATTENTION_REGISTRY[config["name"]].config
)
except KeyError as e:
name = config["name"]
logging.warning(f"{name} not available among {ATTENTION_REGISTRY.keys()}")
raise e
else:
config_instance = config
return ATTENTION_REGISTRY[config_instance.name].constructor.from_config(
config_instance
)
"""Registers an Attention subclass.
This decorator allows xFormers to instantiate a subclass of Attention
from a configuration file, even if the class itself is not part of the
xFormers library. To use it, apply this decorator to an Attention
subclass, like this:
.. code-block:: python
@dataclass
class MyConfig:
...
@register_attention('my_attention', MyConfig)
class MyAttention(Attention):
...
To instantiate an attention from a configuration file, see :func:`build_attention`."""
register_attention: Callable[[str, Any], Callable[[Any], Any]] = get_registry_decorator(
ATTENTION_REGISTRY, ATTENTION_CLASS_NAMES, Attention, AttentionConfig
)
def maybe_sparsify(matrix) -> Any:
# Sparsify if that makes sense
if torch.count_nonzero(matrix).item() / matrix.numel() > _DENSITY_THRESHOLD:
# If not sparse, then AttentionMask is the reference type
return AttentionMask.from_bool(matrix)
return sparsify(matrix)
def sparsify(matrix):
if _USE_SPUTNIK:
return SparseCS(matrix)
return matrix.to_sparse()
from .favor import FavorAttention # noqa
from .global_tokens import GlobalAttention # noqa
from .linformer import LinformerAttention # noqa
from .local import LocalAttention # noqa
from .nystrom import NystromAttention # noqa
from .ortho import OrthoFormerAttention # noqa
from .random import RandomAttention # noqa
from .scaled_dot_product import ScaledDotProduct # noqa
from .block_noglobal import BlockNoglobalAttention
__all__ = [
"ScaledDotProduct",
"LocalAttention",
"LinformerAttention",
"NystromAttention",
"RandomAttention",
"OrthoFormerAttention",
"GlobalAttention",
"FavorAttention",
"Attention",
"AttentionMask",
"build_attention",
"register_attention",
]
# Optionally expose the BlockSparse attention
try:
from .blocksparse import BlockSparseAttention # noqa
from .blocksparse_local import BlockSparseLocalAttention
__all__ += ["BlockSparseAttention"]
except ImportError:
pass
# automatically import any Python files in the directory
import_all_modules(str(Path(__file__).parent), "xformers.components.attention")
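# --- Illustrative sketch (not part of the original file) ---
# How a plain config dict is turned into an attention instance through the
# registry above; "local" is one of the names registered in this package.
if __name__ == "__main__":
    attention = build_attention(
        {"name": "local", "dropout": 0.0, "causal": False, "window_size": 5}
    )
    print(type(attention).__name__)  # LocalAttention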
|
bart_ls-main
|
xformers/xformers/components/attention/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from functools import partial, reduce
from inspect import isfunction
import torch.nn.functional as F
from xformers.components.attention import Attention, AttentionConfig, register_attention
from operator import mul
@dataclass
class BlockSelfAttentionConfig(AttentionConfig):
window_size: int
num_heads: int
@register_attention("simple_block", BlockSelfAttentionConfig)
class SimpleBlockAttention(Attention):
def __init__(
self,
dropout: float,
window_size: int,
num_heads: int,
*args,
**kwargs
):
"""
        A straightforward implementation of disjoint block attention using native PyTorch operations, which also supports CPU inference.
        The sequence is split into equal-length chunks and self-attention is computed separately within each chunk.
"""
super().__init__()
self.chunk_size = window_size
self.drop_attn = nn.Dropout(dropout)
self.num_head = num_heads
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
*args, **kwargs
):
bh = q.size(0)
orig_seq_len = q.size(1)
bsz = bh // self.num_head
head_dim = q.size(-1)
assert key_padding_mask is not None
key_padding_mask = key_padding_mask.to(q)
# pad the input length to factors of chunk size
def _pad_to_window_size(x, window_size):
seq_len = x.size(-2)
pad_len = (window_size - seq_len % window_size) % window_size
return F.pad(x, (0,0,0,pad_len), value=0), pad_len
q, _ = _pad_to_window_size(q, self.chunk_size)
k, _ = _pad_to_window_size(k, self.chunk_size)
v, _ = _pad_to_window_size(v, self.chunk_size)
if key_padding_mask.shape[1] % self.chunk_size != 0:
pad_len = (self.chunk_size - key_padding_mask.shape[1] % self.chunk_size) % self.chunk_size
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
buckets = q.shape[1] // self.chunk_size
b_q = bucket(buckets, q)
b_k, b_v = map(partial(bucket, buckets), (k, v)) # BH * bct * n_b * D
dots = torch.einsum('buie,buje->buij', b_q, b_k) * (head_dim ** -0.5)
mask_value = -10000
        # key_padding_mask uses 1 for padded positions; everything else
        # (including any -1 global-token markers) is treated as a regular token here
        q_mask = key_padding_mask != 1
        # True means the position is attended to
kv_mask = q_mask
mq, mk = bucket(buckets, q_mask), bucket(buckets, kv_mask) # B * bkt * n_b
expand_head_and_merge_into_batch = lambda x: merge_dims(0, 1, expand_dim(x.unsqueeze(1), 1, self.num_head))
mq, mk = map(expand_head_and_merge_into_batch, (mq, mk)) # BH * bkt * n_b
mask = mq[:, :, :, None] * mk[:, :, None, :]
dots.masked_fill_(~mask, mask_value)
del mask
all_attn = dots.view(bsz*self.num_head, -1, self.chunk_size)
all_attn_probs = all_attn.softmax(dim=-1)
all_attn_probs = self.drop_attn(all_attn_probs)
block_attn_probs = all_attn_probs[:, :, :all_attn.shape[-1]]
block_attn_probs = block_attn_probs.view(bsz*self.num_head, -1, self.chunk_size, self.chunk_size)
out = block_attn_probs.matmul(b_v).view(bsz*self.num_head, -1, head_dim)
out = out[:,:orig_seq_len]
return out
def expand_dim(t, dim, k):
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def bucket(buckets, t, dim=1):
shape = list(t.shape)
shape[dim:dim+1] = [buckets, -1]
return t.reshape(*shape)
def unbucket(t, dim=1):
shape = list(t.shape)
shape[dim:dim+2] = [-1]
return t.reshape(*shape)
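# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of SimpleBlockAttention on CPU. Inputs are (B * num_heads, S,
# head_dim) and the key padding mask is (B, S) with 1 marking padded positions.
if __name__ == "__main__":
    B, H, S, D = 2, 2, 48, 16
    attn = SimpleBlockAttention(dropout=0.0, window_size=16, num_heads=H)
    q = torch.randn(B * H, S, D)
    key_padding_mask = torch.zeros(B, S)
    key_padding_mask[:, -4:] = 1  # the last four positions are padding
    out = attn(q, q, q, key_padding_mask=key_padding_mask)
    print(out.shape)  # torch.Size([4, 48, 16])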
|
bart_ls-main
|
xformers/xformers/components/attention/simple_block.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.ops import masked_matmul
from xformers.sparse import SparseCSRTensor
# TODO: this is here for BC
from xformers.sparse.utils import _csr_to_coo, _dense_to_sparse # noqa: F401
class SparseCS:
def __init__(self, matrix, device=None):
if device is None:
device = torch.device("cpu")
if matrix.ndim == 2:
matrix = matrix[None]
assert matrix.ndim == 3
self._mat = SparseCSRTensor.from_dense(matrix).to(device)
@property
def device(self):
return self._mat.device
@property
def ndim(self):
return self._mat.ndim
@property
def dtype(self):
return self._mat.dtype
@property
def is_sparse(self):
return True
@property
def shape(self):
return self._mat.shape[1:]
@property
def values(self):
return self._mat._csr_values
@property
def row_indices(self):
return self._mat._csr_row_indices
@property
def column_indices(self):
return self._mat._csr_column_indices
@property
def row_offsets(self):
return self._mat._csr_row_offsets
@property
def _transp_info(self):
return self._mat._csr_transp_info
@classmethod
def wrap(
cls, shape, values, row_indices, row_offsets, column_indices, _transp_info
):
matrix = cls.__new__(cls)
_shape = (values.shape[0],) + shape
csr_matrix = SparseCSRTensor._wrap(
_shape, values, row_indices, row_offsets, column_indices, _transp_info
)
matrix._mat = csr_matrix
return matrix
@classmethod
def _wrap(cls, csr_matrix):
assert isinstance(csr_matrix, SparseCSRTensor)
matrix = cls.__new__(cls)
matrix._mat = csr_matrix
return matrix
def __mul__(self, other):
assert isinstance(other, (int, float))
return type(self)._wrap(self._mat * other)
def __add__(self, other):
assert isinstance(other, type(self))
return type(self)._wrap(self._mat + other._mat)
def matmul_with_mask(self, a, b):
return type(self)._wrap(masked_matmul(a, b, self._mat))
def softmax(self):
out = torch.nn.functional.softmax(self._mat, -1)
return type(self)._wrap(out)
def spmm(self, b):
out = torch.bmm(self._mat, b)
return out
def transpose(self):
out = torch.transpose(self._mat, -2, -1)
return type(self)._wrap(out)
def to(self, device):
assert isinstance(device, torch.device)
out = self._mat.to(device)
return type(self)._wrap(out)
def to_dense(self):
return self._mat.to_dense()
def logical_and(self, other: torch.Tensor):
assert not isinstance(other, SparseCS)
out = torch.logical_and(self._mat, other)
return type(self)._wrap(out)
def __and__(self, other):
return self.logical_and(other)
|
bart_ls-main
|
xformers/xformers/components/attention/_sputnik_sparse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from contextlib import nullcontext
from typing import Optional, Union
import torch
from xformers import _is_sparse_available, _is_triton_available
from xformers.components.attention.attention_mask import AttentionMask
if _is_sparse_available:
from ._sputnik_sparse import SparseCS
if _is_triton_available:
from xformers.triton.softmax import softmax as triton_softmax
def _create_random_sparsity(matrix, sparsity, divisible_by=4):
assert matrix.ndim == 3
keep = torch.rand_like(matrix[0], dtype=torch.float32) > sparsity
nonzero = torch.nonzero(keep)
nnz = nonzero.shape[0]
# NOTE: need to make it a multiple of 4 for sputnik
nonzero = nonzero[: (nnz - nnz % divisible_by)]
i, j = nonzero.unbind(1)
output = torch.zeros_like(matrix)
bdim = torch.arange(matrix.shape[0], device=matrix.device)[:, None]
output[bdim, i, j] = matrix[bdim, i, j]
return output
def _broadcast_batch(mask, batch_size):
if mask.ndim == 3:
return mask
assert mask.ndim == 2
mask = mask.coalesce()
values = mask.values()
indices = mask.indices()
nnz = len(values)
# strategy: repeat the indices and append the extra batch dimension to the indices
indices = indices.repeat(1, batch_size)
# now create the batch indices
batch_indices = torch.arange(batch_size, device=indices.device)
batch_indices = batch_indices[:, None].expand(batch_size, nnz).flatten()
# put them together
indices = torch.cat([batch_indices[None, :], indices], dim=0)
# now repeat the values
values = values.repeat(batch_size)
size = (batch_size,) + mask.shape
return torch.sparse_coo_tensor(indices, values, size)
def _matmul_with_mask(
a: torch.Tensor, b: torch.Tensor, mask: Optional[Union[torch.Tensor, "SparseCS"]]
) -> torch.Tensor:
if mask is None:
return a @ b
if _is_sparse_available and mask.dtype == torch.bool:
if isinstance(mask, SparseCS):
return mask.matmul_with_mask(a, b)
if mask.is_sparse:
# perform broadcasting if needed
mask = _broadcast_batch(mask, a.shape[0])
# coalesced is not implemented for bool tensors, so need to cast
mask = mask.to(dtype=a.dtype) # type: ignore # mypy is missing the catch above
return torch.ops.xformers.matmul_with_mask(a, b, mask)
# Non optimized codepath
assert not isinstance(mask, SparseCS)
att = a @ b
if mask.dtype == torch.bool:
if mask.ndim == 2:
mask = mask.unsqueeze(0).expand(att.shape[0], -1, -1)
# mask is presumed false == ignore
att[~mask] = float("-inf")
else:
# mask is presumed additive
att += mask
return att
def _softmax(a: torch.Tensor, causal: bool = False) -> torch.Tensor:
if _is_sparse_available and isinstance(a, SparseCS):
return a.softmax()
if a.is_sparse:
return torch.sparse.softmax(a, dim=a.ndim - 1)
if _is_triton_available:
return triton_softmax(a, mask=None, causal=causal)
else:
return torch.softmax(a, dim=a.ndim - 1)
if _is_sparse_available:
class SparseBMM(torch.autograd.Function):
@staticmethod
def forward(ctx, a, b):
a = a.coalesce()
r = torch.bmm(a, b)
ctx.save_for_backward(a, b)
return r
@staticmethod
def backward(ctx, grad):
a, b = ctx.saved_tensors
# gradients w.r.t. a
ga = None
if ctx.needs_input_grad[0]:
ga = torch.ops.xformers.matmul_with_mask(grad, b.transpose(-2, -1), a)
# gradients w.r.t. b
gb = None
if ctx.needs_input_grad[1]:
gb = a.transpose(1, 2).bmm(grad)
return ga, gb
def _sparse_bmm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""
Batch matrix multiply between a sparse matrix and a dense matrix
"""
assert a.ndim == b.ndim == 3
assert a.shape[0] == b.shape[0]
assert a.shape[2] == b.shape[1]
return SparseBMM.apply(a, b)
def bmm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
if _is_sparse_available:
if isinstance(a, SparseCS):
return a.spmm(b)
if a.is_sparse:
return _sparse_bmm(a, b)
return a @ b
def _apply_dropout(att, dropout):
if dropout is None:
return att
# Dropout chokes on sparse tensors
if _is_sparse_available:
if isinstance(att, SparseCS):
values = att.values.clone()
values = dropout(values)
att = SparseCS.wrap(
att.shape,
values,
att.row_indices,
att.row_offsets,
att.column_indices,
att._transp_info,
)
elif att.is_sparse:
att = att.coalesce()
values = att.values().clone() # protect against in-place dropout
values = dropout(values)
att = torch.sparse_coo_tensor(att.indices(), values, att.shape)
else:
# Simple dense case
att = dropout(att)
return att
# Non optimized vanilla dropout
att = dropout(att)
return att
def scaled_query_key_softmax(
q: torch.Tensor,
k: torch.Tensor,
att_mask: Optional[Union[AttentionMask, "SparseCS", torch.Tensor]],
) -> torch.Tensor:
# TODO assume we have (N, S, hs) instead of (B, nh, S, hs), with N = B x nh
# this is needed due to limitations in sparse_bmm for now
# Self-attend: (N, S, hs) x (N, hs, S) -> (N, S, S)
q = q / math.sqrt(k.size(-1))
# Matmul with mask
if att_mask is not None and isinstance(att_mask, AttentionMask):
# Additive mask
mask: Optional[Union[SparseCS, torch.Tensor]] = att_mask.values
else:
mask = att_mask
att = _matmul_with_mask(q, k.transpose(-2, -1), mask)
# Softmax to get the attention probabilities
is_causal = isinstance(att_mask, AttentionMask) and att_mask.is_causal
att = _softmax(att, causal=is_causal)
return att
def scaled_dot_product_attention(
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[AttentionMask, "SparseCS", torch.Tensor]],
dropout: Optional[torch.nn.Module] = None,
) -> torch.Tensor:
autocast_disabled = (
_is_sparse_available
and isinstance(att_mask, SparseCS)
or (att_mask is not None and att_mask.is_sparse)
)
with torch.cuda.amp.autocast(enabled=False) if autocast_disabled else nullcontext():
if autocast_disabled:
q, k, v = q.float(), k.float(), v.float()
att = scaled_query_key_softmax(q, k, att_mask=att_mask)
# Optional dropout, could be part of the masking in the future
att = _apply_dropout(att, dropout)
# Get to the predicted values, for all heads
# y = att @ v # (N, S, S) x (N, S, hs) -> (N, S, hs)
y = bmm(att, v)
return y
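# --- Illustrative sketch (not part of the original file) ---
# Minimal dense usage of `scaled_dot_product_attention`. Heads are folded into
# the batch dimension, i.e. inputs are (B * nh, S, hs); with no mask the dense
# code path is taken and no sparse kernels are required.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    q = torch.randn(2, 16, 32, device=device)  # (B * nh, S, hs)
    y = scaled_dot_product_attention(q=q, k=q, v=q, att_mask=None)
    print(y.shape)  # torch.Size([2, 16, 32])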
|
bart_ls-main
|
xformers/xformers/components/attention/core.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import torch
from xformers.components.attention import Attention, AttentionConfig, register_attention
def calc_rel_pos(n: int):
# Adapted from LucidRains
# https://github.com/lucidrains/lambda-networks/blob/main/lambda_networks/lambda_networks.py
rel_pos = torch.arange(n)[None, :] - torch.arange(n)[:, None] # [n, n]
rel_pos += n - 1 # shift value range from [-n+1, n-1] to [0, 2n-2]
return rel_pos
@dataclass
class LambdaLayerConfig(AttentionConfig):
seq_len: int # dimension of the input sequence
dim_head: int
@register_attention("lambda", LambdaLayerConfig)
class LambdaLayer(Attention):
def __init__(self, dropout: float, seq_len: int, dim_head: int, *_, **__):
"""
Attention approximation using Lambda layers, from
"Lambda networks: modeling long-range interactions without attention.", Bello, I. (2021).
"""
super().__init__()
# Possible extensions:
# - support different dimensions for key and queries
# - support varying dimensions in between inputs and outputs
# - support u hyperparam
self.rel_pos_emb = torch.nn.Parameter(
torch.randn(2 * seq_len - 1, int(dim_head))
)
self.rel_pos = calc_rel_pos(seq_len)
self.attn_drop = torch.nn.Dropout(dropout, inplace=True)
self.requires_same_k_q_dimensions = True
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, *args, **kwargs
):
"""..NOTE: We're reusing the einsum notation suggested by the paper, changed in that
heads are folded in the batch dimension"""
content_lambda = torch.einsum("bnk,bnv->bkv", torch.softmax(k, dim=-1), v)
content_output = torch.einsum("bnk,bkv->bnv", q, content_lambda)
rel_pos_emb = self.rel_pos_emb[self.rel_pos]
# Handle real sequence length being possibly smaller
seq_len = q.shape[1]
rel_pos_emb = rel_pos_emb[:seq_len, :seq_len, :]
# Compute the position lambda for every possible combination in one go, then compute the
# position related contribution
position_lambdas = torch.einsum(
"mnk,bnv->bnkv", rel_pos_emb, v
) # one lambda per position
position_output = (q.unsqueeze(2) @ position_lambdas).squeeze()
att = content_output + position_output
att = self.attn_drop(att)
return att
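# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of LambdaLayer. q, k and v share the same feature size
# (dim_head here), with heads assumed to be folded into the batch dimension.
if __name__ == "__main__":
    attn = LambdaLayer(dropout=0.0, seq_len=16, dim_head=32)
    x = torch.randn(2, 16, 32)  # (B * nh, S, dim_head)
    out = attn(x, x, x)
    print(out.shape)  # torch.Size([2, 16, 32])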
|
bart_ls-main
|
xformers/xformers/components/attention/lambda_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
maybe_sparsify,
register_attention,
sparsify,
)
from xformers.components.attention.attention_patterns import (
causal_1d_pattern,
random_pattern,
)
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class RandomAttentionConfig(AttentionConfig):
r: Optional[
float
] # the ratio of keys that the query can attend to. 1.0 means dense attention
constant_masking: Optional[
bool
] # whether the randomness is per query or defined at construction time
force_sparsity: Optional[bool] # use sparsity in any case (potentially slower)
@register_attention("random", RandomAttentionConfig)
class RandomAttention(Attention):
def __init__(
self,
dropout: float,
causal: bool = False,
r: float = 0.01,
constant_masking: bool = True,
force_sparsity: bool = False,
*args,
**kwargs,
):
"""
"Random" attention, as proposed for instance in _BigBird.
        Random means, in that case, that each query can attend to a random set of keys.
This implementation is sparse-aware, meaning that the empty attention parts will not be represented in memory.
Args:
r (float): the ratio in [0,1] of keys that the query can attend to
constant_masking (bool): if true, keep the same random set for all queries.
_BigBird: "Zaheer, M., Guruganesh, G., Dubey, A., Ainslie, J., Alberti, C., Ontanon, S., Pham, P., Ravula,
A., Wang, Q., Yang, L., & Ahmed, A. (2020). Big Bird: Transformers for Longer Sequences. ArXiv, NeurIPS."
"""
super().__init__()
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.causal = causal
self.r = r
self.rand_attention_mask: Optional[torch.Tensor] = None
self.constant_masking = constant_masking
self.force_sparsity = force_sparsity
self.requires_same_k_q_dimensions = True
def _get_rand_mask(self, shape: torch.Size) -> torch.Tensor:
sparsity = 1 - self.r
mask = random_pattern(shape[1], sparsity=sparsity)
if self.causal:
mask &= causal_1d_pattern(shape[1])
mask = sparsify(mask) if self.force_sparsity else maybe_sparsify(mask)
return mask
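# Illustrative note (added): with the default r=0.01 the boolean pattern keeps roughly 1%
# of the seq_len x seq_len coefficients (about 10k out of ~1M entries for seq_len=1024),
# which is why the mask is usually worth sparsifying.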
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
*args,
**kwargs,
):
# Rand masking
if not self.constant_masking or self.rand_attention_mask is None:
self.rand_attention_mask = self._get_rand_mask(q.shape).to(q.device)
# Mask-aware attention
if att_mask is not None:
if att_mask.dtype == torch.bool and isinstance(
self.rand_attention_mask, AttentionMask
):
mask = self.rand_attention_mask + AttentionMask.from_bool(att_mask)
else:
mask = self.rand_attention_mask & att_mask
else:
mask = self.rand_attention_mask
# Handle q/k/v which would not fit the mask
seq_len = q.shape[-2]
q_, k_, v_ = map(lambda x: self._maybe_pad_sequence(x, mask), (q, k, v))
# Normal attention with the random mask
att = scaled_dot_product_attention(
q=q_, k=k_, v=v_, att_mask=mask, dropout=self.attn_drop
)
# Take into account a hypothetical padding
return att[:, :seq_len, :]
|
bart_ls-main
|
xformers/xformers/components/attention/random.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
from functools import partial, reduce
from inspect import isfunction
from operator import mul
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from xformers.components.attention import ATTENTION_REGISTRY, Attention, AttentionConfig, register_attention
from transformers.models.reformer.modeling_reformer import LSHSelfAttention, ReformerConfig, ReverseSort
"""
Uses the LSH self-attention implementation from huggingface transformers, extended to make LSH attention compatible with global tokens.
"""
@dataclass
class LSHGlobalSelfAttentionConfig(AttentionConfig):
num_hash: int
num_heads: int
dim_model: int
seq_len: int
num_buckets: int
chunk_length: int
@register_attention("lsh_global", LSHGlobalSelfAttentionConfig)
class LSHGlobalAttention(Attention):
def __init__(
self,
num_heads: int,
dim_model: int,
num_hash: int = 4,
seq_len: int = 4096,
dropout: float = 0.0,
chunk_length: int = 16,
num_buckets: int = None,
*args,
**kwargs,
):
super().__init__()
attn_config = ReformerConfig()
attn_config.num_attention_heads = num_heads
attn_config.attn_layers = ["lsh"]
attn_config.is_decoder = False
attn_config.num_hashes = num_hash
attn_config.max_position_embeddings = seq_len
attn_config.attention_head_size = dim_model // num_heads
# attn_config.feed_forward_size = 1
if chunk_length:
attn_config.lsh_attn_chunk_length = chunk_length
attn_config.num_buckets = num_buckets
self.attn = ReformerAttention(attn_config)
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
*args, **kwargs
):
return self.attn(q, k, v, att_mask, key_padding_mask)
class ReformerAttention(LSHSelfAttention):
def __init__(self, config):
super().__init__(config)
# self.query_key = None
# self.value = None
del self.query_key
del self.value
self.num_heads = self.num_attention_heads
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
*args,
**kwargs,
):
orig_sequence_length, head_dim = q.shape[1], q.shape[2]
# pad to a multiple of self.chunk_length
# the block-wise local attention needs the sequence length to be divisible by the chunk length
def _pad_to_window_size(x, window_size):
seq_len = x.size(-2)
pad_len = (window_size - seq_len % window_size) % window_size
return F.pad(x, (0,0,0,pad_len), value=0), pad_len
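# Illustrative note (added): with chunk_length=16, a 100-token sequence is padded by
# (16 - 100 % 16) % 16 = 12 positions, i.e. up to length 112.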
q, _ = _pad_to_window_size(q, self.chunk_length)
v, _ = _pad_to_window_size(v, self.chunk_length)
if key_padding_mask.shape[1] % self.chunk_length != 0:
pad_len = (self.chunk_length - key_padding_mask.shape[1] % self.chunk_length) % self.chunk_length
# key padding mask: 1 means padding tokens
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
sequence_length = q.shape[1]
batch_size = q.shape[0] // self.num_attention_heads
# num hashes can optionally be overwritten by user
num_hashes = self.num_hashes
# @xwhan reformer needs the key and query vectors to be the same
# project hidden_states to query_key and value
orig_query = q.view(batch_size, self.num_attention_heads, sequence_length, head_dim)
orig_value = v.view(batch_size, self.num_attention_heads, sequence_length, head_dim)
do_standard_self_attention = (sequence_length <= self.chunk_length)
# xformer format, att_mask: B x seq_len x seq_len #TODO: better solution?
assert key_padding_mask is not None
key_padding_mask = key_padding_mask.to(q)
key_padding_mask[:,0] = -1
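# Added clarification: the first token is always promoted to a global token; the mask
# convention is {-1: global, 0: regular, 1: padding}, the same as in longshort.py.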
# for LSH attention, unmasked tokens are True
attention_mask = key_padding_mask == 0
# resolve mask that specify global tokens
extra_attention_mask = key_padding_mask < 0
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
max_num_extra_indices_per_batch = num_extra_indices_per_batch.max()
if max_num_extra_indices_per_batch <= 0:
extra_attention_mask = None
else:
extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True)
zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device)
# mask indicating which values are actually going to be padding
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1)
# 2) location of the non-padding values in the selected global attention
selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True)
# 3) location of the padding values in the selected global attention
selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True)
# LSH attention only makes sense if chunked attention should be performed
if not do_standard_self_attention:
# set `num_buckets` on the fly, recommended way to do it
if self.num_buckets is None:
self._set_num_buckets(sequence_length)
# hash query key vectors into buckets
buckets = self._hash_vectors(orig_query, num_hashes, attention_mask)
assert (
int(buckets.shape[-1]) == num_hashes * sequence_length
), f"last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}"
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
sequence_length, buckets, num_hashes
)
# make sure bucket idx is not longer than sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
# query_key_vectors: (B, H, L, D)
# sorted_bucket_idx_per_hash: (B, H, L x num_hash)
# cluster query key value vectors according to hashed buckets
query_key_vectors = self._gather_by_expansion(orig_query, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(orig_value, sorted_bucket_idx_per_hash, num_hashes)
# (B, H, L x num_hash, D)
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
# (B, H, (L x num_hash) // chunk, chunk, D)
if self.chunk_length is None:
assert (
self.num_chunks_before == 0 and self.num_chunks_after == 0
), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
else:
# get sequence length indices
sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=orig_query.device).repeat(
batch_size, self.num_attention_heads, 1
)
query_key_vectors = orig_query
value_vectors = orig_value
# scale the key vectors: length and head-dim normalization, as done for the shared query/key vectors in the Reformer
key_vectors = self._len_and_dim_norm(query_key_vectors)
# set query_vectors to query key vectors if LSH self attention
query_vectors = query_key_vectors
# free memory
del query_key_vectors
# global tokens' key and value vectors with indexing
if extra_attention_mask is not None:
selected_k = orig_query.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_attention_heads, head_dim)
orig_query_t = orig_query.transpose(1,2)
selected_k[selection_padding_mask_nonzeros] = orig_query_t[extra_attention_mask_nonzeros]
selected_k = self._len_and_dim_norm(selected_k)
orig_value_t = orig_value.transpose(1,2)
selected_v = orig_value.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_attention_heads, head_dim)
selected_v[selection_padding_mask_nonzeros] = orig_value_t[extra_attention_mask_nonzeros]
# get attention probs
out_vectors, logits, attention_probs = self.attend(
query_vectors=query_vectors,
key_vectors=key_vectors,
value_vectors=value_vectors,
sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
attention_mask=attention_mask,
head_mask=None,
do_standard_self_attention=do_standard_self_attention,
do_cached_attention=False,
global_token_keys=selected_k,
global_token_values=selected_v,
selection_padding_mask_zeros=selection_padding_mask_zeros
)
# free memory
del key_vectors, value_vectors
# re-order out_vectors and logits
if not do_standard_self_attention:
# sort clusters back to correct ordering
out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
if not do_standard_self_attention:
# sum up all hash rounds
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(
out_vectors,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
)
logits = self._split_seq_length_dim_to(
logits,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
).unsqueeze(-1)
# TODO @xwhan: how is logits used here? Looks weird, since logits is already a logsumexp
probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
# free memory
del probs_vectors
# free memory
del logits
# replace the calculation of global tokens attending to all other tokens
selected_q = orig_query.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_attention_heads, head_dim)
selected_q[selection_padding_mask_nonzeros] = orig_query_t[extra_attention_mask_nonzeros]
# LSH attention, query and keys are the same
# need to do some normalization like LSH attention
g2all_attn_weights = selected_q.transpose(1,2).matmul(self._len_and_dim_norm(orig_query).transpose(-1, -2))
hard_mask = key_padding_mask == 1
g2all_attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000.0
if hard_mask is not None:
g2all_attn_weights = g2all_attn_weights.masked_fill(
hard_mask.unsqueeze(1).unsqueeze(2),
-10000.0,
)
# still need to avoid the model attending to itself
g2all_attn_weights[extra_attention_mask_nonzeros[0],:,:,extra_attention_mask_nonzeros[1]] = -10000.0
g2all_attn_probs_float = F.softmax(g2all_attn_weights, dim=-1, dtype=torch.float32)
g2all_attn_probs = nn.functional.dropout(g2all_attn_probs_float.type_as(g2all_attn_weights), p=self.dropout, training=self.training)
g2all_attn = g2all_attn_probs.matmul(orig_value)
# (B, H, max_num_extra_indices_per_batch, L)
nonzero_global_attn = g2all_attn[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]].type_as(out_vectors)
out_vectors[extra_attention_mask_nonzeros[0], :, extra_attention_mask_nonzeros[1]] = nonzero_global_attn
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
), "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`."
return out_vectors.view(-1, sequence_length, head_dim)[:,:orig_sequence_length,:].contiguous()
def attend(
self,
query_vectors,
key_vectors,
value_vectors,
sorted_bucket_idx_per_hash,
attention_mask,
head_mask,
do_standard_self_attention,
do_cached_attention,
global_token_keys=None,
global_token_values=None,
selection_padding_mask_zeros=None
):
# look at previous and following chunks if chunked attention
if not do_standard_self_attention:
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
# (B, H, (L x num_hash) // chunk, chunk, 2*chunk)
# free memory
# del query_vectors, key_vectors
# if chunked attention split bucket idxs to query and key
if not do_standard_self_attention:
query_bucket_idx = self._split_seq_length_dim_to(
sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads
)
key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)
elif do_cached_attention and query_key_dots.ndim > 4:
key_value_bucket_idx = sorted_bucket_idx_per_hash
query_bucket_idx = (
key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max()
)
elif do_cached_attention and query_key_dots.ndim <= 4:
query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1]
key_value_bucket_idx = torch.arange(
query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device
)[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))
else:
query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
# get correct mask values depending on precision
if query_key_dots.dtype == torch.float16:
self_mask_value = self.self_mask_value_float16.half()
mask_value = self.mask_value_float16.half()
else:
self_mask_value = self.self_mask_value_float32
mask_value = self.mask_value_float32
if not do_cached_attention:
# (B, H, (L x num_hash) // chunk, chunk, 2 x chunk)
mask = self._compute_attn_mask(
query_bucket_idx,
key_value_bucket_idx,
attention_mask,
query_key_dots.shape,
do_standard_self_attention,
)
if mask is not None:
query_key_dots = torch.where(mask, query_key_dots, mask_value)
# free memory
del mask
# Self mask is ALWAYS applied.
# From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):
# " While attention to the future is not allowed, typical implementations of the
# Transformer do allow a position to attend to itself.
# Such behavior is undesirable in a shared-QK formulation because the dot-product
# of a query vector with itself will almost always be greater than the dot product of a
# query vector with a vector at another position. We therefore modify the masking
# to forbid a token from attending to itself, except in situations
# where a token has no other valid attention targets (e.g. the first token in a sequence) "
self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(
query_bucket_idx.device
)
# apply self_mask
query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)
if not do_standard_self_attention:
# @xwhan: additionally let each chunked query attend to the global tokens here
global_token_keys = global_token_keys.transpose(1,2).unsqueeze(2).repeat(1,1,query_vectors.shape[2],1,1)
query2global_dots = torch.matmul(query_vectors, global_token_keys.transpose(-1, -2))
query2global_dots[selection_padding_mask_zeros[0],:,:,:,selection_padding_mask_zeros[1]] = self_mask_value
query_key_dots = torch.cat([query_key_dots, query2global_dots], dim=-1)
global_token_values = global_token_values.transpose(1,2).unsqueeze(2).repeat(1,1,query_vectors.shape[2],1,1)
value_vectors = torch.cat([value_vectors, global_token_values], dim=3)
# free memory
del self_mask
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
# dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`
# attention_probs = torch.exp(query_key_dots - logits)
# Is this causing training issues while using fp16?
attention_probs = F.softmax(query_key_dots, dim=-1, dtype=torch.float32).type_as(query_key_dots)
# free memory
del query_key_dots
# dropout
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# attend values
out_vectors = torch.matmul(attention_probs, value_vectors)
# free memory
del value_vectors
# merge chunk length
if out_vectors.ndim > 4:
logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
return out_vectors, logits, attention_probs
|
bart_ls-main
|
xformers/xformers/components/attention/lsh_global.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from xformers.components.attention import Attention, AttentionConfig, register_attention
@dataclass
class LongShortConfig(AttentionConfig):
block_size: int
num_heads: int
dim_model: int
num_landmarks: int
window_size: int
global_tokens: int
@register_attention("longshort", LongShortConfig)
class LongShortAttention(Attention):
def __init__(
self,
dropout: float,
num_heads: int,
dim_model: int,
num_landmarks: int = 32,
window_size: int = 96,
global_tokens: int = 1,
*args, **kwargs
):
"""
Long-Short Transformer (Transformer-LS)
https://arxiv.org/abs/2107.02192
https://github.com/NVIDIA/transformer-ls
# TODO: currently only supports encoder-only settings
"""
super().__init__()
self.drop_attn = nn.Dropout(dropout)
self.num_head = num_heads
self.head_dim = dim_model // num_heads
self.num_landmarks = num_landmarks
self.dim = dim_model
self.window_size = window_size
self.requires_orig_inputs = True
self.global_tokens = global_tokens # default num of global tokens
# Sec 3.4: dual layer norms to stabilize the combination of the two attention branches
self.dual_ln_s = nn.LayerNorm(self.num_head * self.head_dim)
self.dual_ln_l = nn.LayerNorm(self.num_head * self.head_dim)
self.dconv_fc = nn.Linear(self.dim, self.num_head * self.num_landmarks)
# input-dependent compression
#### v2: change how the additional layernorm is applied, to make it more compatible with the RoBERTa checkpoint
#### when compressing the k/v, the k/v are NOT normalized
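# Added clarification: dconv_fc maps every token to num_head * num_landmarks scores; after a
# softmax over the sequence, these scores are the per-head mixing weights that compress the
# full-length K/V into num_landmarks dynamic key/value vectors (see the forward pass below).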
def forward(
self,
x: torch.Tensor,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
*args, **kwargs
):
"""
This version enables specifying some global tokens, according to the values in key_padding_mask: {-1: global, 0: local, 1: masked tokens}
"""
# global_tokens = self.global_tokens
batch_size = q.shape[0] // self.num_head
sequence_length = q.shape[1]
# Always set the first token as a global token
key_padding_mask = key_padding_mask.to(q)
key_padding_mask[:,0] = -1
Q = q.view(batch_size, self.num_head, -1, self.head_dim).mul(1./math.sqrt(self.head_dim))
K = k.view(batch_size, self.num_head, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.dim)
V = v.view(batch_size, self.num_head, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.dim)
# the block-wise local attention needs the sequence length to be a multiple of the window size
def _pad_to_window_size(x, window_size):
seq_len = x.size(-2)
pad_len = (window_size - seq_len % window_size) % window_size
return F.pad(x, (0,0,0,pad_len), value=0), pad_len
x, pad_len = _pad_to_window_size(x, self.window_size)
Q, _ = _pad_to_window_size(Q, self.window_size)
K, _ = _pad_to_window_size(K, self.window_size)
V, _ = _pad_to_window_size(V, self.window_size)
if key_padding_mask.shape[1] % self.window_size != 0:
pad_len = (self.window_size - key_padding_mask.shape[1] % self.window_size) % self.window_size
# key padding mask: 1 means padding tokens
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
# normalization is to have the same scale as compressed key/values
K_normalized = self.split_heads(self.dual_ln_l(K)) # (B, H, seq_len, head_dim)
V_normalized = self.split_heads(self.dual_ln_l(V))
K = self.split_heads(K)
V = self.split_heads(V)
# 1. check size of x (bsz, seq-1, dim_model); # TODO, 2. the padding mask value
# global attention tokens
extra_attention_mask = key_padding_mask < 0
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
max_num_extra_indices_per_batch = num_extra_indices_per_batch.max()
if max_num_extra_indices_per_batch <= 0:
extra_attention_mask = None
else:
extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True)
zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device)
# mask indicating which values are actually going to be padding
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1)
# 2) location of the non-padding values in the selected global attention
selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True)
# 3) location of the padding values in the selected global attention
selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True)
# keys of global tokens
if extra_attention_mask is not None:
K_transpose = K_normalized.transpose(1,2)
Q_transpose = Q.transpose(1,2)
selected_k = K_transpose.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
selected_k[selection_padding_mask_nonzeros] = K_transpose[extra_attention_mask_nonzeros]
# (bsz, seq_len, num_heads, max_num_extra_indices_per_batch)
selected_attn_weights = torch.einsum('blhd,bshd->blhs', (Q_transpose, selected_k))
selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000
attn_weights_over_g_tokens = selected_attn_weights.transpose(1,2)
V_transpose = V_normalized.transpose(1,2)
selected_v = V_transpose.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
selected_v[selection_padding_mask_nonzeros] = V_transpose[extra_attention_mask_nonzeros]
# linear attention mask
padding_mask = key_padding_mask != 0 # True means masked position
K_compress = V_compress = None
if self.num_landmarks > 0:
# self.dconv_fc(x): each landmark of each head attends to each location
head_scores = self.dconv_fc(x).masked_fill(padding_mask[:, :, None], -10000) # only try to compress the k/v of those local tokens
head_scores = F.softmax(head_scores, dim=1, dtype=torch.float32) #.to(X)
# if not self.fp32:
head_scores = head_scores.to(x)
# bsz x num_head x num_lms x length
head_scores = head_scores.view(batch_size, -1, self.num_head, self.num_landmarks).permute(0, 2, 3, 1)
# TODO: should we use normalized K/V here? different from paper descriptions
K_compress = head_scores.matmul(K) # bsz x num_head x num_lms x head_dim
V_compress = head_scores.matmul(V)
assert self.num_landmarks > 0
if self.dual_ln_s is not None and K_compress is not None:
K_compress = self.dual_ln_s(K_compress.transpose(1, 2).contiguous().view(batch_size, -1, self.dim))
K_compress = self.split_heads(K_compress)
V_compress = self.dual_ln_s(V_compress.transpose(1, 2).contiguous().view(batch_size, -1, self.dim))
V_compress = self.split_heads(V_compress)
# if self.num_landmarks > 0 or (cls_embed_q is not None):
# bsz x num_head x length x num_lms
attn_compress = Q.matmul(K_compress.transpose(-1, -2))
win_attn_weights = self.sliding_chunks_matmul_qk_v2(Q, K_normalized, padding_mask) # bsz x num_heads x seqlen x 2winsize
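# Added clarification: the landmark scores (attn_compress), the sliding-window scores and the
# global-token scores are concatenated below and normalized by a single softmax, so each query
# spreads its attention jointly over the three kinds of keys.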
# else:
# win_attn_weights = None
if extra_attention_mask is not None:
all_attn_ = torch.cat([attn_compress, win_attn_weights, attn_weights_over_g_tokens], dim=-1)
else:
all_attn_ = torch.cat([attn_compress, win_attn_weights], dim=-1)
all_attn = all_attn_.float().softmax(dim=-1).to(win_attn_weights)
hard_mask = key_padding_mask == 1
all_attn = all_attn.masked_fill(hard_mask[:,None,:,None], 0)
# if not self.fp32:
all_attn = all_attn.to(x)
all_attn = self.drop_attn(all_attn)
C = 0
if attn_compress is not None:
C += all_attn[:,:,:,:K_compress.shape[2]].matmul(V_compress)
if win_attn_weights is not None:
win_attn_probs = all_attn[:,:,:,K_compress.shape[2]:K_compress.shape[2] + win_attn_weights.shape[-1]]
seq_len = win_attn_probs.shape[2]
# if self.window_size > 0:
win_attn_probs = win_attn_probs.view(batch_size, self.num_head, seq_len // self.window_size, self.window_size,-1)
V_tiles = self.get_tiles_v2(V_normalized, transpose=False)
C += win_attn_probs.matmul(V_tiles).view(batch_size, self.num_head, seq_len, self.head_dim)
# else:
# C += win_attn_probs * V
if extra_attention_mask is not None:
global_attn_probs = all_attn[:,:,:,-attn_weights_over_g_tokens.shape[-1]:]
# selected_v shape: (batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
C += global_attn_probs.matmul(selected_v.transpose(1,2))
# global tokens attending to all other tokens.
# TODO: Two options are possible here: whether to use normalized key/value
selected_q = Q_transpose.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
selected_q[selection_padding_mask_nonzeros] = Q_transpose[extra_attention_mask_nonzeros]
g2all_attn_weights = selected_q.transpose(1,2).matmul(K_normalized.transpose(-1, -2)) # (batch_size, self.num_head, max_num_extra_indices_per_batch, seq_len)
g2all_attn_probs_float = F.softmax(g2all_attn_weights, dim=-1, dtype=torch.float32)
g2all_attn_probs = self.drop_attn(g2all_attn_probs_float.type_as(g2all_attn_weights))
g2all_attn = g2all_attn_probs.matmul(V_normalized) # (batch_size, self.num_head, max_num_extra_indices_per_batch, head_dim)
# replace results in C
nonzero_global_attn = g2all_attn[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]]
C[extra_attention_mask_nonzeros[0],:,extra_attention_mask_nonzeros[1]] = nonzero_global_attn
C = C[:,:,:sequence_length].view(-1, sequence_length, self.head_dim)
return C
# #### v1: with added global tokens at the beginning
# def forward(
# self,
# x: torch.Tensor,
# q: torch.Tensor,
# k: torch.Tensor,
# v: torch.Tensor,
# att_mask: Optional[torch.Tensor] = None,
# key_padding_mask: Optional[Tensor] = None,
# *args, **kwargs
# ):
# """
# This version enables specify some global tokens, according values in key_padding_mask: {-1: global, 0: local, 1: masked tokens}
# """
# # global_tokens = self.global_tokens
# batch_size = q.shape[0] // self.num_head
# sequence_length = q.shape[1]
# # xformer format, att_mask: B x seq_len x seq_len #TODO: better solution?
# # if key_padding_mask is not None:
# # mask = key_padding_mask == 0
# # if att_mask is not None:
# # assert len(att_mask.shape) == 3
# # att_mask = att_mask.reshape(batch_size, self.num_head, -1, sequence_length)[:,0,:,:]
# # mask = att_mask.sum(1) > 0
# # else:
# # mask = None
# # Always set the first token as global tokens
# key_padding_mask = key_padding_mask.to(q)
# key_padding_mask[:,0] = -1
# Q = q.view(batch_size, self.num_head, -1, self.head_dim).mul(1./math.sqrt(self.head_dim))
# K = k.view(batch_size, self.num_head, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.dim)
# V = v.view(batch_size, self.num_head, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.dim)
# # needs centain sequence length to make the block wise local attention work
# def _pad_to_window_size(x, window_size):
# seq_len = x.size(-2)
# pad_len = (window_size - seq_len % window_size) % window_size
# return F.pad(x, (0,0,0,pad_len), value=0), pad_len
# x, pad_len = _pad_to_window_size(x, self.window_size)
# Q, _ = _pad_to_window_size(Q, self.window_size)
# K, _ = _pad_to_window_size(K, self.window_size)
# V, _ = _pad_to_window_size(V, self.window_size)
# if key_padding_mask.shape[1] % self.window_size != 0:
# pad_len = (self.window_size - key_padding_mask.shape[1] % self.window_size) % self.window_size
# # key padding mask: 1 means padding tokens
# key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
# # normalization is to have the same scale as compressed key/values
# K_normalized = self.split_heads(self.dual_ln_l(K)) # (B, H, seq_len, head_dim)
# V_normalized = self.split_heads(self.dual_ln_l(V))
# # 1. check size of x (bsz, seq-1, dim_model); # TODO, 2. the padding mask value
# # global attention tokens
# extra_attention_mask = key_padding_mask < 0
# num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
# max_num_extra_indices_per_batch = num_extra_indices_per_batch.max()
# if max_num_extra_indices_per_batch <= 0:
# extra_attention_mask = None
# else:
# extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True)
# zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device)
# # mask indicating which values are actually going to be padding
# num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
# selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1)
# # 2) location of the non-padding values in the selected global attention
# selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True)
# # 3) location of the padding values in the selected global attention
# selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True)
# # keys of global tokens
# if extra_attention_mask is not None:
# K_transpose = K_normalized.transpose(1,2)
# Q_transpose = Q.transpose(1,2)
# selected_k = K_transpose.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
# selected_k[selection_padding_mask_nonzeros] = K_transpose[extra_attention_mask_nonzeros]
# # (bsz, seq_len, num_heads, max_num_extra_indices_per_batch)
# selected_attn_weights = torch.einsum('blhd,bshd->blhs', (Q_transpose, selected_k))
# selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000
# attn_weights_over_g_tokens = selected_attn_weights.transpose(1,2)
# V_transpose = V_normalized.transpose(1,2)
# selected_v = V_transpose.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
# selected_v[selection_padding_mask_nonzeros] = V_transpose[extra_attention_mask_nonzeros]
# # linear attention mask
# padding_mask = key_padding_mask != 0 # True means masked position
# K_compress = V_compress = None
# if self.num_landmarks > 0:
# # self.dconv_fc(x): each lanmark of each head attend to each location
# head_scores = self.dconv_fc(x).masked_fill(padding_mask[:, :, None], -10000) # only try to compress the k/v of those local tokens
# head_scores = F.softmax(head_scores, dim=1, dtype=torch.float32) #.to(X)
# # if not self.fp32:
# head_scores = head_scores.to(x)
# # bsz x num_head x num_lms x length
# head_scores = head_scores.view(batch_size, -1, self.num_head, self.num_landmarks).permute(0, 2, 3, 1)
# # TODO: should we use normalized K/V here? different from paper descriptions
# K_compress = head_scores.matmul(K_normalized) # bsz x num_head x num_lms x head_dim
# V_compress = head_scores.matmul(V_normalized)
# assert self.num_landmarks > 0
# if self.dual_ln_s is not None and K_compress is not None:
# K_compress = self.dual_ln_s(K_compress.transpose(1, 2).contiguous().view(batch_size, -1, self.dim))
# K_compress = self.split_heads(K_compress)
# V_compress = self.dual_ln_s(V_compress.transpose(1, 2).contiguous().view(batch_size, -1, self.dim))
# V_compress = self.split_heads(V_compress)
# # if self.num_landmarks > 0 or (cls_embed_q is not None):
# # bsz x num_head x length x num_lms
# attn_compress = Q.matmul(K_compress.transpose(-1, -2))
# win_attn_weights = self.sliding_chunks_matmul_qk_v2(Q, K_normalized, padding_mask) # bsz x num_heads x seqlen x 2winsize
# # else:
# # win_attn_weights = None
# if extra_attention_mask is not None:
# all_attn_ = torch.cat([attn_compress, win_attn_weights, attn_weights_over_g_tokens], dim=-1)
# else:
# all_attn_ = torch.cat([attn_compress, win_attn_weights], dim=-1)
# all_attn = all_attn_.float().softmax(dim=-1).to(win_attn_weights)
# hard_mask = key_padding_mask == 1
# all_attn = all_attn.masked_fill(hard_mask[:,None,:,None], 0)
# # if not self.fp32:
# all_attn = all_attn.to(x)
# all_attn = self.drop_attn(all_attn)
# C = 0
# if attn_compress is not None:
# C += all_attn[:,:,:,:K_compress.shape[2]].matmul(V_compress)
# if win_attn_weights is not None:
# win_attn_probs = all_attn[:,:,:,K_compress.shape[2]:K_compress.shape[2] + win_attn_weights.shape[-1]]
# seq_len = win_attn_probs.shape[2]
# # if self.window_size > 0:
# win_attn_probs = win_attn_probs.view(batch_size, self.num_head, seq_len // self.window_size, self.window_size,-1)
# V_tiles = self.get_tiles_v2(V_normalized, transpose=False)
# C += win_attn_probs.matmul(V_tiles).view(batch_size, self.num_head, seq_len, self.head_dim)
# # else:
# # C += win_attn_probs * V
# if extra_attention_mask is not None:
# global_attn_probs = all_attn[:,:,:,-attn_weights_over_g_tokens.shape[-1]:]
# # selected_v shape: (batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
# C += global_attn_probs.matmul(selected_v.transpose(1,2))
# # global tokens attending to all other tokens.
# # TODO: Two options are possible here: whether to use normalized key/value
# selected_q = Q_transpose.new_zeros(batch_size, max_num_extra_indices_per_batch, self.num_head, self.head_dim)
# selected_q[selection_padding_mask_nonzeros] = Q_transpose[extra_attention_mask_nonzeros]
# g2all_attn_weights = selected_q.transpose(1,2).matmul(K_normalized.transpose(-1, -2)) # (batch_size, self.num_head, max_num_extra_indices_per_batch, seq_len)
# g2all_attn_probs_float = F.softmax(g2all_attn_weights, dim=-1, dtype=torch.float32)
# g2all_attn_probs = self.drop_attn(g2all_attn_probs_float.type_as(g2all_attn_weights))
# g2all_attn = g2all_attn_probs.matmul(V_normalized) # (batch_size, self.num_head, max_num_extra_indices_per_batch, head_dim)
# # replace results in C
# nonzero_global_attn = g2all_attn[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]]
# C[extra_attention_mask_nonzeros[0],:,extra_attention_mask_nonzeros[1]] = nonzero_global_attn
# # get rid of the padding positions
# C = C[:,:,:sequence_length].view(-1, sequence_length, self.head_dim)
# return C
def get_tiles(self, x, transpose=False):
# x: bsz x n_heads x seqlen x d_head
bsz, n_heads, seqlen, d_h = x.shape
out_shape = (bsz, n_heads, seqlen//self.window_size-1, 2 * self.window_size, d_h)
in_strides = x.stride()
out_strides = (in_strides[0], in_strides[1], in_strides[2]*self.window_size, in_strides[2], 1)
x_main = x.as_strided(size=out_shape, stride=out_strides)
x_last = x[:, :, None, -2*self.window_size:, :]
x = torch.cat([x_main, x_last], dim=2)
if transpose:
return x.transpose(-1, -2)
else:
# bsz x n_heads x seqlen//wlen x 2*wlen x d_h
return x
def get_tiled_mask(self, mask):
bsz, seqlen = mask.shape
out_shape = (bsz, seqlen//self.window_size-1, 2*self.window_size)
in_stride = mask.stride()
out_stride = (in_stride[0], in_stride[1]*self.window_size, in_stride[1])
mask_main = mask.as_strided(size=out_shape, stride=out_stride)[:, None, :, :]
mask_last = mask[:, None, None, -2*self.window_size:]
return torch.cat([mask_main, mask_last], dim=2)[:, :, :, None, :]
def sliding_chunks_matmul_qk(self, Q, K, padding_mask):
# Q, K: bsz x num_heads x seqlen x d_head
# padding_mask: bsz x seqlen
bsz, num_heads, seqlen, d_h = Q.shape
mask_tiles = self.get_tiled_mask(padding_mask)
K_tiles = self.get_tiles(K, transpose=True)
Q_tiles = Q.view(bsz, num_heads, seqlen//self.window_size, self.window_size, d_h)
# bsz x num_heads x seqlen//winsize x winsize x 2winsize
qk_scores = Q_tiles.matmul(K_tiles)
qk_scores.masked_fill_(mask_tiles, -10000)
return qk_scores.view(bsz, num_heads, seqlen, 2*self.window_size)
def get_tiles_v2(self, x, transpose=False):
if self.window_size <= 0:
return x
bsz, n_heads, seqlen, d_h = x.shape
n_groups = seqlen // self.window_size
ext_len = max(self.window_size//2, 1)
x = F.pad(x, (0, 0, ext_len, ext_len), value=0)
strides = x.stride()
if transpose:
out_shape = (bsz, n_heads, n_groups, d_h, 2 * ext_len + self.window_size)
out_stride = (strides[0], strides[1], self.window_size * strides[2], strides[3], strides[2])
else:
out_shape = (bsz, n_heads, n_groups, 2 * ext_len + self.window_size, d_h)
out_stride = (strides[0], strides[1], self.window_size * strides[2], strides[2], strides[3])
return torch.as_strided(x, size=out_shape, stride=out_stride)
def get_tiled_mask_v2(self, mask):
# only mask along the key dimension
bsz, seqlen = mask.shape
ext_len = max(self.window_size//2, 1)
mask = F.pad(mask, (ext_len, ext_len), value=True) # (bsz, seq_len + 2*ext_len)
out_shape = (bsz, seqlen//self.window_size, 2*ext_len + self.window_size)
in_stride = mask.stride()
out_stride = (in_stride[0], in_stride[1]*self.window_size, in_stride[1])
return mask.as_strided(size=out_shape, stride=out_stride)[:, None, :, None, :]
def sliding_chunks_matmul_qk_v2(self, Q, K, padding_mask):
bsz, num_heads, seqlen, d_h = Q.shape
if self.window_size > 0:
# Q, K: bsz x num_heads x seqlen x d_head
# padding_mask: bsz x seqlen
mask_tiles = self.get_tiled_mask_v2(padding_mask)
K_tiles = self.get_tiles_v2(K, transpose=True)
Q_tiles = Q.view(bsz, num_heads, seqlen//self.window_size, self.window_size, d_h)
# bsz x num_heads x seqlen//winsize x winsize x 2winsize
qk_scores = Q_tiles.matmul(K_tiles)
qk_scores = qk_scores.masked_fill(mask_tiles, -10000)
return qk_scores.view(bsz, num_heads, seqlen, -1)
else:
qk_scores = torch.sum(Q*K, dim=-1, keepdim=True)
return qk_scores
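# Illustrative note (added): with window_size=96, ext_len is 48, so each block of 96 queries
# scores against 2 * 48 + 96 = 192 keys, and the returned tensor has shape
# (bsz, num_heads, seqlen, 192).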
def combine_heads(self, X):
X = X.transpose(1, 2)
X = X.reshape(X.size(0), X.size(1), self.num_head * self.head_dim)
return X
def split_heads(self, X):
X = X.reshape(X.size(0), X.size(1), self.num_head, self.head_dim)
X = X.transpose(1, 2)
return X
# #### v0: the original implementation
# def forward(
# self,
# x: torch.Tensor,
# q: torch.Tensor,
# k: torch.Tensor,
# v: torch.Tensor,
# att_mask: Optional[torch.Tensor] = None,
# key_padding_mask: Optional[Tensor] = None,
# *args, **kwargs
# ):
# global_tokens = self.global_tokens
# batch_size = q.shape[0] // self.num_head
# sequence_length = q.shape[1]
# # xformer format, att_mask: B x seq_len x seq_len #TODO: better solution?
# # if key_padding_mask is not None:
# # mask = key_padding_mask == 0
# if att_mask is not None:
# assert len(att_mask.shape) == 3
# att_mask = att_mask.reshape(batch_size, self.num_head, -1, sequence_length)[:,0,:,:]
# mask = att_mask.sum(1) > 0
# else:
# mask = None
# Q = q.view(batch_size, self.num_head, -1, self.head_dim).mul(1./math.sqrt(self.head_dim))
# K = k.view(batch_size, self.num_head, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.dim)
# V = v.view(batch_size, self.num_head, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.dim)
# if global_tokens > 0:
# # cls_embed = x[:,:1].contiguous()
# cls_embed_q = Q[:,:,:global_tokens].contiguous()
# cls_embed_k = K[:,:global_tokens].contiguous()
# cls_embed_v = V[:,:global_tokens].contiguous()
# x = x[:,global_tokens:].contiguous() # B x (seq - 1) x dim_model
# Q = Q[:,:,global_tokens:].contiguous() # B x heads x (seq - 1) x head_dim
# K = K[:,global_tokens:].contiguous() # B x (seq - 1) x dim_model
# V = V[:,global_tokens:].contiguous()
# mask = mask[:,global_tokens:].contiguous() if mask is not None else None # B x (seq - 1)
# def _pad_to_window_size(x, window_size):
# seq_len = x.size(-2)
# pad_len = (window_size - seq_len % window_size) % window_size
# return F.pad(x, (0,0,0,pad_len), value=0), pad_len
# x, pad_len = _pad_to_window_size(x, self.window_size)
# Q, _ = _pad_to_window_size(Q, self.window_size)
# K, _ = _pad_to_window_size(K, self.window_size)
# V, _ = _pad_to_window_size(V, self.window_size)
# if mask.shape[1] % self.window_size != 0:
# pad_len = (self.window_size - mask.shape[1] % self.window_size) % self.window_size
# mask = torch.cat([mask, mask.new_zeros(mask.size(0), pad_len).to(mask)], dim=1)
# K = self.split_heads(self.dual_ln_l(K))
# V = self.split_heads(self.dual_ln_l(V))
# # 1. check size of x (bsz, seq-1, dim_model); # TODO, 2. the padding mask value
# padding_mask = ~mask.bool()
# K_compress = V_compress = None
# if self.num_landmarks > 0:
# head_scores = self.dconv_fc(x).masked_fill(padding_mask[:, :, None], float('-inf'))
# head_scores = F.softmax(head_scores, dim=1, dtype=torch.float32) #.to(X)
# # if not self.fp32:
# head_scores = head_scores.to(x)
# # bsz x num_head x num_lms x length
# head_scores = head_scores.view(batch_size, -1, self.num_head, self.num_landmarks).permute(0, 2, 3, 1)
# K_compress = head_scores.matmul(K) # bsz x num_head x num_lms x head_dim
# V_compress = head_scores.matmul(V)
# if global_tokens > 0:
# Q_cls = cls_embed_q # B x heads x 1 x head_dim
# K_cls = self.split_heads(cls_embed_k) # B x heads x 1 x head_dim
# V_cls = self.split_heads(cls_embed_v)
# if self.num_landmarks > 0:
# K_compress = torch.cat([K_cls, K_compress], dim=2) # B x heads x (1 + lms) x head_dim
# V_compress = torch.cat([V_cls, V_compress], dim=2)
# else:
# K_compress = K_cls
# V_compress = V_cls
# if self.dual_ln_s is not None and K_compress is not None:
# K_compress = self.dual_ln_s(K_compress.transpose(1, 2).contiguous().view(batch_size, -1, self.dim))
# K_compress = self.split_heads(K_compress)
# V_compress = self.dual_ln_s(V_compress.transpose(1, 2).contiguous().view(batch_size, -1, self.dim))
# V_compress = self.split_heads(V_compress)
# if self.num_landmarks > 0 or (cls_embed_q is not None):
# # bsz x num_head x length x num_lms
# attn_compress = Q.matmul(K_compress.transpose(-1, -2))
# else:
# attn_compress = None
# if self.window_size > 0 or self.num_landmarks == 0:
# # First, compute the compressed part, or the attentions on the landmarks
# # First use window attention to attend to the diagonals
# # V: bsize, self.seq_len, self.num_head, self.head_dim
# # win_attn_weights = self.sliding_chunks_matmul_qk(Q, K, padding_mask)
# win_attn_weights = self.sliding_chunks_matmul_qk_v2(Q, K, padding_mask)
# else:
# win_attn_weights = None
# if attn_compress is None:
# all_attn_ = win_attn_weights
# elif win_attn_weights is None:
# all_attn_ = attn_compress
# else:
# all_attn_ = torch.cat([attn_compress, win_attn_weights], dim=-1)
# all_attn = all_attn_.float().softmax(dim=-1).to(win_attn_weights)
# all_attn = all_attn.masked_fill(padding_mask[:,None,:,None], 0)
# # if not self.fp32:
# all_attn = all_attn.to(x)
# all_attn = self.drop_attn(all_attn)
# C = 0
# if attn_compress is not None:
# C += all_attn[:,:,:,:K_compress.shape[2]].matmul(V_compress)
# if win_attn_weights is not None:
# win_attn_probs = all_attn[:,:,:,-win_attn_weights.shape[-1]:]
# seq_len = win_attn_probs.shape[2]
# if self.window_size > 0:
# win_attn_probs = win_attn_probs.view(batch_size, self.num_head, seq_len // self.window_size, self.window_size,-1)
# V_tiles = self.get_tiles_v2(V, transpose=False)
# C += win_attn_probs.matmul(V_tiles).view(batch_size, self.num_head, seq_len, self.head_dim)
# else:
# C += win_attn_probs * V
# if cls_embed_q is not None:
# # if self.fp32:
# # Q_cls, K_cls, V_cls = Q_cls.float(), K_cls.float(), V_cls.float()
# # bsz x n_heads x 1 x (1+seqlen)
# cls_scores = torch.cat([Q_cls.matmul(K_cls.transpose(-1, -2)),
# Q_cls.matmul(C.transpose(-1, -2)).masked_fill(padding_mask[:,None,None,:], float('-inf'))],
# dim=-1)
# cls_probs = torch.softmax(cls_scores, dim=-1, dtype=torch.float32)#.to(X)
# # if not self.fp32:
# cls_probs = cls_probs.to(x)
# out_cls_embed = cls_probs[:,:,:,:global_tokens].matmul(V_cls) + cls_probs[:,:,:,global_tokens:].matmul(C)
# if cls_embed_q is not None:
# C = torch.cat([out_cls_embed, C], dim=2)
# # get rid of the padding positions
# C = C[:,:,:sequence_length].view(-1, sequence_length, self.head_dim)
# return C
|
bart_ls-main
|
xformers/xformers/components/attention/longshort.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention import Attention, AttentionConfig, register_attention
@register_attention("fourier_mix", AttentionConfig)
class FourierMix(Attention):
def __init__(self, dropout: float, *_, **__):
"""
FFT-based pseudo-attention mechanism, from
"
"FNet: Mixing Tokens with Fourier Transforms"
Lee-Thorp et al., 2021, https://arxiv.org/pdf/2105.03824.pdf
"""
super().__init__()
self.attn_drop = torch.nn.Dropout(dropout, inplace=False)
self.requires_input_projection = False
def forward(self, q: torch.Tensor, *_, **__):
att = torch.fft.fft2(q).real
att = self.attn_drop(att)
return att
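# Usage sketch (added, illustrative only):
#   mix = FourierMix(dropout=0.0)
#   x = torch.randn(2, 128, 64)
#   y = mix(x)  # same shape as x; the 2D FFT mixes information across tokens and features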
|
bart_ls-main
|
xformers/xformers/components/attention/fourier_mix.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from xformers.components.attention import (
Attention,
AttentionConfig,
AttentionMask,
register_attention,
)
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class ScaledDotProductConfig(AttentionConfig):
causal: Optional[bool]
seq_len: Optional[int]
to_seq_len: Optional[int]
@register_attention("scaled_dot_product", ScaledDotProductConfig)
class ScaledDotProduct(Attention):
r"""
Implementing the Scaled Dot-Product attention proposed in
`Attention is all you need`_, Vaswani et al.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762v5
"""
mask: Optional[AttentionMask]
def __init__(
self,
dropout: float = 0.0,
causal: bool = False,
seq_len: Optional[int] = None,
to_seq_len: Optional[int] = None,
*args,
**kwargs,
):
super().__init__()
self.attn_drop = nn.Dropout(dropout, inplace=False)
self.causal = causal
self.seq_len = seq_len
if causal and seq_len is not None:
self.mask = AttentionMask.make_causal(seq_len, to_seq_len)
else:
self.mask = None
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[Union[AttentionMask, torch.Tensor]] = None,
*args,
**kwargs,
) -> torch.Tensor:
r"""
att_mask A 2D or 3D mask which ignores attention at certain positions.
- If the mask is boolean, a value of True will keep the value,
while a value of False will mask the value.
Key padding masks (dimension: batch x sequence length) and attention masks
(dimension: sequence length x sequence length OR batch x sequence length x sequence length)
can be combined and passed in here. Method maybe_merge_masks provided in the utils can be
used for that merging.
- If the mask has the float type, then an additive mask is expected (masked values are -inf)
"""
# Convenience, create an attention mask if a tensor was passed
if att_mask is not None and isinstance(att_mask, torch.Tensor):
# By default we don't know of the causality, and a check would be expensive
att_mask = (
AttentionMask.from_bool(att_mask)
if att_mask.dtype == torch.bool
else AttentionMask(att_mask, is_causal=False)
)
# Handle a possibly deferred causal mask handling
if self.causal and self.mask is None:
self.mask = AttentionMask.make_causal(
seq_len=q.shape[-2],
to_seq_len=q.shape[-2],
device=q.device,
dtype=q.dtype,
)
# Merge the optional causal mask and the user-provided mask
if self.mask is not None:
self.mask = self.mask.to(dtype=q.dtype, device=q.device)
att_mask = att_mask + self.mask if att_mask is not None else self.mask
# Try to handle a case where the sequence is smaller than the mask
if (
att_mask is not None
and q.shape[-2] == k.shape[-2]
and q.shape[-2] < att_mask.shape[1]
):
if isinstance(att_mask, AttentionMask):
att_mask = att_mask.make_crop(seq_len=q.shape[-2])
else:
logging.error(
"Mismatching sparse attention mask and sequence length."
+ " Please pad the inputs or adjust the attention mask"
)
raise NotImplementedError
# Attend: (B x nh, S, hs) x (B x nh, hs, S) -> (B x nh, S, S)
y = scaled_dot_product_attention(
q=q, k=k, v=v, att_mask=att_mask, dropout=self.attn_drop
)
return y
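# Usage sketch (added, illustrative only; values are assumptions):
#   attn = ScaledDotProduct(dropout=0.1, causal=True, seq_len=256)
#   q = k = v = torch.randn(8, 256, 64)
#   out = attn(q, k, v)  # -> (8, 256, 64); the causal mask is built from seq_len at
#                        # construction time, or lazily from q.shape[-2] when seq_len is None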
|
bart_ls-main
|
xformers/xformers/components/attention/scaled_dot_product.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
# Reshapes key padding mask from (batch_size, src_len) -> (batch_size * num_heads, 1, src_len)
def reshape_key_padding_mask(
key_padding_mask: torch.Tensor, batched_dim: int
) -> torch.Tensor:
assert key_padding_mask.ndim == 2
batch_size, src_len = key_padding_mask.size()
num_heads = batched_dim // batch_size
return _reshape_key_padding_mask(key_padding_mask, batch_size, src_len, num_heads)
def _reshape_key_padding_mask(
key_padding_mask: torch.Tensor, batch_size: int, src_len: int, num_heads: int
) -> torch.Tensor:
assert key_padding_mask.shape == (batch_size, src_len)
key_padding_mask = (
key_padding_mask.view(batch_size, 1, 1, src_len)
.expand(-1, num_heads, -1, -1)
.reshape(batch_size * num_heads, 1, src_len)
)
return key_padding_mask
# Combine the attention mask and key padding mask into a single mask
# Taken from https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py
# Additive masking not yet supported
def maybe_merge_masks(
att_mask: Optional[torch.Tensor],
key_padding_mask: Optional[torch.Tensor],
batch_size: int,
src_len: int,
num_heads: int,
tgt_len: Optional[int] = None,
) -> Optional[torch.Tensor]:
if tgt_len is None:
tgt_len = src_len
if key_padding_mask is not None:
assert key_padding_mask.shape == (batch_size, src_len)
key_padding_mask = _reshape_key_padding_mask(
key_padding_mask, batch_size, src_len, num_heads
)
if att_mask is None:
# make sure dimensions of key padding mask are the same as those expected for att_mask
att_mask = key_padding_mask.expand(-1, tgt_len, -1)
# Assumption is that False means to mask.
elif att_mask.dtype == torch.bool:
att_mask = att_mask.logical_and(key_padding_mask)
else:
att_mask = att_mask.masked_fill(~key_padding_mask, float("-inf"))
return att_mask
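# Illustrative example (added): with batch_size=2, num_heads=4, src_len=5 and att_mask=None,
# a (2, 5) boolean key_padding_mask is reshaped to (8, 1, 5) and then expanded to an
# (8, 5, 5) boolean att_mask in which False marks positions to ignore.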
# Assumes that matrix passed in has had softmax applied to it.
def iterative_pinv(softmax_mat: torch.Tensor, n_iter=6, pinverse_original_init=False):
"""
Computing the Moore-Penrose inverse.
Use an iterative method from (Razavi et al. 2014) to approximate the Moore-Penrose inverse via efficient
matrix-matrix multiplications.
"""
i = torch.eye(
softmax_mat.size(-1), device=softmax_mat.device, dtype=softmax_mat.dtype
)
k = softmax_mat
# The entries of K are positive and ||K||_{\infty} = 1 due to softmax
if pinverse_original_init:
# This original implementation is more conservative to compute coefficient of Z_0.
v = 1 / torch.max(torch.sum(k, dim=-2)) * k.transpose(-1, -2)
else:
# This is the exact coefficient computation, 1 / ||K||_1, of initialization of Z_0, leading to faster
# convergence.
v = (
1
/ torch.max(torch.sum(k, dim=-2), dim=-1).values[:, None, None]
* k.transpose(-1, -2)
)
for _ in range(n_iter):
kv = torch.matmul(k, v)
v = torch.matmul(
0.25 * v,
13 * i - torch.matmul(kv, 15 * i - torch.matmul(kv, 7 * i - kv)),
)
return v
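# Added note: each iteration applies the higher-order update
#   v <- 0.25 * v @ (13*I - (k @ v) @ (15*I - (k @ v) @ (7*I - k @ v)))
# which progressively refines v towards the Moore-Penrose inverse of the softmax matrix.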
def bool_mask_to_additive(
mask: torch.Tensor, dtype: Optional[torch.dtype] = torch.float32
) -> torch.Tensor:
assert (
mask.dtype == torch.bool
), "This util is meant to convert in between bool masks and additive ones"
mask_ = torch.zeros_like(mask, dtype=dtype)
mask_[~mask] = float("-inf")
return mask_
|
bart_ls-main
|
xformers/xformers/components/attention/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Type, TypeVar
import torch
Self = TypeVar("Self", bound="AttentionMask")
class AttentionMask:
"""
Holds an attention mask, along with a couple of helpers and attributes.
.. note: this is an additive mask, meaning that coefficients which should be computed hold the '0.' value,
and coefficients which should be skipped hold the '-inf' value. Any other value is possible if the purpose
is to bias the attention computation for instance
.. note: the attention mask dimensions are expected to be `[batch, to_sequence, from_sequence]`,
`[to_sequence, from_sequence]`, or anything broadcastable in between
"""
def __init__(self, additive_mask: torch.Tensor, is_causal: bool = False):
assert additive_mask.is_floating_point(), additive_mask.dtype
assert not additive_mask.requires_grad
if additive_mask.ndim == 2:
additive_mask = additive_mask.unsqueeze(0)
self.values = additive_mask
self.is_causal = is_causal
self.seq_len = additive_mask.shape[1]
self.to_seq_len = additive_mask.shape[0]
def to_bool(self) -> torch.Tensor:
"""
.. warning: we assume here that True implies that the value should be computed
"""
return self.values != float("-inf")
@classmethod
def from_bool(cls: Type[Self], x: torch.Tensor) -> Self:
"""
Create an AttentionMask given a boolean pattern.
.. warning: we assume here that True implies that the value should be computed
"""
assert x.dtype == torch.bool
additive_mask = torch.empty_like(x, dtype=torch.float, device=x.device)
additive_mask.masked_fill_(x, 0.0)
additive_mask.masked_fill_(~x, float("-inf"))
return cls(additive_mask)
@classmethod
def from_multiplicative(cls: Type[Self], x: torch.Tensor) -> Self:
"""
Create an AttentionMask given a multiplicative attention mask.
"""
assert not x.dtype == torch.bool
additive_mask = torch.empty_like(x, dtype=torch.float, device=x.device)
x = x.bool()
additive_mask.masked_fill_(x, 0.0)
additive_mask.masked_fill_(~x, float("-inf"))
return cls(additive_mask)
@classmethod
def make_causal(
cls: Type[Self],
seq_len: int,
to_seq_len: Optional[int] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
) -> Self:
if not to_seq_len:
to_seq_len = seq_len
additive_mask = torch.triu(
torch.ones(seq_len, to_seq_len, device=device, dtype=dtype) * float("-inf"),
diagonal=1,
)
return cls(additive_mask=additive_mask, is_causal=True)
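# Illustrative example (added): AttentionMask.make_causal(3).values holds
#   [[[0., -inf, -inf],
#     [0.,   0., -inf],
#     [0.,   0.,   0.]]]
# (a leading broadcast dimension of size 1 is added by __init__).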
def make_crop(
self, seq_len: int, to_seq_len: Optional[int] = None
) -> "AttentionMask":
"""
Return a cropped attention mask, whose underlying tensor is a view of this one
"""
if not to_seq_len:
to_seq_len = seq_len
return AttentionMask(
self.values[:, :seq_len, :to_seq_len], is_causal=self.is_causal
)
def __repr__(self):
return f"AttentionMask - causal {self.is_causal} - mask " + str(self.values)
@property
def device(self):
return self.values.device
@property
def is_sparse(self):
return False
@property
def ndim(self):
return len(self.values.shape)
@property
def dtype(self):
return self.values.dtype
@property
def shape(self):
return self.values.shape
def __add__(self, other):
assert isinstance(other, type(self))
return AttentionMask(self.values + other.values, is_causal=False)
def to(
self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None
) -> "AttentionMask":
assert device is None or isinstance(device, torch.device)
assert dtype is None or isinstance(dtype, torch.dtype)
assert device is not None or dtype is not None
# Noop if we don't need to create another instance
if ((device and device == self.device) or not device) and (
(dtype and dtype == self.dtype) or not dtype
):
return self
return AttentionMask(self.values.to(device=device, dtype=dtype), self.is_causal)
|
bart_ls-main
|
xformers/xformers/components/attention/attention_mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass
from typing import Optional
from functools import partial
import torch
from xformers import _is_triton_available
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.utils import bool_mask_to_additive
import torch.nn.functional as F
from triton.ops.blocksparse import matmul as blocksparse_matmul
from triton.ops.blocksparse import softmax as blocksparse_softmax
from xformers.triton.softmax import MaskType
from xformers.triton.utils import gpu_capabilities_older_than_70
# Blocksparse requires Tensor cores
if gpu_capabilities_older_than_70():
logging.warning(
"Blocksparse is not available: the current GPU does not expose Tensor cores"
)
_is_triton_available = False
def _split_heads(num_heads: int, x: torch.Tensor):
shape = list(x.shape)
shape[:1] = [-1, num_heads]
return x.reshape(*shape)
if _is_triton_available:
@dataclass
class BlockSparseLocalAttentionConfig(AttentionConfig):
# layout: torch.Tensor # The dimensions of the random features
max_seq_len: int
block_size: int
dropout: float
num_heads: int
global_blocks: int
@register_attention("bs_local", BlockSparseLocalAttentionConfig)
class BlockSparseLocalAttention(Attention):
r"""
Thin wrap over the Triton blocksparse computations. The sparsity pattern is determined through the layout.
.. warning: the layout is assumed to have the dimensions [heads, seq, seq].
If some dimensions are missing, we assume that the same layout is to be used across heads.
.. warning: for now, the sequence (context) length has to be a power of two. This constraint could
be relaxed in the future.
.. note: it is possible to pass a specific per batch mask in the forward call,
but this will not lead to any speed up.
Any constant sparsity pattern is better passed through the layout parameter.
"""
def __init__(
self,
max_seq_len: int = 16384,
block_size: int = 1024,
dropout: float = 0.0,
block_unit: int = 16,
num_heads: int = 1, # optional, used to adapt the layout if in need
global_blocks: int = 0,
*args,
**kwargs,
):
super().__init__()
self.max_seq_len = max_seq_len
self.num_heads = num_heads
self.global_blocks = global_blocks
self.attn_drop = torch.nn.Dropout(dropout, inplace=False)
self.block_size = block_size
self.block_unit = block_unit
# make sure that the head dimension is not folded down with the batch
self.requires_head_dimension = False
# key padding mask and attention mask must be passed in separately
self.requires_separate_masks = True
self.requires_same_k_q_dimensions = True
self.layout = self._generate_layout()
self.ops = {} # layout for different sequence lengths
def _generate_layout(self):
num_blocks = self.max_seq_len // self.block_unit
local_mask = torch.zeros((self.num_heads, num_blocks, num_blocks), dtype=torch.int64)
num_local_blocks = self.block_size // self.block_unit
for block_start in range(0, num_blocks, num_local_blocks):
# setup local patterns
local_mask[:, block_start:block_start+num_local_blocks, block_start:block_start+num_local_blocks] = 1
# global patterns
if self.global_blocks > 0:
assert self.global_blocks < num_local_blocks
local_mask[:, block_start:block_start + self.global_blocks, :] = 1
local_mask[:, :, block_start:block_start + self.global_blocks] = 1
return local_mask
def _trim_layout(self, seq_len):
assert seq_len % self.block_unit == 0
num_blocks = seq_len // self.block_unit
return self.layout[:, :num_blocks, :num_blocks].cpu()
def _build_operators(self, seq_len):
assert seq_len % self.block_unit == 0
if seq_len not in self.ops:
layout = self._trim_layout(seq_len)
# blocksparse operators
sparse_dot_sdd = blocksparse_matmul(
layout,
self.block_unit,
"sdd",
trans_a=False,
trans_b=True,
)
sparse_dot_dsd = blocksparse_matmul(
layout,
self.block_unit,
"dsd",
trans_a=False,
trans_b=False,
)
sparse_softmax = blocksparse_softmax(layout, self.block_unit)
self.ops[seq_len] = (sparse_dot_sdd, sparse_dot_dsd, sparse_softmax)
return self.ops[seq_len]
        _mask_type_warning = True

        def update_mask_type(self, mask: torch.Tensor) -> torch.Tensor:
            if BlockSparseLocalAttention._mask_type_warning:
                logging.warning(
                    "Mask has to be additive. Fixing that but this slows things down"
                )
                BlockSparseLocalAttention._mask_type_warning = False
            return bool_mask_to_additive(mask)
# blocksparse requires fixed size input
def _pad_to_seq_size(self, x):
seq_len = x.size(-2)
            pad_len = self.max_seq_len - seq_len
if pad_len == 0:
return x, pad_len
return F.pad(x, (0,0,0,pad_len), value=0), pad_len
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
scale: float = 1.0,
attn_bias: Optional[torch.Tensor] = None,
*args,
**kwargs,
) -> torch.Tensor:
r"""
att_mask A 2D attention mask. The dtype must be the same as q. An additive mask is expected,
meaning float values using "-inf" to mask values.
key_padding_mask A mask with size (batch size x sequence length). The dtype must be the same as q.
An additive mask is expected, meaning float values using "-inf" to mask values
"""
# NOTE:
# The attention mask will be taken into account when computing the softmax
# meaning that non-masked values which are present in the initial blocksparse layout will be computed.
# If blocks are to be constantly masked, better perf would thus be reached by signalling them out in the
# initial attention setup
bh = q.size(0)
orig_seq_len = q.size(1)
head_dim = q.size(-1)
if key_padding_mask is None:
key_padding_mask = torch.zeros(int(q.shape[0]/self.num_heads), q.size(-2))
att_mask = None # HACK
# pad the input length to factors of bucket size
def _pad_to_window_size(x, window_size):
"""
sequence length here should be power of 2
"""
seq_len = x.size(-2)
while window_size < seq_len:
window_size *= 2
pad_len = window_size - seq_len
return F.pad(x, (0,0,0,pad_len), value=0), pad_len
q, pad_len = _pad_to_window_size(q, self.block_size)
k, pad_len = _pad_to_window_size(k, self.block_size)
v, pad_len = _pad_to_window_size(v, self.block_size)
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
key_padding_mask[key_padding_mask == 1] = float('-inf')
key_padding_mask = key_padding_mask.to(q)
q, k, v = map(partial(_split_heads, self.num_heads), (q, k, v))
            if att_mask is not None and att_mask.dtype == torch.bool:
                att_mask = self.update_mask_type(att_mask)
            if key_padding_mask is not None and key_padding_mask.dtype == torch.bool:
                key_padding_mask = self.update_mask_type(key_padding_mask)
assert (
att_mask is None or att_mask.dim() == 2
), "The attention mask is constant across heads, expected dimensions are [seq x seq]"
assert (
q.shape[-2] == k.shape[-2]
), "Blocksparse requires the same dimensions for K and Q for now"
assert math.log(
q.shape[-2], 2
).is_integer(), (
"For now blocksparse only works on power-of-two sequence lengths", q.shape
)
# Blocksparse only works on fp16
q_dtype = q.dtype
if q_dtype != torch.float16:
q, k, v = q.half(), k.half(), v.half()
if att_mask is not None:
att_mask = att_mask.half()
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.half()
# Self-attend: (B, nh, S, hs) x (B, nh, hs, S) -> (B, nh, S, S)
# When the computations are block sparse, the matrix types change along the way:
# - (sparse) attention matrix = (dense) Kt * (dense) Q
q = q / math.sqrt(q.size(-1))
sparse_dot_sdd, sparse_dot_dsd, sparse_softmax = self._build_operators(q.size(-2))
sparse_att_mat = sparse_dot_sdd(q, k)
if attn_bias is not None:
block_attn_bias = self.extract_block_bias(attn_bias, k.shape[-2])
block_attn_bias = block_attn_bias.view(1, self.num_heads, -1, self.block_size // self.block_unit, self.block_unit, self.block_size // self.block_unit, self.block_unit).transpose(-2,-3)
block_attn_bias = block_attn_bias.reshape(1, -1, self.block_unit, self.block_unit)
sparse_att_mat += block_attn_bias
# - softmax on the sparse attention matrix
sparse_att_mat = sparse_softmax(
sparse_att_mat,
scale=scale,
key_padding_mask=key_padding_mask,
attn_mask=att_mask,
key_padding_mask_mode=MaskType.ADD,
attn_mask_mode=MaskType.ADD,
)
sparse_att_mat = self.attn_drop(sparse_att_mat)
# - then (dense) attention is (sparse) attention matrix * dense (value)
a = sparse_dot_dsd(sparse_att_mat, v)
out = a.view(bh, -1, head_dim)[:,:orig_seq_len]
if q_dtype != torch.float16:
return out.to(q_dtype)
return out
def extract_block_bias(self, attn_bias, seq_len):
attn_bias = attn_bias[:,:,:seq_len,:seq_len]
block_bias = []
for b_idx in range(seq_len // self.block_size):
block_bias.append(attn_bias[:,:, b_idx*self.block_size: b_idx*self.block_size+self.block_size, b_idx*self.block_size: b_idx*self.block_size+self.block_size].unsqueeze(2))
return torch.cat(block_bias, dim=2)
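# ---------------------------------------------------------------------------
# Hedged sketch (editor addition): the layout construction used by
# _generate_layout above, reproduced on toy sizes so it can be inspected on
# CPU without Triton or a GPU. All sizes below are illustrative only.
if __name__ == "__main__":
    num_heads, max_seq_len, block_size, block_unit, global_blocks = 1, 64, 32, 16, 1
    num_blocks = max_seq_len // block_unit            # 4 coarse blocks of 16 tokens
    num_local_blocks = block_size // block_unit       # 2 coarse blocks per local window
    layout = torch.zeros((num_heads, num_blocks, num_blocks), dtype=torch.int64)
    for start in range(0, num_blocks, num_local_blocks):
        # local (block-diagonal) window
        layout[:, start:start + num_local_blocks, start:start + num_local_blocks] = 1
        # global rows / columns at the start of each window
        layout[:, start:start + global_blocks, :] = 1
        layout[:, :, start:start + global_blocks] = 1
    print(layout[0])  # block-diagonal pattern plus global stripes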
|
bart_ls-main
|
xformers/xformers/components/attention/blocksparse_local.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Simple non-overlapping local block attention, used to test how strong the locality inductive bias is in these tasks.
"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from torch import Tensor
import torch.nn.functional as F
from functools import partial
from xformers.components.attention import Attention, AttentionConfig, register_attention
@dataclass
class BlockNoglobalAttentionConfig(AttentionConfig):
block_size: int
num_heads: int
require_key_mask: bool
@register_attention("block_noglobal", BlockNoglobalAttentionConfig)
class BlockNoglobalAttention(Attention):
def __init__(
self,
dropout: float,
num_heads: int,
block_size: int = 1024,
*args, **kwargs
):
super().__init__()
self.block_size = block_size
self.drop_attn = nn.Dropout(dropout)
self.num_head = num_heads
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
attn_bias: Optional[torch.Tensor] = None,
*args, **kwargs
):
# Notation: batch size: B, sequence length: L, number of blocks: nb, number of heads: nh
# q, k, v: (B * nh, L, head_dim)
bh = q.size(0)
bsz = bh // self.num_head
head_dim = q.size(-1)
sequence_length = q.shape[1]
if key_padding_mask is None:
key_padding_mask = torch.zeros(int(q.shape[0]/self.num_head), q.size(-2))
key_padding_mask = key_padding_mask.to(q)
# # pad the input length to factors of bucket size
# def _pad_to_window_size(x, window_size):
# seq_len = x.size(-2)
# pad_len = (window_size - seq_len % window_size) % window_size
# return F.pad(x, (0,0,0,pad_len), value=0), pad_len
# q, _ = _pad_to_window_size(q, self.block_size)
# k, _ = _pad_to_window_size(k, self.block_size)
# v, _ = _pad_to_window_size(v, self.block_size)
# if key_padding_mask.shape[1] % self.block_size != 0:
# pad_len = (self.block_size - key_padding_mask.shape[1] % self.block_size) % self.block_size
# key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
assert q.shape[1] % self.block_size == 0, q.shape
num_blocks = q.shape[1] // self.block_size
b_q = blockify(num_blocks, q)
b_k, b_v = map(partial(blockify, num_blocks), (k, v)) # (B * nh, nb, L // nb, head_dim)
dots = torch.einsum('buie,buje->buij', b_q, b_k) * (head_dim ** -0.5) # (B * nh, nb, L // nb, L // nb)
mask_value = -10000
        # this model does not use global token markers (-1); only padding (1) is masked
        q_mask = key_padding_mask != 1
        # True means the position is kept (not padding)
        kv_mask = q_mask
mq, mk = map(partial(blockify, num_blocks), (q_mask, kv_mask)) # (B, nb, L // nb)
mask = mq[:, :, :, None] * mk[:, :, None, :] # (B, nb, L // nb, L // nb)
dots = dots.view(bsz, self.num_head, num_blocks, self.block_size, self.block_size)
dots = dots.float().masked_fill_(~mask.unsqueeze(1), mask_value).to(q)
# add relational bias
seq_len = q.size(1)
if attn_bias is not None:
dots += self.extract_block_bias(attn_bias, seq_len)
block_attn_weights = dots.view(bsz*self.num_head, -1, self.block_size)
all_attn_probs = block_attn_weights.softmax(dim=-1)
all_attn_probs = self.drop_attn(all_attn_probs)
# calculate block attention
block_attn_probs = all_attn_probs[:, :, :block_attn_weights.shape[-1]]
block_attn_probs = block_attn_probs.view(bsz*self.num_head, -1, self.block_size, self.block_size)
out = block_attn_probs.matmul(b_v).view(bsz*self.num_head, -1, head_dim)
out = out[:, :sequence_length, :]
return out
def extract_block_bias(self, attn_bias, seq_len):
attn_bias = attn_bias[:,:,:seq_len,:seq_len]
block_bias = []
for b_idx in range(seq_len // self.block_size):
block_bias.append(attn_bias[:,:, b_idx*self.block_size: b_idx*self.block_size+self.block_size, b_idx*self.block_size: b_idx*self.block_size+self.block_size].unsqueeze(2))
return torch.cat(block_bias, dim=2)
def blockify(num_blocks, t, dim=1):
shape = list(t.shape)
shape[dim:dim+1] = [num_blocks, -1]
return t.reshape(*shape)
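# ---------------------------------------------------------------------------
# Hedged sketch (editor addition): what blockify does to the
# (batch * heads, seq_len, head_dim) tensors used in BlockNoglobalAttention.
# Sizes are illustrative only.
if __name__ == "__main__":
    x = torch.arange(2 * 8 * 4, dtype=torch.float32).reshape(2, 8, 4)
    blocks = blockify(2, x)   # -> (2, 2, 4, 4): two blocks of four tokens each
    print(blocks.shape)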
|
bart_ls-main
|
xformers/xformers/components/attention/block_noglobal.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
from torch import Tensor
import torch.nn.functional as F
import torch
import torch.nn as nn
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.core import scaled_dot_product_attention
@dataclass
class LinformerSelfAttentionConfig(AttentionConfig):
seq_len: int # dimension of the input sequence
k: Optional[int] # dimension of the internal space
@register_attention("linformer", LinformerSelfAttentionConfig)
class LinformerAttention(Attention):
def __init__(
self,
dropout: float,
num_heads: int,
max_seq_len: int = 4096,
compress: int = 8,
*args, **kwargs
):
"""
Linformer attention mechanism,
from `Linformer: Self-Attention with Linear Complexity`_, Wang et al (2020).
The original notation is kept as is.
.. _`Linformer: Self-Attention with Linear Complexity` : https://arxiv.org/abs/2006.04768v2
"""
super().__init__()
k = max_seq_len // compress
self.num_heads = num_heads
self.E = nn.Linear(max_seq_len, k, bias=False)
self.F = nn.Linear(max_seq_len, k, bias=False)
self.attn_drop = nn.Dropout(dropout, inplace=False)
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
key_padding_mask: Optional[Tensor] = None,
*args, **kwargs
):
bsz = q.shape[0] // self.num_heads
mask = ~key_padding_mask.eq(1)
mask = mask.to(q)
tgt_len = k.shape[1]
k_projected = F.linear(k.transpose(-2, -1), self.E.weight[:, 0:tgt_len]).transpose(-2, -1)
v_projected = F.linear(v.transpose(-2, -1), self.F.weight[:, 0:tgt_len]).transpose(-2, -1)
y = scaled_dot_product_attention(
q=q, k=k_projected, v=v_projected, att_mask=None, dropout=self.attn_drop
)
y = y.view(bsz, self.num_heads, y.shape[1], y.shape[2])
y = (y * mask[:,None,:,None]).view(bsz*self.num_heads, tgt_len, -1)
return y
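# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): CPU-only smoke test of the module
# above. The sizes are illustrative; q/k/v follow the
# (batch * heads, seq_len, head_dim) convention used across this sub-package.
if __name__ == "__main__":
    torch.manual_seed(0)
    attn = LinformerAttention(dropout=0.0, num_heads=2, max_seq_len=16, compress=4)
    q = torch.randn(2 * 2, 16, 8)              # (batch * heads, seq_len, head_dim)
    key_padding_mask = torch.zeros(2, 16)      # 0 = keep, 1 = padding
    out = attn(q, q, q, key_padding_mask=key_padding_mask)
    print(out.shape)                           # expected: torch.Size([4, 16, 8])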
|
bart_ls-main
|
xformers/xformers/components/attention/linformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List
import numpy as np
import torch
# generic nd cases
def _generate_nd_grid(*sizes):
coords = [torch.arange(s) for s in sizes]
return torch.meshgrid(*coords)
def local_nd_distance(*sizes, p=2.0, weights=None):
if weights is None:
weights = (1,) * len(sizes)
assert len(sizes) == len(weights)
grid = _generate_nd_grid(*sizes)
grid = [i.flatten() * w for i, w in zip(grid, weights)]
grid = torch.stack(grid, dim=1).float()
d = torch.cdist(grid, grid, p=p)
return d
def local_nd_gaussian_distribution(*sizes, sigma=1):
d = local_nd_distance(*sizes, p=2.0) ** 2
d = torch.exp(-0.5 * sigma ** (-2.0) * d)
return d
def local_nd_pattern(*sizes, distance, p=2.0):
d = local_nd_distance(*sizes, p=p)
return d < distance
def axial_nd_pattern(*sizes):
# axial is a special case with p=0 and distance=2
d = local_nd_distance(*sizes, p=0)
return d < 2
def random_pattern_from_probability_matrix(dist_matrix, nnz):
att = torch.zeros_like(dist_matrix, dtype=torch.bool)
# PyTorch multinomial wrongly doesn't support sampling when number of categories
# is > 2^24, arguing that it's because it's the max representable consecutive element
# in fp32 and that the kernels use float32. This is actually not true, and the kernels
# should work fine if double tensor is passed on CPU. This is a bug that was introduced
# in https://github.com/pytorch/pytorch/commit/bf04c2ca2f591d98ce57816f0ef0cd20a21bbf66
# when unifying the checks between CPU and CUDA. For now, just fall-back to numpy
if dist_matrix.numel() > 2 ** 24:
dist_matrix = dist_matrix.double()
dist_matrix /= dist_matrix.sum()
idxs = np.random.choice(
dist_matrix.numel(), nnz, p=dist_matrix.flatten(), replace=False
)
idxs = torch.as_tensor(idxs)
else:
idxs = torch.multinomial(dist_matrix.flatten(), nnz, replacement=False)
att.view(-1)[idxs] = True
return att
def global_token_pattern(attention_query_mask: torch.Tensor) -> torch.Tensor:
assert attention_query_mask.ndim == 1
assert attention_query_mask.dtype == torch.bool
attention_query_mask = attention_query_mask[None, :]
mask = attention_query_mask | attention_query_mask.transpose(1, 0)
return mask
def random_pattern(attn_size: int, sparsity: float) -> torch.Tensor:
assert 0 < sparsity < 1
mask = torch.rand(attn_size, attn_size) > sparsity
return mask
# 1d-specific cases
def local_1d_pattern(attn_size: int, window_size: int) -> torch.Tensor:
assert (
window_size % 2 == 1
), "The window size is assumed to be odd (counts self-attention + 2 wings)"
h_win_size = window_size // 2 + 1
return local_nd_pattern(attn_size, distance=h_win_size, p=1.0)
def causal_1d_pattern(attn_size: int) -> torch.Tensor:
mask = torch.tril(torch.ones(attn_size, attn_size, dtype=torch.bool))
return mask
# 2d-specific cases
def horizontal_axial_2d_distance(H, W, p=2.0):
d = local_nd_distance(H, W, p=p, weights=(1, 0))
return d
def vertical_axial_2d_distance(H, W, p=2.0):
d = local_nd_distance(H, W, p=p, weights=(0, 1))
return d
def local_2d_distance(H, W, p=2.0):
return local_nd_distance(H, W, p=p)
def local_2d_gausian_distribution(H, W, sigma=1):
return local_nd_gaussian_distribution(H, W, sigma=sigma)
def local_2d_pattern(H, W, distance, p=2.0):
return local_nd_pattern(H, W, distance=distance, p=p)
def axial_2d_pattern(H, W):
return axial_nd_pattern(H, W)
def swin_attention_pattern(H, W, window_size, shift_size=0):
assert H % window_size == 0
assert W % window_size == 0
    assert 0 <= shift_size < window_size, "shift_size must be in [0, window_size)"
# input grid
i, j = _generate_nd_grid(H, W)
i, j = i + 0.5, j + 0.5
# anchors grid
# if shift is present, add extra element to the grid
# to account for the uneven partitioning
extra = int(shift_size % window_size != 0)
grid_h = H // window_size + extra
grid_w = W // window_size + extra
ii, jj = _generate_nd_grid(grid_h, grid_w)
# convert shift to be compatible with the paper representation
s = (-shift_size) % window_size
offset = window_size / 2 - s
ii = ii * window_size + offset
jj = jj * window_size + offset
input_coords = torch.stack([i.flatten(), j.flatten()], 1).float()
anchors_coords = torch.stack([ii.flatten(), jj.flatten()], 1).float()
anchor_id = torch.cdist(input_coords, anchors_coords, p=2).argmin(1)
mask = anchor_id[:, None] == anchor_id[None, :]
return mask
def dilated_2d_pattern(H, W, k=2):
"""
Returns a 2d pattern that samples 1 every k elements in the attention mask.
Can be seen as a form of downsampling, where every pixel attends to a downsampled
version of the input.
"""
d_h = local_nd_distance(H, W, p=1, weights=(1, 0))
d_w = local_nd_distance(H, W, p=1, weights=(0, 1))
d = (d_h.floor() % k == 0) & (d_w.floor() % k == 0)
return d
# Block sparse utils
def block_sparsify_tensor(x, mask, block_size):
"""
Block sparsify a tensor, given a mask and block size
"""
ret = torch.empty(
(x.size(0), mask.sum(), block_size, block_size), dtype=x.dtype, device=x.device
)
for idx, (h, i, j) in enumerate(zip(*mask.nonzero(as_tuple=True))):
ret[:, idx, :, :] = x[
:,
h,
i * block_size : (i + 1) * block_size,
j * block_size : (j + 1) * block_size,
]
return ret
def pattern_to_layout(mask: torch.Tensor, block_size: int) -> torch.Tensor:
r"""
Given a mask pattern and blocksize, return the corresponding layout
which makes sure that all the positives in the mask are covered
"""
assert mask.ndim >= 2, "We're expecting [Heads, Seq, Seq] or [Seq, Seq]"
_should_squeeze = False
if mask.ndim == 2:
mask = mask.unsqueeze(0)
_should_squeeze = True
assert (
mask.shape[1] % block_size == 0 and mask.shape[2] % block_size == 0
), "We're only handling masks divisible by block_size"
# Now mark the mask
layout = torch.nn.functional.max_pool2d(
mask.to(torch.float), kernel_size=block_size, stride=block_size
)
layout = layout.to(torch.long)
if _should_squeeze:
layout.squeeze_(0)
return layout
def alibi_pattern(threshold: float, mask_shape: torch.Size) -> torch.Tensor:
r"""
Use the additive bias computation from ALiBi_ to generate a mask.
Note that this mask can in turn be used to generate a blocksparse attention computation layout
.. note: mask_shape is expected to hold the [heads, seq, seq] dimensions
.. _ALiBi: https://arxiv.org/pdf/2108.12409.pdf
"""
# CREDITS: code snippet from Ofir Press, one of the authors
def get_slopes(n: int):
def get_slopes_power_of_2(n: int) -> List[float]:
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio ** i for i in range(n)]
# In the paper, we only train models that have 2^a heads for some a. This function has
# some good properties that only occur when the input is a power of 2. To maintain that even
# when the number of heads is not a power of 2, we use this workaround.
if math.log2(n).is_integer():
return get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
return (
get_slopes_power_of_2(closest_power_of_2)
+ get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
)
maxpos = mask_shape[1]
attn_heads = mask_shape[0]
slopes = torch.Tensor(get_slopes(attn_heads))
# In the next line, the part after the * is what constructs the diagonal matrix
# (right matrix in Figure 3 in the paper).
# If you run it you'll see that it doesn't exactly print out the same matrix as we have in Figure 3,
# but one where all rows are identical.
# This works because the softmax operation is invariant to translation,
# and our bias functions are always linear.
alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(maxpos).unsqueeze(
0
).unsqueeze(0).expand(attn_heads, -1, -1)
alibi = alibi.view(attn_heads, 1, maxpos)
# Now threshold arbitrarily, report the mask
return alibi < threshold
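# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): building a combined local + global
# pattern and converting it into a blocksparse layout with the helpers above.
# The sizes are illustrative only.
if __name__ == "__main__":
    seq_len = 64
    local = local_1d_pattern(seq_len, window_size=5)     # (64, 64) boolean pattern
    global_q = torch.zeros(seq_len, dtype=torch.bool)
    global_q[:2] = True                                  # first two tokens attend globally
    pattern = local | global_token_pattern(global_q)
    layout = pattern_to_layout(pattern, block_size=16)   # (4, 4) block layout
    print(pattern.float().mean().item(), layout)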
|
bart_ls-main
|
xformers/xformers/components/attention/attention_patterns.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.core import (
scaled_dot_product_attention,
scaled_query_key_softmax,
)
from xformers.components.attention.utils import (
bool_mask_to_additive,
iterative_pinv,
reshape_key_padding_mask,
)
@dataclass
class NystromSelfAttentionConfig(AttentionConfig):
"""
num_heads Number of heads.
num_landmarks Number of landmarks to use for softmax approximation. 64 often sufficient for a good
approximation according to https://arxiv.org/pdf/2102.03902.pdf.
causal Apply a causal mask, in that the attention cannot be applied to the future.
use_razavi_pinverse If true, use iterative method from (Razavi et al. 2014) to approximate the Moore-Penrose
inverse, otherwise use standard torch inverse.
pinverse_original_init True if using original initialization when calculating Moore-Penrose pseudo inverse using
method from (Razavi et al. 2014).
False if using exact coefficient computation (leads to faster convergence).
inv_iterations Number of iterations for calculating the Moore-Penrose pseudo inverse.
v_skip_connection A module that will take V as input and will be added as a skip connection to the
softmax approximation. A skip connection is added in the paper to help with training.
conv_kernel_size Kernel size for convolution optionally added to help in training.
If v_skip_connection is not specified, this will be used to define the default
depth wise convolution used as a skip connection.
If both conv_kernel_size and v_skip_connection are None, no skip connection will
be added.
landmark_pooling Which module to use when computing landmarks. Default is AdaptiveAvgPool2d.
"""
num_heads: int
num_landmarks: Optional[int]
landmark_pooling: Optional[nn.Module]
causal: Optional[bool]
pinverse_original_init: Optional[bool]
inv_iterations: Optional[int]
v_skip_connection: Optional[nn.Module]
conv_kernel_size: Optional[int]
use_razavi_pinverse: Optional[bool]
def get_avg_pool(n: int):
def avg_pool(x: torch.Tensor):
# Average independently for every segment in the sequence dimension
seq_len = x.shape[1]
head_dim = x.shape[2]
segments = seq_len // n
# Dimensions are a match
if seq_len % n == 0:
return x.reshape(
-1,
n,
segments,
head_dim,
).mean(dim=-2)
# Handle the last segment boundary being off
n_round = n - seq_len % n
x_avg_round = (
x[:, : n_round * segments, :]
.reshape(-1, n_round, segments, head_dim)
.mean(dim=-2)
)
x_avg_off = (
x[:, n_round * segments :, :]
.reshape(-1, n - n_round, segments + 1, head_dim)
.mean(dim=-2)
)
return torch.cat((x_avg_round, x_avg_off), dim=-2)
return avg_pool
@register_attention("nystrom", NystromSelfAttentionConfig)
class NystromAttention(Attention):
# TODO: update defaults for use_razavi_pinverse and inv_iterations
def __init__(
self,
dropout: float,
num_heads: int,
num_landmarks: int = 256,
landmark_pooling: Optional[nn.Module] = None,
causal: bool = False,
use_razavi_pinverse: bool = True,
pinverse_original_init: bool = False,
inv_iterations: int = 6, # recommended default in paper was 6.
conv_kernel_size: int = 35,
v_skip_connection: Optional[nn.Module] = None,
*args,
**kwargs,
):
"""
Nystrom attention mechanism, from Nystromformer_.
::
"A Nystrom-based Algorithm for Approximating Self-Attention."
Xiong, Y., Zeng, Z., Chakraborty, R., Tan, M., Fung, G., Li, Y., Singh, V. (2021)
Reference codebase: https://github.com/mlpen/Nystromformer
.. _Nystromformer: https://arxiv.org/pdf/2102.03902.pdf
"""
super().__init__()
# merged key padding mask and attention mask is not accepted
self.requires_separate_masks = True
self.num_landmarks = num_landmarks
# TODO: should be able to not have to pass in num_heads
self.num_heads = num_heads
self.use_razavi_pinverse = use_razavi_pinverse
self.pinverse_original_init = pinverse_original_init
self.inv_iterations = inv_iterations
self.attn_drop = nn.Dropout(dropout)
self.skip_connection = v_skip_connection
self.causal = causal
if self.skip_connection is None and conv_kernel_size is not None:
self.skip_connection = nn.Conv2d(
in_channels=self.num_heads,
out_channels=self.num_heads,
kernel_size=(conv_kernel_size, 1),
padding=(conv_kernel_size // 2, 0),
bias=False,
groups=self.num_heads,
)
if landmark_pooling is not None:
self.landmark_pooling = landmark_pooling
else:
self.landmark_pooling = get_avg_pool(self.num_landmarks)
# Optional lower triangular masks for causal attention
self.causal_mask_1: Optional[torch.Tensor] = None
self.causal_mask_2: Optional[torch.Tensor] = None
self.causal_mask_3: Optional[torch.Tensor] = None
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
key_padding_mask: Optional[torch.Tensor] = None,
*args,
**kwargs,
):
r"""
key_padding_mask Only a key padding mask is accepted here. The size must be (batch size, sequence length) or
(batch size * num_heads, 1, sequence length). If dimensions are not correct, the mask will
be ignored. An additive mask is expected, meaning float values using "-inf" to mask values
"""
batched_dim = k.size(0)
seq_len = k.size(-2)
tt = {"dtype": q.dtype, "device": q.device}
if key_padding_mask is not None:
if key_padding_mask.dtype == torch.bool:
logging.warning(
"Bool mask found, but an additive mask is expected. Converting but this is slow"
)
key_padding_mask = bool_mask_to_additive(key_padding_mask)
if key_padding_mask.ndim == 2:
key_padding_mask = reshape_key_padding_mask(
key_padding_mask, batched_dim
)
assert key_padding_mask.size() == (batched_dim, 1, seq_len), (
f"key_padding_mask has invalid dimensions {key_padding_mask.size()}."
f" Must have dimensions {batched_dim, 1, seq_len} or (batch_size, {seq_len})."
)
if self.num_landmarks >= seq_len:
mask: Optional[torch.Tensor] = None
if self.causal:
mask = self._triu_mask(batched_dim, seq_len, seq_len, **tt)
if key_padding_mask is not None:
mask = key_padding_mask if mask is None else mask + key_padding_mask
x = scaled_dot_product_attention(q=q, k=k, v=v, att_mask=mask)
else:
q_landmarks = self.landmark_pooling(q)
k_landmarks = self.landmark_pooling(k)
if self.causal and (
self.causal_mask_1 is None
or (batched_dim, seq_len, self.num_landmarks)
!= self.causal_mask_1.size()
):
self.causal_mask_1 = self._triu_mask(
batched_dim, seq_len, self.num_landmarks, **tt
)
self.causal_mask_2 = self._triu_mask(
batched_dim, self.num_landmarks, self.num_landmarks, **tt
)
self.causal_mask_3 = self._triu_mask(
batched_dim, self.num_landmarks, seq_len, **tt
)
mask_1: Optional[torch.Tensor] = self.causal_mask_1
mask_2: Optional[torch.Tensor] = self.causal_mask_2
mask_3: Optional[torch.Tensor] = self.causal_mask_3
if key_padding_mask is not None:
mask_1 = (
key_padding_mask.transpose(-2, -1)
if mask_1 is None
else mask_1 + key_padding_mask.transpose(-2, -1)
)
mask_3 = (
key_padding_mask if mask_3 is None else mask_3 + key_padding_mask
)
kernel_1 = scaled_query_key_softmax(q=q, k=k_landmarks, att_mask=mask_1)
kernel_2 = scaled_query_key_softmax(
q=q_landmarks, k=k_landmarks, att_mask=mask_2
)
            # mask_3 already merges the causal mask and the key padding mask (both additive)
            kernel_3 = scaled_dot_product_attention(
                q=q_landmarks, k=k, v=v, att_mask=mask_3
            )
kernel_2_inv = (
iterative_pinv(
kernel_2, self.inv_iterations, self.pinverse_original_init
)
if self.use_razavi_pinverse
else torch.linalg.pinv(kernel_2)
)
x = torch.matmul(
torch.matmul(
kernel_1,
kernel_2_inv,
),
kernel_3,
)
if self.skip_connection:
# Assumption here is that v is 3D.
v_conv = self.skip_connection(
v.reshape(-1, self.num_heads, v.size(-2), v.size(-1))
)
x += v_conv.reshape(-1, v_conv.size(-2), v_conv.size(-1))
x = self.attn_drop(x)
return x
def _triu_mask(self, dim_1: int, dim_2: int, dim_3: int, **kwargs) -> torch.Tensor:
device = kwargs["device"]
dtype = kwargs["dtype"]
return torch.triu(
torch.ones(dim_2, dim_3, dtype=dtype, device=device) * float("-inf"),
diagonal=1,
).expand(
dim_1, -1, -1
) # micro optim, save memory on the batch dimension
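# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): CPU smoke test with a small number of
# landmarks so the approximation path (num_landmarks < seq_len) is exercised.
# conv_kernel_size=None disables the depth-wise convolution skip connection so
# the example stays purely tensor based. Sizes are illustrative only.
if __name__ == "__main__":
    torch.manual_seed(0)
    attn = NystromAttention(
        dropout=0.0, num_heads=2, num_landmarks=4, conv_kernel_size=None
    )
    q = torch.randn(2 * 2, 16, 8)   # (batch * heads, seq_len, head_dim)
    out = attn(q, q, q)
    print(out.shape)                # expected: torch.Size([4, 16, 8])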
|
bart_ls-main
|
xformers/xformers/components/attention/nystrom.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from xformers.components.attention import ATTENTION_REGISTRY, Attention, AttentionConfig, register_attention
from transformers.models.reformer.modeling_reformer import LSHSelfAttention, ReformerConfig, ReverseSort
@dataclass
class LSHSelfAttentionConfig(AttentionConfig):
num_hash: int
num_heads: int
dim_model: int
seq_len: int
num_buckets: int
chunk_length: int
@register_attention("lsh_reformer", LSHSelfAttentionConfig)
class LSHAttention(Attention):
"""
Using implementation from huggingface transformers
LSH attention mechanism, from
"
Reformer: The Efficient Transformer
Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya (2020)
"
ArXiv: https://arxiv.org/abs/2001.04451
Reference repository: https://huggingface.co/transformers/model_doc/reformer.html
"""
def __init__(
self,
num_heads: int,
dim_model: int,
num_hash: int = 4,
seq_len: int = 4096,
dropout: float = 0.0,
        num_buckets: Optional[int] = None,
        chunk_length: Optional[int] = None,
*args,
**kwargs,
):
super().__init__()
attn_config = ReformerConfig()
attn_config.num_attention_heads = num_heads
attn_config.attn_layers = ["lsh"]
attn_config.is_decoder = False
attn_config.num_hashes = num_hash
attn_config.max_position_embeddings = seq_len
attn_config.attention_head_size = dim_model // num_heads
# attn_config.feed_forward_size = 1
if chunk_length:
attn_config.lsh_attn_chunk_length = chunk_length
attn_config.num_buckets = num_buckets
self.attn = ReformerAttention(attn_config)
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
*args, **kwargs
):
return self.attn(q, k, v, att_mask, key_padding_mask)
class ReformerAttention(LSHSelfAttention):
def __init__(self, config):
super().__init__(config)
# self.query_key = None
# self.value = None
del self.query_key
del self.value
self.num_heads = self.num_attention_heads
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
# attention_mask=None,
# head_mask=None,
# num_hashes=None,
# buckets=None,
# past_buckets_states=None,
# use_cache=False,
# output_attentions=False,
*args,
**kwargs,
):
orig_sequence_length, head_dim = q.shape[1], q.shape[2]
# padding to factors of self.chunk_length
        # a suitable sequence length is needed to make the block-wise local attention work
def _pad_to_window_size(x, window_size):
seq_len = x.size(-2)
pad_len = (window_size - seq_len % window_size) % window_size
return F.pad(x, (0,0,0,pad_len), value=0), pad_len
q, _ = _pad_to_window_size(q, self.chunk_length)
v, _ = _pad_to_window_size(v, self.chunk_length)
key_padding_mask = key_padding_mask.to(q)
if key_padding_mask.shape[1] % self.chunk_length != 0:
pad_len = (self.chunk_length - key_padding_mask.shape[1] % self.chunk_length) % self.chunk_length
# key padding mask: 1 means padding tokens
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
sequence_length = q.shape[1]
batch_size = q.shape[0] // self.num_attention_heads
# num hashes can optionally be overwritten by user
num_hashes = self.num_hashes
# @xwhan reformer needs the key and query vectors to be the same
# project hidden_states to query_key and value
query_key_vectors = q.view(batch_size, self.num_attention_heads, sequence_length, head_dim)
value_vectors = v.view(batch_size, self.num_attention_heads, sequence_length, head_dim)
do_standard_self_attention = (sequence_length <= self.chunk_length)
# xformer format, att_mask: B x seq_len x seq_len #TODO: better solution?
if key_padding_mask is not None:
attention_mask = key_padding_mask != 1
elif att_mask is not None:
assert len(att_mask.shape) == 3
att_mask = att_mask.reshape(batch_size, self.num_heads, -1, sequence_length)[:,0,:,:]
attention_mask = att_mask.sum(1) > 0
else:
attention_mask = None
# LSH attention only makes sense if chunked attention should be performed
if not do_standard_self_attention:
# set `num_buckets` on the fly, recommended way to do it
if self.num_buckets is None:
                self._set_num_buckets(sequence_length)
# hash query key vectors into buckets
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
assert (
int(buckets.shape[-1]) == num_hashes * sequence_length
), f"last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}"
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
sequence_length, buckets, num_hashes
)
# make sure bucket idx is not longer then sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
# cluster query key value vectors according to hashed buckets
query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
if self.chunk_length is None:
assert (
self.num_chunks_before == 0 and self.num_chunks_after == 0
), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
else:
# get sequence length indices
sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(
batch_size, self.num_attention_heads, 1
)
# scale key vectors
key_vectors = self._len_and_dim_norm(query_key_vectors)
# set query_vectors to query key vectors if LSH self attention
query_vectors = query_key_vectors
# free memory
del query_key_vectors
# get attention probs
out_vectors, logits, attention_probs = self._attend(
query_vectors=query_vectors,
key_vectors=key_vectors,
value_vectors=value_vectors,
sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
attention_mask=attention_mask,
head_mask=None,
do_standard_self_attention=do_standard_self_attention,
do_cached_attention=False
)
# free memory
del key_vectors, value_vectors
# re-order out_vectors and logits
if not do_standard_self_attention:
# sort clusters back to correct ordering
out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
if not do_standard_self_attention:
# sum up all hash rounds
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(
out_vectors,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
)
logits = self._split_seq_length_dim_to(
logits,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
).unsqueeze(-1)
probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
# free memory
del probs_vectors
# free memory
del logits
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
), "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`."
# print(out_vectors.view(-1, sequence_length, head_dim).size())
# import pdb;pdb.set_trace()
# out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
return out_vectors.view(-1, sequence_length, head_dim)[:,:orig_sequence_length,:].contiguous()
# return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)
|
bart_ls-main
|
xformers/xformers/components/attention/lsh_reformer.py
|
"""
Simple non-overlapping local block attention with a few global tokens per block, used to test how strong the locality inductive bias is in these tasks.
"""
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from torch import Tensor
import torch.nn.functional as F
from functools import partial, reduce
from inspect import isfunction
from operator import mul
from xformers.components.attention import Attention, AttentionConfig, register_attention
@dataclass
class BlockAttentionConfig(AttentionConfig):
block_size: int
num_heads: int
require_key_mask: bool
num_global_tokens: int # global tokens per block
@register_attention("block", BlockAttentionConfig)
class BlockAttention(Attention):
def __init__(
self,
dropout: float,
num_heads: int,
block_size: int = 1024, #
num_global_tokens: int = 64,
*args, **kwargs
):
super().__init__()
self.bucket_size = block_size
self.drop_attn = nn.Dropout(dropout)
self.num_head = num_heads
self.num_global_tokens = num_global_tokens
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
*args, **kwargs
):
# q, k, v: (B * nh, S, hs)
bh = q.size(0)
orig_seq_len = q.size(1)
bsz = bh // self.num_head
head_dim = q.size(-1)
assert key_padding_mask is not None
key_padding_mask = key_padding_mask.to(q)
# adding global token masks
if self.num_global_tokens > 0:
for block_idx in range(key_padding_mask.shape[1] // self.bucket_size):
block_start = block_idx * self.bucket_size
key_padding_mask[:,block_start:block_start + self.num_global_tokens] = -1
# global attention tokens
extra_attention_mask = key_padding_mask < 0
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
max_num_extra_indices_per_batch = num_extra_indices_per_batch.max()
hard_mask = key_padding_mask == 1
if max_num_extra_indices_per_batch <= 0:
extra_attention_mask = None
else:
extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True)
zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device)
# mask indicating which values are actually going to be padding
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1)
# 2) location of the non-padding values in the selected global attention
selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True)
# 3) location of the padding values in the selected global attention
selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True)
# every token attend to global tokens
if extra_attention_mask is not None:
selected_k = k.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, head_dim)
k_splited = k.view(bsz, self.num_head, -1, head_dim).transpose(1,2)
q_splited = q.view(bsz, self.num_head, -1, head_dim).transpose(1,2)
v_splited = v.view(bsz, self.num_head, -1, head_dim).transpose(1,2)
selected_k[selection_padding_mask_nonzeros] = k_splited[extra_attention_mask_nonzeros]
# (bsz, seq_len, num_heads, max_num_extra_indices_per_batch)
selected_attn_weights = torch.einsum('blhd,bshd->blhs', (q_splited, selected_k)) * (head_dim ** -0.5)
selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000
attn_weights_over_g_tokens = selected_attn_weights.transpose(1,2)
selected_v = v.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, head_dim)
selected_v[selection_padding_mask_nonzeros] = v_splited[extra_attention_mask_nonzeros]
selected_v = selected_v.transpose(1,2).contiguous().view(bsz*self.num_head, max_num_extra_indices_per_batch, head_dim)
tgt_len = k.size(1)
buckets = q.shape[1] // self.bucket_size
b_q = bucket(buckets, q)
b_k, b_v = map(partial(bucket, buckets), (k, v)) # BH * bct * n_b * D
dots = torch.einsum('buie,buje->buij', b_q, b_k) * (head_dim ** -0.5)
mask_value = -10000
# # mask
# if key_padding_mask is not None:
q_mask = default(key_padding_mask.eq(0), lambda: torch.ones((bsz, tgt_len), device=q.device).bool())
# 1 means not masking
kv_mask = q_mask
mq, mk = bucket(buckets, q_mask), bucket(buckets, kv_mask) # B * bkt * n_b
expand_head_and_merge_into_batch = lambda x: merge_dims(0, 1, expand_dim(x.unsqueeze(1), 1, self.num_head))
mq, mk = map(expand_head_and_merge_into_batch, (mq, mk)) # BH * bkt * n_b
mask = mq[:, :, :, None] * mk[:, :, None, :]
dots.masked_fill_(~mask, mask_value)
block_attn_weights = dots.view(bsz*self.num_head, -1, self.bucket_size)
if extra_attention_mask is not None:
attn_weights_over_g_tokens = attn_weights_over_g_tokens.view(bsz*self.num_head, -1, max_num_extra_indices_per_batch)
all_attn = torch.cat([block_attn_weights, attn_weights_over_g_tokens], dim=-1)
else:
all_attn = block_attn_weights
all_attn_probs = F.softmax(all_attn, dim=-1)
all_attn_probs = self.drop_attn(all_attn_probs)
C = 0
# calculate block attention
block_attn_probs = all_attn_probs[:, :, :block_attn_weights.shape[-1]]
block_attn_probs = block_attn_probs.view(bsz*self.num_head, -1, self.bucket_size, self.bucket_size)
C += block_attn_probs.matmul(b_v).view(bsz*self.num_head, -1, head_dim)
if extra_attention_mask is not None:
attn_probs_over_g_tokens = all_attn_probs[:,:,-attn_weights_over_g_tokens.shape[-1]:]
C += attn_probs_over_g_tokens.matmul(selected_v)
# global tokens to attend all other tokens
selected_q = q_splited.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, head_dim)
selected_q[selection_padding_mask_nonzeros] = q_splited[extra_attention_mask_nonzeros]
g2all_attn_weights = selected_q.transpose(1,2).matmul(k_splited.permute(0,2,3,1)) * (head_dim ** -0.5)
g2all_attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000
if hard_mask is not None:
g2all_attn_weights = g2all_attn_weights.masked_fill(
hard_mask.unsqueeze(1).unsqueeze(2),
-10000,
)
g2all_attn_probs_float = F.softmax(g2all_attn_weights, dim=-1, dtype=torch.float32)
g2all_attn_probs = self.drop_attn(g2all_attn_probs_float.type_as(g2all_attn_weights))
g2all_attn = g2all_attn_probs.matmul(v.view(bsz, self.num_head, -1, head_dim)) # (batch_size, self.num_head, max_num_extra_indices_per_batch, head_dim)
nonzero_global_attn = g2all_attn[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]]
C = C.view(bsz, self.num_head, -1, head_dim)
C[extra_attention_mask_nonzeros[0],:,extra_attention_mask_nonzeros[1]] = nonzero_global_attn
C = C.view(bsz*self.num_head, -1, head_dim)
C = C[:,:orig_seq_len]
return C
def expand_dim(t, dim, k):
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def default(x, d):
if x is None:
return d if not isfunction(d) else d()
return x
def bucket(buckets, t, dim=1):
shape = list(t.shape)
shape[dim:dim+1] = [buckets, -1]
return t.reshape(*shape)
def unbucket(t, dim=1):
shape = list(t.shape)
shape[dim:dim+2] = [-1]
return t.reshape(*shape)
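# ---------------------------------------------------------------------------
# Hedged sketch (editor addition): how the helpers above reshape a per-head
# key padding mask before the block-wise einsum in BlockAttention.forward.
# Sizes are illustrative only.
if __name__ == "__main__":
    num_heads, bsz, seq_len, buckets = 2, 3, 8, 2
    key_mask = torch.zeros(bsz, seq_len)                 # 0 = keep
    q_mask = key_mask.eq(0)                              # (B, S) boolean
    mq = bucket(buckets, q_mask)                         # (B, buckets, S // buckets)
    mq = merge_dims(0, 1, expand_dim(mq.unsqueeze(1), 1, num_heads))
    pairwise = mq[:, :, :, None] * mq[:, :, None, :]     # (B * nh, buckets, blk, blk)
    print(mq.shape, pairwise.shape)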
|
bart_ls-main
|
xformers/xformers/components/attention/block.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from xformers.components.attention import Attention, AttentionConfig, register_attention
from xformers.components.attention.feature_maps import (
FeatureMap,
FeatureMapType,
SMHyperbolic,
SMOrf,
SMReg,
)
@dataclass
class FavorAttentionConfig(AttentionConfig):
causal: Optional[bool]
dim_features: Optional[int] = None # The dimensions of the random features
dim_head: Optional[
int
] = None # The embedding dimension of the inputs. Only useful to get a dim_features estimate
iter_before_redraw: Optional[
int
] = None # The number of iterations before the random features are re-drawn from scratch
feature_map: Optional[FeatureMapType] = None
@register_attention("favor", FavorAttentionConfig)
class FavorAttention(Attention):
def __init__(
self,
causal: bool = False,
dropout: float = 0.0,
dim_features: int = 256,
dim_head: Optional[int] = None,
iter_before_redraw: Optional[int] = None,
feature_map_type: FeatureMapType = FeatureMapType.SMReg,
normalize_inputs: bool = False,
*_,
**__,
):
r"""
Kernelized attention, as proposed in Performers_
("Rethinking attention with performers." K. Choromanski et al. (2020).).
FAVOR stands for "Fast Attention Via positive Orthogonal Random features"
Args:
dropout (float): the probability of an output to be randomly dropped at training time
dim_features (int): the dimension of the random features space
iter_before_redraw (int): the number of steps (forward calls) before a redraw of the features
feature_map_type (FeatureMapType): the type of feature map being used,
for instance orthogonal random features.
.. _Performers: https://arxiv.org/pdf/2009.14794v1.pdf
"""
super().__init__()
self.causal = causal
self.iter_before_redraw = (
(2 * iter_before_redraw)
if iter_before_redraw is not None
else iter_before_redraw
) # This will be used for both key and query
self.normalize_inputs = normalize_inputs
self.feature_map_type = feature_map_type
self.attn_drop = nn.Dropout(dropout, inplace=True)
# Setup dimension-dependent variables
# Reasonable dimension default
if dim_features is None:
assert dim_head is not None, "dim_features or dim_head needs to be passed"
self.dim_features = math.ceil(dim_head * (1 + math.log2(dim_head)))
self.dim_features = 2 * (
self.dim_features // 2
) # needs to be even for some variants
logging.info(
f"FAVOR: Automatically setting the random mapping dimension to {self.dim_features} from {dim_head}"
)
else:
self.dim_features = dim_features
feature_map_constructor = {
FeatureMapType.SMHyp: SMHyperbolic,
FeatureMapType.SMReg: SMReg,
FeatureMapType.SMOrf: SMOrf,
}[self.feature_map_type]
feature_settings = {
"dim_features": self.dim_features,
"iter_before_redraw": self.iter_before_redraw,
"normalize_inputs": self.normalize_inputs,
}
self.feature_map: FeatureMap = feature_map_constructor(**feature_settings) # type: ignore
@staticmethod
def _maybe_promote(x: torch.Tensor) -> torch.Tensor:
# Only promote fp16 buffers, bfloat16 would be fine for instance
return x.float() if x.dtype == torch.float16 else x
@staticmethod
def _causal_attention(
k_prime: torch.Tensor, q_prime: torch.Tensor, v: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
# Algorithm 1 in the paper
ref_v = torch.ones_like(v.unsqueeze(2)) # BATCH x SEQ x 1 x EMB
Gps = k_prime.unsqueeze(3) * v.unsqueeze(2)
Grenorm = k_prime.unsqueeze(3) * ref_v
# Consolidate against the feature dimension
att_raw = torch.einsum("bcfe,bcf->bce", Gps, q_prime)
att_norm = torch.einsum("bcfe,bcf->bce", Grenorm, q_prime)
# Cumulative sum over the sequence
att_raw = att_raw.cumsum(2)
att_norm = att_norm.cumsum(2)
return att_raw, att_norm
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
*_,
**__,
):
# Project key and queries onto the feature map space
k_prime = self.feature_map(k)
q_prime = self.feature_map(q)
with autocast(enabled=False):
# The softmax kernel approximation for Favor will easily overflow
# Force the computations here to stay in fp32 for numerical stability
# Note that the dimensions are vastly reduced when compared to scaled_dot_product
k_prime = self._maybe_promote(k_prime)
q_prime = self._maybe_promote(q_prime)
v = self._maybe_promote(v)
if not self.causal:
att_normalization = q_prime @ (
k_prime.transpose(-2, -1) @ torch.ones_like(v)
)
att_raw = q_prime @ (k_prime.transpose(-2, -1) @ v)
else:
# Actually compute attention
att_raw, att_normalization = self._causal_attention(k_prime, q_prime, v)
# Normalize
att = att_raw / att_normalization
if self.attn_drop is not None:
att = self.attn_drop(att)
return att
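# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): the FAVOR+ approximation only needs
# plain torch tensors, so a CPU smoke test is possible. dim_features=16 and
# dim_head=8 are illustrative values, not recommended settings.
if __name__ == "__main__":
    torch.manual_seed(0)
    attn = FavorAttention(causal=False, dropout=0.0, dim_features=16, dim_head=8)
    q = torch.randn(2, 32, 8)   # (batch * heads, seq_len, head_dim)
    out = attn(q, q, q)
    print(out.shape)            # expected: torch.Size([2, 32, 8])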
|
bart_ls-main
|
xformers/xformers/components/attention/favor.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
from functools import partial, reduce
from inspect import isfunction
from operator import mul
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from xformers.components.attention import Attention, AttentionConfig, register_attention
@dataclass
class SinkhornSelfAttentionConfig(AttentionConfig):
block_size: int
temperature: float
sinkhorm_iter: int
num_heads: int
require_key_mask: bool
@register_attention("sinkhorn", SinkhornSelfAttentionConfig)
class SinkhornAttention(Attention):
def __init__(
self,
dropout: float,
num_heads: int,
block_size: int = 128,
temperature: float = 0.7,
sinkhorm_iter: int = 7,
*args, **kwargs
):
"""
Sparse Sinkhorn Attention
https://arxiv.org/abs/2002.11296
Code largely based on https://github.com/lucidrains/sinkhorn-transformer
        The paper's notation is kept wherever possible
        TODO: currently only supports encoder-only settings
"""
super().__init__()
self.bucket_size = block_size
self.sinkhorn_iter = sinkhorm_iter
self.temperature = temperature
self.drop_attn = nn.Dropout(dropout)
self.num_head = num_heads
self.sort_net = AttentionSortNet(block_size, temperature, sinkhorm_iter)
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
*args, **kwargs
):
# q, k, v: (B * nh, S, hs)
bh = q.size(0)
orig_seq_len = q.size(1)
bsz = bh // self.num_head
head_dim = q.size(-1)
assert key_padding_mask is not None
key_padding_mask = key_padding_mask.to(q)
key_padding_mask[:,0] = -1
# pad the input length to factors of bucket size
def _pad_to_window_size(x, window_size):
seq_len = x.size(-2)
pad_len = (window_size - seq_len % window_size) % window_size
return F.pad(x, (0,0,0,pad_len), value=0), pad_len
q, _ = _pad_to_window_size(q, self.bucket_size)
k, _ = _pad_to_window_size(k, self.bucket_size)
v, _ = _pad_to_window_size(v, self.bucket_size)
if key_padding_mask.shape[1] % self.bucket_size != 0:
pad_len = (self.bucket_size - key_padding_mask.shape[1] % self.bucket_size) % self.bucket_size
# key padding mask: 1 means padding tokens
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1)
# global attention tokens
extra_attention_mask = key_padding_mask < 0
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
max_num_extra_indices_per_batch = num_extra_indices_per_batch.max()
hard_mask = key_padding_mask == 1
if max_num_extra_indices_per_batch <= 0:
extra_attention_mask = None
else:
extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True)
zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device)
# mask indicating which values are actually going to be padding
num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1)
selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1)
# 2) location of the non-padding values in the selected global attention
selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True)
# 3) location of the padding values in the selected global attention
selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True)
# every token attend to global tokens
if extra_attention_mask is not None:
selected_k = k.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, head_dim)
k_splited = k.view(bsz, self.num_head, -1, head_dim).transpose(1,2)
q_splited = q.view(bsz, self.num_head, -1, head_dim).transpose(1,2)
v_splited = v.view(bsz, self.num_head, -1, head_dim).transpose(1,2)
selected_k[selection_padding_mask_nonzeros] = k_splited[extra_attention_mask_nonzeros]
# (bsz, seq_len, num_heads, max_num_extra_indices_per_batch)
selected_attn_weights = torch.einsum('blhd,bshd->blhs', (q_splited, selected_k)) * (head_dim ** -0.5)
selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000
attn_weights_over_g_tokens = selected_attn_weights.transpose(1,2)
selected_v = v.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, head_dim)
selected_v[selection_padding_mask_nonzeros] = v_splited[extra_attention_mask_nonzeros]
selected_v = selected_v.transpose(1,2).contiguous().view(bsz*self.num_head, max_num_extra_indices_per_batch, head_dim)
tgt_len = k.size(1)
buckets = q.shape[1] // self.bucket_size
b_q = bucket(buckets, q)
b_k, b_v = map(partial(bucket, buckets), (k, v)) # BH * bct * n_b * D
R = self.sort_net(q, k)
R = R.type_as(q).to(q)
b_k_r = reorder_buckets(b_k, R).reshape(bh, buckets, -1, head_dim)
b_v_r = reorder_buckets(b_v, R).reshape(bh, buckets, -1, head_dim)
b_k = torch.cat((b_k_r, b_k), dim=2)
b_v = torch.cat((b_v_r, b_v), dim=2)
dots = torch.einsum('buie,buje->buij', b_q, b_k) * (head_dim ** -0.5)
mask_value = -10000
# # mask
# if key_padding_mask is not None:
q_mask = default(key_padding_mask.eq(0), lambda: torch.ones((bsz, tgt_len), device=q.device).bool())
# 1 means not masking
kv_mask = q_mask
mq, mk = bucket(buckets, q_mask), bucket(buckets, kv_mask) # B * bkt * n_b
expand_head_and_merge_into_batch = lambda x: merge_dims(0, 1, expand_dim(x.unsqueeze(1), 1, self.num_head))
mq, mk = map(expand_head_and_merge_into_batch, (mq, mk)) # BH * bkt * n_b
mk_r = batched_index_select(mk, R.abs().argmax(dim=-1))
mk_r = mk_r.reshape(bh, buckets, -1)
mk = torch.cat((mk_r, mk), dim=2)
mask = mq[:, :, :, None] * mk[:, :, None, :]
dots.masked_fill_(~mask, mask_value)
del mask
sinkhorn_attn_weights = dots.view(bsz*self.num_head, -1, self.bucket_size*2)
attn_weights_over_g_tokens = attn_weights_over_g_tokens.view(bsz*self.num_head, -1, max_num_extra_indices_per_batch)
all_attn = torch.cat([sinkhorn_attn_weights, attn_weights_over_g_tokens], dim=-1)
all_attn_probs = all_attn.softmax(dim=-1)
all_attn_probs = self.drop_attn(all_attn_probs)
C = 0
# calculate block attention
block_attn_probs = all_attn_probs[:, :, :sinkhorn_attn_weights.shape[-1]]
block_attn_probs = block_attn_probs.view(bsz*self.num_head, -1, self.bucket_size, 2*self.bucket_size)
C += block_attn_probs.matmul(b_v).view(bsz*self.num_head, -1, head_dim)
if extra_attention_mask is not None:
attn_probs_over_g_tokens = all_attn_probs[:,:,-attn_weights_over_g_tokens.shape[-1]:]
C += attn_probs_over_g_tokens.matmul(selected_v)
# global tokens attend to all other tokens
selected_q = q_splited.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_head, head_dim)
selected_q[selection_padding_mask_nonzeros] = q_splited[extra_attention_mask_nonzeros]
g2all_attn_weights = selected_q.transpose(1,2).matmul(k_splited.permute(0,2,3,1)) * (head_dim ** -0.5)
g2all_attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000.0
if hard_mask is not None:
g2all_attn_weights = g2all_attn_weights.masked_fill(
hard_mask.unsqueeze(1).unsqueeze(2),
-10000.0,
)
g2all_attn_probs_float = F.softmax(g2all_attn_weights, dim=-1, dtype=torch.float32)
g2all_attn_probs = self.drop_attn(g2all_attn_probs_float.type_as(g2all_attn_weights))
g2all_attn = g2all_attn_probs.matmul(v.view(bsz, self.num_head, -1, head_dim)) # (batch_size, self.num_head, max_num_extra_indices_per_batch, head_dim)
nonzero_global_attn = g2all_attn[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]]
C = C.view(bsz, self.num_head, -1, head_dim)
C[extra_attention_mask_nonzeros[0],:,extra_attention_mask_nonzeros[1]] = nonzero_global_attn
C = C.view(bsz*self.num_head, -1, head_dim)
C = C[:,:orig_seq_len]
return C
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
def expand_dim(t, dim, k):
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def default(x, d):
if x is None:
return d if not isfunction(d) else d()
return x
def bucket(buckets, t, dim=1):
shape = list(t.shape)
shape[dim:dim+1] = [buckets, -1]
return t.reshape(*shape)
def reorder_buckets(t, r):
return torch.einsum('buv,bvtd->butd', r, t)
def unbucket(t, dim=1):
shape = list(t.shape)
shape[dim:dim+2] = [-1]
return t.reshape(*shape)
class AttentionSortNet(nn.Module):
def __init__(self, bucket_size, temperature, sinkhorn_iter):
super().__init__()
self.bucket_size = bucket_size
self.temperature = temperature
self.sinkhorn_iter = sinkhorn_iter
def forward(self, q, k, topk=1):
dim = q.size(-1)
buckets = q.shape[1] // self.bucket_size
kv_buckets = k.shape[1] // self.bucket_size
b_q = bucket(buckets, q)
b_k = bucket(kv_buckets, k)
sq = b_q.mean(dim=2) # TODO original paper uses sum
sk = b_k.mean(dim=2)
R = torch.einsum('bie,bje->bij', sq, sk).to(q) * (dim ** -0.5)
return gumbel_sinkhorn(F.relu(R), self.sinkhorn_iter, self.temperature)
def log(t, eps=1e-6):
return torch.log(t + eps)
def gumbel_sinkhorn(r, n_iters=8, temperature=0.7):
r = log(r)
gumbel = sample_gumbel(r.shape, r.device, r.dtype)
r = (r + gumbel) / temperature
return sinkhorn_sorting_operator(r, n_iters)
def sample_gumbel(shape, device, dtype, eps=1e-6):
u = torch.empty(shape, device=device, dtype=dtype).uniform_(0, 1)
return -log(-log(u, eps), eps)
def sinkhorn_sorting_operator(r, n_iters=8):
n = r.shape[1]
for _ in range(n_iters):
r = r - torch.logsumexp(r, dim=2, keepdim=True)
r = r - torch.logsumexp(r, dim=1, keepdim=True)
return torch.exp(r)
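# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# A quick sanity check of the Sinkhorn normalization defined above: starting
# from a random positive score matrix, `gumbel_sinkhorn` should return a near
# doubly-stochastic matrix (rows and columns summing to ~1), which is the soft
# bucket permutation that AttentionSortNet relies on. The shapes below are
# example assumptions, e.g. (batch * heads, buckets, buckets).
if __name__ == "__main__":
    import torch
    torch.manual_seed(0)
    scores = torch.rand(2, 4, 4)
    R = gumbel_sinkhorn(scores, n_iters=16, temperature=0.7)
    print(R.sum(dim=1))  # columns sum to 1 exactly (last normalization step)
    print(R.sum(dim=2))  # rows sum to ~1 (approximate)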
|
bart_ls-main
|
xformers/xformers/components/attention/sinkhorn.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from dataclasses import asdict, dataclass
from typing import Optional, Type, TypeVar
import torch
import torch.nn as nn
from xformers.components.attention import AttentionMask
@dataclass
class AttentionConfig:
"""Parameters required for all Attentions.
Can accept and store extra parameters.
"""
name: str # the registered name for this attention mechanism
dropout: float # dropout probability
Self = TypeVar("Self", bound="Attention")
# Define the common interface, every attention block needs to derive from it
class Attention(nn.Module, metaclass=ABCMeta):
r"""The base Attention mechanism, which is typically a sub-part of the multi-head attention"""
_causal_mask: Optional[AttentionMask] = None
@abstractmethod
def __init__(self, dropout: Optional[float] = None, *args, **kwargs):
super().__init__()
# Requires the inputs to be projected
self.requires_input_projection = True
# Whether the head dimension needs to be present (if not it can be folded into the batch dimension)
self.requires_head_dimension = False
# key padding mask and attention mask must be passed in as separate arguments instead of a merged attention mask
self.requires_separate_masks = False
# Requires that K and Q have the same sequence length
self.requires_same_k_q_dimensions = False
# Whether the attention owns the single head/multihead mechanism
# so that the MHA wrapper should skip it
self.requires_skip_multi_head = False
@classmethod
def from_config(cls: Type[Self], config: AttentionConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
@abstractmethod
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, *args, **kwargs
) -> torch.Tensor:
raise NotImplementedError
@staticmethod
def _maybe_pad_sequence(x: torch.Tensor, mask: torch.Tensor):
"""
If the sequence is shorter than the mask, return a padded view
"""
if x.shape[-2] != mask.shape[-1]:
assert x.shape[-2] < mask.shape[-1], (
"Sequence is bigger than the provided mask, cannot infer what to do with it."
" Please update your attention mask"
)
pad_size = (0, 0, 0, mask.shape[-1] - x.shape[-2], 0, 0)
return torch.nn.functional.pad(x, pad_size, mode="constant", value=0.0)
return x
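# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# A minimal Attention subclass implementing plain scaled dot-product attention,
# only to illustrate the interface above (implement __init__ and forward).
# A real block would also be registered so that it can be built from a config;
# that registry machinery lives elsewhere in the library. Shapes are example
# assumptions, e.g. (batch * heads, seq, head_dim).
if __name__ == "__main__":
    class _NaiveScaledDotProduct(Attention):
        def __init__(self, dropout: float = 0.0, *args, **kwargs):
            super().__init__()
            self.drop_attn = nn.Dropout(dropout)
        def forward(
            self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, *args, **kwargs
        ) -> torch.Tensor:
            att = (q @ k.transpose(-2, -1)) * (q.shape[-1] ** -0.5)
            return self.drop_attn(att.softmax(dim=-1)) @ v
    attn = _NaiveScaledDotProduct(dropout=0.1)
    q = k = v = torch.randn(2, 16, 32)
    print(attn(q, k, v).shape)  # torch.Size([2, 16, 32])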
|
bart_ls-main
|
xformers/xformers/components/attention/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from .base import FeatureMap, FeatureMapConfig
from .softmax import NormDistribution, SMHyperbolic, SMOrf, SMReg
class FeatureMapType(str, Enum):
SMOrf = "sm_orf"
SMHyp = "sm_hyp"
SMReg = "sm_reg" # regularized softmax kernel
__all__ = [
"SMOrf",
"SMReg",
"SMHyperbolic",
"NormDistribution",
"FeatureMapConfig",
"FeatureMap",
]
|
bart_ls-main
|
xformers/xformers/components/attention/feature_maps/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from enum import Enum, auto
from typing import Optional
import torch
from torch.autograd.profiler import record_function
from .base import FeatureMap
"""
A set of feature maps which approximate the softmax kernel, as per the Performers_ paper.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
class NormDistribution(Enum):
Xi = auto()
Uniform = auto()
class SoftMaxPositiveEstimators(FeatureMap):
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int],
normalize_inputs: bool = False,
epsilon: float = 1e-6,
softmax_temp: float = -1,
):
super().__init__(dim_features, iter_before_redraw, normalize_inputs, epsilon)
self.softmax_temp = softmax_temp
self.offset = -1.0
# Handle the scaling from all kernels by √m.
# This normalizes for all the feature maps involved
self.h_scale = math.log(math.sqrt(self.dim_features))
def pre_scale(self, x: torch.Tensor) -> torch.Tensor:
with record_function("feature_map::pre_scale"):
# Re-draw counting logic
if (
(
self.iter_before_redraw is not None
and self._iter_counter > self.iter_before_redraw
)
or self.features is None
or self.features.device != x.device
):
# The feature map is actually using half the dimension, we'll concatenate + and - features
self._iter_counter = 1
self.features = self._get_feature_map(
x.shape[-1], self.dim_feature_map, x.device
)
assert self.features is not None
if self.features.dtype != x.dtype:
self.features = self.features.to(x.dtype)
self._iter_counter += 1
# Normalization / softmax
if self.softmax_temp < 0:
# A = exp(QK.t/√d), so each input will be scaled by √√d
self.softmax_temp = x.shape[-1] ** -0.25
x_scaled = x * self.softmax_temp
# Compute the scaling factors in logspace, applied from within the exponential
# - diminish possible exponential overflow
# - remove a multiply across the batch, replace by an addition
norm_x_2 = torch.einsum("...d,...d->...", x_scaled, x_scaled).unsqueeze(-1)
self.offset = -0.5 * norm_x_2 - self.h_scale + self.epsilon
if self.normalize_inputs:
# L0 normalize the exponential term, which can be useful for numerical stability
# This ensures that features ± offset stay below 1
self.offset -= norm_x_2.max(1, keepdim=True)[0]
# Return the scaled inputs, the rest depends on the kernel being used
return x_scaled
@staticmethod
@torch.no_grad()
def _get_random_ortho_matrix(
blocks: int,
dim: int,
device: torch.device,
norm_distribution: NormDistribution = NormDistribution.Uniform,
) -> torch.Tensor:
r"""
Generate a random matrix whose rows are exactly orthonormal
"How to generate random matrices from the classical compact groups", Mezzadri, 2007
https://arxiv.org/pdf/math-ph/0609050v2.pdf
.. note: the typical qr decomposition does not give uniform results, qr decomposition is not
unique and the qr decomposition routines are biased towards numerical stability. See the above
paper for more information.
.. note: this does not follow the original implementation from the Performers authors.
see docs/assets/kde plots to visualize the impact of using the R signs to correct Q
"""
H = torch.randn((blocks, dim, dim), device=device, requires_grad=False)
# Randomly scale the norms of the features, Xi distributed
if norm_distribution == NormDistribution.Xi:
# NOTE: This averages to sqrt(d)
norms = torch.sqrt(torch.einsum("...d,...d->...", H, H))
Q, R = torch.linalg.qr(H)
Q = torch.diag_embed(torch.sign(torch.diagonal(R, dim1=1, dim2=2))) @ Q
# Normalize if need be. Uniform NormDistribution does nothing, Q is already orthonormal
if norm_distribution == NormDistribution.Xi:
return torch.diag_embed(norms) @ Q
return Q
class SMOrf(SoftMaxPositiveEstimators):
"""
"Positive random orthogonal features" softmax estimator,
SM_ort^m+, as proposed in the Performers_ paper, Lemma 1.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
@torch.no_grad()
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
"""
Generate the projection matrix onto the random features
.. note: The heads dimension needs to be taken into account, hence the per-block random matrix
and not uniformly random.
"""
# Get per block random unitary matrices.
# We need enough of them to project the whole input dimension, regardless of the
# requested dimension of the features
features = self._get_random_ortho_matrix(
math.ceil(dim_input / dim_features),
dim_features,
norm_distribution=NormDistribution.Xi,
device=device,
)
return features.flatten(0, 1)[:dim_input]
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Softmax-dimension related scaling, shared for all kernels
x_scaled = super().pre_scale(x)
assert self.features is not None
# Project onto the random feature map.
x_scaled = x_scaled @ self.features
return torch.exp(x_scaled + self.offset)
class SMHyperbolic(SoftMaxPositiveEstimators):
"""
"Positive random features hyperbolic" estimator, SMHyp+,
as proposed in the Performers_ paper, Lemma 1.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int],
normalize_inputs: bool = False,
epsilon: float = 1e-6,
softmax_temp: float = -1,
):
super().__init__(
dim_features, iter_before_redraw, normalize_inputs, epsilon, softmax_temp
)
assert (
dim_features % 2 == 0
), "The feature dimension needs to be even with this kernel"
self.dim_feature_map = self.dim_features // 2
@torch.no_grad()
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
"""
Generate the projection matrix onto the random features
.. note: The heads dimension needs to be taken into account, hence the per-block random matrix
and not uniformly random.
"""
# Get per block random unitary matrices.
# We need enough of them to project the whole input dimension, regardless of the
# requested dimension of the features
features = self._get_random_ortho_matrix(
math.ceil(dim_input / dim_features),
dim_features,
norm_distribution=NormDistribution.Xi,
device=device,
)
return features.flatten(0, 1)[:dim_input]
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Softmax-dimension related scaling, shared for all kernels
x_scaled = super().pre_scale(x)
# Project onto the random feature map, concatenate both + and - results
# This follows Lemma 1 in the original Performers Paper to best approximate a
# softmax kernel (cosh representation)
x_scaled = x_scaled @ self.features
return torch.cat(
[torch.exp(x_scaled + self.offset), torch.exp(-x_scaled + self.offset)],
dim=-1,
)
class SMReg(SoftMaxPositiveEstimators):
"""
"Regularized softmax kernel" estimator, SMREG+, as proposed in the Performers_ paper.
_Performers: "Rethinking attention with performers." K. Choromanski et al. (2020).
https://arxiv.org/pdf/2009.14794v1.pdf
"""
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int],
normalize_inputs: bool = False,
epsilon: float = 1e-6,
softmax_temp: float = -1,
):
super().__init__(
dim_features, iter_before_redraw, normalize_inputs, epsilon, softmax_temp
)
assert (
dim_features % 2 == 0
), "The feature dimension needs to be even with this kernel"
self.dim_feature_map = self.dim_features // 2
@torch.no_grad()
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
"""
Generate the projection matrix onto the random features
.. note: The heads dimension needs to be taken into account, hence the per-block random matrix
and not uniformly random.
"""
# Get per block random unitary matrices.
# We need enough of them to project the whole input dimension, regardless of the
# requested dimension of the features
features = self._get_random_ortho_matrix(
math.ceil(dim_input / dim_features),
dim_features,
norm_distribution=NormDistribution.Uniform,
device=device,
).flatten(0, 1)
norms = math.sqrt(dim_input) * torch.ones(features.shape[0], device=device)
return (torch.diag(norms) @ features)[:dim_input]
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Softmax-dimension related scaling, shared for all kernels
x_scaled = super().pre_scale(x)
# Project onto the random feature map, concatenate both + and - results
# This follows Lemma 1 in the original Performers Paper to best approximate a
# softmax kernel (cosh representation + sample regularization)
x_scaled = x_scaled @ self.features
return torch.cat(
[torch.exp(x_scaled + self.offset), torch.exp(-x_scaled + self.offset)],
dim=-1,
)
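# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# How a positive feature map approximates softmax attention in linear time,
# following the Performer decomposition: softmax(QK^T)V ~ phi(Q)(phi(K)^T V)
# up to a normalization term. dim_features and the tensor shapes below are
# example assumptions, not values mandated by the library.
if __name__ == "__main__":
    B, S, D = 2, 128, 32
    q, k, v = (torch.randn(B, S, D) for _ in range(3))
    feature_map = SMOrf(dim_features=64, iter_before_redraw=None)
    q_prime = feature_map(q)  # positive features, (B, S, dim_features)
    k_prime = feature_map(k)  # same drawn projection is reused for k
    kv = torch.einsum("bsf,bsd->bfd", k_prime, v)
    z = 1.0 / (torch.einsum("bsf,bf->bs", q_prime, k_prime.sum(dim=1)) + 1e-6)
    out = torch.einsum("bsf,bfd,bs->bsd", q_prime, kv, z)
    print(out.shape)  # torch.Size([2, 128, 32])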
|
bart_ls-main
|
xformers/xformers/components/attention/feature_maps/softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import abstractmethod
from dataclasses import asdict, dataclass
from typing import Optional, Type, TypeVar
import torch
"""
Feature maps allow for a given query or key to be encoded in a different space.
"""
Self = TypeVar("Self", bound="FeatureMap")
@dataclass
class FeatureMapConfig:
name: str
dim_features: int
iter_before_redraw: Optional[int]
normalize_inputs: Optional[bool]
epsilon: Optional[float]
class FeatureMap(torch.nn.Module):
def __init__(
self,
dim_features: int,
iter_before_redraw: Optional[int] = None,
normalize_inputs: bool = False,
epsilon: float = 1e-6,
):
super().__init__()
self.dim_features = dim_features
self.dim_feature_map = dim_features
self.iter_before_redraw = iter_before_redraw
self.features: Optional[torch.Tensor] = None
self.epsilon = epsilon
self.normalize_inputs = normalize_inputs
self._iter_counter = 0
@abstractmethod
def _get_feature_map(self, dim_input: int, dim_features: int, device: torch.device):
raise NotImplementedError()
@classmethod
def from_config(cls: Type[Self], config: FeatureMapConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
|
bart_ls-main
|
xformers/xformers/components/attention/feature_maps/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Any, Callable, Dict, Set, Union
from xformers.utils import (
generate_matching_config,
get_registry_decorator,
import_all_modules,
)
from .base import Feedforward, FeedforwardConfig # noqa
# CREDITS: Classy Vision registry mechanism
FEEDFORWARD_REGISTRY: Dict[str, Any] = {}
FEEDFORWARD_CLASS_NAMES: Set[str] = set()
def build_feedforward(config: Union[Dict[str, Any], FeedforwardConfig]):
"""Builds a feedforward from a config.
This assumes a 'name' key in the config which is used to determine what
feedforward class to instantiate. For instance, a config `{"name": "my_feedforward",
"foo": "bar"}` will find a class that was registered as "my_feedforward"
(see :func:`register_feedforward`) and call .from_config on it."""
if not isinstance(config, FeedforwardConfig):
config_instance = generate_matching_config(
config, FEEDFORWARD_REGISTRY[config["name"]].config
)
else:
config_instance = config
return FEEDFORWARD_REGISTRY[config_instance.name].constructor.from_config(
config_instance
)
"""Registers a Feedforward subclass.
This decorator allows xFormers to instantiate a subclass of Feedforward
from a configuration file, even if the class itself is not part of the
xFormers framework. To use it, apply this decorator to a Feedforward
subclass, like this:
.. code-block:: python
@dataclass
class MyConfig:
...
@register_feedforward('my_ff', MyConfig)
class MyFeedforward(Feedforward):
...
To instantiate a feedforward from a configuration file, see :func:`build_feedforward`."""
register_feedforward: Callable[
[str, Any], Callable[[Any], Any]
] = get_registry_decorator(
FEEDFORWARD_REGISTRY, FEEDFORWARD_CLASS_NAMES, Feedforward, FeedforwardConfig
)
try:
from .fused_mlp import FusedMLP # noqa
_fused_mlp_available = True
except ImportError:
_fused_mlp_available = False
from .mlp import MLP # noqa
__all__ = [
"MLP",
"Feedforward",
"build_feedforward",
"register_feedforward",
]
if _fused_mlp_available:
__all__ += ["FusedMLP"]
# automatically import any Python files in the directory
import_all_modules(str(Path(__file__).parent), "xformers.components.feedforward")
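# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# Building a registered feedforward block purely from a config dictionary, as
# user code would do after importing build_feedforward from this package. The
# field values are example assumptions; "name" must match a registered block
# (here the plain MLP) and string activation names are assumed to be accepted.
if __name__ == "__main__":
    import torch
    ff = build_feedforward(
        {
            "name": "MLP",
            "dim_model": 64,
            "dropout": 0.1,
            "activation": "relu",
            "hidden_layer_multiplier": 4,
        }
    )
    x = torch.randn(2, 16, 64)
    print(ff(x).shape)  # torch.Size([2, 16, 64])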
|
bart_ls-main
|
xformers/xformers/components/feedforward/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Optional, Union
import torch
from xformers.components import Activation
from xformers.components.feedforward import (
Feedforward,
FeedforwardConfig,
register_feedforward,
)
_is_fairscale_available = True
try:
import torch.distributed as dist
from fairscale.nn import MOELayer, Top2Gate
from xformers.components.feedforward import MLP
except ImportError:
logging.warning(
"Either FairScale or torch distributed is not available, MixtureOfExperts will not be exposed."
" Please install them if you would like to use MoE"
)
_is_fairscale_available = False
if _is_fairscale_available:
# Credits: initially implemented in FairScale for sanity checking
class RoundRobinGate(torch.nn.Module):
def __init__(self, model_dim, num_experts):
super().__init__()
self.model_dim = model_dim
self.num_experts = num_experts
def forward(self, input):
s = input.shape[0]
assert s % self.num_experts == 0, f"{s} % {self.num_experts} != 0"
capacity = 2 * s // self.num_experts
output = torch.zeros(
s, self.num_experts, capacity, dtype=input.dtype, device=input.device
)
for i in range(s):
output[i, i % self.num_experts, i // self.num_experts] = 1.0
return 0.0, output, output.bool()
class GateConfig(str, Enum):
RoundRobin = "round_robin"
Top2 = "top_2"
# Other gating techniques could be exposed here
@dataclass
class MoEConfig(FeedforwardConfig):
number_of_experts: int
gate: GateConfig
number_of_local_experts: Optional[int] = None
expert_constructor: Optional[Any] = None
hidden_layer_multiplier: Optional[int] = None
group: Optional[Any] = None
@register_feedforward("MixtureOfExperts", MoEConfig)
class MixtureOfExperts(Feedforward):
"""
A MLP variant which uses the "Mixture of Experts" paradigm, as described in Gshard_.
xFormers uses the FairScale_ implementation under the hood.
.. warning: Please note that most of the benefits of MoE are present in a distributed training environment
.. _Gshard: https://arxiv.org/pdf/2006.16668.pdf
.. _FairScale: https://github.com/facebookresearch/fairscale/
"""
def __init__(
self,
dim_model: int,
dropout: float,
activation: Activation,
number_of_experts: int,
gate: Union[GateConfig, torch.nn.Module],
number_of_local_experts: Optional[int] = None,
expert_constructor: Optional[Callable[[], torch.nn.Module]] = None,
hidden_layer_multiplier: Optional[int] = None,
group: Optional[Any] = None,
*_,
**__,
):
super().__init__()
# Handle a possibly uninitialized process group
assert (
dist.is_initialized()
), "Mixture of Experts require torch distributed to be initialized"
if number_of_local_experts is not None:
assert number_of_experts >= number_of_local_experts
else:
if dist.get_world_size() == 1:
logging.warning("Local experts no specified but world size of 1")
logging.warning("Assuming that all experts are local")
number_of_local_experts = number_of_experts
else:
number_of_local_experts = 1
# Programmatically handle the gating technique
if not isinstance(gate, torch.nn.Module):
gate_constructor = {
GateConfig.RoundRobin: RoundRobinGate,
GateConfig.Top2: Top2Gate,
}[gate]
self.gate = gate_constructor(dim_model, number_of_experts)
else:
self.gate = gate
# Programmatically handle the experts
if expert_constructor is None:
multiplier = (
hidden_layer_multiplier
if hidden_layer_multiplier is not None
else 4
)
def expert_constructor() -> torch.nn.Module:
return MLP(dim_model, dropout, activation, multiplier)
assert expert_constructor is not None
local_experts = torch.nn.ModuleList(
[expert_constructor() for _ in range(number_of_local_experts)]
)
self.moe = MOELayer(gate=self.gate, experts=local_experts, group=group)
self.requires_cuda = True
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
# FairScale MoE assumes that the dimensions are [S, B, E]
# xFormers assumes [B, S, E]
return self.moe(inputs.movedim(0, 1)).movedim(0, 1)
|
bart_ls-main
|
xformers/xformers/components/feedforward/mixture_of_experts.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import torch
import torch.nn as nn
from xformers.components import Activation, build_activation
from xformers.components.feedforward import Feedforward, FeedforwardConfig
from . import register_feedforward
@dataclass
class MlpConfig(FeedforwardConfig):
hidden_layer_multiplier: int
@register_feedforward("MLP", MlpConfig)
class MLP(Feedforward):
def __init__(
self,
dim_model: int,
dropout: float,
activation: Activation,
hidden_layer_multiplier: int,
*args,
**kwargs
):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(dim_model, hidden_layer_multiplier * dim_model),
build_activation(activation),
nn.Dropout(dropout),
nn.Linear(hidden_layer_multiplier * dim_model, dim_model),
nn.Dropout(dropout),
)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
return self.mlp(inputs)
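# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# Direct instantiation of the MLP block above; the hyper-parameters are example
# assumptions, and Activation("relu") assumes "relu" is a valid Activation
# value. The block is shape preserving: (batch, seq, dim_model) in and out.
if __name__ == "__main__":
    mlp = MLP(dim_model=64, dropout=0.1, activation=Activation("relu"), hidden_layer_multiplier=4)
    x = torch.randn(2, 16, 64)
    print(mlp(x).shape)  # torch.Size([2, 16, 64])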
|
bart_ls-main
|
xformers/xformers/components/feedforward/mlp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
import torch
import torch.nn as nn
from xformers.components import Activation
from xformers.components.feedforward import (
Feedforward,
FeedforwardConfig,
register_feedforward,
)
if torch.cuda.is_available():
try:
from xformers.triton import FusedDropoutBias
@dataclass
class FusedMlpConfig(FeedforwardConfig):
hidden_layer_multiplier: int
@register_feedforward("FusedMLP", FusedMlpConfig)
class FusedMLP(Feedforward):
"""
A MLP using fused linear layers.
"""
def __init__(
self,
dim_model: int,
dropout: float,
activation: Activation,
hidden_layer_multiplier: int,
*args,
**kwargs,
):
super().__init__()
dim_mlp = hidden_layer_multiplier * dim_model
self.mlp = nn.Sequential(
nn.Linear(in_features=dim_model, out_features=dim_mlp, bias=False),
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize
# the `FusedDropoutBias` import.
FusedDropoutBias(
p=dropout, bias_shape=dim_mlp, activation=activation
),
nn.Linear(in_features=dim_mlp, out_features=dim_model, bias=False),
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize
# the `FusedDropoutBias` import.
FusedDropoutBias(p=dropout, bias_shape=dim_model, activation=None),
)
self.requires_cuda = True
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
return self.mlp(inputs)
except ImportError:
logging.warning("Triton is not available, FusedMLP will not be enabled.")
|
bart_ls-main
|
xformers/xformers/components/feedforward/fused_mlp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from dataclasses import asdict, dataclass
from typing import Optional, Type, TypeVar
import torch.nn as nn
from xformers.components import Activation
Self = TypeVar("Self", bound="Feedforward")
@dataclass
class FeedforwardConfig:
name: str
dim_model: int
dropout: float
activation: Activation
# Define the common interface, every feedforward block needs to derive from it
class Feedforward(nn.Module, metaclass=ABCMeta):
@abstractmethod
def __init__(
self,
dim_model: Optional[int] = None,
dropout: Optional[float] = None,
activation: Optional[Activation] = None,
*args,
**kwargs,
):
super().__init__()
self.requires_cuda = False
@classmethod
def from_config(cls: Type[Self], config: FeedforwardConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
|
bart_ls-main
|
xformers/xformers/components/feedforward/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from xformers.components.positional_embedding import (
PositionEmbedding,
PositionEmbeddingConfig,
register_positional_embedding,
)
@dataclass
class VocabEmbeddingConfig(PositionEmbeddingConfig):
vocab_size: int
dropout: float
@register_positional_embedding("vocab", VocabEmbeddingConfig)
class VocabEmbedding(PositionEmbedding):
def __init__(
self,
dim_model: int,
seq_len: int,
vocab_size: int,
dropout: float = 0.0,
*args,
**kwargs
):
super().__init__()
self.vocab_size = vocab_size
self.dim_model = dim_model
self.dropout = torch.nn.Dropout(p=dropout)
self.position_embeddings = nn.Embedding(seq_len, self.dim_model)
torch.nn.init.normal_(self.position_embeddings.weight, std=0.02)
self.position_ids: Optional[torch.Tensor] = None
self.word_embeddings = nn.Embedding(self.vocab_size, self.dim_model)
torch.nn.init.normal_(self.word_embeddings.weight, std=0.02)
def forward(self, x: torch.Tensor):
position_ids = torch.arange(x.shape[1], dtype=torch.long, device=x.device)[
None, :
].repeat(x.shape[0], 1)
X_token = self.word_embeddings(x)
X_pos = self.position_embeddings(position_ids)
X = X_token + X_pos
X = self.dropout(X)
return X
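# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# Token + learned position embedding over a batch of integer token ids. The
# vocabulary size, sequence length and model dimension below are example
# assumptions; the input sequence just has to be shorter than seq_len.
if __name__ == "__main__":
    emb = VocabEmbedding(dim_model=64, seq_len=128, vocab_size=1000, dropout=0.1)
    token_ids = torch.randint(0, 1000, (2, 32))  # (batch, sequence)
    print(emb(token_ids).shape)  # torch.Size([2, 32, 64])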
|
bart_ls-main
|
xformers/xformers/components/positional_embedding/vocab.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Any, Callable, Dict, Set, Union
from xformers.utils import (
generate_matching_config,
get_registry_decorator,
import_all_modules,
)
from .base import PositionEmbedding, PositionEmbeddingConfig # noqa
# CREDITS: Classy Vision registry mechanism
POSITION_EMBEDDING_REGISTRY: Dict[str, Any] = {}
POSITION_EMBEDDING_CLASS_NAMES: Set[str] = set()
def build_positional_embedding(config: Union[Dict[str, Any], PositionEmbeddingConfig]):
"""Builds a position encoding from a config.
This assumes a 'name' key in the config which is used to determine what
position encoding class to instantiate. For instance, a config `{"name": "my_position_encoding",
"foo": "bar"}` will find a class that was registered as "my_position_encoding"
(see :func:`register_positional_embedding`) and call .from_config on it."""
if not isinstance(config, PositionEmbeddingConfig):
config_instance = generate_matching_config(
config, POSITION_EMBEDDING_REGISTRY[config["name"]].config
)
else:
config_instance = config
return POSITION_EMBEDDING_REGISTRY[config_instance.name].constructor.from_config(
config_instance
)
"""Registers a PositionEncoding subclass.
This decorator allows xFormers to instantiate a subclass of PositionEncoding
from a configuration file, even if the class itself is not part of the
xFormers framework. To use it, apply this decorator to a `PositionEncoding`
subclass, like this:
.. code-block:: python
@dataclass
class MyConfig:
...
@register_positional_embedding('my_encoding', MyConfig)
class MyEncoding(PositionEmbedding):
...
To instantiate a position encoding from a configuration file, see :func:`build_positional_embedding`."""
register_positional_embedding: Callable[
[str, Any], Callable[[Any], Any]
] = get_registry_decorator(
POSITION_EMBEDDING_REGISTRY,
POSITION_EMBEDDING_CLASS_NAMES,
PositionEmbedding,
PositionEmbeddingConfig,
)
from .rotary import RotaryEmbedding # noqa
from .sine import SinePositionalEmbedding # type: ignore # noqa
from .vocab import VocabEmbedding # noqa
__all__ = [
"RotaryEmbedding",
"SinePositionalEmbedding",
"VocabEmbedding",
"build_positional_embedding",
"register_positional_embedding",
]
# automatically import any Python files in the directory
import_all_modules(
str(Path(__file__).parent), "xformers.components.positional_embedding"
)
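# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# Building a registered position encoding from a plain config dictionary, as
# user code would do after importing build_positional_embedding from this
# package. The dimensions are example assumptions; the sine encoding only
# needs the base config fields.
if __name__ == "__main__":
    import torch
    pos_emb = build_positional_embedding({"name": "sine", "dim_model": 64, "seq_len": 128})
    x = torch.randn(2, 32, 64)
    print(pos_emb(x).shape)  # torch.Size([2, 32, 64])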
|
bart_ls-main
|
xformers/xformers/components/positional_embedding/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Silence Mypy errors in this file.
# type: ignore
from __future__ import annotations
import math
from typing import TypeVar
import torch
from pyre_extensions import Generic
from typing_extensions import Literal as L
from xformers.components.positional_embedding import (
PositionEmbedding,
PositionEmbeddingConfig,
register_positional_embedding,
)
N = TypeVar("N", bound=int)
DimModel = TypeVar("DimModel", bound=int)
@register_positional_embedding("sine", PositionEmbeddingConfig)
class SinePositionalEmbedding(PositionEmbedding, Generic[DimModel]):
def __init__(self, dim_model: DimModel, *args, **kwargs):
super().__init__()
self.dim_model: DimModel = dim_model
def forward(
self, x: torch.Tensor[torch.float, N, N]
) -> torch.Tensor[torch.float, N, N, DimModel]:
seq_len = x.shape[1]
pos = (
torch.arange(0, seq_len, device=x.device, dtype=torch.float32)
.unsqueeze(1)
.repeat(1, self.dim_model)
)
dim = (
torch.arange(0, self.dim_model, device=x.device, dtype=torch.float32)
.unsqueeze(0)
.repeat(seq_len, 1)
)
div = torch.exp(-math.log(10000) * (2 * (dim // 2) / self.dim_model))
pos *= div
pos[:, 0::2] = torch.sin(pos[:, 0::2])
pos[:, 1::2] = torch.cos(pos[:, 1::2])
# pyre-ignore[9]: x was declared as N x N but is expected as N * N * 1.
# Handle a non-existing embedding dimension
output: torch.Tensor[torch.float32, N, N, L[1]] = (
x.unsqueeze(-1) if x.ndim == 2 else x
)
return output + pos.unsqueeze(0)
|
bart_ls-main
|
xformers/xformers/components/positional_embedding/sine.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox
# NOTE: Almost the same right now, moving parts to Triton is the next step
from typing import Tuple
import torch
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
@torch.jit.script
def apply_rotary_pos_emb(x, cos, sin):
# NOTE: This could probably be moved to Triton
# Handle a possible sequence length mismatch in between q and k
cos = cos[:, :, : x.shape[-2], :]
sin = sin[:, :, : x.shape[-2], :]
return (x * cos) + (rotate_half(x) * sin)
class RotaryEmbedding(torch.nn.Module):
"""
The rotary position embeddings from RoFormer_ (Su et. al).
A crucial insight from the method is that the query and keys are
transformed by rotation matrices which depend on the relative positions.
Other implementations are available in the Rotary Transformer repo_ and in
GPT-NeoX_; GPT-NeoX was the inspiration for this implementation.
.. _RoFormer: https://arxiv.org/abs/2104.09864
.. _repo: https://github.com/ZhuiyiTechnology/roformer
.. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
.. warning: Please note that this embedding is not registered on purpose, as it is transformative
(it does not create the embedding dimension) and will likely be picked up (imported) on an ad-hoc basis
"""
def __init__(self, dim_model: int, *_, **__):
super().__init__()
# Generate and save the inverse frequency buffer (non trainable)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))
self.register_buffer("inv_freq", inv_freq)
self._seq_len_cached = None
self._cos_cached = None
self._sin_cached = None
def _update_cos_sin_tables(self, x, seq_dimension=1):
seq_len = x.shape[seq_dimension]
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
self._seq_len_cached = seq_len
t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(
self.inv_freq
)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self._cos_cached = emb.cos()[None, None, :, :]
self._sin_cached = emb.sin()[None, None, :, :]
return self._cos_cached, self._sin_cached
def forward(
self, q: torch.Tensor, k: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(
k, seq_dimension=-2
)
return (
apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
)
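# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# Rotary embeddings are applied directly to the query and key tensors; the
# cached cos/sin tables are broadcast against a (batch, heads, seq, head_dim)
# layout and sliced along the sequence dimension. Shapes are example
# assumptions.
if __name__ == "__main__":
    rotary = RotaryEmbedding(dim_model=64)
    q = torch.randn(2, 8, 128, 64)  # (batch, heads, seq, head_dim)
    k = torch.randn(2, 8, 128, 64)
    q_rot, k_rot = rotary(q, k)
    print(q_rot.shape, k_rot.shape)  # shapes unchanged, contents rotated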
|
bart_ls-main
|
xformers/xformers/components/positional_embedding/rotary.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from dataclasses import asdict, dataclass
from typing import Type, TypeVar
import torch.nn as nn
Self = TypeVar("Self", bound="PositionEmbedding")
@dataclass
class PositionEmbeddingConfig:
name: str
dim_model: int
seq_len: int
class PositionEmbedding(nn.Module, metaclass=ABCMeta):
@abstractmethod
def __init__(self, *args, **kwargs) -> None:
super().__init__()
@classmethod
def from_config(cls: Type[Self], config: PositionEmbeddingConfig) -> Self:
# Generate the class inputs from the config
fields = asdict(config)
# Skip all Nones so that default values are used
fields = {k: v for k, v in fields.items() if v is not None}
return cls(**fields)
|
bart_ls-main
|
xformers/xformers/components/positional_embedding/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .utils import _csr_to_coo, _transpose_with_info
def _should_use_coo(a, sparsity):
if not a.is_cuda:
return False
B, M, K = a.shape
# amortize overhead of converting from csr to coo
if B < 32 and M < 4096:
return False
if sparsity > 0.995:
return False
if sparsity < 0.9:
return False
if K > 64:
return False
# let's be overly cautious here for now
return sparsity > 0.97
def _should_use_csr_ge(a, sparsity):
if not a.is_cuda:
return False
return sparsity > 0.99
def _sddmm_func(a, b, row_indices, row_offsets, column_indices):
sparsity = 1 - column_indices.shape[0] / (a.shape[1] * b.shape[1])
if _should_use_coo(a, sparsity):
m = a.shape[-2]
n = b.shape[-2]
# converting from csr to coo has a constant overhead of ~150us
# so only dispatch to it for reasonably large problem sizes
ro, ci = _csr_to_coo(m, n, row_offsets, column_indices)
return torch.ops.xformers.coo_sddmm(a, b, row_indices, ro, ci)
elif _should_use_csr_ge(a, sparsity):
return torch.ops.xformers.csr_sddmm(
a, b, row_indices, row_offsets, column_indices
)
return torch.ops.xformers.sddmm_sputnik(
a, b, row_indices, row_offsets, column_indices
)
class _SparseSoftmax(torch.autograd.Function):
@staticmethod
def forward(ctx, m, n, row_indices, values, row_offsets, column_indices):
out = torch.ops.xformers.sparse_softmax_sputnik(
m, n, row_indices, values, row_offsets, column_indices
)
# note: save out and not values, as an optimization step
ctx.save_for_backward(row_indices, out, row_offsets, column_indices)
ctx.size = (m, n)
return out
@staticmethod
def backward(ctx, grad):
row_indices, out, row_offsets, column_indices = ctx.saved_tensors
m, n = ctx.size
# gradients w.r.t. values
grad = grad.contiguous()
ga = torch.ops.xformers.sparse_softmax_backward_sputnik(
m, n, row_indices, out, grad, row_offsets, column_indices
)
return None, None, None, ga, None, None
class _sddmm(torch.autograd.Function):
@staticmethod
def forward(ctx, a, b, row_indices, row_offsets, column_indices, _transp_info):
out = _sddmm_func(a, b, row_indices, row_offsets, column_indices)
ctx.save_for_backward(
a, b, row_indices, row_offsets, column_indices, *_transp_info
)
return out
@staticmethod
def backward(ctx, grad):
(
a,
b,
row_indices,
row_offsets,
column_indices,
*_transp_info,
) = ctx.saved_tensors
m, n = a.shape[1], b.shape[1]
# gradients w.r.t. values
grad = grad.contiguous()
a = a.contiguous()
b = b.contiguous()
a_grad = torch.ops.xformers.spmm_sputnik(
b, row_indices, grad, row_offsets, column_indices, m
)
(
row_indices_t,
grad_t,
row_offsets_t,
column_indices_t,
) = _transpose_with_info(grad, _transp_info)
b_grad = torch.ops.xformers.spmm_sputnik(
a, row_indices_t, grad_t, row_offsets_t, column_indices_t, n
)
return a_grad, b_grad, None, None, None, None
class _spmm(torch.autograd.Function):
@staticmethod
def forward(
ctx, b, row_indices, values, row_offsets, column_indices, m, _transp_info
):
out = torch.ops.xformers.spmm_sputnik(
b, row_indices, values, row_offsets, column_indices, m
)
ctx.save_for_backward(
b, row_indices, values, row_offsets, column_indices, *_transp_info
)
return out
@staticmethod
def backward(ctx, grad):
(
b,
row_indices,
values,
row_offsets,
column_indices,
*_transp_info,
) = ctx.saved_tensors
k = b.shape[1]
# gradients w.r.t. values
grad = grad.contiguous()
grad_sparse = _sddmm_func(grad, b, row_indices, row_offsets, column_indices)
(
row_indices_t,
values_t,
row_offsets_t,
column_indices_t,
) = _transpose_with_info(values, _transp_info)
grad_dense = torch.ops.xformers.spmm_sputnik(
grad, row_indices_t, values_t, row_offsets_t, column_indices_t, k
)
return grad_dense, None, grad_sparse, None, None, None, None
|
bart_ls-main
|
xformers/xformers/sparse/_csr_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .csr_tensor import SparseCSRTensor # noqa: F401
|
bart_ls-main
|
xformers/xformers/sparse/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
def _coo_to_csr(m, n, row_indices, column_indices):
# assumes coalesced coo
row_offsets = row_indices.bincount(minlength=n).cumsum(0, dtype=row_indices.dtype)
row_offsets = torch.nn.functional.pad(row_offsets, (1, 0))
return row_offsets, column_indices
def _csr_to_coo(m, n, row_offsets, column_indices):
# convert from compressed rows to uncompressed
indices = torch.arange(m, dtype=row_offsets.dtype, device=row_offsets.device)
row_sizes = torch.diff(row_offsets)
row_coo = torch.repeat_interleave(indices, row_sizes.long())
return row_coo, column_indices
def _diffsort(a):
return torch.argsort(torch.diff(a), dim=0, descending=True)
def _get_transpose_info(m, n, row_indices, row_offsets, column_indices):
# strategy:
# - uncompress the rows to have data in COO format
# - get permutation for stable sort of the columns to get the rows for the transposed matrix
# - compress the new rows and return the permutation to be applied on the values
# convert from compressed rows to uncompressed
row_coo, _ = _csr_to_coo(m, n, row_offsets, column_indices)
# get the permutation for the stable sort
# unfortunately PyTorch sort is not stable, so we need to
# do it in two steps
row_offsets_t, perm1 = column_indices.sort(0)
new_columns = row_coo[perm1]
# workaround the lack of stable sorting in PyTorch
perm2 = torch.argsort(new_columns + row_offsets_t * m)
column_indices_t = new_columns[perm2]
# find the final permutation corresponding to the indices of the stable sort
perm = perm1[perm2]
row_offsets_t, _ = _coo_to_csr(m, n, row_offsets_t, column_indices)
row_indices_t = _diffsort(row_offsets_t).int()
return row_indices_t, row_offsets_t, column_indices_t, perm
def _transpose_with_info(values, _transpose_info):
row_indices_t, row_offsets_t, column_indices_t, perm = _transpose_info
values_t = values[:, perm]
return row_indices_t, values_t, row_offsets_t, column_indices_t
def _transpose(m, n, row_indices, values, row_offsets, column_indices):
_transpose_info = _get_transpose_info(
m, n, row_indices, row_offsets, column_indices
)
return _transpose_with_info(values, _transpose_info)
def _nonzero_mask_to_sparse_csr_indices(mask, device):
"""Converts dense 2d matrix to a csr sparse matrix."""
assert len(mask.shape) == 2
index_dtype = torch.int32
# Calculate the offset of each row.
row_offsets = mask.sum(dim=-1, dtype=index_dtype).cumsum(dim=-1, dtype=index_dtype)
row_offsets = torch.nn.functional.pad(row_offsets, (1, 0))
# Create the row indices and sort them.
row_indices = _diffsort(row_offsets).to(index_dtype)
# Extract the column indices for the nonzero values.
column_indices = torch.where(mask)[1].to(index_dtype).contiguous()
row_indices = row_indices.to(device)
row_offsets = row_offsets.to(device)
column_indices = column_indices.to(device)
return row_indices, row_offsets, column_indices
def _dense_to_sparse(matrix, device):
"""Converts dense 2d matrix to a csr sparse matrix."""
assert len(matrix.shape) == 2
value_dtype = torch.float32
# Extract the nonzero values.
mask = matrix != 0
values = matrix[mask].to(dtype=value_dtype, device=device)
row_indices, row_offsets, column_indices = _nonzero_mask_to_sparse_csr_indices(
mask, device
)
return values, row_indices, row_offsets, column_indices
def _round_nnz(mask, divisible_by=4):
nonzero = torch.where(mask)
nnz = nonzero[0].shape[0]
nonzero = tuple(n[: (nnz - nnz % divisible_by)] for n in nonzero)
nm = torch.zeros_like(mask)
nm[nonzero] = True
return nm
def _dense3d_to_sparse(matrix, device):
assert len(matrix.shape) == 3
mask = matrix != 0
if not torch.all(mask == mask[0]):
raise ValueError("Expected the same sparsity pattern over the batch dimension")
# for now, our kernels assume that we have the number of
# nnz to be divisible by 4
mask = _round_nnz(mask[0], divisible_by=4)
mask = mask[None].expand(matrix.shape)
values = matrix[mask].reshape(matrix.shape[0], -1).to(device)
row_indices, row_offsets, column_indices = _nonzero_mask_to_sparse_csr_indices(
mask[0], device
)
return values, row_indices, row_offsets, column_indices
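# --- Usage sketch (illustrative addition, not part of the upstream file) ---
# Round-tripping a small dense matrix through the private CSR helpers above,
# purely to document the data layout: `row_offsets` has one entry per row plus
# one, `column_indices` stores the column of every non-zero, and `values` the
# matching entries. The matrix below is an example assumption.
if __name__ == "__main__":
    dense = torch.tensor([[1.0, 0.0, 2.0], [0.0, 0.0, 3.0]])
    values, row_indices, row_offsets, column_indices = _dense_to_sparse(dense, device="cpu")
    print(row_offsets.tolist())     # [0, 2, 3]
    print(column_indices.tolist())  # [0, 2, 2]
    row_coo, cols = _csr_to_coo(2, 3, row_offsets, column_indices)
    rebuilt = torch.zeros_like(dense)
    rebuilt[row_coo.long(), cols.long()] = values
    print(torch.equal(rebuilt, dense))  # True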
|
bart_ls-main
|
xformers/xformers/sparse/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.ops import masked_matmul
from xformers.sparse import _csr_ops
from xformers.sparse.utils import (
_csr_to_coo,
_dense3d_to_sparse,
_diffsort,
_get_transpose_info,
_transpose_with_info,
)
class SparseCSRTensor(torch.Tensor):
@staticmethod
def __new__(cls, row_offsets, column_indices, values, shape):
kwargs = {}
kwargs["device"] = values.device
kwargs["dtype"] = values.dtype
kwargs["layout"] = values.layout
kwargs["requires_grad"] = values.requires_grad
assert len(shape) == 3
assert torch.__version__ > (1, 10), "SparseCSRTensor requires PyTorch 1.11+"
return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
def __init__(self, row_offsets, column_indices, values, shape):
assert row_offsets.ndim == 1
assert column_indices.ndim == 1
assert values.ndim == 2
self.__row_offsets = row_offsets.contiguous()
self.__row_indices = _diffsort(row_offsets).to(row_offsets.dtype)
self.__column_indices = column_indices.contiguous()
self.__values = values.contiguous()
self.__transp_info = _get_transpose_info(
self.shape[1],
self.shape[2],
self.__row_indices,
self.__row_offsets,
self.__column_indices,
)
def __repr__(self):
return f"sparse_csr_tensor(shape={self.shape}, values={self.__values})"
@classmethod
def from_dense(cls, matrix):
values, row_indices, row_offsets, column_indices = _dense3d_to_sparse(
matrix, matrix.device
)
return cls(row_offsets, column_indices, values, matrix.shape)
@classmethod
def from_sparse_coo(cls, arg0):
"""
assert arg0.is_sparse
x = arg0.coalesce()
rows, cols = x.indices().unbind(0)
vals = x.values()
_coo_to_csr()
"""
pass
@classmethod
def _wrap(
cls, shape, values, row_indices, row_offsets, column_indices, _transp_info
):
matrix = cls.__new__(cls, row_offsets, column_indices, values, shape)
matrix.__values = values
matrix.__row_indices = row_indices
matrix.__row_offsets = row_offsets
matrix.__column_indices = column_indices
matrix.__transp_info = _transp_info
return matrix
@property
def _csr_values(self):
return self.__values
@property
def _csr_row_indices(self):
return self.__row_indices
@property
def _csr_row_offsets(self):
return self.__row_offsets
@property
def _csr_column_indices(self):
return self.__column_indices
@property
def _csr_transp_info(self):
return self.__transp_info
@classmethod
def _bmm(cls, arg0, arg1):
if not (isinstance(arg0, cls) and type(arg1) == torch.Tensor):
return NotImplemented
assert arg0.ndim == 3
assert arg1.ndim == 3
self = arg0
b = arg1
_, m, n = self.shape
row_indices = self.__row_indices
values = self.__values
row_offsets = self.__row_offsets
column_indices = self.__column_indices
out = _csr_ops._spmm.apply(
b, row_indices, values, row_offsets, column_indices, m, self.__transp_info
)
return out
@classmethod
def _softmax(cls, arg0, dim):
if not (dim == -1 or dim == 2):
return NotImplemented
self = arg0
_, m, n = self.shape
row_indices = self.__row_indices
values = self.__values
row_offsets = self.__row_offsets
column_indices = self.__column_indices
out = _csr_ops._SparseSoftmax.apply(
m, n, row_indices, values, row_offsets, column_indices
)
return cls._wrap(
self.shape,
out,
row_indices,
row_offsets,
column_indices,
self.__transp_info,
)
@classmethod
def _transpose(cls, arg0, dim0, dim1):
# TODO: check if need to return this or not
if not (dim0 == 1 or dim0 == -2):
return NotImplemented
if not (dim1 == 2 or dim1 == -1):
return NotImplemented
B, m, n = arg0.shape
values = arg0.__values
(
output_row_indices,
output_values,
output_row_offsets,
output_column_indices,
) = _transpose_with_info(values, arg0.__transp_info)
new_transp_info = _get_transpose_info(
n, m, output_row_indices, output_row_offsets, output_column_indices
)
return cls._wrap(
(B, n, m),
output_values,
output_row_indices,
output_row_offsets,
output_column_indices,
new_transp_info,
)
@classmethod
def _masked_matmul(cls, a, b, mask):
if not (type(a) == torch.Tensor and type(b) == torch.Tensor):
return NotImplemented
assert mask.shape[1] == a.shape[1]
assert mask.shape[2] == b.shape[2]
row_indices = mask.__row_indices
row_offsets = mask.__row_offsets
column_indices = mask.__column_indices
out = _csr_ops._sddmm.apply(
a,
b.transpose(-2, -1),
row_indices,
row_offsets,
column_indices,
mask.__transp_info,
)
# TODO add bias here
return cls._wrap(
mask.shape,
out,
row_indices,
row_offsets,
column_indices,
mask.__transp_info,
)
@classmethod
def _to(cls, arg0, device):
if isinstance(device, str):
device = torch.device(device)
assert isinstance(device, torch.device)
return cls._wrap(
arg0.shape,
arg0.__values.to(device=device),
arg0.__row_indices.to(device=device),
arg0.__row_offsets.to(device=device),
arg0.__column_indices.to(device=device),
tuple(t.to(device=device) for t in arg0.__transp_info),
)
@classmethod
def _copy(cls, arg0, arg1):
if not (isinstance(arg0, cls) and isinstance(arg1, cls)):
return NotImplemented
assert arg0.shape == arg1.shape
av0, av1 = arg0.__values, arg1.__values
av0.resize_as_(av1).copy_(av1)
av0, av1 = arg0.__row_indices, arg1.__row_indices
av0.resize_as_(av1).copy_(av1)
av0, av1 = arg0.__row_offsets, arg1.__row_offsets
av0.resize_as_(av1).copy_(av1)
av0, av1 = arg0.__column_indices, arg1.__column_indices
av0.resize_as_(av1).copy_(av1)
for v0, v1 in zip(arg0.__transp_info, arg1.__transp_info):
v0.resize_as_(v1).copy_(v1)
return arg0
@classmethod
def _equal(cls, arg0, arg1):
if not (isinstance(arg0, cls) and isinstance(arg1, cls)):
return NotImplemented
if arg0.shape != arg1.shape:
return False
if not torch.equal(arg0.__values, arg1.__values):
return False
if not torch.equal(arg0.__row_offsets, arg1.__row_offsets):
return False
if not torch.equal(arg0.__column_indices, arg1.__column_indices):
return False
return True
@classmethod
def _to_dense(cls, arg0):
_, m, n = arg0.shape
shape = arg0.shape
matrix = torch.zeros(shape, dtype=arg0.dtype, device=arg0.device)
row_offsets = arg0.__row_offsets.long()
column_indices = arg0.__column_indices.long()
row_coo, _ = _csr_to_coo(m, n, row_offsets, column_indices)
b_idxs = torch.arange(len(arg0.__values), device=arg0.device)[:, None]
matrix[b_idxs, row_coo, column_indices] = arg0.__values
return matrix
@classmethod
def _binary_op(cls, func, arg0, arg1):
if not (
isinstance(arg0, (cls, int, float)) and isinstance(arg1, (cls, int, float))
):
return NotImplemented
v0, v1 = arg0, arg1
if isinstance(arg0, cls):
v0 = arg0.__values
if isinstance(arg1, cls):
v1 = arg1.__values
# assert arg0.shape == arg1.shape
if isinstance(arg0, cls) and isinstance(arg1, cls):
msg = f"arg0 and arg1 need to have the same sparsity pattern in {func} (for now)"
if not arg0.__row_offsets.shape == arg1.__row_offsets.shape:
raise NotImplementedError(msg)
if not arg0.__column_indices.shape == arg1.__column_indices.shape:
raise NotImplementedError(msg)
if not arg0.__values.shape == arg1.__values.shape:
raise NotImplementedError(msg)
# TODO this is not always true, but is a fast approximation for now
if arg0.__row_offsets is not arg1.__row_offsets:
raise NotImplementedError(msg)
if arg0.__column_indices is not arg1.__column_indices:
raise NotImplementedError(msg)
out = func(v0, v1)
return cls._wrap(
arg0.shape,
out,
arg0.__row_indices,
arg0.__row_offsets,
arg0.__column_indices,
arg0.__transp_info,
)
@classmethod
def _binary_op_slow(cls, func, arg0, arg1):
# assert arg0.shape == arg1.shape
v0, v1 = arg0, arg1
if isinstance(arg0, cls):
v0 = arg0.to_dense()
if isinstance(arg1, cls):
v1 = arg1.to_dense()
out = func(v0, v1)
return cls.from_dense(out)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func in [
torch.Tensor.bmm,
torch.bmm,
torch.Tensor.__matmul__,
torch.matmul,
torch.Tensor.matmul,
]:
assert len(args) == 2
return cls._bmm(args[0], args[1])
if func in [torch.Tensor.softmax, torch.nn.functional.softmax, torch.softmax]:
return cls._softmax(args[0], kwargs["dim"])
if func in [torch.Tensor.transpose, torch.transpose]:
assert len(kwargs) == 0
return cls._transpose(args[0], args[1], args[2])
if func == masked_matmul:
assert len(args) == 3
return cls._masked_matmul(args[0], args[1], args[2])
if func in [
torch.Tensor.add,
torch.add,
torch.Tensor.__add__,
]:
assert len(args) == 2
if not (isinstance(args[0], cls) and isinstance(args[1], cls)):
raise NotImplementedError(
f"{func} with {type(args[0])} and {type(args[1])} not implemented"
)
return cls._binary_op(func, args[0], args[1])
if func in [
torch.Tensor.mul,
torch.mul,
torch.Tensor.__mul__,
]:
assert len(args) == 2
return cls._binary_op(func, args[0], args[1])
if func in [torch.Tensor.logical_and, torch.logical_and, torch.Tensor.__and__]:
assert len(args) == 2
return cls._binary_op_slow(func, args[0], args[1])
if func in [torch.nn.functional.dropout, torch.dropout, torch.dropout_]:
x = args[0]
values = x.__values.clone()
values = func(values, *args[1:], **kwargs)
return cls._wrap(
x.shape,
values,
x.__row_indices,
x.__row_offsets,
x.__column_indices,
x.__transp_info,
)
if func == torch.Tensor.to:
# print(args, kwargs)
assert len(args) >= 2
return cls._to(args[0], args[1])
# return cls._to(args[0], kwargs["device"])
if func in [torch.Tensor.copy_]:
assert len(args) == 2
return cls._copy(args[0], args[1])
if func in [torch.Tensor.equal, torch.equal]:
assert len(args) == 2
return cls._equal(args[0], args[1])
if func == torch.Tensor.to_dense:
assert len(args) == 1
return cls._to_dense(args[0])
if func == torch.Tensor.detach:
x = args[0]
return cls._wrap(
x.shape,
x.__values.detach(),
x.__row_indices,
x.__row_offsets,
x.__column_indices,
x.__transp_info,
)
if func == torch.Tensor.__deepcopy__:
x = args[0]
memo = args[1]
return cls._wrap(
x.shape,
x.__values.__deepcopy__(memo),
x.__row_indices.__deepcopy__(memo),
x.__row_offsets.__deepcopy__(memo),
x.__column_indices.__deepcopy__(memo),
tuple(v.__deepcopy__(memo) for v in x.__transp_info),
)
if func in [torch.Tensor.grad.__get__, torch.Tensor._grad.__get__]:
assert len(args) == 1
assert len(kwargs) == 0
x = args[0]
return cls._wrap(
x.shape,
x.__values.grad,
x.__row_indices,
x.__row_offsets,
x.__column_indices,
x.__transp_info,
)
if func == torch.Tensor.requires_grad_:
func(args[0].__values)
with torch._C.DisableTorchFunction():
ret = func(*args, **kwargs)
# TODO: check this
if func in torch.overrides.get_default_nowrap_functions():
return ret
return torch._tensor._convert(ret, cls)
return NotImplemented
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
return NotImplemented
|
bart_ls-main
|
xformers/xformers/sparse/csr_tensor.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import torch
def init_torch_distributed_local():
if torch.distributed.is_initialized():
return
init_url = "file://" + tempfile.mkstemp()[1]
backend = (
torch.distributed.Backend.NCCL
if torch.cuda.is_available()
else torch.distributed.Backend.GLOO
)
torch.distributed.init_process_group(
backend=backend,
rank=0,
world_size=1,
init_method=init_url,
)
|
bart_ls-main
|
xformers/xformers/helpers/test_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .timm_sparse_attention import TimmSparseAttention # noqa
|
bart_ls-main
|
xformers/xformers/helpers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from xformers.components.attention.core import scaled_dot_product_attention
class TimmSparseAttention(torch.nn.Module):
"""
Almost drop-in replacement for timm attention
but using the sparsity-aware scaled_dot_product_attention from xformers
"""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.0,
proj_drop=0.0,
attn_mask=None,
):
super().__init__()
self.num_heads = num_heads
self.qkv = torch.nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = torch.nn.Dropout(attn_drop)
self.proj = torch.nn.Linear(dim, dim)
self.proj_drop = torch.nn.Dropout(proj_drop)
self.attn_mask = attn_mask
def forward(self, x):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
qkv = qkv.flatten(1, 2)
q, k, v = qkv.unbind()
x = scaled_dot_product_attention(
q, k, v, self.attn_mask, dropout=self.attn_drop
)
x = x.reshape(B, self.num_heads, N, C // self.num_heads)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
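# Minimal usage sketch (added for illustration, not part of the original module).
# Shapes below are arbitrary; the module only requires dim to be divisible by num_heads.
if __name__ == "__main__":
    _dim, _heads = 64, 8
    _attn = TimmSparseAttention(_dim, num_heads=_heads)
    _x = torch.randn(2, 16, _dim)  # (batch, sequence length, embedding dim)
    _out = _attn(_x)
    assert _out.shape == _x.shape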
|
bart_ls-main
|
xformers/xformers/helpers/timm_sparse_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from xformers.components import MultiHeadDispatchConfig # noqa
from xformers.components.attention import AttentionConfig # noqa
from xformers.components.feedforward import FeedforwardConfig # noqa
from xformers.components.positional_embedding import PositionEmbeddingConfig # noqa
from .block_factory import xFormerDecoderBlock # noqa
from .block_factory import xFormerDecoderConfig # noqa
from .block_factory import xFormerEncoderBlock # noqa
from .block_factory import xFormerEncoderConfig # noqa
from .model_factory import xFormer, xFormerConfig # noqa
|
bart_ls-main
|
xformers/xformers/factory/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# register components configs into Hydra ConfigStore
# component config classes could be used to validate configs
import logging
from hydra.core.config_store import ConfigStore
from omegaconf.errors import ValidationError
from xformers.components.attention import ATTENTION_REGISTRY
from xformers.components.feedforward import FEEDFORWARD_REGISTRY
from xformers.components.positional_embedding import POSITION_EMBEDDING_REGISTRY
log = logging.getLogger(__name__)
def import_xformer_config_schema():
"""
    Best effort: OmegaConf supports limited typing, so we may fail to import
    certain config classes. For example, PyTorch typing is not supported.
"""
cs = ConfigStore.instance()
for k, v in {
"ff": FEEDFORWARD_REGISTRY,
"pe": POSITION_EMBEDDING_REGISTRY,
"attention": ATTENTION_REGISTRY,
}.items():
for kk in v.keys():
try:
cs.store(name=f"{kk}_schema", node=v[kk].config, group=f"xformers/{k}")
except ValidationError as e:
log.debug(f"Error registering {kk}_schema, error: {e}")
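# Hedged usage sketch (not in the original file): after calling the helper once at
# startup, the stored nodes can be referenced from a Hydra defaults list, e.g.
# `xformers/attention: scaled_dot_product_schema` (the exact names depend on which
# components are registered in this build).
if __name__ == "__main__":
    import_xformer_config_schema()
    log.info("xformers component schemas registered with the Hydra ConfigStore")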
|
bart_ls-main
|
xformers/xformers/factory/hydra_helper.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from xformers.components import (
LayerNormStyle,
PostNorm,
PreNorm,
Residual,
build_multi_head_attention,
)
from xformers.components.attention.utils import maybe_merge_masks
from xformers.components.feedforward import (
FEEDFORWARD_REGISTRY,
FeedforwardConfig,
build_feedforward,
)
from xformers.components.positional_embedding import (
POSITION_EMBEDDING_REGISTRY,
PositionEmbeddingConfig,
build_positional_embedding,
)
from xformers.utils import generate_matching_config
class LayerPositionBitmask(int, Enum):
First = 0b01
Last = 0b10
Default = 0b11
class LayerPosition:
""" Bitmask to mark this layer as first, last, nothing or both"""
def __init__(self):
self.bitmask = LayerPositionBitmask.Default
def is_first(self):
return bool(self.bitmask & LayerPositionBitmask.First)
def is_last(self):
return bool(self.bitmask & LayerPositionBitmask.Last)
def mark_not_first(self):
self.bitmask &= ~LayerPositionBitmask.First
def mark_not_last(self):
self.bitmask &= ~LayerPositionBitmask.Last
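# Usage note (added comment): a fresh LayerPosition() reports both is_first() and
# is_last() as True; the model factory clears these bits for intermediate layers,
# e.g. pos.mark_not_first() leaves is_first() == False and is_last() == True.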
class BlockType(str, Enum):
Encoder = "encoder"
Decoder = "decoder"
def _get_ln_factory(
d_model: int,
layer_norm_style: Optional[LayerNormStyle],
residual: bool = True,
use_triton: bool = True,
):
def get_layer_wrapper(
d_model: int,
sublayer: nn.Module,
layer_norm_style: Optional[LayerNormStyle],
residual: bool = True,
):
if residual:
return (
Residual(PreNorm(d_model, sublayer, use_triton))
if layer_norm_style == LayerNormStyle.Pre
else PostNorm(d_model, Residual(sublayer), use_triton)
)
return (
PreNorm(d_model, sublayer, use_triton)
if layer_norm_style == LayerNormStyle.Pre
else PostNorm(d_model, sublayer, use_triton)
)
def ln_factory(sublayer: nn.Module):
return get_layer_wrapper(d_model, sublayer, layer_norm_style, residual)
return ln_factory
@dataclass(init=False) # handle constructors explicitly to force type changes
class xFormerBlockConfig:
"""
The configuration structure to define a Transformer block.
This base class is applicable to both encoder and decoder definitions.
This completely defines each of the blocks, for instance in terms of dimensions,
position encoding, pre or post layer norms or reversibility.
"""
dim_model: int
feedforward_config: FeedforwardConfig
position_encoding_config: Optional[PositionEmbeddingConfig]
block_type: BlockType
layer_norm_style: LayerNormStyle
layer_position: LayerPosition
use_triton: bool
reversible: bool
num_layers: int
def __init__(
self,
dim_model: int,
feedforward_config: Dict[str, Any],
position_encoding_config: Optional[Dict[str, Any]],
block_type: BlockType,
layer_norm_style: LayerNormStyle = LayerNormStyle("post"),
reversible: bool = False,
num_layers: int = 1,
layer_position: Optional[LayerPosition] = None,
):
self.dim_model = dim_model
self.block_type = block_type
self.layer_norm_style = layer_norm_style
self.reversible = reversible
self.num_layers = num_layers
# Fill in possible gaps in the config for subparts of the block
self.feedforward_config = generate_matching_config(
feedforward_config,
FEEDFORWARD_REGISTRY[feedforward_config["name"]].config,
)
self.position_encoding_config = (
generate_matching_config(
position_encoding_config,
POSITION_EMBEDDING_REGISTRY[position_encoding_config["name"]].config,
)
if position_encoding_config is not None
else None
)
# Default is that this layer is the only one, so both first and last
if layer_position:
self.layer_position = layer_position
else:
self.layer_position = LayerPosition()
@dataclass(init=False)
class xFormerEncoderConfig(xFormerBlockConfig):
"""
The configuration structure for an encoder block
"""
multi_head_config: Dict[str, Any]
use_triton: bool
def __init__(
self,
dim_model: int,
feedforward_config: Dict[str, Any],
multi_head_config: Dict[str, Any],
position_encoding_config: Optional[Dict[str, Any]] = None,
layer_norm_style: str = "post",
use_triton: bool = True,
**kwargs,
):
# Convenience, fill in duplicated field
try:
if "dim_model" not in multi_head_config.keys():
multi_head_config["dim_model"] = dim_model
if "dim_model" not in feedforward_config.keys():
feedforward_config["dim_model"] = dim_model
if (
position_encoding_config is not None
and "dim_model" not in position_encoding_config.keys()
):
position_encoding_config["dim_model"] = dim_model
except AttributeError:
# A config instance was passed in, this is fine
pass
if "block_type" in kwargs:
assert kwargs["block_type"] == "encoder"
kwargs["block_type"] = BlockType("encoder")
super().__init__(
dim_model=dim_model,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
layer_norm_style=LayerNormStyle(layer_norm_style),
**kwargs,
)
self.multi_head_config = multi_head_config
self.use_triton = use_triton
@dataclass(init=False)
class xFormerDecoderConfig(xFormerBlockConfig):
"""
The configuration structure for a decoder block.
This specifically defines the masked and cross attention mechanisms,
on top of the settings defining all blocks.
"""
multi_head_config_masked: Dict[str, Any] # prior to encoder output
multi_head_config_cross: Dict[str, Any] # cross attention, takes encoder output
def __init__(
self,
dim_model: int,
feedforward_config: Dict[str, Any],
multi_head_config_masked: Dict[str, Any],
multi_head_config_cross: Dict[str, Any],
position_encoding_config: Optional[Dict[str, Any]] = None,
layer_norm_style: str = "post",
use_triton: bool = True,
**kwargs,
):
# Convenience, fill in duplicated field
try:
if "dim_model" not in multi_head_config_masked.keys():
multi_head_config_masked["dim_model"] = dim_model
if "dim_model" not in multi_head_config_cross.keys():
multi_head_config_cross["dim_model"] = dim_model
if "dim_model" not in feedforward_config.keys():
feedforward_config["dim_model"] = dim_model
if (
position_encoding_config is not None
and "dim_model" not in position_encoding_config.keys()
):
position_encoding_config["dim_model"] = dim_model
except AttributeError:
# A config instance was passed in, this is fine
pass
if "block_type" in kwargs.keys():
assert kwargs["block_type"] == "decoder"
kwargs["block_type"] = BlockType("decoder")
super().__init__(
dim_model=dim_model,
feedforward_config=feedforward_config,
position_encoding_config=position_encoding_config,
layer_norm_style=LayerNormStyle(layer_norm_style),
**kwargs,
)
self.multi_head_config_masked = multi_head_config_masked
self.multi_head_config_cross = multi_head_config_cross
self.use_triton = use_triton
class xFormerEncoderBlock(torch.nn.Module):
r""" A vanilla Transformer Encoder block """
def __init__(self, config: xFormerEncoderConfig, **kwargs):
super().__init__()
self.reversible_f = None
self.reversible_g = None
self.layer_norm_style = config.layer_norm_style
self.dim_model = config.dim_model
# If this layer is the first one, and a pose encoding has been requested
self.pose_encoding = (
build_positional_embedding(asdict(config.position_encoding_config))
if config.position_encoding_config and config.layer_position.is_first()
else None
)
# mini helper, builds a LayerNorm with the right Pre/Post config, residuals, and the right dimensions
ln_factory = _get_ln_factory(
config.dim_model, config.layer_norm_style, use_triton=config.use_triton
)
self.mha = build_multi_head_attention(config.multi_head_config)
self.feedforward = build_feedforward(asdict(config.feedforward_config))
# Wrappers handle the different layer norm styles (pre- and post-) and the residual path
self.wrap_att = ln_factory(self.mha)
self.wrap_ff: Union[Residual, PostNorm] = ln_factory(self.feedforward)
if (
config.layer_norm_style == LayerNormStyle.Pre
and config.layer_position.is_last()
):
self.wrap_ff = PostNorm(config.dim_model, self.wrap_ff)
@classmethod
def from_config(cls, config: xFormerEncoderConfig):
return cls(config)
@staticmethod
def get_reversible_layer(config) -> Tuple[nn.Module, nn.Module]:
ln_factory = _get_ln_factory(
config.dim_model,
config.layer_norm_style,
residual=False,
use_triton=config.use_triton,
)
mha = build_multi_head_attention(config.multi_head_config)
feedforward = build_feedforward(asdict(config.feedforward_config))
reversible_f = ln_factory(mha)
reversible_g = ln_factory(feedforward)
return reversible_f, reversible_g
def forward(
self,
x: torch.Tensor,
att_mask: Optional[torch.Tensor] = None,
input_mask: Optional[torch.Tensor] = None,
):
if self.pose_encoding:
x = self.pose_encoding(x)
# Handle the optional input masking, differs on Q, K, V
if input_mask is not None:
q = x
k = x * input_mask.unsqueeze(-1)
v = k
else:
q, k, v = x, x, x
        bsz, src_len, num_heads = q.size(0), k.size(1), self.mha.num_heads
        att_mask = maybe_merge_masks(att_mask, input_mask, bsz, src_len, num_heads)
        # Pre/Post norms and residual paths are already handled
        key_padding_mask = ~input_mask.bool() if input_mask is not None else None
        x = self.wrap_att(q, k, v, att_mask=att_mask, key_padding_mask=key_padding_mask)
x = self.wrap_ff(x)
return x
class xFormerDecoderBlock(torch.nn.Module):
r"""A vanilla Transformer Decoder block
... note: this implementation is not (yet ?) reversible"""
def __init__(self, config: xFormerDecoderConfig, **kwargs):
super().__init__()
        # If this layer is the first one, and a pose encoding has been requested
self.pose_encoding = (
build_positional_embedding(config.position_encoding_config)
if config.position_encoding_config and config.layer_position.is_first()
else None
)
# mini helper, builds a LayerNorm with the right Pre/Post config and the right dimensions
ln_factory = _get_ln_factory(
config.dim_model, config.layer_norm_style, use_triton=config.use_triton
)
self.mha = build_multi_head_attention(config.multi_head_config_masked)
self.cross_mha = build_multi_head_attention(config.multi_head_config_cross)
self.feedforward = build_feedforward(config.feedforward_config)
self.wrap_att = ln_factory(self.mha)
self.wrap_cross = ln_factory(self.cross_mha)
self.wrap_ff: Union[Residual, PostNorm] = ln_factory(self.feedforward)
if (
config.layer_norm_style == LayerNormStyle.Pre
and config.layer_position.is_last()
):
self.wrap_ff = PostNorm(config.dim_model, self.wrap_ff)
@classmethod
def from_config(cls, config: xFormerDecoderConfig):
return cls(config)
def forward(
self,
target: torch.Tensor,
memory: torch.Tensor,
encoder_att_mask: Optional[torch.Tensor] = None,
decoder_att_mask: Optional[torch.Tensor] = None,
input_mask: Optional[torch.Tensor] = None,
):
if self.pose_encoding:
target = self.pose_encoding(target)
# Handle the optional input masking, differs on Q, K, V
if input_mask is not None:
target_q = target
target_k = target * input_mask.unsqueeze(-1)
target_v = target_k
else:
target_q, target_k, target_v = target, target, target
x = self.wrap_att([target_q, target_k, target_v], att_mask=decoder_att_mask)
x = self.wrap_cross([x, memory, memory], att_mask=encoder_att_mask)
x = self.wrap_ff(x)
return x
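# Hedged construction sketch (not part of the original file). The registry names used
# below ("scaled_dot_product", "MLP", "vocab") are illustrative; any registered
# attention / feedforward / positional-embedding component can be swapped in the same way.
def _example_encoder_block() -> xFormerEncoderBlock:
    config = xFormerEncoderConfig(
        dim_model=64,
        multi_head_config={
            "num_heads": 4,
            "residual_dropout": 0.0,
            "attention": {"name": "scaled_dot_product", "dropout": 0.0, "causal": False},
        },
        feedforward_config={
            "name": "MLP",
            "dropout": 0.0,
            "activation": "relu",
            "hidden_layer_multiplier": 2,
        },
        position_encoding_config={"name": "vocab", "seq_len": 128, "vocab_size": 100},
        layer_norm_style="pre",
    )
    return xFormerEncoderBlock(config)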
|
bart_ls-main
|
xformers/xformers/factory/block_factory.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import torch
from xformers.components import reversible as rv
from xformers.factory.block_factory import (
xFormerBlockConfig,
xFormerDecoderBlock,
xFormerDecoderConfig,
xFormerEncoderBlock,
xFormerEncoderConfig,
)
@dataclass(init=False)
class xFormerConfig:
"""
The configuration structure to define a full Transformer.
This can include a stack of encoder layers, and a stack of decoder layers.
    It is optionally possible to share the embedding weights between
    the encoder and decoder positional encodings, as proposed for instance by
`Using the Output Embedding to Improve Language Models`, Press et al.
A full config example is for instance as follows:
::
xformer_config = [
{
"reversible": False, # Turn on to test the effect of using reversible layers
"block_type": "encoder",
"num_layers": LAYERS,
"dim_model": EMB,
"layer_norm_style": "pre",
"position_encoding_config": {
"name": "vocab",
"seq_len": CONTEXT,
"vocab_size": VOCAB_SIZE,
},
"multi_head_config": {
"num_heads": NUM_HEADS,
"residual_dropout": RES_DROP,
"use_rotary_embeddings": True,
"attention": {
"name": ATTENTION_MECHANISM_STR,
"dropout": ATTN_DROP,
"causal": True,
"seq_len": CONTEXT,
},
},
"feedforward_config": {
"name": "FusedMLP", # Use MLP if Triton is not available
"dropout": MLP_DROP,
"activation": "gelu",
"hidden_layer_multiplier": MLP_MULTIPLIER,
},
}
]
.. _`Using the Output Embedding to Improve Language Models`: https://arxiv.org/pdf/1608.05859.pdf
"""
stack_configs: Union[List[xFormerBlockConfig], Dict[str, xFormerBlockConfig]]
tie_embedding_weights: bool = False
def __init__(
self,
stack_configs: Union[List[Dict[str, Any]], Dict[str, Dict[str, Any]]],
tie_embedding_weights: bool = False,
):
# Type all the configurations. Possible typos are caught here
if isinstance(stack_configs, dict):
self.stack_configs = {}
for k, config in stack_configs.items():
if config["block_type"] == "encoder":
self.stack_configs[k] = xFormerEncoderConfig(**config)
else:
self.stack_configs[k] = xFormerDecoderConfig(**config)
else:
self.stack_configs = []
for config in stack_configs:
if config["block_type"] == "encoder":
self.stack_configs.append(xFormerEncoderConfig(**config))
else:
self.stack_configs.append(xFormerDecoderConfig(**config))
self.tie_embedding_weights = tie_embedding_weights
class xFormer(torch.nn.Module):
def __init__(
self,
stack_configs: Union[
xFormerBlockConfig, List[xFormerBlockConfig], Dict[str, xFormerBlockConfig]
],
tie_embedding_weights: bool = False,
):
"""
Given a serialized configuration, generate the corresponding model.
This is only a helper and can easily be bypassed
"""
super().__init__()
if isinstance(stack_configs, Dict):
stack_configs = list(stack_configs.values())
# Convenience, users can pass either a list of configs or a single one
if not isinstance(stack_configs, List):
stack_configs = [stack_configs]
self._verify_reversible(stack_configs)
encoders: List[torch.nn.Module] = []
decoders: List[torch.nn.Module] = []
self.reversible_encoder = False
self.rev_enc_pose_encoding = None
# Unroll the configs and build the model
for config in stack_configs:
# Handle either Encoder or Decoder stacks
builder = (
xFormerEncoderBlock.from_config
if isinstance(config, xFormerEncoderConfig)
else xFormerDecoderBlock.from_config
)
recipient = (
encoders if isinstance(config, xFormerEncoderConfig) else decoders
)
# Build up the stack
for i in range(config.num_layers):
# Label where this layer is in the stack
# (for instance useful for the positional encoding, or late layer norm)
if i > 0:
config.layer_position.mark_not_first()
if i < config.num_layers - 1:
config.layer_position.mark_not_last()
block = builder(config) # type: ignore
# If reversible: extract the reversible sub-parts, else append the block as-is
if config.reversible:
# WARNING: only one pose encoding is saved here (not Focal Transformer compatible for instance)
assert isinstance(config, xFormerEncoderConfig)
if block.pose_encoding is not None:
self.rev_enc_pose_encoding = block.pose_encoding
self.reversible_encoder = True
f, g = xFormerEncoderBlock.get_reversible_layer(config)
recipient.append(torch.nn.ModuleList([f, g]))
else:
recipient.append(block) # type: ignore
# Tie embedding weights, if requested and possible
assert (
not tie_embedding_weights or not self.reversible_encoder
), "Reversible layers and tied embeddings is not supported for now"
if (
tie_embedding_weights
and encoders
and encoders[0].pose_encoding
and decoders
and decoders[0].pose_encoding
and not config.reversible
):
logging.info("Tying encoder and decoder embeddings, as requested")
encoders[0].pose_encoding = decoders[0].pose_encoding
self.encoders: torch.nn.Module = (
rv.ReversibleSequence(torch.nn.ModuleList(encoders))
if self.reversible_encoder
else torch.nn.ModuleList(encoders)
)
self.decoders = torch.nn.ModuleList(decoders)
if len(self.decoders) > 0:
# Use Xavier init for encoding/decoding tasks
self._reset_parameters()
@classmethod
def from_config(cls, config: xFormerConfig):
return cls(config.stack_configs, config.tie_embedding_weights)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model
following the Xavier distribution."""
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_uniform_(p)
def _verify_reversible(self, stack_configs: List[xFormerBlockConfig]):
reversible = [
c.reversible
for c in filter(lambda x: x.block_type == "encoder", stack_configs)
]
non_reversible = [not rev for rev in reversible]
assert all(reversible) or all(non_reversible), (
"All layers need to have the same reversibility setting. "
+ f"Currently {reversible}"
)
def forward(
self,
src: torch.Tensor,
tgt: Optional[torch.Tensor] = None,
encoder_input_mask: Optional[torch.Tensor] = None,
decoder_input_mask: Optional[torch.Tensor] = None,
) -> Optional[torch.Tensor]:
# Encode to latent space if encoder is present
if len(list(self.encoders.parameters())) > 0:
encoders = self.encoders
memory = src.clone()
if isinstance(encoders, torch.nn.ModuleList):
for encoder in encoders:
memory = encoder(memory, input_mask=encoder_input_mask)
else:
if self.rev_enc_pose_encoding:
memory = self.rev_enc_pose_encoding(src)
# Reversible Encoder
x = torch.cat([memory, memory], dim=-1)
# TODO: pass in key and value independently.
kwargs = {"att_mask": encoder_input_mask}
x = encoders(x, **kwargs)
memory = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if not self.decoders:
return memory
        # If decoder: either use the encoder output, or just decode, both options are possible
if len(self.decoders) > 0:
tgt = src.clone() if tgt is None else tgt
for decoder in self.decoders:
tgt = decoder(
target=tgt,
# pyre-fixme[61]: `memory` is not always initialized here.
memory=memory,
input_mask=decoder_input_mask,
)
return tgt
return None
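# Hedged end-to-end sketch (not in the original file): build a tiny encoder-only model
# from a config list, mirroring the example in the xFormerConfig docstring. All sizes
# and component names below are placeholders.
def _example_tiny_xformer() -> xFormer:
    stack = [
        {
            "reversible": False,
            "block_type": "encoder",
            "num_layers": 2,
            "dim_model": 64,
            "layer_norm_style": "pre",
            "position_encoding_config": {"name": "vocab", "seq_len": 128, "vocab_size": 100},
            "multi_head_config": {
                "num_heads": 4,
                "residual_dropout": 0.0,
                "attention": {"name": "scaled_dot_product", "dropout": 0.0, "causal": False},
            },
            "feedforward_config": {
                "name": "MLP",
                "dropout": 0.0,
                "activation": "relu",
                "hidden_layer_multiplier": 2,
            },
        }
    ]
    return xFormer.from_config(xFormerConfig(stack))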
|
bart_ls-main
|
xformers/xformers/factory/model_factory.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
import argparse
import json
import random
import shutil
import torch
torch.backends.cudnn.enabled = True
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import h5py
import iep.utils as utils
import iep.preprocess
from iep.data import ClevrDataset, ClevrDataLoader
from iep.models import ModuleNet, Seq2Seq, LstmModel, CnnLstmModel, CnnLstmSaModel
parser = argparse.ArgumentParser()
# Input data
parser.add_argument('--train_question_h5', default='data/train_questions.h5')
parser.add_argument('--train_features_h5', default='data/train_features.h5')
parser.add_argument('--val_question_h5', default='data/val_questions.h5')
parser.add_argument('--val_features_h5', default='data/val_features.h5')
parser.add_argument('--feature_dim', default='1024,14,14')
parser.add_argument('--vocab_json', default='data/vocab.json')
parser.add_argument('--loader_num_workers', type=int, default=1)
parser.add_argument('--use_local_copies', default=0, type=int)
parser.add_argument('--cleanup_local_copies', default=1, type=int)
parser.add_argument('--family_split_file', default=None)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=10000, type=int)
parser.add_argument('--shuffle_train_data', default=1, type=int)
# What type of model to use and which parts to train
parser.add_argument('--model_type', default='PG',
choices=['PG', 'EE', 'PG+EE', 'LSTM', 'CNN+LSTM', 'CNN+LSTM+SA'])
parser.add_argument('--train_program_generator', default=1, type=int)
parser.add_argument('--train_execution_engine', default=1, type=int)
parser.add_argument('--baseline_train_only_rnn', default=0, type=int)
# Start from an existing checkpoint
parser.add_argument('--program_generator_start_from', default=None)
parser.add_argument('--execution_engine_start_from', default=None)
parser.add_argument('--baseline_start_from', default=None)
# RNN options
parser.add_argument('--rnn_wordvec_dim', default=300, type=int)
parser.add_argument('--rnn_hidden_dim', default=256, type=int)
parser.add_argument('--rnn_num_layers', default=2, type=int)
parser.add_argument('--rnn_dropout', default=0, type=float)
# Module net options
parser.add_argument('--module_stem_num_layers', default=2, type=int)
parser.add_argument('--module_stem_batchnorm', default=0, type=int)
parser.add_argument('--module_dim', default=128, type=int)
parser.add_argument('--module_residual', default=1, type=int)
parser.add_argument('--module_batchnorm', default=0, type=int)
# CNN options (for baselines)
parser.add_argument('--cnn_res_block_dim', default=128, type=int)
parser.add_argument('--cnn_num_res_blocks', default=0, type=int)
parser.add_argument('--cnn_proj_dim', default=512, type=int)
parser.add_argument('--cnn_pooling', default='maxpool2',
choices=['none', 'maxpool2'])
# Stacked-Attention options
parser.add_argument('--stacked_attn_dim', default=512, type=int)
parser.add_argument('--num_stacked_attn', default=2, type=int)
# Classifier options
parser.add_argument('--classifier_proj_dim', default=512, type=int)
parser.add_argument('--classifier_downsample', default='maxpool2',
choices=['maxpool2', 'maxpool4', 'none'])
parser.add_argument('--classifier_fc_dims', default='1024')
parser.add_argument('--classifier_batchnorm', default=0, type=int)
parser.add_argument('--classifier_dropout', default=0, type=float)
# Optimization options
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_iterations', default=100000, type=int)
parser.add_argument('--learning_rate', default=5e-4, type=float)
parser.add_argument('--reward_decay', default=0.9, type=float)
# Output options
parser.add_argument('--checkpoint_path', default='data/checkpoint.pt')
parser.add_argument('--randomize_checkpoint_path', type=int, default=0)
parser.add_argument('--record_loss_every', type=int, default=1)
parser.add_argument('--checkpoint_every', default=10000, type=int)
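# Example invocation (illustrative; all paths are placeholders and depend on how the
# questions and features were preprocessed):
#   python scripts/train_model.py \
#     --model_type PG \
#     --train_question_h5 data/train_questions.h5 \
#     --train_features_h5 data/train_features.h5 \
#     --checkpoint_path data/program_generator.pt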
def main(args):
if args.randomize_checkpoint_path == 1:
name, ext = os.path.splitext(args.checkpoint_path)
num = random.randint(1, 1000000)
args.checkpoint_path = '%s_%06d%s' % (name, num, ext)
vocab = utils.load_vocab(args.vocab_json)
if args.use_local_copies == 1:
shutil.copy(args.train_question_h5, '/tmp/train_questions.h5')
shutil.copy(args.train_features_h5, '/tmp/train_features.h5')
shutil.copy(args.val_question_h5, '/tmp/val_questions.h5')
shutil.copy(args.val_features_h5, '/tmp/val_features.h5')
args.train_question_h5 = '/tmp/train_questions.h5'
args.train_features_h5 = '/tmp/train_features.h5'
args.val_question_h5 = '/tmp/val_questions.h5'
args.val_features_h5 = '/tmp/val_features.h5'
question_families = None
if args.family_split_file is not None:
with open(args.family_split_file, 'r') as f:
question_families = json.load(f)
train_loader_kwargs = {
'question_h5': args.train_question_h5,
'feature_h5': args.train_features_h5,
'vocab': vocab,
'batch_size': args.batch_size,
'shuffle': args.shuffle_train_data == 1,
'question_families': question_families,
'max_samples': args.num_train_samples,
'num_workers': args.loader_num_workers,
}
val_loader_kwargs = {
'question_h5': args.val_question_h5,
'feature_h5': args.val_features_h5,
'vocab': vocab,
'batch_size': args.batch_size,
'question_families': question_families,
'max_samples': args.num_val_samples,
'num_workers': args.loader_num_workers,
}
with ClevrDataLoader(**train_loader_kwargs) as train_loader, \
ClevrDataLoader(**val_loader_kwargs) as val_loader:
train_loop(args, train_loader, val_loader)
if args.use_local_copies == 1 and args.cleanup_local_copies == 1:
os.remove('/tmp/train_questions.h5')
os.remove('/tmp/train_features.h5')
os.remove('/tmp/val_questions.h5')
os.remove('/tmp/val_features.h5')
def train_loop(args, train_loader, val_loader):
vocab = utils.load_vocab(args.vocab_json)
program_generator, pg_kwargs, pg_optimizer = None, None, None
execution_engine, ee_kwargs, ee_optimizer = None, None, None
baseline_model, baseline_kwargs, baseline_optimizer = None, None, None
baseline_type = None
pg_best_state, ee_best_state, baseline_best_state = None, None, None
# Set up model
if args.model_type == 'PG' or args.model_type == 'PG+EE':
program_generator, pg_kwargs = get_program_generator(args)
pg_optimizer = torch.optim.Adam(program_generator.parameters(),
lr=args.learning_rate)
print('Here is the program generator:')
print(program_generator)
if args.model_type == 'EE' or args.model_type == 'PG+EE':
execution_engine, ee_kwargs = get_execution_engine(args)
ee_optimizer = torch.optim.Adam(execution_engine.parameters(),
lr=args.learning_rate)
print('Here is the execution engine:')
print(execution_engine)
if args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
baseline_model, baseline_kwargs = get_baseline_model(args)
params = baseline_model.parameters()
if args.baseline_train_only_rnn == 1:
params = baseline_model.rnn.parameters()
baseline_optimizer = torch.optim.Adam(params, lr=args.learning_rate)
print('Here is the baseline model')
print(baseline_model)
baseline_type = args.model_type
loss_fn = torch.nn.CrossEntropyLoss().cuda()
stats = {
'train_losses': [], 'train_rewards': [], 'train_losses_ts': [],
'train_accs': [], 'val_accs': [], 'val_accs_ts': [],
'best_val_acc': -1, 'model_t': 0,
}
t, epoch, reward_moving_average = 0, 0, 0
set_mode('train', [program_generator, execution_engine, baseline_model])
print('train_loader has %d samples' % len(train_loader.dataset))
print('val_loader has %d samples' % len(val_loader.dataset))
while t < args.num_iterations:
epoch += 1
print('Starting epoch %d' % epoch)
for batch in train_loader:
t += 1
questions, _, feats, answers, programs, _ = batch
questions_var = Variable(questions.cuda())
feats_var = Variable(feats.cuda())
answers_var = Variable(answers.cuda())
if programs[0] is not None:
programs_var = Variable(programs.cuda())
reward = None
if args.model_type == 'PG':
# Train program generator with ground-truth programs
pg_optimizer.zero_grad()
loss = program_generator(questions_var, programs_var)
loss.backward()
pg_optimizer.step()
elif args.model_type == 'EE':
# Train execution engine with ground-truth programs
ee_optimizer.zero_grad()
scores = execution_engine(feats_var, programs_var)
loss = loss_fn(scores, answers_var)
loss.backward()
ee_optimizer.step()
elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
baseline_optimizer.zero_grad()
baseline_model.zero_grad()
scores = baseline_model(questions_var, feats_var)
loss = loss_fn(scores, answers_var)
loss.backward()
baseline_optimizer.step()
elif args.model_type == 'PG+EE':
programs_pred = program_generator.reinforce_sample(questions_var)
scores = execution_engine(feats_var, programs_pred)
loss = loss_fn(scores, answers_var)
_, preds = scores.data.cpu().max(1)
raw_reward = (preds == answers).float()
reward_moving_average *= args.reward_decay
reward_moving_average += (1.0 - args.reward_decay) * raw_reward.mean()
centered_reward = raw_reward - reward_moving_average
if args.train_execution_engine == 1:
ee_optimizer.zero_grad()
loss.backward()
ee_optimizer.step()
if args.train_program_generator == 1:
pg_optimizer.zero_grad()
program_generator.reinforce_backward(centered_reward.cuda())
pg_optimizer.step()
if t % args.record_loss_every == 0:
print(t, loss.data[0])
stats['train_losses'].append(loss.data[0])
stats['train_losses_ts'].append(t)
if reward is not None:
stats['train_rewards'].append(reward)
if t % args.checkpoint_every == 0:
print('Checking training accuracy ... ')
train_acc = check_accuracy(args, program_generator, execution_engine,
baseline_model, train_loader)
print('train accuracy is', train_acc)
print('Checking validation accuracy ...')
val_acc = check_accuracy(args, program_generator, execution_engine,
baseline_model, val_loader)
print('val accuracy is ', val_acc)
stats['train_accs'].append(train_acc)
stats['val_accs'].append(val_acc)
stats['val_accs_ts'].append(t)
if val_acc > stats['best_val_acc']:
stats['best_val_acc'] = val_acc
stats['model_t'] = t
best_pg_state = get_state(program_generator)
best_ee_state = get_state(execution_engine)
best_baseline_state = get_state(baseline_model)
checkpoint = {
'args': args.__dict__,
'program_generator_kwargs': pg_kwargs,
'program_generator_state': best_pg_state,
'execution_engine_kwargs': ee_kwargs,
'execution_engine_state': best_ee_state,
'baseline_kwargs': baseline_kwargs,
'baseline_state': best_baseline_state,
'baseline_type': baseline_type,
'vocab': vocab
}
for k, v in stats.items():
checkpoint[k] = v
print('Saving checkpoint to %s' % args.checkpoint_path)
torch.save(checkpoint, args.checkpoint_path)
del checkpoint['program_generator_state']
del checkpoint['execution_engine_state']
del checkpoint['baseline_state']
with open(args.checkpoint_path + '.json', 'w') as f:
json.dump(checkpoint, f)
if t == args.num_iterations:
break
def parse_int_list(s):
return tuple(int(n) for n in s.split(','))
def get_state(m):
if m is None:
return None
state = {}
for k, v in m.state_dict().items():
state[k] = v.clone()
return state
def get_program_generator(args):
vocab = utils.load_vocab(args.vocab_json)
if args.program_generator_start_from is not None:
pg, kwargs = utils.load_program_generator(args.program_generator_start_from)
cur_vocab_size = pg.encoder_embed.weight.size(0)
if cur_vocab_size != len(vocab['question_token_to_idx']):
print('Expanding vocabulary of program generator')
pg.expand_encoder_vocab(vocab['question_token_to_idx'])
kwargs['encoder_vocab_size'] = len(vocab['question_token_to_idx'])
else:
kwargs = {
'encoder_vocab_size': len(vocab['question_token_to_idx']),
'decoder_vocab_size': len(vocab['program_token_to_idx']),
'wordvec_dim': args.rnn_wordvec_dim,
'hidden_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
}
pg = Seq2Seq(**kwargs)
pg.cuda()
pg.train()
return pg, kwargs
def get_execution_engine(args):
vocab = utils.load_vocab(args.vocab_json)
if args.execution_engine_start_from is not None:
ee, kwargs = utils.load_execution_engine(args.execution_engine_start_from)
# TODO: Adjust vocab?
else:
kwargs = {
'vocab': vocab,
'feature_dim': parse_int_list(args.feature_dim),
'stem_batchnorm': args.module_stem_batchnorm == 1,
'stem_num_layers': args.module_stem_num_layers,
'module_dim': args.module_dim,
'module_residual': args.module_residual == 1,
'module_batchnorm': args.module_batchnorm == 1,
'classifier_proj_dim': args.classifier_proj_dim,
'classifier_downsample': args.classifier_downsample,
'classifier_fc_layers': parse_int_list(args.classifier_fc_dims),
'classifier_batchnorm': args.classifier_batchnorm == 1,
'classifier_dropout': args.classifier_dropout,
}
ee = ModuleNet(**kwargs)
ee.cuda()
ee.train()
return ee, kwargs
def get_baseline_model(args):
vocab = utils.load_vocab(args.vocab_json)
if args.baseline_start_from is not None:
model, kwargs = utils.load_baseline(args.baseline_start_from)
elif args.model_type == 'LSTM':
kwargs = {
'vocab': vocab,
'rnn_wordvec_dim': args.rnn_wordvec_dim,
'rnn_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
'fc_dims': parse_int_list(args.classifier_fc_dims),
'fc_use_batchnorm': args.classifier_batchnorm == 1,
'fc_dropout': args.classifier_dropout,
}
model = LstmModel(**kwargs)
elif args.model_type == 'CNN+LSTM':
kwargs = {
'vocab': vocab,
'rnn_wordvec_dim': args.rnn_wordvec_dim,
'rnn_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
'cnn_feat_dim': parse_int_list(args.feature_dim),
'cnn_num_res_blocks': args.cnn_num_res_blocks,
'cnn_res_block_dim': args.cnn_res_block_dim,
'cnn_proj_dim': args.cnn_proj_dim,
'cnn_pooling': args.cnn_pooling,
'fc_dims': parse_int_list(args.classifier_fc_dims),
'fc_use_batchnorm': args.classifier_batchnorm == 1,
'fc_dropout': args.classifier_dropout,
}
model = CnnLstmModel(**kwargs)
elif args.model_type == 'CNN+LSTM+SA':
kwargs = {
'vocab': vocab,
'rnn_wordvec_dim': args.rnn_wordvec_dim,
'rnn_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
'cnn_feat_dim': parse_int_list(args.feature_dim),
'stacked_attn_dim': args.stacked_attn_dim,
'num_stacked_attn': args.num_stacked_attn,
'fc_dims': parse_int_list(args.classifier_fc_dims),
'fc_use_batchnorm': args.classifier_batchnorm == 1,
'fc_dropout': args.classifier_dropout,
}
model = CnnLstmSaModel(**kwargs)
if model.rnn.token_to_idx != vocab['question_token_to_idx']:
# Make sure new vocab is superset of old
for k, v in model.rnn.token_to_idx.items():
assert k in vocab['question_token_to_idx']
assert vocab['question_token_to_idx'][k] == v
for token, idx in vocab['question_token_to_idx'].items():
model.rnn.token_to_idx[token] = idx
kwargs['vocab'] = vocab
model.rnn.expand_vocab(vocab['question_token_to_idx'])
model.cuda()
model.train()
return model, kwargs
def set_mode(mode, models):
assert mode in ['train', 'eval']
for m in models:
if m is None: continue
if mode == 'train': m.train()
if mode == 'eval': m.eval()
def check_accuracy(args, program_generator, execution_engine, baseline_model, loader):
set_mode('eval', [program_generator, execution_engine, baseline_model])
num_correct, num_samples = 0, 0
for batch in loader:
questions, _, feats, answers, programs, _ = batch
questions_var = Variable(questions.cuda(), volatile=True)
feats_var = Variable(feats.cuda(), volatile=True)
    answers_var = Variable(answers.cuda(), volatile=True)
if programs[0] is not None:
programs_var = Variable(programs.cuda(), volatile=True)
scores = None # Use this for everything but PG
if args.model_type == 'PG':
vocab = utils.load_vocab(args.vocab_json)
for i in range(questions.size(0)):
program_pred = program_generator.sample(Variable(questions[i:i+1].cuda(), volatile=True))
program_pred_str = iep.preprocess.decode(program_pred, vocab['program_idx_to_token'])
program_str = iep.preprocess.decode(programs[i], vocab['program_idx_to_token'])
if program_pred_str == program_str:
num_correct += 1
num_samples += 1
elif args.model_type == 'EE':
scores = execution_engine(feats_var, programs_var)
elif args.model_type == 'PG+EE':
programs_pred = program_generator.reinforce_sample(
questions_var, argmax=True)
scores = execution_engine(feats_var, programs_pred)
elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
scores = baseline_model(questions_var, feats_var)
if scores is not None:
_, preds = scores.data.cpu().max(1)
num_correct += (preds == answers).sum()
num_samples += preds.size(0)
if num_samples >= args.num_val_samples:
break
set_mode('train', [program_generator, execution_engine, baseline_model])
acc = float(num_correct) / num_samples
return acc
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
clevr-iep-main
|
scripts/train_model.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import argparse
import json
import os
import h5py
import numpy as np
import iep.programs
from iep.preprocess import tokenize, encode, build_vocab
"""
Preprocessing script for CLEVR question files.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='prefix',
choices=['chain', 'prefix', 'postfix'])
parser.add_argument('--input_questions_json', required=True)
parser.add_argument('--input_vocab_json', default='')
parser.add_argument('--expand_vocab', default=0, type=int)
parser.add_argument('--unk_threshold', default=1, type=int)
parser.add_argument('--encode_unk', default=0, type=int)
parser.add_argument('--output_h5_file', required=True)
parser.add_argument('--output_vocab_json', default='')
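# Example invocation (illustrative; paths are placeholders):
#   python scripts/preprocess_questions.py \
#     --input_questions_json data/CLEVR_train_questions.json \
#     --output_h5_file data/train_questions.h5 \
#     --output_vocab_json data/vocab.json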
def program_to_str(program, mode):
if mode == 'chain':
if not iep.programs.is_chain(program):
return None
return iep.programs.list_to_str(program)
elif mode == 'prefix':
program_prefix = iep.programs.list_to_prefix(program)
return iep.programs.list_to_str(program_prefix)
elif mode == 'postfix':
program_postfix = iep.programs.list_to_postfix(program)
return iep.programs.list_to_str(program_postfix)
return None
def main(args):
if (args.input_vocab_json == '') and (args.output_vocab_json == ''):
print('Must give one of --input_vocab_json or --output_vocab_json')
return
print('Loading data')
with open(args.input_questions_json, 'r') as f:
questions = json.load(f)['questions']
# Either create the vocab or load it from disk
if args.input_vocab_json == '' or args.expand_vocab == 1:
print('Building vocab')
if 'answer' in questions[0]:
answer_token_to_idx = build_vocab(
(q['answer'] for q in questions)
)
question_token_to_idx = build_vocab(
(q['question'] for q in questions),
min_token_count=args.unk_threshold,
punct_to_keep=[';', ','], punct_to_remove=['?', '.']
)
all_program_strs = []
for q in questions:
if 'program' not in q: continue
program_str = program_to_str(q['program'], args.mode)
if program_str is not None:
all_program_strs.append(program_str)
program_token_to_idx = build_vocab(all_program_strs)
vocab = {
'question_token_to_idx': question_token_to_idx,
'program_token_to_idx': program_token_to_idx,
'answer_token_to_idx': answer_token_to_idx,
}
if args.input_vocab_json != '':
print('Loading vocab')
if args.expand_vocab == 1:
new_vocab = vocab
with open(args.input_vocab_json, 'r') as f:
vocab = json.load(f)
if args.expand_vocab == 1:
num_new_words = 0
for word in new_vocab['question_token_to_idx']:
if word not in vocab['question_token_to_idx']:
print('Found new word %s' % word)
idx = len(vocab['question_token_to_idx'])
vocab['question_token_to_idx'][word] = idx
num_new_words += 1
print('Found %d new words' % num_new_words)
if args.output_vocab_json != '':
with open(args.output_vocab_json, 'w') as f:
json.dump(vocab, f)
# Encode all questions and programs
print('Encoding data')
questions_encoded = []
programs_encoded = []
question_families = []
orig_idxs = []
image_idxs = []
answers = []
for orig_idx, q in enumerate(questions):
question = q['question']
orig_idxs.append(orig_idx)
image_idxs.append(q['image_index'])
if 'question_family_index' in q:
question_families.append(q['question_family_index'])
question_tokens = tokenize(question,
punct_to_keep=[';', ','],
punct_to_remove=['?', '.'])
question_encoded = encode(question_tokens,
vocab['question_token_to_idx'],
allow_unk=args.encode_unk == 1)
questions_encoded.append(question_encoded)
if 'program' in q:
program = q['program']
program_str = program_to_str(program, args.mode)
program_tokens = tokenize(program_str)
program_encoded = encode(program_tokens, vocab['program_token_to_idx'])
programs_encoded.append(program_encoded)
if 'answer' in q:
answers.append(vocab['answer_token_to_idx'][q['answer']])
# Pad encoded questions and programs
max_question_length = max(len(x) for x in questions_encoded)
for qe in questions_encoded:
while len(qe) < max_question_length:
qe.append(vocab['question_token_to_idx']['<NULL>'])
if len(programs_encoded) > 0:
max_program_length = max(len(x) for x in programs_encoded)
for pe in programs_encoded:
while len(pe) < max_program_length:
pe.append(vocab['program_token_to_idx']['<NULL>'])
# Create h5 file
print('Writing output')
questions_encoded = np.asarray(questions_encoded, dtype=np.int32)
programs_encoded = np.asarray(programs_encoded, dtype=np.int32)
print(questions_encoded.shape)
print(programs_encoded.shape)
with h5py.File(args.output_h5_file, 'w') as f:
f.create_dataset('questions', data=questions_encoded)
f.create_dataset('image_idxs', data=np.asarray(image_idxs))
f.create_dataset('orig_idxs', data=np.asarray(orig_idxs))
if len(programs_encoded) > 0:
f.create_dataset('programs', data=programs_encoded)
if len(question_families) > 0:
f.create_dataset('question_families', data=np.asarray(question_families))
if len(answers) > 0:
f.create_dataset('answers', data=np.asarray(answers))
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
clevr-iep-main
|
scripts/preprocess_questions.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse, os, json
import h5py
import numpy as np
from scipy.misc import imread, imresize
import torch
import torchvision
parser = argparse.ArgumentParser()
parser.add_argument('--input_image_dir', required=True)
parser.add_argument('--max_images', default=None, type=int)
parser.add_argument('--output_h5_file', required=True)
parser.add_argument('--image_height', default=224, type=int)
parser.add_argument('--image_width', default=224, type=int)
parser.add_argument('--model', default='resnet101')
parser.add_argument('--model_stage', default=3, type=int)
parser.add_argument('--batch_size', default=128, type=int)
def build_model(args):
if not hasattr(torchvision.models, args.model):
raise ValueError('Invalid model "%s"' % args.model)
  if 'resnet' not in args.model:
raise ValueError('Feature extraction only supports ResNets')
cnn = getattr(torchvision.models, args.model)(pretrained=True)
layers = [
cnn.conv1,
cnn.bn1,
cnn.relu,
cnn.maxpool,
]
for i in range(args.model_stage):
name = 'layer%d' % (i + 1)
layers.append(getattr(cnn, name))
model = torch.nn.Sequential(*layers)
model.cuda()
model.eval()
return model
def run_batch(cur_batch, model):
mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.224]).reshape(1, 3, 1, 1)
image_batch = np.concatenate(cur_batch, 0).astype(np.float32)
image_batch = (image_batch / 255.0 - mean) / std
image_batch = torch.FloatTensor(image_batch).cuda()
image_batch = torch.autograd.Variable(image_batch, volatile=True)
feats = model(image_batch)
feats = feats.data.cpu().clone().numpy()
return feats
def main(args):
input_paths = []
idx_set = set()
for fn in os.listdir(args.input_image_dir):
if not fn.endswith('.png'): continue
idx = int(os.path.splitext(fn)[0].split('_')[-1])
input_paths.append((os.path.join(args.input_image_dir, fn), idx))
idx_set.add(idx)
input_paths.sort(key=lambda x: x[1])
assert len(idx_set) == len(input_paths)
assert min(idx_set) == 0 and max(idx_set) == len(idx_set) - 1
if args.max_images is not None:
input_paths = input_paths[:args.max_images]
print(input_paths[0])
print(input_paths[-1])
model = build_model(args)
img_size = (args.image_height, args.image_width)
with h5py.File(args.output_h5_file, 'w') as f:
feat_dset = None
i0 = 0
cur_batch = []
for i, (path, idx) in enumerate(input_paths):
img = imread(path, mode='RGB')
img = imresize(img, img_size, interp='bicubic')
img = img.transpose(2, 0, 1)[None]
cur_batch.append(img)
if len(cur_batch) == args.batch_size:
feats = run_batch(cur_batch, model)
if feat_dset is None:
N = len(input_paths)
_, C, H, W = feats.shape
feat_dset = f.create_dataset('features', (N, C, H, W),
dtype=np.float32)
i1 = i0 + len(cur_batch)
feat_dset[i0:i1] = feats
i0 = i1
print('Processed %d / %d images' % (i1, len(input_paths)))
cur_batch = []
if len(cur_batch) > 0:
feats = run_batch(cur_batch, model)
i1 = i0 + len(cur_batch)
feat_dset[i0:i1] = feats
print('Processed %d / %d images' % (i1, len(input_paths)))
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
clevr-iep-main
|
scripts/extract_features.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import random
import shutil
import sys
import os
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import numpy as np
import h5py
from scipy.misc import imread, imresize
import iep.utils as utils
import iep.programs
from iep.data import ClevrDataset, ClevrDataLoader
from iep.preprocess import tokenize, encode
parser = argparse.ArgumentParser()
parser.add_argument('--program_generator', default=None)
parser.add_argument('--execution_engine', default=None)
parser.add_argument('--baseline_model', default=None)
parser.add_argument('--use_gpu', default=1, type=int)
# For running on a preprocessed dataset
parser.add_argument('--input_question_h5', default='data/val_questions.h5')
parser.add_argument('--input_features_h5', default='data-ssd/val_features.h5')
parser.add_argument('--use_gt_programs', default=0, type=int)
# This will override the vocab stored in the checkpoint;
# we need this to run CLEVR models on human data
parser.add_argument('--vocab_json', default=None)
# For running on a single example
parser.add_argument('--question', default=None)
parser.add_argument('--image', default=None)
parser.add_argument('--cnn_model', default='resnet101')
parser.add_argument('--cnn_model_stage', default=3, type=int)
parser.add_argument('--image_width', default=224, type=int)
parser.add_argument('--image_height', default=224, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_samples', default=None, type=int)
parser.add_argument('--family_split_file', default=None)
parser.add_argument('--sample_argmax', type=int, default=1)
parser.add_argument('--temperature', default=1.0, type=float)
# If this is passed, then save all predictions to this file
parser.add_argument('--output_h5', default=None)
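# Example invocations (illustrative; checkpoint and data paths are placeholders):
#   # On a preprocessed dataset:
#   python scripts/run_model.py --program_generator pg.pt --execution_engine ee.pt \
#     --input_question_h5 data/val_questions.h5 --input_features_h5 data/val_features.h5
#   # On a single question / image pair:
#   python scripts/run_model.py --program_generator pg.pt --execution_engine ee.pt \
#     --question "How many red cubes are there?" --image img.png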
def main(args):
print()
model = None
if args.baseline_model is not None:
print('Loading baseline model from ', args.baseline_model)
model, _ = utils.load_baseline(args.baseline_model)
if args.vocab_json is not None:
new_vocab = utils.load_vocab(args.vocab_json)
model.rnn.expand_vocab(new_vocab['question_token_to_idx'])
elif args.program_generator is not None and args.execution_engine is not None:
print('Loading program generator from ', args.program_generator)
program_generator, _ = utils.load_program_generator(args.program_generator)
print('Loading execution engine from ', args.execution_engine)
execution_engine, _ = utils.load_execution_engine(args.execution_engine, verbose=False)
if args.vocab_json is not None:
new_vocab = utils.load_vocab(args.vocab_json)
program_generator.expand_encoder_vocab(new_vocab['question_token_to_idx'])
model = (program_generator, execution_engine)
else:
print('Must give either --baseline_model or --program_generator and --execution_engine')
return
if args.question is not None and args.image is not None:
run_single_example(args, model)
else:
vocab = load_vocab(args)
loader_kwargs = {
'question_h5': args.input_question_h5,
'feature_h5': args.input_features_h5,
'vocab': vocab,
'batch_size': args.batch_size,
}
if args.num_samples is not None and args.num_samples > 0:
loader_kwargs['max_samples'] = args.num_samples
if args.family_split_file is not None:
with open(args.family_split_file, 'r') as f:
loader_kwargs['question_families'] = json.load(f)
with ClevrDataLoader(**loader_kwargs) as loader:
run_batch(args, model, loader)
def load_vocab(args):
path = None
if args.baseline_model is not None:
path = args.baseline_model
elif args.program_generator is not None:
path = args.program_generator
elif args.execution_engine is not None:
path = args.execution_engine
return utils.load_cpu(path)['vocab']
def run_single_example(args, model):
dtype = torch.FloatTensor
if args.use_gpu == 1:
dtype = torch.cuda.FloatTensor
# Build the CNN to use for feature extraction
print('Loading CNN for feature extraction')
cnn = build_cnn(args, dtype)
# Load and preprocess the image
img_size = (args.image_height, args.image_width)
img = imread(args.image, mode='RGB')
img = imresize(img, img_size, interp='bicubic')
img = img.transpose(2, 0, 1)[None]
mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.224]).reshape(1, 3, 1, 1)
img = (img.astype(np.float32) / 255.0 - mean) / std
# Use CNN to extract features for the image
img_var = Variable(torch.FloatTensor(img).type(dtype), volatile=True)
feats_var = cnn(img_var)
# Tokenize the question
vocab = load_vocab(args)
question_tokens = tokenize(args.question,
punct_to_keep=[';', ','],
punct_to_remove=['?', '.'])
question_encoded = encode(question_tokens,
vocab['question_token_to_idx'],
allow_unk=True)
question_encoded = torch.LongTensor(question_encoded).view(1, -1)
question_encoded = question_encoded.type(dtype).long()
question_var = Variable(question_encoded, volatile=True)
# Run the model
print('Running the model\n')
scores = None
predicted_program = None
if type(model) is tuple:
program_generator, execution_engine = model
program_generator.type(dtype)
execution_engine.type(dtype)
predicted_program = program_generator.reinforce_sample(
question_var,
temperature=args.temperature,
argmax=(args.sample_argmax == 1))
scores = execution_engine(feats_var, predicted_program)
else:
model.type(dtype)
scores = model(question_var, feats_var)
# Print results
_, predicted_answer_idx = scores.data.cpu()[0].max(dim=0)
predicted_answer = vocab['answer_idx_to_token'][predicted_answer_idx[0]]
print('Question: "%s"' % args.question)
print('Predicted answer: ', predicted_answer)
if predicted_program is not None:
print()
print('Predicted program:')
program = predicted_program.data.cpu()[0]
num_inputs = 1
for fn_idx in program:
fn_str = vocab['program_idx_to_token'][fn_idx]
num_inputs += iep.programs.get_num_inputs(fn_str) - 1
print(fn_str)
if num_inputs == 0:
break
def build_cnn(args, dtype):
if not hasattr(torchvision.models, args.cnn_model):
raise ValueError('Invalid model "%s"' % args.cnn_model)
  if 'resnet' not in args.cnn_model:
raise ValueError('Feature extraction only supports ResNets')
whole_cnn = getattr(torchvision.models, args.cnn_model)(pretrained=True)
layers = [
whole_cnn.conv1,
whole_cnn.bn1,
whole_cnn.relu,
whole_cnn.maxpool,
]
for i in range(args.cnn_model_stage):
name = 'layer%d' % (i + 1)
layers.append(getattr(whole_cnn, name))
cnn = torch.nn.Sequential(*layers)
cnn.type(dtype)
cnn.eval()
return cnn
def run_batch(args, model, loader):
dtype = torch.FloatTensor
if args.use_gpu == 1:
dtype = torch.cuda.FloatTensor
if type(model) is tuple:
program_generator, execution_engine = model
run_our_model_batch(args, program_generator, execution_engine, loader, dtype)
else:
run_baseline_batch(args, model, loader, dtype)
def run_baseline_batch(args, model, loader, dtype):
model.type(dtype)
model.eval()
all_scores, all_probs = [], []
num_correct, num_samples = 0, 0
for batch in loader:
questions, images, feats, answers, programs, program_lists = batch
questions_var = Variable(questions.type(dtype).long(), volatile=True)
feats_var = Variable(feats.type(dtype), volatile=True)
scores = model(questions_var, feats_var)
probs = F.softmax(scores)
_, preds = scores.data.cpu().max(1)
all_scores.append(scores.data.cpu().clone())
all_probs.append(probs.data.cpu().clone())
num_correct += (preds == answers).sum()
num_samples += preds.size(0)
print('Ran %d samples' % num_samples)
acc = float(num_correct) / num_samples
print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
all_scores = torch.cat(all_scores, 0)
all_probs = torch.cat(all_probs, 0)
if args.output_h5 is not None:
print('Writing output to %s' % args.output_h5)
with h5py.File(args.output_h5, 'w') as fout:
fout.create_dataset('scores', data=all_scores.numpy())
fout.create_dataset('probs', data=all_probs.numpy())
def run_our_model_batch(args, program_generator, execution_engine, loader, dtype):
program_generator.type(dtype)
program_generator.eval()
execution_engine.type(dtype)
execution_engine.eval()
all_scores, all_programs = [], []
all_probs = []
num_correct, num_samples = 0, 0
for batch in loader:
questions, images, feats, answers, programs, program_lists = batch
questions_var = Variable(questions.type(dtype).long(), volatile=True)
feats_var = Variable(feats.type(dtype), volatile=True)
programs_pred = program_generator.reinforce_sample(
questions_var,
temperature=args.temperature,
argmax=(args.sample_argmax == 1))
if args.use_gt_programs == 1:
scores = execution_engine(feats_var, program_lists)
else:
scores = execution_engine(feats_var, programs_pred)
probs = F.softmax(scores)
_, preds = scores.data.cpu().max(1)
all_programs.append(programs_pred.data.cpu().clone())
all_scores.append(scores.data.cpu().clone())
all_probs.append(probs.data.cpu().clone())
num_correct += (preds == answers).sum()
num_samples += preds.size(0)
print('Ran %d samples' % num_samples)
acc = float(num_correct) / num_samples
print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
all_scores = torch.cat(all_scores, 0)
all_probs = torch.cat(all_probs, 0)
all_programs = torch.cat(all_programs, 0)
if args.output_h5 is not None:
print('Writing output to "%s"' % args.output_h5)
with h5py.File(args.output_h5, 'w') as fout:
fout.create_dataset('scores', data=all_scores.numpy())
fout.create_dataset('probs', data=all_probs.numpy())
fout.create_dataset('predicted_programs', data=all_programs.numpy())
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
clevr-iep-main
|
scripts/run_model.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for preprocessing sequence data.
Special tokens that are in all dictionaries:
<NULL>: Extra parts of the sequence that we should ignore
<START>: Goes at the start of a sequence
<END>: Goes at the end of a sequence, before <NULL> tokens
<UNK>: Out-of-vocabulary words
"""
SPECIAL_TOKENS = {
'<NULL>': 0,
'<START>': 1,
'<END>': 2,
'<UNK>': 3,
}
def tokenize(s, delim=' ',
add_start_token=True, add_end_token=True,
punct_to_keep=None, punct_to_remove=None):
"""
Tokenize a sequence, converting a string s into a list of (string) tokens by
splitting on the specified delimiter. Optionally keep or remove certain
punctuation marks and add start and end tokens.
"""
if punct_to_keep is not None:
for p in punct_to_keep:
s = s.replace(p, '%s%s' % (delim, p))
if punct_to_remove is not None:
for p in punct_to_remove:
s = s.replace(p, '')
tokens = s.split(delim)
if add_start_token:
tokens.insert(0, '<START>')
if add_end_token:
tokens.append('<END>')
return tokens
def build_vocab(sequences, min_token_count=1, delim=' ',
punct_to_keep=None, punct_to_remove=None):
token_to_count = {}
tokenize_kwargs = {
'delim': delim,
'punct_to_keep': punct_to_keep,
'punct_to_remove': punct_to_remove,
}
for seq in sequences:
seq_tokens = tokenize(seq, **tokenize_kwargs,
add_start_token=False, add_end_token=False)
for token in seq_tokens:
if token not in token_to_count:
token_to_count[token] = 0
token_to_count[token] += 1
token_to_idx = {}
for token, idx in SPECIAL_TOKENS.items():
token_to_idx[token] = idx
for token, count in sorted(token_to_count.items()):
if count >= min_token_count:
token_to_idx[token] = len(token_to_idx)
return token_to_idx
def encode(seq_tokens, token_to_idx, allow_unk=False):
seq_idx = []
for token in seq_tokens:
if token not in token_to_idx:
if allow_unk:
token = '<UNK>'
else:
raise KeyError('Token "%s" not in vocab' % token)
seq_idx.append(token_to_idx[token])
return seq_idx
def decode(seq_idx, idx_to_token, delim=None, stop_at_end=True):
tokens = []
for idx in seq_idx:
tokens.append(idx_to_token[idx])
if stop_at_end and tokens[-1] == '<END>':
break
if delim is None:
return tokens
else:
return delim.join(tokens)
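# Hedged usage sketch (not part of the original file): round-trip a toy
# sentence through tokenize / build_vocab / encode / decode.
if __name__ == '__main__':
    sentence = 'is the cube red'
    vocab = build_vocab([sentence])
    tokens = tokenize(sentence)
    idxs = encode(tokens, vocab)
    idx_to_token = {idx: token for token, idx in vocab.items()}
    print(decode(idxs, idx_to_token, delim=' '))  # <START> is the cube red <END>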
|
clevr-iep-main
|
iep/preprocess.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for dealing with embeddings.
"""
def convert_pretrained_wordvecs(vocab, word2vec):
N = len(vocab['question_idx_to_token'])
D = word2vec['vecs'].size(1)
embed = torch.nn.Embedding(N, D)
print(type(embed.weight))
word2vec_word_to_idx = {w: i for i, w in enumerate(word2vec['words'])}
print(type(word2vec['vecs']))
for idx, word in vocab['question_idx_to_token'].items():
word2vec_idx = word2vec_word_to_idx.get(word, None)
if word2vec_idx is not None:
embed.weight.data[idx] = word2vec['vecs'][word2vec_idx]
return embed
def expand_embedding_vocab(embed, token_to_idx, word2vec=None, std=0.01):
old_weight = embed.weight.data
old_N, D = old_weight.size()
new_N = 1 + max(idx for idx in token_to_idx.values())
new_weight = old_weight.new(new_N, D).normal_().mul_(std)
new_weight[:old_N].copy_(old_weight)
if word2vec is not None:
num_found = 0
assert D == word2vec['vecs'].size(1), 'Word vector dimension mismatch'
word2vec_token_to_idx = {w: i for i, w in enumerate(word2vec['words'])}
for token, idx in token_to_idx.items():
word2vec_idx = word2vec_token_to_idx.get(token, None)
if idx >= old_N and word2vec_idx is not None:
vec = word2vec['vecs'][word2vec_idx]
new_weight[idx].copy_(vec)
num_found += 1
embed.num_embeddings = new_N
embed.weight.data = new_weight
return embed
|
clevr-iep-main
|
iep/embedding.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
clevr-iep-main
|
iep/__init__.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with and converting between the various data structures
used to represent programs.
"""
def is_chain(program_list):
visited = [False for fn in program_list]
cur_idx = len(program_list) - 1
while True:
visited[cur_idx] = True
inputs = program_list[cur_idx]['inputs']
if len(inputs) == 0:
break
elif len(inputs) == 1:
cur_idx = inputs[0]
elif len(inputs) > 1:
return False
return all(visited)
def list_to_tree(program_list):
def build_subtree(cur):
return {
'function': cur['function'],
'value_inputs': [x for x in cur['value_inputs']],
'inputs': [build_subtree(program_list[i]) for i in cur['inputs']],
}
return build_subtree(program_list[-1])
def tree_to_prefix(program_tree):
output = []
def helper(cur):
output.append({
'function': cur['function'],
'value_inputs': [x for x in cur['value_inputs']],
})
for node in cur['inputs']:
helper(node)
helper(program_tree)
return output
def list_to_prefix(program_list):
return tree_to_prefix(list_to_tree(program_list))
def tree_to_postfix(program_tree):
output = []
def helper(cur):
for node in cur['inputs']:
helper(node)
output.append({
'function': cur['function'],
'value_inputs': [x for x in cur['value_inputs']],
})
helper(program_tree)
return output
def tree_to_list(program_tree):
# First count nodes
def count_nodes(cur):
return 1 + sum(count_nodes(x) for x in cur['inputs'])
num_nodes = count_nodes(program_tree)
output = [None] * num_nodes
def helper(cur, idx):
output[idx] = {
'function': cur['function'],
'value_inputs': [x for x in cur['value_inputs']],
'inputs': [],
}
next_idx = idx - 1
for node in reversed(cur['inputs']):
output[idx]['inputs'].insert(0, next_idx)
next_idx = helper(node, next_idx)
return next_idx
helper(program_tree, num_nodes - 1)
return output
def prefix_to_tree(program_prefix):
program_prefix = [x for x in program_prefix]
def helper():
cur = program_prefix.pop(0)
return {
'function': cur['function'],
'value_inputs': [x for x in cur['value_inputs']],
'inputs': [helper() for _ in range(get_num_inputs(cur))],
}
return helper()
def prefix_to_list(program_prefix):
return tree_to_list(prefix_to_tree(program_prefix))
def list_to_postfix(program_list):
return tree_to_postfix(list_to_tree(program_list))
def postfix_to_tree(program_postfix):
program_postfix = [x for x in program_postfix]
def helper():
cur = program_postfix.pop()
return {
'function': cur['function'],
'value_inputs': [x for x in cur['value_inputs']],
'inputs': [helper() for _ in range(get_num_inputs(cur))][::-1],
}
return helper()
def postfix_to_list(program_postfix):
return tree_to_list(postfix_to_tree(program_postfix))
def function_to_str(f):
value_str = ''
if f['value_inputs']:
value_str = '[%s]' % ','.join(f['value_inputs'])
return '%s%s' % (f['function'], value_str)
def str_to_function(s):
if '[' not in s:
return {
'function': s,
'value_inputs': [],
}
name, value_str = s.replace(']', '').split('[')
return {
'function': name,
'value_inputs': value_str.split(','),
}
def list_to_str(program_list):
return ' '.join(function_to_str(f) for f in program_list)
def get_num_inputs(f):
  # This is a little hacky; it would be better to look this up in metadata.json
if type(f) is str:
f = str_to_function(f)
name = f['function']
if name == 'scene':
return 0
if 'equal' in name or name in ['union', 'intersect', 'less_than', 'greater_than']:
return 2
return 1
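# Hedged usage sketch (not part of the original file): convert a tiny
# CLEVR-style chain program between its list, prefix, and string forms.
# The function names below are illustrative.
if __name__ == '__main__':
    example_program = [
        {'function': 'scene', 'value_inputs': [], 'inputs': []},
        {'function': 'filter_color', 'value_inputs': ['red'], 'inputs': [0]},
        {'function': 'count', 'value_inputs': [], 'inputs': [1]},
    ]
    assert is_chain(example_program)
    prefix = list_to_prefix(example_program)
    print(list_to_str(prefix))  # count filter_color[red] scene
    assert prefix_to_list(prefix) == example_program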
|
clevr-iep-main
|
iep/programs.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import torch
from iep.models import ModuleNet, Seq2Seq, LstmModel, CnnLstmModel, CnnLstmSaModel
def invert_dict(d):
return {v: k for k, v in d.items()}
def load_vocab(path):
with open(path, 'r') as f:
vocab = json.load(f)
vocab['question_idx_to_token'] = invert_dict(vocab['question_token_to_idx'])
vocab['program_idx_to_token'] = invert_dict(vocab['program_token_to_idx'])
vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])
# Sanity check: make sure <NULL>, <START>, and <END> are consistent
assert vocab['question_token_to_idx']['<NULL>'] == 0
assert vocab['question_token_to_idx']['<START>'] == 1
assert vocab['question_token_to_idx']['<END>'] == 2
assert vocab['program_token_to_idx']['<NULL>'] == 0
assert vocab['program_token_to_idx']['<START>'] == 1
assert vocab['program_token_to_idx']['<END>'] == 2
return vocab
def load_cpu(path):
"""
Loads a torch checkpoint, remapping all Tensors to CPU
"""
return torch.load(path, map_location=lambda storage, loc: storage)
def load_program_generator(path):
checkpoint = load_cpu(path)
kwargs = checkpoint['program_generator_kwargs']
state = checkpoint['program_generator_state']
model = Seq2Seq(**kwargs)
model.load_state_dict(state)
return model, kwargs
def load_execution_engine(path, verbose=True):
checkpoint = load_cpu(path)
kwargs = checkpoint['execution_engine_kwargs']
state = checkpoint['execution_engine_state']
kwargs['verbose'] = verbose
model = ModuleNet(**kwargs)
cur_state = model.state_dict()
model.load_state_dict(state)
return model, kwargs
def load_baseline(path):
model_cls_dict = {
'LSTM': LstmModel,
'CNN+LSTM': CnnLstmModel,
'CNN+LSTM+SA': CnnLstmSaModel,
}
checkpoint = load_cpu(path)
baseline_type = checkpoint['baseline_type']
kwargs = checkpoint['baseline_kwargs']
state = checkpoint['baseline_state']
model = model_cls_dict[baseline_type](**kwargs)
model.load_state_dict(state)
return model, kwargs
|
clevr-iep-main
|
iep/utils.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import h5py
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import iep.programs
def _dataset_to_tensor(dset, mask=None):
arr = np.asarray(dset, dtype=np.int64)
if mask is not None:
arr = arr[mask]
tensor = torch.LongTensor(arr)
return tensor
class ClevrDataset(Dataset):
def __init__(self, question_h5, feature_h5, vocab, mode='prefix',
image_h5=None, max_samples=None, question_families=None,
image_idx_start_from=None):
mode_choices = ['prefix', 'postfix']
if mode not in mode_choices:
raise ValueError('Invalid mode "%s"' % mode)
self.image_h5 = image_h5
self.vocab = vocab
self.feature_h5 = feature_h5
self.mode = mode
self.max_samples = max_samples
mask = None
if question_families is not None:
# Use only the specified families
all_families = np.asarray(question_h5['question_families'])
N = all_families.shape[0]
print(question_families)
target_families = np.asarray(question_families)[:, None]
mask = (all_families == target_families).any(axis=0)
if image_idx_start_from is not None:
all_image_idxs = np.asarray(question_h5['image_idxs'])
mask = all_image_idxs >= image_idx_start_from
# Data from the question file is small, so read it all into memory
print('Reading question data into memory')
self.all_questions = _dataset_to_tensor(question_h5['questions'], mask)
self.all_image_idxs = _dataset_to_tensor(question_h5['image_idxs'], mask)
self.all_programs = None
if 'programs' in question_h5:
self.all_programs = _dataset_to_tensor(question_h5['programs'], mask)
self.all_answers = _dataset_to_tensor(question_h5['answers'], mask)
def __getitem__(self, index):
question = self.all_questions[index]
image_idx = self.all_image_idxs[index]
answer = self.all_answers[index]
program_seq = None
if self.all_programs is not None:
program_seq = self.all_programs[index]
image = None
if self.image_h5 is not None:
image = self.image_h5['images'][image_idx]
image = torch.FloatTensor(np.asarray(image, dtype=np.float32))
feats = self.feature_h5['features'][image_idx]
feats = torch.FloatTensor(np.asarray(feats, dtype=np.float32))
program_json = None
if program_seq is not None:
program_json_seq = []
for fn_idx in program_seq:
fn_str = self.vocab['program_idx_to_token'][fn_idx]
if fn_str == '<START>' or fn_str == '<END>': continue
fn = iep.programs.str_to_function(fn_str)
program_json_seq.append(fn)
if self.mode == 'prefix':
program_json = iep.programs.prefix_to_list(program_json_seq)
elif self.mode == 'postfix':
program_json = iep.programs.postfix_to_list(program_json_seq)
return (question, image, feats, answer, program_seq, program_json)
def __len__(self):
if self.max_samples is None:
return self.all_questions.size(0)
else:
return min(self.max_samples, self.all_questions.size(0))
class ClevrDataLoader(DataLoader):
def __init__(self, **kwargs):
if 'question_h5' not in kwargs:
raise ValueError('Must give question_h5')
if 'feature_h5' not in kwargs:
raise ValueError('Must give feature_h5')
if 'vocab' not in kwargs:
raise ValueError('Must give vocab')
feature_h5_path = kwargs.pop('feature_h5')
print('Reading features from ', feature_h5_path)
self.feature_h5 = h5py.File(feature_h5_path, 'r')
self.image_h5 = None
if 'image_h5' in kwargs:
image_h5_path = kwargs.pop('image_h5')
print('Reading images from ', image_h5_path)
self.image_h5 = h5py.File(image_h5_path, 'r')
vocab = kwargs.pop('vocab')
mode = kwargs.pop('mode', 'prefix')
question_families = kwargs.pop('question_families', None)
max_samples = kwargs.pop('max_samples', None)
question_h5_path = kwargs.pop('question_h5')
image_idx_start_from = kwargs.pop('image_idx_start_from', None)
print('Reading questions from ', question_h5_path)
with h5py.File(question_h5_path, 'r') as question_h5:
self.dataset = ClevrDataset(question_h5, self.feature_h5, vocab, mode,
image_h5=self.image_h5,
max_samples=max_samples,
question_families=question_families,
image_idx_start_from=image_idx_start_from)
kwargs['collate_fn'] = clevr_collate
super(ClevrDataLoader, self).__init__(self.dataset, **kwargs)
def close(self):
if self.image_h5 is not None:
self.image_h5.close()
if self.feature_h5 is not None:
self.feature_h5.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def clevr_collate(batch):
transposed = list(zip(*batch))
question_batch = default_collate(transposed[0])
image_batch = transposed[1]
if any(img is not None for img in image_batch):
image_batch = default_collate(image_batch)
feat_batch = transposed[2]
if any(f is not None for f in feat_batch):
feat_batch = default_collate(feat_batch)
answer_batch = default_collate(transposed[3])
program_seq_batch = transposed[4]
if transposed[4][0] is not None:
program_seq_batch = default_collate(transposed[4])
program_struct_batch = transposed[5]
return [question_batch, image_batch, feat_batch, answer_batch, program_seq_batch, program_struct_batch]
|
clevr-iep-main
|
iep/data.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from iep.models.layers import ResidualBlock
from iep.embedding import expand_embedding_vocab
class StackedAttention(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(StackedAttention, self).__init__()
self.Wv = nn.Conv2d(input_dim, hidden_dim, kernel_size=1, padding=0)
self.Wu = nn.Linear(input_dim, hidden_dim)
self.Wp = nn.Conv2d(hidden_dim, 1, kernel_size=1, padding=0)
self.hidden_dim = hidden_dim
self.attention_maps = None
def forward(self, v, u):
"""
Input:
- v: N x D x H x W
- u: N x D
Returns:
- next_u: N x D
"""
N, K = v.size(0), self.hidden_dim
D, H, W = v.size(1), v.size(2), v.size(3)
v_proj = self.Wv(v) # N x K x H x W
u_proj = self.Wu(u) # N x K
u_proj_expand = u_proj.view(N, K, 1, 1).expand(N, K, H, W)
h = F.tanh(v_proj + u_proj_expand)
p = F.softmax(self.Wp(h).view(N, H * W)).view(N, 1, H, W)
self.attention_maps = p.data.clone()
v_tilde = (p.expand_as(v) * v).sum(2).sum(3).view(N, D)
next_u = u + v_tilde
return next_u
class LstmEncoder(nn.Module):
def __init__(self, token_to_idx, wordvec_dim=300,
rnn_dim=256, rnn_num_layers=2, rnn_dropout=0):
super(LstmEncoder, self).__init__()
self.token_to_idx = token_to_idx
self.NULL = token_to_idx['<NULL>']
self.START = token_to_idx['<START>']
self.END = token_to_idx['<END>']
self.embed = nn.Embedding(len(token_to_idx), wordvec_dim)
self.rnn = nn.LSTM(wordvec_dim, rnn_dim, rnn_num_layers,
dropout=rnn_dropout, batch_first=True)
def expand_vocab(self, token_to_idx, word2vec=None, std=0.01):
expand_embedding_vocab(self.embed, token_to_idx,
word2vec=word2vec, std=std)
def forward(self, x):
N, T = x.size()
idx = torch.LongTensor(N).fill_(T - 1)
# Find the last non-null element in each sequence
x_cpu = x.data.cpu()
for i in range(N):
for t in range(T - 1):
if x_cpu[i, t] != self.NULL and x_cpu[i, t + 1] == self.NULL:
idx[i] = t
break
idx = idx.type_as(x.data).long()
idx = Variable(idx, requires_grad=False)
hs, _ = self.rnn(self.embed(x))
idx = idx.view(N, 1, 1).expand(N, 1, hs.size(2))
H = hs.size(2)
return hs.gather(1, idx).view(N, H)
def build_cnn(feat_dim=(1024, 14, 14),
res_block_dim=128,
num_res_blocks=0,
proj_dim=512,
pooling='maxpool2'):
C, H, W = feat_dim
layers = []
if num_res_blocks > 0:
layers.append(nn.Conv2d(C, res_block_dim, kernel_size=3, padding=1))
layers.append(nn.ReLU(inplace=True))
C = res_block_dim
for _ in range(num_res_blocks):
layers.append(ResidualBlock(C))
if proj_dim > 0:
layers.append(nn.Conv2d(C, proj_dim, kernel_size=1, padding=0))
layers.append(nn.ReLU(inplace=True))
C = proj_dim
if pooling == 'maxpool2':
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
H, W = H // 2, W // 2
return nn.Sequential(*layers), (C, H, W)
def build_mlp(input_dim, hidden_dims, output_dim,
use_batchnorm=False, dropout=0):
layers = []
D = input_dim
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
if use_batchnorm:
layers.append(nn.BatchNorm1d(input_dim))
for dim in hidden_dims:
layers.append(nn.Linear(D, dim))
if use_batchnorm:
layers.append(nn.BatchNorm1d(dim))
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
layers.append(nn.ReLU(inplace=True))
D = dim
layers.append(nn.Linear(D, output_dim))
return nn.Sequential(*layers)
class LstmModel(nn.Module):
def __init__(self, vocab,
rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
fc_use_batchnorm=False, fc_dropout=0, fc_dims=(1024,)):
super(LstmModel, self).__init__()
rnn_kwargs = {
'token_to_idx': vocab['question_token_to_idx'],
'wordvec_dim': rnn_wordvec_dim,
'rnn_dim': rnn_dim,
'rnn_num_layers': rnn_num_layers,
'rnn_dropout': rnn_dropout,
}
self.rnn = LstmEncoder(**rnn_kwargs)
classifier_kwargs = {
'input_dim': rnn_dim,
'hidden_dims': fc_dims,
'output_dim': len(vocab['answer_token_to_idx']),
'use_batchnorm': fc_use_batchnorm,
'dropout': fc_dropout,
}
self.classifier = build_mlp(**classifier_kwargs)
def forward(self, questions, feats):
q_feats = self.rnn(questions)
scores = self.classifier(q_feats)
return scores
class CnnLstmModel(nn.Module):
def __init__(self, vocab,
rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
cnn_feat_dim=(1024,14,14),
cnn_res_block_dim=128, cnn_num_res_blocks=0,
cnn_proj_dim=512, cnn_pooling='maxpool2',
fc_dims=(1024,), fc_use_batchnorm=False, fc_dropout=0):
super(CnnLstmModel, self).__init__()
rnn_kwargs = {
'token_to_idx': vocab['question_token_to_idx'],
'wordvec_dim': rnn_wordvec_dim,
'rnn_dim': rnn_dim,
'rnn_num_layers': rnn_num_layers,
'rnn_dropout': rnn_dropout,
}
self.rnn = LstmEncoder(**rnn_kwargs)
cnn_kwargs = {
'feat_dim': cnn_feat_dim,
'res_block_dim': cnn_res_block_dim,
'num_res_blocks': cnn_num_res_blocks,
'proj_dim': cnn_proj_dim,
'pooling': cnn_pooling,
}
self.cnn, (C, H, W) = build_cnn(**cnn_kwargs)
classifier_kwargs = {
'input_dim': C * H * W + rnn_dim,
'hidden_dims': fc_dims,
'output_dim': len(vocab['answer_token_to_idx']),
'use_batchnorm': fc_use_batchnorm,
'dropout': fc_dropout,
}
self.classifier = build_mlp(**classifier_kwargs)
def forward(self, questions, feats):
N = questions.size(0)
assert N == feats.size(0)
q_feats = self.rnn(questions)
img_feats = self.cnn(feats)
cat_feats = torch.cat([q_feats, img_feats.view(N, -1)], 1)
scores = self.classifier(cat_feats)
return scores
class CnnLstmSaModel(nn.Module):
def __init__(self, vocab,
rnn_wordvec_dim=300, rnn_dim=256, rnn_num_layers=2, rnn_dropout=0,
cnn_feat_dim=(1024,14,14),
stacked_attn_dim=512, num_stacked_attn=2,
fc_use_batchnorm=False, fc_dropout=0, fc_dims=(1024,)):
super(CnnLstmSaModel, self).__init__()
rnn_kwargs = {
'token_to_idx': vocab['question_token_to_idx'],
'wordvec_dim': rnn_wordvec_dim,
'rnn_dim': rnn_dim,
'rnn_num_layers': rnn_num_layers,
'rnn_dropout': rnn_dropout,
}
self.rnn = LstmEncoder(**rnn_kwargs)
C, H, W = cnn_feat_dim
self.image_proj = nn.Conv2d(C, rnn_dim, kernel_size=1, padding=0)
self.stacked_attns = []
for i in range(num_stacked_attn):
sa = StackedAttention(rnn_dim, stacked_attn_dim)
self.stacked_attns.append(sa)
self.add_module('stacked-attn-%d' % i, sa)
classifier_args = {
'input_dim': rnn_dim,
'hidden_dims': fc_dims,
'output_dim': len(vocab['answer_token_to_idx']),
'use_batchnorm': fc_use_batchnorm,
'dropout': fc_dropout,
}
self.classifier = build_mlp(**classifier_args)
def forward(self, questions, feats):
u = self.rnn(questions)
v = self.image_proj(feats)
for sa in self.stacked_attns:
u = sa(v, u)
scores = self.classifier(u)
return scores
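# Hedged smoke-test sketch (not part of the original file): shape-check the
# feature CNN and classifier MLP builders defined above on random inputs.
if __name__ == '__main__':
    cnn, (C, H, W) = build_cnn(feat_dim=(1024, 14, 14), num_res_blocks=2)
    feats = torch.randn(2, 1024, 14, 14)
    img_feats = cnn(feats)
    print(img_feats.size())  # (2, 512, 7, 7)
    mlp = build_mlp(C * H * W, hidden_dims=(1024,), output_dim=28)
    print(mlp(img_feats.view(2, -1)).size())  # (2, 28)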
|
clevr-iep-main
|
iep/models/baselines.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models
from iep.models.layers import ResidualBlock, GlobalAveragePool, Flatten
import iep.programs
class ConcatBlock(nn.Module):
def __init__(self, dim, with_residual=True, with_batchnorm=True):
super(ConcatBlock, self).__init__()
self.proj = nn.Conv2d(2 * dim, dim, kernel_size=1, padding=0)
self.res_block = ResidualBlock(dim, with_residual=with_residual,
with_batchnorm=with_batchnorm)
def forward(self, x, y):
    out = torch.cat([x, y], 1)  # Concatenate along depth
out = F.relu(self.proj(out))
out = self.res_block(out)
return out
def build_stem(feature_dim, module_dim, num_layers=2, with_batchnorm=True):
layers = []
prev_dim = feature_dim
for i in range(num_layers):
layers.append(nn.Conv2d(prev_dim, module_dim, kernel_size=3, padding=1))
if with_batchnorm:
layers.append(nn.BatchNorm2d(module_dim))
layers.append(nn.ReLU(inplace=True))
prev_dim = module_dim
return nn.Sequential(*layers)
def build_classifier(module_C, module_H, module_W, num_answers,
fc_dims=[], proj_dim=None, downsample='maxpool2',
with_batchnorm=True, dropout=0):
layers = []
prev_dim = module_C * module_H * module_W
if proj_dim is not None and proj_dim > 0:
layers.append(nn.Conv2d(module_C, proj_dim, kernel_size=1))
if with_batchnorm:
layers.append(nn.BatchNorm2d(proj_dim))
layers.append(nn.ReLU(inplace=True))
prev_dim = proj_dim * module_H * module_W
if downsample == 'maxpool2':
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
prev_dim //= 4
elif downsample == 'maxpool4':
layers.append(nn.MaxPool2d(kernel_size=4, stride=4))
prev_dim //= 16
layers.append(Flatten())
for next_dim in fc_dims:
layers.append(nn.Linear(prev_dim, next_dim))
if with_batchnorm:
layers.append(nn.BatchNorm1d(next_dim))
layers.append(nn.ReLU(inplace=True))
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
prev_dim = next_dim
layers.append(nn.Linear(prev_dim, num_answers))
return nn.Sequential(*layers)
class ModuleNet(nn.Module):
def __init__(self, vocab, feature_dim=(1024, 14, 14),
stem_num_layers=2,
stem_batchnorm=False,
module_dim=128,
module_residual=True,
module_batchnorm=False,
classifier_proj_dim=512,
classifier_downsample='maxpool2',
classifier_fc_layers=(1024,),
classifier_batchnorm=False,
classifier_dropout=0,
verbose=True):
super(ModuleNet, self).__init__()
self.stem = build_stem(feature_dim[0], module_dim,
num_layers=stem_num_layers,
with_batchnorm=stem_batchnorm)
if verbose:
print('Here is my stem:')
print(self.stem)
num_answers = len(vocab['answer_idx_to_token'])
module_H, module_W = feature_dim[1], feature_dim[2]
self.classifier = build_classifier(module_dim, module_H, module_W, num_answers,
classifier_fc_layers,
classifier_proj_dim,
classifier_downsample,
with_batchnorm=classifier_batchnorm,
dropout=classifier_dropout)
if verbose:
print('Here is my classifier:')
print(self.classifier)
self.stem_times = []
self.module_times = []
self.classifier_times = []
self.timing = False
self.function_modules = {}
self.function_modules_num_inputs = {}
self.vocab = vocab
for fn_str in vocab['program_token_to_idx']:
num_inputs = iep.programs.get_num_inputs(fn_str)
self.function_modules_num_inputs[fn_str] = num_inputs
if fn_str == 'scene' or num_inputs == 1:
mod = ResidualBlock(module_dim,
with_residual=module_residual,
with_batchnorm=module_batchnorm)
elif num_inputs == 2:
mod = ConcatBlock(module_dim,
with_residual=module_residual,
with_batchnorm=module_batchnorm)
self.add_module(fn_str, mod)
self.function_modules[fn_str] = mod
self.save_module_outputs = False
def expand_answer_vocab(self, answer_to_idx, std=0.01, init_b=-50):
# TODO: This is really gross, dipping into private internals of Sequential
final_linear_key = str(len(self.classifier._modules) - 1)
final_linear = self.classifier._modules[final_linear_key]
old_weight = final_linear.weight.data
old_bias = final_linear.bias.data
old_N, D = old_weight.size()
new_N = 1 + max(answer_to_idx.values())
new_weight = old_weight.new(new_N, D).normal_().mul_(std)
new_bias = old_bias.new(new_N).fill_(init_b)
new_weight[:old_N].copy_(old_weight)
new_bias[:old_N].copy_(old_bias)
final_linear.weight.data = new_weight
final_linear.bias.data = new_bias
def _forward_modules_json(self, feats, program):
def gen_hook(i, j):
def hook(grad):
self.all_module_grad_outputs[i][j] = grad.data.cpu().clone()
return hook
self.all_module_outputs = []
self.all_module_grad_outputs = []
# We can't easily handle minibatching of modules, so just do a loop
N = feats.size(0)
final_module_outputs = []
for i in range(N):
if self.save_module_outputs:
self.all_module_outputs.append([])
self.all_module_grad_outputs.append([None] * len(program[i]))
module_outputs = []
for j, f in enumerate(program[i]):
f_str = iep.programs.function_to_str(f)
module = self.function_modules[f_str]
if f_str == 'scene':
module_inputs = [feats[i:i+1]]
else:
module_inputs = [module_outputs[j] for j in f['inputs']]
module_outputs.append(module(*module_inputs))
if self.save_module_outputs:
self.all_module_outputs[-1].append(module_outputs[-1].data.cpu().clone())
module_outputs[-1].register_hook(gen_hook(i, j))
final_module_outputs.append(module_outputs[-1])
final_module_outputs = torch.cat(final_module_outputs, 0)
return final_module_outputs
def _forward_modules_ints_helper(self, feats, program, i, j):
used_fn_j = True
if j < program.size(1):
fn_idx = program.data[i, j]
fn_str = self.vocab['program_idx_to_token'][fn_idx]
else:
used_fn_j = False
fn_str = 'scene'
if fn_str == '<NULL>':
used_fn_j = False
fn_str = 'scene'
elif fn_str == '<START>':
used_fn_j = False
return self._forward_modules_ints_helper(feats, program, i, j + 1)
if used_fn_j:
self.used_fns[i, j] = 1
j += 1
module = self.function_modules[fn_str]
if fn_str == 'scene':
module_inputs = [feats[i:i+1]]
else:
num_inputs = self.function_modules_num_inputs[fn_str]
module_inputs = []
while len(module_inputs) < num_inputs:
cur_input, j = self._forward_modules_ints_helper(feats, program, i, j)
module_inputs.append(cur_input)
module_output = module(*module_inputs)
return module_output, j
def _forward_modules_ints(self, feats, program):
"""
feats: FloatTensor of shape (N, C, H, W) giving features for each image
program: LongTensor of shape (N, L) giving a prefix-encoded program for
each image.
"""
N = feats.size(0)
final_module_outputs = []
self.used_fns = torch.Tensor(program.size()).fill_(0)
for i in range(N):
cur_output, _ = self._forward_modules_ints_helper(feats, program, i, 0)
final_module_outputs.append(cur_output)
self.used_fns = self.used_fns.type_as(program.data).float()
final_module_outputs = torch.cat(final_module_outputs, 0)
return final_module_outputs
def forward(self, x, program):
N = x.size(0)
assert N == len(program)
feats = self.stem(x)
if type(program) is list or type(program) is tuple:
final_module_outputs = self._forward_modules_json(feats, program)
elif type(program) is Variable and program.dim() == 2:
final_module_outputs = self._forward_modules_ints(feats, program)
else:
raise ValueError('Unrecognized program format')
    # After running modules for each input, concatenate the outputs from the
# final module and run the classifier.
out = self.classifier(final_module_outputs)
return out
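# Hedged smoke-test sketch (not part of the original file): run ModuleNet on a
# random feature map with a single chain program in the JSON list format that
# _forward_modules_json expects. The tiny vocab below is illustrative.
if __name__ == '__main__':
    toy_vocab = {
        'program_token_to_idx': {'scene': 0, 'count': 1},
        'answer_idx_to_token': {0: 'yes', 1: 'no'},
    }
    net = ModuleNet(toy_vocab, verbose=False)
    x = Variable(torch.randn(1, 1024, 14, 14))
    program = [[
        {'function': 'scene', 'value_inputs': [], 'inputs': []},
        {'function': 'count', 'value_inputs': [], 'inputs': [0]},
    ]]
    print(net(x, program).size())  # (1, 2) answer scores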
|
clevr-iep-main
|
iep/models/module_net.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from iep.models.module_net import ModuleNet
from iep.models.seq2seq import Seq2Seq
from iep.models.baselines import LstmModel, CnnLstmModel, CnnLstmSaModel
|
clevr-iep-main
|
iep/models/__init__.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, in_dim, out_dim=None, with_residual=True, with_batchnorm=True):
if out_dim is None:
out_dim = in_dim
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=1)
self.with_batchnorm = with_batchnorm
if with_batchnorm:
self.bn1 = nn.BatchNorm2d(out_dim)
self.bn2 = nn.BatchNorm2d(out_dim)
self.with_residual = with_residual
if in_dim == out_dim or not with_residual:
self.proj = None
else:
self.proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
def forward(self, x):
if self.with_batchnorm:
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
else:
out = self.conv2(F.relu(self.conv1(x)))
res = x if self.proj is None else self.proj(x)
if self.with_residual:
out = F.relu(res + out)
else:
out = F.relu(out)
return out
class GlobalAveragePool(nn.Module):
def forward(self, x):
N, C = x.size(0), x.size(1)
return x.view(N, C, -1).mean(2).squeeze(2)
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
|
clevr-iep-main
|
iep/models/layers.py
|
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from iep.embedding import expand_embedding_vocab
class Seq2Seq(nn.Module):
def __init__(self,
encoder_vocab_size=100,
decoder_vocab_size=100,
wordvec_dim=300,
hidden_dim=256,
rnn_num_layers=2,
rnn_dropout=0,
null_token=0,
start_token=1,
end_token=2,
encoder_embed=None
):
super(Seq2Seq, self).__init__()
self.encoder_embed = nn.Embedding(encoder_vocab_size, wordvec_dim)
self.encoder_rnn = nn.LSTM(wordvec_dim, hidden_dim, rnn_num_layers,
dropout=rnn_dropout, batch_first=True)
self.decoder_embed = nn.Embedding(decoder_vocab_size, wordvec_dim)
self.decoder_rnn = nn.LSTM(wordvec_dim + hidden_dim, hidden_dim, rnn_num_layers,
dropout=rnn_dropout, batch_first=True)
self.decoder_linear = nn.Linear(hidden_dim, decoder_vocab_size)
self.NULL = null_token
self.START = start_token
self.END = end_token
self.multinomial_outputs = None
def expand_encoder_vocab(self, token_to_idx, word2vec=None, std=0.01):
expand_embedding_vocab(self.encoder_embed, token_to_idx,
word2vec=word2vec, std=std)
def get_dims(self, x=None, y=None):
V_in = self.encoder_embed.num_embeddings
V_out = self.decoder_embed.num_embeddings
D = self.encoder_embed.embedding_dim
H = self.encoder_rnn.hidden_size
L = self.encoder_rnn.num_layers
N = x.size(0) if x is not None else None
N = y.size(0) if N is None and y is not None else N
T_in = x.size(1) if x is not None else None
T_out = y.size(1) if y is not None else None
return V_in, V_out, D, H, L, N, T_in, T_out
def before_rnn(self, x, replace=0):
# TODO: Use PackedSequence instead of manually plucking out the last
# non-NULL entry of each sequence; it is cleaner and more efficient.
N, T = x.size()
idx = torch.LongTensor(N).fill_(T - 1)
# Find the last non-null element in each sequence. Is there a clean
# way to do this?
x_cpu = x.cpu()
for i in range(N):
for t in range(T - 1):
if x_cpu.data[i, t] != self.NULL and x_cpu.data[i, t + 1] == self.NULL:
idx[i] = t
break
idx = idx.type_as(x.data)
x[x.data == self.NULL] = replace
return x, Variable(idx)
def encoder(self, x):
V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(x=x)
x, idx = self.before_rnn(x)
embed = self.encoder_embed(x)
h0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
c0 = Variable(torch.zeros(L, N, H).type_as(embed.data))
out, _ = self.encoder_rnn(embed, (h0, c0))
# Pull out the hidden state for the last non-null value in each input
idx = idx.view(N, 1, 1).expand(N, 1, H)
return out.gather(1, idx).view(N, H)
def decoder(self, encoded, y, h0=None, c0=None):
V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
if T_out > 1:
y, _ = self.before_rnn(y)
y_embed = self.decoder_embed(y)
encoded_repeat = encoded.view(N, 1, H).expand(N, T_out, H)
rnn_input = torch.cat([encoded_repeat, y_embed], 2)
if h0 is None:
h0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
if c0 is None:
c0 = Variable(torch.zeros(L, N, H).type_as(encoded.data))
rnn_output, (ht, ct) = self.decoder_rnn(rnn_input, (h0, c0))
rnn_output_2d = rnn_output.contiguous().view(N * T_out, H)
output_logprobs = self.decoder_linear(rnn_output_2d).view(N, T_out, V_out)
return output_logprobs, ht, ct
def compute_loss(self, output_logprobs, y):
"""
Compute loss. We assume that the first element of the output sequence y is
a start token, and that each element of y is left-aligned and right-padded
with self.NULL out to T_out. We want the output_logprobs to predict the
sequence y, shifted by one timestep so that y[0] is fed to the network and
then y[1] is predicted. We also don't want to compute loss for padded
timesteps.
Inputs:
- output_logprobs: Variable of shape (N, T_out, V_out)
- y: LongTensor Variable of shape (N, T_out)
"""
self.multinomial_outputs = None
V_in, V_out, D, H, L, N, T_in, T_out = self.get_dims(y=y)
mask = y.data != self.NULL
y_mask = Variable(torch.Tensor(N, T_out).fill_(0).type_as(mask))
y_mask[:, 1:] = mask[:, 1:]
y_masked = y[y_mask]
out_mask = Variable(torch.Tensor(N, T_out).fill_(0).type_as(mask))
out_mask[:, :-1] = mask[:, 1:]
out_mask = out_mask.view(N, T_out, 1).expand(N, T_out, V_out)
out_masked = output_logprobs[out_mask].view(-1, V_out)
loss = F.cross_entropy(out_masked, y_masked)
return loss
def forward(self, x, y):
encoded = self.encoder(x)
output_logprobs, _, _ = self.decoder(encoded, y)
loss = self.compute_loss(output_logprobs, y)
return loss
def sample(self, x, max_length=50):
# TODO: Handle sampling for minibatch inputs
# TODO: Beam search?
self.multinomial_outputs = None
assert x.size(0) == 1, "Sampling minibatches not implemented"
encoded = self.encoder(x)
y = [self.START]
h0, c0 = None, None
while True:
cur_y = Variable(torch.LongTensor([y[-1]]).type_as(x.data).view(1, 1))
logprobs, h0, c0 = self.decoder(encoded, cur_y, h0=h0, c0=c0)
_, next_y = logprobs.data.max(2)
y.append(next_y[0, 0, 0])
if len(y) >= max_length or y[-1] == self.END:
break
return y
def reinforce_sample(self, x, max_length=30, temperature=1.0, argmax=False):
N, T = x.size(0), max_length
encoded = self.encoder(x)
y = torch.LongTensor(N, T).fill_(self.NULL)
done = torch.ByteTensor(N).fill_(0)
cur_input = Variable(x.data.new(N, 1).fill_(self.START))
h, c = None, None
self.multinomial_outputs = []
self.multinomial_probs = []
for t in range(T):
# logprobs is N x 1 x V
logprobs, h, c = self.decoder(encoded, cur_input, h0=h, c0=c)
logprobs = logprobs / temperature
probs = F.softmax(logprobs.view(N, -1)) # Now N x V
if argmax:
_, cur_output = probs.max(1)
else:
cur_output = probs.multinomial() # Now N x 1
self.multinomial_outputs.append(cur_output)
self.multinomial_probs.append(probs)
cur_output_data = cur_output.data.cpu()
not_done = logical_not(done)
y[:, t][not_done] = cur_output_data[not_done]
done = logical_or(done, cur_output_data.cpu() == self.END)
cur_input = cur_output
if done.sum() == N:
break
return Variable(y.type_as(x.data))
def reinforce_backward(self, reward, output_mask=None):
"""
If output_mask is not None, then it should be a FloatTensor of shape (N, T)
giving a multiplier to the output.
"""
assert self.multinomial_outputs is not None, 'Must call reinforce_sample first'
grad_output = []
def gen_hook(mask):
def hook(grad):
return grad * mask.contiguous().view(-1, 1).expand_as(grad)
return hook
if output_mask is not None:
for t, probs in enumerate(self.multinomial_probs):
mask = Variable(output_mask[:, t])
probs.register_hook(gen_hook(mask))
for sampled_output in self.multinomial_outputs:
sampled_output.reinforce(reward)
grad_output.append(None)
torch.autograd.backward(self.multinomial_outputs, grad_output, retain_variables=True)
def logical_and(x, y):
return x * y
def logical_or(x, y):
return (x + y).clamp_(0, 1)
def logical_not(x):
return x == 0
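# Hedged smoke-test sketch (not part of the original file): a tiny Seq2Seq
# forward pass on random token ids, using the same old-style Variable API as
# the module above. Vocabulary sizes and sequence lengths are arbitrary.
if __name__ == '__main__':
    model = Seq2Seq(encoder_vocab_size=20, decoder_vocab_size=15)
    x = Variable(torch.LongTensor(2, 6).random_(3, 20))
    y = Variable(torch.LongTensor(2, 5).random_(3, 15))
    y.data[:, 0] = model.START
    print(model(x, y))  # scalar cross-entropy loss over the shifted targets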
|
clevr-iep-main
|
iep/models/seq2seq.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
class Battery:
capacity = 0 # Max MWh storage capacity
current_load = 0 # Current load in the battery, in MWh
def __init__(self, capacity, current_load=0):
self.capacity = capacity
self.current_load = current_load
# charge the battery based on an hourly load
# returns the total load after charging with input_load
def charge(self, input_load):
self.current_load = self.current_load + input_load
if(self.current_load > self.capacity):
self.current_load = self.capacity
return self.current_load
# returns how much energy is discharged when
# output_load is drawn from the battery in an hour
def discharge(self, output_load):
self.current_load = self.current_load - output_load
if(self.current_load < 0): # not enough battery load
lacking_amount = self.current_load
self.current_load = 0
return output_load + lacking_amount
return output_load
def is_full(self):
return (self.capacity == self.current_load)
# calculate the minimum battery capacity required
# to be able to charge it with input_load
# amount of energy within an hour and
# expand the existing capacity with that amount
def find_and_init_capacity(self, input_load):
self.capacity = self.capacity + input_load
# Battery model that includes efficiency and
# linear charging/discharging rate limits with respect to battery capacity
# refer to C/L/C model in following reference for details:
# "Tractable lithium-ion storage models for optimizing energy systems."
# Energy Informatics 2.1 (2019): 1-22.
class Battery2:
capacity = 0 # Max MWh storage capacity
current_load = 0 # Current load in the battery, in MWh
# charging and discharging efficiency, including DC-AC inverter loss
eff_c = 1
eff_d = 1
c_lim = 3
d_lim = 3
# Maximum charging energy in one time step
# is limited by (u * applied power) + v
upper_lim_u = 0
upper_lim_v = 1
# Maximum discharged energy in one time step
# is limited by (u * applied power) + v
lower_lim_u = 0
lower_lim_v = 0
# NMC: eff_c = 0.98, eff_d = 1.05,
# c_lim = 3, d_lim = 3,
# upper_u = -0.125, upper_v = 1,
# lower_u = 0.05, lower_v = 0
# defaults for lithium NMC cell
def __init__(self, capacity, current_load=0,
eff_c = 0.97, eff_d = 1.04,
c_lim = 3, d_lim = 3,
upper_u = -0.04, upper_v = 1,
lower_u = 0.01, lower_v = 0):
self.capacity = capacity
self.current_load = current_load
self.eff_c = eff_c
self.eff_d = eff_d
self.c_lim = c_lim
self.d_lim = d_lim
self.upper_lim_u = upper_u
self.upper_lim_v = upper_v
self.lower_lim_u = lower_u
self.lower_lim_v = lower_v
def calc_max_charge(self, T_u):
# energy content in current (next) time step: b_k (b_{k+1}, which is just b_k + p_k*eff_c)
# charging power in current time step: p_k
# b_{k+1} <= u * p_k + v is equivalent to
# p_k <= (v - b_k) / (eff_c - u)
max_charge = min((self.capacity/self.eff_c) * self.c_lim,
(self.upper_lim_v*self.capacity - self.current_load)/((self.eff_c*T_u) - self.upper_lim_u))
return max_charge
def calc_max_discharge(self, T_u):
# energy content in current (next) time step: b_k (b_{k+1}, which is just b_k - p_k*eff_d)
# charging power in current time step: p_k
# b_{k+1} <= u * p_k + v is equivalent to
# p_k <= (b_k - v) / (u + eff_d)
max_discharge = min((self.capacity/self.eff_d) * self.d_lim,
(self.current_load - self.lower_lim_v*self.capacity)/(self.lower_lim_u + (self.eff_d*T_u)))
return max_discharge
# charge the battery based on an hourly load
# returns the total load after charging with input_load
def charge(self, input_load, T_u):
max_charge = self.calc_max_charge(T_u)
self.current_load = self.current_load + (min(max_charge, input_load) * self.eff_c * T_u)
return self.current_load
    # returns how much energy is discharged when output_load is drawn
    # from the battery over T_u hours (callers typically pass T_u = 1/60)
def discharge(self, output_load, T_u):
max_discharge = self.calc_max_discharge(T_u)
self.current_load = self.current_load - (min(max_discharge, output_load) * self.eff_d * T_u)
if(max_discharge < output_load): # not enough battery load
return max_discharge*T_u
return output_load*T_u
def is_full(self):
return (self.capacity == self.current_load)
# calculate the minimum battery capacity required
# to be able to charge it with input_load
# amount of energy within an hour and
# expand the existing capacity with that amount
def find_and_init_capacity(self, input_load):
self.capacity = self.capacity + input_load*self.eff_d
# increase the capacity until we can discharge input_load
# TODO: find analytical value for this
new_capacity = input_load*self.eff_d
while True:
power_lim = (new_capacity - self.lower_lim_v*self.capacity)/(self.lower_lim_u + self.eff_d)
if power_lim < input_load:
self.capacity += 0.1
new_capacity += 0.1
else:
break
# return True if battery can meet all demand, False otherwise
def sim_battery_247(df_ren, df_dc_pow, b):
points_per_hour = 60
for i in range(df_dc_pow.shape[0]):
ren_mw = df_ren[i]
df_dc = df_dc_pow["avg_dc_power_mw"][i]
net_load = ren_mw - df_dc
actual_discharge = 0
# Apply the net power points_per_hour times
for j in range(points_per_hour):
# surplus, charge
if net_load > 0:
b.charge(net_load, 1/points_per_hour)
else:
# deficit, discharge
actual_discharge += b.discharge(-net_load, 1/points_per_hour)
        # if we couldn't discharge enough, exit early: check whether the actual
        # discharge was sufficient to meet the net load (with a small tolerance for imprecision)
if net_load < 0 and actual_discharge < -net_load - 0.0001:
return False
return True
# binary search for smallest battery size that meets all demand
def calculate_247_battery_capacity_b2_sim(df_ren, df_dc_pow, max_bsize):
# first check special case, no battery:
if sim_battery_247(df_ren, df_dc_pow, Battery2(0,0)):
return 0
l = 0
u = max_bsize
while u - l > 0.1:
med = (u + l) / 2
if sim_battery_247(df_ren, df_dc_pow, Battery2(med,med)):
u = med
else:
l = med
# check if max size was too small
if u == max_bsize:
return np.nan
return med
# binary search for smallest battery size that meets all demand
def calculate_247_battery_capacity_b1_sim(df_ren, df_dc_pow, max_bsize):
# first check special case, no battery:
if sim_battery_247(df_ren, df_dc_pow, Battery(0,0)):
return 0
l = 0
u = max_bsize
while u - l > 0.1:
med = (u + l) / 2
if sim_battery_247(df_ren, df_dc_pow, Battery(med,med)):
u = med
else:
l = med
# check if max size was too small
if u == max_bsize:
return np.nan
return med
# Takes renewable supply and dc power as input dataframes
# returns how much battery capacity is needed to make
# dc operate on renewables 24/7
def calculate_247_battery_capacity(df_ren, df_dc_pow):
battery_cap = 0 # return value stored here, capacity needed
b = Battery(0) # start with an empty battery
for i in range(df_dc_pow.shape[0]):
ren_mw = df_ren[i]
df_dc = df_dc_pow["avg_dc_power_mw"][i]
if df_dc > ren_mw: # if there's not enough renewable supply, need to discharge
if(b.capacity == 0):
b.find_and_init_capacity(df_dc - ren_mw) # find how much battery cap needs to be
else:
load_before = b.current_load
if(load_before == 0):
b.find_and_init_capacity(df_dc - ren_mw)
else:
drawn_amount = b.discharge(df_dc - ren_mw)
if(drawn_amount < (df_dc - ren_mw)):
b.find_and_init_capacity((df_dc - ren_mw) - drawn_amount)
else: # there's excess renewable supply, charge batteries
if b.capacity > 0:
b.charge(ren_mw-df_dc)
elif b.is_full():
b = Battery(0)
if b.capacity > 0 and battery_cap != np.nan:
battery_cap = max(battery_cap, b.capacity)
return battery_cap
# Takes battery capacity, renewable supply and dc power as input dataframes
# and calculates how much battery can increase renewable coverage
# returns the non renewable amount that battery cannot cover
def apply_battery(battery_capacity, df_ren, df_dc_pow):
b = Battery2(battery_capacity, battery_capacity)
tot_non_ren_mw = 0 # store the mw amount battery cannot supply here
points_per_hour = 60
for i in range(df_dc_pow.shape[0]):
ren_mw = df_ren[i]
df_dc = df_dc_pow["avg_dc_power_mw"][i]
gap = df_dc - ren_mw
discharged_amount = 0
for j in range(points_per_hour):
# lack or excess renewable supply
if gap > 0: #discharging from battery
discharged_amount += b.discharge(gap, 1/points_per_hour)
else: # charging the battery
b.charge(-gap, 1/points_per_hour)
df_ren[i] += gap * (1 / points_per_hour) # decrease the available renewable energy
if gap > 0:
tot_non_ren_mw = tot_non_ren_mw + gap - discharged_amount
df_ren[i] += discharged_amount # increase the renewables available by the discharged
return tot_non_ren_mw, df_ren
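# Hedged usage sketch (not part of the original file): a 10 MWh ideal battery
# absorbing a surplus and then serving a 4 MWh deficit.
if __name__ == '__main__':
    b = Battery(capacity=10, current_load=2)
    b.charge(5)                    # 7 MWh stored
    served = b.discharge(4)        # 4 MWh drawn, 3 MWh left
    print(b.current_load, served)  # 3 4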
|
CarbonExplorer-main
|
src/battery.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
# Function that calculates pareto frontier given a set of points
def pareto_frontier(Xs, Ys, maxX=True, maxY= True):
# Sort the list in descending order of X
tmp_list = sorted([[Xs[i], Ys[i]] for i in range(len(Xs))], reverse=maxX)
# Start with the first value in the sorted list
front_point = [tmp_list[0]]
for pair in tmp_list[1:]:
if maxY is True:
if pair[1] >= front_point[-1][1]:
front_point.append(pair)
else:
if pair[1] <= front_point[-1][1]:
front_point.append(pair)
frontX = [pair[0] for pair in front_point]
frontY = [pair[1] for pair in front_point]
return frontX, frontY
# Given renewable and dc power dataframes,
# calculate renewable coverage percentage (%)
def calculate_coverage(df_ren, df_dc):
non_ren_mw = 0
for i in range(df_ren.shape[0]):
if df_dc[i] > df_ren[i]:
non_ren_mw = non_ren_mw + df_dc[i] - df_ren[i]
sum_dc = df_dc.sum()
coverage = (sum_dc - non_ren_mw) / sum_dc * 100
# print("Renewable coverage ratio: ", coverage, "%")
return coverage
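# Hedged usage sketch (not part of the original file): keep the non-dominated
# (capacity, coverage) points when lower capacity and higher coverage are both
# preferred, and compute coverage for a tiny synthetic trace. Values are illustrative.
if __name__ == '__main__':
    import numpy as np
    capacities = [1.0, 2.0, 2.5, 3.0, 4.0]
    coverages = [60, 75, 70, 90, 92]
    front_x, front_y = pareto_frontier(capacities, coverages, maxX=False, maxY=True)
    print(list(zip(front_x, front_y)))  # (2.5, 70) is dominated and dropped
    ren = np.array([5.0, 10.0, 3.0])
    dc = np.array([6.0, 6.0, 6.0])
    print(calculate_coverage(ren, dc))  # ~77.8% of the dc load covered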
|
CarbonExplorer-main
|
src/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
# Carbon Aware Scheduling Algorithm, to optimize for 24/7
# takes a dataframe that contains renewable and dc power, dc_all
# applies cas within the flexible_workload_ratio, and max_capacity constraints
# returns the carbon balanced version of the input dataframe, balanced_df
def cas(df_all, flexible_workload_ratio, max_capacity):
# work on 24 hour basis
    # sort the df in terms of ascending renewable energy
    # move flexible_workload_ratio of the load from the hours with the least
    # renewable supply to the hours with surplus renewables, until max_capacity is hit
balanced_df = []
for i in range(0, df_all.shape[0], 24):
sorted_df = df_all[i : i + 24].sort_values(
by=["tot_renewable", "avg_dc_power_mw"]
)
start = 0
end = 23
if sorted_df.shape[0] < 23:
break
work_to_move = 0
while start < end:
renewable_surplus = (
sorted_df["tot_renewable"].iloc[end]
- sorted_df["avg_dc_power_mw"].iloc[end]
)
renewable_gap = (
sorted_df["avg_dc_power_mw"].iloc[start]
- sorted_df["tot_renewable"].iloc[start]
)
available_space = min(
renewable_surplus,
(max_capacity - sorted_df["avg_dc_power_mw"].iloc[end]),
)
if renewable_surplus <= 0:
end = end - 1
continue
if renewable_gap <= 0:
start = start + 1
continue
if work_to_move <= 0 and renewable_gap > 0:
work_to_move = min(
renewable_gap,
(
flexible_workload_ratio
/ 100
* sorted_df["avg_dc_power_mw"].iloc[start]
),
)
if available_space > work_to_move:
sorted_df["avg_dc_power_mw"].iloc[end] = (
sorted_df["avg_dc_power_mw"].iloc[end]
+ work_to_move
)
sorted_df["avg_dc_power_mw"].iloc[start] = (
sorted_df["avg_dc_power_mw"].iloc[start]
- work_to_move
)
start = start + 1
work_to_move = 0
else:
sorted_df["avg_dc_power_mw"].iloc[end] = (
sorted_df["avg_dc_power_mw"].iloc[end]
+ available_space
)
sorted_df["avg_dc_power_mw"].iloc[start] = (
sorted_df["avg_dc_power_mw"].iloc[start]
- available_space
)
work_to_move = work_to_move - available_space
end = end - 1
balanced_df.append(sorted_df)
final_balanced_df = pd.concat(balanced_df).sort_values(by=["index"])
return final_balanced_df
# Carbon Aware Scheduling Algorithm, to optimize for Grid Carbon Mix
def cas_grid_mix(df_all, flexible_workload_ratio, max_capacity):
    # work on 24 hour basis
    # sort the df in terms of ascending carbon intensity
    # move flexible_workload_ratio of the load from the highest carbon intensity hours
    # to the lowest ones, until max_capacity is hit or shifting no longer reduces
    # the average carbon intensity
balanced_df = []
for i in range(0, df_all.shape[0], 24):
sorted_df = df_all[i : i + 24].sort_values(by=["carbon_intensity", "avg_dc_power_mw"])
start = 0
end = 23
if sorted_df.shape[0] < 23:
break
work_to_move = 0
while start < end:
available_space = max_capacity - sorted_df["avg_dc_power_mw"].iloc[start]
if work_to_move <= 0:
work_to_move = flexible_workload_ratio / 100 * sorted_df["avg_dc_power_mw"].iloc[end]
if available_space > work_to_move:
sorted_df["avg_dc_power_mw"].iloc[start] = (
sorted_df["avg_dc_power_mw"].iloc[start] + work_to_move
)
sorted_df["avg_dc_power_mw"].iloc[end] = sorted_df["avg_dc_power_mw"].iloc[end] - work_to_move
end = end - 1
work_to_move = 0
else:
sorted_df["avg_dc_power_mw"].iloc[start] = max_capacity
sorted_df["avg_dc_power_mw"].iloc[end] = (
sorted_df["avg_dc_power_mw"].iloc[end] - available_space
)
work_to_move = work_to_move - available_space
start = start + 1
balanced_df.append(sorted_df)
final_balanced_df = pd.concat(balanced_df).sort_values(by=["index"])
return final_balanced_df
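# Hedged usage sketch (not part of the original file): one synthetic day with
# the columns cas() reads ("index", "avg_dc_power_mw", "tot_renewable").
# Note: the in-place .iloc updates above use chained assignment, so the amount
# actually shifted can vary across pandas versions; the call itself still runs.
if __name__ == '__main__':
    import numpy as np
    hours = 24
    df_all = pd.DataFrame({
        "index": np.arange(hours),
        "avg_dc_power_mw": np.full(hours, 10.0),
        "tot_renewable": 10.0 + 5.0 * np.sin(np.linspace(0.0, 2.0 * np.pi, hours)),
    })
    balanced = cas(df_all, flexible_workload_ratio=20, max_capacity=15.0)
    print(balanced["avg_dc_power_mw"].sum())  # total load is preserved (~240 MWh)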
|
CarbonExplorer-main
|
src/cas.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import wget
import zipfile
import pandas as pd
import math
import re
import numpy as np
# Download EIA's U.S. Electric System Operating Data
def downloadAndExtract(path):
url = "https://api.eia.gov/bulk/EBA.zip"
wget.download(url)
# extract the data
with zipfile.ZipFile("EBA.zip","r") as zip_ref:
zip_ref.extractall(path)
# Split the json into multiple csv files for manual analysis and viewing
# (note: assumes a module-level EIA_bulk_data_dir giving the output directory)
#
def writeCSV(eba_json):
numRecords = eba_json.shape[0]
recordsPerFile = 100
numFiles = math.ceil(numRecords / recordsPerFile)
for i in range(numFiles):
if i == numFiles - 1:
r = range(recordsPerFile * i + 1, numRecords)
else:
r = range(recordsPerFile * i + 1, recordsPerFile * (i + 1))
print("Writing csv records in {0}".format(r))
df = eba_json.iloc[r, :]
df.to_csv("{0}/EBA_sub_{1}.csv".format(EIA_bulk_data_dir,i))
eba_json = None
ba_list = []
ts_list = []
def prepareEIAData(EIA_data_path):
global eba_json
global ba_list
global ts_list
# EBA.txt includes time series for power generation from
# each balancing authority in json format.
#
eba_json = pd.read_json("{0}/EBA.txt".format(EIA_data_path), lines=True)
#writeCSV(eba_json)
# Construct list of BAs (ba_list)
# Construct list of time series (ts_list) using CISO as reference
#
series_id_unique = list(eba_json.series_id.unique())
series_id_unique = list(filter(lambda x: type(x) == str, series_id_unique))
ba_num = 0
for sid in series_id_unique:
m = re.search("EBA.(.+?)-", str(sid))
ba_this = m.group(1)
if ba_this not in ba_list:
ba_list.append(ba_this)
ba_num = ba_num + 1
if ba_this == "CISO":
m = re.search("EBA.CISO-([A-Z\-]+\.)([A-Z\.\-]*)", str(sid))
ts_list.append(m.group(2))
print("EIA data prep done!")
return eba_json, ba_list, ts_list
# Energy types
ng_list = [
"WND", # wind
"SUN", # solar
"WAT", # hydro
"OIL", # oil
"NG", # natural gas
"COL", # coal
"NUC", # nuclear
"OTH", # other
]
# Renewable energy types
rn_list = ["WND", "SUN", "WAT"]
# Carbon intensity of the energy types, gCO2eq/kWh
carbon_intensity = {
"WND": 11,
"SUN": 41,
"WAT": 24,
"OIL": 650,
"NG": 490,
"COL": 820,
"NUC": 12,
"OTH": 230,
}
# Construct dataframe from json
# Target specific balancing authority and day
def extractBARange(ba_idx, start_day, end_day):
global eba_json
start_idx = pd.Timestamp('{0}T00Z'.format(start_day), tz='UTC')
end_idx = pd.Timestamp('{0}T00Z'.format(end_day), tz='UTC')
idx = pd.date_range(start_day, end_day, freq = "H", tz='UTC')
# Loop over generating assets and append data to ba_list
#
ba_list = []
for ng_idx in ng_list:
# Target json for specific balancing authority.
# Note .H series means timestamps are in GMT / UTC
#
series_idx = 'EBA.{0}-ALL.NG.{1}.H'.format(ba_idx, ng_idx)
this_json = eba_json[eba_json['series_id'] == series_idx]
this_json = this_json.reset_index(drop=True)
if this_json.empty:
#print('Dataset does not include {0} data'.format(ng_idx))
ba_list.append([0]*(idx.shape[0])) # append a list with zeros
continue
# Check start/end dates for BA's json include target day
#
start_dat = pd.Timestamp(this_json['start'].reset_index(drop=True)[0])
end_dat = pd.Timestamp(this_json['end'].reset_index(drop=True)[0])
if (start_idx < start_dat):
print('Indexed start ({0}) precedes {1} dataset range ({2})'.format(start_idx, ng_idx, start_dat))
#continue
if (end_idx > end_dat):
print('Indexed end ({0}) beyond {1} dataset range ({2})'.format(end_idx, ng_idx, end_dat))
#continue
# Extract data tuples for target day
# this_json['data'][0] is a list of items x = [date, MWh] tuples
#
tuple_list = this_json['data'][0]
tuple_filtered = list(filter(\
lambda x: (pd.Timestamp(x[0]) >= start_idx) & (pd.Timestamp(x[0]) <= end_idx), \
tuple_list))
df = pd.DataFrame(tuple_filtered, columns =['timestamp', 'power'])
df['timestamp'] = pd.to_datetime(df['timestamp'])
df = df.sort_values(by=['timestamp'], ascending=(True))
df.set_index(pd.DatetimeIndex(df['timestamp']), inplace=True)
df.drop(columns=['timestamp'], inplace=True)
df = df.reindex(index=idx, fill_value=0).reset_index()
ba_list.append(df['power'].tolist())
dfa = pd.DataFrame(np.array(ba_list).transpose(), columns=ng_list)
dfa = dfa.set_index(idx)
return dfa
# Calculate carbon intensity of the grid (kg CO2/MWh)
# Takes a dataframe of energy generation as input (i.e. output of extractBARange)
# Returns a time series of carbon intensity dataframe
def calculateAVGCarbonIntensity(db):
    tot_carbon = None
    # Clip negative generation readings to zero on a copy so the caller's
    # dataframe is left unmodified.
    db = db.clip(lower=0)
    sum_db = db.sum(axis=1)
for c in carbon_intensity:
if tot_carbon is None:
tot_carbon = carbon_intensity[c]*db[c]
else:
tot_carbon = tot_carbon + carbon_intensity[c]*db[c]
tot_carbon = tot_carbon.div(sum_db).to_frame()
tot_carbon.rename(columns={tot_carbon.columns[0]: "carbon_intensity"}, inplace=True)
return tot_carbon
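

# Illustrative usage sketch; the data directory and date range below are
# hypothetical placeholders.
if __name__ == "__main__":
    eia_data_dir = "./data/EIA"  # hypothetical directory containing EBA.txt
    prepareEIAData(eia_data_dir)
    generation = extractBARange("CISO", "2020-01-01", "2020-01-07")
    intensity = calculateAVGCarbonIntensity(generation)
    print(intensity.head())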
|
CarbonExplorer-main
|
src/download_and_process.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
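
# For local development the package can be installed in editable mode with
# `pip install -e .` run from the repository root.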
setuptools.setup(
name="co3d",
version="2.1.0",
author="FAIR",
author_email="dnovotny@fb.com",
packages=setuptools.find_packages(exclude=["tests", "examples"]),
license="LICENSE",
description="Common Objects in 3D codebase",
long_description=open("README.md").read(),
install_requires=[
"numpy",
"Pillow",
"requests",
"tqdm",
"plyfile",
],
)
|
co3d-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
co3d-main
|
tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
import torchvision
from visdom import Visdom
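# Location of the CO3D v2 dataset; the tests expect per-category
# frame_annotations.jgz and sequence_annotations.jgz files under this root.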
_CO3DV2_DATASET_ROOT: str = os.getenv("CO3DV2_DATASET_ROOT", "")
from pytorch3d.implicitron.tools.point_cloud_utils import render_point_cloud_pytorch3d
from pytorch3d.implicitron.tools.config import expand_args_fields
from pytorch3d.implicitron.dataset.visualize import get_implicitron_sequence_pointcloud
from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
from pytorch3d.vis.plotly_vis import plot_scene
class TestDatasetVisualize(unittest.TestCase):
def setUp(self):
torch.manual_seed(42)
category = "skateboard"
dataset_root = _CO3DV2_DATASET_ROOT
frame_file = os.path.join(dataset_root, category, "frame_annotations.jgz")
sequence_file = os.path.join(dataset_root, category, "sequence_annotations.jgz")
self.image_size = 256
expand_args_fields(JsonIndexDataset)
self.datasets = {
"simple": JsonIndexDataset(
frame_annotations_file=frame_file,
sequence_annotations_file=sequence_file,
dataset_root=dataset_root,
image_height=self.image_size,
image_width=self.image_size,
box_crop=True,
load_point_clouds=True,
),
"nonsquare": JsonIndexDataset(
frame_annotations_file=frame_file,
sequence_annotations_file=sequence_file,
dataset_root=dataset_root,
image_height=self.image_size,
image_width=self.image_size // 2,
box_crop=True,
load_point_clouds=True,
),
"nocrop": JsonIndexDataset(
frame_annotations_file=frame_file,
sequence_annotations_file=sequence_file,
dataset_root=dataset_root,
image_height=self.image_size,
image_width=self.image_size // 2,
box_crop=False,
load_point_clouds=True,
),
"nocrop2": JsonIndexDataset(
frame_annotations_file=frame_file,
sequence_annotations_file=sequence_file,
dataset_root=dataset_root,
image_height=self.image_size // 2,
image_width=self.image_size,
box_crop=False,
load_point_clouds=True,
),
}
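        # Four configurations: square crop, non-square crop, and two uncropped
        # aspect ratios, all sharing the same annotation files.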
self.visdom = Visdom()
if not self.visdom.check_connection():
print("Visdom server not running! Disabling visdom visualisations.")
self.visdom = None
def _render_one_pointcloud(self, point_cloud, cameras, render_size):
(_image_render, _, _) = render_point_cloud_pytorch3d(
cameras,
point_cloud,
render_size=render_size,
point_radius=2e-2,
topk=10,
bg_color=0.0,
bin_size=0,
)
return _image_render.clamp(0.0, 1.0)
def test_one(self):
"""Test dataset visualisation."""
point_clouds = {}
for max_frames in (16,):
for load_dataset_point_cloud in (True, False):
for dataset_key in self.datasets:
point_cloud, cameras = self._gen_and_render_pointcloud(
max_frames, load_dataset_point_cloud, dataset_key
)
test_name = f"{max_frames}_{load_dataset_point_cloud}_{dataset_key}"
point_clouds[test_name] = point_cloud
if self.visdom is not None:
plotlyplot = plot_scene(
{
"point_clouds": {
"cameras": cameras,
**point_clouds,
}
},
camera_scale=1.0,
pointcloud_max_points=10000,
pointcloud_marker_size=1.0,
)
self.visdom.plotlyplot(
plotlyplot,
env="test_dataset_visualize",
win=f"pcl",
)
def _gen_and_render_pointcloud(
self, max_frames, load_dataset_point_cloud, dataset_key
):
dataset = self.datasets[dataset_key]
# load the point cloud of the first sequence
sequence_show = list(dataset.seq_annots.keys())[0]
device = torch.device("cuda:0")
point_cloud, sequence_frame_data = get_implicitron_sequence_pointcloud(
dataset,
sequence_name=sequence_show,
mask_points=True,
max_frames=max_frames,
num_workers=10,
load_dataset_point_cloud=load_dataset_point_cloud,
)
# render on gpu
point_cloud = point_cloud.to(device)
cameras = sequence_frame_data.camera.to(device)
# render the point_cloud from the viewpoint of loaded cameras
images_render = torch.cat(
[
self._render_one_pointcloud(
point_cloud,
cameras[frame_i],
(
dataset.image_height,
dataset.image_width,
),
)
for frame_i in range(len(cameras))
]
).cpu()
images_gt_and_render = torch.cat(
[
sequence_frame_data.image_rgb,
images_render,
                (sequence_frame_data.image_rgb - images_render).abs(),
], dim=3
)
imfile = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
f"test_dataset_visualize"
+ f"_max_frames={max_frames}"
+ f"_load_pcl={load_dataset_point_cloud}"
+ f"_dataset_key={dataset_key}.png",
)
print(f"Exporting image {imfile}.")
torchvision.utils.save_image(images_gt_and_render, imfile, nrow=2)
if self.visdom is not None:
test_name = f"{max_frames}_{load_dataset_point_cloud}_{dataset_key}"
self.visdom.images(
images_gt_and_render,
env="test_dataset_visualize",
win=f"pcl_renders_{test_name}",
opts={"title": f"pcl_renders_{test_name}"},
)
return point_cloud, cameras
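

# Running this file directly requires a CUDA device; Visdom visualisation is
# optional and is disabled automatically when no server is reachable.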
if __name__ == "__main__":
unittest.main()
|
co3d-main
|
tests/test_dataset_visualize.py
|