python_code (string, 0-1.02M chars) | repo_name (string, 9-48 chars) | file_path (string, 5-114 chars)
---|---|---|
from .adamod import AdaMod
|
AdaMod-master
|
adamod/__init__.py
|
import math
import torch
from torch.optim import Optimizer
class AdaMod(Optimizer):
"""Implements AdaMod algorithm with Decoupled Weight Decay (arxiv.org/abs/1711.05101)
It has been proposed in `Adaptive and Momental Bounds for Adaptive Learning Rate Methods`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
        beta3 (float, optional): smoothing coefficient for adaptive learning rates (default: 0.999)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), beta3=0.999,
eps=1e-8, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= beta3 < 1.0:
raise ValueError("Invalid beta3 parameter: {}".format(beta3))
defaults = dict(lr=lr, betas=betas, beta3=beta3, eps=eps,
weight_decay=weight_decay)
super(AdaMod, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaMod, self).__setstate__(state)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'AdaMod does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Exponential moving average of actual learning rates
state['exp_avg_lr'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq, exp_avg_lr = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_lr']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
                    p.data.add_(p.data, alpha=-group['weight_decay'] * group['lr'])
# Applies momental bounds on actual learning rates
step_size = torch.full_like(denom, step_size)
step_size.div_(denom)
                exp_avg_lr.mul_(group['beta3']).add_(step_size, alpha=1 - group['beta3'])
step_size = torch.min(step_size, exp_avg_lr)
step_size.mul_(exp_avg)
p.data.add_(-step_size)
return loss
|
AdaMod-master
|
adamod/adamod.py
|
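For context, a minimal usage sketch of the AdaMod optimizer defined above; the toy model and the hyperparameter values are illustrative, not taken from the repository.

import torch
import torch.nn as nn
from adamod import AdaMod

model = nn.Linear(10, 2)  # toy model
optimizer = AdaMod(model.parameters(), lr=1e-3, betas=(0.9, 0.999), beta3=0.999)

inputs, targets = torch.randn(32, 10), torch.randint(0, 2, (32,))
loss = nn.functional.cross_entropy(model(inputs), targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()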
import os
#%matplotlib notebook
import matplotlib.pyplot as plt
import torch
import numpy as np
LABELS = ['SGD','Adam', 'AdaMod']
def get_folder_path(use_pretrained=True):
if use_pretrained:
path = 'pretrained'
else:
path = 'curve'
return path
def get_curve_data(use_pretrained=True, model='ResNet'):
folder_path = get_folder_path(use_pretrained)
filenames = [name for name in os.listdir(folder_path) if name.startswith(model.lower())]
paths = [os.path.join(folder_path, name) for name in filenames]
keys = [name.split('-')[1] for name in filenames]
return {key: torch.load(fp) for key, fp in zip(keys, paths)}
def plot(use_pretrained=True, model='ResNet', optimizers=None, curve_type='train'):
assert model in ['ResNet', 'DenseNet'], 'Invalid model name: {}'.format(model)
assert curve_type in ['train', 'test'], 'Invalid curve type: {}'.format(curve_type)
assert all(_ in LABELS for _ in optimizers), 'Invalid optimizer'
curve_data = get_curve_data(use_pretrained, model=model)
plt.figure()
plt.title('{} Accuracy for {} on CIFAR-100'.format(curve_type.capitalize(), model))
plt.xlabel('Epoch')
plt.ylabel('{} Accuracy %'.format(curve_type.capitalize()))
if curve_type == 'train':
plt.ylim(80, 101)
else:
plt.ylim(50, 81)
for optim in optimizers:
accuracies = np.array(curve_data[optim.lower()]['{}_acc'.format(curve_type)])
plt.plot(accuracies, label=optim)
plt.grid(ls='--')
plt.legend()
    plt.savefig('cifar100-{}-{}.png'.format(model, curve_type.capitalize()))
    plt.show()
def main():
# plot(use_pretrained=True, model='ResNet', optimizers=LABELS, curve_type='train')
# plot(use_pretrained=True, model='ResNet', optimizers=LABELS, curve_type='test')
plot(use_pretrained=True, model='DenseNet', optimizers=LABELS, curve_type='train')
plot(use_pretrained=True, model='DenseNet', optimizers=LABELS, curve_type='test')
if __name__ == '__main__':
main()
|
AdaMod-master
|
demos/cifar100/visualization.py
|
"""Train CIFAR100 with PyTorch."""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models import *
from adamod import AdaMod
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training')
parser.add_argument('--model', default='resnet', type=str, help='model',
choices=['resnet', 'densenet'])
parser.add_argument('--optim', default='adamod', type=str, help='optimizer',
choices=['sgd', 'adam', 'adamod'])
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--beta3', default=0.999, type=float,
help=' smoothing coefficient term of AdaMod')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum term')
parser.add_argument('--beta1', default=0.9, type=float, help='Adam coefficients beta_1')
parser.add_argument('--beta2', default=0.999, type=float, help='Adam coefficients beta_2')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--weight_decay', default=5e-4, type=float,
help='weight decay for optimizers')
return parser
def build_dataset():
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
])
trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True,
num_workers=2)
testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
# classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return train_loader, test_loader
def get_ckpt_name(dataset='cifar100', model='resnet', optimizer='adamod', lr=0.1, momentum=0.9,
beta1=0.9, beta2=0.999, beta3=0.999):
name = {
'sgd': 'lr{}-momentum{}'.format(lr, momentum),
'adam': 'lr{}-betas{}-{}'.format(lr, beta1, beta2),
'adamod': 'lr{}-betas{}-{}-{}'.format(lr, beta1, beta2, beta3),
}[optimizer]
return '{}-{}-{}'.format(model, optimizer, name)
def load_checkpoint(ckpt_name):
print('==> Resuming from checkpoint..')
path = os.path.join('checkpoint', ckpt_name)
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
assert os.path.exists(path), 'Error: checkpoint {} not found'.format(ckpt_name)
return torch.load(ckpt_name)
def build_model(args, device, ckpt=None):
print('==> Building model..')
net = {
'resnet': ResNet34,
'densenet': DenseNet121,
}[args.model]()
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if ckpt:
net.load_state_dict(ckpt['net'])
return net
def create_optimizer(args, model_params):
if args.optim == 'sgd':
return optim.SGD(model_params, args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optim == 'adam':
return optim.AdamW(model_params, args.lr, betas=(args.beta1, args.beta2),
weight_decay=args.weight_decay)
elif args.optim == 'adamod':
return AdaMod(model_params, args.lr, betas=(args.beta1, args.beta2),
beta3=args.beta3, weight_decay=args.weight_decay)
def train(net, epoch, device, data_loader, optimizer, criterion):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(data_loader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
accuracy = 100. * correct / total
print('train acc %.3f' % accuracy)
return accuracy
def test(net, device, data_loader, criterion):
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(data_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
accuracy = 100. * correct / total
print(' test acc %.3f' % accuracy)
return accuracy
def main():
parser = get_parser()
args = parser.parse_args()
train_loader, test_loader = build_dataset()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
ckpt_name = get_ckpt_name(model=args.model, optimizer=args.optim, lr=args.lr,
momentum=args.momentum, beta1=args.beta1, beta2=args.beta2, beta3=args.beta3)
if args.resume:
ckpt = load_checkpoint(ckpt_name)
best_acc = ckpt['acc']
start_epoch = ckpt['epoch']
else:
ckpt = None
best_acc = 0
start_epoch = -1
net = build_model(args, device, ckpt=ckpt)
criterion = nn.CrossEntropyLoss()
optimizer = create_optimizer(args, net.parameters())
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [150, 225], gamma=0.1,
last_epoch=start_epoch)
train_accuracies = []
test_accuracies = []
for epoch in range(start_epoch + 1, 300):
scheduler.step()
train_acc = train(net, epoch, device, train_loader, optimizer, criterion)
test_acc = test(net, device, test_loader, criterion)
# Save checkpoint.
if test_acc > best_acc:
print('Saving..')
state = {
'net': net.state_dict(),
'acc': test_acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, os.path.join('checkpoint', ckpt_name))
best_acc = test_acc
train_accuracies.append(train_acc)
test_accuracies.append(test_acc)
if not os.path.isdir('curve'):
os.mkdir('curve')
torch.save({'train_acc': train_accuracies, 'test_acc': test_accuracies},
os.path.join('curve', ckpt_name))
if __name__ == '__main__':
main()
|
AdaMod-master
|
demos/cifar100/main.py
|
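A hedged sketch of how the argparse flags above feed into create_optimizer. It assumes get_parser and create_optimizer from main.py are in scope and that it is run from demos/cifar100 so that the models package and adamod are importable; the flag values are illustrative.

from models import ResNet34

parser = get_parser()
args = parser.parse_args(['--model', 'resnet', '--optim', 'adamod',
                          '--lr', '0.001', '--beta3', '0.999', '--weight_decay', '5e-4'])
net = ResNet34()
optimizer = create_optimizer(args, net.parameters())  # AdaMod with lr=0.001, betas=(0.9, 0.999), beta3=0.999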
"""
.. Densely Connected Convolutional Networks:
https://arxiv.org/abs/1608.06993
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4 * growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4 * growth_rate)
self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out, x], 1)
return out
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=100):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2 * growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
num_planes += nblocks[0] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
num_planes += nblocks[1] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3] * growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear = nn.Linear(num_planes, num_classes)
def _make_dense_layers(self, block, in_planes, nblock):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out = F.avg_pool2d(F.relu(self.bn(out)), 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DenseNet121():
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32)
def DenseNet169():
return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32)
def DenseNet201():
return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32)
def DenseNet161():
return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48)
def densenet_cifar():
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12)
def test():
net = densenet_cifar()
x = torch.randn(1, 3, 32, 32)
y = net(x)
print(y)
# test()
|
AdaMod-master
|
demos/cifar100/models/densenet.py
|
from .resnet import *
from .densenet import *
|
AdaMod-master
|
demos/cifar100/models/__init__.py
|
"""
.. Deep Residual Learning for Image Recognition:
https://arxiv.org/abs/1512.03385
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride,
bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride,
bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=100):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
def test():
net = ResNet18()
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
# test()
|
AdaMod-master
|
demos/cifar100/models/resnet.py
|
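A quick shape check (not part of the repository) for the two model constructors used by the CIFAR-100 demo; both default to num_classes=100. It assumes it is run from demos/cifar100 so that the models package above is importable.

import torch
from models import ResNet34, DenseNet121

x = torch.randn(2, 3, 32, 32)
print(ResNet34()(x).shape)     # torch.Size([2, 100])
print(DenseNet121()(x).shape)  # torch.Size([2, 100])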
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_lr_scheduler import FairseqLRScheduler
LR_SCHEDULER_REGISTRY = {}
def build_lr_scheduler(args, optimizer):
return LR_SCHEDULER_REGISTRY[args.lr_scheduler](args, optimizer)
def register_lr_scheduler(name):
"""Decorator to register a new LR scheduler."""
def register_lr_scheduler_cls(cls):
if name in LR_SCHEDULER_REGISTRY:
raise ValueError('Cannot register duplicate LR scheduler ({})'.format(name))
if not issubclass(cls, FairseqLRScheduler):
raise ValueError('LR Scheduler ({}: {}) must extend FairseqLRScheduler'.format(name, cls.__name__))
LR_SCHEDULER_REGISTRY[name] = cls
return cls
return register_lr_scheduler_cls
# automatically import any Python files in the optim/lr_scheduler/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.optim.lr_scheduler.' + module)
|
AdaMod-master
|
demos/nmt/lr_scheduler/__init__.py
|
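A hedged sketch of how the registry above is meant to be used; the scheduler name 'constant_after_init' and the class MyConstantSchedule are hypothetical, not part of the repository.

from . import FairseqLRScheduler, register_lr_scheduler

@register_lr_scheduler('constant_after_init')
class MyConstantSchedule(FairseqLRScheduler):
    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        self.lr = args.lr[0]
        self.optimizer.set_lr(self.lr)

    def step_update(self, num_updates):
        # keep the learning rate fixed after every update
        self.optimizer.set_lr(self.lr)
        return self.lr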
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('cold_start')
class ColdStartSchedule(FairseqLRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
learning rate (``--lr``). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup::
lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = args.lr * sqrt(args.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if len(args.lr) > 1:
raise ValueError(
                'Cannot use a fixed learning rate schedule with cold_start.'
' Consider --lr-scheduler=fixed instead.'
)
warmup_end_lr = args.lr[0]
if args.warmup_init_lr < 0:
args.warmup_init_lr = warmup_end_lr
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * args.warmup_updates**0.5
# initial learning rate
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
# fmt: off
parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
# fmt: on
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.args.warmup_updates:
self.lr = self.args.lr[0]
else:
self.lr = self.decay_factor * num_updates**-0.5
self.optimizer.set_lr(self.lr)
return self.lr
|
AdaMod-master
|
demos/nmt/lr_scheduler/cold_start_scheduler.py
|
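To make the schedule above concrete, a small worked example (not from the repository) with illustrative values --lr 5e-4 and --warmup-updates 4000:

lr, warmup_updates = 5e-4, 4000
decay_factor = lr * warmup_updates ** 0.5  # ~0.0316

def lr_at(update_num):
    if update_num < warmup_updates:
        return lr                              # held fixed during the cold start
    return decay_factor * update_num ** -0.5   # inverse-sqrt decay afterwards

print(lr_at(1000), lr_at(4000), lr_at(16000))  # ~5e-4, 5e-4, 2.5e-4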
from setuptools import setup, find_packages
setup(
name = 'uniformer-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Uniformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/uniformer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'video classification'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
uniformer-pytorch-main
|
setup.py
|
import torch
from torch import nn, einsum
from einops import rearrange
from einops.layers.torch import Reduce
# helpers
def exists(val):
return val is not None
# classes
class LayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1, 1))
def forward(self, x):
std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
def FeedForward(dim, mult = 4, dropout = 0.):
return nn.Sequential(
LayerNorm(dim),
nn.Conv3d(dim, dim * mult, 1),
nn.GELU(),
nn.Dropout(dropout),
nn.Conv3d(dim * mult, dim, 1)
)
# MHRAs (multi-head relation aggregators)
class LocalMHRA(nn.Module):
def __init__(
self,
dim,
heads,
dim_head = 64,
local_aggr_kernel = 5
):
super().__init__()
self.heads = heads
inner_dim = dim_head * heads
# they use batchnorm for the local MHRA instead of layer norm
self.norm = nn.BatchNorm3d(dim)
        # only values, as the attention matrix is taken care of by a convolution
self.to_v = nn.Conv3d(dim, inner_dim, 1, bias = False)
# this should be equivalent to aggregating by an attention matrix parameterized as a function of the relative positions across each axis
self.rel_pos = nn.Conv3d(heads, heads, local_aggr_kernel, padding = local_aggr_kernel // 2, groups = heads)
# combine out across all the heads
self.to_out = nn.Conv3d(inner_dim, dim, 1)
def forward(self, x):
x = self.norm(x)
b, c, *_, h = *x.shape, self.heads
# to values
v = self.to_v(x)
# split out heads
v = rearrange(v, 'b (c h) ... -> (b c) h ...', h = h)
# aggregate by relative positions
out = self.rel_pos(v)
# combine heads
out = rearrange(out, '(b c) h ... -> b (c h) ...', b = b)
return self.to_out(out)
class GlobalMHRA(nn.Module):
def __init__(
self,
dim,
heads,
dim_head = 64,
dropout = 0.
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.to_qkv = nn.Conv1d(dim, inner_dim * 3, 1, bias = False)
self.to_out = nn.Conv1d(inner_dim, dim, 1)
def forward(self, x):
x = self.norm(x)
shape, h = x.shape, self.heads
x = rearrange(x, 'b c ... -> b c (...)')
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h d) n -> b h n d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# attention
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b (h d) n', h = h)
out = self.to_out(out)
return out.view(*shape)
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
mhsa_type = 'g',
local_aggr_kernel = 5,
dim_head = 64,
ff_mult = 4,
ff_dropout = 0.,
attn_dropout = 0.
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
if mhsa_type == 'l':
attn = LocalMHRA(dim, heads = heads, dim_head = dim_head, local_aggr_kernel = local_aggr_kernel)
elif mhsa_type == 'g':
attn = GlobalMHRA(dim, heads = heads, dim_head = dim_head, dropout = attn_dropout)
else:
raise ValueError('unknown mhsa_type')
self.layers.append(nn.ModuleList([
nn.Conv3d(dim, dim, 3, padding = 1),
attn,
FeedForward(dim, mult = ff_mult, dropout = ff_dropout),
]))
def forward(self, x):
for dpe, attn, ff in self.layers:
x = dpe(x) + x
x = attn(x) + x
x = ff(x) + x
return x
# main class
class Uniformer(nn.Module):
def __init__(
self,
*,
num_classes,
dims = (64, 128, 256, 512),
depths = (3, 4, 8, 3),
mhsa_types = ('l', 'l', 'g', 'g'),
local_aggr_kernel = 5,
channels = 3,
ff_mult = 4,
dim_head = 64,
ff_dropout = 0.,
attn_dropout = 0.
):
super().__init__()
init_dim, *_, last_dim = dims
self.to_tokens = nn.Conv3d(channels, init_dim, (3, 4, 4), stride = (2, 4, 4), padding = (1, 0, 0))
dim_in_out = tuple(zip(dims[:-1], dims[1:]))
mhsa_types = tuple(map(lambda t: t.lower(), mhsa_types))
self.stages = nn.ModuleList([])
for ind, (depth, mhsa_type) in enumerate(zip(depths, mhsa_types)):
is_last = ind == len(depths) - 1
stage_dim = dims[ind]
heads = stage_dim // dim_head
self.stages.append(nn.ModuleList([
Transformer(
dim = stage_dim,
depth = depth,
heads = heads,
mhsa_type = mhsa_type,
ff_mult = ff_mult,
ff_dropout = ff_dropout,
attn_dropout = attn_dropout
),
nn.Sequential(
nn.Conv3d(stage_dim, dims[ind + 1], (1, 2, 2), stride = (1, 2, 2)),
LayerNorm(dims[ind + 1]),
) if not is_last else None
]))
self.to_logits = nn.Sequential(
Reduce('b c t h w -> b c', 'mean'),
nn.LayerNorm(last_dim),
nn.Linear(last_dim, num_classes)
)
def forward(self, video):
x = self.to_tokens(video)
for transformer, conv in self.stages:
x = transformer(x)
if exists(conv):
x = conv(x)
return self.to_logits(x)
|
uniformer-pytorch-main
|
uniformer_pytorch/uniformer_pytorch.py
|
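For context, a minimal usage sketch of the Uniformer class defined above; the input layout is (batch, channels, frames, height, width), and the exact sizes below are illustrative.

import torch
from uniformer_pytorch import Uniformer

model = Uniformer(num_classes = 1000)

video = torch.randn(1, 3, 8, 224, 224)  # (batch, channels, frames, height, width)
logits = model(video)                   # (1, 1000)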
from uniformer_pytorch.uniformer_pytorch import Uniformer
|
uniformer-pytorch-main
|
uniformer_pytorch/__init__.py
|
import csv
from clap.datasets import tokenize
import torch
import torchaudio
# constants
MAX_TOKEN_LENGTH = 256
DATA_DIR = './data'
NUM_MEL = 80
TSV_FILE_NAME = 'subset.tsv'
# helpers
def tsv_to_dict(path):
with open(path) as fd:
rd = csv.DictReader(fd, delimiter = "\t", quotechar = '"')
return [row for row in rd]
# script
voice_clips = tsv_to_dict(f'{DATA_DIR}/{TSV_FILE_NAME}')
for clip in voice_clips:
filename = clip['path']
text = clip['sentence']
waveform, sample_rate = torchaudio.load(f"{DATA_DIR}/clips/{filename}", normalization = True)
output = torchaudio.transforms.MelSpectrogram(sample_rate, n_mels = NUM_MEL)(waveform)[0]
    tokenized = torch.tensor(list(text.encode('utf-8')), dtype = torch.uint8)
save_path = f"{DATA_DIR}/{filename}.pt"
torch.save({
'audio': output.t(),
'text': tokenized
}, save_path)
|
CLAP-main
|
preprocess.py
|
from setuptools import setup, find_packages
setup(
name="clap-jax",
packages=find_packages(),
version="0.0.1",
license="MIT",
description="CLAP - Contrastive Language-Audio Pretraining",
author="Charles Foster",
author_email="",
url="https://github.com/cfoster0/CLAP",
keywords=[
"artificial intelligence",
"deep learning",
"contrastive learning",
"audio",
],
install_requires=[
"click",
"click-option-group",
"einops>=0.3",
"flax",
"jax",
"jaxlib",
"lm_dataformat",
"optax",
"torch",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
|
CLAP-main
|
setup.py
|
import click
from click_option_group import optgroup
import jax
from jax import random, numpy as np, value_and_grad, jit, tree_util
from optax import chain, clip_by_global_norm, scale_by_adam, scale, apply_updates, add_decayed_weights, masked
from clap.models import CLAP
# data
from torch.utils.data import DataLoader
from clap.datasets import pair_text_spectrogram_dataset_collate_fn, PairTextSpectrogramDataset
@click.command()
@optgroup.group('Model settings')
@optgroup.option('--text_vocab', default = 256, type = int)
@optgroup.option('--text_dim', default = 512, type = int)
@optgroup.option('--text_depth', default = 1, type = int)
@optgroup.option('--text_heads', default = 8, type = int)
@optgroup.option('--audio_dim', default = 512, type = int)
@optgroup.option('--audio_depth', default = 1, type = int)
@optgroup.option('--audio_heads', default = 8, type = int)
@optgroup.group('Training settings')
@optgroup.option('--data_folder', default = './data', type = str)
@optgroup.option('--batch_size', default = 16, type = int)
@optgroup.option('--epochs', default = 100, type = int)
@optgroup.option('--learning_rate', default = 3e-4, type = float)
@optgroup.option('--weight_decay', default = 1e-1, type = float)
@optgroup.option('--seed', default = 0, type = int)
@optgroup.option('--max_norm', default = 0.5, type = float)
def train(
*,
data_folder,
batch_size,
epochs,
learning_rate,
weight_decay,
seed,
max_norm,
text_vocab,
text_dim,
text_depth,
text_heads,
audio_dim,
audio_depth,
audio_heads
):
# rng
rng_key = random.PRNGKey(seed)
# data
dataset = PairTextSpectrogramDataset(data_folder)
dl = DataLoader(dataset, batch_size = batch_size, collate_fn = pair_text_spectrogram_dataset_collate_fn, drop_last = True, shuffle = True)
# model
model = CLAP(
text_vocab = text_vocab,
text_dim = text_dim,
text_depth = text_depth,
text_heads = text_heads,
audio_dim = audio_dim,
audio_depth = audio_depth,
audio_heads = audio_heads
)
# optimizer
exclude_bias = lambda params: tree_util.tree_map(lambda x: x.ndim != 1, params)
optim = chain(
clip_by_global_norm(max_norm),
scale_by_adam(eps=1e-4),
add_decayed_weights(weight_decay, exclude_bias),
scale(-learning_rate)
)
# init
audio, audio_mask, text, text_mask = next(iter(dl))
params = model.init(rng_key, text, audio, text_mask, audio_mask)
optim_state = optim.init(params)
# loss function, for use with value_and_grad
@jit
@value_and_grad
def loss_fn(params, text, audio, text_mask, audio_mask):
return model.apply(params, text, audio, text_mask, audio_mask)
# train loop
for _ in range(epochs):
for audio, audio_mask, text, text_mask in dl:
loss, grads = loss_fn(params, text, audio, text_mask, audio_mask)
updates, optim_state = optim.update(grads, optim_state, params)
params = apply_updates(params, updates)
print(f'loss: {loss}')
# finished
if __name__ == "__main__":
train()
|
CLAP-main
|
train.py
|
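A hedged, stripped-down sketch of the optax update pattern used in train() above, applied to a toy parameter tree; the weight-decay mask from the script is omitted and the hyperparameters are illustrative.

import jax.numpy as jnp
from optax import chain, clip_by_global_norm, scale_by_adam, scale, apply_updates

params = {'w': jnp.ones((3, 3)), 'b': jnp.zeros(3)}
grads = {'w': jnp.full((3, 3), 0.1), 'b': jnp.full((3,), 0.1)}

optim = chain(clip_by_global_norm(0.5), scale_by_adam(eps=1e-4), scale(-3e-4))
optim_state = optim.init(params)
updates, optim_state = optim.update(grads, optim_state, params)
params = apply_updates(params, updates)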
import jax
from typing import Any, Callable, Sequence, Optional
from jax import lax, random, numpy as np, vmap, jit
from jax.ops import index, index_update
# einsum and einops
from jax.numpy import einsum
from einops import rearrange, repeat
# flax
import flax
from flax.core import freeze, unfreeze
from flax import linen as nn
# constants
LARGE_NEG_VALUE = -1e10
# config
from jax.config import config
config.enable_omnistaging() # Linen requires enabling omnistaging
# helpers
def cross_entropy(logits, targets, axis=-1):
logprobs = nn.log_softmax(logits, axis=axis)
nll = np.take_along_axis(logprobs, np.expand_dims(targets, axis=axis), axis=axis)
ce = -np.mean(nll)
return ce
def fixed_pos_embedding(seq, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(seq), inv_freq)
return np.sin(sinusoid_inp), np.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = np.stack((-x2, x1), axis=-1)
return rearrange(x, "... d j -> ... (d j)")
def apply_rotary_pos_emb(x, sincos):
sin, cos = map(lambda t: repeat(t, "b n -> b (n j)", j=2)[:, None, :], sincos)
return (x * cos) + (rotate_every_two(x) * sin)
# main class
class Attention(nn.Module):
dim: int
heads: int
dim_head: int = 64
causal: bool = False
@nn.compact
def __call__(self, x, pos_emb, mask):
dim_in, h = x.shape[-1], self.heads
scale = dim_in ** -0.5
norm = nn.LayerNorm()
to_qkv = nn.Dense(features=self.dim_head * h * 3, use_bias=False)
to_out = nn.Dense(features=dim_in)
x = norm(x)
qkv = np.split(to_qkv(x), 3, axis=-1)
q, k, v = map(lambda t: rearrange(t, "i (h d) -> i h d", h=h), qkv)
q = index_update(q, index[1:], apply_rotary_pos_emb(q[1:], pos_emb))
k = index_update(k, index[1:], apply_rotary_pos_emb(k[1:], pos_emb))
sim = einsum("i h d, j h d -> i j h", q, k) * scale
mask = np.pad(mask, (1, 0), constant_values=True)
mask = rearrange(mask, "j -> () j ()")
if self.causal:
i, j = sim.shape[:2]
tri_mask = np.ones((i - 1, j - 1), dtype=bool)
tri_mask = np.pad(tri_mask, ((1, 0), (1, 0)), constant_values=False)
causal_mask = np.triu(tri_mask, j - i + 1)
causal_mask = rearrange(causal_mask, "i j -> i j ()")
mask = ~causal_mask * mask
sim = np.where(mask, sim, LARGE_NEG_VALUE)
attn = nn.softmax(sim, axis=-2)
out = einsum("i j h, j h d -> i h d", attn, v)
out = rearrange(out, "i h d -> i (h d)")
return to_out(out)
class FeedForward(nn.Module):
mult: int = 4
@nn.compact
def __call__(self, x):
dim_in, mult = x.shape[-1], self.mult
norm = nn.LayerNorm()
to_intermediate = nn.Dense(features=dim_in * mult)
to_out = nn.Dense(features=dim_in)
x = norm(x)
x = to_intermediate(x)
x = nn.gelu(x)
x = to_out(x)
return x
class Transformer(nn.Module):
dim: int
depth: int
heads: int
dim_head: int = 64
causal: bool = False
cls_init: Callable = nn.initializers.lecun_normal()
def setup(self):
self.layers = [
(
Attention(
dim=self.dim,
heads=self.heads,
dim_head=self.dim_head,
causal=self.causal,
),
FeedForward(),
)
for _ in range(self.depth)
]
@nn.compact
def __call__(self, x, mask):
n, d, h, dh, dim = *x.shape, self.heads, self.dim_head, self.dim
if d != dim:
x = nn.Dense(features=dim)(x)
cls_token = self.param("cls", self.cls_init, (1, x.shape[-1]))
to_norm_out = nn.LayerNorm()
sincos = fixed_pos_embedding(n, self.dim_head)
x = np.concatenate((cls_token, x), axis=0)
for attn, ff in self.layers:
x = attn(x, pos_emb=sincos, mask=mask) + x
x = ff(x) + x
x = to_norm_out(x)
return x
class CLAP(nn.Module):
text_vocab: int
text_dim: int
text_depth: int
text_heads: int
audio_dim: int
audio_depth: int
audio_heads: int
temp_init: Callable = nn.initializers.zeros
def setup(self):
self.audio_encoder = Transformer(
dim=self.audio_dim, depth=self.audio_depth, heads=self.audio_heads
)
self.text_encoder = Transformer(
dim=self.text_dim, depth=self.text_depth, heads=self.text_heads, causal=True
)
@nn.compact
def __call__(self, text, audio, text_mask, audio_mask, return_loss=True):
b, text_vocab, text_dim = text.shape[0], self.text_vocab, self.text_dim
to_text_tokens = nn.Embed(num_embeddings=text_vocab, features=text_dim)
temp = self.param("temperature", self.temp_init, tuple())
text = to_text_tokens(text)
enc_text = vmap(self.text_encoder)(text, mask=text_mask)
enc_audio = vmap(self.audio_encoder)(audio, mask=audio_mask)
enc_text = enc_text[:, 0]
enc_audio = enc_audio[:, 0]
enc_text = enc_text / np.linalg.norm(enc_text, axis=-1, keepdims=True)
enc_audio = enc_audio / np.linalg.norm(enc_audio, axis=-1, keepdims=True)
sim = einsum("i d, j d -> i j", enc_text, enc_audio) * np.exp(temp)
if not return_loss:
return sim
labels = np.arange(b)
loss = (
cross_entropy(sim, labels, axis=0) + cross_entropy(sim, labels, axis=1)
) / 2
return loss
|
CLAP-main
|
clap/models.py
|
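A small worked example (not from the repository) of the symmetric cross-entropy computed at the end of CLAP.__call__ above, on a toy 2x2 text/audio similarity matrix where matched pairs sit on the diagonal:

import jax.numpy as np
from jax.nn import log_softmax

def cross_entropy(logits, targets, axis=-1):
    logprobs = log_softmax(logits, axis=axis)
    nll = np.take_along_axis(logprobs, np.expand_dims(targets, axis=axis), axis=axis)
    return -np.mean(nll)

sim = np.array([[4.0, 0.1], [0.2, 3.5]])  # rows: text, columns: audio
labels = np.arange(2)
loss = (cross_entropy(sim, labels, axis=0) + cross_entropy(sim, labels, axis=1)) / 2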
import glob
import torch
from pathlib import Path
import lm_dataformat as lmd
from itertools import cycle, islice, chain
import torch.nn.functional as F
from torch.utils.data import Dataset, TensorDataset, ConcatDataset, IterableDataset
class CaptionedAudioMetadataset(IterableDataset):
def __init__(self, path_pairs, lazy=False):
self.datasets = [
CaptionedAudioDataset(captions_path, spectrograms_path, lazy=lazy)
for (captions_path, spectrograms_path) in path_pairs
]
def __iter__(self):
def roundrobin(datasets):
num_active = len(datasets)
nexts = cycle(iter(it).__next__ for it in datasets)
while num_active:
try:
for next in nexts:
yield next()
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = cycle(islice(nexts, num_active))
iterator = roundrobin(self.datasets)
return iterator
class CaptionedAudioDataset(IterableDataset):
def __init__(self, captions_path, spectrograms_path, lazy=False):
self.lazy = lazy
if self.lazy:
            # Warning: The lazy path does not check whether the caption metadata
            # links it to the spectrogram. It assumes that the spectrogram data,
            # read from the files from the path in sorted order, loaded in as
            # tensors, follows the exact same ordering as the LMD-encoded captions.
self.captions = lmd.Reader(captions_path).stream_data(get_meta=False)
self.spectrograms = SpectrogramLazyDataset(spectrograms_path)
else:
self.captions = lmd.Reader(captions_path).stream_data(get_meta=True)
self.spectrograms = SpectrogramDataset(spectrograms_path)
def __iter__(self):
if self.lazy:
iterator = (
(tokenize(text), spectrogram)
for ((text, _), spectrogram) in zip(self.captions, self.spectrograms)
)
else:
iterator = (
(tokenize(text), self.spectrograms[meta["index"]])
for (text, meta) in self.captions
)
return iterator
class SpectrogramDataset(Dataset):
def __init__(self, path):
self.shard_paths = sorted(glob.glob(f"{path}/*.pt"))
self.data = ConcatDataset(
[SpectrogramDatasetShard(shard_path) for shard_path in self.shard_paths]
)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class SpectrogramLazyDataset(IterableDataset):
def __init__(self, path):
self.shard_paths = sorted(glob.glob(f"{path}/*.pt"))
def __iter__(self):
def lazy_shard_loader():
for shard_path in self.shard_paths:
self.shard_data = SpectrogramDatasetShard(shard_path)
for example in self.shard_data:
yield example
return lazy_shard_loader()
class SpectrogramDatasetShard(Dataset):
def __init__(self, path):
self.dataset_shard = TensorDataset(torch.load(path))
def __len__(self):
# Layout is [examples, frames, channels]
return len(self.dataset_shard)
def __getitem__(self, idx):
return self.dataset_shard[idx]
class PairTextSpectrogramDataset(Dataset):
def __init__(self, folder, max_audio_len = 2048, max_text_len = 256):
self.paths = [path for path in Path(folder).glob('*.pt')]
self.max_audio_len = max_audio_len
self.max_text_len = max_text_len
def __len__(self):
return len(self.paths)
def __getitem__(self, idx):
max_audio_len, max_text_len = self.max_audio_len, self.max_text_len
path = self.paths[idx]
data = torch.load(path)
audio, text = data['audio'], data['text']
audio = audio[:max_audio_len]
text = text[:max_text_len]
audio_mask = torch.ones(audio.shape[:-1]).bool()
text_mask = torch.ones_like(text).bool()
return audio, audio_mask, text, text_mask
def pair_text_spectrogram_dataset_collate_fn(batch):
audios = [el[0] for el in batch]
texts = [el[2] for el in batch]
max_audio_len = max([audio.shape[0] for audio in audios])
max_text_len = max([text.shape[0] for text in texts])
padded_batch = []
for audio, audio_mask, text, text_mask in batch:
audio_len = audio.shape[0]
text_len = text.shape[0]
audio_pad_len = max_audio_len - audio_len
text_pad_len = max_text_len - text_len
if audio_pad_len > 0:
audio = F.pad(audio, (0, 0, audio_pad_len, 0), value = 0.)
audio_mask = F.pad(audio_mask, (audio_pad_len, 0), value = False)
if text_pad_len > 0:
text = F.pad(text, (text_pad_len, 0), value = 0.)
text_mask = F.pad(text_mask, (text_pad_len, 0), value = False)
padded_batch.append((audio, audio_mask, text, text_mask))
output = tuple(map(lambda t: torch.stack(t).numpy(), zip(*padded_batch)))
return output
def tokenize(text, pad_to=256):
# Padding token is 0, the null byte
tokens = torch.zeros(pad_to, dtype=torch.uint8)
# Truncate to context window size on the right if need be
for i, byte in enumerate(text.encode("utf-8")):
if i < pad_to:
tokens[i] = int(byte)
else:
break
return torch.tensor(tokens)
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
num_active = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while num_active:
try:
for next in nexts:
yield next()
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = cycle(islice(nexts, num_active))
|
CLAP-main
|
clap/datasets.py
|
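A quick check (not from the repository) of the byte-level tokenize helper above:

from clap.datasets import tokenize

tokens = tokenize('hello world')
print(tokens.shape)  # torch.Size([256]) -- right-padded with zero (null) bytes
print(tokens[:5])    # tensor([104, 101, 108, 108, 111], dtype=torch.uint8)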
from clap.models import CLAP
from clap.datasets import CaptionedAudioDataset, CaptionedAudioMetadataset, tokenize
|
CLAP-main
|
clap/__init__.py
|
# Modified from Google's Vision Transformer repo, whose notice is reproduced below.
#
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import einops
import flax.linen as nn
import jax.numpy as jnp
class MlpBlock(nn.Module):
mlp_dim: int
@nn.compact
def __call__(self, x):
y = nn.Dense(self.mlp_dim)(x)
y = nn.gelu(y)
return nn.Dense(x.shape[-1])(y)
class MixerBlock(nn.Module):
"""Mixer block layer."""
tokens_mlp_dim: int
channels_mlp_dim: int
@nn.compact
def __call__(self, x):
y = nn.LayerNorm()(x)
y = jnp.swapaxes(y, 0, 1)
y = MlpBlock(self.tokens_mlp_dim, name="token_mixing")(y)
y = jnp.swapaxes(y, 0, 1)
x = x + y
y = nn.LayerNorm()(x)
return x + MlpBlock(self.channels_mlp_dim, name="channel_mixing")(y)
class MlpMixer(nn.Module):
"""Mixer architecture."""
patches: Any
strides: Any
num_classes: int
num_blocks: int
hidden_dim: int
tokens_mlp_dim: int
channels_mlp_dim: int
@nn.compact
def __call__(self, inputs):
x = nn.Conv(
self.hidden_dim, self.patches.size, strides=self.strides.size, name="stem"
)(inputs)
x = einops.rearrange(x, "h w c -> (h w) c")
for _ in range(self.num_blocks):
x = MixerBlock(self.tokens_mlp_dim, self.channels_mlp_dim)(x)
x = nn.LayerNorm(name="pre_head_layer_norm")(x)
x = jnp.mean(x, axis=0)
return nn.Dense(
self.num_classes, kernel_init=nn.initializers.zeros, name="head"
)(x)
|
CLAP-main
|
clap/trunks.py
|
from setuptools import setup, find_packages
setup(
name = 'robocat-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'RoboCat - A Self-Improving Foundation Agent for Robotic Manipulation',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/robocat-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'self-improving robotic manipulation'
],
install_requires=[
'einops>=0.6.1',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
robocat-pytorch-main
|
setup.py
|
robocat-pytorch-main
|
robocat_pytorch/__init__.py
|
|
robocat-pytorch-main
|
robocat_pytorch/robocat.py
|
|
from setuptools import setup, find_packages
exec(open('gigagan_pytorch/version.py').read())
setup(
name = 'gigagan-pytorch',
packages = find_packages(exclude=[]),
version = __version__,
license='MIT',
description = 'GigaGAN - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
  url = 'https://github.com/lucidrains/gigagan-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'generative adversarial networks'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6',
'ema-pytorch',
'kornia',
'numerize',
'open-clip-torch>=2.0.0,<3.0.0',
'pillow',
'torch>=1.6',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
gigagan-pytorch-main
|
setup.py
|
__version__ = '0.2.19'
|
gigagan-pytorch-main
|
gigagan_pytorch/version.py
|
from collections import namedtuple
from pathlib import Path
from math import log2, sqrt
from random import random
from functools import partial
from torchvision import utils
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.autograd import grad as torch_grad
from torch.utils.data import DataLoader
from torch.cuda.amp import GradScaler
from beartype import beartype
from beartype.typing import List, Optional, Tuple, Dict, Union, Iterable
from einops import rearrange, pack, unpack, repeat, reduce
from einops.layers.torch import Rearrange, Reduce
from kornia.filters import filter2d
from ema_pytorch import EMA
from gigagan_pytorch.version import __version__
from gigagan_pytorch.open_clip import OpenClipAdapter
from gigagan_pytorch.optimizer import get_optimizer
from gigagan_pytorch.distributed import all_gather
from tqdm import tqdm
from numerize import numerize
from accelerate import Accelerator, DistributedType
from accelerate.utils import DistributedDataParallelKwargs
# helpers
def exists(val):
return val is not None
@beartype
def is_empty(arr: Iterable):
return len(arr) == 0
def default(*vals):
for val in vals:
if exists(val):
return val
return None
def cast_tuple(t, length = 1):
return t if isinstance(t, tuple) else ((t,) * length)
def is_power_of_two(n):
return log2(n).is_integer()
def safe_unshift(arr):
if len(arr) == 0:
return None
return arr.pop(0)
def divisible_by(numer, denom):
return (numer % denom) == 0
def group_by_num_consecutive(arr, num):
out = []
for ind, el in enumerate(arr):
if ind > 0 and divisible_by(ind, num):
yield out
out = []
out.append(el)
if len(out) > 0:
yield out
def is_unique(arr):
return len(set(arr)) == len(arr)
def cycle(dl):
while True:
for data in dl:
yield data
def num_to_groups(num, divisor):
groups, remainder = divmod(num, divisor)
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def mkdir_if_not_exists(path):
path.mkdir(exist_ok = True, parents = True)
@beartype
def set_requires_grad_(
m: nn.Module,
requires_grad: bool
):
for p in m.parameters():
p.requires_grad = requires_grad
# activation functions
def leaky_relu(neg_slope = 0.2):
return nn.LeakyReLU(neg_slope)
def conv2d_3x3(dim_in, dim_out):
return nn.Conv2d(dim_in, dim_out, 3, padding = 1)
# tensor helpers
def log(t, eps = 1e-20):
return t.clamp(min = eps).log()
def gradient_penalty(
images,
outputs,
grad_output_weights = None,
weight = 10,
scaler: Optional[GradScaler] = None,
eps = 1e-4
):
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
if exists(scaler):
outputs = [*map(scaler.scale, outputs)]
if not exists(grad_output_weights):
grad_output_weights = (1,) * len(outputs)
maybe_scaled_gradients, *_ = torch_grad(
outputs = outputs,
inputs = images,
grad_outputs = [(torch.ones_like(output) * weight) for output, weight in zip(outputs, grad_output_weights)],
create_graph = True,
retain_graph = True,
only_inputs = True
)
gradients = maybe_scaled_gradients
if exists(scaler):
scale = scaler.get_scale()
inv_scale = 1. / max(scale, eps)
gradients = maybe_scaled_gradients * inv_scale
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((gradients.norm(2, dim = 1) - 1) ** 2).mean()
# hinge gan losses
def generator_hinge_loss(fake):
return fake.mean()
def discriminator_hinge_loss(real, fake):
return (F.relu(1 + real) + F.relu(1 - fake)).mean()
# auxiliary losses
def aux_matching_loss(real, fake):
"""
    the logits are negated because, in this framework, the discriminator outputs ~0 for real images and high values for fake ones. GANs can have this convention arbitrarily swapped, since all that matters is that the generator and discriminator remain adversaries
"""
return (log(1 + (-real).exp()) + log(1 + (-fake).exp())).mean()
@beartype
def aux_clip_loss(
clip: OpenClipAdapter,
images: Tensor,
texts: Optional[List[str]] = None,
text_embeds: Optional[Tensor] = None
):
assert exists(texts) ^ exists(text_embeds)
images, batch_sizes = all_gather(images, 0, None)
if exists(texts):
text_embeds, _ = clip.embed_texts(texts)
text_embeds, _ = all_gather(text_embeds, 0, batch_sizes)
return clip.contrastive_loss(images = images, text_embeds = text_embeds)
# differentiable augmentation - Karras et al. stylegan-ada
# start with horizontal flip
class DiffAugment(nn.Module):
def __init__(
self,
*,
prob,
horizontal_flip,
horizontal_flip_prob = 0.5
):
super().__init__()
self.prob = prob
assert 0 <= prob <= 1.
self.horizontal_flip = horizontal_flip
self.horizontal_flip_prob = horizontal_flip_prob
def forward(
self,
images,
rgbs: List[Tensor]
):
if random() >= self.prob:
return images, rgbs
if random() < self.horizontal_flip_prob:
images = torch.flip(images, (-1,))
rgbs = [torch.flip(rgb, (-1,)) for rgb in rgbs]
return images, rgbs
# rmsnorm (newer papers show the mean-centering in layernorm is not necessary)
class ChannelRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim, 1, 1))
def forward(self, x):
normed = F.normalize(x, dim = 1)
return normed * self.scale * self.gamma
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
normed = F.normalize(x, dim = -1)
return normed * self.scale * self.gamma
# down and upsample
class Blur(nn.Module):
def __init__(self):
super().__init__()
f = torch.Tensor([1, 2, 1])
self.register_buffer('f', f)
def forward(self, x):
f = self.f
f = f[None, None, :] * f [None, :, None]
return filter2d(x, f, normalized = True)
def Upsample(*args):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'bilinear', align_corners = False),
Blur()
)
class PixelShuffleUpsample(nn.Module):
def __init__(self, dim):
super().__init__()
conv = nn.Conv2d(dim, dim * 4, 1)
self.net = nn.Sequential(
conv,
nn.SiLU(),
nn.PixelShuffle(2)
)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, h, w = conv.weight.shape
conv_weight = torch.empty(o // 4, i, h, w)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
return self.net(x)
def Downsample(dim):
return nn.Sequential(
Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),
nn.Conv2d(dim * 4, dim, 1)
)
# skip layer excitation
def SqueezeExcite(dim, dim_out, reduction = 4, dim_min = 32):
dim_hidden = max(dim_out // reduction, dim_min)
return nn.Sequential(
Reduce('b c h w -> b c', 'mean'),
nn.Linear(dim, dim_hidden),
nn.SiLU(),
nn.Linear(dim_hidden, dim_out),
nn.Sigmoid(),
Rearrange('b c -> b c 1 1')
)
# adaptive conv
# the main novelty of the paper - they propose to learn a softmax weighted sum of N convolutional kernels, depending on the text embedding
def get_same_padding(size, kernel, dilation, stride):
return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
class AdaptiveConv2DMod(nn.Module):
def __init__(
self,
dim,
dim_out,
kernel,
*,
demod = True,
stride = 1,
dilation = 1,
eps = 1e-8,
num_conv_kernels = 1 # set this to be greater than 1 for adaptive
):
super().__init__()
self.eps = eps
self.dim_out = dim_out
self.kernel = kernel
self.stride = stride
self.dilation = dilation
self.adaptive = num_conv_kernels > 1
self.weights = nn.Parameter(torch.randn((num_conv_kernels, dim_out, dim, kernel, kernel)))
self.demod = demod
nn.init.kaiming_normal_(self.weights, a = 0, mode = 'fan_in', nonlinearity = 'leaky_relu')
def forward(
self,
fmap,
mod: Optional[Tensor] = None,
kernel_mod: Optional[Tensor] = None
):
"""
notation
b - batch
n - convs
o - output
i - input
k - kernel
"""
b, h = fmap.shape[0], fmap.shape[-2]
# account for feature map that has been expanded by the scale in the first dimension
# due to multiscale inputs and outputs
if mod.shape[0] != b:
mod = repeat(mod, 'b ... -> (s b) ...', s = b // mod.shape[0])
if exists(kernel_mod):
kernel_mod_has_el = kernel_mod.numel() > 0
assert self.adaptive or not kernel_mod_has_el
if kernel_mod_has_el and kernel_mod.shape[0] != b:
kernel_mod = repeat(kernel_mod, 'b ... -> (s b) ...', s = b // kernel_mod.shape[0])
# prepare weights for modulation
weights = self.weights
if self.adaptive:
weights = repeat(weights, '... -> b ...', b = b)
# determine an adaptive weight and 'select' the kernel to use with softmax
assert exists(kernel_mod) and kernel_mod.numel() > 0
kernel_attn = kernel_mod.softmax(dim = -1)
kernel_attn = rearrange(kernel_attn, 'b n -> b n 1 1 1 1')
weights = reduce(weights * kernel_attn, 'b n ... -> b ...', 'sum')
# do the modulation, demodulation, as done in stylegan2
mod = rearrange(mod, 'b i -> b 1 i 1 1')
weights = weights * (mod + 1)
if self.demod:
inv_norm = reduce(weights ** 2, 'b o i k1 k2 -> b o 1 1 1', 'sum').clamp(min = self.eps).rsqrt()
weights = weights * inv_norm
fmap = rearrange(fmap, 'b c h w -> 1 (b c) h w')
weights = rearrange(weights, 'b o ... -> (b o) ...')
padding = get_same_padding(h, self.kernel, self.dilation, self.stride)
fmap = F.conv2d(fmap, weights, padding = padding, groups = b)
return rearrange(fmap, '1 (b o) ... -> b o ...', b = b)
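# hedged usage sketch (not from the original file): with num_conv_kernels > 1, AdaptiveConv2DMod
# above expects a per-sample style vector `mod` of width `dim` and a kernel-selection vector
# `kernel_mod` of width `num_conv_kernels`; the shapes below are illustrative
#
#   conv = AdaptiveConv2DMod(dim = 64, dim_out = 64, kernel = 3, num_conv_kernels = 4)
#   fmap = torch.randn(2, 64, 32, 32)
#   out = conv(fmap, mod = torch.randn(2, 64), kernel_mod = torch.randn(2, 4))  # (2, 64, 32, 32)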
# attention
# they use an attention with a better Lipschitz constant - l2 distance similarity instead of dot product - also shared query / key space - shown in vitgan to be more stable
# not sure what they did about token attention to self, so masking out, as done in some other papers using shared query / key space
class SelfAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
dot_product = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_inner = dim_head * heads
self.dot_product = dot_product
self.norm = ChannelRMSNorm(dim)
self.to_q = nn.Conv2d(dim, dim_inner, 1, bias = False)
self.to_k = nn.Conv2d(dim, dim_inner, 1, bias = False) if dot_product else None
self.to_v = nn.Conv2d(dim, dim_inner, 1, bias = False)
self.null_kv = nn.Parameter(torch.randn(2, heads, dim_head))
self.to_out = nn.Conv2d(dim_inner, dim, 1, bias = False)
def forward(self, fmap):
"""
einstein notation
b - batch
h - heads
x - height
y - width
d - dimension
i - source seq (attend from)
j - target seq (attend to)
"""
batch = fmap.shape[0]
fmap = self.norm(fmap)
x, y = fmap.shape[-2:]
h = self.heads
q, v = self.to_q(fmap), self.to_v(fmap)
k = self.to_k(fmap) if exists(self.to_k) else q
q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> (b h) (x y) d', h = self.heads), (q, k, v))
# add a null key / value, so network can choose to pay attention to nothing
nk, nv = map(lambda t: repeat(t, 'h d -> (b h) 1 d', b = batch), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# l2 distance or dot product
if self.dot_product:
sim = einsum('b i d, b j d -> b i j', q, k)
else:
# using pytorch cdist leads to nans in lightweight gan training framework, at least
q_squared = (q * q).sum(dim = -1)
k_squared = (k * k).sum(dim = -1)
l2dist_squared = rearrange(q_squared, 'b i -> b i 1') + rearrange(k_squared, 'b j -> b 1 j') - 2 * einsum('b i d, b j d -> b i j', q, k) # hope i'm mathing right
sim = -l2dist_squared
# scale
sim = sim * self.scale
# attention
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', x = x, y = y, h = h)
return self.to_out(out)
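# hypothetical sanity-check sketch, for illustration only, of the squared-l2 expansion used in
# SelfAttention above: ||q - k||^2 = ||q||^2 + ||k||^2 - 2 q.k, so the negative squared distance can be
# assembled from the same dot products a standard attention would compute. torch.cdist is only used here
# as a reference, since the comment above notes it was numerically problematic during training
def _check_l2_attention_expansion():
    q = torch.randn(2, 5, 16)
    k = torch.randn(2, 7, 16)
    q_squared = (q * q).sum(dim = -1)
    k_squared = (k * k).sum(dim = -1)
    expanded = rearrange(q_squared, 'b i -> b i 1') + rearrange(k_squared, 'b j -> b 1 j') - 2 * einsum('b i d, b j d -> b i j', q, k)
    reference = torch.cdist(q, k) ** 2
    assert torch.allclose(expanded, reference, atol = 1e-3)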
class CrossAttention(nn.Module):
def __init__(
self,
dim,
dim_context,
dim_head = 64,
heads = 8
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_inner = dim_head * heads
kv_input_dim = default(dim_context, dim)
self.norm = ChannelRMSNorm(dim)
self.norm_context = RMSNorm(kv_input_dim)
self.to_q = nn.Conv2d(dim, dim_inner, 1, bias = False)
self.to_kv = nn.Linear(kv_input_dim, dim_inner * 2, bias = False)
self.to_out = nn.Conv2d(dim_inner, dim, 1, bias = False)
def forward(self, fmap, context, mask = None):
"""
einstein notation
b - batch
h - heads
x - height
y - width
d - dimension
i - source seq (attend from)
j - target seq (attend to)
"""
fmap = self.norm(fmap)
context = self.norm_context(context)
x, y = fmap.shape[-2:]
h = self.heads
q, k, v = (self.to_q(fmap), *self.to_kv(context).chunk(2, dim = -1))
k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (k, v))
q = rearrange(q, 'b (h d) x y -> (b h) (x y) d', h = self.heads)
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if exists(mask):
mask = repeat(mask, 'b j -> (b h) 1 j', h = self.heads)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', x = x, y = y, h = h)
return self.to_out(out)
# classic transformer attention, stick with l2 distance
class TextAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_inner = dim_head * heads
self.norm = RMSNorm(dim)
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)
self.null_kv = nn.Parameter(torch.randn(2, heads, dim_head))
self.to_out = nn.Linear(dim_inner, dim, bias = False)
def forward(self, encodings, mask = None):
"""
einstein notation
b - batch
h - heads
x - height
y - width
d - dimension
i - source seq (attend from)
j - target seq (attend to)
"""
batch = encodings.shape[0]
encodings = self.norm(encodings)
h = self.heads
q, k, v = self.to_qkv(encodings).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = self.heads), (q, k, v))
# add a null key / value, so network can choose to pay attention to nothing
nk, nv = map(lambda t: repeat(t, 'h d -> (b h) 1 d', b = batch), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
# key padding mask
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = repeat(mask, 'b n -> (b h) 1 n', h = h)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
return self.to_out(out)
# feedforward
def FeedForward(
dim,
mult = 4,
channel_first = False
):
dim_hidden = int(dim * mult)
norm_klass = ChannelRMSNorm if channel_first else RMSNorm
proj = partial(nn.Conv2d, kernel_size = 1) if channel_first else nn.Linear
return nn.Sequential(
norm_klass(dim),
proj(dim, dim_hidden),
nn.GELU(),
proj(dim_hidden, dim)
)
# different types of transformer blocks or transformers (multiple blocks)
class SelfAttentionBlock(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
ff_mult = 4,
dot_product = False
):
super().__init__()
self.attn = SelfAttention(dim = dim, dim_head = dim_head, heads = heads, dot_product = dot_product)
self.ff = FeedForward(dim = dim, mult = ff_mult, channel_first = True)
def forward(self, x):
x = self.attn(x) + x
x = self.ff(x) + x
return x
class CrossAttentionBlock(nn.Module):
def __init__(
self,
dim,
dim_context,
dim_head = 64,
heads = 8,
ff_mult = 4
):
super().__init__()
self.attn = CrossAttention(dim = dim, dim_context = dim_context, dim_head = dim_head, heads = heads)
self.ff = FeedForward(dim = dim, mult = ff_mult, channel_first = True)
def forward(self, x, context, mask = None):
x = self.attn(x, context = context, mask = mask) + x
x = self.ff(x) + x
return x
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
TextAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = RMSNorm(dim)
def forward(self, x, mask = None):
for attn, ff in self.layers:
x = attn(x, mask = mask) + x
x = ff(x) + x
return self.norm(x)
# text encoder
class TextEncoder(nn.Module):
@beartype
def __init__(
self,
*,
dim,
depth,
clip: Optional[OpenClipAdapter] = None,
dim_head = 64,
heads = 8,
):
super().__init__()
self.dim = dim
if not exists(clip):
clip = OpenClipAdapter()
self.clip = clip
set_requires_grad_(clip, False)
self.learned_global_token = nn.Parameter(torch.randn(dim))
self.project_in = nn.Linear(clip.dim_latent, dim) if clip.dim_latent != dim else nn.Identity()
self.transformer = Transformer(
dim = dim,
depth = depth,
dim_head = dim_head,
heads = heads
)
@beartype
def forward(
self,
texts: Optional[List[str]] = None,
text_encodings: Optional[Tensor] = None
):
assert exists(texts) ^ exists(text_encodings)
if not exists(text_encodings):
with torch.no_grad():
self.clip.eval()
_, text_encodings = self.clip.embed_texts(texts)
mask = (text_encodings != 0.).any(dim = -1)
text_encodings = self.project_in(text_encodings)
mask_with_global = F.pad(mask, (1, 0), value = True)
batch = text_encodings.shape[0]
global_tokens = repeat(self.learned_global_token, 'd -> b d', b = batch)
text_encodings, ps = pack([global_tokens, text_encodings], 'b * d')
text_encodings = self.transformer(text_encodings, mask = mask_with_global)
global_tokens, text_encodings = unpack(text_encodings, ps, 'b * d')
return global_tokens, text_encodings, mask
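# hypothetical shape sketch, for illustration only, of the einops pack / unpack pattern used in
# TextEncoder.forward above: a single learned global token of shape (b, d) is packed in front of the
# (b, n, d) CLIP token sequence, run through the transformer together with it, then split back apart
def _example_pack_unpack_global_token():
    b, n, d = 2, 10, 64
    global_token = torch.randn(b, d)
    text_tokens = torch.randn(b, n, d)
    packed, ps = pack([global_token, text_tokens], 'b * d')
    assert packed.shape == (b, 1 + n, d)
    global_token_out, text_tokens_out = unpack(packed, ps, 'b * d')
    assert global_token_out.shape == (b, d) and text_tokens_out.shape == (b, n, d)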
# style mapping network
class EqualLinear(nn.Module):
def __init__(
self,
dim,
dim_out,
lr_mul = 1,
bias = True
):
super().__init__()
self.weight = nn.Parameter(torch.randn(dim_out, dim))
self.bias = nn.Parameter(torch.zeros(dim_out)) if bias else None
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias = self.bias * self.lr_mul if exists(self.bias) else None)
class StyleNetwork(nn.Module):
def __init__(
self,
dim,
depth,
lr_mul = 0.1,
dim_text_latent = 0
):
super().__init__()
self.dim = dim
self.dim_text_latent = dim_text_latent
layers = []
for i in range(depth):
is_first = i == 0
dim_in = (dim + dim_text_latent) if is_first else dim
layers.extend([EqualLinear(dim_in, dim, lr_mul), leaky_relu()])
self.net = nn.Sequential(*layers)
def forward(
self,
x,
text_latent = None
):
x = F.normalize(x, dim = 1)
if self.dim_text_latent > 0:
assert exists(text_latent)
x = torch.cat((x, text_latent), dim = -1)
return self.net(x)
# noise
class Noise(nn.Module):
def __init__(self, dim):
super().__init__()
self.weight = nn.Parameter(torch.zeros(dim, 1, 1))
def forward(
self,
x,
noise = None
):
b, _, h, w, device = *x.shape, x.device
if not exists(noise):
noise = torch.randn(b, 1, h, w, device = device)
return x + self.weight * noise
# generator
class BaseGenerator(nn.Module):
pass
class Generator(BaseGenerator):
@beartype
def __init__(
self,
*,
image_size,
dim_capacity = 16,
dim_max = 2048,
channels = 3,
style_network: Optional[Union[StyleNetwork, Dict]] = None,
style_network_dim = None,
text_encoder: Optional[Union[TextEncoder, Dict]] = None,
dim_latent = 512,
self_attn_resolutions: Tuple[int, ...] = (32, 16),
self_attn_dim_head = 64,
self_attn_heads = 8,
self_attn_dot_product = True,
self_attn_ff_mult = 4,
cross_attn_resolutions: Tuple[int, ...] = (32, 16),
cross_attn_dim_head = 64,
cross_attn_heads = 8,
cross_attn_ff_mult = 4,
num_conv_kernels = 2, # the number of adaptive conv kernels
num_skip_layers_excite = 0,
unconditional = False,
pixel_shuffle_upsample = False
):
super().__init__()
self.channels = channels
if isinstance(style_network, dict):
style_network = StyleNetwork(**style_network)
self.style_network = style_network
assert exists(style_network) ^ exists(style_network_dim), 'style_network_dim must be given to the generator if StyleNetwork not passed in as style_network'
if not exists(style_network_dim):
style_network_dim = style_network.dim
self.style_network_dim = style_network_dim
if isinstance(text_encoder, dict):
text_encoder = TextEncoder(**text_encoder)
self.text_encoder = text_encoder
self.unconditional = unconditional
assert not (unconditional and exists(text_encoder))
assert not (unconditional and exists(style_network) and style_network.dim_text_latent > 0)
assert unconditional or (exists(text_encoder) and text_encoder.dim == style_network.dim_text_latent), 'the `dim_text_latent` on your StyleNetwork must be equal to the `dim` set for the TextEncoder'
assert is_power_of_two(image_size)
num_layers = int(log2(image_size) - 1)
self.num_layers = num_layers
# generator requires convolutions conditioned by the style vector
# and also has N convolutional kernels adaptively selected (one of the only novelties of the paper)
is_adaptive = num_conv_kernels > 1
dim_kernel_mod = num_conv_kernels if is_adaptive else 0
style_embed_split_dims = []
adaptive_conv = partial(AdaptiveConv2DMod, kernel = 3, num_conv_kernels = num_conv_kernels)
# initial 4x4 block and conv
self.init_block = nn.Parameter(torch.randn(dim_latent, 4, 4))
self.init_conv = adaptive_conv(dim_latent, dim_latent)
style_embed_split_dims.extend([
dim_latent,
dim_kernel_mod
])
# main network
resolutions = image_size / ((2 ** torch.arange(num_layers).flip(0)))
resolutions = resolutions.long().tolist()
dim_layers = (2 ** (torch.arange(num_layers) + 1)) * dim_capacity
dim_layers.clamp_(max = dim_max)
dim_layers = torch.flip(dim_layers, (0,))
dim_layers = F.pad(dim_layers, (1, 0), value = dim_latent)
dim_layers = dim_layers.tolist()
dim_pairs = list(zip(dim_layers[:-1], dim_layers[1:]))
self.num_skip_layers_excite = num_skip_layers_excite
self.layers = nn.ModuleList([])
# go through layers and construct all parameters
for ind, ((dim_in, dim_out), resolution) in enumerate(zip(dim_pairs, resolutions)):
is_last = (ind + 1) == len(dim_pairs)
is_first = ind == 0
should_upsample = not is_first
should_upsample_rgb = not is_last
should_skip_layer_excite = num_skip_layers_excite > 0 and (ind + num_skip_layers_excite) < len(dim_pairs)
has_self_attn = resolution in self_attn_resolutions
has_cross_attn = resolution in cross_attn_resolutions and not unconditional
skip_squeeze_excite = None
if should_skip_layer_excite:
dim_skip_in, _ = dim_pairs[ind + num_skip_layers_excite]
skip_squeeze_excite = SqueezeExcite(dim_in, dim_skip_in)
resnet_block = nn.ModuleList([
adaptive_conv(dim_in, dim_out),
Noise(dim_out),
leaky_relu(),
adaptive_conv(dim_out, dim_out),
Noise(dim_out),
leaky_relu()
])
to_rgb = AdaptiveConv2DMod(dim_out, channels, 1, num_conv_kernels = 1, demod = False)
self_attn = cross_attn = rgb_upsample = upsample = None
upsample_klass = Upsample if not pixel_shuffle_upsample else PixelShuffleUpsample
upsample = upsample_klass(dim_in) if should_upsample else None
rgb_upsample = upsample_klass(channels) if should_upsample_rgb else None
if has_self_attn:
self_attn = SelfAttentionBlock(
dim_out,
dim_head = self_attn_dim_head,
heads = self_attn_heads,
ff_mult = self_attn_ff_mult,
dot_product = self_attn_dot_product
)
if has_cross_attn:
cross_attn = CrossAttentionBlock(
dim_out,
dim_context = text_encoder.dim,
dim_head = cross_attn_dim_head,
heads = cross_attn_heads,
ff_mult = cross_attn_ff_mult,
)
style_embed_split_dims.extend([
dim_in, # for first conv in resnet block
dim_kernel_mod, # first conv kernel selection
dim_out, # second conv in resnet block
dim_kernel_mod, # second conv kernel selection
dim_out, # to RGB conv
0, # RGB conv kernel selection
])
self.layers.append(nn.ModuleList([
skip_squeeze_excite,
resnet_block,
to_rgb,
self_attn,
cross_attn,
upsample,
rgb_upsample
]))
# determine the projection of the style embedding to convolutional modulation weights (+ adaptive kernel selection weights) for all layers
self.style_to_conv_modulations = nn.Linear(style_network_dim, sum(style_embed_split_dims))
self.style_embed_split_dims = style_embed_split_dims
self.apply(self.init_)
nn.init.normal_(self.init_block, std = 0.02)
def init_(self, m):
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a = 0, mode = 'fan_in', nonlinearity = 'leaky_relu')
@property
def total_params(self):
return sum([p.numel() for p in self.parameters() if p.requires_grad])
@property
def device(self):
return next(self.parameters()).device
@beartype
def forward(
self,
styles = None,
noise = None,
texts: Optional[List[str]] = None,
text_encodings: Optional[Tensor] = None,
global_text_tokens = None,
fine_text_tokens = None,
text_mask = None,
batch_size = 1,
return_all_rgbs = False
):
# take care of text encodings
# which requires global text tokens to adaptively select the kernels from the main contribution in the paper
# and fine text tokens to attend to using cross attention
if not self.unconditional:
if exists(texts) or exists(text_encodings):
assert exists(texts) ^ exists(text_encodings), 'either raw texts as List[str] or text_encodings (from clip) as Tensor is passed in, but not both'
assert exists(self.text_encoder)
if exists(texts):
text_encoder_kwargs = dict(texts = texts)
elif exists(text_encodings):
text_encoder_kwargs = dict(text_encodings = text_encodings)
global_text_tokens, fine_text_tokens, text_mask = self.text_encoder(**text_encoder_kwargs)
else:
assert all([*map(exists, (global_text_tokens, fine_text_tokens, text_mask))]), 'raw text or text embeddings were not passed in for conditional training'
else:
assert not any([*map(exists, (texts, global_text_tokens, fine_text_tokens))])
# determine styles
if not exists(styles):
assert exists(self.style_network)
if not exists(noise):
noise = torch.randn((batch_size, self.style_network_dim), device = self.device)
styles = self.style_network(noise, global_text_tokens)
# project styles to conv modulations
conv_mods = self.style_to_conv_modulations(styles)
conv_mods = conv_mods.split(self.style_embed_split_dims, dim = -1)
conv_mods = iter(conv_mods)
# prepare initial block
batch_size = styles.shape[0]
x = repeat(self.init_block, 'c h w -> b c h w', b = batch_size)
x = self.init_conv(x, mod = next(conv_mods), kernel_mod = next(conv_mods))
rgb = torch.zeros((batch_size, self.channels, 4, 4), device = self.device, dtype = x.dtype)
# skip layer squeeze excitations
excitations = [None] * self.num_skip_layers_excite
# the rgbs from each layer of the generator are saved for multi-resolution input discrimination
rgbs = []
# main network
for squeeze_excite, (resnet_conv1, noise1, act1, resnet_conv2, noise2, act2), to_rgb_conv, self_attn, cross_attn, upsample, upsample_rgb in self.layers:
if exists(upsample):
x = upsample(x)
if exists(squeeze_excite):
skip_excite = squeeze_excite(x)
excitations.append(skip_excite)
excite = safe_unshift(excitations)
if exists(excite):
x = x * excite
x = resnet_conv1(x, mod = next(conv_mods), kernel_mod = next(conv_mods))
x = noise1(x)
x = act1(x)
x = resnet_conv2(x, mod = next(conv_mods), kernel_mod = next(conv_mods))
x = noise2(x)
x = act2(x)
if exists(self_attn):
x = self_attn(x)
if exists(cross_attn):
x = cross_attn(x, context = fine_text_tokens, mask = text_mask)
layer_rgb = to_rgb_conv(x, mod = next(conv_mods), kernel_mod = next(conv_mods))
rgb = rgb + layer_rgb
rgbs.append(rgb)
if exists(upsample_rgb):
rgb = upsample_rgb(rgb)
# sanity check
assert is_empty([*conv_mods]), 'convolutions were incorrectly modulated'
if return_all_rgbs:
return rgb, rgbs
return rgb
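# hypothetical sketch, for illustration only, of the split-and-consume pattern used in Generator.forward
# above: a single linear projection of the style vector is chunked according to style_embed_split_dims
# into per-convolution modulation vectors plus kernel-selection logits, and the layers drain the
# resulting iterator in order with next(...). the dims below are made up for illustration
def _example_style_split_consumption():
    batch, style_dim = 2, 32
    split_dims = [8, 4, 8, 0]  # e.g. conv mod, kernel logits, conv mod, empty slot for a non-adaptive conv
    to_mods = nn.Linear(style_dim, sum(split_dims))
    mods = iter(to_mods(torch.randn(batch, style_dim)).split(split_dims, dim = -1))
    first_mod, first_kernel_logits = next(mods), next(mods)
    assert first_mod.shape == (batch, 8) and first_kernel_logits.shape == (batch, 4)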
# discriminator
@beartype
class SimpleDecoder(nn.Module):
def __init__(
self,
dim,
*,
dims: Tuple[int, ...],
patch_dim: int = 1,
frac_patches: float = 1.,
dropout: float = 0.5
):
super().__init__()
assert 0 < frac_patches <= 1.
self.patch_dim = patch_dim
self.frac_patches = frac_patches
self.dropout = nn.Dropout(dropout)
dims = [dim, *dims]
layers = [conv2d_3x3(dim, dim)]
for dim_in, dim_out in zip(dims[:-1], dims[1:]):
layers.append(nn.Sequential(
Upsample(dim_in),
conv2d_3x3(dim_in, dim_out),
leaky_relu()
))
self.net = nn.Sequential(*layers)
@property
def device(self):
return next(self.parameters()).device
def forward(
self,
fmap,
orig_image
):
fmap = self.dropout(fmap)
if self.frac_patches < 1.:
batch, patch_dim = fmap.shape[0], self.patch_dim
fmap_size, img_size = fmap.shape[-1], orig_image.shape[-1]
assert divisible_by(fmap_size, patch_dim), f'feature map dimensions are {fmap_size}, but the patch dim was designated to be {patch_dim}'
assert divisible_by(img_size, patch_dim), f'image size is {img_size} but the patch dim was specified to be {patch_dim}'
fmap, orig_image = map(lambda t: rearrange(t, 'b c (p1 h) (p2 w) -> b (p1 p2) c h w', p1 = patch_dim, p2 = patch_dim), (fmap, orig_image))
total_patches = patch_dim ** 2
num_patches_recon = max(int(self.frac_patches * total_patches), 1)
batch_arange = torch.arange(batch, device = self.device)[..., None]
batch_randperm = torch.randn((batch, total_patches), device = self.device).sort(dim = -1).indices
patch_indices = batch_randperm[..., :num_patches_recon]
fmap, orig_image = map(lambda t: t[batch_arange, patch_indices], (fmap, orig_image))
fmap, orig_image = map(lambda t: rearrange(t, 'b p ... -> (b p) ...'), (fmap, orig_image))
recon = self.net(fmap)
return F.mse_loss(recon, orig_image)
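# hypothetical sketch, for illustration only, of the random patch-subset selection used in
# SimpleDecoder.forward above: the feature map is cut into patch_dim x patch_dim patches, a random
# fraction of the patches is gathered per batch element, and only those are reconstructed, which keeps
# the auxiliary reconstruction loss cheap
def _example_patch_subset_selection():
    batch, channels, size, patch_dim, frac = 2, 3, 8, 2, 0.5
    fmap = torch.randn(batch, channels, size, size)
    patches = rearrange(fmap, 'b c (p1 h) (p2 w) -> b (p1 p2) c h w', p1 = patch_dim, p2 = patch_dim)
    total_patches = patch_dim ** 2
    num_keep = max(int(frac * total_patches), 1)
    rand_indices = torch.randn(batch, total_patches).argsort(dim = -1)[:, :num_keep]
    batch_arange = torch.arange(batch)[:, None]
    kept = patches[batch_arange, rand_indices]
    assert kept.shape == (batch, num_keep, channels, size // patch_dim, size // patch_dim)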
class RandomFixedProjection(nn.Module):
def __init__(
self,
dim,
dim_out,
channel_first = True
):
super().__init__()
weights = torch.randn(dim, dim_out)
nn.init.kaiming_normal_(weights, mode = 'fan_out', nonlinearity = 'linear')
self.channel_first = channel_first
self.register_buffer('fixed_weights', weights)
def forward(self, x):
if not self.channel_first:
return x @ self.fixed_weights
return einsum('b c ..., c d -> b d ...', x, self.fixed_weights)
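# hypothetical shape sketch, for illustration only: RandomFixedProjection above projects the channel
# dimension with a frozen, randomly initialized matrix. with channel_first = True the einsum maps a
# (b, c, h, w) map with a (c, d) matrix straight to (b, d, h, w), without any permutes or reshapes
def _example_random_fixed_projection_shapes():
    proj = RandomFixedProjection(dim = 8, dim_out = 16, channel_first = True)
    x = torch.randn(2, 8, 7, 7)
    assert proj(x).shape == (2, 16, 7, 7)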
class VisionAidedDiscriminator(nn.Module):
""" the vision-aided gan loss """
@beartype
def __init__(
self,
*,
depth = 2,
dim_head = 64,
heads = 8,
clip: Optional[OpenClipAdapter] = None,
layer_indices = (-1, -2, -3),
conv_dim = None,
text_dim = None,
unconditional = False,
num_conv_kernels = 2
):
super().__init__()
if not exists(clip):
clip = OpenClipAdapter()
self.clip = clip
dim = clip._dim_image_latent
self.unconditional = unconditional
text_dim = default(text_dim, dim)
conv_dim = default(conv_dim, dim)
self.layer_discriminators = nn.ModuleList([])
self.layer_indices = layer_indices
conv_klass = partial(AdaptiveConv2DMod, kernel = 3, num_conv_kernels = num_conv_kernels) if not unconditional else conv2d_3x3
for _ in layer_indices:
self.layer_discriminators.append(nn.ModuleList([
RandomFixedProjection(dim, conv_dim),
conv_klass(conv_dim, conv_dim),
nn.Linear(text_dim, conv_dim) if not unconditional else None,
nn.Linear(text_dim, num_conv_kernels) if not unconditional else None,
nn.Sequential(
conv2d_3x3(conv_dim, 1),
Rearrange('b 1 ... -> b ...')
)
]))
def parameters(self):
return self.layer_discriminators.parameters()
@property
def total_params(self):
return sum([p.numel() for p in self.parameters()])
@beartype
def forward(
self,
images,
texts: Optional[List[str]] = None,
text_embeds: Optional[Tensor] = None,
return_clip_encodings = False
):
assert self.unconditional or (exists(text_embeds) ^ exists(texts))
with torch.no_grad():
if not self.unconditional and exists(texts):
self.clip.eval()
text_embeds = self.clip.embed_texts(texts)
_, image_encodings = self.clip.embed_images(images)
logits = []
for layer_index, (rand_proj, conv, to_conv_mod, to_conv_kernel_mod, to_logits) in zip(self.layer_indices, self.layer_discriminators):
image_encoding = image_encodings[layer_index]
cls_token, rest_tokens = image_encoding[:, :1], image_encoding[:, 1:]
height_width = int(sqrt(rest_tokens.shape[-2])) # assume square
img_fmap = rearrange(rest_tokens, 'b (h w) d -> b d h w', h = height_width)
img_fmap = img_fmap + rearrange(cls_token, 'b 1 d -> b d 1 1 ') # pool the cls token into the rest of the tokens
img_fmap = rand_proj(img_fmap)
if self.unconditional:
img_fmap = conv(img_fmap)
else:
assert exists(text_embeds)
img_fmap = conv(
img_fmap,
mod = to_conv_mod(text_embeds),
kernel_mod = to_conv_kernel_mod(text_embeds)
)
layer_logits = to_logits(img_fmap)
logits.append(layer_logits)
if not return_clip_encodings:
return logits
return logits, image_encodings
class Predictor(nn.Module):
def __init__(
self,
dim,
depth = 4,
num_conv_kernels = 2,
unconditional = False
):
super().__init__()
self.unconditional = unconditional
self.residual_fn = nn.Conv2d(dim, dim, 1)
self.residual_scale = 2 ** -0.5
self.layers = nn.ModuleList([])
klass = nn.Conv2d if unconditional else partial(AdaptiveConv2DMod, num_conv_kernels = num_conv_kernels)
klass_kwargs = dict(padding = 1) if unconditional else dict()
for ind in range(depth):
self.layers.append(nn.ModuleList([
klass(dim, dim, 3, **klass_kwargs),
leaky_relu(),
klass(dim, dim, 3, **klass_kwargs),
leaky_relu()
]))
self.to_logits = nn.Conv2d(dim, 1, 1)
def forward(
self,
x,
mod = None,
kernel_mod = None
):
residual = self.residual_fn(x)
kwargs = dict()
if not self.unconditional:
kwargs = dict(mod = mod, kernel_mod = kernel_mod)
for conv1, activation, conv2, activation in self.layers:
inner_residual = x
x = conv1(x, **kwargs)
x = activation(x)
x = conv2(x, **kwargs)
x = activation(x)
x = x + inner_residual
x = x * self.residual_scale
x = x + residual
return self.to_logits(x)
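# hypothetical usage sketch, for illustration only, of the Predictor above in its unconditional
# configuration, where each layer is a plain 3x3 convolution and no text-derived mod / kernel_mod is
# needed. the output is a single-channel patch-wise logit map at the input resolution
def _example_unconditional_predictor():
    predictor = Predictor(dim = 16, depth = 2, unconditional = True)
    fmap = torch.randn(2, 16, 8, 8)
    logits = predictor(fmap)
    assert logits.shape == (2, 1, 8, 8)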
class Discriminator(nn.Module):
@beartype
def __init__(
self,
*,
dim_capacity = 16,
image_size,
dim_max = 2048,
channels = 3,
attn_resolutions: Tuple[int, ...] = (32, 16),
attn_dim_head = 64,
attn_heads = 8,
self_attn_dot_product = False,
ff_mult = 4,
text_encoder: Optional[Union[TextEncoder, Dict]] = None,
text_dim = None,
filter_input_resolutions: bool = True,
multiscale_input_resolutions: Tuple[int, ...] = (64, 32, 16, 8),
multiscale_output_skip_stages: int = 1,
aux_recon_resolutions: Tuple[int, ...] = (8,),
aux_recon_patch_dims: Tuple[int, ...] = (2,),
aux_recon_frac_patches: Tuple[float, ...] = (0.25,),
aux_recon_fmap_dropout: float = 0.5,
resize_mode = 'bilinear',
num_conv_kernels = 2,
num_skip_layers_excite = 0,
unconditional = False,
predictor_depth = 2
):
super().__init__()
self.unconditional = unconditional
assert not (unconditional and exists(text_encoder))
assert is_power_of_two(image_size)
assert all([*map(is_power_of_two, attn_resolutions)])
if filter_input_resolutions:
multiscale_input_resolutions = [*filter(lambda t: t < image_size, multiscale_input_resolutions)]
assert is_unique(multiscale_input_resolutions)
assert all([*map(is_power_of_two, multiscale_input_resolutions)])
assert all([*map(lambda t: t < image_size, multiscale_input_resolutions)])
self.multiscale_input_resolutions = multiscale_input_resolutions
assert multiscale_output_skip_stages > 0
multiscale_output_resolutions = [resolution // (2 ** multiscale_output_skip_stages) for resolution in multiscale_input_resolutions]
assert all([*map(lambda t: t >= 4, multiscale_output_resolutions)])
assert all([*map(lambda t: t < image_size, multiscale_output_resolutions)])
if len(multiscale_input_resolutions) > 0 and len(multiscale_output_resolutions) > 0:
assert max(multiscale_input_resolutions) > max(multiscale_output_resolutions)
assert min(multiscale_input_resolutions) > min(multiscale_output_resolutions)
self.multiscale_output_resolutions = multiscale_output_resolutions
assert all([*map(is_power_of_two, aux_recon_resolutions)])
assert len(aux_recon_resolutions) == len(aux_recon_patch_dims) == len(aux_recon_frac_patches)
self.aux_recon_resolutions_to_patches = {resolution: (patch_dim, frac_patches) for resolution, patch_dim, frac_patches in zip(aux_recon_resolutions, aux_recon_patch_dims, aux_recon_frac_patches)}
self.resize_mode = resize_mode
num_layers = int(log2(image_size) - 1)
self.num_layers = num_layers
self.image_size = image_size
resolutions = image_size / ((2 ** torch.arange(num_layers)))
resolutions = resolutions.long().tolist()
dim_layers = (2 ** (torch.arange(num_layers) + 1)) * dim_capacity
dim_layers = F.pad(dim_layers, (1, 0), value = channels)
dim_layers.clamp_(max = dim_max)
dim_layers = dim_layers.tolist()
dim_last = dim_layers[-1]
dim_pairs = list(zip(dim_layers[:-1], dim_layers[1:]))
self.num_skip_layers_excite = num_skip_layers_excite
self.residual_scale = 2 ** -0.5
self.layers = nn.ModuleList([])
upsample_dims = []
predictor_dims = []
dim_kernel_attn = (num_conv_kernels if num_conv_kernels > 1 else 0)
for ind, ((dim_in, dim_out), resolution) in enumerate(zip(dim_pairs, resolutions)):
is_first = ind == 0
is_last = (ind + 1) == len(dim_pairs)
should_downsample = not is_last
should_skip_layer_excite = not is_first and num_skip_layers_excite > 0 and (ind + num_skip_layers_excite) < len(dim_pairs)
has_attn = resolution in attn_resolutions
has_multiscale_output = resolution in multiscale_output_resolutions
has_aux_recon_decoder = resolution in aux_recon_resolutions
upsample_dims.insert(0, dim_in)
skip_squeeze_excite = None
if should_skip_layer_excite:
dim_skip_in, _ = dim_pairs[ind + num_skip_layers_excite]
skip_squeeze_excite = SqueezeExcite(dim_in, dim_skip_in)
# multi-scale rgb input to feature dimension
from_rgb = nn.Conv2d(channels, dim_in, 7, padding = 3)
# residual convolution
residual_conv = nn.Conv2d(dim_in, dim_out, 1, stride = (2 if should_downsample else 1))
# main resnet block
resnet_block = nn.Sequential(
conv2d_3x3(dim_in, dim_out),
leaky_relu(),
conv2d_3x3(dim_out, dim_out),
leaky_relu()
)
# multi-scale output
multiscale_output_predictor = None
if has_multiscale_output:
multiscale_output_predictor = Predictor(dim_out, num_conv_kernels = num_conv_kernels, depth = 2, unconditional = unconditional)
predictor_dims.extend([dim_out, dim_kernel_attn])
aux_recon_decoder = None
if has_aux_recon_decoder:
patch_dim, frac_patches = self.aux_recon_resolutions_to_patches[resolution]
aux_recon_decoder = SimpleDecoder(
dim_out,
dims = tuple(upsample_dims),
patch_dim = patch_dim,
frac_patches = frac_patches,
dropout = aux_recon_fmap_dropout
)
self.layers.append(nn.ModuleList([
skip_squeeze_excite,
from_rgb,
resnet_block,
residual_conv,
SelfAttentionBlock(dim_out, heads = attn_heads, dim_head = attn_dim_head, ff_mult = ff_mult, dot_product = self_attn_dot_product) if has_attn else None,
multiscale_output_predictor,
aux_recon_decoder,
Downsample(dim_out) if should_downsample else None,
]))
self.to_logits = nn.Sequential(
conv2d_3x3(dim_last, dim_last),
Rearrange('b c h w -> b (c h w)'),
nn.Linear(dim_last * (4 ** 2), 1),
Rearrange('b 1 -> b')
)
# take care of text conditioning in the multiscale predictor branches
assert unconditional or (exists(text_dim) ^ exists(text_encoder))
if not unconditional:
if isinstance(text_encoder, dict):
text_encoder = TextEncoder(**text_encoder)
self.text_dim = default(text_dim, text_encoder.dim)
self.predictor_dims = predictor_dims
self.text_to_conv_conditioning = nn.Linear(self.text_dim, sum(predictor_dims)) if exists(self.text_dim) else None
self.text_encoder = text_encoder
self.apply(self.init_)
def init_(self, m):
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a = 0, mode = 'fan_in', nonlinearity = 'leaky_relu')
def resize_image_to(self, images, resolution):
return F.interpolate(images, resolution, mode = self.resize_mode)
def real_images_to_rgbs(self, images):
return [self.resize_image_to(images, resolution) for resolution in self.multiscale_input_resolutions]
@property
def total_params(self):
return sum([p.numel() for p in self.parameters()])
@property
def device(self):
return next(self.parameters()).device
@beartype
def forward(
self,
images,
rgbs: List[Tensor], # multi-resolution inputs (rgbs) from the generator
texts: Optional[List[str]] = None,
text_encodings: Optional[Tensor] = None,
text_embeds = None,
real_images = None, # if this is passed in, the network will automatically append the real images to the (presumably generated) images passed in as the first argument, and generate all intermediate resolutions through resizing and concatenation
return_multiscale_outputs = True, # can force it not to return multi-scale logits
calc_aux_loss = True
):
if not self.unconditional:
assert (exists(texts) ^ exists(text_encodings)) ^ exists(text_embeds), 'either texts as List[str] is passed in, or clip text_encodings as Tensor'
if exists(texts):
assert exists(self.text_encoder)
text_embeds, *_ = self.text_encoder(texts = texts)
elif exists(text_encodings):
assert exists(self.text_encoder)
text_embeds, *_ = self.text_encoder(text_encodings = text_encodings)
assert exists(text_embeds), 'raw text or text embeddings were not passed into discriminator for conditional training'
conv_mods = self.text_to_conv_conditioning(text_embeds).split(self.predictor_dims, dim = -1)
conv_mods = iter(conv_mods)
else:
assert not any([*map(exists, (texts, text_embeds))])
x = images
image_size = (self.image_size, self.image_size)
assert x.shape[-2:] == image_size
batch = x.shape[0]
# index the rgbs by resolution
rgbs_index = {t.shape[-1]: t for t in rgbs} if exists(rgbs) else {}
# assert that the necessary resolutions are there
assert is_empty(set(self.multiscale_input_resolutions) - set(rgbs_index.keys())), f'rgbs of necessary resolution {self.multiscale_input_resolutions} were not passed in'
# hold multiscale outputs
multiscale_outputs = []
# hold auxiliary recon losses
aux_recon_losses = []
# excitations
excitations = [None] * (self.num_skip_layers_excite + 1) # +1 since first image in pixel space is not excited
for squeeze_excite, from_rgb, block, residual_fn, attn, predictor, recon_decoder, downsample in self.layers:
resolution = x.shape[-1]
if exists(squeeze_excite):
skip_excite = squeeze_excite(x)
excitations.append(skip_excite)
excite = safe_unshift(excitations)
if exists(excite):
excite = repeat(excite, 'b ... -> (s b) ...', s = x.shape[0] // excite.shape[0])
x = x * excite
batch_prev_stage = x.shape[0]
has_multiscale_input = resolution in self.multiscale_input_resolutions
if has_multiscale_input:
rgb = rgbs_index.get(resolution, None)
# multi-scale input features
multi_scale_input_feats = from_rgb(rgb)
# expand multi-scale input features, as could include extra scales from previous stage
multi_scale_input_feats = repeat(multi_scale_input_feats, 'b ... -> (s b) ...', s = x.shape[0] // rgb.shape[0])
# add the multi-scale input features to the current hidden state from main stem
x = x + multi_scale_input_feats
# and also concat for scale invariance
x = torch.cat((x, multi_scale_input_feats), dim = 0)
residual = residual_fn(x)
x = block(x)
if exists(attn):
x = attn(x)
if exists(predictor):
pred_kwargs = dict()
if not self.unconditional:
pred_kwargs = dict(mod = next(conv_mods), kernel_mod = next(conv_mods))
if return_multiscale_outputs:
predictor_input = x[:batch_prev_stage]
multiscale_outputs.append(predictor(predictor_input, **pred_kwargs))
if exists(downsample):
x = downsample(x)
x = x + residual
x = x * self.residual_scale
if exists(recon_decoder) and calc_aux_loss:
recon_output = x[:batch_prev_stage]
recon_output = rearrange(recon_output, '(s b) ... -> s b ...', b = batch)
aux_recon_target = images
# only use the input real images for aux recon
recon_output = recon_output[0]
# only reconstruct a fraction of images across batch and scale
# for efficiency
aux_recon_loss = recon_decoder(recon_output, aux_recon_target)
aux_recon_losses.append(aux_recon_loss)
# sanity check
assert self.unconditional or is_empty([*conv_mods]), 'convolutions were incorrectly modulated'
# to logits
logits = self.to_logits(x)
logits = rearrange(logits, '(s b) ... -> s b ...', b = batch)
return logits, multiscale_outputs, aux_recon_losses
# gan
TrainDiscrLosses = namedtuple('TrainDiscrLosses', [
'divergence',
'multiscale_divergence',
'vision_aided_divergence',
'total_matching_aware_loss',
'gradient_penalty',
'aux_reconstruction'
])
TrainGenLosses = namedtuple('TrainGenLosses', [
'divergence',
'multiscale_divergence',
'total_vd_divergence',
'contrastive_loss'
])
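# hypothetical reference sketch, for illustration only, of the hinge losses consumed by the training
# steps in GigaGAN below. discriminator_hinge_loss / generator_hinge_loss are the functions used there;
# the standard hinge formulation they are assumed to follow is written out here so the divergence terms
# below are easier to read
def _hinge_losses_sketch(real_logits, fake_logits):
    d_loss = (F.relu(1 + fake_logits) + F.relu(1 - real_logits)).mean()  # discriminator hinge
    g_loss = -fake_logits.mean()                                         # generator hinge
    return d_loss, g_loss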
class GigaGAN(nn.Module):
@beartype
def __init__(
self,
*,
generator: Union[BaseGenerator, Dict],
discriminator: Union[Discriminator, Dict],
vision_aided_discriminator: Optional[Union[VisionAidedDiscriminator, Dict]] = None,
diff_augment: Optional[Union[DiffAugment, Dict]] = None,
learning_rate = 2e-4,
betas = (0.5, 0.9),
weight_decay = 0.,
discr_aux_recon_loss_weight = 1.,
multiscale_divergence_loss_weight = 0.1,
vision_aided_divergence_loss_weight = 0.5,
generator_contrastive_loss_weight = 0.1,
matching_awareness_loss_weight = 0.1,
calc_multiscale_loss_every = 1,
apply_gradient_penalty_every = 4,
resize_image_mode = 'bilinear',
train_upsampler = False,
log_steps_every = 20,
create_ema_generator_at_init = True,
save_and_sample_every = 1000,
early_save_thres_steps = 2500,
early_save_and_sample_every = 100,
num_samples = 25,
model_folder = './gigagan-models',
results_folder = './gigagan-results',
sample_upsampler_dl: Optional[DataLoader] = None,
accelerator: Optional[Accelerator] = None,
accelerate_kwargs: dict = {},
find_unused_parameters = True,
amp = False,
mixed_precision_type = 'fp16'
):
super().__init__()
# create accelerator
if accelerator:
self.accelerator = accelerator
assert is_empty(accelerate_kwargs)
else:
kwargs = DistributedDataParallelKwargs(find_unused_parameters = find_unused_parameters)
self.accelerator = Accelerator(
kwargs_handlers = [kwargs],
mixed_precision = mixed_precision_type if amp else 'no',
**accelerate_kwargs
)
# whether to train upsampler or not
self.train_upsampler = train_upsampler
if train_upsampler:
from gigagan_pytorch.unet_upsampler import UnetUpsampler
generator_klass = UnetUpsampler
else:
generator_klass = Generator
# gradient penalty and auxiliary recon loss
self.apply_gradient_penalty_every = apply_gradient_penalty_every
self.calc_multiscale_loss_every = calc_multiscale_loss_every
if isinstance(generator, dict):
generator = generator_klass(**generator)
if isinstance(discriminator, dict):
discriminator = Discriminator(**discriminator)
if exists(vision_aided_discriminator) and isinstance(vision_aided_discriminator, dict):
vision_aided_discriminator = VisionAidedDiscriminator(**vision_aided_discriminator)
assert isinstance(generator, generator_klass)
# diff augment
if isinstance(diff_augment, dict):
diff_augment = DiffAugment(**diff_augment)
self.diff_augment = diff_augment
# use _base to designate unwrapped models
self.G = generator
self.D = discriminator
self.VD = vision_aided_discriminator
# validate multiscale input resolutions
if train_upsampler:
assert is_empty(set(discriminator.multiscale_input_resolutions) - set(generator.allowable_rgb_resolutions)), f'only multiscale input resolutions of {generator.allowable_rgb_resolutions} is allowed based on the unet input and output image size. simply do Discriminator(multiscale_input_resolutions = unet.allowable_rgb_resolutions) to resolve this error'
# ema
self.has_ema_generator = False
if self.is_main and create_ema_generator_at_init:
self.create_ema_generator()
# print number of parameters
self.print('\n')
self.print(f'Generator: {numerize.numerize(generator.total_params)}')
self.print(f'Discriminator: {numerize.numerize(discriminator.total_params)}')
if exists(self.VD):
self.print(f'Vision Discriminator: {numerize.numerize(vision_aided_discriminator.total_params)}')
self.print('\n')
# text encoder
assert generator.unconditional == discriminator.unconditional
assert not exists(vision_aided_discriminator) or vision_aided_discriminator.unconditional == generator.unconditional
self.unconditional = generator.unconditional
# optimizers
self.G_opt = get_optimizer(self.G.parameters(), lr = learning_rate, betas = betas, weight_decay = weight_decay)
self.D_opt = get_optimizer(self.D.parameters(), lr = learning_rate, betas = betas, weight_decay = weight_decay)
# prepare for distributed
self.G, self.D, self.G_opt, self.D_opt = self.accelerator.prepare(self.G, self.D, self.G_opt, self.D_opt)
# vision aided discriminator optimizer
if exists(self.VD):
self.VD_opt = get_optimizer(self.VD.parameters(), lr = learning_rate, betas = betas, weight_decay = weight_decay)
self.VD_opt = self.accelerator.prepare(self.VD_opt)
# loss related
self.discr_aux_recon_loss_weight = discr_aux_recon_loss_weight
self.multiscale_divergence_loss_weight = multiscale_divergence_loss_weight
self.vision_aided_divergence_loss_weight = vision_aided_divergence_loss_weight
self.generator_contrastive_loss_weight = generator_contrastive_loss_weight
self.matching_awareness_loss_weight = matching_awareness_loss_weight
# resize image mode
self.resize_image_mode = resize_image_mode
# steps
self.log_steps_every = log_steps_every
self.register_buffer('steps', torch.ones(1, dtype = torch.long))
# save and sample
self.save_and_sample_every = save_and_sample_every
self.early_save_thres_steps = early_save_thres_steps
self.early_save_and_sample_every = early_save_and_sample_every
self.num_samples = num_samples
self.train_dl = None
self.sample_upsampler_dl_iter = None
if exists(sample_upsampler_dl):
self.sample_upsampler_dl_iter = cycle(sample_upsampler_dl)
self.results_folder = Path(results_folder)
self.model_folder = Path(model_folder)
mkdir_if_not_exists(self.results_folder)
mkdir_if_not_exists(self.model_folder)
def save(self, path, overwrite = True):
path = Path(path)
mkdir_if_not_exists(path.parents[0])
assert overwrite or not path.exists()
pkg = dict(
G = self.unwrapped_G.state_dict(),
D = self.unwrapped_D.state_dict(),
G_opt = self.G_opt.state_dict(),
D_opt = self.D_opt.state_dict(),
steps = self.steps.item(),
version = __version__
)
if exists(self.G_opt.scaler):
pkg['G_scaler'] = self.G_opt.scaler.state_dict()
if exists(self.D_opt.scaler):
pkg['D_scaler'] = self.D_opt.scaler.state_dict()
if exists(self.VD):
pkg['VD'] = self.unwrapped_VD.state_dict()
pkg['VD_opt'] = self.VD_opt.state_dict()
if exists(self.VD_opt.scaler):
pkg['VD_scaler'] = self.VD_opt.scaler.state_dict()
if self.has_ema_generator:
pkg['G_ema'] = self.G_ema.state_dict()
torch.save(pkg, str(path))
def load(self, path, strict = False):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path))
if 'version' in pkg and pkg['version'] != __version__:
print(f"trying to load from version {pkg['version']}")
self.unwrapped_G.load_state_dict(pkg['G'], strict = strict)
self.unwrapped_D.load_state_dict(pkg['D'], strict = strict)
if exists(self.VD):
self.unwrapped_VD.load_state_dict(pkg['VD'], strict = strict)
if self.has_ema_generator:
self.G_ema.load_state_dict(pkg['G_ema'])
if 'steps' in pkg:
self.steps.copy_(torch.tensor([pkg['steps']]))
if 'G_opt' not in pkg or 'D_opt' not in pkg:
return
try:
self.G_opt.load_state_dict(pkg['G_opt'])
self.D_opt.load_state_dict(pkg['D_opt'])
if exists(self.VD):
self.VD_opt.load_state_dict(pkg['VD_opt'])
if 'G_scaler' in pkg and exists(self.G_opt.scaler):
self.G_opt.scaler.load_state_dict(pkg['G_scaler'])
if 'D_scaler' in pkg and exists(self.D_opt.scaler):
self.D_opt.scaler.load_state_dict(pkg['D_scaler'])
if 'VD_scaler' in pkg and exists(self.VD_opt.scaler):
self.VD_opt.scaler.load_state_dict(pkg['VD_scaler'])
except Exception as e:
self.print(f'unable to load optimizers: {e} - optimizer states will be reset')
# accelerate related
@property
def device(self):
return self.accelerator.device
@property
def unwrapped_G(self):
return self.accelerator.unwrap_model(self.G)
@property
def unwrapped_D(self):
return self.accelerator.unwrap_model(self.D)
@property
def unwrapped_VD(self):
return self.accelerator.unwrap_model(self.VD)
@property
def need_vision_aided_discriminator(self):
return exists(self.VD) and self.vision_aided_divergence_loss_weight > 0.
@property
def need_contrastive_loss(self):
return self.generator_contrastive_loss_weight > 0. and not self.unconditional
def print(self, msg):
self.accelerator.print(msg)
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def resize_image_to(self, images, resolution):
return F.interpolate(images, resolution, mode = self.resize_image_mode)
@beartype
def set_dataloader(self, dl: DataLoader):
assert not exists(self.train_dl), 'training dataloader has already been set'
self.train_dl = dl
self.train_dl_batch_size = dl.batch_size
self.train_dl = self.accelerator.prepare(self.train_dl)
# generate function
@torch.inference_mode()
def generate(self, *args, **kwargs):
model = self.G_ema if self.has_ema_generator else self.G
model.eval()
return model(*args, **kwargs)
# create EMA generator
def create_ema_generator(
self,
update_every = 10,
update_after_step = 100,
decay = 0.995
):
if not self.is_main:
return
assert not self.has_ema_generator, 'EMA generator has already been created'
self.G_ema = EMA(self.unwrapped_G, update_every = update_every, update_after_step = update_after_step, beta = decay)
self.has_ema_generator = True
def generate_kwargs(self, dl_iter, batch_size):
# what to pass into the generator
# depends on whether training upsampler or not
maybe_text_kwargs = dict()
if self.train_upsampler or not self.unconditional:
assert exists(dl_iter)
if self.unconditional:
real_images = next(dl_iter)
else:
result = next(dl_iter)
assert isinstance(result, tuple), 'dataset should return a tuple of two items for text conditioned training, (images: Tensor, texts: List[str])'
real_images, texts = result
maybe_text_kwargs['texts'] = texts[:batch_size]
real_images = real_images.to(self.device)
# if training upsample generator, need to downsample real images
if self.train_upsampler:
size = self.unwrapped_G.input_image_size
lowres_real_images = F.interpolate(real_images, (size, size))
G_kwargs = dict(lowres_image = lowres_real_images)
else:
assert exists(batch_size)
G_kwargs = dict(batch_size = batch_size)
# create noise
noise = torch.randn(batch_size, self.unwrapped_G.style_network.dim, device = self.device)
G_kwargs.update(noise = noise)
return G_kwargs, maybe_text_kwargs
@beartype
def train_discriminator_step(
self,
dl_iter: Iterable,
grad_accum_every = 1,
apply_gradient_penalty = False,
calc_multiscale_loss = True
):
total_divergence = 0.
total_vision_aided_divergence = 0.
total_gp_loss = 0.
total_aux_loss = 0.
total_multiscale_divergence = 0. if calc_multiscale_loss else None
has_matching_awareness = not self.unconditional and self.matching_awareness_loss_weight > 0.
total_matching_aware_loss = 0.
all_texts = []
all_fake_images = []
all_fake_rgbs = []
all_real_images = []
self.G.train()
self.D.train()
self.D_opt.zero_grad()
if self.need_vision_aided_discriminator:
self.VD.train()
self.VD_opt.zero_grad()
for _ in range(grad_accum_every):
if self.unconditional:
real_images = next(dl_iter)
else:
result = next(dl_iter)
assert isinstance(result, tuple), 'dataset should return a tuple of two items for text conditioned training, (images: Tensor, texts: List[str])'
real_images, texts = result
all_real_images.append(real_images)
all_texts.extend(texts)
# requires grad for real images, for gradient penalty
real_images = real_images.to(self.device)
real_images.requires_grad_()
real_images_rgbs = self.unwrapped_D.real_images_to_rgbs(real_images)
# diff augment real images
if exists(self.diff_augment):
real_images, real_images_rgbs = self.diff_augment(real_images, real_images_rgbs)
# batch size
batch_size = real_images.shape[0]
# for discriminator training, fit upsampler and image synthesis logic under same function
G_kwargs, maybe_text_kwargs = self.generate_kwargs(dl_iter, batch_size)
# generator
with torch.no_grad(), self.accelerator.autocast():
images, rgbs = self.G(
**G_kwargs,
**maybe_text_kwargs,
return_all_rgbs = True
)
all_fake_images.append(images)
all_fake_rgbs.append(rgbs)
# diff augment
if exists(self.diff_augment):
images, rgbs = self.diff_augment(images, rgbs)
# detach output of generator, as training discriminator only
images.detach_()
for rgb in rgbs:
rgb.detach_()
# main divergence loss
with self.accelerator.autocast():
fake_logits, fake_multiscale_logits, _ = self.D(
images,
rgbs,
**maybe_text_kwargs,
return_multiscale_outputs = calc_multiscale_loss,
calc_aux_loss = False
)
real_logits, real_multiscale_logits, aux_recon_losses = self.D(
real_images,
real_images_rgbs,
**maybe_text_kwargs,
return_multiscale_outputs = calc_multiscale_loss,
calc_aux_loss = True
)
divergence = discriminator_hinge_loss(real_logits, fake_logits)
total_divergence += (divergence.item() / grad_accum_every)
# handle multi-scale divergence
multiscale_divergence = 0.
if self.multiscale_divergence_loss_weight > 0. and len(fake_multiscale_logits) > 0:
for multiscale_fake, multiscale_real in zip(fake_multiscale_logits, real_multiscale_logits):
multiscale_loss = discriminator_hinge_loss(multiscale_real, multiscale_fake)
multiscale_divergence = multiscale_divergence + multiscale_loss
total_multiscale_divergence += (multiscale_divergence.item() / grad_accum_every)
# figure out gradient penalty if needed
gp_loss = 0.
if apply_gradient_penalty:
gp_loss = gradient_penalty(
real_images,
outputs = [real_logits, *real_multiscale_logits],
grad_output_weights = [1., *(self.multiscale_divergence_loss_weight,) * len(real_multiscale_logits)],
scaler = self.D_opt.scaler
)
if not torch.isnan(gp_loss):
total_gp_loss += (gp_loss.item() / grad_accum_every)
# handle vision aided discriminator, if needed
vd_loss = 0.
if self.need_vision_aided_discriminator:
fake_vision_aided_logits = self.VD(images, **maybe_text_kwargs)
real_vision_aided_logits, clip_encodings = self.VD(real_images, return_clip_encodings = True, **maybe_text_kwargs)
for fake_logits, real_logits in zip(fake_vision_aided_logits, real_vision_aided_logits):
vd_loss = vd_loss + discriminator_hinge_loss(real_logits, fake_logits)
total_vision_aided_divergence += (vd_loss.item() / grad_accum_every)
# handle gradient penalty for vision aided discriminator
if apply_gradient_penalty:
vd_gp_loss = gradient_penalty(
clip_encodings,
outputs = real_vision_aided_logits,
grad_output_weights = [self.vision_aided_divergence_loss_weight] * len(real_vision_aided_logits),
scaler = self.VD_opt.scaler
)
if not torch.isnan(vd_gp_loss):
gp_loss = gp_loss + vd_gp_loss
total_gp_loss += (vd_gp_loss.item() / grad_accum_every)
# sum up losses
total_loss = divergence + gp_loss
if self.multiscale_divergence_loss_weight > 0.:
total_loss = total_loss + multiscale_divergence * self.multiscale_divergence_loss_weight
if self.vision_aided_divergence_loss_weight > 0.:
total_loss = total_loss + vd_loss * self.vision_aided_divergence_loss_weight
if self.discr_aux_recon_loss_weight > 0.:
aux_loss = sum(aux_recon_losses)
total_aux_loss += (aux_loss.item() / grad_accum_every)
total_loss = total_loss + aux_loss * self.discr_aux_recon_loss_weight
# backwards
self.accelerator.backward(total_loss / grad_accum_every)
# matching awareness loss
# strategy would be to rotate the texts by one and assume batch is shuffled enough for mismatched conditions
if has_matching_awareness:
# rotate texts
all_texts = [*all_texts[1:], all_texts[0]]
all_texts = group_by_num_consecutive(all_texts, batch_size)
zipped_data = zip(
all_fake_images,
all_fake_rgbs,
all_real_images,
all_texts
)
total_loss = 0.
for fake_images, fake_rgbs, real_images, texts in zipped_data:
with self.accelerator.autocast():
fake_logits, *_ = self.D(
fake_images,
fake_rgbs,
texts = texts,
return_multiscale_outputs = False,
calc_aux_loss = False
)
real_images_rgbs = self.D.real_images_to_rgbs(real_images)
real_logits, *_ = self.D(
real_images,
real_images_rgbs,
texts = texts,
return_multiscale_outputs = False,
calc_aux_loss = False
)
matching_loss = aux_matching_loss(real_logits, fake_logits)
total_matching_aware_loss += (matching_loss.item() / grad_accum_every)
loss = matching_loss * self.matching_awareness_loss_weight
self.accelerator.backward(loss / grad_accum_every)
self.D_opt.step()
if self.need_vision_aided_discriminator:
self.VD_opt.step()
return TrainDiscrLosses(
total_divergence,
total_multiscale_divergence,
total_vision_aided_divergence,
total_matching_aware_loss,
total_gp_loss,
total_aux_loss
)
def train_generator_step(
self,
batch_size = None,
dl_iter: Optional[Iterable] = None,
grad_accum_every = 1,
calc_multiscale_loss = True
):
total_divergence = 0.
total_multiscale_divergence = 0. if calc_multiscale_loss else None
total_vd_divergence = 0.
contrastive_loss = 0.
self.G.train()
self.D.train()
self.D_opt.zero_grad()
self.G_opt.zero_grad()
all_images = []
all_texts = []
for _ in range(grad_accum_every):
# generator
G_kwargs, maybe_text_kwargs = self.generate_kwargs(dl_iter, batch_size)
with self.accelerator.autocast():
images, rgbs = self.G(
**G_kwargs,
**maybe_text_kwargs,
return_all_rgbs = True
)
# diff augment
if exists(self.diff_augment):
images, rgbs = self.diff_augment(images, rgbs)
# accumulate all images and texts for maybe contrastive loss
if self.need_contrastive_loss:
all_images.append(images)
all_texts.extend(maybe_text_kwargs['texts'])
# discriminator
logits, multiscale_logits, _ = self.D(
images,
rgbs,
**maybe_text_kwargs,
return_multiscale_outputs = calc_multiscale_loss,
calc_aux_loss = False
)
# generator hinge loss discriminator and multiscale
divergence = generator_hinge_loss(logits)
total_divergence += (divergence.item() / grad_accum_every)
total_loss = divergence
if self.multiscale_divergence_loss_weight > 0. and len(multiscale_logits) > 0:
multiscale_divergence = 0.
for multiscale_logit in multiscale_logits:
multiscale_divergence = multiscale_divergence + generator_hinge_loss(multiscale_logit)
total_multiscale_divergence += (multiscale_divergence.item() / grad_accum_every)
total_loss = total_loss + multiscale_divergence * self.multiscale_divergence_loss_weight
# vision aided generator hinge loss
if self.need_vision_aided_discriminator:
vd_loss = 0.
logits = self.VD(images, **maybe_text_kwargs)
for logit in logits:
vd_loss = vd_loss + generator_hinge_loss(logit)
total_vd_divergence += (vd_loss.item() / grad_accum_every)
total_loss = total_loss + vd_loss * self.vision_aided_divergence_loss_weight
self.accelerator.backward(total_loss / grad_accum_every, retain_graph = self.need_contrastive_loss)
# if needs the generator contrastive loss
# gather up all images and texts and calculate it
if self.need_contrastive_loss:
all_images = torch.cat(all_images, dim = 0)
contrastive_loss = aux_clip_loss(
clip = self.G.text_encoder.clip,
texts = all_texts,
images = all_images
)
self.accelerator.backward(contrastive_loss * self.generator_contrastive_loss_weight)
# generator optimizer step
self.G_opt.step()
# update exponentially moving averaged generator
self.accelerator.wait_for_everyone()
if self.is_main and self.has_ema_generator:
self.G_ema.update()
return TrainGenLosses(
total_divergence,
total_multiscale_divergence,
total_vd_divergence,
contrastive_loss
)
def sample(self, model, dl_iter, batch_size):
G_kwargs, maybe_text_kwargs = self.generate_kwargs(dl_iter, batch_size)
with self.accelerator.autocast():
generator_output = model(**G_kwargs, **maybe_text_kwargs)
if not self.train_upsampler:
return generator_output
output_size = generator_output.shape[-1]
lowres_image = G_kwargs['lowres_image']
lowres_image = F.interpolate(lowres_image, (output_size, output_size))
return torch.cat([lowres_image, generator_output])
@torch.inference_mode()
def save_sample(
self,
batch_size,
dl_iter = None
):
milestone = self.steps.item() // self.save_and_sample_every
nrow_mult = 2 if self.train_upsampler else 1
batches = num_to_groups(self.num_samples, batch_size)
if self.train_upsampler:
dl_iter = default(self.sample_upsampler_dl_iter, dl_iter)
assert exists(dl_iter)
sample_models_and_output_file_name = [(self.unwrapped_G, f'sample-{milestone}.png')]
if self.has_ema_generator:
sample_models_and_output_file_name.append((self.G_ema, f'ema-sample-{milestone}.png'))
for model, filename in sample_models_and_output_file_name:
model.eval()
all_images_list = list(map(lambda n: self.sample(model, dl_iter, n), batches))
all_images = torch.cat(all_images_list, dim = 0)
all_images.clamp_(0., 1.)
utils.save_image(
all_images,
str(self.results_folder / filename),
nrow = int(sqrt(self.num_samples)) * nrow_mult
)
# Possible to do: Include some metric to save if improved, include some sampler dict text entries
self.save(str(self.model_folder / f'model-{milestone}.ckpt'))
@beartype
def forward(
self,
*,
steps,
grad_accum_every = 1
):
assert exists(self.train_dl), 'you need to set the dataloader by running .set_dataloader(dl: Dataloader)'
batch_size = self.train_dl_batch_size
dl_iter = cycle(self.train_dl)
last_gp_loss = 0.
last_multiscale_d_loss = 0.
last_multiscale_g_loss = 0.
for _ in tqdm(range(steps), initial = self.steps.item()):
steps = self.steps.item()
is_first_step = steps == 1
apply_gradient_penalty = self.apply_gradient_penalty_every > 0 and divisible_by(steps, self.apply_gradient_penalty_every)
calc_multiscale_loss = self.calc_multiscale_loss_every > 0 and divisible_by(steps, self.calc_multiscale_loss_every)
(
d_loss,
multiscale_d_loss,
vision_aided_d_loss,
matching_aware_loss,
gp_loss,
recon_loss
) = self.train_discriminator_step(
dl_iter = dl_iter,
grad_accum_every = grad_accum_every,
apply_gradient_penalty = apply_gradient_penalty,
calc_multiscale_loss = calc_multiscale_loss
)
self.accelerator.wait_for_everyone()
(
g_loss,
multiscale_g_loss,
vision_aided_g_loss,
contrastive_loss
) = self.train_generator_step(
dl_iter = dl_iter,
batch_size = batch_size,
grad_accum_every = grad_accum_every,
calc_multiscale_loss = calc_multiscale_loss
)
if exists(gp_loss):
last_gp_loss = gp_loss
if exists(multiscale_d_loss):
last_multiscale_d_loss = multiscale_d_loss
if exists(multiscale_g_loss):
last_multiscale_g_loss = multiscale_g_loss
if is_first_step or divisible_by(steps, self.log_steps_every):
losses = (
('G', g_loss),
('MSG', last_multiscale_g_loss),
('VG', vision_aided_g_loss),
('D', d_loss),
('MSD', last_multiscale_d_loss),
('VD', vision_aided_d_loss),
('GP', last_gp_loss),
('SSL', recon_loss),
('CL', contrastive_loss),
('MAL', matching_aware_loss)
)
losses_str = ' | '.join([f'{loss_name}: {loss:.2f}' for loss_name, loss in losses])
self.print(losses_str)
self.accelerator.wait_for_everyone()
if self.is_main and (is_first_step or divisible_by(steps, self.save_and_sample_every) or (steps <= self.early_save_thres_steps and divisible_by(steps, self.early_save_and_sample_every))):
self.save_sample(batch_size, dl_iter)
self.steps += 1
self.print(f'completed {steps} training steps')
|
gigagan-pytorch-main
|
gigagan_pytorch/gigagan_pytorch.py
|
from math import log2
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from gigagan_pytorch.attend import Attend
from gigagan_pytorch.gigagan_pytorch import (
BaseGenerator,
StyleNetwork,
AdaptiveConv2DMod,
TextEncoder,
CrossAttentionBlock,
Upsample
)
from beartype import beartype
from beartype.typing import Optional, List, Union, Dict, Iterable
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(t, length = 1):
if isinstance(t, tuple):
return t
return ((t,) * length)
def identity(t, *args, **kwargs):
return t
def is_power_of_two(n):
return log2(n).is_integer()
def null_iterator():
while True:
yield None
# small helper modules
class PixelShuffleUpsample(nn.Module):
def __init__(self, dim, dim_out = None):
super().__init__()
dim_out = default(dim_out, dim)
conv = nn.Conv2d(dim, dim_out * 4, 1)
self.init_conv_(conv)
self.net = nn.Sequential(
conv,
nn.SiLU(),
nn.PixelShuffle(2)
)
def init_conv_(self, conv):
o, *rest_shape = conv.weight.shape
conv_weight = torch.empty(o // 4, *rest_shape)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
return self.net(x)
def Downsample(dim, dim_out = None):
return nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),
nn.Conv2d(dim * 4, default(dim_out, dim), 1)
)
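# hypothetical shape sketch, for illustration only: Downsample above is a 2x2 space-to-depth rearrange
# followed by a 1x1 convolution, so a (b, c, h, w) map becomes (b, dim_out, h // 2, w // 2) without any
# strided or pooled convolution. PixelShuffleUpsample above works in the opposite, depth-to-space direction
def _example_downsample_shapes():
    down = Downsample(dim = 8, dim_out = 16)
    x = torch.randn(2, 8, 32, 32)
    assert down(x).shape == (2, 16, 16, 16)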
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
return F.normalize(x, dim = 1) * self.g * (x.shape[1] ** 0.5)
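# hypothetical sanity-check sketch, for illustration only: the RMSNorm above normalizes over the channel
# dimension. F.normalize(x, dim = 1) * sqrt(c) equals x divided by its channel-wise root-mean-square,
# which is then scaled by the learned per-channel gain g
def _check_rms_norm_equivalence():
    x = torch.randn(2, 8, 4, 4)
    rms = x.pow(2).mean(dim = 1, keepdim = True).sqrt()
    via_normalize = F.normalize(x, dim = 1) * (x.shape[1] ** 0.5)
    assert torch.allclose(x / rms, via_normalize, atol = 1e-5)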
# building block modules
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
num_conv_kernels = 0
):
super().__init__()
self.proj = AdaptiveConv2DMod(dim, dim_out, kernel = 3, num_conv_kernels = num_conv_kernels)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(
self,
x,
conv_mods_iter: Optional[Iterable] = None
):
conv_mods_iter = default(conv_mods_iter, null_iterator())
x = self.proj(
x,
mod = next(conv_mods_iter),
kernel_mod = next(conv_mods_iter)
)
x = self.norm(x)
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
groups = 8,
num_conv_kernels = 0,
style_dims: List = []
):
super().__init__()
style_dims.extend([
dim,
num_conv_kernels,
dim_out,
num_conv_kernels
])
self.block1 = Block(dim, dim_out, groups = groups, num_conv_kernels = num_conv_kernels)
self.block2 = Block(dim_out, dim_out, groups = groups, num_conv_kernels = num_conv_kernels)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(
self,
x,
conv_mods_iter: Optional[Iterable] = None
):
h = self.block1(x, conv_mods_iter = conv_mods_iter)
h = self.block2(h, conv_mods_iter = conv_mods_iter)
return h + self.res_conv(x)
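# Note on the style modulation bookkeeping above: each ResnetBlock appends four entries to the
# shared `style_dims` list it receives (a `mod` width and a `kernel_mod` width for each of its
# two Blocks). UnetUpsampler later sums these entries to size `style_to_conv_modulations`, and
# every forward pass of a ResnetBlock consumes exactly four tensors from `conv_mods_iter`, so
# construction order and forward traversal order must line up.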
class LinearAttention(nn.Module):
def __init__(
self,
dim,
heads = 4,
dim_head = 32
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.norm = RMSNorm(dim)
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
RMSNorm(dim)
)
def forward(self, x):
b, c, h, w = x.shape
x = self.norm(x)
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 4,
dim_head = 32,
flash = False
):
super().__init__()
self.heads = heads
hidden_dim = dim_head * heads
self.norm = RMSNorm(dim)
self.attend = Attend(flash = flash)
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
x = self.norm(x)
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h (x y) c', h = self.heads), qkv)
out = self.attend(q, k, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
# feedforward
def FeedForward(dim, mult = 4):
return nn.Sequential(
RMSNorm(dim),
nn.Conv2d(dim, dim * mult, 1),
nn.GELU(),
nn.Conv2d(dim * mult, dim, 1)
)
# transformers
class Transformer(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
depth = 1,
flash_attn = True,
ff_mult = 4
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, flash = flash_attn),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class LinearTransformer(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
depth = 1,
ff_mult = 4
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
LinearAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
# model
class UnetUpsampler(BaseGenerator):
@beartype
def __init__(
self,
dim,
*,
image_size,
input_image_size,
init_dim = None,
out_dim = None,
text_encoder: Optional[Union[TextEncoder, Dict]] = None,
style_network: Optional[Union[StyleNetwork, Dict]] = None,
style_network_dim = None,
dim_mults = (1, 2, 4, 8, 16),
channels = 3,
resnet_block_groups = 8,
full_attn = (False, False, False, True, True),
cross_attn = (False, False, False, True, True),
flash_attn = True,
self_attn_dim_head = 64,
self_attn_heads = 8,
self_attn_dot_product = True,
self_attn_ff_mult = 4,
attn_depths = (1, 1, 1, 1, 1),
cross_attn_dim_head = 64,
cross_attn_heads = 8,
cross_ff_mult = 4,
mid_attn_depth = 1,
num_conv_kernels = 2,
resize_mode = 'bilinear',
unconditional = True,
skip_connect_scale = None
):
super().__init__()
# style network
if isinstance(text_encoder, dict):
text_encoder = TextEncoder(**text_encoder)
self.text_encoder = text_encoder
if isinstance(style_network, dict):
style_network = StyleNetwork(**style_network)
self.style_network = style_network
assert exists(style_network) ^ exists(style_network_dim), 'either style_network or style_network_dim must be passed in'
# validate text conditioning and style network hparams
self.unconditional = unconditional
assert unconditional ^ exists(text_encoder), 'if unconditional, text encoder should not be given, and vice versa'
assert not (unconditional and exists(style_network) and style_network.dim_text_latent > 0)
assert unconditional or text_encoder.dim == style_network.dim_text_latent, 'the `dim_text_latent` on your StyleNetwork must be equal to the `dim` set for the TextEncoder'
assert is_power_of_two(image_size) and is_power_of_two(input_image_size), 'both output image size and input image size must be power of 2'
assert input_image_size < image_size, 'input image size must be smaller than the output image size, thus upsampling'
num_layer_no_downsample = int(log2(image_size) - log2(input_image_size))
assert num_layer_no_downsample <= len(dim_mults), 'you need more stages in this unet for the level of upsampling'
self.image_size = image_size
self.input_image_size = input_image_size
# setup adaptive conv
style_embed_split_dims = []
# determine dimensions
self.channels = channels
input_channels = channels
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
*_, mid_dim = dims
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(
ResnetBlock,
groups = resnet_block_groups,
num_conv_kernels = num_conv_kernels,
style_dims = style_embed_split_dims
)
# attention
full_attn = cast_tuple(full_attn, length = len(dim_mults))
assert len(full_attn) == len(dim_mults)
FullAttention = partial(Transformer, flash_attn = flash_attn)
cross_attn = cast_tuple(cross_attn, length = len(dim_mults))
        assert unconditional or len(cross_attn) == len(dim_mults)
# skip connection scale
self.skip_connect_scale = default(skip_connect_scale, 2 ** -0.5)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, ((dim_in, dim_out), layer_full_attn, layer_cross_attn, layer_attn_depth) in enumerate(zip(in_out, full_attn, cross_attn, attn_depths)):
            is_last = ind >= (num_resolutions - 1)  # computed for clarity; not currently used
should_not_downsample = ind < num_layer_no_downsample
has_cross_attn = not self.unconditional and layer_cross_attn
attn_klass = FullAttention if layer_full_attn else LinearTransformer
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in),
block_klass(dim_in, dim_in),
CrossAttentionBlock(dim_in, dim_context = text_encoder.dim, dim_head = self_attn_dim_head, heads = self_attn_heads, ff_mult = self_attn_ff_mult) if has_cross_attn else None,
attn_klass(dim_in, dim_head = self_attn_dim_head, heads = self_attn_heads, depth = layer_attn_depth),
Downsample(dim_in, dim_out) if not should_not_downsample else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
self.mid_block1 = block_klass(mid_dim, mid_dim)
self.mid_attn = FullAttention(mid_dim, dim_head = self_attn_dim_head, heads = self_attn_heads, depth = mid_attn_depth)
self.mid_block2 = block_klass(mid_dim, mid_dim)
self.mid_to_rgb = nn.Conv2d(mid_dim, channels, 1)
for ind, ((dim_in, dim_out), layer_cross_attn, layer_full_attn, layer_attn_depth) in enumerate(zip(reversed(in_out), reversed(full_attn), reversed(cross_attn), reversed(attn_depths))):
attn_klass = FullAttention if layer_full_attn else LinearTransformer
has_cross_attn = not self.unconditional and layer_cross_attn
self.ups.append(nn.ModuleList([
PixelShuffleUpsample(dim_out, dim_in),
Upsample(),
nn.Conv2d(dim_in, channels, 1),
block_klass(dim_in * 2, dim_in),
block_klass(dim_in * 2, dim_in),
CrossAttentionBlock(dim_in, dim_context = text_encoder.dim, dim_head = self_attn_dim_head, heads = self_attn_heads, ff_mult = cross_ff_mult) if has_cross_attn else None,
attn_klass(dim_in, dim_head = cross_attn_dim_head, heads = self_attn_heads, depth = layer_attn_depth),
]))
self.out_dim = default(out_dim, channels)
self.final_res_block = block_klass(dim, dim)
self.final_to_rgb = nn.Conv2d(dim, channels, 1)
# resize mode
self.resize_mode = resize_mode
# determine the projection of the style embedding to convolutional modulation weights (+ adaptive kernel selection weights) for all layers
self.style_to_conv_modulations = nn.Linear(style_network.dim, sum(style_embed_split_dims))
self.style_embed_split_dims = style_embed_split_dims
@property
def allowable_rgb_resolutions(self):
input_res_base = int(log2(self.input_image_size))
output_res_base = int(log2(self.image_size))
allowed_rgb_res_base = list(range(input_res_base, output_res_base))
return [*map(lambda p: 2 ** p, allowed_rgb_res_base)]
@property
def device(self):
return next(self.parameters()).device
@property
def total_params(self):
return sum([p.numel() for p in self.parameters()])
def resize_image_to(self, x, size):
return F.interpolate(x, (size, size), mode = self.resize_mode)
def forward(
self,
lowres_image,
styles = None,
noise = None,
texts: Optional[List[str]] = None,
global_text_tokens = None,
fine_text_tokens = None,
text_mask = None,
return_all_rgbs = False,
replace_rgb_with_input_lowres_image = True # discriminator should also receive the low resolution image the upsampler sees
):
x = lowres_image
shape = x.shape
batch_size = shape[0]
assert shape[-2:] == ((self.input_image_size,) * 2)
# take care of text encodings
# which requires global text tokens to adaptively select the kernels from the main contribution in the paper
# and fine text tokens to attend to using cross attention
if not self.unconditional:
if exists(texts):
assert exists(self.text_encoder)
global_text_tokens, fine_text_tokens, text_mask = self.text_encoder(texts)
else:
assert all([*map(exists, (global_text_tokens, fine_text_tokens, text_mask))])
else:
assert not any([*map(exists, (texts, global_text_tokens, fine_text_tokens))])
# styles
if not exists(styles):
assert exists(self.style_network)
noise = default(noise, torch.randn((batch_size, self.style_network.dim), device = self.device))
styles = self.style_network(noise, global_text_tokens)
# project styles to conv modulations
conv_mods = self.style_to_conv_modulations(styles)
conv_mods = conv_mods.split(self.style_embed_split_dims, dim = -1)
conv_mods = iter(conv_mods)
# initial conv
x = self.init_conv(x)
h = []
# downsample stages
for block1, block2, cross_attn, attn, downsample in self.downs:
x = block1(x, conv_mods_iter = conv_mods)
h.append(x)
x = block2(x, conv_mods_iter = conv_mods)
x = attn(x)
if exists(cross_attn):
x = cross_attn(x, context = fine_text_tokens, mask = text_mask)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, conv_mods_iter = conv_mods)
x = self.mid_attn(x)
x = self.mid_block2(x, conv_mods_iter = conv_mods)
# rgbs
rgbs = []
init_rgb_shape = list(x.shape)
init_rgb_shape[1] = self.channels
rgb = self.mid_to_rgb(x)
rgbs.append(rgb)
# upsample stages
for upsample, upsample_rgb, to_rgb, block1, block2, cross_attn, attn in self.ups:
x = upsample(x)
rgb = upsample_rgb(rgb)
res1 = h.pop() * self.skip_connect_scale
res2 = h.pop() * self.skip_connect_scale
fmap_size = x.shape[-1]
residual_fmap_size = res1.shape[-1]
if residual_fmap_size != fmap_size:
res1 = self.resize_image_to(res1, fmap_size)
res2 = self.resize_image_to(res2, fmap_size)
x = torch.cat((x, res1), dim = 1)
x = block1(x, conv_mods_iter = conv_mods)
x = torch.cat((x, res2), dim = 1)
x = block2(x, conv_mods_iter = conv_mods)
if exists(cross_attn):
x = cross_attn(x, context = fine_text_tokens, mask = text_mask)
x = attn(x)
rgb = rgb + to_rgb(x)
rgbs.append(rgb)
x = self.final_res_block(x, conv_mods_iter = conv_mods)
assert len([*conv_mods]) == 0
rgb = rgb + self.final_to_rgb(x)
if not return_all_rgbs:
return rgb
# only keep those rgbs whose feature map is greater than the input image to be upsampled
rgbs = list(filter(lambda t: t.shape[-1] > shape[-1], rgbs))
# and return the original input image as the smallest rgb
rgbs = [lowres_image, *rgbs]
return rgb, rgbs
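# Minimal end-to-end usage sketch, following the pattern shown in the repository README.
# The StyleNetwork kwargs (dim, depth) are assumptions taken from that README and may need
# adjusting to the actual StyleNetwork signature; this is illustrative only.
def _unet_upsampler_usage_sketch():  # hypothetical helper, illustrative only
    upsampler = UnetUpsampler(
        dim = 64,
        style_network = dict(dim = 64, depth = 4),  # assumed StyleNetwork kwargs
        image_size = 256,
        input_image_size = 64,
        unconditional = True,
        flash_attn = False  # avoids the torch >= 2.0 requirement of flash attention
    )
    lowres = torch.randn(1, 3, 64, 64)
    out = upsampler(lowres)  # expected shape: (1, 3, 256, 256)
    return out.shape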
|
gigagan-pytorch-main
|
gigagan_pytorch/unet_upsampler.py
|
from gigagan_pytorch.gigagan_pytorch import (
GigaGAN,
Generator,
Discriminator,
VisionAidedDiscriminator,
AdaptiveConv2DMod,
StyleNetwork,
TextEncoder
)
from gigagan_pytorch.unet_upsampler import UnetUpsampler
from gigagan_pytorch.data import (
ImageDataset,
TextImageDataset,
MockTextImageDataset
)
__all__ = [
    'GigaGAN',
    'Generator',
    'Discriminator',
    'VisionAidedDiscriminator',
    'AdaptiveConv2DMod',
    'StyleNetwork',
    'UnetUpsampler',
    'TextEncoder',
    'ImageDataset',
    'TextImageDataset',
    'MockTextImageDataset'
]
|
gigagan-pytorch-main
|
gigagan_pytorch/__init__.py
|
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
# constants
AttentionConfig = namedtuple('AttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = AttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(False, True, True)
def flash_attn(self, q, k, v):
is_cuda = q.is_cuda
q, k, v = map(lambda t: t.contiguous(), (q, k, v))
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
dropout_p = self.dropout if self.training else 0.
)
return out
def forward(self, q, k, v):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
if self.flash:
return self.flash_attn(q, k, v)
scale = q.shape[-1] ** -0.5
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
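# Minimal usage sketch (illustrative only): Attend expects (batch, heads, seq, dim_head)
# tensors. With flash = False it runs the explicit einsum softmax path, so this works on cpu.
def _attend_usage_sketch():  # hypothetical helper, illustrative only
    attend = Attend(flash = False)
    q, k, v = (torch.randn(2, 8, 64, 32) for _ in range(3))
    out = attend(q, k, v)  # same shape as q: (2, 8, 64, 32)
    return out.shape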
|
gigagan-pytorch-main
|
gigagan_pytorch/attend.py
|
import torch
import torch.nn.functional as F
from torch.autograd import Function
import torch.distributed as dist
from einops import rearrange
# helpers
def exists(val):
return val is not None
def pad_dim_to(t, length, dim = 0):
pad_length = length - t.shape[dim]
zero_pairs = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
return F.pad(t, (*((0, 0) * zero_pairs), 0, pad_length))
# distributed helpers
def all_gather_variable_dim(t, dim = 0, sizes = None):
device, world_size = t.device, dist.get_world_size()
if not exists(sizes):
size = torch.tensor(t.shape[dim], device = device, dtype = torch.long)
sizes = [torch.empty_like(size, device = device, dtype = torch.long) for i in range(world_size)]
dist.all_gather(sizes, size)
sizes = torch.stack(sizes)
max_size = sizes.amax().item()
padded_t = pad_dim_to(t, max_size, dim = dim)
gathered_tensors = [torch.empty(padded_t.shape, device = device, dtype = padded_t.dtype) for i in range(world_size)]
dist.all_gather(gathered_tensors, padded_t)
gathered_tensor = torch.cat(gathered_tensors, dim = dim)
seq = torch.arange(max_size, device = device)
mask = rearrange(seq, 'j -> 1 j') < rearrange(sizes, 'i -> i 1')
mask = rearrange(mask, 'i j -> (i j)')
seq = torch.arange(mask.shape[-1], device = device)
indices = seq[mask]
gathered_tensor = gathered_tensor.index_select(dim, indices)
return gathered_tensor, sizes
class AllGather(Function):
@staticmethod
def forward(ctx, x, dim, sizes):
is_dist = dist.is_initialized() and dist.get_world_size() > 1
ctx.is_dist = is_dist
if not is_dist:
return x, None
x, batch_sizes = all_gather_variable_dim(x, dim = dim, sizes = sizes)
ctx.batch_sizes = batch_sizes.tolist()
ctx.dim = dim
return x, batch_sizes
@staticmethod
def backward(ctx, grads, _):
if not ctx.is_dist:
return grads, None, None
batch_sizes, rank = ctx.batch_sizes, dist.get_rank()
grads_by_rank = grads.split(batch_sizes, dim = ctx.dim)
return grads_by_rank[rank], None, None
all_gather = AllGather.apply
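# Minimal usage sketch (illustrative only): outside an initialized torch.distributed process
# group, AllGather is a no-op - the tensor is returned unchanged and `sizes` is None. Inside a
# group it gathers variable-length tensors along `dim`, padding each rank to the largest size.
def _all_gather_sketch():  # hypothetical helper, illustrative only
    t = torch.randn(4, 8)
    gathered, sizes = all_gather(t, 0, None)
    return gathered.shape, sizes  # torch.Size([4, 8]) and None when not in a process group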
|
gigagan-pytorch-main
|
gigagan_pytorch/distributed.py
|
from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = True,
group_wd_params = True,
**kwargs
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if group_wd_params and wd > 0:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
if wd == 0:
return Adam(params, lr = lr, betas = betas, eps = eps)
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
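# Minimal usage sketch (illustrative only): with the defaults, parameters are split into two
# groups - tensors with ndim >= 2 (weights) receive weight decay, 1D tensors (biases, norms)
# do not - and AdamW is returned whenever wd > 0, plain Adam otherwise.
def _get_optimizer_sketch():  # hypothetical helper, illustrative only
    import torch
    model = torch.nn.Linear(16, 16)
    opt = get_optimizer(model.parameters(), lr = 1e-4, wd = 1e-2)
    assert isinstance(opt, AdamW) and len(opt.param_groups) == 2
    return opt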
|
gigagan-pytorch-main
|
gigagan_pytorch/optimizer.py
|
from functools import partial
from pathlib import Path
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision import transforms as T
from beartype.door import is_bearable
from beartype.typing import Tuple
# helper functions
def exists(val):
return val is not None
def convert_image_to_fn(img_type, image):
if image.mode == img_type:
return image
return image.convert(img_type)
# custom collation function
# so dataset can return a str and it will collate into List[str]
def collate_tensors_or_str(data):
is_one_data = not isinstance(data[0], tuple)
if is_one_data:
data = torch.stack(data)
return (data,)
outputs = []
for datum in zip(*data):
if is_bearable(datum, Tuple[str, ...]):
output = list(datum)
else:
output = torch.stack(datum)
outputs.append(output)
return tuple(outputs)
# dataset classes
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png', 'tiff'],
augment_horizontal_flip = False,
convert_image_to = None
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
assert len(self.paths) > 0, 'your folder contains no images'
assert len(self.paths) > 100, 'you need at least 100 images, 10k for research paper, millions for miraculous results (try Laion-5B)'
maybe_convert_fn = partial(convert_image_to_fn, convert_image_to) if exists(convert_image_to) else nn.Identity()
self.transform = T.Compose([
T.Lambda(maybe_convert_fn),
T.Resize(image_size),
T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity(),
T.CenterCrop(image_size),
T.ToTensor()
])
def get_dataloader(self, *args, **kwargs):
return DataLoader(self, *args, shuffle = True, drop_last = True, **kwargs)
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
class TextImageDataset(Dataset):
def __init__(self):
raise NotImplementedError
def get_dataloader(self, *args, **kwargs):
return DataLoader(self, *args, collate_fn = collate_tensors_or_str, **kwargs)
class MockTextImageDataset(TextImageDataset):
def __init__(
self,
image_size,
length = int(1e5),
channels = 3
):
self.image_size = image_size
self.channels = channels
self.length = length
def get_dataloader(self, *args, **kwargs):
return DataLoader(self, *args, collate_fn = collate_tensors_or_str, **kwargs)
def __len__(self):
return self.length
def __getitem__(self, index):
mock_image = torch.randn(self.channels, self.image_size, self.image_size)
return mock_image, 'mock text'
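# Minimal usage sketch (illustrative only): the custom collate function stacks the image
# tensors but keeps the captions as a plain list of strings, so a batch comes back as
# (images, texts).
def _mock_text_image_dataset_sketch():  # hypothetical helper, illustrative only
    ds = MockTextImageDataset(image_size = 64, length = 8)
    dl = ds.get_dataloader(batch_size = 4)
    images, texts = next(iter(dl))
    assert images.shape == (4, 3, 64, 64) and len(texts) == 4
    return images.shape, texts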
|
gigagan-pytorch-main
|
gigagan_pytorch/data.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
import open_clip
from einops import rearrange
from beartype import beartype
from beartype.typing import List, Optional
def exists(val):
return val is not None
def l2norm(t):
return F.normalize(t, dim = -1)
class OpenClipAdapter(nn.Module):
@beartype
def __init__(
self,
name = 'ViT-B/32',
pretrained = 'laion400m_e32',
tokenizer_name = 'ViT-B-32-quickgelu',
eos_id = 49407
):
super().__init__()
clip, _, preprocess = open_clip.create_model_and_transforms(name, pretrained = pretrained)
tokenizer = open_clip.get_tokenizer(tokenizer_name)
self.clip = clip
self.tokenizer = tokenizer
self.eos_id = eos_id
# hook for getting final text representation
text_attention_final = self.find_layer('ln_final')
self._dim_latent = text_attention_final.weight.shape[0]
self.text_handle = text_attention_final.register_forward_hook(self._text_hook)
# hook for getting final image representation
# this is for vision-aided gan loss
self._dim_image_latent = self.find_layer('visual.ln_post').weight.shape[0]
num_visual_layers = len(clip.visual.transformer.resblocks)
self.image_handles = []
for visual_layer in range(num_visual_layers):
image_attention_final = self.find_layer(f'visual.transformer.resblocks.{visual_layer}')
handle = image_attention_final.register_forward_hook(self._image_hook)
self.image_handles.append(handle)
# normalize fn
self.clip_normalize = preprocess.transforms[-1]
self.cleared = False
@property
def device(self):
return next(self.parameters()).device
def find_layer(self, layer):
modules = dict([*self.clip.named_modules()])
return modules.get(layer, None)
def clear(self):
if self.cleared:
return
        self.text_handle.remove()
        for handle in self.image_handles:
            handle.remove()
        self.cleared = True
def _text_hook(self, _, inputs, outputs):
self.text_encodings = outputs
def _image_hook(self, _, inputs, outputs):
if not hasattr(self, 'image_encodings'):
self.image_encodings = []
self.image_encodings.append(outputs)
@property
def dim_latent(self):
return self._dim_latent
@property
def image_size(self):
image_size = self.clip.visual.image_size
if isinstance(image_size, tuple):
return max(image_size)
return image_size
@property
def image_channels(self):
return 3
@property
def max_text_len(self):
return self.clip.positional_embedding.shape[0]
@beartype
def embed_texts(
self,
texts: List[str]
):
ids = self.tokenizer(texts)
ids = ids.to(self.device)
ids = ids[..., :self.max_text_len]
is_eos_id = (ids == self.eos_id)
text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
text_mask = text_mask & (ids != 0)
assert not self.cleared
text_embed = self.clip.encode_text(ids)
text_encodings = self.text_encodings
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
del self.text_encodings
return l2norm(text_embed.float()), text_encodings.float()
def embed_images(self, images):
if images.shape[-1] != self.image_size:
images = F.interpolate(images, self.image_size)
assert not self.cleared
images = self.clip_normalize(images)
image_embeds = self.clip.encode_image(images)
image_encodings = rearrange(self.image_encodings, 'l n b d -> l b n d')
del self.image_encodings
return l2norm(image_embeds.float()), image_encodings.float()
@beartype
def contrastive_loss(
self,
images,
texts: Optional[List[str]] = None,
text_embeds: Optional[torch.Tensor] = None
):
assert exists(texts) ^ exists(text_embeds)
if not exists(text_embeds):
text_embeds, _ = self.embed_texts(texts)
image_embeds, _ = self.embed_images(images)
n = text_embeds.shape[0]
temperature = self.clip.logit_scale.exp()
sim = einsum('i d, j d -> i j', text_embeds, image_embeds) * temperature
labels = torch.arange(n, device = sim.device)
return (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
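# Minimal usage sketch (illustrative only): assumes the open_clip ViT-B/32 laion400m weights
# can be downloaded or are cached locally. The random images are only there to exercise the
# shapes; the resulting loss value is not meaningful.
def _openclip_contrastive_sketch():  # hypothetical helper, illustrative only
    adapter = OpenClipAdapter()
    images = torch.randn(2, 3, adapter.image_size, adapter.image_size)
    loss = adapter.contrastive_loss(images = images, texts = ['a photo of a cat', 'a photo of a dog'])
    return loss  # scalar symmetric InfoNCE-style loss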
|
gigagan-pytorch-main
|
gigagan_pytorch/open_clip.py
|
import bitsandbytes as bnb
import torch
p = torch.nn.Parameter(torch.rand(10,10).cuda())
a = torch.rand(10,10).cuda()
p1 = p.data.sum().item()
adam = bnb.optim.Adam([p])
out = a*p
loss = out.sum()
loss.backward()
adam.step()
p2 = p.data.sum().item()
assert p1 != p2
print('SUCCESS!')
print('Installation was successful!')
|
bitsandbytes-main
|
check_bnb_install.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
from setuptools import find_packages, setup
libs = list(glob.glob("./bitsandbytes/libbitsandbytes*.so"))
libs = [os.path.basename(p) for p in libs]
print("libs:", libs)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name=f"bitsandbytes",
version=f"0.37.0",
author="Tim Dettmers",
author_email="dettmers@cs.washington.edu",
description="8-bit optimizers and matrix multiplication routines.",
license="MIT",
keywords="gpu optimizers optimization 8-bit quantization compression",
url="https://github.com/TimDettmers/bitsandbytes",
packages=find_packages(),
package_data={"": libs},
long_description=read("README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|
bitsandbytes-main
|
setup.py
|
import math
import random
import time
from itertools import product
import einops
import pytest
import torch
import numpy as np
import bitsandbytes as bnb
from bitsandbytes import functional as F
from scipy.stats import norm
torch.set_printoptions(
precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000
)
k = 20
def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
print(f"Too many values not close: assert {sumval} < {count}")
torch.testing.assert_allclose(a, b, rtol, atol)
class FFN(torch.nn.Module):
def __init__(self, input_features, hidden_size, bias=True):
super().__init__()
self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)
self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)
with torch.no_grad():
torch.nn.init.xavier_uniform_(self.fc1.weight)
torch.nn.init.xavier_uniform_(self.fc2.weight)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = self.fc2(x)
return x
class Timer:
def __init__(self):
self.starts = {}
self.ends = {}
self.agg = {}
def tick(self, name="default"):
if name not in self.starts:
self.starts[name] = torch.cuda.Event(enable_timing=True)
self.ends[name] = torch.cuda.Event(enable_timing=True)
self.starts[name].record()
else:
ms = self.tock(name, evict=True, print_ms=False)
def tock(self, name="default", evict=True, print_ms=True):
if name in self.ends:
self.ends[name].record()
torch.cuda.synchronize()
ms = self.starts[name].elapsed_time(self.ends[name])
if name not in self.agg:
self.agg[name] = 0.0
self.agg[name] += ms
if evict:
self.starts.pop(name)
self.ends.pop(name)
if print_ms and name in self.agg:
print(f"{name} took: {self.agg[name] / 1000.0:.5f}s")
return self.agg[name]
def reset(self):
self.starts = {}
self.ends = {}
self.agg = {}
print("Resetting benchmark data")
def setup():
pass
def teardown():
pass
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
def test_quantile_quantization():
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
assert diff < 0.0075
A1 = torch.rand(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
assert diff < 0.001
def test_dynamic_quantization():
diffs = []
reldiffs = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
assert diff.mean().item() < 0.0135
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2).mean().item()
torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
assert diff < 0.004
def test_dynamic_blockwise_quantization():
#print('')
for blocksize in [4096, 2048, 1024, 512]:
diffs = []
reldiffs = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, S = F.quantize_blockwise(A1, blocksize=blocksize)
A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
abserr = sum(diffs)/len(diffs)
relerr = sum(reldiffs)/len(reldiffs)
assert abserr < 0.011
assert relerr < 0.018
#print('randn', blocksize, sum(diffs)/len(diffs))
#print('randn', blocksize, sum(reldiffs)/len(reldiffs))
        diffs = []
        reldiffs = []
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, S = F.quantize_blockwise(A1, blocksize=blocksize)
A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
#torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
abserr = sum(diffs)/len(diffs)
relerr = sum(reldiffs)/len(reldiffs)
assert abserr < 0.0035
assert relerr < 0.015
#print('rand', blocksize, sum(diffs)/len(diffs))
#print('rand', blocksize, sum(reldiffs)/len(reldiffs))
def test_dynamic_blockwise_stochastic_quantization():
diffs = []
reldiffs = []
rand = torch.rand(1024).cuda()
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C1, S1 = F.quantize_blockwise(A1, rand=rand)
C2, S2 = F.quantize_blockwise(A1)
        # a maximum distance of 1 between the two sets of quantized values
torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
fraction_smaller = (C1 < C2).float().sum() / C1.numel()
fraction_larger = (C1 > C2).float().sum() / C1.numel()
torch.testing.assert_allclose(
fraction_larger, fraction_smaller, atol=0.01, rtol=0
)
@pytest.mark.parametrize(
"gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
gnorm_vec1 = torch.zeros(100, device="cuda")
gnorm_vec2 = torch.zeros(100, device="cuda")
n = 4
step = 0
percentile = 5
for i in range(k):
step += 1
g = torch.randn(n, n, dtype=gtype, device="cuda")
gnorm1, clip2, gnorm_scale = F.percentile_clipping(
g, gnorm_vec2, step, percentile=percentile
)
        assert gnorm_scale == (1.0 if gnorm1 < clip2 else clip2 / gnorm1)
gnorm2 = torch.norm(g.float())
if step == 1:
gnorm_vec1[:] = gnorm2
else:
gnorm_vec1[step % 100] = gnorm2
vals, idx = torch.sort(gnorm_vec1)
clip1 = vals[percentile]
torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
torch.testing.assert_allclose(clip1, clip2)
torch.testing.assert_allclose(gnorm1, gnorm2)
def quant(x):
max1 = torch.abs(x).max()
x = torch.round(x / max1 * 127)
return max1, x.to(torch.int8)
def dequant(c, maxC):
return c.float() * (maxC / 127)
def mm_dequant(maxA, maxB, C):
return C.float() * (maxA / 127) * (maxB / 127)
def quant_multi(x, dim):
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
max1[max1 == 0] = 1.0
x = torch.round(x / max1 * 127)
return max1, x.to(torch.int8)
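# Minimal round-trip sketch (illustrative only): shows the dequantization math the igemm
# tests rely on. If A ~= Ac * maxA/127 and B ~= Bc * maxB/127, then (Ac @ Bc) * maxA * maxB
# / 127**2 approximates A @ B. Plain float matmul stands in for F.igemm so it runs on cpu.
def _linear_quant_roundtrip_sketch():  # hypothetical helper, illustrative only
    A = torch.randn(64, 64)
    B = torch.randn(64, 64)
    maxA, Ac = quant(A)
    maxB, Bc = quant(B)
    C_int = Ac.float() @ Bc.float()
    approx = mm_dequant(maxA, maxB, C_int)
    err = (approx - A @ B).abs().mean()
    return err  # small but nonzero quantization error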
def quant_multi_chunk(x, dim, chunk_size=32):
if dim == 1:
x_chunked = einops.rearrange(x, "(c a) b -> c a b", c=chunk_size)
max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)
max1 = torch.tile(max1, (1, 1, x.shape[1]))
max1 = max1.view(x.shape)
elif dim == 0:
x_chunked = einops.rearrange(x, "a (b c) -> a b c", c=chunk_size)
max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)
max1 = torch.tile(max1, (x.shape[0], 1, 1))
max1 = max1.view(x.shape)
max1[max1 == 0] = 1.0
x = torch.round(x / max1 * 127)
return max1, x.to(torch.int8)
def quant_minmax(A):
    # per-tensor min-max quantization to int8 (mirrors the `min_max` helper defined below)
    minA = A.min()
    maxA = A.max()
    scale = (maxA - minA) / 2.0
    return (127 * (A - minA - scale) / scale).to(torch.int8), minA, scale
def mean(xx):
return sum(xx) / float(len(xx))
# dim1 = torch.randint(1,1024*4, size=(4,)).tolist()
# dim2 = torch.randint(1,1024*4, size=(4,)).tolist()
dim1 = [1024 * 2]
dim2 = [1024 * 16]
methods = [
(
lambda x, dim: quant(x),
lambda x, dim: quant(x),
dequant,
dequant,
mm_dequant,
)
]
methods.append((quant_multi, quant_multi, dequant, dequant, mm_dequant))
# methods.append((lambda x: quant_multi_chunk(x, dim=-1), lambda x: quant_multi_chunk(x, dim=0), dequant, dequant, mm_dequant))
method_names = ["linear", "vectorwise"]
batched = [False, True]
values = list(product(dim1, dim2, methods, batched))
values_names = list(product(dim1, dim2, method_names, batched))
names = [
"dim1_{}_dim2_{}_quant_{}_batched_{}".format(*vals)
for vals in values_names
]
@pytest.mark.parametrize(
"dim1, dim2, quant_methods, batched", values, ids=names
)
def test_approx_igemm(dim1, dim2, quant_methods, batched):
dim1 = dim1 - (dim1 % 32)
dim2 = dim2 - (dim2 % 32)
errors = []
relerrors = []
print("")
for i in range(5):
if batched:
A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device="cuda")
B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device="cuda")
maxA, Ac = quant_methods[0](A, 2)
maxB, Bc = quant_methods[1](B, 1)
else:
A = torch.normal(0, 0.5, size=(dim1, dim2), device="cuda")
B = torch.normal(0, 0.5, size=(dim2, dim1), device="cuda")
maxA, Ac = quant_methods[0](A, 1)
maxB, Bc = quant_methods[1](B, 0)
torch.testing.assert_allclose(
quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05
)
if batched:
out2 = torch.bmm(A, B)
C = torch.bmm(Ac.float(), Bc.float())
else:
out2 = torch.mm(A, B)
C = F.igemm(Ac, Bc)
out = quant_methods[4](maxA, maxB, C)
std = out2.std()
out /= std
out2 /= std
err = torch.abs(out - out2)
relerr = err / torch.abs(out2)
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
print(mean(errors))
print(mean(relerrors))
def test_stable_embedding():
layer = bnb.nn.StableEmbedding(1024, 1024)
layer.reset_parameters()
n = 2
hidden_dim = torch.randint(32, 256, size=(n,)).tolist()
batch_dim = torch.randint(16, 256, size=(n,)).tolist()
seq_dim = torch.randint(16, 256, size=(n,)).tolist()
transpose = [(False, False), (False, True), (True, False), (True, True)]
values = list(product(hidden_dim, batch_dim, transpose, seq_dim))
names = [
"hidden_dim_{}_batch_dim_{},transpose_{}_seq_dim_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize(
"hidden_dim, batch_dim, transpose, seq_dim", values, ids=names
)
def test_igemm(hidden_dim, batch_dim, transpose, seq_dim):
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 16)
seq_dim = seq_dim - (seq_dim % 16)
for i in range(k):
shapeA = (
(batch_dim, hidden_dim)
if not transpose[0]
else (hidden_dim, batch_dim)
)
shapeB = (
(32 * random.randint(1, 4), hidden_dim)
if transpose[1]
else (hidden_dim, 32 * random.randint(1, 4))
)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.matmul(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.matmul(A.float(), B.t().float())
out = F.igemm(A, B.t())
elif transpose[0] and not transpose[1]:
out2 = torch.matmul(A.t().float(), B.float())
out = F.igemm(A.t(), B)
elif transpose[0] and transpose[1]:
out2 = torch.matmul(A.t().float(), B.t().float())
out = F.igemm(A.t(), B.t())
torch.testing.assert_allclose(out.float(), out2)
for i in range(k):
shapeA = (batch_dim, seq_dim, hidden_dim)
shapeB = (
(32 * random.randint(1, 4), hidden_dim)
if transpose[1]
else (hidden_dim, 32 * random.randint(1, 4))
)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.matmul(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.matmul(A.float(), B.t().float())
out = F.igemm(A, B.t())
torch.testing.assert_allclose(out.float(), out2)
n = 3
seq_dim = torch.randint(32, 512, size=(n,)).tolist()
hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
batch_dim = torch.randint(2, 16, size=(n,)).tolist()
values = list(product(seq_dim, hidden_dim, batch_dim))
names = [
"seq_dim{}_hidden_dim{}_batch_dim{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
seq_dim = seq_dim - (seq_dim % 32)
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 2)
for i in range(25):
A = torch.randint(
-128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
).to(torch.int8)
B = torch.randint(
-128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
).to(torch.int8)
out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
iout = torch.empty(
A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
)
out = F.igemm(A, B, out=iout)
torch.testing.assert_allclose(out.float(), out2)
n = 2
seq_dim = torch.randint(32, 512, size=(n,)).tolist()
hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
batch_dim = torch.randint(2, 16, size=(n,)).tolist()
transpose = [False, True]
values = list(product(seq_dim, hidden_dim, batch_dim, transpose))
names = [
"seq_dim={}_hidden_dim={}_batch_dim={}_transpose{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize(
"seq_dim, hidden_dim, batch_dim, transpose", values, ids=names
)
def test_minmax_igemm(seq_dim, hidden_dim, batch_dim, transpose):
def min_max(x):
maxA = torch.amax(x, dim=2, keepdim=True)
minA = torch.amin(x, dim=2, keepdim=True)
scale = (maxA - minA) / 2.0
return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale
seq_dim = seq_dim - (seq_dim % 16)
hidden_dim = hidden_dim - (hidden_dim % 16)
batch_dim = batch_dim - (batch_dim % 2)
errs = []
relerrs = []
errs2 = []
relerrs2 = []
for i in range(k):
A = torch.normal(
0.0, 0.5, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
)
if transpose:
B = torch.normal(0, 0.5, size=(256, hidden_dim), device="cuda")
else:
B = torch.normal(0, 0.5, size=(hidden_dim, 256), device="cuda")
Ac, minA, scale = min_max(A)
if transpose:
maxB, Bc = quant_multi(B, dim=(1 if transpose else 0))
out = F.igemm(Ac, Bc.t())
out2 = torch.matmul(A, B.t())
offset = B.t().sum(0) * (minA + scale)
out = out.float()
out = (out * maxB.t() * scale / (127 * 127)) + offset
maxA, Ac = quant_multi(A, dim=2)
out3 = F.igemm(Ac, Bc.t())
out3 = mm_dequant(maxA, maxB.t(), out3)
else:
maxB, Bc = quant_multi(B, dim=0)
offset = B.sum(0) * (minA + scale)
out = F.igemm(Ac, Bc)
out2 = torch.matmul(A, B)
out = out.float()
out = (out * maxB * scale / (127 * 127)) + offset
maxA, Ac = quant_multi(A, dim=2)
out3 = F.igemm(Ac, Bc)
out3 = mm_dequant(maxA, maxB, out3)
std = out2.std()
out2 /= std
out /= std
out3 /= std
err = torch.abs(out - out2)
relerr = err / (torch.abs(out2) + 1e-7)
err2 = torch.abs(out3 - out2)
relerr2 = err2 / (torch.abs(out2) + 1e-7)
errs.append(err.mean().item())
relerrs.append(relerr.mean().item())
errs2.append(err2.mean().item())
relerrs2.append(relerr2.mean().item())
# print(mean(errs))
# print(mean(relerrs))
# print(mean(errs2))
# print(mean(relerrs2))
assert mean(errs) < 0.015
assert mean(relerrs) < 0.3
n = 2
dim1 = torch.randint(1, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 128, size=(n,)).tolist()
dim3 = torch.randint(32, 256, size=(n,)).tolist()
dim4 = torch.randint(32, 256, size=(n,)).tolist()
transpose = [(False, False), (True, False), (False, True), (True, True)]
values = list(product(dim1, dim2, dim3, dim4, transpose))
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_transpose_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
dim4 = dim4 - (dim4 % 16)
for i in range(k):
shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)
shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.bmm(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())
out = F.igemm(A, B.permute([0, 2, 1]))
elif transpose[0] and not transpose[1]:
out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())
out = F.igemm(A.permute([0, 2, 1]), B)
elif transpose[0] and transpose[1]:
out2 = torch.bmm(
A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float()
)
out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))
torch.testing.assert_allclose(out.float(), out2.float())
n = 1
dim1 = torch.randint(1, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 128, size=(n,)).tolist()
dim3 = torch.randint(32, 256, size=(n,)).tolist()
values = list(product(dim1, dim2, dim3))
names = ["dim1_{}_dim2_{}_dim3_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, dim3", values, ids=names)
def test_vector_quant(dim1, dim2, dim3):
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
for i in range(k):
A = torch.randn(size=(dim2, dim3), device="cuda")
qA, SA = F.vectorwise_quant(A, dim=0)
A1 = F.vectorwise_dequant(qA, SA)
n = A1.numel()
assert_all_approx_close(A1, A, atol=0.01, rtol=0.1, count=int(n*0.002))
n = 2
dim1 = torch.randint(2, 256, size=(n,)).tolist()
dim2 = torch.randint(2, 256, size=(n,)).tolist()
dim3 = torch.randint(2, 256, size=(n,)).tolist()
# dim1, dim2 = (256,), (256,)
dtype = [torch.int8, torch.int32]
a_order = ["row"]
out_order = ["col", "row", "col32"]
transpose = [False]
dims = [2, 3]
values = list(product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose))
names = ["dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_transpose_{}".format(*vals)for vals in values]
@pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
    if dims == 3 and orderOut != "col32":
        return
    if dtype == torch.int32 and orderOut != "col32":
        return
func = F.get_transform_func(dtype, orderA, orderOut, transpose)
if dims == 2:
A = torch.randint(-128, 127, size=(dim1, dim2), device="cuda").to(dtype)
elif dims == 3:
A = torch.randint(-128, 127, size=(dim1, dim2, dim3), device="cuda").to(
dtype
)
out, S = F.nvidia_transform(A, to_order=orderOut)
if orderOut == "row":
torch.testing.assert_allclose(A.flatten(), out.flatten())
elif orderOut == "col":
torch.testing.assert_allclose(A.t().flatten(), out.flatten())
elif orderOut == "col32":
if dims == 2:
n = A.shape[0] * (A.shape[1] + (32 - (A.shape[1] % 32)))
elif dims == 3:
n = (
A.shape[0]
* A.shape[1]
* (A.shape[2] + (32 - (A.shape[2] % 32)))
)
assert out.numel() == n
elif orderOut == "col_turing":
# 32 col 8 row tiles
n = (A.shape[0] + (8 - A.shape[0] % 8)) * (
A.shape[1] + (32 - (A.shape[1] % 32))
)
assert out.numel() == n
total_coltile = (A.shape[1] // 32) + (1 if A.shape[1] % 32 != 0 else 0)
for row in range(A.shape[0]):
for col in range(A.shape[1]):
i = row * A.shape[1]
j = col
coltile = (col // 32) + (1 if col % 32 != 0 else 0)
rowtile = (
(row // 8) + (1 if row % 8 != 0 else 0)
) * total_coltile
offset = 32 * 8 * (rowtile + coltile)
col2 = col % 32
row2 = (row % 8) * 32
assert A.flatten()[i + j] == A[row, col]
# assert A.flatten()[i+j] == out.flatten()[row2+col2]
# torch.testing.assert_allclose(A.flatten()[i+j], A[row, col])
# torch.testing.assert_allclose(A.flatten()[i+j], out.flatten()[row2+ col2+block_offset])
if orderOut == "col32":
out2, S = F.nvidia_transform(
out, from_order=orderOut, to_order="row", state=S
)
torch.testing.assert_allclose(A, out2)
n = 1
dim1 = torch.randint(1, 256, size=(n,)).tolist()
dim2 = torch.randint(32, 512, size=(n,)).tolist()
dim3 = torch.randint(32, 1024, size=(n,)).tolist()
dim4 = torch.randint(32, 1024, size=(n,)).tolist()
# dim1 = [2]
# dim2 = [2]
# dim3 = [2]
# dim4 = [2]
dims = (2, 3)
ldb = [0]
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dim3, dim4, dims, ldb))
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}_ldb_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
for i in range(k):
if dims == 2:
A = torch.randint(-128, 127, size=(dim1, dim3), device="cuda").to(
torch.int8
)
elif dims == 3:
A = torch.randint(
-128, 127, size=(dim1, dim2, dim3), device="cuda"
).to(torch.int8)
B = torch.randint(-128, 127, size=(dim4, dim3), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.t().float())
A2, SA = F.transform(A, "col32")
B2, SB = F.transform(B, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
torch.testing.assert_allclose(C1, C3.float())
# transpose
B = torch.randint(-128, 127, size=(dim3, dim4), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.float())
B2t, SBt = F.transform(B, "col_turing", transpose=True)
C2, SC = F.igemmlt(A2, B2t, SA, SBt)
C3, S = F.nvidia_transform(C2, "row", state=SC)
torch.testing.assert_allclose(C1, C3.float())
dim1 = [32]
dim2 = [32]
dim3 = [32]
dim4 = [32]
dims = (2,)
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dim3, dim4, dims))
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims", values, ids=names)
def test_igemmlt_half(dim1, dim2, dim3, dim4, dims):
formatB = F.get_special_format_str()
for i in range(k):
if dims == 2:
A = torch.normal(0, 0.5, size=(dim1, dim3), device="cuda").half()
elif dims == 3:
A = torch.normal(
0, 0.5, size=(dim1, dim2, dim3), device="cuda"
).half()
B = torch.randn((dim4, dim3), device="cuda").half()
torch.nn.init.xavier_uniform_(B)
C1 = torch.matmul(A, B.t())
C2 = bnb.matmul(A, B.t())
A = A.view(-1, A.shape[-1])
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
CB, CBt, statsB, statsBt, coo_tensor = F.double_quant(B)
C32A, SA = F.transform(CA, "col32")
CxB, SB = F.transform(CB, to_order=formatB)
out1_32, Sout1_32 = F.igemmlt(C32A, CxB, SA, SB)
output = F.mm_dequant(out1_32, Sout1_32, statsAt, statsBt)
# print('')
# print(output.flatten()[:10])
# print(C1.flatten()[:10])
# print(C2.flatten()[:10])
# torch.testing.assert_allclose(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)
# transpose
# B = torch.randint(-128, 127, size=(dim3, dim4), device='cuda').to(torch.int8)
# C1 = torch.matmul(A.float(), B.float())
# B2t, SBt = F.transform2(B, 'col_turing', transpose=True)
# C2, SC = F.igemmlt(A2, B2t, SA, SBt)
# C3, S = F.transform(C2, 'row', state=SC)
# torch.testing.assert_allclose(C1, C3.float())
batch_size = 2
seqdim = 512
# values = [(batch_size, seqdim, 4*1024, 16*1024),(batch_size, seqdim, 5120, 4*5120),(batch_size, seqdim, 12*1024, 4*12*1024)]
values = [
(batch_size, seqdim, 4 * 1024, 3 * 4 * 1024),
(batch_size, seqdim, 5120, 3 * 5120),
(batch_size, seqdim, 12 * 1024, 4 * 12 * 1024),
]
# values = list(product(batch, seq, model, hidden))
names = [
"batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_8bit_training(batch, seq, model, hidden):
formatB = F.get_special_format_str()
A = torch.randn(batch, seq, model, device="cuda").half()
grad = torch.randn(batch, seq, model, device="cuda").half()
w1 = torch.randint(-128, 127, size=(hidden, model), device="cuda").half()
w2 = torch.randint(-128, 127, size=(model, hidden), device="cuda").half()
print("")
# torch.cuda.synchronize()
## warmup
# for i in range(100):
# torch.matmul(A, w1.t())
# torch.cuda.synchronize()
dtype = torch.int8
A = A.view(-1, A.shape[-1]).contiguous()
grad = grad.view(-1, grad.shape[-1]).contiguous()
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
out1 = torch.matmul(A, w1.t()) # fc1
# out2 = torch.matmul(out1, w2.t())# fc2
# d1 = torch.matmul(grad, w2) # delta1
# d2 = torch.matmul(d1, w1) # delta2
# grad1 = torch.einsum('bo,bh->oh', out1, grad) # grad w2
# grad2 = torch.einsum('bh,bo->ho', A, d2) # grad w1
torch.cuda.synchronize()
t16 = time.time() - t0
print(t16)
# torch.cuda.empty_cache()
# Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
# CTw1, Sw1 = F.transform2(Cw1, formatB)
# CTw2, Sw2 = F.transform2(Cw2, formatB)
# CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
# CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
# CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# C32A, SA = F.transform2(CA, 'col32')
## fc1
# out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)
##out1 = F.mm_dequant(out1_32, Sout1_32, statsAt, statsw1t)
## fc2
# Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)
# C32out1, Sout1 = F.transform2(Cout1, 'col32')
# out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)
##out2 = F.mm_dequant(out2_32, Sout2_32, statsout1t, statsw2t)
## delta1
# Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)
# C32grad, Sgrad = F.transform2(Cgrad, 'col32')
##d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)
##d1 = F.mm_dequant(d1_32, Sd1_32, statsgradt, statsw2)
## delta2
# Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)
# C32d1, Sd1 = F.transform2(Cd1, 'col32')
##d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)
##d2 = F.mm_dequant(d2_32, Sd2_32, statsd1t, statsw1)
## grad1
# C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)
# CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)
##grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)
##grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1, statsgrad)
## grad2
# C32At, SAt = F.transform2(CAt, 'col32', transpose=True)
# CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)
##grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)
##grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsA, statsd1)
# Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
# Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
# CTw1, Sw1 = F.transform2(Cw1, formatB)
# CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
# CTw2, Sw2 = F.transform2(Cw2, formatB)
# CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
# torch.cuda.synchronize()
# t0 = time.time()
# for i in range(k):
# #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# #CTw1, Sw1 = F.transform2(Cw1, formatB)
# #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
# #CTw1, Sw1 = F.transform2(Cw1, formatB)
# #CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A, threshold=3.5)
# CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# #CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
# #CTw2, Sw2 = F.transform2(Cw2, formatB)
# #CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
# C32A, SA = F.transform2(CA, 'col32')
# # fc1
# out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)
# #out1dn = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)
# #print(coo_tensor.nnz)
# #out1sp = F.spmm_coo(coo_tensor, w1.t())
# #print(w1.t().shape)
# #out1 = out1dn + out1sp
# # fc2
# Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)
# C32out1, Sout1 = F.transform2(Cout1, 'col32')
# out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)
# #out2 = F.mm_dequant(out2_32, Sout2_32, statsout1, statsw2)
# # delta1
# Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)
# C32grad, Sgrad = F.transform2(Cgrad, 'col32')
# d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)
# #d1 = F.mm_dequant(d1_32, Sd1_32, statsgrad, statsw2t)
# # delta2
# Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)
# C32d1, Sd1 = F.transform2(Cd1, 'col32')
# d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)
# #d2 = F.mm_dequant(d2_32, Sd2_32, statsd1, statsw1t)
# # grad1
# #C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)
# #CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)
# #grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)
# #grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1t, statsgradt)
# ## grad2
# #C32At, SAt = F.transform2(CAt, 'col32', transpose=True)
# #CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)
# #grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)
# #grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsAt, statsd1t)
# torch.cuda.synchronize()
# t8 = time.time() - t0
# print(t8)
n = 2
dim1 = torch.randint(64, 256, size=(n,)).tolist()
dim4 = torch.randint(64, 1024, size=(n,)).tolist()
#dim1 = [2*1024]
#dim4 = [2*1024]
#dim1 = [4]
#dim4 = [4]
dims = (2,)
formatB = ["col_turing", "col_ampere"]
has_bias = [True, False]
values = list(product(dim1, dim4, dims, formatB, has_bias))
names = ["dim1_{}_dim4_{}_dims_{}_formatB_{}_has_bias_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, dims, formatB, has_bias", values, ids=names)
def test_dequant_mm(dim1, dim4, dims, formatB, has_bias):
inner = torch.randint(1, 128, size=(1,)).item()
bias = None
if has_bias: bias = torch.randn(dim4, device='cuda', dtype=torch.float16)
formatB = F.get_special_format_str()
for i in range(1):
A = torch.randn(dim1, inner, device="cuda")
B = torch.randn(dim4, inner, device="cuda")
C1 = torch.matmul(A.half(), B.t().half())
if has_bias: C1 += bias
A1, maxA = F.vectorwise_quant(A, dim=1)
B1, maxB = F.vectorwise_quant(B, dim=1)
A2, SA = F.nvidia_transform(A1, "col32")
B2, SB = F.nvidia_transform(B1, formatB)
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
C4 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
if has_bias: C4 += bias
# TODO: is something wrong here? If so, the problem goes deeper
#n = C1.numel()
#p = 0.06
std = C1.std(0).view(1, -1)
C1 /= std
C4 /= std
#assert_all_approx_close(C1, C4, atol=0.02, rtol=0.1, count=int(n*0.06))
#assert (count / n < p), f"error in more than {p} of elements: {count}/{n}={count/n}"
C5 = F.mm_dequant(C2, SC, maxA.flatten(), maxB.flatten(), bias=bias)
#torch.testing.assert_allclose(C5, C4, atol=0.015, rtol=0.1)
n = C5.numel()
assert_all_approx_close(C1, C4, atol=0.015, rtol=0.1, count=int(0.01*n))
n = 2
dim1 = [1 * 1024]
dim2 = [1 * 1024]
# dim1 = torch.randint(1,4*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dims = (2,)
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dims))
names = ["dim1_{}_dim2_{}_dims_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
for i in range(k):
threshold = 3.0
A = torch.randn(dim1, dim2, device="cuda").half()
A_truncated = A.clone()
A_truncated[torch.abs(A_truncated) >= 3.0] = 0.0
if dims == 2:
row_stats1, _ = torch.abs(A.float()).max(1)
col_stats1, _ = torch.abs(A.float()).max(0)
row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)
col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)
else:
assert False
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=threshold
)
A_blocked = einops.rearrange(
torch.abs(A),
"(rows row_tiles) (cols block_size)-> rows cols row_tiles block_size",
row_tiles=16,
block_size=64 * 4,
)
nnz_rows1_counts = (torch.abs(A_blocked) >= threshold).sum(3).flatten()
nnz_block_ptr1 = torch.zeros(
nnz_rows1_counts.shape[0] + 1,
dtype=nnz_rows1_counts.dtype,
device=nnz_rows1_counts.device,
)
nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)
torch.testing.assert_allclose(col_stats1_trunc, col_stats2)
torch.testing.assert_allclose(row_stats1_trunc, row_stats2)
torch.testing.assert_allclose(nnz_block_ptr1, nnz_block_ptr2)
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=0.0
)
torch.testing.assert_allclose(col_stats1, col_stats2)
torch.testing.assert_allclose(row_stats1, row_stats2)
assert nnz_block_ptr2 is None
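# The reference statistics checked above can be written compactly in plain PyTorch:
# values at or above `threshold` are treated as outliers and excluded, mirroring the
# A_truncated logic. Sketch of the expected semantics only, not the fused CUDA kernel.
def _reference_colrow_absmax(A, threshold=0.0):
    Af = A.float().abs()
    if threshold > 0.0:
        Af = torch.where(Af >= threshold, torch.zeros_like(Af), Af)
    return Af.amax(dim=1), Af.amax(dim=0)  # (row_stats, col_stats)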
n = 2
# dim1 = [8*1024]
# dim2 = [4*1024]
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
out_col1, Scol = F.vectorwise_quant(A, dim=0)
out_row1, Srow = F.vectorwise_quant(A, dim=1)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# max difference is 1 due to rounding differences
torch.testing.assert_allclose(CA, out_row1, atol=1, rtol=0)
torch.testing.assert_allclose(CAt, out_col1, atol=1, rtol=0)
n = CAt.numel()
num_not_close_rows = (
(torch.isclose(CA, out_row1, atol=1) == 0).sum().item()
)
num_not_close_cols = (
(torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()
)
# allow for 1:500 error due to rounding differences
min_error = 1 / 500
if num_not_close_cols > (min_error * n):
print(
f"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols/n:.4f}"
)
assert False
if num_not_close_rows > (min_error * n):
print(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows/n:.4f}"
)
assert False
torch.testing.assert_allclose(Srow.flatten(), statsA)
torch.testing.assert_allclose(Scol.flatten(), statsAt)
n = 4
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
for i in range(k):
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
out1 = torch.matmul(A.half(), B.t().half())
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
A1, maxA = F.vectorwise_quant(A, dim=1)
B1, maxB = F.vectorwise_quant(B, dim=1)
torch.testing.assert_allclose(maxA.flatten(), stats1a)
torch.testing.assert_allclose(maxB.flatten(), stats2a)
torch.testing.assert_allclose(C1a, A1, rtol=0, atol=1)
torch.testing.assert_allclose(C2a, B1, rtol=0, atol=1)
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(C2a, "col_turing")
outC32, SC = F.igemmlt(A2, B2, SA, SB)
out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)
A2, SA = F.nvidia_transform(A1, "col32")
B2, SB = F.nvidia_transform(B1, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
out3 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
err1 = torch.abs(out1 - out2).mean().item()
err2 = torch.abs(out1 - out3).mean().item()
assert err2 <= err1 * 1.025
n = 6
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
def test_igemmlt_row_scale(dim1, dim4, inner):
formatB = F.get_special_format_str()
err1, err2, err3 = [], [], []
relerr1, relerr2 = [], []
scale = 1
for i in range(k):
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
C1 = torch.matmul(A, B.t())
out1 = torch.matmul(A.half(), B.t().half())
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
CB, absmaxB = F.vectorwise_quant(B, quant_type="linear")
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(CB, formatB)
A1, maxA = F.vectorwise_quant(A, dim=1)
c = 10.0 * inner * scale
row_scale = torch.ones_like(maxA) / c
outC32, SC = F.igemmlt(
A2, B2, SA, SB, dtype=torch.int8, row_scale=row_scale
)
C3, S = F.nvidia_transform(outC32, "row", state=SC)
maxval = torch.abs(C3).max()
if maxval == 127:
scale = 1.5
else:
scale = maxval / 120
out3 = C3 * maxA * absmaxB * c / (127 * 127)
C4 = torch.matmul(C1a.float(), CB.float().t())
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
B2, SB = F.nvidia_transform(C2a, formatB)
outC32, SC = F.igemmlt(A2, B2, SA, SB)
out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)
CA, SA = F.vectorwise_quant(A, dim=1, quant_type="vector")
CB, SB = F.vectorwise_quant(B, dim=1, quant_type="linear")
C = torch.matmul(CA.float(), CB.t().float())
out4 = C * SA * SB / (127 * 127)
# out4 = torch.clip(torch.round(C*SA/c), -127, 127)*c*SB/(127*127)
# print('='*80)
# print(out1)
# print(out2)
# print(out3)
# print(out1)
# print(out2)
# print(out3)
err1.append(torch.abs(out1 - out2).mean().item())
err2.append(torch.abs(out1 - out3).mean().item())
err3.append(torch.abs(out1 - out4).mean().item())
# assert_all_approx_close(C3.float(), torch.round(C4*row_scale), rtol=0, atol=0, count=10)
print("")
print(sum(err1) / len(err1))
print(sum(err2) / len(err2))
print(sum(err3) / len(err3))
dim1 = [1024, 2048]
inner = [12288 * 4, 4096 * 4]
dim4 = [12288, 4096]
values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
def test_row_scale_bench(dim1, dim4, inner):
err1, err2, err3 = [], [], []
    formatB = F.get_special_format_str()
relerr1, relerr2 = [], []
scale = 1
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
    # warmup
for i in range(k):
C1 = torch.matmul(A, B.t())
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
C1 = torch.matmul(A, B.t())
torch.cuda.synchronize()
print("16", time.time() - t0)
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
CB, absmaxB = F.vectorwise_quant(B, quant_type="linear")
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(CB, formatB)
A1, maxA = F.vectorwise_quant(A, dim=1)
c = 10.0 * inner * scale
row_scale = maxA / c
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
outC32, SC = F.igemmlt(
A2, B2, SA, SB, dtype=torch.int8, row_scale=row_scale
)
torch.cuda.synchronize()
print("row-wise", time.time() - t0)
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
B2, SB = F.nvidia_transform(C2a, formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
outC32, SC = F.igemmlt(A2, B2, SA, SB)
torch.cuda.synchronize()
print("vector-wise", time.time() - t0)
n = 2
dim1 = torch.randint(2, 1024, size=(n,)).tolist()
dim2 = torch.randint(2, 1024, size=(n,)).tolist()
# dim1 = [8*1024]
# dim2 = [4*1024]
dim3 = [0]
dtype = [torch.int8]
a_order = ["row"]
out_order = ["col32", "col_turing", "col_ampere"]
transpose = [False, True]
dims = [2]
values = list(
product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose)
)
names = [
"dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_{}".format(
*vals
)
for vals in values
]
@pytest.mark.parametrize(
"dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",
values,
ids=names,
)
def test_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
for i in range(k):
if dims == 2:
A = torch.randint(10, 99, size=(dim1, dim2), device="cuda").to(
dtype
)
elif dims == 3:
A = torch.randint(
10, 99, size=(dim1, dim2, dim3), device="cuda"
).to(dtype)
A.view(-1)[-1] = -1
if transpose:
At = A.t().contiguous()
out1, S1 = F.nvidia_transform(At, to_order=orderOut)
else:
out1, S1 = F.nvidia_transform(A, to_order=orderOut)
out2, S2 = F.transform(A, to_order=orderOut, transpose=transpose)
assert S1[0][0] == S2[0][0]
assert S1[0][1] == S2[0][1]
# print(out1)
# print(out2)
torch.testing.assert_allclose(out1, out2)
n = 2
# dim1 = torch.randint(2,1024, size=(n,)).tolist()
# dim2 = torch.randint(2,1024, size=(n,)).tolist()
dim1 = [1]
dim2 = [33]
dtype = [torch.int8]
# a_order = ['col_turing', 'col_ampere']
a_order = ["col_turing"]
out_order = ["row"]
values = list(product(dim1, dim2, dtype, a_order, out_order))
names = [
"dim1_{}_dim2_{}_dtype_{}_orderA_{}_orderOut_{}".format(*vals)
for vals in values
]
def test_overflow():
formatB = F.get_special_format_str()
print(formatB)
for i in range(2):
a = torch.arange(5, 15).cuda().to(torch.int8).view(-1, 1)
b = torch.arange(5, 15).cuda().to(torch.int8).view(-1, 1)
Ca, Sa = F.nvidia_transform(a, "col32")
Cb, Sb = F.nvidia_transform(b, formatB)
c = F.igemmlt(Ca, Cb, Sa, Sb, dtype=torch.int8)
c2 = torch.matmul(a.float(), b.float().t())
n = 2
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
# dim1 = [4]
# dim2 = [5]
values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_coo_double_quant(dim1, dim2):
threshold = 3.00
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
idx = torch.abs(A) >= threshold
CA2, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
A, threshold=threshold
)
if coo_tensor is not None:
A1 = A * idx
A2 = torch.zeros_like(A)
A2[
coo_tensor.rowidx.long(), coo_tensor.colidx.long()
] = coo_tensor.values
torch.testing.assert_allclose(A1, A2)
A1 = A * (idx == 0)
A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()
torch.testing.assert_allclose(
A * (idx == 0), A2, rtol=0.05, atol=1.5e-2
)
n = 2
dim1 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
# dim1 = [7]
# dim2 = [11]
transposed_B = [False, True]
values = list(product(dim1, dim2, transposed_B))
names = ["dim1_{}_dim2_{}_transposed_B_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, transposed_B", values, ids=names)
def test_spmm_coo(dim1, dim2, transposed_B):
threshold = 1.5
dim3 = torch.randint(32, 128, size=(1,)).item()
# dim3 = 17
for i in range(k):
A = torch.randn(dim1, dim2).cuda().half()
if transposed_B:
B = torch.randn(dim3, dim2).cuda().half()
else:
B = torch.randn(dim2, dim3).cuda().half()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
if transposed_B:
out2 = F.spmm_coo(cooA, B.t())
out1 = torch.matmul(A2, B.t())
else:
out2 = F.spmm_coo(cooA, B)
out1 = torch.matmul(A2, B)
assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=30)
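# The same sparse reference can be expressed with PyTorch's built-in COO support;
# torch.sparse.mm plays the role of F.spmm_coo here. Sketch only (upcast to float32
# because sparse half-precision matmul support varies across backends/versions).
def _reference_spmm_coo(A, B, threshold=1.5):
    idx = A.abs() >= threshold
    rows, cols = torch.where(idx)
    A_sparse = torch.sparse_coo_tensor(
        torch.stack([rows, cols]), A[idx].float(), size=A.shape
    )
    return torch.sparse.mm(A_sparse, B.float())  # equals (A * idx) @ B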
def test_spmm_bench():
batch = 2
model = 1024 * 1
hidden = model * 4
seq = 1024
dim1 = batch * seq
dim2 = model
dim3 = hidden
threshold = 4
A = torch.randn(dim1, dim2, device="cuda").half()
B = torch.randn(dim2, dim3, device="cuda").half()
for i in range(10):
C1 = bnb.matmul(A, B.t())
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
C1 = bnb.matmul(A, B.t())
torch.cuda.synchronize()
t8 = time.time() - t0
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
print(nnz / idx.numel())
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
for i in range(10):
out2 = F.spmm_coo(cooA, B)
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
out2 = F.spmm_coo(cooA, B)
torch.cuda.synchronize()
tsp = time.time() - t0
print(tsp, t8)
print(tsp / t8)
n = 2
dim1 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
dim2 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_integrated_sparse_decomp(dim1, dim2):
threshold = 3.0
formatB = "col_turing"
for i in range(k):
A = torch.randn(dim1, dim2).cuda().half()
w1 = torch.randn(dim1, dim2).cuda().half()
out1 = torch.matmul(A, w1.t())
Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
CTw1, Sw1 = F.transform(Cw1, formatB)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
C32A, SA = F.transform(CA, "col32")
out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1)
out2 = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
A, threshold=threshold
)
C32A, SA = F.transform(CA, "col32")
out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1)
out3 = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)
assert coo_tensor is not None
out4 = F.spmm_coo(coo_tensor, w1.t())
out5 = out3 + out4
err1 = torch.abs(out1 - out2).mean().item()
err2 = torch.abs(out1 - out5).mean().item()
assert err2 < err1
def test_matmuls():
a = torch.randn(256, 512).half().cuda()
b = torch.randn(256, 512).half().cuda()
c1 = torch.matmul(a, b.t())
c2 = bnb.matmul(a, b)
c3 = bnb.matmul_cublas(a, b.t())
err1 = torch.abs(c1 - c2).mean().item()
err2 = torch.abs(c1 - c3).mean().item()
assert err1 < 0.2
assert err2 < 0.2
print(err1, err2)
n = 2
# dim1 = torch.randint(1,1*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dim1 = [1 * 2048]
dim2 = [12288]
# dim1 = [32]
# dim2 = [32]
# dtype = [torch.float16, torch.int8]
dtype = [torch.float16]
out_function = ["zeros", "ones"]
values = list(product(dim1, dim2, dtype, out_function))
names = [
"dim1_{}_dim2_{}_dtype_{}_out_func_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, dtype, out_func", values, ids=names)
def test_spmm_coo_very_sparse(dim1, dim2, dtype, out_func):
out_func = getattr(torch, out_func)
threshold = 3.3
# threshold = 2.8
# threshold = 0.0
A = torch.randn(dim1, dim2, device="cuda").half()
if dtype == torch.float16:
B = torch.randn(dim2, dim2 * 4, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
else:
B = torch.randn(dim2, dim2 * 4, device="cuda").half()
torch.nn.init.xavier_uniform_(B)
B, SB = F.vectorwise_quant(B, quant_type="linear")
# B = torch.randint(-127, 127, size=(dim2, dim2*4), device='cuda').to(torch.int8)
print("")
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
out1 = torch.matmul(A2.half(), B.half())
out = out_func(out1.shape, dtype=torch.float16, device=out1.device)
out1 += out.clone()
out2 = F.spmm_coo_very_sparse(cooA, B, out=out)
# print(B)
# print(out1)
# print(out2)
p = 200 / (2048 * 12288 * 4)
n = out1.numel()
count = math.ceil(p * n)
std = out1.std()
out1 /= std
out2 /= std
assert_all_approx_close(
out1, out2.half(), rtol=0.01, atol=3.0e-2, count=count
)
# assert_all_approx_close(out1, out2.half(), rtol=0.05, atol=0.01, count=count)
idx_col = torch.randint(0, A2.shape[-1], size=(15,))
# torch.testing.assert_allclose(out1, out2.half(), rtol=0.05, atol=0.001)
# Bt = torch.randn(dim2*4, dim2, device='cuda').half()
# torch.cuda.synchronize()
# t0 = time.time()
# print(A2.shape, B.shape)
# for i in range(100):
# #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
torch.testing.assert_allclose(A2[idx], csrA.values)
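# The CSR row pointer inspected above can be derived from COO row indices with a
# bincount followed by a cumulative sum; rowptr[i+1] - rowptr[i] is the number of
# nonzeros in row i. Sketch of the expected semantics, not the library routine.
def _coo_rowidx_to_csr_rowptr(rowidx, num_rows):
    counts = torch.bincount(rowidx.long(), minlength=num_rows)
    rowptr = torch.zeros(num_rows + 1, dtype=torch.long, device=rowidx.device)
    rowptr[1:] = counts.cumsum(0)
    return rowptr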
def test_coo2csc():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
cscA = F.coo2csc(cooA)
counts = cscA.colptr[1:] - cscA.colptr[:-1]
assert counts.numel() == A.shape[1]
torch.testing.assert_allclose(counts, (A2 != 0).sum(0))
# torch uses row-major -> use transpose to transfer to col-major
idx = A2.t() != 0
torch.testing.assert_allclose(A2.t()[idx], cscA.values)
n = 2
# dim1 = torch.randint(1,1*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dim1 = [1 * 2048]
# dim2 = [12288]
dim2 = [2048]
# dim1 = [2]
# dim2 = [2]
dtype = [torch.int8]
values = list(product(dim1, dim2, dtype))
names = ["dim1_{}_dim2_{}_dtype_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, dtype", values, ids=names)
def test_spmm_coo_dequant(dim1, dim2, dtype):
threshold = 6.0
# threshold = 2.8
# threshold = 0.0
A = torch.randn(dim1, dim2, device="cuda").half()
B = torch.empty(dim2, dim2 * 4, device="cuda", dtype=torch.float16)
torch.nn.init.xavier_uniform_(B)
Bt = B.t().contiguous()
CB, CBt, statsB, statsBt, coo_tensor = F.double_quant(B)
rowidx = torch.randint(0, A.shape[-1], size=(15,))
A[:, rowidx] = 8.0
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
out1 = torch.matmul(A2, B.half())
out3 = F.spmm_coo_very_sparse(cooA, CBt.half())
out3 = out3 * statsBt.half() / 127
values, counts = torch.unique(cooA.rowidx, return_counts=True)
offset = counts.cumsum(0).int()
max_count, max_idx = torch.sort(counts, descending=True)
print(torch.median(max_count.float()))
torch.testing.assert_allclose(out2, out3, rtol=0.05, atol=0.001)
p = 200 / (2048 * 12288 * 4)
n = out1.numel()
count = math.ceil(p * n)
assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=count)
# torch.cuda.synchronize()
# t0 = time.time()
# for i in range(100):
# out2 = F.spmm_coo_very_sparse(cooA, B)
# torch.cuda.synchronize()
# print('fp16', time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo(cooA, B)
torch.cuda.synchronize()
print("cusparse fp16", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo_very_sparse(cooA, CBt)
torch.cuda.synchronize()
print("int8", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
torch.cuda.synchronize()
print("int8+dequant", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = torch.matmul(A, B)
torch.cuda.synchronize()
print("matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
out = out1 + out2
torch.cuda.synchronize()
print("sparse+ matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)
torch.cuda.synchronize()
print("partial matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
torch.cuda.synchronize()
print("partial matmul", time.time() - t0)
batch_size = 1
seqdim = 1
values = []
values.append((batch_size, seqdim, 768, 4 * 768))
# values.append((batch_size, seqdim, 1024, 4*1024))
# values.append((batch_size, seqdim, 1536, 4*1536))
# values.append((batch_size, seqdim, 2048, 4*2048))
# values.append((batch_size, seqdim, 2560, 4*2560))
# values.append((batch_size, seqdim, 4096, 4*4096))
# values.append((batch_size, seqdim, 5140, 4*5140))
#values.append((batch_size, seqdim, 12288, 4*12288))
names = [
"batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
iters = 128
formatB = F.get_special_format_str()
A = torch.randn(batch, seq, model, device="cuda").half()
B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
torch.nn.init.xavier_uniform_(B)
linear8bit = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
linear8bit.eval()
outliers = torch.randint(0, model, size=(5,)).cuda()
A[:, :, outliers] = 8.0
linearMixedBit = (
bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
)
linearMixedBit.eval()
# warmup
for i in range(iters):
torch.matmul(A, B.t())
torch.cuda.synchronize()
print("")
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
torch.matmul(A, B.t())
torch.cuda.synchronize()
print(
f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul(A, B)
torch.cuda.synchronize()
print(f"CB -> CxB conversion (each iteration): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul(A, B, threshold=6.0)
torch.cuda.synchronize()
print(f"CB -> CxB conversion + threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)
C32A, SA = F.transform(CA, "col32")
CB, CBt, SCB, SCBt, coo_tensorB = F.double_quant(B)
CxB, SB = F.transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
torch.cuda.synchronize()
print(f"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
BA, statsB = F.vectorwise_quant(B, dim=1)
CxB, SB = F.nvidia_transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
A2 = A.view(-1, A.shape[-1]).contiguous()
CA, statsA = F.vectorwise_quant(A2, dim=1)
C32A, SA = F.nvidia_transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
F.vectorwise_mm_dequant(Cout, statsA, statsB.t())
torch.cuda.synchronize()
#print(f"vector pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
BA, statsB = F.vectorwise_quant(B, dim=1, quant_type="linear")
CxB, SB = F.nvidia_transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
A2 = A.view(-1, A.shape[-1]).contiguous()
CA, statsA = F.vectorwise_quant(A2, dim=1, quant_type="linear")
C32A, SA = F.nvidia_transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
out = Cout * statsB * statsA * (1.0 / (127 * 127))
torch.cuda.synchronize()
#print(f"linear pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
linear8bit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
linear8bit(A)
torch.cuda.synchronize()
print(
f"bnb linear8bitlt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
linearMixedBit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
linearMixedBit(A)
torch.cuda.synchronize()
print(
f"bnb linear8bitlt with threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
def test_zeropoint():
def quant_zp(x):
dtype = x.dtype
x = x.float()
dyna = x.max() - x.min()
if dyna == 0:
dyna = 1
qx = 254.0 / dyna
minx = x.min()
# zpx = torch.round(minx* qx)
# zpx = 127 - torch.round(x.max()* qx)
zpx = torch.round(x.min() * qx) - 127
x = (qx * x) + zpx
return x, qx, zpx
batch = 2
seq = 512
model = 1024
hidden = 4 * model
A = torch.randn(batch * seq, model, device="cuda").half() * 0.1
B = torch.randn(model, hidden, device="cuda").half() * 0.1
C0 = torch.matmul(A, B)
# A, SA = F.vectorwise_quant(A, quant_type='linear')
# B, SB = F.vectorwise_quant(B, quant_type='linear')
A = A.float()
B = B.float()
C1 = torch.matmul(A, B)
C3 = bnb.matmul(A.half(), B.t().contiguous().half())
zp = 1
# C2 = torch.matmul(A-zp, B)
# C2 += B.sum(0).view(1, -1)*zp
C2 = torch.matmul(A, B - zp)
C2 -= A.sum(1).view(-1, 1) * zp
ca, cqa, cza = quant_zp(A)
print(ca.min(), ca.max())
print((ca - cza).min(), (ca - cza).max())
zp = 1
scale = 2.0
C5 = torch.matmul((A * scale) - zp, B)
C5 += B.sum(0) * zp
C5 /= scale
CA, qa, zpa = quant_zp(A)
C4 = torch.matmul(CA, B)
C4 -= B.sum(0) * zpa
C4 /= qa
zpb = 1
zpa = 1
qa = 2
qb = 2
C6 = torch.matmul((A * qa) + zpa, (B * qb) + zpb)
C6 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
C6 -= zpa * zpb * A.shape[1]
C6 /= qa * qb
CA, qa, zpa = quant_zp(A)
CB, qb, zpb = quant_zp(B)
C7 = torch.matmul(CA, CB)
C7 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
C7 -= zpa * zpb * A.shape[1]
C7 /= qa * qb
print("")
# print(C0.flatten()[:10])
print(C1.flatten()[:10])
print(C2.flatten()[:10])
print(C3.flatten()[:10])
print(C5.flatten()[:10])
print(C6.flatten()[:10])
print(C7.flatten()[:10])
err1 = torch.abs(C1 - C2).mean().item()
err2 = torch.abs(C1 - C3).mean().item()
err3 = torch.abs(C1 - C4).mean().item()
err4 = torch.abs(C1 - C5).mean().item()
err5 = torch.abs(C1 - C6).mean().item()
err6 = torch.abs(C1 - C7).mean().item()
print(err1, err2, err3, err4, err5, err6)
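# Self-contained check of the zero-point identity used for C6/C7 above: with
# per-tensor scales qa, qb and zero points zpa, zpb,
#   (qa*A + zpa) @ (qb*B + zpb)
#     = qa*qb*(A @ B) + qa*zpb*rowsum(A) + qb*zpa*colsum(B) + zpa*zpb*K,
# so subtracting the three correction terms and dividing by qa*qb recovers A @ B.
# Small CPU example with hypothetical scale/zero-point values.
def _check_zeropoint_identity():
    torch.manual_seed(0)
    A = torch.randn(8, 16)
    B = torch.randn(16, 4)
    qa, qb, zpa, zpb = 2.0, 3.0, 1.0, -2.0
    C = torch.matmul(A * qa + zpa, B * qb + zpb)
    C -= qa * zpb * A.sum(1).view(-1, 1)
    C -= qb * zpa * B.sum(0).view(1, -1)
    C -= zpa * zpb * A.shape[1]
    C /= qa * qb
    assert torch.allclose(C, A @ B, atol=1e-4)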
def test_extract_outliers():
for i in range(k):
shapeA = (4096, 4096 * 4)
idx = torch.unique(torch.randint(0, shapeA[1], size=(10,)).int()).cuda()
# idx = torch.Tensor([0]).int().cuda()
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
outliers1 = A[:, idx.long()]
CA, SA = F.transform(A, "col_turing")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
torch.testing.assert_allclose(outliers1, outliers2)
CA, SA = F.transform(A, "col_ampere")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
torch.testing.assert_allclose(outliers1, outliers2)
def test_blockwise_cpu_large():
diffs = []
reldiffs = []
batch = 128
seq = 128
for hidden in [128]:#, 14336]:
for blocksize in [4096, 16384]:
for i in range(2):
A1 = torch.randn(batch, seq, hidden, device='cpu')
t0 = time.time()
C, S = F.quantize_blockwise(A1, blocksize=blocksize)
A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
print(time.time() - t0)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
assert diffs[-1] < 0.011
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
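# Rough sketch of what the blockwise round trip above measures: split the tensor
# into fixed-size 1D blocks, scale each block by its own absmax, round to 8 bits,
# and dequantize. Plain linear 8-bit rounding only -- bitsandbytes additionally
# uses a non-uniform code, so the error here is not expected to match exactly.
def _reference_blockwise_roundtrip(A, blocksize=4096):
    flat = A.float().flatten()
    pad = (-flat.numel()) % blocksize
    if pad:
        flat = torch.cat([flat, flat.new_zeros(pad)])
    blocks = flat.view(-1, blocksize)
    absmax = blocks.abs().amax(dim=1, keepdim=True).clamp_min(1e-8)
    dequant = torch.round(blocks / absmax * 127) / 127 * absmax
    return dequant.flatten()[: A.numel()].view_as(A)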
def test_fp8_quant():
for e_bits in range(1, 7):
p_bits = 7-e_bits
code = F.create_fp8_map(True, e_bits, p_bits).cuda()
print(e_bits, p_bits)
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
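# The 8-bit "code" created above is a 256-entry codebook: quantizing a value snaps
# it (after scaling by the tensor absmax) to the nearest codebook entry, and
# dequantizing multiplies the looked-up entry by the scale again. A plain-PyTorch
# round trip for illustration; the library kernel does this blockwise and on GPU.
def _codebook_roundtrip(x, code):
    code = torch.sort(code.float().flatten())[0]      # ensure ascending order
    scale = x.float().abs().max().clamp_min(1e-8)
    xs = (x.float() / scale).flatten()
    pos = torch.searchsorted(code, xs).clamp(1, code.numel() - 1)
    left, right = code[pos - 1], code[pos]
    nearest = torch.where((xs - left).abs() <= (right - xs).abs(), left, right)
    return (nearest * scale).view_as(x)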
def test_few_bit_quant():
#print('')
for bits in range(2, 9):
#print('='*30, bits, '='*30)
for method in ['linear', 'fp8', 'dynamic', 'quantile']:
abserrs = []
relerrs = []
code = None
if method == 'linear':
code = F.create_linear_map(True, total_bits=bits).cuda()
elif method == 'fp8':
ebits = math.ceil(bits/2)
pbits = bits-ebits-1
code = F.create_fp8_map(True, ebits, pbits, bits).cuda()
elif method == 'dynamic':
code = F.create_dynamic_map(True, bits-0, bits).cuda()
elif method == 'quantile':
values = torch.randn(2048, 2048, device='cuda')
code = F.create_quantile_map(values, bits).cuda()
# for some data types we have no zero
# for some data types we have one zero
# for some data types we have two zeros
assert torch.unique(code).numel() in [2**bits, 2**bits-1], f'bits: {bits}, method: {method}'
#print(method, (code==0).sum())
assert code.numel() == 256
for i in range(10):
values = torch.randn(1, 32, device='cuda')
values /= values.abs().max()
#values[values.abs() < 1e-6] += 1e-5
q1 = []
v1 = []
for v in values[0]:
idx = torch.abs(v-code).argmin()
q1.append(idx.item())
v1.append(code[idx].item())
q1 = torch.Tensor(q1).cuda()
v1 = torch.Tensor(v1).cuda()
q2, S2 = F.quantize_blockwise(values, code=code)
v2 = F.dequantize_blockwise(q2, S2)
idx = torch.isclose(q1.int(), q2.int())
err2 = torch.abs(v2-values)
abserrs.append(err2.mean().item())
relerrs.append((err2/(1e-10+values).abs()).mean().item())
if idx.sum():
# some weird cases
err1 = torch.abs(v1-values).mean()
#assert err2.mean() <= err1
else:
torch.testing.assert_allclose(q1, q2)
#print(method, 'abserr:', sum(abserrs)/len(abserrs), 'relerr:', sum(relerrs)/len(relerrs))
#assert False
def test_kbit_quantile_estimation():
for i in range(100):
data = torch.randn(1024, 1024, device='cuda')
for bits in range(2, 9):
p = np.linspace(1.3e-4, 1-1.3e-4, 2**bits)
val1 = torch.Tensor(norm.ppf(p)).cuda()
val2 = F.estimate_quantiles(data, offset=0, num_quantiles=2**bits)
err = torch.abs(val1-val2).mean()
assert err < 0.038
for i in range(100):
data = torch.randn(1024, 1024, device='cuda')
for bits in range(2, 4):
total_values = 2**bits-1
p = np.linspace(0, 1, 2*total_values+1)
idx = np.arange(1, 2*total_values+1, 2)
p = p[idx]
offset = 1/(2*total_values)
p = np.linspace(offset, 1-offset, total_values)
val1 = torch.Tensor(norm.ppf(p)).cuda()
val2 = F.estimate_quantiles(data, num_quantiles=2**bits-1)
err = torch.abs(val1-val2).mean()
assert err < 0.035
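# What the k-bit quantile estimation above targets, written with torch.quantile as
# a slow but exact reference: for Gaussian data the estimated quantiles should
# approach the empirical quantiles on an evenly spaced probability grid (the offset
# keeps the grid away from the 0/1 tails). Sketch only; F.estimate_quantiles is an
# approximate GPU routine and will differ by the small error asserted above.
def _reference_kbit_quantiles(data, bits=4, offset=1.3e-4):
    p = torch.linspace(offset, 1 - offset, 2 ** bits, device=data.device)
    return torch.quantile(data.float().flatten(), p)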
def test_bench_dequantization():
a = torch.rand(1024, 1024, device='cuda').half()
qa, SA = F.quantize_blockwise(a)
max_theoretical_mu = 1024*1024*2/1024**3/672*1000*1000
#print(max_theoretical_mu)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
F.dequantize_blockwise(qa, SA, blocksize=2048)
torch.cuda.synchronize()
#print((time.time()-t0)/1e6)
|
bitsandbytes-main
|
tests/test_functional.py
|
import ctypes
import os
import shutil
import time
import uuid
from itertools import product
from os.path import join
import pytest
from lion_pytorch import Lion
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F
# import apex
k = 20
def get_temp_dir():
path = f"/tmp/autoswap/{str(uuid.uuid4())}"
os.makedirs(path, exist_ok=True)
return path
def rm_path(path):
shutil.rmtree(path)
str2optimizers = {}
str2optimizers["adam_pytorch"] = (None, torch.optim.Adam, bnb.optim.Adam)
# str2optimizers['adam_apex'] = (None, apex.optimizers.FusedAdam, bnb.optim.Adam)
# str2optimizers['momentum_apex'] = (None, lambda pxx: apex.optimizers.FusedSGD(pxx, 0.01, 0.9), bnb.optim.Adam)
str2optimizers["lion_pytorch"] = (None, Lion, bnb.optim.Lion)
str2optimizers["momentum_pytorch"] = (
None,
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
bnb.optim.Adam,
)
str2optimizers["adam"] = (torch.optim.Adam, bnb.optim.Adam)
# str2optimizers['fused_adam'] = (apex.optimizers.FusedAdam, bnb.optim.Adam)
str2optimizers["lion"] = (Lion, bnb.optim.Lion)
str2optimizers["momentum"] = (
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.SGD(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["lars"] = (
lambda pxx: bnb.optim.PytorchLARS(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.LARS(pxx, 0.01, 0.9),
)
str2optimizers["rmsprop"] = (
lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.RMSprop(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["adam8bit"] = (
torch.optim.Adam,
lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=False),
)
str2optimizers["lion8bit"] = (
Lion,
lambda pxx: bnb.optim.Lion8bit(pxx, block_wise=False),
)
str2optimizers["momentum8bit"] = (
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["rmsprop8bit"] = (
lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["lars8bit"] = (
lambda pxx: bnb.optim.PytorchLARS(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.LARS8bit(pxx, 0.01, 0.9),
)
str2optimizers["adam8bit_blockwise"] = (
torch.optim.Adam,
lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=True),
)
str2optimizers["lion8bit_blockwise"] = (
Lion,
lambda pxx: bnb.optim.Lion8bit(pxx, block_wise=True),
)
str2optimizers["momentum8bit_blockwise"] = (
lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=True),
)
str2optimizers["rmsprop8bit_blockwise"] = (
lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=True),
)
str2statenames = {}
str2statenames["adam"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["lion"] = [("exp_avg", "state1")]
str2statenames["momentum"] = [("momentum_buffer", "state1")]
str2statenames["lars"] = [("momentum_buffer", "state1")]
str2statenames["lamb"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["rmsprop"] = [("square_avg", "state1")]
str2statenames["adam8bit"] = [
("exp_avg", "state1", "qmap1", "max1"),
("exp_avg_sq", "state2", "qmap2", "max2"),
]
str2statenames["lion8bit"] = [
("exp_avg", "state1", "qmap1", "max1")
]
str2statenames["lamb8bit"] = [
("exp_avg", "state1", "qmap1", "max1"),
("exp_avg_sq", "state2", "qmap2", "max2"),
]
str2statenames["adam8bit_blockwise"] = [
("exp_avg", "state1", "qmap1", "absmax1"),
("exp_avg_sq", "state2", "qmap2", "absmax2"),
]
str2statenames["lion8bit_blockwise"] = [
("exp_avg", "state1", "qmap1", "absmax1")
]
str2statenames["momentum8bit"] = [
("momentum_buffer", "state1", "qmap1", "max1")
]
str2statenames["momentum8bit_blockwise"] = [
("momentum_buffer", "state1", "qmap1", "absmax1")
]
str2statenames["lars8bit"] = [("momentum_buffer", "state1", "qmap1", "max1")]
str2statenames["rmsprop8bit"] = [("square_avg", "state1", "qmap1", "max1")]
str2statenames["rmsprop8bit_blockwise"] = [
("square_avg", "state1", "qmap1", "absmax1")
]
dim1 = [1024]
dim2 = [32, 1024, 4097, 1]
gtype = [torch.float32, torch.float16]
optimizer_names = ["adam", "momentum", "rmsprop", "lars", "lion"]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
p2 = p1.clone()
p1 = p1.float()
torch_optimizer = str2optimizers[optim_name][0]([p1])
bnb_optimizer = str2optimizers[optim_name][1]([p2])
if gtype == torch.float32:
atol, rtol = 1e-6, 1e-5
else:
atol, rtol = 1e-4, 1e-3
for i in range(k):
g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
p1.grad = g.clone().float()
p2.grad = g.clone()
bnb_optimizer.step()
torch_optimizer.step()
for name1, name2 in str2statenames[optim_name]:
torch.testing.assert_allclose(
torch_optimizer.state[p1][name1],
bnb_optimizer.state[p2][name2],
atol=atol,
rtol=rtol,
)
torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
if i % (k // 5) == 0 and i > 0:
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
for name1, name2 in str2statenames[optim_name]:
torch.testing.assert_allclose(
torch_optimizer.state[p1][name1],
bnb_optimizer.state[p2][name2],
atol=atol,
rtol=rtol,
)
if gtype == torch.float16:
                # the Adam buffers should also be close because they are 32-bit,
                # but the parameters can diverge because they are 16-bit;
                # the differences grow larger with each update
                # --> copy the state to keep the weights close
p1.data = p1.data.half().float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.half(), p2)
if optim_name in ["lars", "lamb"]:
assert bnb_optimizer.state[p2]["unorm_vec"] > 0.0
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32, torch.float16]
values = list(product(dim1, dim2, gtype))
names = ["dim1_{}_dim2_{}_gtype_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, gtype", values, ids=names)
def test_global_config(dim1, dim2, gtype):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
p2 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
p3 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
mask = torch.rand_like(p2) < 0.1
beta1 = 0.9
beta2 = 0.999
lr = 0.001
eps = 1e-8
bnb.optim.GlobalOptimManager.get_instance().initialize()
bnb.optim.GlobalOptimManager.get_instance().override_config(
p3, "optim_bits", 8
)
bnb.optim.GlobalOptimManager.get_instance().register_parameters(
[p1, p2, p3]
)
p1 = p1.cuda()
p2 = p2.cuda()
p3 = p3.cuda()
adam2 = bnb.optim.Adam([p1, p2, p3], lr, (beta1, beta2), eps)
if gtype == torch.float32:
atol, rtol = 1e-6, 1e-5
else:
atol, rtol = 1e-4, 1e-3
for i in range(50):
g1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
g2 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
g3 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
p1.grad = g1
p2.grad = g2
p3.grad = g3
adam2.step()
assert adam2.state[p3]["state1"].dtype == torch.uint8
assert adam2.state[p3]["state2"].dtype == torch.uint8
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32, torch.float16]
optimizer_names = [
"adam8bit",
"lion8bit",
"momentum8bit",
"rmsprop8bit",
"adam8bit_blockwise",
"lion8bit_blockwise",
"lars8bit",
"momentum8bit_blockwise",
"rmsprop8bit_blockwise",
]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
p2 = p1.clone()
p1 = p1.float()
blocksize = 2048
torch_optimizer = str2optimizers[optim_name][0]([p1])
bnb_optimizer = str2optimizers[optim_name][1]([p2])
if gtype == torch.float32:
atol, rtol = 3e-3, 1e-3
patol, prtol = 1e-5, 1e-3
else:
atol, rtol = 3e-3, 1e-3
patol, prtol = 1e-5, 1e-3
errors = []
relerrors = []
for i in range(50):
g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
p1.grad = g.clone().float()
p2.grad = g.clone()
bnb_optimizer.step()
torch_optimizer.step()
torch.testing.assert_allclose(p1, p2.float(), atol=patol, rtol=prtol)
dequant_states = []
for name1, name2, qmap, max_val in str2statenames[optim_name]:
# print(bnb_optimizer.state[p2][max_val], name1)
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol
)
== 0
)
assert num_not_close.sum().item() < 20
dequant_states.append(s1.clone())
err = torch.abs(p1 - p2)
relerr = err / torch.abs(p1)
assert err.mean() < 0.0001
assert relerr.mean() < 0.001
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
if i % 10 == 0 and i > 0:
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
s1cpy = s.clone()
raws1cpy = bnb_optimizer.state[p2][name2].clone()
qmap1 = bnb_optimizer.state[p2][qmap].clone()
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
torch.testing.assert_allclose(
raws1cpy, bnb_optimizer.state[p2][name2]
)
torch.testing.assert_allclose(
qmap1, bnb_optimizer.state[p2][qmap]
)
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
torch.testing.assert_allclose(s1cpy, s1)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1],
s1,
atol=atol,
rtol=rtol,
)
== 0
)
assert num_not_close.sum().item() < 20
torch.testing.assert_allclose(
p1, p2.float(), atol=patol, rtol=prtol
)
# the parameters diverge quickly. Here we keep them close
# together so we can test against the Adam error
p1.data = p1.data.to(gtype).float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.to(gtype), p2)
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
torch_optimizer.state[p1][name1].copy_(s.data)
# print(sum(errors)/len(errors))
# print(sum(relerrors)/len(relerrors))
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32]
optim_bits = [32, 8]
values = list(product(dim1, dim2, gtype, optim_bits))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_bits_{}".format(*vals)
for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_bits", values, ids=names)
def test_adam_percentile_clipping(dim1, dim2, gtype, optim_bits):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
beta1 = 0.9
beta2 = 0.999
lr = 0.001
eps = 1e-8
p1 = p1.cuda()
p2 = p1.clone()
adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)
adam2 = bnb.optim.Adam(
[p2],
lr,
(beta1, beta2),
eps,
optim_bits=optim_bits,
percentile_clipping=5,
)
gnorm_vec = torch.zeros(100).cuda()
step = 0
for i in range(50):
step += 1
g1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + (
0.01 * i
)
g2 = g1.clone()
p2.grad = g2
current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(
g1, gnorm_vec, step, 5
)
g1 = (g1.float() * gnorm_scale).to(gtype)
p1.grad = g1
adam1.step()
adam2.step()
        # gnorm_scale is not deterministic (warp reductions), so there can be slight differences in state
if optim_bits == 32:
torch.testing.assert_allclose(p1, p2)
torch.testing.assert_allclose(
adam1.state[p1]["state1"],
adam2.state[p2]["state1"],
atol=5e-5,
rtol=1e-4,
)
torch.testing.assert_allclose(
adam1.state[p1]["state2"],
adam2.state[p2]["state2"],
atol=5e-5,
rtol=1e-4,
)
elif optim_bits == 8:
torch.testing.assert_allclose(p1, p2, atol=1e-4, rtol=1e-3)
torch.testing.assert_allclose(
adam1.state[p1]["state1"],
adam2.state[p2]["state1"],
atol=2,
rtol=1e-3,
)
torch.testing.assert_allclose(
adam1.state[p1]["state2"],
adam2.state[p2]["state2"],
atol=2,
rtol=1e-3,
)
adam1.state[p1]["state1"].copy_(adam2.state[p2]["state1"])
adam1.state[p1]["state2"].copy_(adam2.state[p2]["state2"])
if i % 10 == 0 and i > 0:
path = get_temp_dir()
torch.save(adam2.state_dict(), join(path, "opt.pt"))
del adam2
adam2 = None
adam2 = bnb.optim.Adam(
[p2],
lr,
(beta1, beta2),
eps,
optim_bits=optim_bits,
percentile_clipping=5,
)
adam2.load_state_dict(torch.load(join(path, "opt.pt")))
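# Rough, plain-PyTorch sketch of the idea behind the percentile clipping exercised
# above: keep a rolling history of recent gradient norms and shrink the current
# gradient whenever its norm exceeds the chosen percentile of that history. The
# exact semantics of F.percentile_clipping (history length, percentile handling,
# warm-up) may differ; this only mirrors its (current_gnorm, clip_val, gnorm_scale)
# return convention for illustration.
def _percentile_clipping_sketch(grad, gnorm_history, step, percentile=5):
    current_gnorm = torch.norm(grad.float()).clamp_min(1e-12)
    gnorm_history[step % gnorm_history.numel()] = current_gnorm
    seen = gnorm_history[: min(step + 1, gnorm_history.numel())]
    clip_val = torch.quantile(seen, percentile / 100.0)
    gnorm_scale = torch.clamp(clip_val / current_gnorm, max=1.0)
    return current_gnorm, clip_val, gnorm_scale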
dim1 = [4096]
dim2 = [4096]
gtype = [torch.float32, torch.float16]
# optimizer_names = ['adam8bit_blockwise', 'adam8bit', 'lamb8bit']
# optimizer_names = ['adam8bit_blockwise', 'adam_apex', 'adam8bit', 'adam', 'adam_pytorch']
# optimizer_names = ['momentum_apex', 'momentum8bit', 'momentum_pytorch']
# optimizer_names = ['lamb_apex', 'lamb8bit']
# optimizer_names = ['lars_apex', 'lars8bit']
optimizer_names = ["adam8bit_blockwise"]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_benchmark_blockwise(dim1, dim2, gtype, optim_name):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
bnb_optimizer = str2optimizers[optim_name][1]([p1])
g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
p1.grad = g
for i in range(k):
if i == k // 5:
            # skip the first k // 5 iterations as burn-in before timing
torch.cuda.synchronize()
t0 = time.time()
bnb_optimizer.step()
torch.cuda.synchronize()
s = time.time() - t0
print("")
params = (k - k // 5) * dim1 * dim2
print(optim_name, gtype, s / params)
# assert s < 3.9
|
bitsandbytes-main
|
tests/test_optim.py
|
import os
from typing import List, NamedTuple
import pytest
import bitsandbytes as bnb
from bitsandbytes.cuda_setup.main import (
CUDA_RUNTIME_LIB,
determine_cuda_runtime_lib_path,
evaluate_cuda_setup,
extract_candidate_paths,
)
"""
'LD_LIBRARY_PATH': ':/mnt/D/titus/local/cuda-11.1/lib64/'
'CONDA_EXE': '/mnt/D/titus/miniconda/bin/conda'
'LESSCLOSE': '/usr/bin/lesspipe %s %s'
'OLDPWD': '/mnt/D/titus/src'
'CONDA_PREFIX': '/mnt/D/titus/miniconda/envs/8-bit'
'SSH_AUTH_SOCK': '/mnt/D/titus/.ssh/ssh-agent.tim-uw.sock'
'CONDA_PREFIX_1': '/mnt/D/titus/miniconda'
'PWD': '/mnt/D/titus/src/8-bit'
'HOME': '/mnt/D/titus'
'CONDA_PYTHON_EXE': '/mnt/D/titus/miniconda/bin/python'
'CUDA_HOME': '/mnt/D/titus/local/cuda-11.1/'
'TMUX': '/tmp/tmux-1007/default,59286,1'
'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop'
'SSH_TTY': '/dev/pts/0'
'MAIL': '/var/mail/titus'
'SHELL': '/bin/bash'
'DBUS_SESSION_BUS_ADDRESS': 'unix:path=/run/user/1007/bus'
'XDG_RUNTIME_DIR': '/run/user/1007'
'PATH': '/mnt/D/titus/miniconda/envs/8-bit/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/mnt/D/titus/local/cuda-11.1/bin'
'LESSOPEN': '| /usr/bin/lesspipe %s'
'_': '/mnt/D/titus/miniconda/envs/8-bit/bin/python'
# any that include 'CONDA' that are not 'CONDA_PREFIX'
# we search for
'CUDA_HOME': '/mnt/D/titus/local/cuda-11.1/'
"""
class InputAndExpectedOutput(NamedTuple):
input: str
output: str
HAPPY_PATH__LD_LIB_TEST_PATHS: List[InputAndExpectedOutput] = [
(
f"some/other/dir:dir/with/{CUDA_RUNTIME_LIB}",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f":some/other/dir:dir/with/{CUDA_RUNTIME_LIB}",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"some/other/dir:dir/with/{CUDA_RUNTIME_LIB}:",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"some/other/dir::dir/with/{CUDA_RUNTIME_LIB}",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"dir/with/{CUDA_RUNTIME_LIB}:some/other/dir",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
(
f"dir/with/{CUDA_RUNTIME_LIB}:other/dir/libcuda.so",
f"dir/with/{CUDA_RUNTIME_LIB}",
),
]
@pytest.fixture(params=HAPPY_PATH__LD_LIB_TEST_PATHS)
def happy_path_path_string(tmpdir, request):
    # create each candidate directory under tmpdir and add a fake CUDA runtime
    # library wherever the input path string expects one
    for path in extract_candidate_paths(request.param[0]):
        test_dir = os.path.join(str(tmpdir), os.path.dirname(str(path)))
        os.makedirs(test_dir, exist_ok=True)
        if CUDA_RUNTIME_LIB in str(path):
            open(os.path.join(test_dir, CUDA_RUNTIME_LIB), "a").close()
    return request.param
UNHAPPY_PATH__LD_LIB_TEST_PATHS = [
f"a/b/c/{CUDA_RUNTIME_LIB}:d/e/f/{CUDA_RUNTIME_LIB}",
f"a/b/c/{CUDA_RUNTIME_LIB}:d/e/f/{CUDA_RUNTIME_LIB}:g/h/j/{CUDA_RUNTIME_LIB}",
]
def test_full_system():
## this only tests the cuda version and not compute capability
    # if CONDA_PREFIX exists, it takes priority over all other env variables,
    # but it does not contain the library directly, so we need to look at a sub-folder
version = ""
if "CONDA_PREFIX" in os.environ:
ls_output, err = bnb.utils.execute_and_return(f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so')
major, minor, revision = (ls_output.split(" ")[-1].replace("libcudart.so.", "").split("."))
version = float(f"{major}.{minor}")
if version == "" and "LD_LIBRARY_PATH" in os.environ:
ld_path = os.environ["LD_LIBRARY_PATH"]
paths = ld_path.split(":")
version = ""
for p in paths:
if "cuda" in p:
idx = p.rfind("cuda-")
version = p[idx + 5 : idx + 5 + 4].replace("/", "")
version = float(version)
break
assert version > 0
binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
binary_name = binary_name.replace("libbitsandbytes_cuda", "")
assert binary_name.startswith(str(version).replace(".", ""))
|
bitsandbytes-main
|
tests/test_cuda_setup_evaluator.py
|
import bitsandbytes as bnb
import pytest
import torch
from bitsandbytes import functional as F
from bitsandbytes.autograd import get_inverse_transform_indices, undo_layout
from bitsandbytes.nn.modules import Linear8bitLt
# contributed by Alex Borzunov, see:
# https://github.com/bigscience-workshop/petals/blob/main/tests/test_linear8bitlt.py
@pytest.mark.skipif(
not torch.cuda.is_available() or torch.cuda.get_device_capability() < (7, 5),
reason="this test requires a turing-generation or newer GPU, see bitsandbytes docs",
)
def test_layout_exact_match():
x = (torch.randn(14336 * 3, 14336) * 10).to(torch.int8).cuda()
for tile_size, order in ((8, 32), "col_turing"), ((32, 32), "col_ampere"):
transform = lambda x: F.transform(x.cuda(), from_order="row", to_order=order)[0].to(x.device)
tile_indices = get_inverse_transform_indices(transform, tile_size)
cxb = transform(x)
torch.cuda.synchronize()
restored_x = undo_layout(cxb, tile_indices)
torch.cuda.synchronize()
assert restored_x.is_contiguous()
assert torch.all(torch.eq(restored_x, x))
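# The core trick behind get_inverse_transform_indices/undo_layout, shown on a plain
# 1D permutation: run an index "probe" through the (otherwise opaque) transform
# once, then invert the observed permutation with a scatter. Simplified stand-in for
# the tiled 2D case the test exercises; `transform` here is any function that
# gathers elements as x[perm] for a fixed permutation perm.
def _inverse_indices_for_permutation(transform, n):
    probe = torch.arange(n)
    permuted = transform(probe)              # permuted[i] == perm[i]
    inverse = torch.empty_like(permuted)
    inverse[permuted] = torch.arange(n)      # restore with x_permuted[inverse]
    return inverse
# e.g. perm = torch.randperm(16); inv = _inverse_indices_for_permutation(lambda t: t[perm], 16)
# then torch.equal(x[perm][inv], x) holds for any x of length 16.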
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
def test_linear_no_igemmlt():
linear = torch.nn.Linear(1024, 3072)
x = torch.randn(3, 1024, dtype=torch.half)
linear_custom = Linear8bitLt(
linear.in_features,
linear.out_features,
linear.bias is not None,
has_fp16_weights=False,
threshold=6.0,
)
linear_custom.state.force_no_igemmlt = True
linear_custom.weight = bnb.nn.Int8Params(
linear.weight.data.clone(), requires_grad=False, has_fp16_weights=False
).to(linear.weight.dtype)
linear_custom.bias = linear.bias
linear = linear_custom.cuda()
linear = linear.half().cuda()
x_ref = x.clone().cuda().requires_grad_(True)
x_ours = x.clone().cuda().requires_grad_(True)
fx_ref = linear(x_ref).float()
grad_proj = torch.randn_like(fx_ref)
(fx_ref * grad_proj).mean().backward()
fx_ours = linear_custom(x_ours).float()
(fx_ours * grad_proj).mean().backward()
assert torch.allclose(fx_ref, fx_ours, atol=0.02)
assert torch.allclose(x_ref.grad, x_ours.grad, atol=0.01)
assert not linear_custom.state.has_fp16_weights
assert linear_custom.state.CB is not None
assert linear_custom.state.CxB is None
|
bitsandbytes-main
|
tests/test_linear8bitlt.py
|
from itertools import permutations, product
import pytest
import torch
import bitsandbytes as bnb
n = 1
k = 25
dim1 = torch.randint(16, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 96, size=(n,)).tolist()
dim3 = torch.randint(32, 96, size=(n,)).tolist()
dim4 = torch.randint(32, 96, size=(n,)).tolist()
funcs = [(torch.bmm, bnb.bmm_cublas), (torch.matmul, bnb.matmul_cublas)]
str_funcs = ["bmm", "matmul"]
req_grad = [(False, False), (True, False), (True, True), (False, True)]
req_grad_str = ["FF", "TF", "TT", "FT"]
transpose = [(False, False), (False, True), (True, True), (True, False)]
str_transpose = ["FF", "FT", "TT", "TF"]
dtype = [torch.float32, torch.float16]
values = list(
product(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose)
)
str_values = list(
product(
dim1, dim2, dim3, dim4, str_funcs, dtype, req_grad_str, str_transpose
)
)
names = [
"dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}".format(
*vals
)
for vals in str_values
]
@pytest.mark.parametrize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose",
values,
ids=names,
)
def test_matmul(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
if not torch.cuda.is_available(): pytest.skip('No GPU found.')
if dim2 > 0:
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
dim4 = dim4 - (dim4 % 16)
for i in range(k):
# normal multiply
if funcs[0] in [torch.mm, torch.matmul]:
dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
A = torch.randn(size=dimA, device="cuda", requires_grad=req_grad[0])
B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1])
target = torch.randn(
size=(dim2, dim4), device="cuda", requires_grad=req_grad[1]
)
torch.nn.init.xavier_uniform_(B)
if not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B)
elif not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B.t())
elif transpose[0] and not transpose[1]:
out_torch = funcs[0](A.t(), B)
out_bnb = funcs[1](A.t(), B)
elif transpose[0] and transpose[1]:
out_torch = funcs[0](A.t(), B.t())
out_bnb = funcs[1](A.t(), B.t())
n = out_bnb.numel()
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() < n * 0.0175
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() < n * 0.001
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.02
torch.testing.assert_allclose(
gradB1, gradB2, atol=0.18, rtol=0.3
)
# batched matrix multiply
if funcs[0] in [torch.bmm, torch.matmul]:
A = torch.randn(
size=(dim1, dim2, dim3),
device="cuda",
requires_grad=req_grad[0],
)
B = torch.randn(
size=(dim1, dim3, dim4),
device="cuda",
requires_grad=req_grad[1],
)
target = torch.randn(
size=(dim1, dim2, dim4),
device="cuda",
requires_grad=req_grad[1],
)
torch.nn.init.xavier_uniform_(B)
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B)
n = out_bnb.numel()
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() < n * 0.01
torch.testing.assert_allclose(
out_bnb, out_torch, atol=0.027, rtol=0.2
)
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.02
if funcs[0] in [torch.matmul]:
dim1 = dim1 - (dim1 % 16)
A = torch.randn(
size=(dim1, dim2, dim3),
device="cuda",
requires_grad=req_grad[0],
)
dimB = (dim4, dim3) if transpose[1] else (dim3, dim4)
B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1])
target = torch.randn(
size=(dim1, dim2, dim4),
device="cuda",
requires_grad=req_grad[1],
)
torch.nn.init.xavier_uniform_(B)
if transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B.t())
else:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B)
n = out_bnb.numel()
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() < n * 0.0175
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() < n * 0.001
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() < n * 0.02
n = 1
k = 3
dim1 = torch.randint(16, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 96, size=(n,)).tolist()
dim3 = torch.randint(32, 96, size=(n,)).tolist()
dim4 = torch.randint(32, 96, size=(n,)).tolist()
dim2.append(0)
decomp = [0.0, 6.0]
funcs = [(torch.matmul, bnb.matmul)]
str_funcs = ["matmul"]
req_grad = [(False, False), (True, False), (True, True), (False, True)]
req_grad = list(product([True, False], repeat=3))
req_grad_str = []
for c in req_grad:
strval = ''
for v in c:
if v == True: strval += 'T'
else: strval += 'F'
req_grad_str.append(strval)
transpose = [(False, True), (False, False)]
str_transpose = ["NT", "NN"]
dtype = [torch.float16, torch.bfloat16, torch.float32]
has_fp16_weights = [True, False]
has_bias = [True, False]
values = list(
product(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
)
)
str_values = list(
product(
dim1,
dim2,
dim3,
dim4,
str_funcs,
dtype,
req_grad_str,
str_transpose,
decomp,
has_fp16_weights,
has_bias
)
)
names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}_decomp_{}_has_fp16_weights_{}_has_bias_{}".format(*vals) for vals in str_values]
@pytest.mark.parametrize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
if not torch.cuda.is_available(): pytest.skip('No GPU found.')
dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device="cuda")
if has_bias == False:
req_grad = list(req_grad)
req_grad[2] = False
for i in range(k):
# normal multiply
if funcs[0] in [torch.mm, torch.matmul]:
A = torch.randn(
size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype
)
if decomp == 6.0:
with torch.no_grad():
A[:, outlier_dim] = 6.0
B = torch.randn(
size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype
)
target = torch.randn(
size=(dim2, dim4),
device="cuda",
requires_grad=req_grad[1],
dtype=dtype,
)
bias = None
bias2 = None
if has_bias:
bias = torch.randn(dim4, device='cuda', dtype=dtype, requires_grad=req_grad[2])
bias2 = bias.clone()
torch.nn.init.xavier_uniform_(B)
B2 = B.clone()
state = bnb.MatmulLtState()
state.threshold = decomp
state.has_fp16_weights = has_fp16_weights
if not has_fp16_weights:
if not transpose[0] and not transpose[1]:
B2 = B2.t().contiguous()
(
state.CB,
CBt,
state.SCB,
SCBt,
coo_tensorB,
) = bnb.functional.double_quant(B2.to(torch.float16))
B2 = state.CB
if not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B2, state=state, bias=bias2)
elif not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)
if has_bias:
out_torch += bias
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print(f'abs error {err:.4f}')
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() <= n * (0.0175 if dtype == torch.float16 else 0.021)
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() <= n * 0.001
if has_fp16_weights:
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(
out_bnb, target
).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias1 = bias.grad
bias.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias2 = bias.grad
bias.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
if dim2 > 0:
assert torch.abs(gradB1).sum() > 0.0
assert torch.abs(gradB2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0
assert torch.abs(gradB2).sum() == 0.0
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.02
torch.testing.assert_allclose(
gradB1, gradB2, atol=0.18, rtol=0.3
)
if req_grad[2]:
torch.testing.assert_allclose(gradBias1, gradBias2)
|
bitsandbytes-main
|
tests/test_autograd.py
|
from itertools import product
import math
import einops
import pytest
import torch
from torch import nn
import bitsandbytes as bnb
class MockArgs:
def __init__(self, initial_data):
for key in initial_data:
setattr(self, key, initial_data[key])
class MLP8bit(torch.nn.Module):
def __init__(self, dim1, dim2, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0):
super().__init__()
self.fc1 = bnb.nn.Linear8bitLt(
dim1, dim2, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward,
threshold=threshold
)
self.fc2 = bnb.nn.Linear8bitLt(
dim2, dim1, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward,
threshold=threshold
)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
def get_args():
args = MockArgs([])
args.quant_type = "vector"
args.use_8bit_training = "full"
args.clip_freq = 9999
return args
def assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
print(f"Too many values not close: assert {sumval} < {count}")
torch.testing.assert_allclose(a, b, rtol, atol)
class LinearFunction(torch.autograd.Function):
@staticmethod
def get_8bit_linear_trimmed(x, stochastic=False, trim_value=3.0):
round_func = (
LinearFunction.round_stoachastic if stochastic else torch.round
)
norm = math.sqrt(math.pi) / math.sqrt(2.0)
# std = torch.abs(x).mean()*norm
std = torch.std(x)
max1 = std * trim_value
x = x / max1 * 127
x = round_func(x)
x[x > 127] = 127
x[x < -127] = -127
x = x / 127 * max1
return x
def quant(x, quant_type, dim=1):
if quant_type == "linear":
max1 = torch.abs(x).max().float()
xq = torch.round(x / max1 * 127).to(torch.int8)
return xq, max1
elif quant_type == "vector":
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
xq = torch.round(x / max1 * 127).to(torch.int8)
return xq, max1
elif quant_type == "min-max":
maxA = torch.amax(x, dim=dim, keepdim=True).float()
minA = torch.amin(x, dim=dim, keepdim=True).float()
scale = (maxA - minA) / 2.0
xq = torch.round(127 * (x - minA - scale) / scale).to(torch.int8)
return xq, (minA.float(), scale.float())
else:
return None
def dequant(xq, S1, S2, dtype, quant_type):
if quant_type == "linear":
norm = S1 * S2 / (127 * 127)
# double cast needed to prevent overflows
return (xq.float() * norm).to(dtype)
elif quant_type == "vector":
x = xq.float()
if len(xq.shape) == 2 and len(S1.shape) == 3:
S1 = S1.squeeze(0)
if len(xq.shape) == 2 and len(S2.shape) == 3:
S2 = S2.squeeze(0)
# print(x.shape, S1.shape, S2.shape)
if len(S1.shape) == 2:
x *= S1.t() / 127
else:
x *= S1 / 127
x *= S2 / 127
return x.to(dtype)
else:
return None
def dequant_min_max(xq, A, B, SA, SB, dtype):
offset = B.float().t().sum(0) * (SA[0] + SA[1])
x = xq.float()
if len(xq.shape) == 2 and len(SB.shape) == 3:
SB = SB.squeeze(0)
if len(xq.shape) == 2 and len(SA.shape) == 3:
SA = SA.squeeze(0)
if len(SB.shape) == 2:
x *= SB.t() / 127
else:
x *= SB / 127
x *= SA[1] / 127
x += offset
return x.to(dtype)
def get_8bit_linear(x, stochastic=False):
round_func = (
LinearFunction.round_stoachastic if stochastic else torch.round
)
max1 = torch.abs(x).max()
x = x / max1 * 127
x = round_func(x) / 127 * max1
# x = torch.round(x)/128*max1
return x
@staticmethod
def get_8bit_vector_wise(x, dim, stochastic=False):
round_func = (
LinearFunction.round_stoachastic if stochastic else torch.round
)
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
max1[max1 == 0] = 1.0
x = (x * 127) / max1
x = round_func(x) / 127 * max1
return x
@staticmethod
def round_stoachastic(x):
sign = torch.sign(x)
absx = torch.abs(x)
decimal = absx - torch.floor(absx)
rdm = torch.rand_like(decimal)
return sign * (torch.floor(absx) + (rdm < decimal).to(x.dtype))
@staticmethod
def fake_8bit_storage(w, exponent_bits):
        code = bnb.functional.create_dynamic_map(max_exponent_bits=exponent_bits).to(w.device)
absmax, C = bnb.functional.quantize_blockwise(w.data, code=code)
out = bnb.functional.dequantize_blockwise(absmax, C, code)
out = out.half()
w.copy_(out)
return out
@staticmethod
def fake_8bit_storage_quantile(w, args):
code = bnb.functional.estimate_quantiles(w.data, offset=args.offset)
# C = bnb.functional.quantize_no_absmax(code, w)
# out = bnb.functional.dequantize_no_absmax(code, C, out=w.data)
# print(out)
# out = out.half()
code /= torch.max(torch.abs(code))
absmax, C = bnb.functional.quantize_blockwise(w.data, code=code)
out = bnb.functional.dequantize_blockwise(absmax, C, code)
out = out.half()
w.copy_(out)
return out
@staticmethod
def fake_8bit_storage_stoachstic(w):
rand = torch.rand(1024, device=w.device)
absmax, C = bnb.functional.quantize_blockwise(w.data, rand=rand)
out = bnb.functional.dequantize_blockwise(absmax, C)
out = out.half()
w.copy_(out)
return out
@staticmethod
def fake_8bit_storage_with_max(w, topk=8):
blocked_w = einops.rearrange(w.flatten(), "(h b) -> h b", b=256)
max_val, idx = torch.sort(torch.abs(blocked_w), dim=1, descending=True)
idx = idx[:, :topk]
max_val = max_val[:, :topk]
mask = torch.zeros_like(blocked_w)
mask.scatter_(dim=1, index=idx, src=torch.ones_like(max_val))
mask = mask.bool()
# 1. zero out max values
# 2. quantize + dequantize
# 3. write back max values
# 4. copy matrix back to weight
values = blocked_w[mask]
blocked_w[mask] = 0
code = bnb.functional.create_dynamic_map()
code = code.to(w.device)
absmax, C = bnb.functional.quantize_blockwise(blocked_w.data)
bnb.functional.dequantize_blockwise(absmax, C, out=blocked_w)
blocked_w[mask] = values
unblocked_w = blocked_w.flatten().view(w.shape)
w.copy_(unblocked_w)
return unblocked_w
@staticmethod
def forward(ctx, x, weight, bias=None, args=None):
if args.use_8bit_training != "off":
weight8, S1 = LinearFunction.quant(weight, args.quant_type, dim=1)
x8, S2 = LinearFunction.quant(x, args.quant_type, dim=2)
outputq = bnb.functional.igemm(x8, weight8.t())
output = LinearFunction.dequant(
outputq, S1, S2, x.dtype, args.quant_type
)
# if torch.rand(1) < 0.01:
# output32 = torch.matmul(x, weight.t())
# err = torch.abs(output-output32).float()
# relerr = err/(torch.abs(output32).float()+1e-8)
# print(f'{err.mean().item():.4f}, {relerr.mean().item():.4f}', args.quant_type, 'forward', proxy)
else:
# output = torch.matmul(x, weight.t())
output = torch.einsum("bsi,oi->bso", x, weight)
ctx.save_for_backward(x, weight, bias)
ctx.args = args
if bias is not None:
output += bias.unsqueeze(0).expand_as(output)
return output
@staticmethod
def backward(ctx, grad_output):
x, weight, bias = ctx.saved_tensors
args = ctx.args
stochastic = False
grad_input = grad_weight = grad_bias = None
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0)
# weight and x are already 8bit
# -> transform grad_output to 8-bit
if args.use_8bit_training == "forward+wgrad":
grad_output8, S1 = LinearFunction.quant(
grad_output, args.quant_type, dim=[0, 1]
)
x8, S2 = LinearFunction.quant(x, args.quant_type, dim=[0, 1])
grad_weight8 = bnb.functional.igemm(grad_output8, x8)
grad_weight = LinearFunction.dequant(
grad_weight8, S1, S2, grad_output.dtype, args.quant_type
)
# grad_weight32 = torch.einsum('bso,bsi->oi', grad_output, x)
grad_input = grad_output.matmul(weight)
elif args.use_8bit_training == "full":
grad_output8, S1 = LinearFunction.quant(
grad_output, args.quant_type, dim=[0, 1]
)
x8, S2 = LinearFunction.quant(x, args.quant_type, dim=[0, 1])
grad_weight8 = torch.zeros_like(weight, dtype=torch.int32)
bnb.functional.igemm(grad_output8, x8, out=grad_weight8)
grad_weight = LinearFunction.dequant(
grad_weight8, S1, S2, grad_output.dtype, args.quant_type
)
grad_output8, S1 = LinearFunction.quant(
grad_output, args.quant_type, dim=2
)
weight8, S3 = LinearFunction.quant(weight, args.quant_type, dim=0)
grad_input8 = bnb.functional.igemm(grad_output8, weight8)
grad_input = LinearFunction.dequant(
grad_input8, S1, S3, grad_output.dtype, args.quant_type
)
else:
grad_input = grad_output.matmul(weight)
grad_weight = torch.einsum("bsi,bso->oi", x, grad_output)
return grad_input, grad_weight, grad_bias, None
class Linear8bit(nn.Module):
def __init__(self, input_features, output_features, bias=True, args=None):
super().__init__()
self.input_features = input_features
self.output_features = output_features
self.args = args
self.weight = nn.Parameter(torch.empty(output_features, input_features))
if bias:
self.bias = nn.Parameter(torch.empty(output_features))
else:
self.register_parameter("bias", None)
torch.nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
torch.nn.init.zeros_(self.bias)
def forward(self, x):
self.args.training = self.training
return LinearFunction.apply(x, self.weight, self.bias, self.args)
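# Editor's note: minimal usage sketch (not part of the original test file). It
# shows how the Linear8bit layer above is driven with the MockArgs returned by
# get_args(); it assumes a CUDA device and a CUDA-enabled bitsandbytes build.
# The leading underscore keeps pytest from collecting it as a test.
def _example_linear8bit_forward():
    if not torch.cuda.is_available():
        return None
    layer = Linear8bit(32, 64, args=get_args()).cuda().half()
    x = torch.randn(4, 8, 32, device="cuda").half()
    out = layer(x)  # quantizes x and the weight vector-wise, runs igemm, dequantizes
    assert out.shape == (4, 8, 64)
    return out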
threshold = [0.0, 3.0]
values = threshold
names = [f"threshold_{vals}" for vals in values]
@pytest.mark.parametrize("threshold", values, ids=names)
def test_linear8bitlt_inference(threshold):
l1 = bnb.nn.Linear8bitLt(32, 64, threshold=threshold).cuda().half()
assert l1.weight.device.type == "cuda"
assert l1.weight.dtype == torch.float16
l1.eval()
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
if i == 1:
assert l1.state.CxB is not None
def test_linear8bitlt_accumulated_gradient():
l1 = torch.nn.Sequential(
*[bnb.nn.Linear8bitLt(32, 32).cuda().half() for i in range(2)]
)
l2 = torch.nn.Sequential(
*[torch.nn.Linear(32, 32).cuda().half() for i in range(2)]
)
l2[0].weight = torch.nn.Parameter(l1[0].weight.clone())
l2[0].bias = torch.nn.Parameter(l1[0].bias.clone())
l2[1].weight = torch.nn.Parameter(l1[1].weight.clone())
l2[1].bias = torch.nn.Parameter(l1[1].bias.clone())
opt1 = bnb.optim.Adam8bit(l1.parameters(), lr=0.001)
opt2 = bnb.optim.Adam8bit(l2.parameters(), lr=0.001)
acc_steps = 10
for i in range(10):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
o2 = l2(b1)
loss1 = o1.mean()
loss2 = o2.mean()
loss1.backward()
loss2.backward()
if i == 2:
assert l1[0].state.CxB is not None
assert l1[1].state.CxB is not None
if i > 0 and i % acc_steps == 0:
opt1.step()
opt1.zero_grad(True)
opt2.step()
opt2.zero_grad(True)
assert_all_approx_close(
l1[0].weight, l2[0].weight, rtol=1.05, atol=0.01, count=2
)
assert_all_approx_close(
l1[1].weight, l2[1].weight, rtol=1.05, atol=0.01, count=2
)
# we do this copy because otherwise we have small divergences over time that add up
l1[0].weight.data.copy_(l2[0].weight.data)
l1[1].weight.data.copy_(l2[1].weight.data)
else:
torch.testing.assert_allclose(l1[0].weight.grad, l2[0].weight.grad)
torch.testing.assert_allclose(l1[1].weight.grad, l2[1].weight.grad)
threshold = [0.0, 2.0]
values = threshold
names = [f"threshold_{vals}" for vals in values]
@pytest.mark.parametrize("threshold", values, ids=names)
@pytest.mark.parametrize("memory_efficient_backward", [False])
def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
l1 = (
bnb.nn.Linear8bitLt(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
.cuda()
.half()
)
assert l1.weight.dtype == torch.int8
l1.eval()
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
assert o1.dtype == torch.float16
mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).cuda()
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
mlp = (
MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False)
.cuda()
.half()
)
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
mlp = (
MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False)
.half()
.cuda()
)
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
mlp = (
MLP8bit(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
.half()
.to("cuda")
)
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
assert mlp.fc1.weight.device.type == "cuda"
assert mlp.fc2.weight.device.type == "cuda"
mlp = MLP8bit(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
w1, w2 = mlp.fc1.weight.clone().cuda(), mlp.fc2.weight.clone().cuda() # grab weights before quantization,
mlp = mlp.cuda().half() # and this line triggers quantization
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
assert mlp.fc1.weight.device.type == "cuda"
assert mlp.fc2.weight.device.type == "cuda"
if memory_efficient_backward:
b1 = torch.randn(16, 8, 32, device="cuda", requires_grad=True, dtype=torch.half)
o1 = mlp(b1)
assert o1.dtype == torch.float16
assert o1.requires_grad
grad_proj = torch.randn_like(o1)
mlp.zero_grad()
(o1 * grad_proj).sum().backward()
grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()
scale = grad_ref.abs().mean()
torch.testing.assert_allclose(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)
idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)
assert (idx == 0).sum().item() <= b1.numel() * 0.005
def test_linear8bitlt_fp32_bias():
# casts model to fp16 -> int8 automatically
l1 = bnb.nn.Linear8bitLt(32, 64, has_fp16_weights=False).cuda()
assert l1.weight.dtype == torch.int8
assert l1.bias.dtype == torch.float32
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
        # the first forward pass casts the fp32 bias to fp16
o1 = l1(b1)
assert l1.bias.dtype == torch.float16
# casts model to fp16 -> int8 automatically
l1 = bnb.nn.Linear8bitLt(32, 64, has_fp16_weights=False, bias=False).cuda()
assert l1.weight.dtype == torch.int8
assert l1.bias is None
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = l1(b1)
assert l1.bias is None
|
bitsandbytes-main
|
tests/test_modules.py
|
import ctypes as ct
import os
import torch
from pathlib import Path
from warnings import warn
from bitsandbytes.cuda_setup.main import CUDASetup
setup = CUDASetup.get_instance()
if not setup.initialized:
setup.run_cuda_setup()
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
setup.print_log_stack()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs above to fix your environment!
        If you cannot find any issues and suspect a bug, please open an issue with details about your environment:
https://github.com/TimDettmers/bitsandbytes/issues''')
lib.cadam32bit_g32
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
|
bitsandbytes-main
|
bitsandbytes/cextension.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import cuda_setup, utils
from .autograd._functions import (
MatmulLtState,
bmm_cublas,
matmul,
matmul_cublas,
mm_cublas,
)
from .cextension import COMPILED_WITH_CUDA
from .nn import modules
if COMPILED_WITH_CUDA:
from .optim import adam
__pdoc__ = {
"libbitsandbytes": False,
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
|
bitsandbytes-main
|
bitsandbytes/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes as ct
import itertools
import operator
import random
import torch
import math
from functools import reduce # Required in Python 3
from typing import Tuple
from torch import Tensor
from .cextension import COMPILED_WITH_CUDA, lib
# math.prod is only available in Python >= 3.8, so provide a fallback
def prod(iterable):
return reduce(operator.mul, iterable, 1)
name2qmap = {}
if COMPILED_WITH_CUDA:
"""C FUNCTIONS FOR OPTIMIZERS"""
str2optimizer32bit = {}
str2optimizer32bit["adam"] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
str2optimizer32bit["momentum"] = (
lib.cmomentum32bit_g32,
lib.cmomentum32bit_g16,
)
str2optimizer32bit["rmsprop"] = (
lib.crmsprop32bit_g32,
lib.crmsprop32bit_g16,
)
str2optimizer32bit["lion"] = (
lib.clion32bit_g32,
lib.clion32bit_g16,
)
str2optimizer32bit["adagrad"] = (
lib.cadagrad32bit_g32,
lib.cadagrad32bit_g16,
)
str2optimizer32bit["lars"] = (
lib.cmomentum32bit_g32,
lib.cmomentum32bit_g16,
)
str2optimizer32bit["lamb"] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
str2optimizer8bit = {}
str2optimizer8bit["adam"] = (
lib.cadam_static_8bit_g32,
lib.cadam_static_8bit_g16,
)
str2optimizer8bit["momentum"] = (
lib.cmomentum_static_8bit_g32,
lib.cmomentum_static_8bit_g16,
)
str2optimizer8bit["rmsprop"] = (
lib.crmsprop_static_8bit_g32,
lib.crmsprop_static_8bit_g16,
)
str2optimizer8bit["lion"] = (
lib.clion_static_8bit_g32,
lib.clion_static_8bit_g16,
)
str2optimizer8bit["lamb"] = (
lib.cadam_static_8bit_g32,
lib.cadam_static_8bit_g16,
)
str2optimizer8bit["lars"] = (
lib.cmomentum_static_8bit_g32,
lib.cmomentum_static_8bit_g16,
)
str2optimizer8bit_blockwise = {}
str2optimizer8bit_blockwise["adam"] = (
lib.cadam_8bit_blockwise_fp32,
lib.cadam_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["momentum"] = (
lib.cmomentum_8bit_blockwise_fp32,
lib.cmomentum_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["rmsprop"] = (
lib.crmsprop_8bit_blockwise_fp32,
lib.crmsprop_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["lion"] = (
lib.clion_8bit_blockwise_fp32,
lib.clion_8bit_blockwise_fp16,
)
str2optimizer8bit_blockwise["adagrad"] = (
lib.cadagrad_8bit_blockwise_fp32,
lib.cadagrad_8bit_blockwise_fp16,
)
class CUBLAS_Context:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.context = {}
# prev_device = torch.cuda.current_device()
# for i in range(torch.cuda.device_count()):
# torch.cuda.set_device(torch.device('cuda', i))
# self.context.append(ct.c_void_p(lib.get_context()))
# torch.cuda.set_device(prev_device)
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def get_context(self, device):
if device.index not in self.context:
prev_device = torch.cuda.current_device()
torch.cuda.set_device(device)
self.context[device.index] = ct.c_void_p(lib.get_context())
torch.cuda.set_device(prev_device)
return self.context[device.index]
class Cusparse_Context:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.context = ct.c_void_p(lib.get_cusparse())
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def create_linear_map(signed=True, total_bits=8, add_zero=True):
sign = (-1.0 if signed else 0.0)
total_values = 2**total_bits
if add_zero or total_bits < 8:
        # add a zero
        # since we simulate fewer bits by having zeros in the data type,
        # we need to center the quantization around zero and as such lose
        # a single value
total_values = (2**total_bits if not signed else 2**total_bits-1)
values = torch.linspace(sign, 1.0, total_values)
gap = 256 - values.numel()
if gap == 0:
return values
else:
l = values.numel()//2
#return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist())
return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist())
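# Editor's note: minimal usage sketch (not part of the original source) showing
# the default signed linear map; runs on CPU only.
def _example_create_linear_map():
    code = create_linear_map(signed=True)
    assert code.numel() == 256   # 255 linspace values padded with a single zero
    assert code.min().item() == -1.0
    assert code.max().item() == 1.0
    return code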
def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):
e = exponent_bits
p = precision_bits
has_sign = 1 if signed else 0
assert e+p == total_bits-has_sign
# the exponent is biased to 2^(e-1) -1 == 0
evalues = []
pvalues = []
for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)):
evalues.append(2**val)
values = []
lst = list(itertools.product([0, 1], repeat=precision_bits))
#for ev in evalues:
bias = 2**(exponent_bits-1)-1
for evalue in range(2**(exponent_bits)):
for bit_pattern in lst:
value = (1 if evalue != 0 else 0)
for i, pval in enumerate(list(bit_pattern)):
value += pval*(2**-(i+1))
if evalue == 0:
# subnormals
value = value*2**-(bias-1)
else:
# normals
value = value*2**-(evalue-bias-2)
values.append(value)
if signed:
values.append(-value)
assert len(values) == 2**total_bits
values.sort()
if total_bits < 8:
gap = 256 - len(values)
for i in range(gap):
values.append(0)
values.sort()
code = torch.Tensor(values)
code /= code.max()
return code
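# Editor's note: minimal usage sketch (not part of the original source) building
# an E5M2-style map (1 sign bit, 5 exponent bits, 2 precision bits); CPU only.
def _example_create_fp8_map():
    code = create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8)
    assert code.numel() == 256
    assert code.max().item() == 1.0   # the map is normalized by its maximum
    return code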
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
"""
Creates the dynamic quantiztion map.
The dynamic data type is made up of a dynamic exponent and
fraction. As the exponent increase from 0 to -7 the number
of bits available for the fraction shrinks.
This is a generalization of the dynamic type where a certain
number of the bits and be reserved for the linear quantization
region (the fraction). n determines the maximum number of
exponent bits.
For more details see
(8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]
"""
data = []
# these are additional items that come from the case
# where all the exponent bits are zero and no
# indicator bit is present
non_sign_bits = total_bits - (1 if signed else 0)
additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1
if not signed:
additional_items = 2 * additional_items
for i in range(max_exponent_bits):
fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1))
boundaries = torch.linspace(0.1, 1, fraction_items)
means = (boundaries[:-1] + boundaries[1:]) / 2.0
data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if signed:
data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if additional_items > 0:
boundaries = torch.linspace(0.1, 1, additional_items + 1)
means = (boundaries[:-1] + boundaries[1:]) / 2.0
data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if signed:
data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
data.append(0)
data.append(1.0)
gap = 256 - len(data)
for i in range(gap):
data.append(0)
data.sort()
return Tensor(data)
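# Editor's note: minimal usage sketch (not part of the original source); the
# dynamic map is the default code used by quantize_blockwise below. CPU only.
def _example_create_dynamic_map():
    code = create_dynamic_map(signed=True)
    assert code.numel() == 256
    assert bool(torch.all(code[1:] >= code[:-1]))   # values are sorted ascending
    return code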
def create_quantile_map(A, total_bits=8):
q = estimate_quantiles(A, num_quantiles=2**total_bits-1)
q = q.tolist()
q.append(0)
gap = 256 - len(q)
for i in range(gap):
q.append(0)
q.sort()
q = Tensor(q)
q = q/q.abs().max()
return q
def get_special_format_str():
if not torch.cuda.is_available(): return 'col_turing'
major, _minor = torch.cuda.get_device_capability()
if major <= 7:
return "col_turing"
if major == 8:
return "col_ampere"
return "col_turing"
def is_on_gpu(tensors):
on_gpu = True
for t in tensors:
if t is None: continue # NULL pointers are fine
on_gpu &= t.device.type == 'cuda'
return on_gpu
def get_ptr(A: Tensor) -> ct.c_void_p:
"""
Get the ctypes pointer from a PyTorch Tensor.
Parameters
----------
    A : torch.Tensor
The PyTorch tensor.
Returns
-------
ctypes.c_void_p
"""
if A is None:
return None
else:
return ct.c_void_p(A.data.data_ptr())
def pre_call(device):
prev_device = torch.cuda.current_device()
torch.cuda.set_device(device)
return prev_device
def post_call(prev_device):
torch.cuda.set_device(prev_device)
def get_transform_func(dtype, orderA, orderOut, transpose=False):
name = f'ctransform_{(8 if dtype == torch.int8 else 32)}_{orderA}_to_{orderOut}_{"t" if transpose else "n"}'
if not hasattr(lib, name):
print(name)
raise ValueError(
f"Transform function not supported: {orderA} to {orderOut} for data type {dtype} and transpose={transpose}"
)
else:
return getattr(lib, name)
def get_transform_buffer(
shape, dtype, device, to_order, from_order="row", transpose=False
):
# init_func = torch.empty
init_func = torch.zeros
dims = len(shape)
if dims == 2:
rows = shape[0]
elif dims == 3:
rows = shape[0] * shape[1]
cols = shape[-1]
state = (shape, to_order)
if transpose:
# swap dims
tmp = rows
rows = cols
cols = tmp
state = (shape[::-1], to_order)
if to_order == "row" or to_order == "col":
return init_func(shape, dtype=dtype, device=device), state
elif to_order == "col32":
# blocks of 32 columns (padded)
cols = 32 * ((cols + 31) // 32)
return init_func((rows, cols), dtype=dtype, device=device), state
elif to_order == "col_turing":
# blocks of 32 columns and 8 rows
cols = 32 * ((cols + 31) // 32)
rows = 8 * ((rows + 7) // 8)
return init_func((rows, cols), dtype=dtype, device=device), state
elif to_order == "col_ampere":
# blocks of 32 columns and 32 rows
cols = 32 * ((cols + 31) // 32)
rows = 32 * ((rows + 31) // 32)
return init_func((rows, cols), dtype=dtype, device=device), state
else:
raise NotImplementedError(f"To_order not supported: {to_order}")
def nvidia_transform(
A,
to_order,
from_order="row",
out=None,
transpose=False,
state=None,
ld=None,
):
if state is None:
state = (A.shape, from_order)
else:
from_order = state[1]
if out is None:
out, new_state = get_transform_buffer(
state[0], A.dtype, A.device, to_order, state[1]
)
else:
new_state = (state[1], to_order)
func = get_transform_func(A.dtype, from_order, to_order, transpose)
shape = state[0]
if len(shape) == 2:
dim1 = ct.c_int32(shape[0])
dim2 = ct.c_int32(shape[1])
elif ld is not None:
n = prod(shape)
dim1 = prod([shape[i] for i in ld])
dim2 = ct.c_int32(n // dim1)
dim1 = ct.c_int32(dim1)
else:
dim1 = ct.c_int32(shape[0] * shape[1])
dim2 = ct.c_int32(shape[2])
ptr = CUBLAS_Context.get_instance().get_context(A.device)
func(ptr, get_ptr(A), get_ptr(out), dim1, dim2)
return out, new_state
def estimate_quantiles(A: Tensor, out: Tensor = None, offset: float = 1 / 512, num_quantiles=256) -> Tensor:
'''
Estimates 256 equidistant quantiles on the input tensor eCDF.
Uses SRAM-Quantiles algorithm to quickly estimate 256 equidistant quantiles
via the eCDF of the input tensor `A`. This is a fast but approximate algorithm
and the extreme quantiles close to 0 and 1 have high variance / large estimation
errors. These large errors can be avoided by using the offset variable which trims
    the distribution. The default offset value of 1/512 ensures minimum entropy encoding -- it
    trims 1/512 (~0.2%) from each side of the distribution. An offset value of 0.01 to 0.02
    usually has a much lower error but is not a minimum entropy encoding. Given an offset
    of 0.02, equidistant points in the range [0.02, 0.98] are used for the quantiles.
Parameters
----------
A : torch.Tensor
The input tensor. Any shape.
out : torch.Tensor
Tensor with the 256 estimated quantiles.
offset : float
The offset for the first and last quantile from 0 and 1. Default: 1/(2*num_quantiles)
num_quantiles : int
The number of equally spaced quantiles.
Returns
-------
torch.Tensor:
The 256 quantiles in float32 datatype.
'''
if A.numel() < 256: raise NotImplementedError(f'Quantile estimation needs at least 256 values in the Tensor, but Tensor had only {A.numel()} values.')
if num_quantiles > 256: raise NotImplementedError(f"Currently only a maximum of 256 equally spaced quantiles are supported, but the argument num_quantiles={num_quantiles}")
if num_quantiles < 256 and offset == 1/(512):
# override default arguments
offset = 1/(2*num_quantiles)
if out is None: out = torch.zeros((256,), dtype=torch.float32, device=A.device)
is_on_gpu([A, out])
device = pre_call(A.device)
if A.dtype == torch.float32:
lib.cestimate_quantiles_fp32(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cestimate_quantiles_fp16(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
else:
raise NotImplementedError(f"Not supported data type {A.dtype}")
post_call(device)
if num_quantiles < 256:
step = round(256/num_quantiles)
idx = torch.linspace(0, 255, num_quantiles).long().to(A.device)
out = out[idx]
return out
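# Editor's note: minimal usage sketch (not part of the original source), assuming
# a CUDA device and a CUDA-enabled build; the offset of 0.02 trades the minimum
# entropy property for lower estimation error, as described in the docstring.
def _example_estimate_quantiles():
    if not torch.cuda.is_available():
        return None
    A = torch.randn(4096, device="cuda")
    q = estimate_quantiles(A, offset=0.02)
    assert q.numel() == 256
    return q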
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
"""
    Quantize tensor A in blocks of `blocksize` values (default 4096).
    Quantizes tensor A by dividing it into blocks of `blocksize` values.
    Then the absolute maximum value within these blocks is calculated
    for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
rand : torch.Tensor
The tensor for stochastic rounding.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
if rand is not None:
is_on_gpu([code, A, out, absmax, rand])
assert blocksize==4096
assert rand.numel() >= 1024
rand_offset = random.randint(0, 1023)
if A.dtype == torch.float32:
lib.cquantize_blockwise_stochastic_fp32(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_stochastic_fp16(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
else:
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
assert rand is None
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out, (absmax, code)
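# Editor's note: minimal usage sketch (not part of the original source), assuming
# a CUDA device and a CUDA-enabled build. The tensor size is a multiple of the
# 4096-value block size, so there is exactly one absmax entry per block.
def _example_quantize_blockwise():
    if not torch.cuda.is_available():
        return None
    A = torch.randn(1024, 1024, device="cuda")
    q, (absmax, code) = quantize_blockwise(A)
    assert q.dtype == torch.uint8 and q.shape == A.shape
    assert absmax.numel() == A.numel() // 4096
    return q, (absmax, code)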
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
    blocks of size `blocksize` (default 4096).
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
quant_state : tuple(torch.Tensor, torch.Tensor)
Tuple of code and absmax values.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if out is None:
out = torch.zeros_like(A, dtype=torch.float32)
if quant_state is None:
quant_state = (absmax, code)
else:
absmax, code = quant_state
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
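# Editor's note: minimal round-trip sketch (not part of the original source),
# assuming a CUDA device and a CUDA-enabled build. The 0.1 error bound is a
# loose illustrative threshold, not a documented guarantee.
def _example_blockwise_round_trip():
    if not torch.cuda.is_available():
        return None
    A = torch.randn(4096, device="cuda")
    q, quant_state = quantize_blockwise(A)
    A_deq = dequantize_blockwise(q, quant_state=quant_state)
    assert (A - A_deq).abs().mean().item() < 0.1
    return A_deq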
def quantize(A: Tensor, code: Tensor = None, out: Tensor = None) -> Tensor:
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
code = code.to(A.device)
absmax = torch.abs(A).max()
inp = A / absmax
out = quantize_no_absmax(inp, code, out)
return out, (absmax, code)
def dequantize(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
) -> Tensor:
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
code = code.to(A.device)
if quant_state is None:
quant_state = (absmax, code)
out = dequantize_no_absmax(A, quant_state[1], out)
return out * quant_state[0]
def quantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor:
'''
Quantizes input tensor to 8-bit.
Quantizes the 32-bit input tensor `A` to the 8-bit output tensor
`out` using the quantization map `code`.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
out : torch.Tensor, optional
The output tensor. Needs to be of type byte.
Returns
-------
torch.Tensor:
Quantized 8-bit tensor.
'''
if out is None: out = torch.zeros_like(A, dtype=torch.uint8)
is_on_gpu([A, out])
lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))
return out
def dequantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor:
'''
Dequantizes the 8-bit tensor to 32-bit.
Dequantizes the 8-bit tensor `A` to the 32-bit tensor `out` via
the quantization map `code`.
Parameters
----------
A : torch.Tensor
The 8-bit input tensor.
code : torch.Tensor
The quantization map.
out : torch.Tensor
The 32-bit output tensor.
Returns
-------
torch.Tensor:
32-bit output tensor.
'''
if out is None: out = torch.zeros_like(A, dtype=torch.float32)
is_on_gpu([code, A, out])
lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))
return out
def optimizer_update_32bit(
optimizer_name: str,
g: Tensor,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
"""
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
skip_zeros : bool
Whether to skip zero-valued gradients or not (default: False).
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementedError(
f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}'
)
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
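# Editor's note: hedged sketch of a single in-place 32-bit Adam update (not part
# of the original source), assuming a CUDA device and a CUDA-enabled build. The
# hyperparameters are illustrative only; training code would normally go through
# the bnb.optim optimizer classes rather than this kernel wrapper directly.
def _example_optimizer_update_32bit():
    if not torch.cuda.is_available():
        return None
    p = torch.randn(256, device="cuda")
    g = 0.01 * torch.randn_like(p)
    state1 = torch.zeros_like(p)   # first moment
    state2 = torch.zeros_like(p)   # second moment
    optimizer_update_32bit("adam", g, p, state1, beta1=0.9, eps=1e-8, step=1,
                           lr=1e-3, state2=state2, beta2=0.999)
    return p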
def optimizer_update_8bit(
optimizer_name: str,
g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
max1: Tensor,
max2: Tensor,
new_max1: Tensor,
new_max2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
) -> None:
"""
Performs an inplace Adam update.
Universal Adam update for 32/8-bit state and 32/16-bit gradients/weights.
Uses AdamW formulation if weight decay > 0.0.
Parameters
----------
optimizer_name : str
The name of the optimizer. Choices {adam, momentum}
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Adam state 1.
state2 : torch.Tensor
Adam state 2.
beta1 : float
Adam beta1.
beta2 : float
Adam beta2.
eps : float
Adam epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
qmap1 : torch.Tensor
Quantization map for first Adam state.
qmap2 : torch.Tensor
Quantization map for second Adam state.
max1 : torch.Tensor
Max value for first Adam state update.
max2 : torch.Tensor
Max value for second Adam state update.
new_max1 : torch.Tensor
Max value for the next Adam update of the first state.
new_max2 : torch.Tensor
Max value for the next Adam update of the second state.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit[optimizer_name][0](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(max1),
get_ptr(max2),
get_ptr(new_max1),
get_ptr(new_max2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit[optimizer_name][1](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(max1),
get_ptr(max2),
get_ptr(new_max1),
get_ptr(new_max2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
def optimizer_update_8bit_blockwise(
optimizer_name: str,
g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
def percentile_clipping(
grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5
):
"""Applies percentile clipping
grad: torch.Tensor
The gradient tensor.
gnorm_vec: torch.Tensor
Vector of gradient norms. 100 elements expected.
step: int
        The current optimization step (number of past gradient norms).
    percentile: int
        The percentile of the gradient norm history used as the clipping value.
    """
is_on_gpu([grad, gnorm_vec])
if grad.dtype == torch.float32:
lib.cpercentile_clipping_g32(
get_ptr(grad),
get_ptr(gnorm_vec),
ct.c_int32(step),
ct.c_int32(grad.numel()),
)
elif grad.dtype == torch.float16:
lib.cpercentile_clipping_g16(
get_ptr(grad),
get_ptr(gnorm_vec),
ct.c_int32(step),
ct.c_int32(grad.numel()),
)
else:
raise ValueError(f"Gradient type {grad.dtype} not supported!")
current_gnorm = torch.sqrt(gnorm_vec[step % 100])
vals, idx = torch.sort(gnorm_vec)
clip_value = torch.sqrt(vals[percentile])
gnorm_scale = 1.0
if current_gnorm > clip_value:
gnorm_scale = clip_value / current_gnorm
return current_gnorm, clip_value, gnorm_scale
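# Editor's note: minimal usage sketch (not part of the original source), assuming
# a CUDA device and a CUDA-enabled build. gnorm_vec keeps a rolling history of
# 100 squared gradient norms that the kernel updates in place.
def _example_percentile_clipping():
    if not torch.cuda.is_available():
        return None
    gnorm_vec = torch.zeros(100, device="cuda")
    grad = torch.randn(512, device="cuda")
    for step in range(1, 6):
        current_gnorm, clip_value, gnorm_scale = percentile_clipping(grad, gnorm_vec, step, percentile=5)
    return current_gnorm, clip_value, gnorm_scale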
def histogram_scatter_add_2d(
histogram: Tensor, index1: Tensor, index2: Tensor, source: Tensor
):
assert len(histogram.shape) == 2
assert histogram.dtype == torch.float32
assert source.dtype == torch.float32
assert index1.dtype == torch.int32
assert index2.dtype == torch.int32
assert histogram.device.type == "cuda"
assert index1.device.type == "cuda"
assert index2.device.type == "cuda"
assert source.device.type == "cuda"
maxdim1 = ct.c_int32(histogram.shape[0])
n = ct.c_int32(index1.numel())
is_on_gpu([histogram, index1, index2, source])
lib.chistogram_scatter_add_2d(get_ptr(histogram), get_ptr(index1), get_ptr(index2), get_ptr(source), maxdim1, n)
def check_matmul(A, B, out, transposed_A, transposed_B, expected_type=torch.int8):
if not torch.cuda.is_initialized(): torch.cuda.init()
if A.dtype != expected_type or B.dtype != expected_type:
raise TypeError(
f"Expected torch.int8 input tensors A and B, but got {A.dtype} and {B.dtype}"
)
sA = A.shape
sB = B.shape
tA = transposed_A
tB = transposed_B
correct = True
if len(sA) == 2 and len(sB) == 2:
if not tA and not tB and A.shape[1] != B.shape[0]:
correct = False
elif tA and not tB and A.shape[0] != B.shape[0]:
correct = False
elif tA and tB and A.shape[0] != B.shape[1]:
correct = False
elif not tA and tB and A.shape[1] != B.shape[1]:
correct = False
elif len(sA) == 3 and len(sB) == 2:
if not tA and not tB and A.shape[2] != B.shape[0]:
correct = False
elif tA and not tB and A.shape[1] != B.shape[0]:
correct = False
elif tA and tB and A.shape[1] != B.shape[1]:
correct = False
elif not tA and tB and A.shape[2] != B.shape[1]:
correct = False
elif len(sA) == 3 and len(sB) == 3:
if not tA and not tB and A.shape[2] != B.shape[1]:
correct = False
elif tA and not tB and A.shape[1] != B.shape[1]:
correct = False
elif tA and tB and A.shape[1] != B.shape[2]:
correct = False
elif not tA and tB and A.shape[2] != B.shape[2]:
correct = False
if out is not None:
sout = out.shape
# special case common in backprop
if not correct and len(sA) == 3 and len(sB) == 3:
if (
sout[0] == sA[2]
and sout[1] == sB[2]
and sA[0] == sB[0]
and sA[1] == sB[1]
):
correct = True
else:
if len(sA) == 2 and len(sB) == 2:
if not tA and not tB:
sout = (sA[0], sB[1])
elif tA and tB:
sout = (sA[1], sB[0])
elif tA and not tB:
sout = (sA[1], sB[1])
elif not tA and tB:
sout = (sA[0], sB[0])
elif len(sA) == 3 and len(sB) == 2:
if not tA and not tB:
sout = (sA[0], sA[1], sB[1])
elif tA and tB:
sout = (sA[0], sA[2], sB[0])
elif tA and not tB:
sout = (sA[0], sA[2], sB[1])
elif not tA and tB:
sout = (sA[0], sA[1], sB[0])
elif len(sA) == 3 and len(sB) == 3:
if not tA and not tB:
sout = (sA[0], sA[1], sB[2])
elif tA and tB:
sout = (sA[0], sA[2], sB[1])
elif tA and not tB:
sout = (sA[0], sA[2], sB[2])
elif not tA and tB:
sout = (sA[0], sA[1], sB[1])
if not correct:
raise ValueError(
f"Tensor dimensions incorrect for matrix mulitiplication: A x B: {sA} x {sB} with transpose for A x B: {tA} x {tB}."
)
return sout
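# Editor's note: minimal usage sketch (not part of the original source). Only
# shapes and dtypes are inspected, but the helper initializes CUDA, so the
# sketch is guarded on GPU availability.
def _example_check_matmul():
    if not torch.cuda.is_available():
        return None
    A = torch.zeros(2, 3, 4, dtype=torch.int8)
    B = torch.zeros(4, 5, dtype=torch.int8)
    sout = check_matmul(A, B, out=None, transposed_A=False, transposed_B=False)
    assert sout == (2, 3, 5)   # inferred output shape for the bsi,io->bso case
    return sout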
def igemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
):
sout = check_matmul(A, B, out, transposed_A, transposed_B)
if out is None:
out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)
if len(A.shape) == 3 and len(B.shape) == 3:
if A.shape[0] == B.shape[0] and A.shape[2] == B.shape[1]:
return batched_igemm(A, B, out)
sA = A.shape
sB = B.shape
if transposed_A and len(sA) == 2:
sA = (sA[1], sA[0])
elif transposed_A and len(sA) == 3:
sA = (sA[0], sA[2], sA[0])
if transposed_B and len(sB) == 2:
sB = (sB[1], sB[0])
elif transposed_B and len(sB) == 3:
sB = (sB[0], sB[2], sB[0])
    # this is a mess: cuBLAS expects column-major data, but PyTorch is row-major.
    # So to perform the matrix multiplication, we have to treat A, B, and C as
    # their transposes (the transpose of a row-major matrix is the same memory
    # interpreted as column-major). This means we compute B^T A^T = C^T and we
    # explicitly switch the dimensions of each of these matrices in the
    # arguments passed to cuBLAS.
# column major: A @ B = C: [m, k] @ [k, n] = [m, n]
# row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n]
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
ldb = A.stride()[1 if transposed_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA[0] * sA[1]
ldb = sA[2]
m = sB[1]
k = sB[0]
lda = B.stride()[(1 if transposed_B else 0)]
ldc = sB[1]
elif len(sB) == 3:
# special case
assert len(sA) == 3
if not (sA[0] == sB[0] and sA[1] == sB[1]):
raise ValueError(
f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed_A = True
transposed_B = False
m = sB[2]
n = sA[2]
k = sB[0] * sB[1]
lda = m
ldb = sA[2]
ldc = m
ptr = CUBLAS_Context.get_instance().get_context(A.device)
# B^T @ A^T = C^T
# [km, nk -> mn]
is_on_gpu([B, A, out])
lib.cigemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k),
get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc))
return out
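# Editor's note: minimal usage sketch (not part of the original source), assuming
# a CUDA device and a CUDA-enabled build. The int8 x int8 -> int32 product is
# exact, so it can be checked against a float32 reference for these small sizes.
def _example_igemm():
    if not torch.cuda.is_available():
        return None
    A = torch.randint(-128, 127, (8, 16), dtype=torch.int8, device="cuda")
    B = torch.randint(-128, 127, (16, 32), dtype=torch.int8, device="cuda")
    out = igemm(A, B)
    assert torch.equal(out.float(), A.float() @ B.float())
    return out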
def batched_igemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
):
if not len(A.shape) == 3 or not len(B.shape) == 3:
raise ValueError(
f"Expected 3-dimensional tensors for bmm, but got shapes A and B: {A.shape} and {B.shape}"
)
sout = check_matmul(A, B, out, transposed_A, transposed_B)
if out is None:
out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)
if B.is_contiguous():
lda = B.stride()[1]
transposed_A = False
else:
s = B.stride()
if s[0] != B.shape[0]:
B = B.contiguous()
lda = B.stride()[1]
elif s[2] == B.shape[1]:
transposed_A = True
lda = B.stride()[2]
else:
if s[2] == 1:
B = B.contiguous()
lda = B.stride()[1]
elif s[1] == 1:
B = B.contiguous()
lda = B.stride()[1]
else:
B = B.contiguous()
lda = B.stride()[1]
if A.is_contiguous():
ldb = A.stride()[1]
transposed_B = False
else:
s = A.stride()
if s[0] != A.shape[0]:
A = A.contiguous()
ldb = A.stride()[1]
transposed_B = False
elif s[2] == A.shape[1]:
ldb = A.stride()[2]
transposed_B = True
else:
A = A.contiguous()
ldb = A.stride()[1]
transposed_B = False
    # this is a mess: cuBLAS expects column-major data, but PyTorch is row-major.
    # So to perform the matrix multiplication, we have to treat A, B, and C as
    # their transposes (the transpose of a row-major matrix is the same memory
    # interpreted as column-major). This means we compute B^T A^T = C^T and we
    # explicitly switch the dimensions of each of these matrices in the
    # arguments passed to cuBLAS.
# column major: A @ B = C: [batch, m, k] @ [batch, k, n] = [batch, m, n]
# row major: B^T @ A^T = C^T: [batch, m, k] @ [batch, k, n] = [batch, m, n]
# column major with row major layout: B^T @ A^T = C^T: [batch, k, m] @ [batch, n, k] = [batch, n, m]
num_batch = A.shape[0]
n = A.shape[1]
m = B.shape[2]
k = B.shape[1]
ldc = m
strideA = B.shape[1] * B.shape[2]
strideB = A.shape[1] * A.shape[2]
strideC = A.shape[1] * B.shape[2]
ptr = CUBLAS_Context.get_instance().get_context(A.device)
is_on_gpu([B, A, out])
lib.cbatched_igemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k),
get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc),
ct.c_long(strideA), ct.c_long(strideB), ct.c_long(strideC), ct.c_uint32(num_batch))
return out
def igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32):
shapeA = SA[0]
shapeB = SB[0]
dimsA = len(shapeA)
dimsB = len(shapeB)
assert dimsB == 2, 'Only two dimensional matrices are supported for argument B'
if dimsA == 2:
m = shapeA[0]
elif dimsA == 3:
m = shapeA[0] * shapeA[1]
rows = n = shapeB[0]
assert prod(list(shapeA)) > 0, f'Input tensor dimensions need to be > 0: {shapeA}'
# if the tensor is empty, return a transformed empty tensor with the right dimensions
if shapeA[0] == 0 and dimsA == 2:
return torch.empty((0, shapeB[0]), device=A.device, dtype=torch.float16)
elif shapeA[1] == 0 and dimsA == 3:
return torch.empty(tuple(shapeA[:2] + [shapeB[0]]), device=A.device, dtype=torch.float16)
if dimsA == 2 and out is None:
out, Sout = get_transform_buffer(
(shapeA[0], shapeB[0]), dtype, A.device, "col32", "row"
)
elif dimsA == 3 and out is None:
out, Sout = get_transform_buffer(
(shapeA[0], shapeA[1], shapeB[0]), dtype, A.device, "col32", "row"
)
assert dimsB != 3, "len(B.shape)==3 not supported"
assert A.device.type == "cuda"
assert B.device.type == "cuda"
assert A.dtype == torch.int8
assert B.dtype == torch.int8
assert out.dtype == dtype
assert SA[1] == "col32"
assert SB[1] in ["col_turing", "col_ampere"]
assert Sout[1] == "col32"
assert (
shapeA[-1] == shapeB[-1]
), f"Matmullt only supports A @ B^T. Inner matrix dimensions do not match: A @ B = {shapeA} @ {shapeB}"
formatB = SB[1]
prev_device = A.device
torch.cuda.set_device(A.device)
ptr = CUBLAS_Context.get_instance().get_context(A.device)
ptrA = get_ptr(A)
ptrB = get_ptr(B)
ptrC = get_ptr(out)
k = shapeA[-1]
lda = ct.c_int32(m * 32)
if formatB == "col_turing":
# turing: tiles with rows filled up to multiple of 8 rows by 32 columns
# n = rows
ldb = ct.c_int32(((rows + 7) // 8) * 8 * 32)
else:
# ampere: tiles with rows filled up to multiple of 32 rows by 32 columns
# n = rows
ldb = ct.c_int32(((rows + 31) // 32) * 32 * 32)
ldc = ct.c_int32(m * 32)
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
has_error = 0
ptrRowScale = get_ptr(None)
is_on_gpu([A, B, out])
if formatB == 'col_turing':
if dtype == torch.int32:
has_error = lib.cigemmlt_turing_32(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
else:
has_error = lib.cigemmlt_turing_8(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
elif formatB == "col_ampere":
if dtype == torch.int32:
has_error = lib.cigemmlt_ampere_32(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
else:
has_error = lib.cigemmlt_ampere_8(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
if has_error == 1:
print(f'A: {shapeA}, B: {shapeB}, C: {Sout[0]}; (lda, ldb, ldc): {(lda, ldb, ldc)}; (m, n, k): {(m, n, k)}')
raise Exception('cublasLt ran into an error!')
torch.cuda.set_device(prev_device)
return out, Sout
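# A hedged sketch, not part of the original file: the int8 matmul pipeline that
# MatMul8bitLt uses -- row-wise quantization, transform to col32 and the tiled
# GPU format, the igemmlt kernel, then fp16 dequantization. A_fp16 and B_fp16
# are assumed to be fp16 CUDA tensors, and get_special_format_str is assumed to
# be the helper defined earlier in this module that returns "col_turing" or
# "col_ampere" for the current GPU.
def _igemmlt_pipeline_sketch(A_fp16, B_fp16):
    CA, _, SCA, _, _ = double_quant(A_fp16)      # int8 activations + row stats
    CB, _, SCB, _, _ = double_quant(B_fp16)      # int8 weights + row stats
    C32A, SA = transform(CA, "col32")
    CxB, SB = transform(CB, to_order=get_special_format_str())
    out32, Sout32 = igemmlt(C32A, CxB, SA, SB)
    return mm_dequant(out32, Sout32, SCA, SCB)   # fp16 result of A @ B^T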
def mm_dequant(
A,
quant_state,
row_stats,
col_stats,
out=None,
new_row_stats=None,
new_col_stats=None,
bias=None
):
assert A.dtype == torch.int32
if bias is not None: assert bias.dtype == torch.float16
out_shape = quant_state[0]
if len(out_shape) == 3:
out_shape = (out_shape[0] * out_shape[1], out_shape[2])
if out is None:
out = torch.empty(out_shape, dtype=torch.float16, device=A.device)
if new_row_stats is None:
new_row_stats = torch.empty(
out_shape[0], dtype=torch.float32, device=A.device
)
if new_col_stats is None:
new_col_stats = torch.empty(
out_shape[1], dtype=torch.float32, device=A.device
)
assert (
new_row_stats.shape[0] == row_stats.shape[0]
), f"{new_row_stats.shape} vs {row_stats.shape}"
assert (
new_col_stats.shape[0] == col_stats.shape[0]
), f"{new_col_stats.shape} vs {col_stats.shape}"
prev_device = pre_call(A.device)
ptrA = get_ptr(A)
ptrOut = get_ptr(out)
ptrRowStats = get_ptr(row_stats)
ptrColStats = get_ptr(col_stats)
ptrNewRowStats = get_ptr(new_row_stats)
ptrNewColStats = get_ptr(new_col_stats)
ptrBias = get_ptr(bias)
numRows = ct.c_int32(out_shape[0])
numCols = ct.c_int32(out_shape[1])
is_on_gpu([A, row_stats, col_stats, out, new_row_stats, new_col_stats, bias])
lib.cdequant_mm_int32_fp16(ptrA, ptrRowStats, ptrColStats, ptrOut, ptrNewRowStats, ptrNewColStats, ptrBias, numRows, numCols)
post_call(prev_device)
return out
def get_colrow_absmax(
A, row_stats=None, col_stats=None, nnz_block_ptr=None, threshold=0.0
):
assert A.dtype == torch.float16
device = A.device
cols = A.shape[-1]
if len(A.shape) == 3:
rows = A.shape[0] * A.shape[1]
else:
rows = A.shape[0]
col_tiles = (cols + 255) // 256
tiled_rows = ((rows + 15) // 16) * 16
if row_stats is None:
row_stats = torch.empty(
(rows,), dtype=torch.float32, device=device
).fill_(-50000.0)
if col_stats is None:
col_stats = torch.empty(
(cols,), dtype=torch.float32, device=device
).fill_(-50000.0)
if nnz_block_ptr is None and threshold > 0.0:
nnz_block_ptr = torch.zeros(
((tiled_rows * col_tiles) + 1,), dtype=torch.int32, device=device
)
ptrA = get_ptr(A)
ptrRowStats = get_ptr(row_stats)
ptrColStats = get_ptr(col_stats)
ptrNnzrows = get_ptr(nnz_block_ptr)
rows = ct.c_int32(rows)
cols = ct.c_int32(cols)
prev_device = pre_call(A.device)
is_on_gpu([A, row_stats, col_stats, nnz_block_ptr])
lib.cget_col_row_stats(ptrA, ptrRowStats, ptrColStats, ptrNnzrows, ct.c_float(threshold), rows, cols)
post_call(prev_device)
if threshold > 0.0:
nnz_block_ptr.cumsum_(0)
return row_stats, col_stats, nnz_block_ptr
class COOSparseTensor:
def __init__(self, rows, cols, nnz, rowidx, colidx, values):
assert rowidx.dtype == torch.int32
assert colidx.dtype == torch.int32
assert values.dtype == torch.float16
assert values.numel() == nnz
assert rowidx.numel() == nnz
assert colidx.numel() == nnz
self.rows = rows
self.cols = cols
self.nnz = nnz
self.rowidx = rowidx
self.colidx = colidx
self.values = values
class CSRSparseTensor:
def __init__(self, rows, cols, nnz, rowptr, colidx, values):
assert rowptr.dtype == torch.int32
assert colidx.dtype == torch.int32
assert values.dtype == torch.float16
assert values.numel() == nnz
assert colidx.numel() == nnz
assert rowptr.numel() == rows + 1
self.rows = rows
self.cols = cols
self.nnz = nnz
self.rowptr = rowptr
self.colidx = colidx
self.values = values
class CSCSparseTensor:
def __init__(self, rows, cols, nnz, colptr, rowidx, values):
assert colptr.dtype == torch.int32
assert rowidx.dtype == torch.int32
assert values.dtype == torch.float16
assert values.numel() == nnz
assert rowidx.numel() == nnz
assert colptr.numel() == cols + 1
self.rows = rows
self.cols = cols
self.nnz = nnz
self.colptr = colptr
self.rowidx = rowidx
self.values = values
def coo2csr(cooA):
values, counts = torch.unique(cooA.rowidx, return_counts=True)
values.add_(1)
rowptr = torch.zeros(
(cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device
)
rowptr.scatter_(index=values.long(), src=counts.int(), dim=0)
rowptr.cumsum_(0)
return CSRSparseTensor(
cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values
)
def coo2csc(cooA):
val, col2rowidx = torch.sort(cooA.colidx)
rowidx = cooA.rowidx[col2rowidx]
values = cooA.values[col2rowidx]
colvalues, counts = torch.unique(val, return_counts=True)
colvalues.add_(1)
colptr = torch.zeros(
(cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device
)
colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)
colptr.cumsum_(0)
return CSCSparseTensor(
cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values
)
def coo_zeros(rows, cols, nnz, device, dtype=torch.half):
rowidx = torch.zeros((nnz,), dtype=torch.int32, device=device)
colidx = torch.zeros((nnz,), dtype=torch.int32, device=device)
values = torch.zeros((nnz,), dtype=dtype, device=device)
return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values)
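# A hedged sketch, not part of the original file: build a tiny COO tensor with
# coo_zeros, fill its sparsity pattern, and convert it with the helpers above.
# Index tensors are int32 and values are fp16, matching the asserts in the
# sparse tensor classes.
def _coo_conversion_sketch(device="cpu"):
    cooA = coo_zeros(4, 4, 2, device)
    cooA.rowidx[:] = torch.tensor([0, 2], dtype=torch.int32, device=device)
    cooA.colidx[:] = torch.tensor([1, 3], dtype=torch.int32, device=device)
    cooA.values[:] = torch.tensor([1.0, 2.0], dtype=torch.half, device=device)
    csrA = coo2csr(cooA)  # rowptr has length rows + 1
    cscA = coo2csc(cooA)  # colptr has length cols + 1
    return csrA, cscA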
def double_quant(
A, col_stats=None, row_stats=None, out_col=None, out_row=None, threshold=0.0
):
device = A.device
assert A.dtype == torch.half
assert device.type == "cuda"
prev_device = pre_call(A.device)
cols = A.shape[-1]
if len(A.shape) == 3:
rows = A.shape[0] * A.shape[1]
else:
rows = A.shape[0]
if row_stats is None or col_stats is None:
row_stats, col_stats, nnz_row_ptr = get_colrow_absmax(
A, threshold=threshold
)
if out_col is None:
out_col = torch.zeros(A.shape, device=device, dtype=torch.int8)
if out_row is None:
out_row = torch.zeros(A.shape, device=device, dtype=torch.int8)
coo_tensor = None
ptrA = get_ptr(A)
ptrColStats = get_ptr(col_stats)
ptrRowStats = get_ptr(row_stats)
ptrOutCol = get_ptr(out_col)
ptrOutRow = get_ptr(out_row)
is_on_gpu([A, col_stats, row_stats, out_col, out_row])
if threshold > 0.0:
nnz = nnz_row_ptr[-1].item()
if nnz > 0:
coo_tensor = coo_zeros(
A.shape[0], A.shape[1], nnz_row_ptr[-1].item(), device
)
ptrRowIdx = get_ptr(coo_tensor.rowidx)
ptrColIdx = get_ptr(coo_tensor.colidx)
ptrVal = get_ptr(coo_tensor.values)
ptrRowPtr = get_ptr(nnz_row_ptr)
lib.cdouble_rowcol_quant(
ptrA,
ptrRowStats,
ptrColStats,
ptrOutCol,
ptrOutRow,
ptrRowIdx,
ptrColIdx,
ptrVal,
ptrRowPtr,
ct.c_float(threshold),
ct.c_int32(rows),
ct.c_int32(cols),
)
val, idx = torch.sort(coo_tensor.rowidx)
coo_tensor.rowidx = val
coo_tensor.colidx = coo_tensor.colidx[idx]
coo_tensor.values = coo_tensor.values[idx]
else:
lib.cdouble_rowcol_quant(
ptrA,
ptrRowStats,
ptrColStats,
ptrOutCol,
ptrOutRow,
None,
None,
None,
None,
ct.c_float(0.0),
ct.c_int32(rows),
ct.c_int32(cols),
)
else:
lib.cdouble_rowcol_quant(
ptrA,
ptrRowStats,
ptrColStats,
ptrOutCol,
ptrOutRow,
None,
None,
None,
None,
ct.c_float(threshold),
ct.c_int32(rows),
ct.c_int32(cols),
)
post_call(prev_device)
return out_row, out_col, row_stats, col_stats, coo_tensor
def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None):
prev_device = pre_call(A.device)
if state is None: state = (A.shape, from_order)
else: from_order = state[1]
if out is None: out, new_state = get_transform_buffer(state[0], A.dtype, A.device, to_order, state[1], transpose)
else: new_state = (state[0], to_order) # (shape, order)
shape = state[0]
if len(shape) == 2:
dim1 = ct.c_int32(shape[0])
dim2 = ct.c_int32(shape[1])
else:
dim1 = ct.c_int32(shape[0] * shape[1])
dim2 = ct.c_int32(shape[2])
is_on_gpu([A, out])
if to_order == 'col32':
if transpose:
lib.ctransform_row2col32T(get_ptr(A), get_ptr(out), dim1, dim2)
else:
lib.ctransform_row2col32(get_ptr(A), get_ptr(out), dim1, dim2)
elif to_order == "col_turing":
if transpose:
lib.ctransform_row2turingT(get_ptr(A), get_ptr(out), dim1, dim2)
else:
lib.ctransform_row2turing(get_ptr(A), get_ptr(out), dim1, dim2)
elif to_order == "col_ampere":
if transpose:
lib.ctransform_row2ampereT(get_ptr(A), get_ptr(out), dim1, dim2)
else:
lib.ctransform_row2ampere(get_ptr(A), get_ptr(out), dim1, dim2)
elif to_order == "row":
if from_order == "col_turing":
lib.ctransform_turing2row(get_ptr(A), get_ptr(out), dim1, dim2)
elif from_order == "col_ampere":
lib.ctransform_ampere2row(get_ptr(A), get_ptr(out), dim1, dim2)
else:
raise NotImplementedError(f'Transform function not implemented: From {from_order} to {to_order}')
post_call(prev_device)
return out, new_state
def spmm_coo(cooA, B, out=None):
if out is None:
out = torch.empty(
(cooA.rows, B.shape[1]), device=B.device, dtype=B.dtype
)
nnz = cooA.nnz
assert cooA.rowidx.numel() == nnz
assert cooA.colidx.numel() == nnz
assert cooA.values.numel() == nnz
assert cooA.cols == B.shape[0]
transposed_B = False if B.is_contiguous() else True
ldb = B.stride()[(1 if transposed_B else 0)]
ldc = B.shape[1]
ptr = Cusparse_Context.get_instance().context
ptrRowidx = get_ptr(cooA.rowidx)
ptrColidx = get_ptr(cooA.colidx)
ptrValues = get_ptr(cooA.values)
ptrB = get_ptr(B)
ptrC = get_ptr(out)
cnnz = ct.c_int32(cooA.nnz)
crowsA = ct.c_int32(cooA.rows)
ccolsA = ct.c_int32(cooA.cols)
ccolsB = ct.c_int32(B.shape[1])
cldb = ct.c_int32(ldb)
cldc = ct.c_int32(ldc)
is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out])
lib.cspmm_coo(ptr, ptrRowidx, ptrColidx, ptrValues, cnnz, crowsA, ccolsA, ccolsB, cldb, ptrB, cldc, ptrC, ct.c_bool(transposed_B))
return out
def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
if out is None:
out = torch.zeros(
(cooA.rows, B.shape[1]), device=B.device, dtype=cooA.values.dtype
)
nnz = cooA.nnz
assert cooA.rowidx.numel() == nnz
assert cooA.colidx.numel() == nnz
assert cooA.values.numel() == nnz
assert cooA.cols == B.shape[0], f"{cooA.cols} vs {B.shape}"
transposed_B = False if B.is_contiguous() else True
ldb = B.stride()[(1 if transposed_B else 0)]
ldc = B.shape[1]
values, counts = torch.unique(cooA.rowidx, return_counts=True)
offset = counts.cumsum(0).int()
max_count, max_idx = torch.sort(counts, descending=True)
max_idx = max_idx.int()
max_count = max_count.int()
    assert (
        max_count[0] <= 32
    ), f"Current max count per row is 32 but found {max_count[0]}."
assert B.dtype in [torch.float16, torch.int8]
ptrOffset = get_ptr(offset)
ptrMaxCount = get_ptr(max_count)
ptrMaxIdx = get_ptr(max_idx)
ptrRowidx = get_ptr(cooA.rowidx)
ptrColidx = get_ptr(cooA.colidx)
ptrValues = get_ptr(cooA.values)
ptrB = get_ptr(B)
ptrC = get_ptr(out)
ptrDequantStats = get_ptr(dequant_stats)
cnnz_rows = ct.c_int32(counts.numel())
cnnz = ct.c_int32(cooA.nnz)
crowsA = ct.c_int32(cooA.rows)
ccolsA = ct.c_int32(cooA.cols)
crowsB = ct.c_int32(B.shape[1])
ccolsB = ct.c_int32(B.shape[1])
cldb = ct.c_int32(ldb)
cldc = ct.c_int32(ldc)
# print(cooA.rowidx[:64])
# print(cooA.colidx[:64].sort()[0])
is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out, dequant_stats])
if B.dtype == torch.float16:
lib.cspmm_coo_very_sparse_naive_fp16(
ptrMaxCount,
ptrMaxIdx,
ptrOffset,
ptrRowidx,
ptrColidx,
ptrValues,
ptrB,
ptrC,
ptrDequantStats,
cnnz_rows,
cnnz,
crowsA,
crowsB,
ccolsB,
)
elif B.dtype == torch.int8:
lib.cspmm_coo_very_sparse_naive_int8(
ptrMaxCount,
ptrMaxIdx,
ptrOffset,
ptrRowidx,
ptrColidx,
ptrValues,
ptrB,
ptrC,
ptrDequantStats,
cnnz_rows,
cnnz,
crowsA,
crowsB,
ccolsB,
)
    # other dtypes are rejected by the B.dtype assert above
return out
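# A hedged usage sketch, not part of the original file: multiply a COO sparse
# matrix with a dense fp16 matrix via the cuSPARSE-backed spmm_coo above. The
# sparsity pattern here is arbitrary; cooA.cols must equal B.shape[0].
def _spmm_coo_sketch():
    cooA = coo_zeros(8, 16, 4, "cuda")
    cooA.rowidx[:] = torch.tensor([0, 1, 2, 3], dtype=torch.int32, device="cuda")
    cooA.colidx[:] = torch.tensor([0, 4, 8, 12], dtype=torch.int32, device="cuda")
    cooA.values[:] = torch.ones(4, dtype=torch.half, device="cuda")
    B = torch.randn(16, 32, dtype=torch.float16, device="cuda")
    return spmm_coo(cooA, B)  # dense (8, 32) fp16 output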
C = 127.0
def vectorwise_quant(x, dim=1, quant_type="vector"):
if quant_type == "linear":
max1 = torch.abs(x).max().float()
xq = torch.round(x / max1 * 127).to(torch.int8)
return xq, max1
elif quant_type in ["vector", "row"]:
max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
xq = torch.round(x * (C / max1)).to(torch.int8)
return xq, max1
elif quant_type == "zeropoint":
dtype = x.dtype
x = x.float()
dyna = x.max() - x.min()
if dyna == 0:
dyna = 1
qx = 255.0 / dyna
minx = x.min()
zpx = torch.round(minx * qx)
x = torch.round(qx * x - zpx) + zpx
return x, qx
elif quant_type in ["vector-zeropoint", "row-zeropoint"]:
dtype = x.dtype
x = x.float()
dyna = torch.amax(x, dim=dim, keepdim=True) - torch.amin(
x, dim=dim, keepdim=True
)
dyna[dyna == 0] = 1
qx = 255.0 / dyna
minx = torch.amin(x, dim=dim, keepdim=True)
zpx = torch.round(minx * qx)
x = torch.round(qx * x - zpx) + zpx
return x, qx
elif quant_type == "truncated-vector":
with torch.no_grad():
absx = torch.abs(x)
max1 = torch.amax(absx, dim=dim, keepdim=True)
max1 = max1 * 0.7
idx = absx > max1.expand_as(absx)
sign = torch.sign(x[idx])
x[idx] = max1.expand_as(absx)[idx] * sign
xq = torch.round(x / max1 * C).to(torch.int8)
return xq, max1
else:
return None
def vectorwise_dequant(xq, max1, quant_type="vector"):
if quant_type == "vector":
x = (xq / C * max1).to(torch.float32)
return x
else:
return None
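# A hedged sketch, not part of the original file: round-trip a tensor through
# row-wise ("vector") int8 quantization. The per-element reconstruction error is
# bounded by roughly half a quantization step, i.e. max1 / (2 * C) per row.
def _vectorwise_roundtrip_sketch():
    x = torch.randn(64, 128)
    xq, max1 = vectorwise_quant(x, dim=1, quant_type="vector")
    x_hat = vectorwise_dequant(xq, max1, quant_type="vector")
    return (x - x_hat).abs().max()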
def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half, quant_type="vector"):
if quant_type == "linear":
norm = S1 * S2 / (C * C)
# double cast needed to prevent overflows
return (xq.float() * norm).to(dtype)
elif quant_type == "zeropoint":
norm = 1.0 / (S1 * S2)
return (xq.float() * norm).to(dtype)
elif quant_type == "row-zeropoint":
norm = 1.0 / (S1 * S2)
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
        x *= norm
return x.to(dtype)
elif quant_type == "vector-zeropoint":
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
        x *= 1.0 / S1
x *= 1.0 / S2.t()
return x.to(dtype)
elif quant_type == "row":
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
        x *= S1 * S2 / (C * C)
return x.to(dtype)
elif quant_type in ["truncated-vector", "vector"]:
x = xq.float()
if len(S1.shape) == 3 and len(x.shape) == 2:
S1 = S1.squeeze(0)
if len(S2.shape) == 3 and len(x.shape) == 2:
S2 = S2.squeeze(0)
        x *= S1 / C
x *= S2 / C
return x.to(dtype)
else:
return None
def dequant_min_max(xq, A, B, SA, SB, dtype=torch.half):
offset = B.float().t().sum(0) * (SA[0] + SA[1])
x = xq.float()
if len(xq.shape) == 2 and len(SB.shape) == 3:
SB = SB.squeeze(0)
if len(SB.shape) == 2:
x *= SB.t() / 127
else:
x *= SB / 127
x *= SA[1] / 127
x += offset
return x.to(dtype)
def extract_outliers(A, SA, idx):
shapeA = SA[0]
formatA = SA[1]
assert formatA in ["col_turing", "col_ampere"]
assert A.device.type == "cuda"
out = torch.zeros(
(shapeA[0], idx.numel()), dtype=torch.int8, device=A.device
)
idx_size = ct.c_int32(idx.numel())
rows = ct.c_int32(shapeA[0])
cols = ct.c_int32(shapeA[1])
ptrA = get_ptr(A)
ptrIdx = get_ptr(idx)
ptrOut = get_ptr(out)
prev_device = pre_call(A.device)
if formatA == 'col_turing':
lib.cextractOutliers_turing(ptrA, ptrIdx, ptrOut, idx_size, rows, cols)
elif formatA == "col_ampere":
lib.cextractOutliers_ampere(ptrA, ptrIdx, ptrOut, idx_size, rows, cols)
post_call(prev_device)
return out
|
bitsandbytes-main
|
bitsandbytes/functional.py
|
import shlex
import subprocess
from typing import Tuple
def execute_and_return(command_string: str) -> Tuple[str, str]:
def _decode(subprocess_err_out_tuple):
return tuple(
to_decode.decode("UTF-8").strip()
for to_decode in subprocess_err_out_tuple
)
def execute_and_return_decoded_std_streams(command_string):
return _decode(
subprocess.Popen(
shlex.split(command_string),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).communicate()
)
std_out, std_err = execute_and_return_decoded_std_streams(command_string)
return std_out, std_err
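# A hedged usage sketch, not part of the original file: run a command and return
# its decoded stdout/stderr. The command string is shlex-split, so no shell is
# involved; the example command is only illustrative.
def _execute_and_return_example():
    std_out, std_err = execute_and_return("nvidia-smi --query-gpu=name --format=csv")
    return std_out if std_out else std_err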
|
bitsandbytes-main
|
bitsandbytes/utils.py
|
import os
import sys
from warnings import warn
import torch
HEADER_WIDTH = 60
def print_header(
txt: str, width: int = HEADER_WIDTH, filler: str = "+"
) -> None:
txt = f" {txt} " if txt else ""
print(txt.center(width, filler))
def print_debug_info() -> None:
print(
"\nAbove we output some debug information. Please provide this info when "
f"creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose ...\n"
)
print_header("")
print_header("DEBUG INFORMATION")
print_header("")
print()
from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
from .cuda_setup.env_vars import to_be_ignored
from .cuda_setup.main import get_compute_capabilities, get_cuda_lib_handle
print_header("POTENTIALLY LIBRARY-PATH-LIKE ENV VARS")
for k, v in os.environ.items():
if "/" in v and not to_be_ignored(k, v):
print(f"'{k}': '{v}'")
print_header("")
print(
"\nWARNING: Please be sure to sanitize sensible info from any such env vars!\n"
)
print_header("OTHER")
print(f"{COMPILED_WITH_CUDA = }")
cuda = get_cuda_lib_handle()
print(f"COMPUTE_CAPABILITIES_PER_GPU = {get_compute_capabilities(cuda)}")
print_header("")
print_header("DEBUG INFO END")
print_header("")
print(
"""
Running a quick check that:
+ library is importable
+ CUDA function is callable
"""
)
try:
from bitsandbytes.optim import Adam
p = torch.nn.Parameter(torch.rand(10, 10).cuda())
a = torch.rand(10, 10).cuda()
p1 = p.data.sum().item()
adam = Adam([p])
out = a * p
loss = out.sum()
loss.backward()
adam.step()
p2 = p.data.sum().item()
assert p1 != p2
print("SUCCESS!")
print("Installation was successful!")
sys.exit(0)
except ImportError:
print()
warn(
f"WARNING: {__package__} is currently running as CPU-only!\n"
"Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
f"If you think that this is so erroneously,\nplease report an issue!"
)
print_debug_info()
sys.exit(0)
except Exception as e:
print(e)
print_debug_info()
sys.exit(1)
|
bitsandbytes-main
|
bitsandbytes/__main__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .modules import Int8Params, Linear8bitLt, StableEmbedding
|
bitsandbytes-main
|
bitsandbytes/nn/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, TypeVar, Union, overload
import torch
import torch.nn.functional as F
from torch import Tensor, device, dtype, nn
import bitsandbytes as bnb
from bitsandbytes.optim import GlobalOptimManager
T = TypeVar("T", bound="torch.nn.Module")
class StableEmbedding(torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
device=None,
dtype=None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
device,
dtype,
)
self.norm = torch.nn.LayerNorm(embedding_dim, device=device)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
def reset_parameters(self) -> None:
torch.nn.init.xavier_uniform_(self.weight)
self._fill_padding_idx_with_zero()
""" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
        to make the layer compatible with PyTorch < 1.9.
        This means that if this changes in future PyTorch releases, this needs to change too,
which is cumbersome. However, with this we can ensure compatibility with previous
PyTorch releases.
"""
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input: Tensor) -> Tensor:
emb = F.embedding(
input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# always apply layer norm in full precision
emb = emb.to(torch.get_default_dtype())
return self.norm(emb).to(self.weight.dtype)
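# A hedged usage sketch, not part of the original file: StableEmbedding behaves
# like torch.nn.Embedding, but layer-norms its output in full precision and
# registers its weight with GlobalOptimManager so the optimizer keeps 32-bit
# state for it even when an 8-bit optimizer is used.
def _stable_embedding_example():
    emb = StableEmbedding(num_embeddings=1000, embedding_dim=64)
    tokens = torch.randint(0, 1000, (2, 16))
    return emb(tokens)  # (2, 16, 64), cast back to the embedding weight dtype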
class Embedding(torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
def reset_parameters(self) -> None:
torch.nn.init.xavier_uniform_(self.weight)
self._fill_padding_idx_with_zero()
""" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
        to make the layer compatible with PyTorch < 1.9.
        This means that if this changes in future PyTorch releases, this needs to change too,
which is cumbersome. However, with this we can ensure compatibility with previous
PyTorch releases.
"""
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input: Tensor) -> Tensor:
emb = F.embedding(
input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return emb
class Int8Params(torch.nn.Parameter):
def __new__(
cls,
data=None,
requires_grad=True,
has_fp16_weights=False,
CB=None,
SCB=None,
):
cls.has_fp16_weights = has_fp16_weights
cls.CB = None
cls.SCB = None
if data is None:
data = torch.empty(0)
return torch.Tensor._make_subclass(cls, data, requires_grad)
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere layout during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
setattr(self, "CB", CB)
setattr(self, "SCB", SCB)
return self
@overload
def to(
self: T,
device: Optional[Union[int, device]] = ...,
dtype: Optional[Union[dtype, str]] = ...,
non_blocking: bool = ...,
) -> T:
...
@overload
def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
...
@overload
def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
...
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(
*args, **kwargs
)
if (
device is not None
and device.type == "cuda"
and self.data.device.type == "cpu"
):
return self.cuda(device)
else:
new_param = Int8Params(
super().to(
device=device, dtype=dtype, non_blocking=non_blocking
),
requires_grad=self.requires_grad,
has_fp16_weights=self.has_fp16_weights,
)
new_param.CB = self.CB
new_param.SCB = self.SCB
return new_param
class Linear8bitLt(nn.Linear):
def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
memory_efficient_backward=False, threshold=0.0, index=None):
super().__init__(input_features, output_features, bias)
assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
def init_8bit_state(self):
self.state.CB = self.weight.CB
self.state.SCB = self.weight.SCB
self.weight.CB = None
self.weight.SCB = None
def forward(self, x: torch.Tensor):
self.state.is_training = self.training
if self.weight.CB is not None:
self.init_8bit_state()
# weights are cast automatically as Int8Params, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)
if not self.state.has_fp16_weights:
if self.state.CB is not None and self.state.CxB is not None:
# we converted 8-bit row major to turing/ampere format in the first inference pass
# we no longer need the row-major weight
del self.state.CB
self.weight.data = self.state.CxB
return out
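# A hedged usage sketch, not part of the original file: typical LLM.int8()
# inference setup. Weights are wrapped in Int8Params with has_fp16_weights=False
# and quantized when the module is moved to CUDA; fp16 inputs then go through
# the mixed-precision matmul with outlier threshold 6.0. Exact weight-loading
# conventions may differ from this sketch.
def _linear8bitlt_example(fp16_weight: torch.Tensor, fp16_bias: torch.Tensor):
    out_features, in_features = fp16_weight.shape
    layer = Linear8bitLt(in_features, out_features, bias=True,
                         has_fp16_weights=False, threshold=6.0)
    layer.weight = Int8Params(fp16_weight, requires_grad=False, has_fp16_weights=False)
    layer.bias.data = fp16_bias
    layer = layer.cuda()  # Int8Params.cuda() quantizes the weight here
    x = torch.randn(4, in_features, dtype=torch.float16, device="cuda")
    return layer(x)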
|
bitsandbytes-main
|
bitsandbytes/nn/modules.py
|
import operator
import warnings
from dataclasses import dataclass
from functools import reduce # Required in Python 3
from typing import Tuple, Optional
import torch
import bitsandbytes.functional as F
# math.prod not compatible with python < 3.8
def prod(iterable):
return reduce(operator.mul, iterable, 1)
tensor = torch.Tensor
# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:
# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py
"""
This class pools outlier dimensions across layers.
This is particularly important for small models where outlier features
are less systematic and occur with low frequency.
"""
class GlobalOutlierPooler:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.outliers = set()
self.model_dim = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def add_outliers(self, outlier_idx, feature_dim):
if self.model_dim is None:
self.model_dim = feature_dim
if feature_dim != self.model_dim:
return # we do not encode outliers for the 2nd FFN layer
self.outliers.update(outlier_idx.tolist())
def get_current_outlier_idx(self):
return torch.Tensor(list(self.outliers)).to(torch.int64)
def get_inverse_transform_indices(transform_tile: callable, tile_size: Tuple[int, int]):
"""
Compute a permutation of indices that invert the specified (tiled) matrix transformation
:param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2]
:param tile_size: higher-level tile dimensions, i.e. (8, 32) for Turing and (32, 32) for Ampere
    :note: we assume that transform_tile applies to a cpu-based int8 tensor of shape tile_size
:example: transform_tile function for the turing layout (bitsandbytes.functional as F)
:returns: indices
"""
d1, d2 = tile_size
assert 0 < d1 * d2 < 2**64
tile_indices = torch.arange(d1 * d2, dtype=torch.int64).view(d1, d2)
# encode each position in tile as a tuple of <= 8 unique bytes
permuted_tile_indices = torch.zeros_like(tile_indices)
for i in range(8):
# select i-th byte, apply transformation and trace where each index ended up
ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode="trunc") % 256
sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous()
assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), "int overflow"
permuted_tile_i = transform_tile(sample_tile_i)
ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128
permuted_tile_indices += ith_permuted_indices * (256**i)
if d1 * d2 < 256**i:
break # if all indices fit in i bytes, stop early
return permuted_tile_indices
def undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor:
"""
Undo a tiled permutation such as turing or ampere layout
:param permuted_tensor: torch tensor in a permuted layout
:param tile_indices: reverse transformation indices, from get_inverse_transform_indices
:return: contiguous row-major tensor
"""
(rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape
assert rows % tile_rows == cols % tile_cols == 0, "tensor must contain a whole number of tiles"
tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t()
outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda
outputs[tile_indices.flatten()] = tensor
outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows)
outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols)
return outputs.reshape(rows, cols).contiguous()
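# A hedged sketch, not part of the original file: recover a row-major int8
# weight from its tiled col_turing/col_ampere layout. This mirrors what
# MatMul8bitLt.backward does below when only state.CxB is available.
def _undo_layout_sketch(CxB: torch.Tensor, formatB: str = "col_turing") -> torch.Tensor:
    tile_size = (8, 32) if formatB == "col_turing" else (32, 32)
    transform_tile = lambda x: F.transform(x.cuda(), from_order="row", to_order=formatB)[0].to(x.device)
    tile_indices = get_inverse_transform_indices(transform_tile, tile_size)
    return undo_layout(CxB, tile_indices.to(CxB.device))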
class MatMul8bit(torch.autograd.Function):
@staticmethod
def forward(ctx, A, B, out=None, quant_type="vector", precision=None):
if precision is None:
precision = [8, 8, 8]
if precision[0] != 8:
with torch.no_grad():
output = torch.matmul(A, B)
else:
if len(B.shape) == 2:
dim = 0
else:
dim = 1
qA, SA = F.vectorwise_quant(A, dim=-1, quant_type=quant_type)
qB, SB = F.vectorwise_quant(B, dim=dim, quant_type=quant_type)
iout = F.igemm(qA, qB)
output = F.vectorwise_mm_dequant(iout, SA, SB, A.dtype, quant_type)
if A.requires_grad or B.requires_grad:
ctx.save_for_backward(A, B)
ctx.quant_type = quant_type
ctx.precision = precision
return output
@staticmethod
def backward(ctx, grad_output):
A, B = ctx.saved_tensors
quant_type = ctx.quant_type
precision = ctx.precision
grad_A = grad_B = None
if B.requires_grad:
if len(A.shape) == 3:
dims = [0, 1]
# bsi -> ibs
permute_dim = [0, 2, 1]
else:
dims = [0]
# bs -> sb
permute_dim = [1, 0]
if precision[1] != 8:
with torch.no_grad():
grad_B = torch.matmul(A.permute(permute_dim), grad_output)
else:
if len(B.shape) == 2 and len(A.shape) == 3:
grad_output = grad_output.contiguous()
                    if not grad_output.is_contiguous():
                        grad_output = grad_output.contiguous()
qgrad_output, S1 = F.vectorwise_quant(
grad_output.view(-1, grad_output.shape[2]),
dim=0,
quant_type=quant_type,
)
if not A.is_contiguous():
A = A.contiguous()
qA, S2 = F.vectorwise_quant(
A.view(-1, A.shape[2]), dim=0, quant_type=quant_type
)
igrad_B = F.igemm(qA.t(), qgrad_output)
grad_B = F.vectorwise_mm_dequant(
igrad_B, S2.t(), S1, grad_output.dtype, quant_type
)
else:
qgrad_output, S1 = F.vectorwise_quant(
grad_output, dim=dims, quant_type=quant_type
)
qA, S2 = F.vectorwise_quant(
A, dim=dims, quant_type=quant_type
)
igrad_B = F.igemm(qA.permute(permute_dim), qgrad_output)
grad_B = F.vectorwise_mm_dequant(
igrad_B,
S2.permute(permute_dim),
S1,
grad_output.dtype,
quant_type,
)
if A.requires_grad:
if len(grad_output.shape) == 3:
dims = [2]
else:
dims = [1]
if len(B.shape) == 3:
# bio -> boi
permute_dim = [0, 2, 1]
dim_B = dims
else:
# io -> oi
permute_dim = [1, 0]
dim_B = [1]
if precision[2] != 8:
with torch.no_grad():
grad_A = torch.matmul(grad_output, B.permute(permute_dim))
else:
qgrad_output, S1 = F.vectorwise_quant(
grad_output, dim=dims, quant_type=quant_type
)
qB, S3 = F.vectorwise_quant(B, dim=dim_B, quant_type=quant_type)
igrad_A = F.igemm(qgrad_output, qB.permute(permute_dim))
grad_A = F.vectorwise_mm_dequant(
igrad_A,
S1,
S3.permute(permute_dim),
grad_output.dtype,
quant_type,
)
return grad_A, grad_B, None, None, None
mm_cublas = MatMul8bit.apply
bmm_cublas = MatMul8bit.apply
matmul_cublas = MatMul8bit.apply
@dataclass
class MatmulLtState:
tile_indices: Optional[torch.Tensor] = None
force_no_igemmlt: bool = False
CB = None
CxB = None
SB = None
SCB = None
CxBt = None
SBt = None
CBt = None
subB = None
outlier_pool = None
has_accumulated_gradients = False
threshold = 0.0
idx = None
is_training = True
has_fp16_weights = True
memory_efficient_backward = False
use_pool = False
formatB = F.get_special_format_str()
def reset_grads(self):
self.CB = None
self.CxB = None
self.SB = None
self.SCB = None
self.CxBt = None
self.SBt = None
self.CBt = None
def get_tile_size(self):
assert self.formatB in (
"col_turing",
"col_ampere",
), f"please find this assert and manually enter tile size for {self.formatB}"
return (8, 32) if self.formatB == "col_turing" else (32, 32)
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
using_igemmlt = torch.cuda.get_device_capability(device=A.device) >= (7, 5) and not state.force_no_igemmlt
        # match the default PyTorch behavior when inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
if A.shape[-1] == B.shape[0]:
return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)
# 1. Quantize A
# 2. Quantize B
# 3. Matmul
# 4. Mixed-precision decomposition matmul
# 5. Save state
formatB = state.formatB
input_shape = A.shape
if state.outlier_pool is None:
state.outlier_pool = GlobalOutlierPooler.get_instance()
# Cast A to fp16
if A.dtype != torch.float16:
warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
# 1. Quantize A
if len(A.shape) == 3:
A = A.view(-1, A.shape[-1]).contiguous()
CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A.to(torch.float16), threshold=state.threshold)
if state.threshold > 0.0 and coo_tensorA is not None:
if state.has_fp16_weights:
idx = torch.unique(coo_tensorA.colidx).long()
CA[:, idx] = 0
CAt[:, idx] = 0
subA = A[:, idx]
state.subB = B[:, idx].t().contiguous()
state.idx = idx
else:
if state.CxB is None and using_igemmlt:
                    # B is in 8-bit row-major; we can transform it back to 16-bit to extract outlier dimensions
# we also need to convert it to the turing/ampere format
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
else:
if not state.has_fp16_weights and state.CxB is None and using_igemmlt:
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
subA = None
# 2. Quantize B
if state.has_fp16_weights:
has_grad = True if (getattr(B, "grad", None) is not None) else False
is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)
if is_transposed:
B = B.contiguous()
if (state.is_training and not has_grad) or state.CxB is None:
state.reset_grads()
(
CB,
state.CBt,
state.SCB,
state.SCBt,
coo_tensorB,
) = F.double_quant(B.to(torch.float16))
if using_igemmlt:
state.CxB, state.SB = F.transform(CB, to_order=formatB)
else:
state.CB = CB
else:
has_grad = False
if coo_tensorA is not None and not state.has_fp16_weights:
# extract outliers
outlier_idx = torch.unique(coo_tensorA.colidx)
state.idx = outlier_idx
# state.outlier_pool.add_outliers(outlier_idx, A.shape[-1])
# if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]:
# # do not use pool for 2nd FFN layer
# state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device)
# else:
# state.idx = outlier_idx
if state.CxB is not None:
outliers = F.extract_outliers(state.CxB, state.SB, state.idx.int())
else:
outliers = state.CB[:, state.idx.long()].clone()
state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)
CA[:, state.idx.long()] = 0
CAt[:, state.idx.long()] = 0
subA = A[:, state.idx.long()]
shapeB = state.SB[0] if state.SB else B.shape
if len(input_shape) == 3:
output_shape = (input_shape[0], input_shape[1], shapeB[0])
else:
output_shape = (input_shape[0], shapeB[0])
# 3. Matmul
if using_igemmlt:
C32A, SA = F.transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB)
if bias is None or bias.dtype == torch.float16:
# we apply the fused bias here
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias)
output = output.to(A.dtype)
else: # apply bias separately
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=None)
output = output.to(A.dtype).add_(bias)
else:
A_wo_outliers = A.clone()
if state.idx is not None:
A_wo_outliers[:, state.idx.long()] = 0
output = torch.nn.functional.linear(A_wo_outliers, state.CB.to(A.dtype))
output = output.mul_(state.SCB.unsqueeze(0).mul(1.0 / 127.0))
if bias is not None:
output = output.add_(bias)
# 4. Mixed-precision decomposition matmul
if coo_tensorA is not None and subA is not None:
output += torch.matmul(subA, state.subB)
# 5. Save state
ctx.state = state
ctx.formatB = formatB
ctx.grad_shape = input_shape
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (CAt, subA)
ctx.tensor_states = (SCAt, state.idx)
else:
ctx.tensors = [None, None]
ctx.tensor_states = (None, None)
ctx.save_for_backward(None, None)
clone_func = torch.clone if len(output_shape) == 3 else lambda x: x
return clone_func(output.view(output_shape))
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad
CAt, subA = ctx.tensors
SCAt, idx = ctx.tensor_states
formatB = ctx.formatB
state = ctx.state
grad_A = grad_B = grad_bias = None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# Cast grad_output to fp16
if len(grad_output.shape) == 3:
grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16))
if req_gradB:
CxAt, SAt = F.transform(CAt, formatB, transpose=True)
C32grad, Sgrad = F.transform(Cgradt, "col32", transpose=True)
gradB32, SgradB32 = F.igemmlt(C32grad, CxAt, Sgrad, SAt)
grad_B = F.mm_dequant(gradB32, SgradB32, SCgradt, SCAt)
if state.threshold > 0.0 and subA is not None:
grad_B[:, idx] += torch.matmul(grad_output.t(), subA)
if req_gradA:
if state.CBt is not None:
C32grad, Sgrad = F.transform(Cgrad, "col32")
if state.CxBt is None:
state.CxBt, state.SBt = F.transform(state.CBt, to_order=formatB, transpose=True)
gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt)
grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A)
elif state.CB is not None:
CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
elif state.CxB is not None:
if state.tile_indices is None:
order, tile_size = state.formatB, state.get_tile_size()
transform = lambda x: F.transform(x.cuda(), from_order="row", to_order=order)[0].to(x.device)
with torch.no_grad():
state.tile_indices = get_inverse_transform_indices(transform, tile_size).to(state.CxB.device)
CB = (
undo_layout(state.CxB, state.tile_indices)
.to(ctx.dtype_A)
.mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
)
grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
else:
raise Exception("State must contain either CBt or CB or CxB matrix for backward")
return grad_A, grad_B, None, grad_bias, None
def matmul(
A: tensor,
B: tensor,
out: tensor = None,
state: MatmulLtState = None,
threshold=0.0,
bias=None
):
state = state or MatmulLtState()
if threshold > 0.0:
state.threshold = threshold
return MatMul8bitLt.apply(A, B, out, bias, state)
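# A hedged usage sketch, not part of the original file: call matmul directly
# with an already-quantized Int8Params weight, mirroring what
# Linear8bitLt.forward does. W is assumed to be an Int8Params whose CB/SCB were
# filled in by moving it to CUDA.
def _matmul_lt_sketch(A_fp16: torch.Tensor, W) -> torch.Tensor:
    state = MatmulLtState()
    state.has_fp16_weights = False
    state.CB, state.SCB = W.CB, W.SCB
    return matmul(A_fp16, W, state=state, threshold=6.0)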
|
bitsandbytes-main
|
bitsandbytes/autograd/_functions.py
|
from ._functions import undo_layout, get_inverse_transform_indices
|
bitsandbytes-main
|
bitsandbytes/autograd/__init__.py
|
bitsandbytes-main
|
bitsandbytes/cuda_setup/__init__.py
|
|
import os
from typing import Dict
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
"_", # current Python interpreter
}
return env_var in ignorable
def might_contain_a_path(candidate: str) -> bool:
return "/" in candidate
def is_active_conda_env(env_var: str) -> bool:
return "CONDA_PREFIX" == env_var
def is_other_conda_env_var(env_var: str) -> bool:
return "CONDA" in env_var
def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
return is_active_conda_env(env_var) or (
might_contain_a_path(value) and not
is_other_conda_env_var(env_var) and not
to_be_ignored(env_var, value)
)
def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
return {
env_var: value
for env_var, value in os.environ.items()
if is_relevant_candidate_env_var(env_var, value)
}
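# A hedged usage sketch, not part of the original file: list the environment
# variables that might contain library paths; determine_cuda_runtime_lib_path in
# cuda_setup/main.py checks CONDA_PREFIX and LD_LIBRARY_PATH from this set first.
def _print_candidate_env_vars() -> None:
    for env_var, value in get_potentially_lib_path_containing_env_vars().items():
        print(f"{env_var}={value}")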
|
bitsandbytes-main
|
bitsandbytes/cuda_setup/env_vars.py
|
"""
extract factors the build is dependent on:
[X] compute capability
[ ] TODO: Q - What if we have multiple GPUs of different makes?
- CUDA version
- Software:
    - CPU-only: only CPU quantization functions (no optimizer, no matrix multiplication)
- CuBLAS-LT: full-build 8-bit optimizer
- no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
evaluation:
- if paths faulty, return meaningful error
- else:
- determine CUDA version
- determine capabilities
- based on that set the default path
"""
import ctypes as ct
import os
import errno
import torch
from warnings import warn
from pathlib import Path
from typing import Set, Union
from .env_vars import get_potentially_lib_path_containing_env_vars
CUDA_RUNTIME_LIB: str = "libcudart.so"
class CUDASetup:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def generate_instructions(self):
if self.cuda is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone git@github.com:TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda = cuda
self.cc = cc
self.cuda_version_string = cuda_version_string
package_dir = Path(__file__).parent.parent
binary_path = package_dir / binary_name
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. CUDA driver not installed')
self.add_log_entry('2. CUDA not installed')
self.add_log_entry('3. You have multiple conflicting CUDA libraries')
self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
self.print_log_stack()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
self.print_log_stack()
def add_log_entry(self, msg, is_warning=False):
self.cuda_setup_log.append((msg, is_warning))
def print_log_stack(self):
for msg, is_warning in self.cuda_setup_log:
if is_warning:
warn(msg)
else:
print(msg)
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
            CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
return {
path / CUDA_RUNTIME_LIB
for path in candidate_paths
if (path / CUDA_RUNTIME_LIB).is_file()
}
def resolve_paths_list(paths_list_candidate: str) -> Set[Path]:
"""
Searches a given environmental var for the CUDA runtime library,
i.e. `libcudart.so`.
"""
return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate))
def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]:
return get_cuda_runtime_lib_paths(
resolve_paths_list(paths_list_candidate)
)
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
if len(results_paths) > 1:
warning_msg = (
f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. "
"We'll flip a coin and try one of these, in order to fail forward.\n"
"Either way, this might cause trouble in the future:\n"
"If you get `CUDA error: invalid device function` errors, the above "
"might be the cause and the solution is to make sure only one "
f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.")
CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
"""
Searches for a cuda installations, in the following order of priority:
1. active conda env
2. LD_LIBRARY_PATH
3. any other env vars, while ignoring those that
- are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
- don't contain the path separator `/`
If multiple libraries are found in part 3, we optimistically try one,
while giving a warning message.
"""
candidate_env_vars = get_potentially_lib_path_containing_env_vars()
if "CONDA_PREFIX" in candidate_env_vars:
conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
warn_in_case_of_duplicates(conda_cuda_libs)
if conda_cuda_libs:
return next(iter(conda_cuda_libs))
CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
if "LD_LIBRARY_PATH" in candidate_env_vars:
lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
if lib_ld_cuda_libs:
return next(iter(lib_ld_cuda_libs))
warn_in_case_of_duplicates(lib_ld_cuda_libs)
CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain '
f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
remaining_candidate_env_vars = {
env_var: value for env_var, value in candidate_env_vars.items()
if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
}
cuda_runtime_libs = set()
for env_var, value in remaining_candidate_env_vars.items():
cuda_runtime_libs.update(find_cuda_lib_in(value))
if len(cuda_runtime_libs) == 0:
        CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcudart.so not found in any environmental path. Searching /usr/local/cuda/lib64...')
cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
warn_in_case_of_duplicates(cuda_runtime_libs)
return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
def check_cuda_result(cuda, result_val):
# 3. Check for CUDA errors
if result_val != 0:
error_str = ct.c_char_p()
cuda.cuGetErrorString(result_val, ct.byref(error_str))
if error_str.value is not None:
CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
else:
CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
def get_cuda_version(cuda, cudart_path):
if cuda is None: return None
try:
cudart = ct.CDLL(cudart_path)
except OSError:
CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
return None
version = ct.c_int()
try:
check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
except AttributeError as e:
CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
        CUDASetup.get_instance().add_log_entry('CUDA SETUP: It seems that your CUDA installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
version = int(version.value)
major = version//1000
minor = (version-(major*1000))//10
if major < 11:
        CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA versions lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!')
return f'{major}{minor}'
def get_cuda_lib_handle():
# 1. find libcuda.so library (GPU driver) (/usr/lib)
try:
cuda = ct.CDLL("libcuda.so")
except OSError:
CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
return None
check_cuda_result(cuda, cuda.cuInit(0))
return cuda
def get_compute_capabilities(cuda):
"""
1. find libcuda.so library (GPU driver) (/usr/lib)
init_device -> init variables -> call function by reference
2. call extern C function to determine CC
(https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
3. Check for CUDA errors
https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
# bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
"""
nGpus = ct.c_int()
cc_major = ct.c_int()
cc_minor = ct.c_int()
device = ct.c_int()
check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus)))
ccs = []
for i in range(nGpus.value):
check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i))
ref_major = ct.byref(cc_major)
ref_minor = ct.byref(cc_minor)
# 2. call extern C function to determine CC
check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
ccs.append(f"{cc_major.value}.{cc_minor.value}")
return ccs
# def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error
def get_compute_capability(cuda):
"""
    Extracts the highest compute capability from all available GPUs, as compute
capabilities are downwards compatible. If no GPUs are detected, it returns
None.
"""
if cuda is None: return None
# TODO: handle different compute capabilities; for now, take the max
ccs = get_compute_capabilities(cuda)
if ccs: return ccs[-1]
def evaluate_cuda_setup():
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
print('')
print('='*35 + 'BUG REPORT' + '='*35)
print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
print('='*80)
    if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None, None
cuda_setup = CUDASetup.get_instance()
cudart_path = determine_cuda_runtime_lib_path()
cuda = get_cuda_lib_handle()
cc = get_compute_capability(cuda)
cuda_version_string = get_cuda_version(cuda, cudart_path)
failure = False
if cudart_path is None:
failure = True
cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
else:
cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
if cc == '' or cc is None:
failure = True
cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True)
else:
cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
if cuda is None:
failure = True
else:
cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
    # 7.5 is the minimum CC for cublaslt
has_cublaslt = is_cublasLt_compatible(cc)
# TODO:
    # (1) CUDA missing cases (no CUDA installed but CUDA driver (nvidia-smi) accessible)
# (2) Multiple CUDA versions installed
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if failure:
binary_name = "libbitsandbytes_cpu.so"
elif has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
return binary_name, cudart_path, cuda, cc, cuda_version_string
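# Minimal usage sketch (illustrative, not part of the original file): inspect what the setup
# logic above would select on the current machine; the same tuple is what the rest of the
# library is expected to consume when deciding which shared object to load.
if __name__ == "__main__":
    binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
    print(f"binary: {binary_name}, libcudart: {cudart_path}, compute capability: {cc}, CUDA: {cuda_version_string}")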
|
bitsandbytes-main
|
bitsandbytes/cuda_setup/main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class RMSprop(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
alpha=0.99,
eps=1e-8,
weight_decay=0,
momentum=0,
centered=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if alpha == 0:
raise NotImplementedError(
"RMSprop with alpha==0.0 is not supported!"
)
if centered:
raise NotImplementedError("Centered RMSprop is not supported!")
super().__init__(
"rmsprop",
params,
lr,
(alpha, momentum),
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class RMSprop8bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
alpha=0.99,
eps=1e-8,
weight_decay=0,
momentum=0,
centered=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if alpha == 0:
raise NotImplementedError(
"RMSprop with alpha==0.0 is not supported!"
)
if centered:
raise NotImplementedError("Centered RMSprop is not supported!")
super().__init__(
"rmsprop",
params,
lr,
(alpha, momentum),
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class RMSprop32bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
alpha=0.99,
eps=1e-8,
weight_decay=0,
momentum=0,
centered=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if alpha == 0:
raise NotImplementedError(
"RMSprop with alpha==0.0 is not supported!"
)
if centered:
raise NotImplementedError("Centered RMSprop is not supported!")
super().__init__(
"rmsprop",
params,
lr,
(alpha, momentum),
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
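# Minimal usage sketch (illustrative, not part of the original file): the classes above are
# drop-in replacements for torch.optim.RMSprop. This assumes a CUDA device and a working
# bitsandbytes CUDA build; parameters smaller than min_8bit_size keep 32-bit state either way.
if __name__ == "__main__":
    import torch
    model = torch.nn.Linear(4096, 4096).cuda()
    opt = RMSprop8bit(model.parameters(), lr=1e-2, alpha=0.99)
    model(torch.randn(8, 4096, device="cuda")).sum().backward()
    opt.step()
    opt.zero_grad()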
|
bitsandbytes-main
|
bitsandbytes/optim/rmsprop.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class Lion(Optimizer1State):
def __init__(
self,
params,
lr=1e-4,
betas=(0.9, 0.99),
weight_decay=0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"lion",
params,
lr,
betas,
0.,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Lion8bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-4,
betas=(0.9, 0.99),
weight_decay=0,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"lion",
params,
lr,
betas,
0.,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Lion32bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-4,
betas=(0.9, 0.99),
weight_decay=0,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"lion",
params,
lr,
betas,
0.,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
bitsandbytes-main
|
bitsandbytes/optim/lion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer2State
class LAMB(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adam_w_mode=True,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=False,
max_unorm=1.0,
):
super().__init__(
"lamb",
params,
lr,
betas,
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
max_unorm=1.0,
)
class LAMB8bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adam_w_mode=True,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=False,
max_unorm=1.0,
):
super().__init__(
"lamb",
params,
lr,
betas,
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
max_unorm=1.0,
)
class LAMB32bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adam_w_mode=True,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=False,
max_unorm=1.0,
):
super().__init__(
"lamb",
params,
lr,
betas,
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
max_unorm=1.0,
)
|
bitsandbytes-main
|
bitsandbytes/optim/lamb.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class SGD(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if momentum == 0:
raise NotImplementedError("SGD without momentum is not supported!")
super().__init__(
"momentum",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class SGD8bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if momentum == 0:
raise NotImplementedError("SGD without momentum is not supported!")
super().__init__(
"momentum",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class SGD32bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if momentum == 0:
raise NotImplementedError("SGD without momentum is not supported!")
super().__init__(
"momentum",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
bitsandbytes-main
|
bitsandbytes/optim/sgd.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.optim import Optimizer
from bitsandbytes.optim.optimizer import Optimizer1State
class LARS(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
max_unorm=0.02,
):
if momentum == 0:
raise NotImplementedError(
"LARS without momentum is not supported!"
)
super().__init__(
"lars",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
max_unorm=max_unorm,
block_wise=False,
)
class LARS8bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
max_unorm=0.02,
):
if momentum == 0:
raise NotImplementedError(
"LARS without momentum is not supported!"
)
super().__init__(
"lars",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
max_unorm=max_unorm,
block_wise=False,
)
class LARS32bit(Optimizer1State):
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
max_unorm=0.02,
):
if momentum == 0:
raise NotImplementedError(
"LARS without momentum is not supported!"
)
super().__init__(
"lars",
params,
lr,
(momentum, dampening),
0.0,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
max_unorm=max_unorm,
block_wise=False,
)
class PytorchLARS(Optimizer):
def __init__(
self,
params,
lr=0.01,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
max_unorm=0.02,
):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
max_unorm=max_unorm,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening"
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
d_p_list = []
momentum_buffer_list = []
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
max_unorm = group["max_unorm"]
lr = group["lr"]
for p in group["params"]:
if p.grad is None:
continue
state = self.state[p]
d_p = p.grad
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
buf = state.get("momentum_buffer", None)
if buf is None:
buf = torch.clone(d_p).detach()
state["momentum_buffer"] = buf
else:
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
update = d_p + buf * momentum
else:
update = buf
update_scale = 1.0
if max_unorm > 0.0:
assert p.dtype == torch.float32
pnorm = torch.norm(p.detach())
unorm = torch.norm(update)
if unorm > max_unorm * pnorm:
update_scale = max_unorm * pnorm / unorm
p.add_(update, alpha=-lr * update_scale)
return loss
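# Minimal usage sketch (illustrative, not part of the original file): PytorchLARS is the pure
# PyTorch reference implementation above and runs on CPU; the toy model and hyperparameters
# below are assumptions.
if __name__ == "__main__":
    model = torch.nn.Linear(32, 32)
    opt = PytorchLARS(model.parameters(), lr=0.01, momentum=0.9, max_unorm=0.02)
    model(torch.randn(4, 32)).sum().backward()
    opt.step()
    opt.zero_grad()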
|
bitsandbytes-main
|
bitsandbytes/optim/lars.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.cextension import COMPILED_WITH_CUDA
from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit
from .adam import Adam, Adam8bit, Adam32bit
from .adamw import AdamW, AdamW8bit, AdamW32bit
from .lamb import LAMB, LAMB8bit, LAMB32bit
from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS
from .optimizer import GlobalOptimManager
from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit
from .lion import Lion, Lion8bit, Lion32bit
from .sgd import SGD, SGD8bit, SGD32bit
|
bitsandbytes-main
|
bitsandbytes/optim/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer1State
class Adagrad(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if initial_accumulator_value != 0.0:
raise ValueError("Initial accumulator value != 0.0 not supported!")
if lr_decay != 0.0:
raise ValueError("Lr Decay != 0.0 not supported!")
super().__init__(
"adagrad",
params,
lr,
(0.0, 0.0),
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adagrad8bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
optim_bits=8,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if initial_accumulator_value != 0.0:
raise ValueError("Initial accumulator value != 0.0 not supported!")
if lr_decay != 0.0:
raise ValueError("Lr Decay != 0.0 not supported!")
assert block_wise
super().__init__(
"adagrad",
params,
lr,
(0.0, 0.0),
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adagrad32bit(Optimizer1State):
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if initial_accumulator_value != 0.0:
raise ValueError("Initial accumulator value != 0.0 not supported!")
if lr_decay != 0.0:
raise ValueError("Lr Decay != 0.0 not supported!")
super().__init__(
"adagrad",
params,
lr,
(0.0, 0.0),
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
bitsandbytes-main
|
bitsandbytes/optim/adagrad.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer2State
class AdamW(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2,
amsgrad=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class AdamW8bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class AdamW32bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
|
bitsandbytes-main
|
bitsandbytes/optim/adamw.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import torch
import torch.distributed as dist
import bitsandbytes.functional as F
from bitsandbytes.optim.optimizer import Optimizer2State
class Adam(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
optim_bits,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adam8bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
8,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class Adam32bit(Optimizer2State):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
):
super().__init__(
"adam",
params,
lr,
betas,
eps,
weight_decay,
32,
args,
min_8bit_size,
percentile_clipping,
block_wise,
)
class AnalysisAdam(torch.optim.Optimizer):
"""Adam that performs 8-bit vs 32-bit error analysis.
This implementation is modified from torch.optim.Adam based on:
    `Decoupled Weight Decay Regularization`
    (see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
bnb_analysis="dynamic-blockwise",
savedir=None,
):
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
)
super().__init__(params, defaults)
self.analysis = bnb_analysis
self.savedir = savedir
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p_id, p in enumerate(group["params"]):
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
assert not amsgrad
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
state["abserrors"] = torch.zeros(
(256, 256), device=p_data_fp32.device
)
state["relerrors"] = torch.zeros(
(256, 256), device=p_data_fp32.device
)
state["counts"] = torch.zeros(
(256, 256), device=p_data_fp32.device
)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
state["step"] += 1
beta1, beta2 = group["betas"]
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = (
group["lr"] * math.sqrt(bias_correction2) / bias_correction1
)
e = state["abserrors"]
rele = state["relerrors"]
counts = state["counts"]
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
update_fp32 = exp_avg / denom
if (
p_data_fp32.numel() <= 8192
or p_data_fp32.numel() > 50000 * 1000
):
# embedding layer or too small
p_data_fp32 += -step_size * update_fp32
else:
if self.analysis == "dynamic-blockwise":
code1 = F.create_dynamic_map(signed=True).to(p.device)
code2 = F.create_dynamic_map(signed=False).to(p.device)
C1, S1 = F.quantize_blockwise(exp_avg, code=code1)
state1 = F.dequantize_blockwise(C1, S1)
C2, S2 = F.quantize_blockwise(exp_avg_sq, code=code2)
state2 = F.dequantize_blockwise(C2, S2)
elif self.analysis == "dynamic":
code1 = F.create_dynamic_map(signed=True).to(p.device)
code2 = F.create_dynamic_map(signed=False).to(p.device)
C1, S1 = F.quantize(exp_avg, code=code1)
state1 = F.dequantize(C1, S1)
C2, S2 = F.quantize(exp_avg_sq, code=code2)
state2 = F.dequantize(C2, S2)
elif self.analysis == "linear":
code1 = F.create_linear_map(signed=True).to(p.device)
code2 = F.create_linear_map(signed=False).to(p.device)
C1, S1 = F.quantize(exp_avg, code=code1)
state1 = F.dequantize(C1, S1)
C2, S2 = F.quantize(exp_avg_sq, code=code2)
state2 = F.dequantize(C2, S2)
elif self.analysis == "quantile":
code1 = F.estimate_quantiles(exp_avg)
code2 = F.estimate_quantiles(exp_avg_sq)
C1 = F.quantize_no_absmax(exp_avg, code=code1)
state1 = F.dequantize_no_absmax(C1, code1)
C2 = F.quantize_no_absmax(exp_avg_sq, code=code2)
state2 = F.dequantize_no_absmax(C2, code2)
elif self.analysis == "my-quantization-routine":
pass
# 1. get code
# 2. quantize
# 3. dequantize
# Error will be calculated automatically!
else:
raise ValueError(
f"Invalid analysis value: {self.analysis}!"
)
denom = state2.sqrt().add_(group["eps"])
update_8bit = state1 / denom
abserr = torch.abs(update_8bit - update_fp32)
relerr = abserr / torch.abs(update_fp32 + 1e-6)
C1, C2 = C1.int(), C2.int()
F.histogram_scatter_add_2d(e, C1.int(), C2.int(), abserr)
F.histogram_scatter_add_2d(rele, C1.int(), C2.int(), relerr)
F.histogram_scatter_add_2d(
counts, C1.int(), C2.int(), torch.ones_like(abserr)
)
p_data_fp32 += -step_size * update_fp32
if not dist.is_initialized() or dist.get_rank() == 0:
if self.savedir != "" and state["step"] % 100 == 0:
if not os.path.exists(self.savedir):
os.makedirs(self.savedir)
shapestr = "_".join(
[str(dim) for dim in p_data_fp32.shape]
)
pathe = os.path.join(
self.savedir, f"{p_id}_{shapestr}_abserr.pkl"
)
pathrele = os.path.join(
self.savedir, f"{p_id}_{shapestr}_relerr.pkl"
)
pathcounts = os.path.join(
self.savedir, f"{p_id}_{shapestr}_counts.pkl"
)
torch.save(e, pathe)
torch.save(rele, pathrele)
torch.save(counts, pathcounts)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
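# Minimal usage sketch (illustrative, not part of the original file): AnalysisAdam performs a
# normal fp32 Adam step and additionally logs 8-bit quantization error histograms for tensors
# with more than 8192 elements (that path needs a working bitsandbytes CUDA build). The toy
# model and the savedir path below are assumptions.
if __name__ == "__main__":
    model = torch.nn.Linear(64, 64)
    opt = AnalysisAdam(model.parameters(), lr=1e-3, bnb_analysis="dynamic-blockwise", savedir="/tmp/adam_analysis")
    model(torch.randn(16, 64)).sum().backward()
    opt.step()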
|
bitsandbytes-main
|
bitsandbytes/optim/adam.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import abc as container_abcs
from collections import defaultdict
from copy import deepcopy
from itertools import chain
import torch
import bitsandbytes.functional as F
class MockArgs:
def __init__(self, initial_data):
for key in initial_data:
setattr(self, key, initial_data[key])
class GlobalOptimManager:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.pid2config = {}
self.index2config = {}
self.optimizer = None
self.uses_config_override = False
self.module_weight_config_triple = []
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
def register_parameters(self, params):
param_groups = list(params)
if not isinstance(param_groups[0], dict):
param_groups = [{"params": param_groups}]
for group_index, group in enumerate(param_groups):
for p_index, p in enumerate(group["params"]):
if id(p) in self.pid2config:
self.index2config[(group_index, p_index)] = self.pid2config[
id(p)
]
def override_config(
self, parameters, key=None, value=None, key_value_dict=None
):
"""
Overrides initial optimizer config for specific parameters.
        The key-values of the optimizer config for the input parameters are overridden.
        This can be both optimizer parameters like "betas" or "lr", or 8-bit specific
        parameters like "optim_bits" and "percentile_clipping".
        A minimal usage sketch follows this class definition.
        Parameters
        ----------
        parameters : torch.Tensor or list(torch.Tensors)
            The input parameters.
        key : str
            The hyperparameter to override.
        value : object
            The value for the hyperparameter.
        key_value_dict : dict
            A dictionary with multiple key-values to override.
        """
self.uses_config_override = True
if isinstance(parameters, torch.nn.Parameter):
parameters = [parameters]
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
if key is not None and value is not None:
assert key_value_dict is None
key_value_dict = {key: value}
if key_value_dict is not None:
for p in parameters:
if id(p) in self.pid2config:
self.pid2config[id(p)].update(key_value_dict)
else:
self.pid2config[id(p)] = key_value_dict
def register_module_override(self, module, param_name, config):
self.module_weight_config_triple.append((module, param_name, config))
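def _override_config_example():
    # Illustrative sketch only (not part of the original file): force 32-bit optimizer state
    # for one specific parameter (an assumed embedding weight) while everything else keeps the
    # defaults; register_parameters later maps the override onto the optimizer's param groups.
    import torch
    mng = GlobalOptimManager.get_instance()
    emb = torch.nn.Embedding(10000, 128)
    mng.override_config(emb.weight, key="optim_bits", value=32)
    # the same override expressed as a dict of multiple key-values:
    mng.override_config(emb.weight, key_value_dict={"optim_bits": 32, "percentile_clipping": 100})
    mng.register_parameters(emb.parameters())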
class Optimizer8bit(torch.optim.Optimizer):
def __init__(self, params, defaults, optim_bits=32):
super().__init__(params, defaults)
self.initialized = False
self.name2qmap = {}
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = {
"qmap1",
"qmap2",
"max1",
"max2",
"new_max1",
"new_max2",
"state1",
"state2",
"gnorm_vec",
"absmax1",
"absmax2",
"unorm_vec",
}
if optim_bits == 8:
self.fill_qmap()
def fill_qmap(self):
self.name2qmap["dynamic"] = F.create_dynamic_map(signed=True)
self.name2qmap["udynamic"] = F.create_dynamic_map(signed=False)
def __setstate__(self, state):
super().__setstate__(state)
def load_state_dict(self, state_dict):
r"""Loads the optimizer state.
Args:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = deepcopy(state_dict)
# Validate the state_dict
groups = self.param_groups
saved_groups = state_dict["param_groups"]
if len(groups) != len(saved_groups):
raise ValueError(
"loaded state dict has a different number of "
"parameter groups"
)
param_lens = (len(g["params"]) for g in groups)
saved_lens = (len(g["params"]) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError(
"loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group"
)
# Update the state
id_map = {
old_id: p
for old_id, p in zip(
chain.from_iterable(g["params"] for g in saved_groups),
chain.from_iterable(g["params"] for g in groups),
)
}
def cast(param, value):
r"""Make a deep copy of value, casting all tensors to device of param."""
if isinstance(value, torch.Tensor):
# Floating-point types are a bit special here. They are the only ones
# that are assumed to always match the type of params.
if param.is_floating_point() and value.dtype != torch.uint8:
value = value.to(param.dtype)
return value
elif isinstance(value, dict):
for k, v in value.items():
if k in self.non_castable_tensor_keys:
value[k] = v.to(param.device)
else:
value[k] = cast(param, v)
return value
elif isinstance(value, container_abcs.Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
# Copy state assigned to params (and cast tensors to appropriate types).
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state = defaultdict(dict)
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
# Update parameter groups, setting their 'params' value
def update_group(group, new_group):
new_group["params"] = group["params"]
return new_group
param_groups = [
update_group(g, ng) for g, ng in zip(groups, saved_groups)
]
self.__setstate__({"state": state, "param_groups": param_groups})
def to_gpu(self):
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
self.state[p][k] = v.to(p.device)
def check_overrides(self):
for module, attr, config in self.mng.module_weight_config_triple:
pmodule = getattr(module, attr)
assert pmodule is not None
assert isinstance(pmodule, torch.Tensor) or isinstance(
                pmodule, torch.nn.Parameter
)
found = False
for gindex, group in enumerate(self.param_groups):
if found:
break
for pindex, p in enumerate(group["params"]):
if found:
break
if id(p) == id(pmodule):
# found the matching parameter
# init override
self.mng.pid2config[id(p)] = config
self.mng.index2config[
(gindex, pindex)
] = self.mng.pid2config[id(p)]
found = True
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
overflows = []
if not self.initialized:
self.check_overrides()
self.to_gpu() # needed for fairseq pure fp16 training
self.initialized = True
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
self.init_state(group, p, gindex, pindex)
self.update_step(group, p, gindex, pindex)
return loss
def get_config(self, gindex, pindex, group):
config = {}
config["betas"] = group["betas"]
config["eps"] = group["eps"]
config["weight_decay"] = group["weight_decay"]
config["lr"] = group["lr"]
config["optim_bits"] = self.args.optim_bits
config["min_8bit_size"] = self.args.min_8bit_size
config["percentile_clipping"] = self.args.percentile_clipping
config["block_wise"] = self.args.block_wise
config["max_unorm"] = self.args.max_unorm
config["skip_zeros"] = self.args.skip_zeros
if (gindex, pindex) in self.mng.index2config:
config.update(self.mng.index2config[(gindex, pindex)])
return config
def init_state(self, group, p, gindex, pindex):
raise NotImplementedError("init_state method needs to be overridden")
def update_step(self, group, p, gindex, pindex):
raise NotImplementedError(
"The update_step method needs to be overridden"
)
class Optimizer2State(Optimizer8bit):
def __init__(
self,
optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if isinstance(betas, str):
# format: '(beta1, beta2)'
betas = betas.replace("(", "").replace(")", "").strip().split(",")
betas = [float(b) for b in betas]
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(
f"Invalid beta parameter at index {i}: {betas[i]}"
)
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults, optim_bits)
if args is None:
args = {}
args["optim_bits"] = optim_bits
args["percentile_clipping"] = 100
args["min_8bit_size"] = min_8bit_size
args["percentile_clipping"] = percentile_clipping
args["block_wise"] = block_wise
args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
config = self.get_config(gindex, pindex, group)
if config["optim_bits"] == 32:
dtype = torch.float32
elif config["optim_bits"] == 8:
dtype = torch.uint8
else:
raise NotImplementedError(
f'Amount of optimizer bits not supported: {config["optim_bits"]}'
)
if p.numel() < config["min_8bit_size"]:
dtype = torch.float32
state = self.state[p]
state["step"] = 0
if dtype == torch.float32 or (
dtype == torch.uint8 and p.numel() < 4096
):
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.float32,
device=p.device,
)
state["state2"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.float32,
device=p.device,
)
elif dtype == torch.uint8:
if state["step"] == 0:
if "dynamic" not in self.name2qmap:
self.fill_qmap()
self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to(
p.device
)
self.name2qmap["udynamic"] = self.name2qmap["udynamic"].to(
p.device
)
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.uint8,
device=p.device,
)
state["qmap1"] = self.name2qmap["dynamic"]
state["state2"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.uint8,
device=p.device,
)
state["qmap2"] = self.name2qmap["udynamic"]
if config["block_wise"]:
n = p.numel()
blocks = n // 2048
blocks += 1 if n % 2048 > 0 else 0
state["absmax1"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
state["absmax2"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
else:
state["max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["max2"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max2"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
if config["percentile_clipping"] < 100:
state["gnorm_vec"] = torch.zeros((100,), device=p.device)
if config["max_unorm"] > 0.0:
state["unorm_vec"] = torch.zeros((1,), device=p.device)
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
state = self.state[p]
grad = p.grad
config = self.get_config(gindex, pindex, group)
state["step"] += 1
step = state["step"]
if config["percentile_clipping"] < 100:
current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(
grad, state["gnorm_vec"], step, config["percentile_clipping"]
)
else:
gnorm_scale = 1.0
if state["state1"].dtype == torch.float:
F.optimizer_update_32bit(
self.optimizer_name,
grad,
p,
state["state1"],
config["betas"][0],
config["eps"],
step,
config["lr"],
state["state2"],
config["betas"][1],
config["weight_decay"],
gnorm_scale,
state["unorm_vec"] if config["max_unorm"] > 0.0 else None,
max_unorm=config["max_unorm"],
skip_zeros=config["skip_zeros"],
)
elif state["state1"].dtype == torch.uint8 and not config["block_wise"]:
F.optimizer_update_8bit(
self.optimizer_name,
grad,
p,
state["state1"],
state["state2"],
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
state["qmap2"],
state["max1"],
state["max2"],
state["new_max1"],
state["new_max2"],
config["weight_decay"],
gnorm_scale=gnorm_scale,
unorm_vec=state["unorm_vec"]
if config["max_unorm"] > 0.0
else None,
max_unorm=config["max_unorm"],
)
# swap maxes
state["max1"], state["new_max1"] = state["new_max1"], state["max1"]
state["max2"], state["new_max2"] = state["new_max2"], state["max2"]
elif state["state1"].dtype == torch.uint8 and config["block_wise"]:
F.optimizer_update_8bit_blockwise(
self.optimizer_name,
grad,
p,
state["state1"],
state["state2"],
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
state["qmap2"],
state["absmax1"],
state["absmax2"],
config["weight_decay"],
gnorm_scale=gnorm_scale,
skip_zeros=config["skip_zeros"],
)
class Optimizer1State(Optimizer8bit):
def __init__(
self,
optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.0),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(
f"Invalid beta parameter at index {i}: {betas[i]}"
)
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults, optim_bits)
if args is None:
args = {}
args["optim_bits"] = optim_bits
args["percentile_clipping"] = 100
args["min_8bit_size"] = min_8bit_size
args["percentile_clipping"] = percentile_clipping
args["block_wise"] = block_wise
args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
config = self.get_config(gindex, pindex, group)
if config["optim_bits"] == 32:
dtype = torch.float32
elif config["optim_bits"] == 8:
dtype = torch.uint8
else:
raise NotImplementedError(
f'Amount of optimizer bits not supported: {config["optim_bits"]}'
)
if p.numel() < config["min_8bit_size"]:
dtype = torch.float32
state = self.state[p]
state["step"] = 0
if dtype == torch.float32 or (
dtype == torch.uint8 and p.numel() < 4096
):
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.float32,
device=p.device,
)
elif dtype == torch.uint8:
if state["step"] == 0:
if "dynamic" not in self.name2qmap:
self.fill_qmap()
self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to(
p.device
)
state["state1"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.uint8,
device=p.device,
)
state["qmap1"] = self.name2qmap["dynamic"]
if config["block_wise"]:
n = p.numel()
blocks = n // 2048
blocks += 1 if n % 2048 > 0 else 0
state["absmax1"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
else:
state["max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
if config["percentile_clipping"] < 100:
state["gnorm_vec"] = torch.zeros((100,), device=p.device)
if config["max_unorm"] > 0.0:
state["unorm_vec"] = torch.zeros((1,), device=p.device)
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
state = self.state[p]
grad = p.grad
config = self.get_config(gindex, pindex, group)
state["step"] += 1
step = state["step"]
if config["percentile_clipping"] < 100:
current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(
grad, state["gnorm_vec"], step, config["percentile_clipping"]
)
else:
gnorm_scale = 1.0
if state["state1"].dtype == torch.float:
F.optimizer_update_32bit(
self.optimizer_name,
grad,
p,
state["state1"],
config["betas"][0],
config["eps"],
step,
config["lr"],
None,
0.0,
config["weight_decay"],
gnorm_scale,
state["unorm_vec"] if config["max_unorm"] > 0.0 else None,
max_unorm=config["max_unorm"],
skip_zeros=config["skip_zeros"],
)
elif state["state1"].dtype == torch.uint8 and not config["block_wise"]:
F.optimizer_update_8bit(
self.optimizer_name,
grad,
p,
state["state1"],
None,
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
None,
state["max1"],
None,
state["new_max1"],
None,
config["weight_decay"],
gnorm_scale,
state["unorm_vec"] if config["max_unorm"] > 0.0 else None,
max_unorm=config["max_unorm"],
)
state["max1"], state["new_max1"] = state["new_max1"], state["max1"]
elif state["state1"].dtype == torch.uint8 and config["block_wise"]:
F.optimizer_update_8bit_blockwise(
self.optimizer_name,
grad,
p,
state["state1"],
None,
config["betas"][0],
config["betas"][1],
config["eps"],
step,
config["lr"],
state["qmap1"],
None,
state["absmax1"],
None,
config["weight_decay"],
gnorm_scale=gnorm_scale,
skip_zeros=config["skip_zeros"],
)
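# Minimal subclassing sketch (illustrative, not part of the original file): the concrete
# optimizers in this package (RMSprop, SGD, LARS, ...) are thin wrappers that pick an
# optimizer_name and map their hyperparameters onto the (betas, eps, ...) slots of the base
# class; the class below mirrors the existing SGD wrapper, which uses the "momentum" kernel.
class _ExampleMomentumSGD(Optimizer1State):
    def __init__(self, params, lr=1e-2, momentum=0.9, dampening=0, weight_decay=0, optim_bits=32):
        super().__init__("momentum", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits)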
|
bitsandbytes-main
|
bitsandbytes/optim/optimizer.py
|
from setuptools import setup, find_packages
setup(
name = 'anymal-belief-state-encoder-decoder-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.20',
license='MIT',
description = 'Anymal Belief-state Encoder Decoder - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/anymal-belief-state-encoder-decoder-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention gating',
'belief state',
'robotics'
],
install_requires=[
'einops>=0.4',
'einops-exts',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
anymal-belief-state-encoder-decoder-pytorch-main
|
setup.py
|
from anymal_belief_state_encoder_decoder_pytorch.networks import Student, Teacher, MLP, Anymal
from anymal_belief_state_encoder_decoder_pytorch.ppo import PPO, MockEnv
|
anymal-belief-state-encoder-decoder-pytorch-main
|
anymal_belief_state_encoder_decoder_pytorch/__init__.py
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import GRUCell
from torch.distributions import Categorical
from torch.optim import Adam
from einops import rearrange
from einops_exts import check_shape
from einops.layers.torch import Rearrange
from anymal_belief_state_encoder_decoder_pytorch.running import RunningStats
# helper functions
def exists(val):
return val is not None
# freezing of neural networks (teacher needs to be frozen)
def set_module_requires_grad_(module, requires_grad):
for param in module.parameters():
param.requires_grad = requires_grad
def freeze_all_layers_(module):
set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
set_module_requires_grad_(module, True)
# in the paper
# the network attention gates the exteroception, and then sums it to the belief state
# todo: make sure the padding is on the right side
def sum_with_zeropad(x, y):
x_dim, y_dim = x.shape[-1], y.shape[-1]
if x_dim == y_dim:
return x + y
if x_dim < y_dim:
x = F.pad(x, (y_dim - x_dim, 0))
if y_dim < x_dim:
y = F.pad(y, (x_dim - y_dim, 0))
return x + y
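def _sum_with_zeropad_example():
    # Illustrative check (not part of the original file): the shorter tensor is left-padded
    # with zeros so the trailing features of both tensors stay aligned before the sum.
    belief = torch.randn(2, 120)
    gated_extero = torch.randn(2, 96)   # e.g. 4 legs x 24 latent extero dims
    assert sum_with_zeropad(belief, gated_extero).shape == (2, 120)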
# add basic MLP
class MLP(nn.Module):
def __init__(
self,
dims,
activation = nn.LeakyReLU,
final_activation = False
):
super().__init__()
assert isinstance(dims, (list, tuple))
assert len(dims) > 2, 'must have at least 3 dimensions (input, *hiddens, output)'
dim_pairs = list(zip(dims[:-1], dims[1:]))
*dim_pairs, dim_out_pair = dim_pairs
layers = []
for dim_in, dim_out in dim_pairs:
layers.extend([
nn.Linear(dim_in, dim_out),
activation()
])
layers.append(nn.Linear(*dim_out_pair))
if final_activation:
layers.append(activation())
self.net = nn.Sequential(*layers)
def forward(self, x):
if isinstance(x, (tuple, list)):
x = torch.cat(x, dim = -1)
return self.net(x)
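def _mlp_example():
    # Illustrative sketch (not part of the original file): MLP takes a flat tuple of layer
    # widths; tuple or list inputs to forward are concatenated along the last dimension.
    net = MLP((16, 64, 64, 4))
    out = net(torch.randn(2, 16))                          # -> shape (2, 4)
    out = net((torch.randn(2, 8), torch.randn(2, 8)))      # concatenated to (2, 16) first
    return out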
class Student(nn.Module):
def __init__(
self,
num_actions,
proprio_dim = 133,
extero_dim = 52, # in paper, height samples was marked as 208, but wasn't sure if that was per leg, or (4 legs x 52) = 208
latent_extero_dim = 24,
extero_encoder_hidden = (80, 60),
belief_state_encoder_hiddens = (64, 64),
extero_gate_encoder_hiddens = (64, 64),
belief_state_dim = 120, # should be equal to teacher's extero_dim + privileged_dim (part of the GRU's responsibility is to maintain a hidden state that forms an opinion on the privileged information)
gru_num_layers = 2,
gru_hidden_size = 50,
mlp_hidden = (256, 160, 128),
num_legs = 4,
privileged_dim = 50,
privileged_decoder_hiddens = (64, 64),
extero_decoder_hiddens = (64, 64),
):
super().__init__()
assert belief_state_dim > (num_legs * latent_extero_dim)
self.num_legs = num_legs
self.proprio_dim = proprio_dim
self.extero_dim = extero_dim
# encoding of exteroception
self.extero_encoder = MLP((extero_dim, *extero_encoder_hidden, latent_extero_dim))
# GRU related parameters
gru_input_dim = (latent_extero_dim * num_legs) + proprio_dim
gru_input_dims = (gru_input_dim, *((gru_hidden_size,) * (gru_num_layers - 1)))
self.gru_cells = nn.ModuleList([GRUCell(input_dim, gru_hidden_size) for input_dim in gru_input_dims])
self.gru_hidden_size = gru_hidden_size
# belief state encoding
self.belief_state_encoder = MLP((gru_hidden_size, *belief_state_encoder_hiddens, belief_state_dim))
# attention gating of exteroception
self.to_latent_extero_attn_gate = MLP((gru_hidden_size, *extero_gate_encoder_hiddens, latent_extero_dim * num_legs))
# belief state decoder
self.privileged_decoder = MLP((gru_hidden_size, *privileged_decoder_hiddens, privileged_dim))
self.extero_decoder = MLP((gru_hidden_size, *extero_decoder_hiddens, extero_dim * num_legs))
self.to_extero_attn_gate = MLP((gru_hidden_size, *extero_gate_encoder_hiddens, extero_dim * num_legs))
# final MLP to action logits
self.to_logits = MLP((
belief_state_dim + proprio_dim,
*mlp_hidden
))
self.to_action_head = nn.Sequential(
nn.LeakyReLU(),
nn.Linear(mlp_hidden[-1], num_actions)
)
def get_gru_hiddens(self):
device = next(self.parameters()).device
        return torch.zeros((len(self.gru_cells), self.gru_hidden_size), device = device)
def forward(
self,
proprio,
extero,
hiddens = None,
        return_estimated_info = False, # for returning estimated privileged info + exteroceptive info, for reconstruction loss
return_action_categorical_dist = False
):
check_shape(proprio, 'b d', d = self.proprio_dim)
check_shape(extero, 'b n d', n = self.num_legs, d = self.extero_dim)
latent_extero = self.extero_encoder(extero)
latent_extero = rearrange(latent_extero, 'b ... -> b (...)')
# RNN
if not exists(hiddens):
prev_hiddens = (None,) * len(self.gru_cells)
else:
prev_hiddens = hiddens.unbind(dim = -2)
gru_input = torch.cat((proprio, latent_extero), dim = -1)
next_hiddens = []
for gru_cell, prev_hidden in zip(self.gru_cells, prev_hiddens):
gru_input = gru_cell(gru_input, prev_hidden)
next_hiddens.append(gru_input)
gru_output = gru_input
next_hiddens = torch.stack(next_hiddens, dim = -2)
# attention gating of exteroception
latent_extero_attn_gate = self.to_latent_extero_attn_gate(gru_output)
gated_latent_extero = latent_extero * latent_extero_attn_gate.sigmoid()
# belief state and add gated exteroception
belief_state = self.belief_state_encoder(gru_output)
belief_state = sum_with_zeropad(belief_state, gated_latent_extero)
# to action logits
belief_state_with_proprio = torch.cat((
proprio,
belief_state,
), dim = 1)
logits = self.to_logits(belief_state_with_proprio)
pi_logits = self.to_action_head(logits)
return_action = Categorical(pi_logits.softmax(dim = -1)) if return_action_categorical_dist else pi_logits
if not return_estimated_info:
return return_action, next_hiddens
# belief state decoding
# for reconstructing privileged and exteroception information from hidden belief states
recon_privileged = self.privileged_decoder(gru_output)
recon_extero = self.extero_decoder(gru_output)
extero_attn_gate = self.to_extero_attn_gate(gru_output)
gated_extero = rearrange(extero, 'b ... -> b (...)') * extero_attn_gate.sigmoid()
recon_extero = recon_extero + gated_extero
recon_extero = rearrange(recon_extero, 'b (n d) -> b n d', n = self.num_legs)
# whether to return raw policy logits or action probs wrapped with Categorical
return return_action, next_hiddens, (recon_privileged, recon_extero)
class Teacher(nn.Module):
def __init__(
self,
num_actions,
proprio_dim = 133,
extero_dim = 52, # in paper, height samples was marked as 208, but wasn't sure if that was per leg, or (4 legs x 52) = 208
latent_extero_dim = 24,
extero_encoder_hidden = (80, 60),
privileged_dim = 50,
latent_privileged_dim = 24,
privileged_encoder_hidden = (64, 32),
mlp_hidden = (256, 160, 128),
num_legs = 4
):
super().__init__()
self.num_legs = num_legs
self.proprio_dim = proprio_dim
self.extero_dim = extero_dim
self.privileged_dim = privileged_dim
self.extero_encoder = MLP((extero_dim, *extero_encoder_hidden, latent_extero_dim))
self.privileged_encoder = MLP((privileged_dim, *privileged_encoder_hidden, latent_privileged_dim))
self.to_logits = MLP((
latent_extero_dim * num_legs + latent_privileged_dim + proprio_dim,
*mlp_hidden
))
self.to_action_head = nn.Sequential(
nn.LeakyReLU(),
nn.Linear(mlp_hidden[-1], num_actions)
)
self.to_value_head = nn.Sequential(
nn.LeakyReLU(),
nn.Linear(mlp_hidden[-1], 1),
Rearrange('... 1 -> ...')
)
def forward(
self,
proprio,
extero,
privileged,
return_value_head = False,
return_action_categorical_dist = False
):
check_shape(proprio, 'b d', d = self.proprio_dim)
check_shape(extero, 'b n d', n = self.num_legs, d = self.extero_dim)
check_shape(privileged, 'b d', d = self.privileged_dim)
latent_extero = self.extero_encoder(extero)
latent_extero = rearrange(latent_extero, 'b ... -> b (...)')
latent_privileged = self.privileged_encoder(privileged)
latent = torch.cat((
proprio,
latent_extero,
latent_privileged,
), dim = -1)
logits = self.to_logits(latent)
pi_logits = self.to_action_head(logits)
if not return_value_head:
return pi_logits
value_logits = self.to_value_head(logits)
return_action = Categorical(pi_logits.softmax(dim = -1)) if return_action_categorical_dist else pi_logits
return return_action, value_logits
# manages both teacher and student under one module
class Anymal(nn.Module):
def __init__(
self,
num_actions,
proprio_dim = 133,
extero_dim = 52,
privileged_dim = 50,
num_legs = 4,
latent_extero_dim = 24,
latent_privileged_dim = 24,
teacher_extero_encoder_hidden = (80, 60),
teacher_privileged_encoder_hidden = (64, 32),
student_extero_gate_encoder_hiddens = (64, 64),
student_belief_state_encoder_hiddens = (64, 64),
student_belief_state_dim = 120,
student_gru_num_layers = 2,
student_gru_hidden_size = 50,
student_privileged_decoder_hiddens = (64, 64),
student_extero_decoder_hiddens = (64, 64),
student_extero_encoder_hidden = (80, 60),
mlp_hidden = (256, 160, 128),
recon_loss_weight = 0.5
):
super().__init__()
self.proprio_dim = proprio_dim
self.num_legs = num_legs
self.extero_dim = extero_dim
self.student = Student(
num_actions = num_actions,
proprio_dim = proprio_dim,
extero_dim = extero_dim,
latent_extero_dim = latent_extero_dim,
extero_encoder_hidden = student_extero_encoder_hidden,
belief_state_encoder_hiddens = student_belief_state_encoder_hiddens,
extero_gate_encoder_hiddens = student_extero_gate_encoder_hiddens,
belief_state_dim = student_belief_state_dim,
gru_num_layers = student_gru_num_layers,
gru_hidden_size = student_gru_hidden_size,
mlp_hidden = mlp_hidden,
num_legs = num_legs,
privileged_dim = privileged_dim,
privileged_decoder_hiddens = student_privileged_decoder_hiddens,
extero_decoder_hiddens = student_extero_decoder_hiddens,
)
self.teacher = Teacher(
num_actions = num_actions,
proprio_dim = proprio_dim,
extero_dim = extero_dim,
latent_extero_dim = latent_extero_dim,
extero_encoder_hidden = teacher_extero_encoder_hidden,
privileged_dim = privileged_dim,
latent_privileged_dim = latent_privileged_dim,
privileged_encoder_hidden = teacher_privileged_encoder_hidden,
mlp_hidden = mlp_hidden,
num_legs = num_legs
)
self.recon_loss_weight = recon_loss_weight
def get_observation_running_stats(self):
return RunningStats(self.proprio_dim), RunningStats((self.num_legs, self.extero_dim))
def init_student_with_teacher(self):
self.student.extero_encoder.load_state_dict(self.teacher.extero_encoder.state_dict())
self.student.to_logits.load_state_dict(self.teacher.to_logits.state_dict())
self.student.to_action_head.load_state_dict(self.teacher.to_action_head.state_dict())
def forward_teacher(self, *args, return_value_head = False, **kwargs):
return self.teacher(*args, return_value_head = return_value_head, **kwargs)
def forward_student(self, *args, **kwargs):
return self.student(*args, **kwargs)
# main forward for training the student with teacher as guide
def forward(
self,
proprio,
extero,
privileged,
teacher_states = None,
hiddens = None,
noise_strength = 0.1
):
self.teacher.eval()
freeze_all_layers_(self.teacher)
with torch.no_grad():
teacher_proprio, teacher_extero = teacher_states if exists(teacher_states) else (proprio, extero)
teacher_action_logits = self.forward_teacher(teacher_proprio, teacher_extero, privileged)
noised_extero = extero + torch.rand_like(extero) * noise_strength
student_action_logits, hiddens, recons = self.student(proprio, noised_extero, hiddens = hiddens, return_estimated_info = True)
# calculate reconstruction loss of privileged and denoised exteroception
(recon_privileged, recon_extero) = recons
recon_loss = F.mse_loss(recon_privileged, privileged) + F.mse_loss(recon_extero, extero)
# calculate behavior loss, which is also squared distance?
behavior_loss = F.mse_loss(teacher_action_logits, student_action_logits) # why not kl div on action probs?
loss = behavior_loss + recon_loss * self.recon_loss_weight
return loss, hiddens
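# Minimal usage sketch (illustrative, not part of the original file): one distillation step on
# random tensors using the default dimensions; a real setup would take these observations from
# the simulator instead.
if __name__ == "__main__":
    anymal = Anymal(num_actions = 10)
    anymal.init_student_with_teacher()
    proprio = torch.randn(2, 133)
    extero = torch.randn(2, 4, 52)
    privileged = torch.randn(2, 50)
    loss, hiddens = anymal(proprio, extero, privileged)
    loss.backward()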
|
anymal-belief-state-encoder-decoder-pytorch-main
|
anymal_belief_state_encoder_decoder_pytorch/networks.py
|
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from collections import deque
from einops import rearrange
from anymal_belief_state_encoder_decoder_pytorch import Anymal
class ExperienceDataset(Dataset):
def __init__(self, data):
super().__init__()
self.data = data
def __len__(self):
return len(self.data[0])
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind], self.data))
def create_dataloader(data, batch_size):
ds = ExperienceDataset(data)
return DataLoader(ds, batch_size = batch_size, drop_last = True)
class StudentTrainer(nn.Module):
def __init__(
self,
*,
anymal,
env,
epochs = 2,
lr = 5e-4,
max_timesteps = 10000,
update_timesteps = 5000,
minibatch_size = 16,
truncate_tpbtt = 10
):
super().__init__()
self.env = env
self.anymal = anymal
self.optimizer = Adam(anymal.student.parameters(), lr = lr)
self.epochs = epochs
self.max_timesteps = max_timesteps
self.update_timesteps = update_timesteps
self.minibatch_size = minibatch_size
self.truncate_tpbtt = truncate_tpbtt
self.running_proprio, self.running_extero = anymal.get_observation_running_stats()
def learn_from_memories(
self,
memories,
next_states,
noise_strength = 0.
):
device = next(self.parameters()).device
# retrieve and prepare data from memory for training
states = []
teacher_states = []
hiddens = []
dones = []
for (state, teacher_state, hidden, done) in memories:
states.append(state)
teacher_states.append(teacher_state)
hiddens.append(hidden)
dones.append(torch.Tensor([done]))
states = tuple(zip(*states))
teacher_states = tuple(zip(*teacher_states))
# convert values to torch tensors
to_torch_tensor = lambda t: torch.stack(t).to(device).detach()
states = map(to_torch_tensor, states)
teacher_states = map(to_torch_tensor, teacher_states)
hiddens = to_torch_tensor(hiddens)
dones = to_torch_tensor(dones)
# prepare dataloader for policy phase training
dl = create_dataloader([*states, *teacher_states, hiddens, dones], self.minibatch_size)
current_hiddens = self.anymal.student.get_gru_hiddens()
current_hiddens = rearrange(current_hiddens, 'l d -> 1 l d')
for _ in range(self.epochs):
for ind, (proprio, extero, privileged, teacher_proprio, teacher_extero, episode_hiddens, done) in enumerate(dl):
straight_through_hiddens = current_hiddens - current_hiddens.detach() + episode_hiddens
loss, current_hiddens = self.anymal(
proprio,
extero,
privileged,
teacher_states = (teacher_proprio, teacher_extero),
hiddens = straight_through_hiddens,
noise_strength = noise_strength
)
loss.backward(retain_graph = True)
tbptt_limit = not ((ind + 1) % self.truncate_tpbtt)
if tbptt_limit: # how far back in time should the gradients go for recurrence
self.optimizer.step()
self.optimizer.zero_grad()
current_hiddens = current_hiddens.detach()
                    # detach hiddens depending on whether it is a new episode or not
                    # todo: restructure dataloader to load one episode per batch row
maybe_detached_hiddens = []
for current_hidden, done in zip(current_hiddens.unbind(dim = 0), dones.unbind(dim = 0)):
                        maybe_detached_hiddens.append(current_hidden.detach() if done else current_hidden)
current_hiddens = torch.stack(maybe_detached_hiddens)
def forward(
self,
noise_strength = 0.
):
device = next(self.parameters()).device
time = 0
done = False
states = self.env.reset()
memories = deque([])
hidden = self.anymal.student.get_gru_hiddens()
hidden = rearrange(hidden, 'l d -> 1 l d')
self.running_proprio.clear()
self.running_extero.clear()
for timestep in range(self.max_timesteps):
time += 1
states = list(map(lambda t: t.to(device), states))
anymal_states = list(map(lambda t: rearrange(t, '... -> 1 ...'), states))
# teacher needs to have normalized observations
(proprio, extero, privileged) = states
self.running_proprio.push(proprio)
self.running_extero.push(extero)
teacher_states = (
self.running_proprio.norm(proprio),
self.running_extero.norm(extero)
)
teacher_anymal_states = list(map(lambda t: rearrange(t, '... -> 1 ...'), teacher_states))
# add states to memories
memories.append((
states,
teacher_states,
rearrange(hidden, '1 ... -> ...'),
done
))
dist, hidden = self.anymal.forward_student(
*anymal_states[:-1],
hiddens = hidden,
return_action_categorical_dist = True
)
action = dist.sample()
action_log_prob = dist.log_prob(action)
action = action.item()
next_states, _, done, _ = self.env.step(action)
states = next_states
if time % self.update_timesteps == 0:
self.learn_from_memories(memories, next_states, noise_strength = noise_strength)
memories.clear()
if done:
break
|
anymal-belief-state-encoder-decoder-pytorch-main
|
anymal_belief_state_encoder_decoder_pytorch/trainer.py
|
from collections import namedtuple, deque
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from anymal_belief_state_encoder_decoder_pytorch import Anymal
from anymal_belief_state_encoder_decoder_pytorch.networks import unfreeze_all_layers_
from einops import rearrange
# they use basic PPO for training the teacher with privileged information
# then they use noisy student training, using the trained "oracle" teacher as guide
# ppo data
Memory = namedtuple('Memory', ['state', 'action', 'action_log_prob', 'reward', 'done', 'value'])
class ExperienceDataset(Dataset):
def __init__(self, data):
super().__init__()
self.data = data
def __len__(self):
return len(self.data[0])
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind], self.data))
def create_shuffled_dataloader(data, batch_size):
ds = ExperienceDataset(data)
return DataLoader(ds, batch_size = batch_size, shuffle = True)
# ppo helper functions
def normalize(t, eps = 1e-5):
return (t - t.mean()) / (t.std() + eps)
def clipped_value_loss(values, rewards, old_values, clip):
value_clipped = old_values + (values - old_values).clamp(-clip, clip)
value_loss_1 = (value_clipped.flatten() - rewards) ** 2
value_loss_2 = (values.flatten() - rewards) ** 2
return torch.mean(torch.max(value_loss_1, value_loss_2))
# mock environment
class MockEnv(object):
def __init__(
self,
proprio_dim,
extero_dim,
privileged_dim,
num_legs = 4
):
self.proprio_dim = proprio_dim
self.extero_dim = extero_dim
self.privileged_dim = privileged_dim
self.num_legs = num_legs
def rand_state(self):
return (
torch.randn((self.proprio_dim,)),
torch.randn((self.num_legs, self.extero_dim,)),
torch.randn((self.privileged_dim,))
)
def reset(self):
return self.rand_state()
def step(self, action):
reward = torch.randn((1,))
done = torch.tensor([False])
return self.rand_state(), reward, done, None
# main ppo class
class PPO(nn.Module):
def __init__(
self,
*,
env,
anymal,
epochs = 2,
lr = 5e-4,
betas = (0.9, 0.999),
eps_clip = 0.2,
beta_s = 0.005,
value_clip = 0.4,
max_timesteps = 10000,
update_timesteps = 5000,
lam = 0.95,
gamma = 0.99,
minibatch_size = 8300
):
super().__init__()
assert isinstance(anymal, Anymal)
self.env = env
self.anymal = anymal
self.minibatch_size = minibatch_size
self.optimizer = Adam(anymal.teacher.parameters(), lr = lr, betas = betas)
self.epochs = epochs
self.max_timesteps = max_timesteps
self.update_timesteps = update_timesteps
self.beta_s = beta_s
self.eps_clip = eps_clip
self.value_clip = value_clip
self.lam = lam
self.gamma = gamma
# in paper, they said observations fed to teacher were normalized
# by running mean
self.running_proprio, self.running_extero = anymal.get_observation_running_stats()
def learn_from_memories(
self,
memories,
next_states
):
device = next(self.parameters()).device
# retrieve and prepare data from memory for training
states = []
actions = []
old_log_probs = []
rewards = []
masks = []
values = []
for mem in memories:
states.append(mem.state)
actions.append(torch.tensor(mem.action))
old_log_probs.append(mem.action_log_prob)
rewards.append(mem.reward)
masks.append(1 - float(mem.done))
values.append(mem.value)
states = tuple(zip(*states))
# calculate generalized advantage estimate
next_states = map(lambda t: t.to(device), next_states)
next_states = map(lambda t: rearrange(t, '... -> 1 ...'), next_states)
_, next_value = self.anymal.forward_teacher(*next_states, return_value_head = True)
next_value = next_value.detach()
values = values + [next_value]
returns = []
gae = 0
for i in reversed(range(len(rewards))):
delta = rewards[i] + self.gamma * values[i + 1] * masks[i] - values[i]
gae = delta + self.gamma * self.lam * masks[i] * gae
returns.insert(0, gae + values[i])
# convert values to torch tensors
to_torch_tensor = lambda t: torch.stack(t).to(device).detach()
states = map(to_torch_tensor, states)
actions = to_torch_tensor(actions)
old_log_probs = to_torch_tensor(old_log_probs)
old_values = to_torch_tensor(values[:-1])
old_values = rearrange(old_values, '... 1 -> ...')
rewards = torch.tensor(returns).float().to(device)
# prepare dataloader for policy phase training
dl = create_shuffled_dataloader([*states, actions, old_log_probs, rewards, old_values], self.minibatch_size)
# policy phase training, similar to original PPO
for _ in range(self.epochs):
for proprio, extero, privileged, actions, old_log_probs, rewards, old_values in dl:
dist, values = self.anymal.forward_teacher(
proprio, extero, privileged,
return_value_head = True,
return_action_categorical_dist = True
)
action_log_probs = dist.log_prob(actions)
entropy = dist.entropy()
ratios = (action_log_probs - old_log_probs).exp()
advantages = normalize(rewards - old_values.detach())
surr1 = ratios * advantages
surr2 = ratios.clamp(1 - self.eps_clip, 1 + self.eps_clip) * advantages
policy_loss = - torch.min(surr1, surr2) - self.beta_s * entropy
value_loss = clipped_value_loss(values, rewards, old_values, self.value_clip)
(policy_loss.mean() + value_loss.mean()).backward()
self.optimizer.step()
self.optimizer.zero_grad()
    # does one episode's worth of learning
def forward(self):
device = next(self.parameters()).device
unfreeze_all_layers_(self.anymal)
time = 0
states = self.env.reset() # states assumed to be (proprioception, exteroception, privileged information)
memories = deque([])
self.running_proprio.clear()
self.running_extero.clear()
for timestep in range(self.max_timesteps):
time += 1
states = list(map(lambda t: t.to(device), states))
proprio, extero, privileged = states
# update running means for observations, for teacher
self.running_proprio.push(proprio)
self.running_extero.push(extero)
# normalize observation states for teacher (proprio and extero)
states = (
self.running_proprio.norm(proprio),
self.running_extero.norm(extero),
privileged
)
anymal_states = list(map(lambda t: rearrange(t, '... -> 1 ...'), states))
dist, values = self.anymal.forward_teacher(
*anymal_states,
return_value_head = True,
return_action_categorical_dist = True
)
action = dist.sample()
action_log_prob = dist.log_prob(action)
action = action.item()
next_states, reward, done, _ = self.env.step(action)
memory = Memory(states, action, action_log_prob, reward, done, values)
memories.append(memory)
states = next_states
if time % self.update_timesteps == 0:
self.learn_from_memories(memories, next_states)
memories.clear()
if done:
break
print('trained for 1 episode')
|
anymal-belief-state-encoder-decoder-pytorch-main
|
anymal_belief_state_encoder_decoder_pytorch/ppo.py
|
import torch
from torch import nn
class RunningStats(nn.Module):
def __init__(self, shape, eps = 1e-5):
super().__init__()
shape = shape if isinstance(shape, tuple) else (shape,)
self.shape = shape
self.eps = eps
self.n = 0
self.register_buffer('old_mean', torch.zeros(shape), persistent = False)
self.register_buffer('new_mean', torch.zeros(shape), persistent = False)
self.register_buffer('old_std', torch.zeros(shape), persistent = False)
self.register_buffer('new_std', torch.zeros(shape), persistent = False)
def clear(self):
self.n = 0
def push(self, x):
self.n += 1
if self.n == 1:
self.old_mean.copy_(x.data)
self.new_mean.copy_(x.data)
self.old_std.zero_()
self.new_std.zero_()
return
self.new_mean.copy_(self.old_mean + (x - self.old_mean) / self.n)
self.new_std.copy_(self.old_std + (x - self.old_mean) * (x - self.new_mean))
self.old_mean.copy_(self.new_mean)
self.old_std.copy_(self.new_std)
def mean(self):
return self.new_mean if self.n else torch.zeros_like(self.new_mean)
def variance(self):
return (self.new_std / (self.n - 1)) if self.n > 1 else torch.zeros_like(self.new_std)
def rstd(self):
return torch.rsqrt(self.variance() + self.eps)
def norm(self, x):
return (x - self.mean()) * self.rstd()
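# usage sketch: a minimal, hedged example of normalizing observations with the
# RunningStats module above; the shape and number of pushes are illustrative only
if __name__ == '__main__':
    stats = RunningStats(4)
    for _ in range(10):
        stats.push(torch.randn(4))
    sample = torch.randn(4)
    normed = stats.norm(sample)  # (sample - running mean) * rsqrt(running variance + eps)
    print(normed.shape)          # torch.Size([4])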
|
anymal-belief-state-encoder-decoder-pytorch-main
|
anymal_belief_state_encoder_decoder_pytorch/running.py
|
from setuptools import find_packages, setup
setup(
name="PaLM-pytorch",
packages=find_packages(exclude=[]),
version="0.2.2",
license="MIT",
description="PaLM: Scaling Language Modeling with Pathways - Pytorch",
author="Phil Wang",
author_email="lucidrains@gmail.com",
long_description_content_type = 'text/markdown',
url="https://github.com/lucidrains/PaLM-pytorch",
keywords=[
"artificial general intelligence",
"deep learning",
"transformers",
"attention mechanism",
],
install_requires=[
"einops>=0.4",
"torch>=1.6",
"triton>=2.0dev"
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
|
PaLM-pytorch-main
|
setup.py
|
import gzip
import random
import numpy as np
import torch
import torch.optim as optim
import tqdm
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from palm_pytorch.triton import PaLM
from palm_pytorch.autoregressive_wrapper import AutoregressiveWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = PaLM(num_tokens=256, dim=512, depth=8)
model = AutoregressiveWrapper(model, max_seq_len=SEQ_LEN)
model.cuda()
# prepare enwik8 data
with gzip.open("./examples/enwik8_deepspeed/data/enwik8.gz") as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f"training loss: {loss.item()}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
|
PaLM-pytorch-main
|
train.py
|
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum, nn
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("mask", None, persistent=False)
self.register_buffer("pos_emb", None, persistent=False)
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n]
pos_emb = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def forward(self, x):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
# transformer
def PaLM(*, dim, num_tokens, depth, dim_head=64, heads=8, ff_mult=4):
net = nn.Sequential(
nn.Embedding(num_tokens, dim),
*[
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult))
for _ in range(depth)
],
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
net[-1].weight = net[0].weight
nn.init.normal_(net[0].weight, std=0.02)
return net
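# usage sketch: a small CPU-sized check of the PaLM constructor defined above;
# the token count and dimensions are illustrative, not the paper's configuration
if __name__ == '__main__':
    palm = PaLM(num_tokens = 256, dim = 128, depth = 2, dim_head = 32, heads = 4)
    tokens = torch.randint(0, 256, (1, 64))
    logits = palm(tokens)
    print(logits.shape)  # torch.Size([1, 64, 256])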
|
PaLM-pytorch-main
|
palm_pytorch/palm_pytorch.py
|
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres=0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float("-inf"))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, max_seq_len=2048, pad_value=0):
super().__init__()
self.max_seq_len = max_seq_len
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token=None,
temperature=1.0,
filter_thres=0.9,
**kwargs
):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
logits = self.net(out, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres=filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_token = out == eos_token
if is_eos_token.any(dim=-1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_token, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim=-1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
logits = self.net(x_inp, **kwargs)
return F.cross_entropy(rearrange(logits, "b c n -> b n c"), x_labels)
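# usage sketch: a hedged example of wrapping the PaLM model from this repository;
# forward returns the autoregressive cross entropy loss, generate samples new tokens
if __name__ == '__main__':
    from palm_pytorch import PaLM
    net = PaLM(num_tokens = 256, dim = 128, depth = 2)
    model = AutoregressiveWrapper(net, max_seq_len = 64)
    seq = torch.randint(0, 256, (1, 64))
    loss = model(seq)   # predicts seq[:, 1:] from seq[:, :-1]
    loss.backward()
    sampled = model.generate(seq[:, :16], 16)  # (1, 16) newly sampled tokens
    print(loss.item(), sampled.shape)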
|
PaLM-pytorch-main
|
palm_pytorch/autoregressive_wrapper.py
|
from palm_pytorch.palm_pytorch import PaLM
|
PaLM-pytorch-main
|
palm_pytorch/__init__.py
|
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from torch import einsum, nn
from math import log2, floor
def exists(val):
return val is not None
# normalization
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# AliBi
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, **kwargs):
super().__init__()
self.heads = heads
slopes = torch.Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(i, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** floor(log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
def forward(self, qk_sim):
h, i, j, device = *qk_sim.shape[-3:], qk_sim.device
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied))
self.register_buffer('bias', bias, persistent=False)
return bias
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = RMSNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.alibi_pos_biases = AlibiPositionalBias(heads = self.heads)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask
self.register_buffer("mask", None, persistent=False)
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.triu(torch.ones((n, n), device=device, dtype=torch.bool), 1)
self.register_buffer("mask", mask, persistent=False)
return mask
def forward(self, x):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys or values (shared key / values is a personal discovery of mine), and feedforward inner
q, kv, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h = h)
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, kv)
# add the alibi bias
sim = sim + self.alibi_pos_biases(sim)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
out = einsum("b h i j, b j d -> b h i d", attn, kv)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
merge_heads = self.attn_out(out) + self.ff_out(ff)
return merge_heads
def PaLM(*, dim, num_tokens, depth, dim_head=64, heads=8, ff_mult=4):
net = nn.Sequential(
nn.Embedding(num_tokens, dim),
*[Residual(ParallelTransformerBlock(dim, dim_head, heads, ff_mult)) for _ in range(depth)],
RMSNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
net[-1].weight = net[0].weight
nn.init.normal_(net[0].weight, std=0.02)
return net
# For testing functionality of the model
if __name__ == "__main__":
palm = PaLM(
num_tokens = 20000,
dim = 512,
depth = 1,
heads = 8,
dim_head = 64,
)
tokens = torch.randint(0, 20000, (1, 2048))
logits = palm(tokens) # (1, 2048, 20000)
n_params_torch = sum(
p.numel() for p in palm.parameters() if p.requires_grad
)
print(f"Number of parameters in torch model: {n_params_torch}")
|
PaLM-pytorch-main
|
palm_pytorch/palm_lite.py
|
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum, nn
from palm_pytorch.triton.softmax import causal_softmax
from palm_pytorch.triton.layernorm import layernorm_without_bias
# normalization
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return layernorm_without_bias(x, x.shape[-1:], self.gamma)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
# rotary positional embedding
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# feedforward
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(SwiGLU(), nn.Linear(ff_inner_dim, dim, bias=False))
# for caching of rotary embeddings
self.register_buffer("pos_emb", None, persistent=False)
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n]
pos_emb = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def forward(self, x):
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# attention
attn = causal_softmax(sim)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
# transformer
def PaLM(*, dim, num_tokens, depth, dim_head=64, heads=8, ff_mult=4):
net = nn.Sequential(
nn.Embedding(num_tokens, dim),
*[
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult))
for _ in range(depth)
],
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
net[-1].weight = net[0].weight
nn.init.normal_(net[0].weight, std=0.02)
return net
|
PaLM-pytorch-main
|
palm_pytorch/triton/palm.py
|
# taken from Phil Tillet's layernorm tutorial for Triton
# Triton - https://triton-lang.org
# Layernorm tutorial - https://triton-lang.org/master/getting-started/tutorials/05-layer-norm.html#sphx-glr-getting-started-tutorials-05-layer-norm-py
# modified to be bias-less
import torch
import triton
import triton.language as tl
@triton.jit
def _layer_norm_fwd_fused(X, Y, W, M, V, stride, N,
BLOCK_SIZE: tl.constexpr):
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE)
mask = cols < N
X += row * stride
Y += row * stride
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
mean = tl.sum(x, axis=0) / N
xmean = tl.where(mask, x - mean, 0.)
var = tl.sum(xmean * xmean, axis=0) / N
rstd = 1 / tl.sqrt(var + 1e-5)
xhat = xmean * rstd
tl.store(M + row, mean)
tl.store(V + row, rstd)
w = tl.load(W + cols, mask=mask)
y = xhat * w
tl.store(Y + cols, y, mask=mask)
@triton.jit
def _layer_norm_bwd_dx_fused(DX, DY, DW, X, W, M, V, Lock, stride, N,
GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
X += row * stride
DY += row * stride
DX += row * stride
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
DW = DW + lock_id * N + cols
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
w = tl.load(W + cols, mask=mask).to(tl.float32)
mean = tl.load(M + row)
rstd = tl.load(V + row)
xhat = (x - mean) * rstd
wdy = w * dy
xhat = tl.where(mask, xhat, 0.)
wdy = tl.where(mask, wdy, 0.)
mean1 = tl.sum(xhat * wdy, axis=0) / N
mean2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * mean1 + mean2)) * rstd
tl.store(DX + cols, dx, mask=mask)
partial_dw = (dy * xhat).to(w.dtype)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
if count == 0:
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(DW, mask=mask)
tl.store(DW, partial_dw, mask=mask)
tl.atomic_xchg(Lock, 0)
@triton.jit
def _layer_norm_bwd_dw(DW, FINAL_DW, M, N,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=mask, other=0.)
sum_dw = tl.sum(dw, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
class LayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, x, normalized_shape, weight):
y = torch.empty_like(x)
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
mean = torch.empty((M, ), dtype=torch.float32, device='cuda')
rstd = torch.empty((M, ), dtype=torch.float32, device='cuda')
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE:
raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
_layer_norm_fwd_fused[(M,)](x_arg, y, weight, mean, rstd,
x_arg.stride(0), N,
BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps)
ctx.save_for_backward(x, weight, mean, rstd)
ctx.BLOCK_SIZE = BLOCK_SIZE
ctx.num_warps = num_warps
return y
@staticmethod
def backward(ctx, dy):
x, w, m, v = ctx.saved_tensors
N = w.shape[0]
GROUP_SIZE_M = 64
if N <= 8192: GROUP_SIZE_M = 96
if N <= 4096: GROUP_SIZE_M = 128
if N <= 1024: GROUP_SIZE_M = 256
locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device='cuda')
_dw = torch.empty((GROUP_SIZE_M, w.shape[0]), dtype=x.dtype, device=w.device)
dw = torch.empty((w.shape[0],), dtype=w.dtype, device=w.device)
dx = torch.empty_like(dy)
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
_layer_norm_bwd_dx_fused[(M,)](dx, dy, _dw, x, w, m, v, locks,
x_arg.stride(0), N,
BLOCK_SIZE_N=ctx.BLOCK_SIZE,
GROUP_SIZE_M=GROUP_SIZE_M,
num_warps=ctx.num_warps)
grid = lambda meta: [triton.cdiv(N, meta['BLOCK_SIZE_N'])]
_layer_norm_bwd_dw[grid](_dw, dw, GROUP_SIZE_M, N,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=128)
return dx, None, dw, None
layernorm_without_bias = LayerNorm.apply
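# usage sketch: a sanity check of the bias-less layernorm against torch's reference
# implementation; assumes a CUDA device and a working triton install, since the
# kernels above allocate their statistics on 'cuda'
if __name__ == '__main__':
    import torch.nn.functional as F
    x = torch.randn(4, 512, device = 'cuda', requires_grad = True)
    w = torch.ones(512, device = 'cuda', requires_grad = True)
    y = layernorm_without_bias(x, (512,), w)
    y_ref = F.layer_norm(x, (512,), w, None, 1e-5)
    assert torch.allclose(y, y_ref, atol = 1e-4)
    y.sum().backward()
    print(x.grad.shape, w.grad.shape)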
|
PaLM-pytorch-main
|
palm_pytorch/triton/layernorm.py
|
from palm_pytorch.triton.palm import PaLM
|
PaLM-pytorch-main
|
palm_pytorch/triton/__init__.py
|
import torch
from torch import autograd
import torch.nn.functional as F
import triton
import triton.language as tl
from triton_transformer.utils import calc_num_warps
@triton.jit
def softmax_kernel_forward(
output_ptr,
input_ptr,
input_row_stride,
output_row_stride,
n_cols,
BLOCK_SIZE: tl.constexpr
):
row_idx = tl.program_id(0)
row_start_ptr = input_ptr + row_idx * input_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
mask = col_offsets < n_cols
row = tl.load(input_ptrs, mask = mask, other = -float('inf'))
causal_mask = col_offsets > (row_idx % n_cols)
row = row + tl.where(causal_mask, -float('inf'), 0.)
row_minus_max = row - tl.max(row, axis=0)
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask = mask)
@triton.jit
def softmax_kernel_backward(
output_ptr,
input_ptr,
grad_ptr,
grad_row_stride,
input_row_stride,
output_row_stride,
n_cols,
BLOCK_SIZE: tl.constexpr
):
row_idx = tl.program_id(0)
row_start_ptr = input_ptr + row_idx * input_row_stride
grad_row_start_ptr = grad_ptr + row_idx * grad_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
grad_ptrs = grad_row_start_ptr + col_offsets
mask = col_offsets < n_cols
probs_row = tl.load(input_ptrs, mask = mask, other = 0.)
grad_row = tl.load(grad_ptrs, mask = mask, other = 0.)
dxhat = probs_row * grad_row
softmax_grad_output = dxhat - probs_row * tl.sum(dxhat, axis = 0)
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_grad_output, mask = mask)
class _softmax(autograd.Function):
@classmethod
def forward(self, ctx, x):
shape = x.shape
x = x.view(-1, shape[-1])
n_rows, n_cols = x.shape
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = calc_num_warps(BLOCK_SIZE)
y = torch.empty_like(x)
softmax_kernel_forward[(n_rows,)](
y,
x,
x.stride(0),
y.stride(0),
n_cols,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE,
)
if x.requires_grad:
ctx.save_for_backward(y)
return y.view(*shape)
@classmethod
def backward(self, ctx, grad_probs):
shape = grad_probs.shape
probs, = ctx.saved_tensors
grad_probs = grad_probs.view(-1, grad_probs.shape[-1])
n_rows, n_cols = grad_probs.shape
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = calc_num_warps(BLOCK_SIZE)
dx = torch.empty_like(probs)
softmax_kernel_backward[(n_rows,)](
dx,
probs,
grad_probs,
grad_probs.stride(0),
probs.stride(0),
dx.stride(0),
n_cols,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE
)
return dx.view(*shape), None
causal_softmax = _softmax.apply
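# usage sketch: compares the fused causal softmax above with an explicitly masked
# softmax in torch; assumes a CUDA device with triton, and the last two dimensions
# must be square for the kernel's row-index based causal masking to line up
if __name__ == '__main__':
    sim = torch.randn(2, 4, 16, 16, device = 'cuda')
    out = causal_softmax(sim)
    mask = torch.ones(16, 16, device = 'cuda', dtype = torch.bool).triu(1)
    ref = sim.masked_fill(mask, -torch.finfo(sim.dtype).max).softmax(dim = -1)
    assert torch.allclose(out, ref, atol = 1e-4)
    print('causal softmax matches masked softmax')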
|
PaLM-pytorch-main
|
palm_pytorch/triton/softmax.py
|
import argparse
import deepspeed
from palm_pytorch import PaLM
from palm_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from einops import rearrange
from torch import einsum, nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
def add_argument():
parser=argparse.ArgumentParser(description='enwik8')
parser.add_argument('--with_cuda', default=False, action='store_true',
help='use CPU in case there\'s no GPU support')
parser.add_argument('--use_ema', default=False, action='store_true',
help='whether use exponential moving average')
parser.add_argument('-b', '--batch_size', default=32, type=int,
help='mini-batch size (default: 32)')
parser.add_argument('-e', '--epochs', default=30, type=int,
help='number of total epochs (default: 30)')
parser.add_argument('--local_rank', type=int, default=-1,
help='local rank passed from distributed launcher')
parser = deepspeed.add_config_arguments(parser)
args=parser.parse_args()
return args
# constants
EPOCHS = 20
GRADIENT_ACCUMULATE_EVERY = 4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = PaLM(num_tokens = 256, dim = 512, depth = 8)
model = AutoregressiveWrapper(model, max_seq_len=2048)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# setup deepspeed
cmd_args = add_argument()
model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset)
# training
for _ in range(EPOCHS):
for i, data in enumerate(trainloader):
model_engine.train()
data = data.to(model_engine.local_rank)
loss = model_engine(data)
model_engine.backward(loss)
torch.nn.utils.clip_grad_norm_(model_engine.parameters(), 0.5)
model_engine.step()
print(loss.item() * GRADIENT_ACCUMULATE_EVERY)
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
inp = random.choice(val_dataset)[:-1]
loss = model(inp[None, :].cuda())
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
            print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp[None, ...].cuda(), GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
|
PaLM-pytorch-main
|
examples/enwik8_deepspeed/train.py
|
from setuptools import setup, find_packages
setup(
name = 'bottleneck-transformer-pytorch',
packages = find_packages(),
version = '0.1.4',
license='MIT',
description = 'Bottleneck Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/bottleneck-transformer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'image classification',
'vision'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
bottleneck-transformer-pytorch-main
|
setup.py
|
from bottleneck_transformer_pytorch.bottleneck_transformer_pytorch import BottleStack, BottleBlock
|
bottleneck-transformer-pytorch-main
|
bottleneck_transformer_pytorch/__init__.py
|
import math
import torch
from torch import nn, einsum
from einops import rearrange
# translated from tensorflow code
# https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
# positional embedding helpers
def pair(x):
return (x, x) if not isinstance(x, tuple) else x
def expand_dim(t, dim, k):
t = t.unsqueeze(dim = dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def rel_to_abs(x):
b, h, l, _, device, dtype = *x.shape, x.device, x.dtype
dd = {'device': device, 'dtype': dtype}
col_pad = torch.zeros((b, h, l, 1), **dd)
x = torch.cat((x, col_pad), dim = 3)
flat_x = rearrange(x, 'b h l c -> b h (l c)')
flat_pad = torch.zeros((b, h, l - 1), **dd)
flat_x_padded = torch.cat((flat_x, flat_pad), dim = 2)
final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1)
final_x = final_x[:, :, :l, (l-1):]
return final_x
def relative_logits_1d(q, rel_k):
b, heads, h, w, dim = q.shape
logits = einsum('b h x y d, r d -> b h x y r', q, rel_k)
logits = rearrange(logits, 'b h x y r -> b (h x) y r')
logits = rel_to_abs(logits)
logits = logits.reshape(b, heads, h, w, w)
logits = expand_dim(logits, dim = 3, k = h)
return logits
# positional embeddings
class AbsPosEmb(nn.Module):
def __init__(
self,
fmap_size,
dim_head
):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.height = nn.Parameter(torch.randn(height, dim_head) * scale)
self.width = nn.Parameter(torch.randn(width, dim_head) * scale)
def forward(self, q):
emb = rearrange(self.height, 'h d -> h () d') + rearrange(self.width, 'w d -> () w d')
emb = rearrange(emb, ' h w d -> (h w) d')
logits = einsum('b h i d, j d -> b h i j', q, emb)
return logits
class RelPosEmb(nn.Module):
def __init__(
self,
fmap_size,
dim_head
):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.fmap_size = fmap_size
self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale)
self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale)
def forward(self, q):
h, w = self.fmap_size
q = rearrange(q, 'b h (x y) d -> b h x y d', x = h, y = w)
rel_logits_w = relative_logits_1d(q, self.rel_width)
rel_logits_w = rearrange(rel_logits_w, 'b h x i y j-> b h (x y) (i j)')
q = rearrange(q, 'b h x y d -> b h y x d')
rel_logits_h = relative_logits_1d(q, self.rel_height)
rel_logits_h = rearrange(rel_logits_h, 'b h x i y j -> b h (y x) (j i)')
return rel_logits_w + rel_logits_h
# classes
class Attention(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
heads = 4,
dim_head = 128,
rel_pos_emb = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)
rel_pos_class = AbsPosEmb if not rel_pos_emb else RelPosEmb
self.pos_emb = rel_pos_class(fmap_size, dim_head)
def forward(self, fmap):
heads, b, c, h, w = self.heads, *fmap.shape
q, k, v = self.to_qkv(fmap).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
sim = sim + self.pos_emb(q)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return out
class BottleBlock(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
dim_out,
proj_factor,
downsample,
heads = 4,
dim_head = 128,
rel_pos_emb = False,
activation = nn.ReLU()
):
super().__init__()
# shortcut
if dim != dim_out or downsample:
kernel_size, stride, padding = (3, 2, 1) if downsample else (1, 1, 0)
self.shortcut = nn.Sequential(
nn.Conv2d(dim, dim_out, kernel_size, stride = stride, padding = padding, bias = False),
nn.BatchNorm2d(dim_out),
activation
)
else:
self.shortcut = nn.Identity()
# contraction and expansion
attn_dim_in = dim_out // proj_factor
attn_dim_out = heads * dim_head
self.net = nn.Sequential(
nn.Conv2d(dim, attn_dim_in, 1, bias = False),
nn.BatchNorm2d(attn_dim_in),
activation,
Attention(
dim = attn_dim_in,
fmap_size = fmap_size,
heads = heads,
dim_head = dim_head,
rel_pos_emb = rel_pos_emb
),
nn.AvgPool2d((2, 2)) if downsample else nn.Identity(),
nn.BatchNorm2d(attn_dim_out),
activation,
nn.Conv2d(attn_dim_out, dim_out, 1, bias = False),
nn.BatchNorm2d(dim_out)
)
# init last batch norm gamma to zero
nn.init.zeros_(self.net[-1].weight)
# final activation
self.activation = activation
def forward(self, x):
shortcut = self.shortcut(x)
x = self.net(x)
x = x + shortcut
return self.activation(x)
# main bottle stack
class BottleStack(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
dim_out = 2048,
proj_factor = 4,
num_layers = 3,
heads = 4,
dim_head = 128,
downsample = True,
rel_pos_emb = False,
activation = nn.ReLU()
):
super().__init__()
fmap_size = pair(fmap_size)
self.dim = dim
self.fmap_size = fmap_size
layers = []
for i in range(num_layers):
is_first = i == 0
dim = (dim if is_first else dim_out)
layer_downsample = is_first and downsample
fmap_divisor = (2 if downsample and not is_first else 1)
layer_fmap_size = tuple(map(lambda t: t // fmap_divisor, fmap_size))
layers.append(BottleBlock(
dim = dim,
fmap_size = layer_fmap_size,
dim_out = dim_out,
proj_factor = proj_factor,
heads = heads,
dim_head = dim_head,
downsample = layer_downsample,
rel_pos_emb = rel_pos_emb,
activation = activation
))
self.net = nn.Sequential(*layers)
def forward(self, x):
_, c, h, w = x.shape
assert c == self.dim, f'channels of feature map {c} must match channels given at init {self.dim}'
assert h == self.fmap_size[0] and w == self.fmap_size[1], f'height and width ({h} {w}) of feature map must match the fmap_size given at init {self.fmap_size}'
return self.net(x)
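# usage sketch: dropping the BottleStack above onto a random feature map standing
# in for a ResNet backbone output; all sizes here are illustrative
if __name__ == '__main__':
    layer = BottleStack(
        dim = 256,
        fmap_size = 32,        # 32 x 32 incoming feature map
        dim_out = 1024,
        proj_factor = 4,
        downsample = True,
        heads = 4,
        dim_head = 64,
        rel_pos_emb = True,
        activation = nn.ReLU()
    )
    fmap = torch.randn(2, 256, 32, 32)
    out = layer(fmap)
    print(out.shape)  # torch.Size([2, 1024, 16, 16])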
|
bottleneck-transformer-pytorch-main
|
bottleneck_transformer_pytorch/bottleneck_transformer_pytorch.py
|
from setuptools import setup, find_packages
setup(
name = 'omninet-pytorch',
packages = find_packages(),
version = '0.0.6',
license='MIT',
description = 'Omninet - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/omninet-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformer',
'attention mechanism'
],
install_requires=[
'einops>=0.3',
'torch>=1.6',
'performer-pytorch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
omninet-pytorch-main
|
setup.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# use Performer, as it had the best reported numbers
from performer_pytorch import SelfAttention as PerformerAttention
from performer_pytorch import FastAttention  # referenced below in check_redraw_projections
# helpers
def exists(val):
return val is not None
def get_module_device(module):
return next(module.parameters()).device
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
dropout = 0.,
causal = False
):
super().__init__()
inner_dim = heads * dim_head
self.heads = heads
self.scale = dim_head ** -0.5
self.causal = causal
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, mask = None):
b, n, d, h, device = *x.shape, self.heads, x.device
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
sim.masked_fill_(~mask, max_neg_value)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
causal_mask = rearrange(causal_mask, 'i j -> () i j')
sim.masked_fill_(causal_mask, max_neg_value)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
return self.to_out(out)
# main class
class Omninet(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
pool_layer_tokens_every = 2,
attn_dropout = 0.,
ff_dropout = 0.,
feature_redraw_interval = 1000
):
super().__init__()
layers = nn.ModuleList([])
for ind in range(depth):
num_layers = ind + 1
should_pool = num_layers % pool_layer_tokens_every
layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim = dim, dropout = ff_dropout)),
PerformerAttention(dim = dim, heads= heads, dim_head = dim_head) if should_pool else None
]))
self.layers = layers
self.pool_num_layers = pool_layer_tokens_every
# keep track of redrawing projection matrix for Performer
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
def fix_projection_matrices_(self):
self.feature_redraw_interval = None
def check_redraw_projections(self):
if not self.training:
return
if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(self)
fast_attentions = find_modules(self, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def forward(self, x, mask = None):
self.check_redraw_projections()
pool_num_layers = self.pool_num_layers
hiddens = [x]
for attn, ff, efficient_attn in self.layers:
x = attn(x, mask = mask) + x
x = ff(x) + x
hiddens.append(x)
if exists(efficient_attn):
layers_to_pool = hiddens[-pool_num_layers:]
num_layers = len(layers_to_pool)
all_tokens = torch.stack(layers_to_pool)
all_tokens = rearrange(all_tokens, 'l b n d -> b (n l) d')
pool_attn_mask = None
if exists(mask):
pool_attn_mask = repeat(mask, 'b n -> b (n l)', l = num_layers)
attended_tokens = efficient_attn(all_tokens, mask = pool_attn_mask)
attended_tokens = rearrange(attended_tokens, 'b n c -> b c n')
pooled_tokens = F.max_pool1d(attended_tokens, kernel_size = num_layers, stride = num_layers)
x += rearrange(pooled_tokens, 'b c n -> b n c')
return x
# causal case is sufficiently different to warrant its own class
# use layer axial attention for now, until I rewrite the linear attention cuda kernel
class OmninetCausal(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
pool_layer_tokens_every = 2,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.layer_pos_emb = nn.Parameter(torch.randn(depth + 1, dim))
layers = nn.ModuleList([])
for ind in range(depth):
num_layers = ind + 1
should_pool = num_layers % pool_layer_tokens_every
layers.append(nn.ModuleList([
PreNorm(dim, Attention(causal = True, dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim = dim, dropout = ff_dropout)),
Attention(dim = dim, heads= heads, dim_head = dim_head) if should_pool else None
]))
self.layers = layers
self.pool_num_layers = pool_layer_tokens_every
def forward(self, x, mask = None):
pool_num_layers = self.pool_num_layers
b = x.shape[0]
pos_embs = rearrange(self.layer_pos_emb, 'n d -> () n d')
x += pos_embs[:, 0]
hiddens = [x]
for ind, (attn, ff, layer_axial_attn) in enumerate(self.layers):
x = attn(x, mask = mask) + x
x = ff(x) + x
x += pos_embs[:, ind + 1]
hiddens.append(x)
if exists(layer_axial_attn):
layers_to_pool = hiddens[-pool_num_layers:]
num_layers = len(layers_to_pool)
layer_tokens = rearrange(torch.stack(layers_to_pool), 'l b n d -> (b n) l d')
attended_tokens = layer_axial_attn(layer_tokens)
attended_tokens = rearrange(attended_tokens, '(b n) l d -> b n l d', b = b)
pooled_attended_tokens = attended_tokens.max(dim = -2).values
x += pooled_attended_tokens
return x
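# usage sketch: token-level example of the non-causal Omninet above with a boolean
# mask over valid positions; relies on performer-pytorch (imported at the top) and
# uses a batch size of 1, with all sizes kept small and arbitrary
if __name__ == '__main__':
    omninet = Omninet(
        dim = 128,
        depth = 4,
        dim_head = 32,
        heads = 4,
        pool_layer_tokens_every = 2
    )
    x = torch.randn(1, 256, 128)
    mask = torch.ones(1, 256).bool()
    out = omninet(x, mask = mask)
    print(out.shape)  # torch.Size([1, 256, 128])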
|
omninet-pytorch-main
|
omninet_pytorch/omninet_pytorch.py
|
from omninet_pytorch.omninet_pytorch import Omninet, OmninetCausal
|
omninet-pytorch-main
|
omninet_pytorch/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'nystrom-attention',
packages = find_packages(),
version = '0.0.11',
license='MIT',
description = 'Nystrom Attention - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/nystrom-attention',
keywords = [
'artificial intelligence',
'attention mechanism'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
nystrom-attention-main
|
setup.py
|
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# helper functions
def exists(val):
return val is not None
def moore_penrose_iter_pinv(x, iters = 6):
device = x.device
abs_x = torch.abs(x)
col = abs_x.sum(dim = -1)
row = abs_x.sum(dim = -2)
z = rearrange(x, '... i j -> ... j i') / (torch.max(col) * torch.max(row))
I = torch.eye(x.shape[-1], device = device)
I = rearrange(I, 'i j -> () i j')
for _ in range(iters):
xz = x @ z
z = 0.25 * z @ (13 * I - (xz @ (15 * I - (xz @ (7 * I - xz)))))
return z
# main attention class
class NystromAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
num_landmarks = 256,
pinv_iterations = 6,
residual = True,
residual_conv_kernel = 33,
eps = 1e-8,
dropout = 0.
):
super().__init__()
self.eps = eps
inner_dim = heads * dim_head
self.num_landmarks = num_landmarks
self.pinv_iterations = pinv_iterations
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
self.residual = residual
if residual:
kernel_size = residual_conv_kernel
padding = residual_conv_kernel // 2
self.res_conv = nn.Conv2d(heads, heads, (kernel_size, 1), padding = (padding, 0), groups = heads, bias = False)
def forward(self, x, mask = None, return_attn = False):
b, n, _, h, m, iters, eps = *x.shape, self.heads, self.num_landmarks, self.pinv_iterations, self.eps
# pad so that sequence can be evenly divided into m landmarks
remainder = n % m
if remainder > 0:
padding = m - (n % m)
x = F.pad(x, (0, 0, padding, 0), value = 0)
if exists(mask):
mask = F.pad(mask, (padding, 0), value = False)
# derive query, keys, values
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# set masked positions to 0 in queries, keys, values
if exists(mask):
mask = rearrange(mask, 'b n -> b () n')
q, k, v = map(lambda t: t * mask[..., None], (q, k, v))
q = q * self.scale
# generate landmarks by sum reduction, and then calculate mean using the mask
l = ceil(n / m)
landmark_einops_eq = '... (n l) d -> ... n d'
q_landmarks = reduce(q, landmark_einops_eq, 'sum', l = l)
k_landmarks = reduce(k, landmark_einops_eq, 'sum', l = l)
# calculate landmark mask, and also get sum of non-masked elements in preparation for masked mean
divisor = l
if exists(mask):
mask_landmarks_sum = reduce(mask, '... (n l) -> ... n', 'sum', l = l)
divisor = mask_landmarks_sum[..., None] + eps
mask_landmarks = mask_landmarks_sum > 0
# masked mean (if mask exists)
q_landmarks /= divisor
k_landmarks /= divisor
# similarities
einops_eq = '... i d, ... j d -> ... i j'
sim1 = einsum(einops_eq, q, k_landmarks)
sim2 = einsum(einops_eq, q_landmarks, k_landmarks)
sim3 = einsum(einops_eq, q_landmarks, k)
# masking
if exists(mask):
mask_value = -torch.finfo(q.dtype).max
sim1.masked_fill_(~(mask[..., None] * mask_landmarks[..., None, :]), mask_value)
sim2.masked_fill_(~(mask_landmarks[..., None] * mask_landmarks[..., None, :]), mask_value)
sim3.masked_fill_(~(mask_landmarks[..., None] * mask[..., None, :]), mask_value)
# eq (15) in the paper and aggregate values
attn1, attn2, attn3 = map(lambda t: t.softmax(dim = -1), (sim1, sim2, sim3))
attn2_inv = moore_penrose_iter_pinv(attn2, iters)
out = (attn1 @ attn2_inv) @ (attn3 @ v)
# add depth-wise conv residual of values
if self.residual:
out += self.res_conv(v)
# merge and combine heads
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
out = self.to_out(out)
out = out[:, -n:]
if return_attn:
attn = attn1 @ attn2_inv @ attn3
return out, attn
return out
# transformer
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Nystromformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
num_landmarks = 256,
pinv_iterations = 6,
attn_values_residual = True,
attn_values_residual_conv_kernel = 33,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, NystromAttention(dim = dim, dim_head = dim_head, heads = heads, num_landmarks = num_landmarks, pinv_iterations = pinv_iterations, residual = attn_values_residual, residual_conv_kernel = attn_values_residual_conv_kernel, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim = dim, dropout = ff_dropout))
]))
def forward(self, x, mask = None):
for attn, ff in self.layers:
x = attn(x, mask = mask) + x
x = ff(x) + x
return x
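# usage sketch: example use of the NystromAttention module above; the sequence
# length need not be divisible by the number of landmarks, since forward pads
if __name__ == '__main__':
    attn = NystromAttention(
        dim = 512,
        dim_head = 64,
        heads = 8,
        num_landmarks = 256,
        pinv_iterations = 6
    )
    x = torch.randn(1, 1000, 512)
    mask = torch.ones(1, 1000).bool()
    out = attn(x, mask = mask)
    print(out.shape)  # torch.Size([1, 1000, 512])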
|
nystrom-attention-main
|
nystrom_attention/nystrom_attention.py
|
from nystrom_attention.nystrom_attention import NystromAttention, Nystromformer
Nystromer = Nystromformer
|
nystrom-attention-main
|
nystrom_attention/__init__.py
|
import os
from copy import deepcopy
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from soft_moe_pytorch.soft_moe import Experts, FeedForward as Expert
from soft_moe_pytorch.distributed import all_gather_variable_dim
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group("gloo", rank = rank, world_size = world_size)
def cleanup():
dist.destroy_process_group()
def start(
rank,
world_size,
batch_size,
batch_size_var_len,
num_experts,
tokens_per_expert,
dim,
):
setup(rank, world_size)
net = Experts([Expert(dim) for _ in range(num_experts)])
if batch_size_var_len:
batch_size = batch_size + rank
seq = torch.randn(batch_size, num_experts, tokens_per_expert, dim)
# distributed
model = DDP(net)
out = model(seq)
out.mean().backward()
ddp_all_out, _ = all_gather_variable_dim(out)
# on single device
all_inputs, _ = all_gather_variable_dim(seq)
copied_net = deepcopy(net)
single_out = copied_net(
all_inputs,
is_distributed = False
)
single_out.mean().backward()
if rank == 0:
# validate output is the same
# if done on 1 vs multiple machines
assert torch.allclose(single_out, ddp_all_out), 'output is not the same'
# validate backwards and grad
get_first_expert_grad = lambda t: t.experts[0][0].weight.grad
assert torch.allclose(
get_first_expert_grad(net),
get_first_expert_grad(copied_net),
atol = 1e-2
), 'grad is not the same'
print('✅')
cleanup()
if __name__ == '__main__':
world_size = 9
num_experts = 8
batch_size = 2
batch_size_var_len = False
seq_len = 32
dim = 8
mp.spawn(
start,
args = (
world_size,
batch_size,
batch_size_var_len,
num_experts,
seq_len,
dim
),
nprocs = world_size,
join = True
)
|
soft-moe-pytorch-main
|
assert.py
|
from setuptools import setup, find_packages
setup(
name = 'soft-moe-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.9',
license='MIT',
description = 'Soft MoE - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/soft-moe-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'mixture of experts'
],
install_requires=[
'einops>=0.6.1',
'torch>=2.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
soft-moe-pytorch-main
|
setup.py
|
from soft_moe_pytorch.soft_moe import SoftMoE
from soft_moe_pytorch.soft_moe_with_dynamic_slots import DynamicSlotsSoftMoE
|
soft-moe-pytorch-main
|
soft_moe_pytorch/__init__.py
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
import torch.distributed as dist
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(num, den):
return (num % den) == 0
def pad_dim_to(t, length, dim = 0):
pad_length = length - t.shape[dim]
zero_pairs = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
return F.pad(t, (*((0, 0) * zero_pairs), 0, pad_length))
def all_gather_same_dim(t):
world_size = dist.get_world_size()
gathered_tensors = [torch.empty_like(t, device = t.device, dtype = t.dtype) for i in range(world_size)]
dist.all_gather(gathered_tensors, t)
return gathered_tensors
def gather_sizes(t, *, dim):
size = torch.tensor(t.shape[dim], device = t.device, dtype = torch.long)
sizes = all_gather_same_dim(size)
return torch.stack(sizes)
def has_only_one_value(t):
return (t == t[0]).all()
def all_gather_variable_dim(t, dim = 0, sizes = None):
device, rank, world_size = t.device, dist.get_rank(), dist.get_world_size()
if not exists(sizes):
sizes = gather_sizes(t, dim = dim)
if has_only_one_value(sizes):
gathered_tensors = all_gather_same_dim(t)
gathered_tensors = torch.cat(gathered_tensors, dim = dim)
return gathered_tensors, sizes
max_size = sizes.amax().item()
padded_t = pad_dim_to(t, max_size, dim = dim)
gathered_tensors = all_gather_same_dim(padded_t)
gathered_tensors = torch.cat(gathered_tensors, dim = dim)
seq = torch.arange(max_size, device = device)
mask = rearrange(seq, 'j -> 1 j') < rearrange(sizes, 'i -> i 1')
mask = rearrange(mask, 'i j -> (i j)')
seq = torch.arange(mask.shape[-1], device = device)
indices = seq[mask]
gathered_tensors = gathered_tensors.index_select(dim, indices)
return gathered_tensors, sizes
class AllGatherFunction(Function):
@staticmethod
def forward(ctx, x, dim, sizes):
x, batch_sizes = all_gather_variable_dim(x, dim = dim, sizes = sizes)
ctx.batch_sizes = batch_sizes.tolist()
ctx.dim = dim
return x, batch_sizes
@staticmethod
def backward(ctx, grads, _):
batch_sizes, rank = ctx.batch_sizes, dist.get_rank()
grads_by_rank = grads.split(batch_sizes, dim = ctx.dim)
return grads_by_rank[rank], None, None
class AllGather(nn.Module):
def __init__(self, *, dim = 0):
super().__init__()
self.dim = dim
def forward(self, x, sizes = None):
return AllGatherFunction.apply(x, self.dim, sizes)
class SplitByRank(Function):
@staticmethod
def forward(ctx, x):
rank = dist.get_rank()
return x[rank]
@staticmethod
def backward(ctx, grads):
grads = rearrange(grads, '... -> 1 ...')
grads = all_gather_variable_dim(grads)
return grads
split_by_rank = SplitByRank.apply
|
soft-moe-pytorch-main
|
soft_moe_pytorch/distributed.py
|
import math
import torch
from torch.nn import Module
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from einops import rearrange, reduce, pack, unpack
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, dim = - 1)
def pad_to_multiple(
tensor,
multiple,
dim = -1,
value = 0
):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return False, tensor
remainder = math.ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return True, F.pad(tensor, (*pad_offset, 0, remainder), value = value)
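# a small illustrative check of the helper above (not part of the original file):
# pad_to_multiple returns (was_padded, tensor), padding at the end of `dim`
def _example_pad_to_multiple():
    x = torch.randn(1, 10, 512)
    was_padded, padded = pad_to_multiple(x, 4, dim = -2)
    return was_padded, padded.shape  # -> True, (1, 12, 512)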
# norm
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return l2norm(x) * self.scale * self.gamma
# expert
def FeedForward(
dim,
mult = 4,
dropout = 0.
):
dim_hidden = int(dim * mult)
return nn.Sequential(
nn.Linear(dim, dim_hidden),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim_hidden, dim)
)
class GEGLU(Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.gelu(gate)
def GLUFeedForward(
dim,
mult = 4,
dropout = 0.
):
dim_hidden = int(dim * mult * 2 / 3)
return nn.Sequential(
nn.Linear(dim, dim_hidden * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim_hidden, dim)
)
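# note on the 2/3 factor above: with two input projections (value and gate) plus the output
# projection, a hidden width of dim * mult * 2/3 keeps the GEGLU expert's parameter count
# roughly equal to the plain FeedForward at the same `mult`, following the GLU-variants convention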
# main class
class DynamicSlotsSoftMoE(Module):
def __init__(
self,
dim,
*,
num_experts = 4,
expert_mult = 4,
dropout = 0.,
geglu = False
):
super().__init__()
self.norm = RMSNorm(dim)
self.num_experts = num_experts
self.to_slot_embeds = nn.Sequential(
nn.Linear(dim, dim * num_experts, bias = False),
Rearrange('b n (e d) -> b e n d', e = num_experts),
RMSNorm(dim)
)
expert_klass = GLUFeedForward if geglu else FeedForward
self.experts = nn.ModuleList([
expert_klass(dim = dim, mult = expert_mult, dropout = dropout) for _ in range(num_experts)
])
def forward(self, x, mask = None):
"""
einstein notation
b - batch
n - sequence length
e - number of experts
s - number of slots per expert
d - feature dimension
"""
seq_len, is_image, num_experts = x.shape[-2], x.ndim == 4, self.num_experts
if is_image:
x = rearrange(x, 'b d h w -> b h w d')
x, ps = pack([x], 'b * d')
# following Algorithm 1, with the normalization they proposed, but with scaling of both (the now popular rmsnorm + gamma)
x = self.norm(x)
# dynamic slot embeds
# first average consecutive tokens, by number of experts
# then, for each position, project out to that number of expert slot tokens
# there should be # slots ~= sequence length, like in a usual MoE with 1 expert
is_padded, x = pad_to_multiple(x, num_experts, dim = -2)
if is_padded:
if not exists(mask):
mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool)
_, mask = pad_to_multiple(mask, num_experts, dim = -1, value = False)
x_segmented = rearrange(x, 'b (n e) d -> b n e d', e = num_experts)
if exists(mask):
segmented_mask = rearrange(mask, 'b (n e) -> b n e', e = num_experts)
x_segmented = x_segmented.masked_fill(~rearrange(segmented_mask, '... -> ... 1'), 0.)
# perform a masked mean
if exists(mask):
num = reduce(x_segmented, 'b n e d -> b n d', 'sum')
den = reduce(segmented_mask.float(), 'b n e -> b n 1', 'sum').clamp(min = 1e-5)
x_consecutive_mean = num / den
slots_mask = segmented_mask.any(dim = -1)
else:
x_consecutive_mean = reduce(x_segmented, 'b n e d -> b n d', 'mean')
# project to get dynamic slots embeddings
# could potentially inject sinusoidal positions here too before projection
slot_embeds = self.to_slot_embeds(x_consecutive_mean)
logits = einsum('b n d, b e s d -> b n e s', x, slot_embeds)
# account for key padding mask
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1 1')
slots_mask = rearrange(slots_mask, 'b s -> b 1 1 s')
logits = logits.masked_fill(~mask, -torch.finfo(logits.dtype).max)
logits = logits.masked_fill(~slots_mask, -torch.finfo(logits.dtype).max)
# get dispatch and combine weights (softmax across right dimensions)
dispatch_weights = logits.softmax(dim = 1)
combine_weights = rearrange(logits, 'b n e s -> b n (e s)')
combine_weights = combine_weights.softmax(dim = -1)
# derive slots by weighted average of input tokens using the dispatch weights from above
slots = einsum('b n d, b n e s -> e b s d', x, dispatch_weights)
# route the slots per expert to each expert
out = []
for slots_per_expert, expert in zip(slots, self.experts):
out.append(expert(slots_per_expert))
out = torch.stack(out)
# combine back out
out = rearrange(out, 'e b s d -> b (e s) d')
out = einsum('b s d, b n s -> b n d', out, combine_weights)
if is_image:
out, = unpack(out, ps, 'b * d')
out = rearrange(out, 'b h w d -> b d h w')
return out[:, :seq_len]
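# a minimal usage sketch (illustrative only, not part of the original file); slots are derived
# from the input itself, so the sequence length does not need to be fixed ahead of time and
# need not divide num_experts
def _example_dynamic_slots_soft_moe():
    moe = DynamicSlotsSoftMoE(512, num_experts = 4)
    x = torch.randn(1, 1022, 512)
    return moe(x).shape  # -> (1, 1022, 512)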
|
soft-moe-pytorch-main
|
soft_moe_pytorch/soft_moe_with_dynamic_slots.py
|
import torch
from torch.nn import Module
import torch.nn.functional as F
import torch.distributed as dist
from torch import nn, einsum, Tensor
from einops import rearrange, pack, unpack
from soft_moe_pytorch.distributed import (
AllGather,
split_by_rank,
gather_sizes,
has_only_one_value
)
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(num, den):
return (num % den) == 0
def chunk_num(num, chunks):
num_per_chunk, remainder = divmod(num, chunks)
out = []
for i in range(chunks):
n = num_per_chunk
out.append(n + int(i < remainder))
return out
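# e.g. chunk_num(10, 3) -> [4, 3, 3]: earlier chunks absorb the remainder (illustrative note)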
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def l2norm(t):
return F.normalize(t, dim = - 1)
def cumsum_exclusive(t, dim = -3):
assert dim < 0
num_pad_dims = -dim - 1
pre_padding = (0, 0) * num_pad_dims
return F.pad(t, (*pre_padding, 1, -1)).cumsum(dim = dim)
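# e.g. cumsum_exclusive(torch.tensor([3, 3, 2]), dim = -1) -> tensor([0, 3, 6]), i.e. an exclusive
# prefix sum, used further below to turn per-rank expert counts into start indices (illustrative note)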
# norm
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return l2norm(x) * self.scale * self.gamma
# expert
def FeedForward(
dim,
mult = 4,
dropout = 0.
):
dim_hidden = int(dim * mult)
return nn.Sequential(
nn.Linear(dim, dim_hidden),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim_hidden, dim)
)
class GEGLU(Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.gelu(gate)
def GLUFeedForward(
dim,
mult = 4,
dropout = 0.
):
dim_hidden = int(dim * mult * 2 / 3)
return nn.Sequential(
nn.Linear(dim, dim_hidden * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim_hidden, dim)
)
# experts
class Experts(nn.Module):
def __init__(
self,
experts,
is_distributed = None
):
super().__init__()
self.num_experts = len(experts)
self.experts = nn.ModuleList(experts)
self.is_distributed = is_distributed
if not exists(self.is_distributed):
self.is_distributed = dist.is_initialized() and dist.get_world_size() > 1
self.all_gather = AllGather()
self.register_buffer('dummy', torch.ones(1), persistent = False)
@property
def device(self):
return self.dummy.device
def all_experts_to_cpu_besides(self, selection):
if isinstance(selection, int):
experts = [self.experts[selection]]
if isinstance(selection, slice):
experts = self.experts[selection]
else:
experts = selection
experts_set = set(experts)
for expert in self.experts:
device = self.device if expert in experts_set else 'cpu'
expert.to(device)
def forward(
self,
x,
is_distributed = None
):
"""
einops notation:
b - batch
r - rank (device / machines)
e - experts
n - sequence (number of tokens per expert)
d - feature dimension
"""
is_distributed = default(is_distributed, self.is_distributed)
shape, num_experts = x.shape, self.num_experts
# for now naively all gather across batch dimension if distributed, optimize later
if is_distributed:
seq_sizes = gather_sizes(x, dim = -2)
assert has_only_one_value(seq_sizes), 'number of tokens per expert must be the same'
x, batch_sizes = self.all_gather(x)
total_batch_size = x.shape[0]
world_size = dist.get_world_size()
rank = dist.get_rank()
else:
world_size = 1
rank = 0
# the experts in use on the rank
if is_distributed:
if world_size <= num_experts:
num_experts_across_ranks = chunk_num(num_experts, world_size)
start_indices = cumsum_exclusive(torch.tensor(num_experts_across_ranks), dim = -1)
num_experts_per_rank = num_experts_across_ranks[rank]
num_experts_batches_across_ranks = tuple(i * total_batch_size for i in num_experts_across_ranks)
expert_start_index = start_indices[rank].item()
else:
num_batch_chunks = world_size // num_experts
total_ranks_in_use = num_batch_chunks * num_experts
expert_start_index = rank // num_batch_chunks
batch_splits = chunk_num(total_batch_size, num_batch_chunks)
num_experts_batches_across_ranks = batch_splits * num_experts
# for now, remaining machines just process nothing
remain_ranks = world_size % num_experts
num_experts_batches_across_ranks += (0,) * remain_ranks
num_experts_per_rank = int(rank < total_ranks_in_use)
assert len(num_experts_batches_across_ranks) == world_size
expert_slice = slice(expert_start_index, expert_start_index + num_experts_per_rank)
else:
num_experts_per_rank = num_experts
expert_slice = slice(0, num_experts)
# if distributed, each machine only handles subset of experts and batch
x = rearrange(x, 'b e n d -> e b n d')
if is_distributed:
x, expert_batch_packed_shape = pack_one(x, '* n d')
x = x.split(num_experts_batches_across_ranks, dim = 0)
x = split_by_rank(x)
if num_experts_per_rank > 0:
x = rearrange(x, '(e b) n d -> e b n d', e = num_experts_per_rank)
else:
x = x.reshape(num_experts, *x.shape)
# get the experts in use
self.all_experts_to_cpu_besides(expert_slice)
experts = self.experts[expert_slice]
# route tokens to appropriate experts
outs = []
for expert, expert_input in zip(experts, x):
out = expert(expert_input)
outs.append(out)
if len(outs) > 0:
outs = torch.stack(outs)
else:
outs = torch.empty_like(x).requires_grad_()
# all gather across merged expert batches dimensions
# then split the batch dimension back
if is_distributed:
outs = rearrange(outs, 'e b n d -> (e b) n d')
outs, _ = self.all_gather(outs)
outs = unpack_one(outs, expert_batch_packed_shape, '* n d')
outs = rearrange(outs, 'e b n d -> b e n d')
if is_distributed:
outs = outs.split(batch_sizes.tolist())
outs = split_by_rank(outs)
assert outs.shape == shape
return outs
# main class
class SoftMoE(Module):
def __init__(
self,
dim,
*,
seq_len = None,
num_experts = 4,
num_slots = None,
expert_mult = 4,
dropout = 0.,
geglu = False,
is_distributed = None
):
super().__init__()
assert exists(seq_len) ^ exists(num_slots), 'either seq_len, or num_slots must be passed into SoftMoE'
num_slots = default(num_slots, seq_len // num_experts)
self.norm = RMSNorm(dim)
self.slot_norm = RMSNorm(dim)
self.slot_embeds = nn.Parameter(torch.randn(num_experts, num_slots, dim))
expert_klass = GLUFeedForward if geglu else FeedForward
self.experts = Experts(
experts = [expert_klass(dim = dim, mult = expert_mult, dropout = dropout) for _ in range(num_experts)],
is_distributed = is_distributed
)
def forward(self, x, mask = None):
"""
einstein notation
b - batch
n - sequence length
e - number of experts
s - number of slots per expert
d - feature dimension
"""
is_image = x.ndim == 4
if is_image:
x = rearrange(x, 'b d h w -> b h w d')
x, ps = pack([x], 'b * d')
# following Algorithm 1, with the normalization they proposed, but with scaling of both (the now popular rmsnorm + gamma)
x = self.norm(x)
slot_embeds = self.slot_norm(self.slot_embeds)
logits = einsum('b n d, e s d -> b n e s', x, slot_embeds)
# account for key padding mask
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1 1')
logits = logits.masked_fill(~mask, -torch.finfo(logits.dtype).max)
# get dispatch and combine weights (softmax across right dimensions)
dispatch_weights = logits.softmax(dim = 1)
combine_weights = rearrange(logits, 'b n e s -> b n (e s)')
combine_weights = combine_weights.softmax(dim = -1)
# derive slots by weighted average of input tokens using the dispatch weights from above
slots = einsum('b n d, b n e s -> b e s d', x, dispatch_weights)
# route the slots per expert to each expert
out = self.experts(slots)
# combine back out
out = rearrange(out, ' b e s d -> b (e s) d')
out = einsum('b s d, b n s -> b n d', out, combine_weights)
if is_image:
out, = unpack(out, ps, 'b * d')
out = rearrange(out, 'b h w d -> b d h w')
return out
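# a minimal single-process usage sketch (illustrative only, not part of the original file);
# is_distributed = False keeps the Experts wrapper on the non-distributed code path
def _example_soft_moe():
    moe = SoftMoE(
        dim = 512,
        seq_len = 1024,
        num_experts = 4,
        is_distributed = False
    )
    x = torch.randn(1, 1024, 512)
    return moe(x).shape  # -> (1, 1024, 512)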
|
soft-moe-pytorch-main
|
soft_moe_pytorch/soft_moe.py
|
from setuptools import setup, find_packages
setup(
name = 'sinkhorn_transformer',
packages = find_packages(exclude=['examples']),
version = '0.11.4',
license='MIT',
description = 'Sinkhorn Transformer - Sparse Sinkhorn Attention',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/sinkhorn-transformer',
keywords = ['transformers', 'attention', 'artificial intelligence'],
install_requires=[
'axial-positional-embedding>=0.1.0',
'local-attention',
'product-key-memory',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
sinkhorn-transformer-master
|
setup.py
|
import torch
from sinkhorn_transformer.sinkhorn_transformer import SinkhornTransformerLM
from sinkhorn_transformer.autoregressive_wrapper import AutoregressiveWrapper
N_BATCH = 16
SRC_SEQ_LEN = 512
TGT_SEQ_LEN = 512
enc = SinkhornTransformerLM(
num_tokens = 64,
dim = 512,
depth = 1,
heads = 8,
max_seq_len = SRC_SEQ_LEN,
bucket_size = 64,
return_embeddings = True
).cuda()
dec = SinkhornTransformerLM(
num_tokens = 64,
dim = 512,
depth = 2,
heads = 8,
max_seq_len = TGT_SEQ_LEN,
bucket_size = 64,
causal = True,
receives_context = True
).cuda()
dec = AutoregressiveWrapper(dec, ignore_index = 0, pad_value = 0)
opt = torch.optim.Adam([*enc.parameters(), *dec.parameters()], lr=2e-4)
bos = 1 * torch.ones(N_BATCH, 1).long()
eos = 2 * torch.ones(N_BATCH, 1).long()
pos = 3 * torch.ones(N_BATCH, 1).long()
for i in range(10000):
train_seq_in = torch.randint(4, 63, (N_BATCH, SRC_SEQ_LEN-2)).long()
train_seq_out = train_seq_in + 1
x = torch.cat([bos, train_seq_in, eos], dim=1).cuda()
y = torch.cat([bos, train_seq_out, eos], dim=1).cuda()
context = enc(x)
loss = dec(y, context = context, return_loss = True)
loss.backward()
opt.step()
opt.zero_grad()
print(i, loss.item())
|
sinkhorn-transformer-master
|
examples/increment_by_one/train.py
|
import deepspeed
from sinkhorn_transformer import SinkhornTransformerLM
from sinkhorn_transformer.autoregressive_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
def add_argument():
parser=argparse.ArgumentParser(description='enwik8')
parser.add_argument('--with_cuda', default=False, action='store_true',
help='use CPU in case there\'s no GPU support')
parser.add_argument('--use_ema', default=False, action='store_true',
help='whether use exponential moving average')
parser.add_argument('-b', '--batch_size', default=32, type=int,
help='mini-batch size (default: 32)')
parser.add_argument('-e', '--epochs', default=30, type=int,
help='number of total epochs (default: 30)')
parser.add_argument('--local_rank', type=int, default=-1,
help='local rank passed from distributed launcher')
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
return args
# constants
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 4096
# helpers
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = SinkhornTransformerLM(
num_tokens = 256,
emb_dim = 128,
dim = 512,
depth = 8,
max_seq_len = SEQ_LEN,
heads = 8,
bucket_size = 128,
ff_chunks = 10,
causal = True,
reversible = True,
attn_dropout = 0.1,
n_local_attn_heads = 4
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # frombuffer replaces the deprecated fromstring; copy() makes the array writable for torch.from_numpy

trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# setup deepspeed
cmd_args = add_argument()
model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset)
# training
for i, data in enumerate(trainloader):
model_engine.train()
data = data.to(model_engine.local_rank)
loss = model_engine(data, return_loss = True)
model_engine.backward(loss)
model_engine.step()
print(loss.item() * 4)
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
inp = random.choice(val_dataset)
loss = model(inp[None, :].cuda(), return_loss = True)
print(f'validation loss: {loss.item()}')
if model_engine.local_rank == 0 and i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp.cuda(), GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
|
sinkhorn-transformer-master
|
examples/enwik8_deepspeed/train.py
|
from sinkhorn_transformer import SinkhornTransformerLM
from sinkhorn_transformer.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = SinkhornTransformerLM(
num_tokens = 256,
emb_dim = 128,
dim = 512,
depth = 8,
max_seq_len = SEQ_LEN,
heads = 8,
bucket_size = 128,
ff_chunks = 2,
causal = True,
reversible = True,
attn_dropout = 0.1,
n_local_attn_heads = 4
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # frombuffer replaces the deprecated fromstring; copy() makes the array writable for torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
|
sinkhorn-transformer-master
|
examples/enwik8_simple/train.py
|
import math
import torch
from torch import nn
import torch.nn.functional as F
from sinkhorn_transformer.sinkhorn_transformer import SinkhornTransformer, SinkhornTransformerLM
def find_module(nn_module, type):
for module in nn_module.modules():
if isinstance(module, type):
return module
return None
def pad_to_multiple(tensor, multiple, dim=-1, pad_left = False):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return tensor, 0
pre_pad_offset = (0,) * (-1 - dim) * 2
padding = math.ceil(m) * multiple - seqlen
offset = (padding, 0) if pad_left else (0, padding)
padded_tensor = F.pad(tensor, (*pre_pad_offset, *offset), value=0)
return padded_tensor, padding
class Autopadder(nn.Module):
def __init__(self, net, pad_left=False):
super().__init__()
assert isinstance(net, (SinkhornTransformer, SinkhornTransformerLM)), 'only modules SinkhornTransformer and SinkhornTransformerLM accepted'
self.net = net
is_lm = isinstance(net, SinkhornTransformerLM)
sinkhorn = find_module(net, SinkhornTransformer)
self.bucket_size = sinkhorn.pad_to_bucket_size
self.context_bucket_size = sinkhorn.context_bucket_size
self.pad_dim = -1 if is_lm else -2
self.pad_left = pad_left
def forward(self, x, **kwargs):
b, t, device = *x.shape[:2], x.device
context = kwargs.get('context')
input_mask = kwargs.get('input_mask')
context_mask = kwargs.get('context_mask')
if input_mask is None:
input_mask = torch.full(x.shape[:2], True, device=x.device, dtype=torch.bool)
if context is not None and context_mask is None:
context_mask = torch.full(context.shape[0:2], True, device=x.device, dtype=torch.bool)
x, padding = pad_to_multiple(x, self.bucket_size, dim=self.pad_dim, pad_left=self.pad_left)
if padding != 0:
offset = (0, padding) if not self.pad_left else (padding, 0)
new_mask = F.pad(input_mask, offset, value=False)
kwargs.update(input_mask=new_mask)
if context is not None:
context, context_padding = pad_to_multiple(context, self.context_bucket_size, dim=-2)
if context_padding != 0:
new_mask = F.pad(context_mask, (0, context_padding), value=False)
kwargs.update(context_mask=new_mask)
kwargs.update(context=context)
out = self.net(x, **kwargs)
output_slice = slice(0, t) if not self.pad_left else slice(padding, None)
return out[:, output_slice]
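# a minimal usage sketch (illustrative only, not part of the original file); the LM hyperparameters
# mirror the examples elsewhere in this repository and are otherwise arbitrary
def _example_autopadder():
    lm = SinkhornTransformerLM(
        num_tokens = 256,
        dim = 512,
        depth = 2,
        heads = 8,
        max_seq_len = 1024,
        bucket_size = 64,
        causal = True
    )
    padded_lm = Autopadder(lm)
    x = torch.randint(0, 256, (1, 1000))  # length is not a multiple of bucket_size
    return padded_lm(x).shape             # -> (1, 1000, 256), trimmed back to the input length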
|
sinkhorn-transformer-master
|
sinkhorn_transformer/autopadder.py
|
from functools import partial
import torch
from random import randint
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from sinkhorn_transformer.sinkhorn_transformer import SinkhornTransformerLM
from sinkhorn_transformer.autopadder import Autopadder
def default(value, default):
return value if value is not None else default
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
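# note: with the default thres = 0.9, top_k keeps only the largest int(0.1 * vocab_size) logits and
# fills the rest with -inf before sampling (illustrative note)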
def pad_sequence_left(seqs, value):
m = max([len(s) for s in seqs])
return torch.stack([F.pad(s, (m - len(s), 0)) for s in seqs])
def random_truncate_inputs(inputs, mask = None, pad_value=0):
b, t, device, dtype = *inputs.shape, inputs.device, inputs.dtype
mask = default(mask, torch.ones_like(inputs))
rand_lengths = torch.randint(2, t, (b, 1))
rand_mask = (torch.arange(t) < rand_lengths).to(device)
target_seqs = [t.masked_select(mask) for mask, t in zip(rand_mask, inputs)]
mask_seqs = [m.masked_select(rmask) for rmask, m in zip(rand_mask, mask)]  # truncate the provided mask, not the random mask itself
return pad_sequence_left(target_seqs, pad_value), pad_sequence_left(mask_seqs, False)
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = None, pad_value = 0, pad_left = True):
super().__init__()
assert isinstance(net, SinkhornTransformerLM), 'generative trainer wrapper can only accept SinkhornTransformerLM class'
self.pad_value = pad_value
self.ignore_index = default(ignore_index, pad_value)
self.net = Autopadder(net, pad_left = pad_left)
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('input_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, input_mask=input_mask, **kwargs)[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
input_mask = F.pad(input_mask, (0, 1), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, return_loss = False, randomly_truncate_sequence = False, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
if not return_loss:
if not isinstance(x, torch.Tensor):
x = pad(x)
return self.net(x, **kwargs)
m = kwargs.pop('input_mask', None)
if randomly_truncate_sequence:
x, m = random_truncate_inputs(x, m, pad_value = self.pad_value)
if isinstance(x, torch.Tensor):
xi, xo = x[:, :-1], x[:, 1:]
else:
xi = pad(list(map(lambda t: t[:-1], x)))
xo = pad(list(map(lambda t: t[1:], x)))
if m is not None:
assert m.shape == x.shape[0:2], 'input mask must be the same shape as the input of the auto-regressive wrapper to automatically handle'
kwargs.update(input_mask = m[:, :-1])
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
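# a minimal usage sketch (illustrative only, not part of the original file) showing one training
# step and a short sampling call; hyperparameters are arbitrary
def _example_autoregressive_wrapper():
    lm = SinkhornTransformerLM(
        num_tokens = 256,
        dim = 512,
        depth = 2,
        heads = 8,
        max_seq_len = 1024,
        bucket_size = 64,
        causal = True
    )
    model = AutoregressiveWrapper(lm)
    x = torch.randint(0, 256, (1, 1024))
    loss = model(x, return_loss = True)   # next-token cross entropy on the shifted sequence
    loss.backward()
    prime = torch.randint(0, 256, (1, 64))
    sample = model.generate(prime, 32)    # -> (1, 32) sampled continuation
    return sample.shape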
|
sinkhorn-transformer-master
|
sinkhorn_transformer/autoregressive_wrapper.py
|