# Code dump, one row per file: (python_code, repo_name, file_path).
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
sys.path.insert(1, project_root + '/fairseq')
sys.path.insert(2, project_root + '/fairseq/scripts')
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see these modules
os.environ['PYTHONPATH'] = ':'.join([project_root, project_root + '/fairseq', project_root + '/fairseq/scripts'] + ([os.environ['PYTHONPATH']] if os.environ.get('PYTHONPATH') else []))
import math
from pathlib import Path
import pickle
import random
import subprocess # To run fairseq-generate
import re # To extract BLEU score from output of fairseq-generate
import socket # For hostname
import numpy as np
import torch
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, grid_search
from ray.tune.schedulers import AsyncHyperBandScheduler
# Fairseq scripts
from train import train_translation
from generate import generate_translation
from average_checkpoints import main as avg_checkpoints
def evaluate_translation(gen_args):
# gen_process = subprocess.run(['fairseq-generate'] + gen_args, capture_output=True)
# Need to use sys.executable to call the correct Python interpreter
gen_process = subprocess.run([sys.executable, str(Path(project_root) / 'fairseq/generate.py')] + gen_args,
capture_output=True)
err = gen_process.stderr.decode(sys.stdout.encoding)
out = gen_process.stdout.decode(sys.stdout.encoding)
sys.stderr.write(err)
sys.stdout.write(out)
m = re.search(r'BLEU4 = ([-+]?\d*\.\d+|\d+),', out)
return None, float(m.group(1)) # Return a pair to be compatible with generate_translation
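# Example of the summary line this regex targets (assumed fairseq-generate
# output format; the numbers are illustrative):
#   Generate test with beam=4: BLEU4 = 34.45, 68.2/41.3/27.1/18.1 (BP=0.998, ...)
# Here re.search(...).group(1) would yield '34.45'.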
class TrainableModel(Trainable):
"""Trainable object for a Pytorch model, to be used with Ray's Hyperband tuning.
"""
def _setup(self, config):
device = config['device']
self.device = device
torch.manual_seed(config['seed'])
if self.device == 'cuda':
torch.cuda.manual_seed(config['seed'])
model = config['model']
train_args = [str(Path(project_root) / 'fairseq/data-bin/iwslt14.tokenized.de-en')]
train_args += ['--clip-norm', '0'] if model['name'] == 'DynamicConv' else []
train_args += ['--optimizer', 'adam']
train_args += ['--lr', str(config['lr'])]
train_args += ['--source-lang', 'de']
train_args += ['--target-lang', 'en']
train_args += ['--max-tokens', '4000']
train_args += ['--no-progress-bar']
train_args += ['--log-interval', '100']
train_args += ['--min-lr', "1e-09"]
train_args += ['--weight-decay', str(config['weight_decay'])]
train_args += ['--criterion', 'label_smoothed_cross_entropy']
train_args += ['--label-smoothing', '0.1']
train_args += ['--lr-scheduler', 'inverse_sqrt']
train_args += ['--ddp-backend=no_c10d']
train_args += ['--max-update', str(config['nmaxupdates'])]
train_args += ['--warmup-updates', '4000']
train_args += ['--warmup-init-lr', "1e-07"]
train_args += ['--adam-betas=(0.9, 0.98)']
train_args += ['--keep-last-epochs', '10']
# Always train from scratch, to overwrite previous runs, so point to nonexistent checkpoint file
# train_args += ['--restore-file', 'nonexistent_checkpoint.pt']
train_args += ['-a', 'lightconv_butterfly_iwslt_de_en'] if model['name'] == 'DynamicConv' else ['-a', 'transformer_butterfly_iwslt_de_en']
train_args += ['--dropout', str(config['dropout'])]
train_args += ['--attention-dropout', '0.1'] if model['name'] == 'DynamicConv' else []
train_args += ['--weight-dropout', '0.1'] if model['name'] == 'DynamicConv' else []
train_args += ['--encoder-glu', '0'] if model['name'] == 'DynamicConv' else []
train_args += ['--decoder-glu', '0'] if model['name'] == 'DynamicConv' else []
train_args += ['--seed', str(config['seed'])]
self._save_dir = Path(config['result_dir']) / f"seed={config['seed']}"
train_args += ['--save-dir', str(self._save_dir)]
train_args += ['--encoder-layers', str(len(config['encoder']))]
train_args += ['--decoder-layers', str(len(config['decoder']))]
train_args += ['--encoder-structure-type-list', str(config['encoder'])]
train_args += ['--decoder-structure-type-list', str(config['decoder'])]
train_args += ['--structure-lr-multiplier', str(config['structure-lr-multiplier'])]
print(f'Host: {socket.gethostname()}, save_dir: {self._save_dir}')
avg_args = [
'--inputs=' + str(self._save_dir), '--num-epoch-checkpoints=10',
'--output=' + str(self._save_dir / 'model.pt')
]
gen_args = [project_root + '/fairseq/data-bin/iwslt14.tokenized.de-en',
'--batch-size=64', '--remove-bpe',
'--beam=4', '--quiet', '--no-progress-bar'
]
self._train_args = train_args
self._avg_args = avg_args
self._gen_args = gen_args
def _train(self):
self._save_dir.mkdir(parents=True, exist_ok=True)
stdout = sys.stdout
with open(self._save_dir / 'logs.txt', 'w+') as log:
sys.stdout = log
# [2019-08-02] For some reason ray gets stuck when I call train_translation
# or generate_translation.
# Workaround: use subprocess to call fairseq-generate in another process
# train_translation(self._train_args)
subprocess.run([sys.executable, str(Path(project_root) / 'fairseq/train.py')] + self._train_args,
stdout=log)
avg_checkpoints(cmdline_args=self._avg_args)
last_model = self._save_dir / 'checkpoint_last.pt'
best_model = self._save_dir / 'checkpoint_best.pt'
ensemble_model = self._save_dir / 'model.pt'
# Delete checkpoints to save disk space
# for ckpt_file in Path(self._save_dir).glob('*.pt'):
# if ckpt_file != last_model and ckpt_file != ensemble_model \
# and ckpt_file != best_model:
# ckpt_file.unlink()
if self.device == 'cuda':
torch.cuda.empty_cache()
_, BLEU_last_valid = evaluate_translation(
self._gen_args + ['--gen-subset=valid', '--path=' + str(last_model)])
_, BLEU_ensm_valid = evaluate_translation(
self._gen_args + ['--gen-subset=valid', '--path=' + str(ensemble_model)])
_, BLEU_last_test = evaluate_translation(
self._gen_args + ['--gen-subset=test', '--path=' + str(last_model)])
_, BLEU_ensm_test = evaluate_translation(
self._gen_args + ['--gen-subset=test', '--path=' + str(ensemble_model)])
sys.stdout = stdout
return {
'final_bleu_valid': BLEU_last_valid,
'ensemble_bleu_valid': BLEU_ensm_valid,
'final_bleu_test': BLEU_last_test,
'ensemble_bleu_test': BLEU_ensm_test,
}
def _save(self, checkpoint_dir):
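# Nothing is serialized here: fairseq already writes its own checkpoints to
# self._save_dir during _train. Returning a path merely satisfies Ray's
# Trainable checkpoint API (cf. checkpoint_freq / max_failures in the
# RayExperiment below).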
checkpoint_path = os.path.join(checkpoint_dir, "model_optimizer.pth")
return checkpoint_path
def _restore(self, checkpoint_path):
pass
ex = Experiment('Transformer_experiment')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('../config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.config
def default_config():
model = 'DynamicConv' # Name of model, either 'DynamicConv' or 'Transformer'
model_args = {} # Arguments to be passed to the model, as a dictionary
encoder = ['D'] * (7 if model == 'DynamicConv' else 6) # Layers in the encoder
decoder = ['D'] * 6 # Layers in the decoder
structure_lr_multiplier = 1.0 # Learning rate multiplier for structured parameters
ntrials = 3 # Number of trials for hyperparameter tuning
nmaxupdates = 50000 # Maximum number of updates
result_dir = project_root + '/transformer/results' # Directory to store results
cuda = torch.cuda.is_available() # Whether to use GPU
smoke_test = False # Finish quickly for testing
@ex.capture
def dynamic_conv_experiment(model, model_args, encoder, decoder, structure_lr_multiplier,
nmaxupdates, ntrials, result_dir, cuda, smoke_test):
# name=f"{model}_{model_args}_encoder_[{'-'.join(encoder)}]_decoder_[{'-'.join(decoder)}]_structlr_{structure_lr_multiplier}"
name=f"{model}_{model_args}_encoder_[{'-'.join(encoder)}]_decoder_[{'-'.join(decoder)}]_structlr_grid"
config={
# 'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(1e-3)))),
# 'lr': grid_search([5e-4, 7e-4, 9e-4, 11e-4]),
# 'lr': grid_search([1e-4, 2.5e-4, 5e-4, 7.5e-4]),
'lr': 5e-4,
# 'weight_decay': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-6), math.log(5e-4)))) if model == 'DynamicConv' else 1e-4,
'weight_decay': 1e-4,
# Transformer seems to need dropout 0.3
# 'dropout': sample_from(lambda spec: random.uniform(0.1, 0.3)) if model == 'DynamicConv' else 0.3,
'dropout': 0.3,
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'encoder': list(encoder), # Need to copy since Sacred creates a read-only list
'decoder': list(decoder),
# 'structure-lr-multiplier': structure_lr_multiplier,
'structure-lr-multiplier': grid_search([0.25, 0.5, 1.0, 2.0, 4.0]),
'device': 'cuda' if cuda else 'cpu',
'model': {'name': model, 'args': model_args},
'nmaxupdates': nmaxupdates,
'result_dir': result_dir + '/' + name
}
experiment = RayExperiment(
name=name,
run=TrainableModel,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=False,
checkpoint_freq=1000, # Just to enable recovery with @max_failures
max_failures=-1,
resources_per_trial={'cpu': 2, 'gpu': 1 if cuda else 0},
stop={"training_iteration": 1},
config=config,
)
return experiment
@ex.automain
def run(model, encoder, decoder, result_dir):
experiment = dynamic_conv_experiment()
try:
with open('../config/redis_address', 'r') as f:
address = f.read().strip()
ray.init(redis_address=address)
except Exception: # No Ray cluster address configured; fall back to a local instance
ray.init()
trials = ray.tune.run(experiment, raise_on_failed_trial=False, queue_trials=True).trials
trials = [trial for trial in trials if trial.last_result is not None]
bleu = [(trial.last_result.get('ensemble_bleu_valid', float('-inf')),
trial.last_result.get('ensemble_bleu_test', float('-inf'))) for trial in trials]
max_bleu = max(bleu, key=lambda x: x[0])[1]
return model, encoder, decoder, max_bleu
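# Usage sketch (hypothetical invocation; Sacred parses config overrides after
# the `with` keyword on the command line):
#   python dynamic_conv_experiment.py with model=Transformer ntrials=1 smoke_test=True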
# --- end of butterfly-master/transformer/dynamic_conv_experiment.py ---
import math
import unittest
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import torch_butterfly
class ButterflyTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_multiply(self):
for batch_size, n in [(10, 4096), (8192, 512)]: # Test size smaller than 1024 and large batch size for race conditions
# for batch_size, n in [(10, 64)]:
# for batch_size, n in [(1, 2)]:
log_n = int(math.log2(n))
nstacks = 2
nblocks = 3
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
# for device in ['cuda']:
for complex in [False, True]:
# for complex in [False]:
for increasing_stride in [True, False]:
# for increasing_stride in [True]:
if batch_size > 1024 and (device == 'cpu'):
continue
dtype = torch.float32 if not complex else torch.complex64
# complex randn already has the correct scaling of stddev=1.0
scaling = 1 / math.sqrt(2)
twiddle = torch.randn((nstacks, nblocks, log_n, n // 2, 2, 2), dtype=dtype, requires_grad=True, device=device) * scaling
input = torch.randn((batch_size, nstacks, n), dtype=dtype, requires_grad=True, device=twiddle.device)
output = torch_butterfly.butterfly_multiply(twiddle, input, increasing_stride)
output_torch = torch_butterfly.multiply.butterfly_multiply_torch(twiddle, input, increasing_stride)
self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
((output - output_torch).abs().max().item(), device, complex, increasing_stride))
grad = torch.randn_like(output_torch)
d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
((d_input - d_input_torch).abs().max().item(), device, complex, increasing_stride))
# if device == 'cuda' and batch_size > 1024 and not complex and increasing_stride:
# print((d_twiddle - d_twiddle_torch).abs().mean(dim=(0, 2, 3, 4)))
# print(((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().mean(dim=(0, 2, 3, 4)))
# i = ((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().argmax()
# print(d_twiddle.flatten()[i])
# print(d_twiddle_torch.flatten()[i])
# print(d_twiddle.flatten()[i-5:i+5])
# print(d_twiddle_torch.flatten()[i-5:i+5])
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch, rtol=self.rtol * (10 if batch_size > 1024 else 1),
atol=self.atol * (10 if batch_size > 1024 else 1)),
(((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(),
(batch_size, n), device, complex, increasing_stride))
def test_input_padding_output_slicing(self):
batch_size = 10
nstacks = 2
nblocks = 3
for n in [32, 4096]:
log_n = int(math.log2(n))
for input_size in [n // 2 - 1, n - 4, 2 * n + 7]:
for output_size in [None, n // 2 - 2, n - 5]:
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
for complex in [False, True]:
for increasing_stride in [True, False]:
dtype = torch.float32 if not complex else torch.complex64
# complex randn already has the correct scaling of stddev=1.0
scaling = 1 / math.sqrt(2)
twiddle = torch.randn((nstacks, nblocks, log_n, n // 2, 2, 2), dtype=dtype, requires_grad=True, device=device) * scaling
input = torch.randn((batch_size, nstacks, input_size), dtype=dtype, requires_grad=True, device=twiddle.device)
output = torch_butterfly.butterfly_multiply(twiddle, input, increasing_stride, output_size)
output_torch = torch_butterfly.multiply.butterfly_multiply_torch(twiddle, input, increasing_stride, output_size)
self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
((output - output_torch).abs().max().item(), device, complex, increasing_stride))
grad = torch.randn_like(output_torch)
d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
((d_input - d_input_torch).abs().max().item(), device, complex, increasing_stride))
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch, rtol=self.rtol * (10 if batch_size > 1024 else 1),
atol=self.atol * (10 if batch_size > 1024 else 1)),
(((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(),
(d_twiddle - d_twiddle_torch).abs().max().item(),
(batch_size, n), device, complex, increasing_stride))
if __name__ == "__main__":
unittest.main()
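# Reference sketch of a single butterfly layer, for readers of the tests above.
# This uses one common convention (elements at distance `stride` mixed by
# independent 2x2 matrices); torch_butterfly's internal twiddle ordering may
# differ, so treat this as illustrative rather than the library's exact layout.
def butterfly_layer_ref(twiddle, x, stride):
    """twiddle: (n // 2, 2, 2), real or complex; x: (batch, n); stride a power of 2."""
    batch, n = x.shape
    xv = x.reshape(batch, n // (2 * stride), 2, stride)    # partners sit `stride` apart
    tv = twiddle.reshape(n // (2 * stride), stride, 2, 2)  # one 2x2 matrix per pair
    # y[b, p, t, s] = sum_k tv[p, s, t, k] * xv[b, p, k, s]
    y = torch.einsum('pstk,bpks->bpts', tv, xv)
    return y.reshape(batch, n)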
# --- end of butterfly-master/tests/test_multiply.py ---
import copy
import itertools
import unittest
import torch
import torch_butterfly
class ButterflyCombineTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_diagonal_butterfly(self):
batch_size = 10
for in_size, out_size in [(9, 15), (15, 9)]:
for nblocks in [1, 2, 3]:
for complex in [False, True]:
for increasing_stride in [True, False]:
for diag_first in [True, False]:
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, in_size, dtype=dtype)
b = torch_butterfly.Butterfly(in_size, out_size, bias=False,
complex=complex,
increasing_stride=increasing_stride,
nblocks=nblocks)
twiddle_copy = b.twiddle.clone()
diagonal = torch.randn(in_size if diag_first else out_size,
dtype=dtype)
out = b(input * diagonal) if diag_first else b(input) * diagonal
for inplace in [True, False]:
b_copy = copy.deepcopy(b) # otherwise inplace would modify b
bd = torch_butterfly.combine.diagonal_butterfly(
b_copy, diagonal, diag_first, inplace)
out_bd = bd(input)
self.assertTrue(torch.allclose(out_bd, out, self.rtol, self.atol))
if not inplace:
self.assertTrue(torch.allclose(b.twiddle, twiddle_copy,
self.rtol, self.atol))
def test_butterfly_product(self):
batch_size = 10
n = 16
in_size = 13
out_size = 15
for complex in [False, True]:
for inc_stride1, inc_stride2 in itertools.product([True, False], [True, False]):
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, in_size, dtype=dtype)
b1 = torch_butterfly.Butterfly(in_size, n, bias=False, complex=complex,
increasing_stride=inc_stride1)
b2 = torch_butterfly.Butterfly(n, out_size, bias=False, complex=complex,
increasing_stride=inc_stride2)
out = b2(b1(input))
b = torch_butterfly.combine.butterfly_product(b1, b2)
out_prod = b(input)
self.assertTrue(torch.allclose(out_prod, out, self.rtol, self.atol))
def test_butterfly_kronecker(self):
batch_size = 10
n1 = 16
n2 = 32
for complex in [False, True]:
for increasing_stride in [True, False]:
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, n2, n1, dtype=dtype)
b1 = torch_butterfly.Butterfly(n1, n1, bias=False, complex=complex,
increasing_stride=increasing_stride)
b2 = torch_butterfly.Butterfly(n2, n2, bias=False, complex=complex,
increasing_stride=increasing_stride)
b_tp = torch_butterfly.combine.TensorProduct(b1, b2)
out_tp = b_tp(input)
b = torch_butterfly.combine.butterfly_kronecker(b1, b2)
out = b(input.reshape(batch_size, n2 * n1)).reshape(batch_size, n2, n1)
self.assertTrue(torch.allclose(out, out_tp, self.rtol, self.atol))
def test_flip_increasing_stride(self):
batch_size = 10
for n in [16, 64]:
for nblocks in [1, 2, 3]:
for complex in [False, True]:
for increasing_stride in [True, False]:
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, n, dtype=dtype)
b = torch_butterfly.Butterfly(n, n, bias=False, complex=complex,
increasing_stride=increasing_stride,
nblocks=nblocks)
b_new = torch_butterfly.combine.flip_increasing_stride(b)
self.assertTrue(b_new[1].increasing_stride == (not b.increasing_stride))
self.assertTrue(torch.allclose(b_new(input), b(input),
self.rtol, self.atol))
if __name__ == "__main__":
unittest.main()
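# The Kronecker test above relies on the standard vec identity. A quick dense
# sanity check (a sketch with hypothetical sizes, independent of torch_butterfly):
# applying A along the last axis and B along the first axis of X matches
# kron(B, A) acting on the row-major flattening of X.
def _kron_vec_identity_demo():
    A = torch.randn(4, 4)  # acts on the n1 (last) axis
    B = torch.randn(3, 3)  # acts on the n2 (first) axis
    X = torch.randn(3, 4)
    lhs = (B @ X) @ A.t()
    rhs = (torch.kron(B, A) @ X.reshape(-1)).reshape(3, 4)
    assert torch.allclose(lhs, rhs, atol=1e-5)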
# --- end of butterfly-master/tests/test_combine.py ---
import math
import unittest
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import torch_butterfly
class ButterflyBase4Test(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_butterfly_imul(self):
batch_size = 10
device = 'cpu'
for in_size, out_size in [(7, 15), (15, 7)]:
for complex in [False, True]:
for increasing_stride in [True, False]:
for init in ['randn', 'ortho', 'identity']:
for nblocks in [1, 2, 3]:
for scale in [0.13, 2.75]:
b = torch_butterfly.ButterflyBase4(in_size, out_size, False,
complex, increasing_stride,
init, nblocks=nblocks).to(device)
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, in_size, dtype=dtype, device=device)
output = b(input)
with torch.no_grad():
b *= scale
output_scaled = b(input)
self.assertTrue(torch.allclose(output * scale, output_scaled,
self.rtol, self.atol),
(output.shape, device, (in_size, out_size), complex, init, nblocks))
if __name__ == "__main__":
unittest.main()
# --- end of butterfly-master/tests/test_butterfly_base4.py ---
import copy
import itertools
import unittest
import torch
import torch_butterfly
from torch_butterfly.complex_utils import complex_matmul, index_last_dim
class ButterflyComplexUtilsTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_complex_matmul(self):
"""Check that our index_last_dim backward is also correct for real input
"""
bs = (3, 5)
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
X = torch.randn(*bs, 128, 16, dtype=torch.complex64, device=device, requires_grad=True)
Y = torch.randn(*bs, 16, 32, dtype=torch.complex64, device=device, requires_grad=True)
prod = complex_matmul(X, Y)
prod_sum = (X.unsqueeze(-1) * Y.unsqueeze(-3)).sum(dim=-2)
self.assertTrue(torch.allclose(prod, prod_sum, self.rtol, self.atol))
g = torch.randn_like(prod)
grad_X, grad_Y = torch.autograd.grad(prod, (X, Y), g)
grad_X_sum, grad_Y_sum = torch.autograd.grad(prod_sum, (X, Y), g)
self.assertTrue(torch.allclose(grad_X, grad_X_sum, self.rtol, self.atol))
self.assertTrue(torch.allclose(grad_Y, grad_Y_sum, self.rtol, self.atol))
X = torch.randn(5, 3, 32, 32, dtype=torch.complex64, device=device, requires_grad=True)
Y = torch.randn(6, 3, 32, 32, dtype=torch.complex64, device=device, requires_grad=True)
prod = complex_matmul(X.permute(2, 3, 0, 1), Y.permute(2, 3, 1, 0)).permute(2, 3, 0, 1)
prod_sum = (X.unsqueeze(1) * Y).sum(dim=2)
self.assertTrue(torch.allclose(prod, prod_sum, self.rtol, self.atol))
g = torch.randn_like(prod)
grad_X, grad_Y = torch.autograd.grad(prod, (X, Y), g)
grad_X_sum, grad_Y_sum = torch.autograd.grad(prod_sum, (X, Y), g)
self.assertTrue(torch.allclose(grad_X, grad_X_sum, self.rtol, self.atol))
self.assertTrue(torch.allclose(grad_Y, grad_Y_sum, self.rtol, self.atol))
def test_index_last_dim(self):
"""Check that our index_last_dim backward is also correct for real input
"""
sizes = (2, 3, 17)
p = torch.randperm(sizes[-1])
X = torch.randn(sizes, requires_grad=True)
out_torch = X[..., p]
out = index_last_dim(X, p)
self.assertTrue(torch.allclose(out, out_torch))
g = torch.randn_like(out)
grad_x_torch, = torch.autograd.grad(out_torch, X, g)
grad_x, = torch.autograd.grad(out, X, g)
self.assertTrue(torch.allclose(grad_x, grad_x_torch))
if __name__ == "__main__":
unittest.main()
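# Small demo of the last-dim permutation semantics tested above (a sketch in
# plain PyTorch): gathering with p and then with its inverse restores the input.
def _perm_inverse_demo():
    p = torch.randperm(6)
    inv = torch.empty_like(p)
    inv[p] = torch.arange(6)  # inv undoes p: inv[p[i]] = i
    x = torch.randn(2, 6)
    assert torch.equal(x[..., p][..., inv], x)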
# --- end of butterfly-master/tests/test_complex_utils.py ---
import math
import unittest
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch_butterfly.multiply import butterfly_multiply_torch
from torch_butterfly.multiply_base4 import butterfly_multiply_base4_torch
from torch_butterfly.multiply_base4 import twiddle_base2_to_base4
class MultiplyBase4Test(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_multiply_base4(self):
batch_size = 10
nstacks = 2
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
for n in [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]:
log_n = int(math.log2(n))
for nblocks in [1, 2, 3, 4]:
for complex in [False, True]:
for increasing_stride in [True, False]:
dtype = torch.float32 if not complex else torch.complex64
# complex randn already has the correct scaling of stddev=1.0
scaling = 1 / math.sqrt(2)
twiddle = torch.randn((nstacks, nblocks, log_n, n // 2, 2, 2),
dtype=dtype, device=device) * scaling
input = torch.randn((batch_size, nstacks, n), dtype=dtype,
device=twiddle.device)
output2 = butterfly_multiply_torch(twiddle, input, increasing_stride)
twiddle4, twiddle2 = twiddle_base2_to_base4(twiddle, increasing_stride)
output4 = butterfly_multiply_base4_torch(twiddle4, twiddle2, input,
increasing_stride)
self.assertTrue(torch.allclose(output2, output4,
rtol=self.rtol, atol=self.atol),
((output2 - output4).abs().max().item(),
n, nblocks, complex, increasing_stride))
if __name__ == "__main__":
unittest.main()
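# Context note, inferred from the structure exercised above: a base-4 butterfly
# merges two adjacent base-2 layers, replacing pairs of 2x2 factors with 4x4
# factors (fewer, larger multiplies); twiddle_base2_to_base4 appears to return
# the merged 4x4 twiddles plus a residual 2x2 layer for when log2(n) is odd.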
# --- end of butterfly-master/tests/test_multiply_base4.py ---
import math
import unittest
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import torch.fft
import torch_butterfly
from torch_butterfly import Butterfly
from torch_butterfly.complex_utils import complex_matmul
from torch_butterfly.combine import TensorProduct
from torch_butterfly.complex_utils import real2complex
class ButterflyTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_butterfly(self):
batch_size = 10
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
for in_size, out_size in [(7, 15), (15, 7)]:
for complex in [False, True]:
for increasing_stride in [True, False]:
for init in ['randn', 'ortho', 'identity']:
for nblocks in [1, 2, 3]:
b = Butterfly(in_size, out_size, True, complex, increasing_stride,
init, nblocks=nblocks).to(device)
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, in_size, dtype=dtype, device=device)
output = b(input)
self.assertTrue(output.shape == (batch_size, out_size),
(output.shape, device, (in_size, out_size), complex, init, nblocks))
if init == 'ortho':
twiddle = b.twiddle
twiddle_np = twiddle.detach().to('cpu').numpy()
twiddle_np = twiddle_np.reshape(-1, 2, 2)
twiddle_norm = np.linalg.norm(twiddle_np, ord=2, axis=(1, 2))
self.assertTrue(np.allclose(twiddle_norm, 1),
(twiddle_norm, device, (in_size, out_size), complex, init))
def test_fft_init(self):
batch_size = 10
n = 16
input = torch.randn(batch_size, n, dtype=torch.complex64)
br = torch_butterfly.permutation.bitreversal_permutation(n, pytorch_format=True)
for increasing_stride in [True, False]:
for nblocks in [1, 2, 3]:
with torch.no_grad():
out_torch = torch.fft.fft(input, norm='ortho')
b = Butterfly(n, n, False, complex=True, increasing_stride=increasing_stride,
init='fft_no_br', nblocks=nblocks)
out = b(input[..., br]) if increasing_stride else b(input)[..., br]
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
out_torch = torch.fft.ifft(input, norm='ortho')
b = Butterfly(n, n, False, complex=True, increasing_stride=increasing_stride,
init='ifft_no_br', nblocks=nblocks)
out = b(input[..., br]) if increasing_stride else b(input)[..., br]
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_fft2d_init(self):
batch_size = 10
in_channels = 3
out_channels = 4
n1, n2 = 16, 32
input = torch.randn(batch_size, in_channels, n2, n1)
for kernel_size1 in [1, 3, 5, 7]:
for kernel_size2 in [1, 3, 5, 7]:
padding1 = (kernel_size1 - 1) // 2
padding2 = (kernel_size2 - 1) // 2
conv = nn.Conv2d(in_channels, out_channels, (kernel_size2, kernel_size1),
padding=(padding2, padding1), padding_mode='circular',
bias=False)
out_torch = conv(input)
weight = conv.weight
w = F.pad(weight.flip(dims=(-1,)), (0, n1 - kernel_size1)).roll(
-padding1, dims=-1)
w = F.pad(w.flip(dims=(-2,)), (0, 0, 0, n2 - kernel_size2)).roll(
-padding2, dims=-2)
increasing_strides = [False, False, True]
inits = ['fft_no_br', 'fft_no_br', 'ifft_no_br']
for nblocks in [1, 2, 3]:
Kd, K1, K2 = [
TensorProduct(
Butterfly(n1, n1, bias=False, complex=True,
increasing_stride=incstride, init=i, nblocks=nblocks),
Butterfly(n2, n2, bias=False, complex=True,
increasing_stride=incstride, init=i, nblocks=nblocks)
)
for incstride, i in zip(increasing_strides, inits)
]
with torch.no_grad():
Kd.map1 *= math.sqrt(n1)
Kd.map2 *= math.sqrt(n2)
out = K2(
complex_matmul(K1(real2complex(input)).permute(2, 3, 0, 1),
Kd(real2complex(w)).permute(2, 3, 1, 0)).permute(2, 3, 0, 1)
).real
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_autograd(self):
"""Check if autograd works (especially for complex), by trying to match a 4x4 matrix.
"""
size = 4
niters = 10000
true_model = nn.Linear(size, size, bias=False)
x = torch.eye(size)
with torch.no_grad():
y = true_model(x)
for complex in [False, True]:
if complex:
model = nn.Sequential(
torch_butterfly.complex_utils.Real2Complex(),
Butterfly(size, size, bias=False, complex=complex),
torch_butterfly.complex_utils.Complex2Real(),
)
else:
model = Butterfly(size, size, bias=False, complex=complex)
with torch.no_grad():
initial_loss = F.mse_loss(model(x), y)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
for i in range(niters):
out = model(x)
loss = F.mse_loss(out, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# At least the loss should decrease
# print(initial_loss, loss)
self.assertTrue(loss.item() < initial_loss.item())
def test_transpose_conjugate_multiply(self):
n = 16
for complex in [False, True]:
for increasing_stride in [True, False]:
for nblocks in [1, 2, 3]:
b = Butterfly(n, n, False, complex, increasing_stride, nblocks=nblocks)
dtype = torch.float32 if not complex else torch.complex64
input = torch.eye(n, dtype=dtype)
matrix = b(input).t()
matrix_t = b.forward(input, transpose=True).t()
matrix_conj = b.forward(input, conjugate=True).t()
matrix_t_conj = b.forward(input, transpose=True, conjugate=True).t()
self.assertTrue(torch.allclose(matrix.t(), matrix_t, self.rtol, self.atol),
(complex, increasing_stride, nblocks))
self.assertTrue(torch.allclose(matrix.conj(), matrix_conj,
self.rtol, self.atol),
(complex, increasing_stride, nblocks))
self.assertTrue(torch.allclose(matrix.t().conj(), matrix_t_conj,
self.rtol, self.atol),
(complex, increasing_stride, nblocks))
def test_subtwiddle(self):
batch_size = 10
n = 16
input_size = 8
for complex in [False, True]:
for increasing_stride in [True, False]:
for nblocks in [1, 2, 3]:
b = Butterfly(n, n, True, complex, increasing_stride, nblocks=nblocks)
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, input_size, dtype=dtype)
output = b(input, subtwiddle=True)
self.assertTrue(output.shape == (batch_size, input_size),
(output.shape, n, input_size, complex, nblocks))
def test_butterfly_imul(self):
batch_size = 10
device = 'cpu'
for in_size, out_size in [(7, 15), (15, 7)]:
for complex in [False, True]:
for increasing_stride in [True, False]:
for init in ['randn', 'ortho', 'identity']:
for nblocks in [1, 2, 3]:
for scale in [0.13, 2.75]:
b = Butterfly(in_size, out_size, False, complex, increasing_stride,
init, nblocks=nblocks).to(device)
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, in_size, dtype=dtype, device=device)
output = b(input)
with torch.no_grad():
b *= scale
output_scaled = b(input)
self.assertTrue(torch.allclose(output * scale, output_scaled,
self.rtol, self.atol),
(output.shape, device, (in_size, out_size), complex, init, nblocks))
def test_butterfly_to_base4(self):
batch_size = 10
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
for in_size, out_size in [(7, 15), (15, 7)]:
for complex in [False, True]:
for increasing_stride in [True, False]:
for init in ['randn', 'ortho', 'identity']:
for nblocks in [1, 2, 3]:
b = Butterfly(in_size, out_size, True, complex, increasing_stride,
init, nblocks=nblocks).to(device)
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, in_size, dtype=dtype, device=device)
output = b(input)
b4 = b.to_base4()
output_base4 = b4(input)
self.assertTrue(torch.allclose(output, output_base4,
self.rtol, self.atol),
(output.shape, device, (in_size, out_size), complex, init, nblocks))
def test_butterfly_unitary(self):
# Test shape
batch_size = 10
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
for in_size, out_size in [(7, 15), (15, 7)]:
for increasing_stride in [True, False]:
for nblocks in [1, 2, 3]:
b = torch_butterfly.ButterflyUnitary(in_size, out_size, True,
increasing_stride, nblocks=nblocks).to(device)
dtype = torch.complex64
input = torch.randn(batch_size, in_size, dtype=dtype, device=device)
output = b(input)
self.assertTrue(output.shape == (batch_size, out_size),
(output.shape, device, (in_size, out_size), nblocks))
# Test that it's actually unitary
size = 32
for increasing_stride in [True, False]:
for nblocks in [1, 2, 3]:
b = torch_butterfly.ButterflyUnitary(size, size, False,
increasing_stride, nblocks=nblocks)
eye = torch.eye(size, dtype=torch.complex64)
twiddle_matrix_np = b(eye).t().detach().numpy()
self.assertTrue(np.allclose(twiddle_matrix_np @ twiddle_matrix_np.T.conj(),
np.eye(size), self.rtol, self.atol))
def test_butterfly_bmm(self):
batch_size = 10
matrix_batch = 3
for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
for in_size, out_size in [(7, 15), (15, 7)]:
for complex in [False, True]:
for increasing_stride in [True, False]:
for nblocks in [1, 2, 3]:
# Test shape
b_bmm = torch_butterfly.ButterflyBmm(in_size, out_size, matrix_batch, True,
complex, increasing_stride, nblocks=nblocks).to(device)
dtype = torch.float32 if not complex else torch.complex64
input = torch.randn(batch_size, matrix_batch, in_size, dtype=dtype, device=device)
output = b_bmm(input)
self.assertTrue(output.shape == (batch_size, matrix_batch, out_size),
(output.shape, device, (in_size, out_size), nblocks))
# Check that the result is the same as looping over butterflies
output_loop = []
for i in range(matrix_batch):
b = Butterfly(in_size, out_size, True, complex, increasing_stride,
init=b_bmm.twiddle[i * b_bmm.nstacks:(i + 1)
* b_bmm.nstacks],
nblocks=nblocks).to(device)
with torch.no_grad():
b.bias.copy_(b_bmm.bias[i])
output_loop.append(b(input[:, i]))
with torch.no_grad():
output_loop = torch.stack(output_loop, dim=1)
self.assertTrue(torch.allclose(output, output_loop),
((output - output_loop).abs().max().item(), output.shape, device, (in_size, out_size), complex))
def test_butterfly_bmm_tensorproduct(self):
# Just to show how to do TensorProduct (e.g., Conv2d) with ButterflyBmm
batch_size = 10
in_channels = 3
out_channels = 6
n1, n2 = 32, 16
dtype = torch.complex64
input = torch.randn(batch_size, in_channels, n2, n1, dtype=dtype)
# Generate out_channels x in_channels butterfly matrices and loop over them
b1s = [Butterfly(n1, n1, bias=False, complex=True)
for _ in range(out_channels * in_channels)]
b2s = [Butterfly(n2, n2, bias=False, complex=True)
for _ in range(out_channels * in_channels)]
b_tp = [torch_butterfly.combine.TensorProduct(b1, b2) for b1, b2 in zip(b1s, b2s)]
with torch.no_grad():
outputs = []
for o in range(out_channels):
output = []
for i in range(in_channels):
index = o * in_channels + i
output.append(b_tp[index](input[:, i]))
outputs.append(torch.stack(output, dim=1))
out = torch.stack(outputs, dim=1)
assert out.shape == (batch_size, out_channels, in_channels, n2, n1)
# Use ButterflyBmm instead
b1_bmm = torch_butterfly.ButterflyBmm(n1, n1, matrix_batch=out_channels * in_channels,
bias=False, complex=True,
init=torch.cat([b1.twiddle for b1 in b1s]))
b2_bmm = torch_butterfly.ButterflyBmm(n2, n2, matrix_batch=out_channels * in_channels,
bias=False, complex=True,
init=torch.cat([b2.twiddle for b2 in b2s]))
input_reshaped = input.transpose(1, 2).reshape(batch_size, n2, 1, in_channels, n1)
input_expanded = input_reshaped.expand(batch_size, n2, out_channels, in_channels, n1)
out_bmm = b1_bmm(input_expanded.reshape(batch_size, n2, out_channels * in_channels, n1))
out_bmm = out_bmm.transpose(1, 3) # (batch_size, n1, out_channels * in_channels, n2)
out_bmm = b2_bmm(out_bmm) # (batch_size, n1, out_channels * in_channels, n2)
out_bmm = out_bmm.permute(0, 2, 3, 1) # (batch_size, out_channels * in_channels, n2, n1)
out_bmm = out_bmm.reshape(batch_size, out_channels, in_channels, n2, n1)
self.assertTrue(torch.allclose(out_bmm, out))
if __name__ == "__main__":
unittest.main()
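# Minimal usage sketch distilled from the tests above (hypothetical sizes):
# Butterfly acts on the last dimension as a structured stand-in for nn.Linear.
def _butterfly_linear_demo():
    layer = Butterfly(in_size=20, out_size=30, bias=True)
    x = torch.randn(8, 20)
    y = layer(x)
    assert y.shape == (8, 30)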
# --- end of butterfly-master/tests/test_butterfly.py ---
import math
import unittest
import numpy as np
from scipy import linalg as la
import scipy.fft
import torch
from torch import nn
from torch.nn import functional as F
import torch.fft
import pywt # To test wavelet
import torch_butterfly
class ButterflySpecialTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_fft(self):
batch_size = 10
n = 16
input = torch.randn(batch_size, n, dtype=torch.complex64)
for normalized in [False, True]:
out_torch = torch.fft.fft(input, norm=None if not normalized else 'ortho')
for br_first in [True, False]:
b = torch_butterfly.special.fft(n, normalized=normalized, br_first=br_first)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_fft_unitary(self):
batch_size = 10
n = 16
input = torch.randn(batch_size, n, dtype=torch.complex64)
normalized = True
out_torch = torch.fft.fft(input, norm=None if not normalized else 'ortho')
for br_first in [True, False]:
b = torch_butterfly.special.fft_unitary(n, br_first=br_first)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_ifft(self):
batch_size = 10
n = 16
input = torch.randn(batch_size, n, dtype=torch.complex64)
for normalized in [False, True]:
out_torch = torch.fft.ifft(input, norm=None if not normalized else 'ortho')
for br_first in [True, False]:
b = torch_butterfly.special.ifft(n, normalized=normalized, br_first=br_first)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_ifft_unitary(self):
batch_size = 10
n = 16
input = torch.randn(batch_size, n, dtype=torch.complex64)
normalized = True
out_torch = torch.fft.ifft(input, norm=None if not normalized else 'ortho')
for br_first in [True, False]:
b = torch_butterfly.special.ifft_unitary(n, br_first=br_first)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_dct(self):
batch_size = 10
n = 16
input = torch.randn(batch_size, n)
for type in [2, 3, 4]:
for normalized in [False, True]:
out_sp = torch.tensor(scipy.fft.dct(input.numpy(), type=type,
norm=None if not normalized else 'ortho'))
b = torch_butterfly.special.dct(n, type=type, normalized=normalized)
out = b(input)
self.assertTrue(torch.allclose(out, out_sp, self.rtol, self.atol))
def test_dst(self):
batch_size = 1
n = 16
input = torch.randn(batch_size, n)
for type in [2, 4]:
for normalized in [False, True]:
out_sp = torch.tensor(scipy.fft.dst(input.numpy(), type=type,
norm=None if not normalized else 'ortho'))
b = torch_butterfly.special.dst(n, type=type, normalized=normalized)
out = b(input)
self.assertTrue(torch.allclose(out, out_sp, self.rtol, self.atol))
def test_circulant(self):
batch_size = 10
n = 13
for complex in [False, True]:
dtype = torch.float32 if not complex else torch.complex64
col = torch.randn(n, dtype=dtype)
C = la.circulant(col.numpy())
input = torch.randn(batch_size, n, dtype=dtype)
out_torch = torch.tensor(input.detach().numpy() @ C.T)
out_np = torch.tensor(np.fft.ifft(np.fft.fft(input.numpy()) * np.fft.fft(col.numpy())),
dtype=dtype)
self.assertTrue(torch.allclose(out_torch, out_np, self.rtol, self.atol))
# Just to show how to implement circulant multiply with FFT
if complex:
input_f = torch.fft.fft(input)
col_f = torch.fft.fft(col)
prod_f = input_f * col_f
out_fft = torch.fft.ifft(prod_f)
self.assertTrue(torch.allclose(out_torch, out_fft, self.rtol, self.atol))
for separate_diagonal in [True, False]:
b = torch_butterfly.special.circulant(col, transposed=False,
separate_diagonal=separate_diagonal)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
row = torch.randn(n, dtype=dtype)
C = la.circulant(row.numpy()).T
input = torch.randn(batch_size, n, dtype=dtype)
out_torch = torch.tensor(input.detach().numpy() @ C.T)
# row is the reverse of col, except the 0-th element stays put
# This corresponds to the same reversal in the frequency domain.
# https://en.wikipedia.org/wiki/Discrete_Fourier_transform#Time_and_frequency_reversal
row_f = np.fft.fft(row.numpy())
row_f_reversed = np.hstack((row_f[:1], row_f[1:][::-1]))
out_np = torch.tensor(np.fft.ifft(np.fft.fft(input.numpy())
* row_f_reversed), dtype=dtype)
self.assertTrue(torch.allclose(out_torch, out_np, self.rtol, self.atol))
for separate_diagonal in [True, False]:
b = torch_butterfly.special.circulant(row, transposed=True,
separate_diagonal=separate_diagonal)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_toeplitz(self):
batch_size = 10
for n, m in [(13, 38), (27, 11)]:
for complex in [False, True]:
dtype = torch.float32 if not complex else torch.complex64
col = torch.randn(n, dtype=dtype)
row = torch.randn(m, dtype=dtype)
T = la.toeplitz(col.numpy(), row.numpy())
input = torch.randn(batch_size, m, dtype=dtype)
out_torch = torch.tensor(input.detach().numpy() @ T.T)
for separate_diagonal in [True, False]:
b = torch_butterfly.special.toeplitz(col, row,
separate_diagonal=separate_diagonal)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_hadamard(self):
batch_size = 10
n = 16
H = torch.tensor(la.hadamard(n), dtype=torch.float32)
input = torch.randn(batch_size, n)
out_torch = F.linear(input, H) / math.sqrt(n)
for increasing_stride in [True, False]:
b = torch_butterfly.special.hadamard(n, normalized=True,
increasing_stride=increasing_stride)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_hadamard_diagonal(self):
batch_size = 10
n = 16
H = torch.tensor(la.hadamard(n), dtype=torch.float32) / math.sqrt(n)
for k in [1, 2, 3]:
diagonals = torch.randint(0, 2, (k, n)) * 2 - 1.0
input = torch.randn(batch_size, n)
out_torch = input
for diagonal in diagonals.unbind():
out_torch = F.linear(out_torch * diagonal, H)
for increasing_stride in [True, False]:
for separate_diagonal in [True, False]:
b = torch_butterfly.special.hadamard_diagonal(
diagonals, normalized=True, increasing_stride=increasing_stride,
separate_diagonal=separate_diagonal
)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_conv1d_circular_singlechannel(self):
batch_size = 10
for n in [13, 16]:
for kernel_size in [1, 3, 5, 7]:
padding = (kernel_size - 1) // 2
conv = nn.Conv1d(1, 1, kernel_size, padding=padding, padding_mode='circular',
bias=False)
weight = conv.weight
input = torch.randn(batch_size, 1, n)
out_torch = conv(input)
# Just to show how to implement conv1d with FFT
input_f = torch.fft.rfft(input)
col = F.pad(weight.flip(dims=(-1,)), (0, n - kernel_size)).roll(-padding, dims=-1)
col_f = torch.fft.rfft(col)
prod_f = input_f * col_f
out_fft = torch.fft.irfft(prod_f, n=n)
self.assertTrue(torch.allclose(out_torch, out_fft, self.rtol, self.atol))
for separate_diagonal in [True, False]:
b = torch_butterfly.special.conv1d_circular_singlechannel(n, weight,
separate_diagonal)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_conv1d_circular_multichannel(self):
batch_size = 10
in_channels = 3
out_channels = 4
for n in [13, 16]:
for kernel_size in [1, 3, 5, 7]:
padding = (kernel_size - 1) // 2
conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding,
padding_mode='circular', bias=False)
weight = conv.weight
input = torch.randn(batch_size, in_channels, n)
out_torch = conv(input)
# Just to show how to implement conv1d with FFT
input_f = torch.fft.rfft(input)
col = F.pad(weight.flip(dims=(-1,)), (0, n - kernel_size)).roll(-padding, dims=-1)
col_f = torch.fft.rfft(col)
prod_f = (input_f.unsqueeze(1) * col_f).sum(dim=2)
out_fft = torch.fft.irfft(prod_f, n=n)
self.assertTrue(torch.allclose(out_torch, out_fft, self.rtol, self.atol))
b = torch_butterfly.special.conv1d_circular_multichannel(n, weight)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_fft2d(self):
batch_size = 10
n1 = 16
n2 = 32
input = torch.randn(batch_size, n2, n1, dtype=torch.complex64)
for normalized in [False, True]:
out_torch = torch.fft.fftn(input, dim=(-1, -2),
norm=None if not normalized else 'ortho')
# Just to show how fft2d is exactly 2 ffts on each dimension
input_f = torch.fft.fft(input, dim=-1, norm=None if not normalized else 'ortho')
out_fft = torch.fft.fft(input_f, dim=-2, norm=None if not normalized else 'ortho')
self.assertTrue(torch.allclose(out_torch, out_fft, self.rtol, self.atol))
for br_first in [True, False]:
for flatten in [False, True]:
b = torch_butterfly.special.fft2d(n1, n2, normalized=normalized,
br_first=br_first, flatten=flatten)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_fft2d_unitary(self):
batch_size = 10
n1 = 16
n2 = 32
input = torch.randn(batch_size, n2, n1, dtype=torch.complex64)
normalized = True
out_torch = torch.fft.fftn(input, dim=(-1, -2), norm=None if not normalized else 'ortho')
for br_first in [True, False]:
b = torch_butterfly.special.fft2d_unitary(n1, n2, br_first=br_first)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_ifft2d(self):
batch_size = 10
n1 = 32
n2 = 16
input = torch.randn(batch_size, n2, n1, dtype=torch.complex64)
for normalized in [False, True]:
out_torch = torch.fft.ifftn(input, dim=(-1, -2),
norm=None if not normalized else 'ortho')
# Just to show how ifft2d is exactly 2 iffts on each dimension
input_f = torch.fft.ifft(input, dim=-1, norm=None if not normalized else 'ortho')
out_fft = torch.fft.ifft(input_f, dim=-2, norm=None if not normalized else 'ortho')
self.assertTrue(torch.allclose(out_torch, out_fft, self.rtol, self.atol))
for br_first in [True, False]:
for flatten in [False, True]:
b = torch_butterfly.special.ifft2d(n1, n2, normalized=normalized,
br_first=br_first, flatten=flatten)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_ifft2d_unitary(self):
batch_size = 10
n1 = 16
n2 = 32
input = torch.randn(batch_size, n2, n1, dtype=torch.complex64)
normalized = True
out_torch = torch.fft.ifftn(input, dim=(-1, -2), norm=None if not normalized else 'ortho')
for br_first in [True, False]:
b = torch_butterfly.special.ifft2d_unitary(n1, n2, br_first=br_first)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_conv2d_circular_multichannel(self):
batch_size = 10
in_channels = 3
out_channels = 4
for n1 in [13, 16]:
for n2 in [27, 32]:
# flatten is only supported for powers of 2 for now
if n1 == 1 << int(math.log2(n1)) and n2 == 1 << int(math.log2(n2)):
flatten_cases = [False, True]
else:
flatten_cases = [False]
for kernel_size1 in [1, 3, 5, 7]:
for kernel_size2 in [1, 3, 5, 7]:
padding1 = (kernel_size1 - 1) // 2
padding2 = (kernel_size2 - 1) // 2
conv = nn.Conv2d(in_channels, out_channels, (kernel_size2, kernel_size1),
padding=(padding2, padding1), padding_mode='circular',
bias=False)
weight = conv.weight
input = torch.randn(batch_size, in_channels, n2, n1)
out_torch = conv(input)
# Just to show how to implement conv2d with FFT
input_f = torch.fft.rfftn(input, dim=(-1, -2))
col = F.pad(weight.flip(dims=(-1,)), (0, n1 - kernel_size1)).roll(
-padding1, dims=-1)
col = F.pad(col.flip(dims=(-2,)), (0, 0, 0, n2 - kernel_size2)).roll(
-padding2, dims=-2)
col_f = torch.fft.rfftn(col, dim=(-1, -2))
prod_f = (input_f.unsqueeze(1) * col_f).sum(dim=2)
out_fft = torch.fft.irfftn(prod_f, dim=(-1, -2), s=(n1, n2))
self.assertTrue(torch.allclose(out_torch, out_fft, self.rtol, self.atol))
for flatten in flatten_cases:
b = torch_butterfly.special.conv2d_circular_multichannel(
n1, n2, weight, flatten=flatten)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_fastfood(self):
batch_size = 10
n = 32
H = torch.tensor(la.hadamard(n), dtype=torch.float32) / math.sqrt(n)
diag1 = torch.randint(0, 2, (n,)) * 2 - 1.0
diag2, diag3 = torch.randn(2, n)
permutation = torch.randperm(n)
input = torch.randn(batch_size, n)
out_torch = F.linear(input * diag1, H)[:, permutation]
out_torch = F.linear(out_torch * diag2, H) * diag3
for increasing_stride in [True, False]:
for separate_diagonal in [True, False]:
b = torch_butterfly.special.fastfood(
diag1, diag2, diag3, permutation, normalized=True,
increasing_stride=increasing_stride, separate_diagonal=separate_diagonal
)
out = b(input)
self.assertTrue(torch.allclose(out, out_torch, self.rtol, self.atol))
def test_acdc(self):
batch_size = 10
n = 32
input = torch.randn(batch_size, n)
diag1, diag2 = torch.randn(2, n)
for separate_diagonal in [True, False]:
out_sp = torch.tensor(scipy.fft.dct(input.numpy(), norm='ortho')) * diag1
out_sp = torch.tensor(scipy.fft.idct(out_sp.numpy(), norm='ortho')) * diag2
b = torch_butterfly.special.acdc(diag1, diag2, dct_first=True,
separate_diagonal=separate_diagonal)
out = b(input)
self.assertTrue(torch.allclose(out, out_sp, self.rtol, self.atol))
out_sp = torch.tensor(scipy.fft.idct(input.numpy(), norm='ortho')) * diag1
out_sp = torch.tensor(scipy.fft.dct(out_sp.numpy(), norm='ortho')) * diag2
b = torch_butterfly.special.acdc(diag1, diag2, dct_first=False,
separate_diagonal=separate_diagonal)
out = b(input)
self.assertTrue(torch.allclose(out, out_sp, self.rtol, self.atol))
def test_wavelet_haar(self):
batch_size = 10
n = 32
input = torch.randn(batch_size, n)
out_pywt = torch.tensor(np.hstack(pywt.wavedec(input.numpy(), 'haar')))
b = torch_butterfly.special.wavelet_haar(n)
out = b(input)
self.assertTrue(torch.allclose(out, out_pywt, self.rtol, self.atol))
if __name__ == "__main__":
unittest.main()
# --- end of butterfly-master/tests/test_special.py ---
import copy
import itertools
import math
import unittest
import numpy as np
import torch
import torch_butterfly
from torch_butterfly.permutation import perm_vec_to_mat, invert, matrix_to_butterfly_factor
class ButterflyPermutationTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_matrix_to_butterfly_factor(self):
num_repeats = 10
for n in [2, 16, 64]:
for _ in range(num_repeats):
log_n = int(math.ceil(math.log2(n)))
for log_k in range(1, log_n + 1):
b = torch_butterfly.Butterfly(n, n, bias=False, init='identity')
factor = torch.randn(n//2, 2, 2)
b.twiddle[0, 0, log_k - 1].copy_(factor)
matrix = b(torch.eye(n)).t()
factor_out = matrix_to_butterfly_factor(matrix.detach().numpy(), log_k,
pytorch_format=True, check_input=True)
self.assertTrue(torch.allclose(factor_out, factor))
def test_modular_balance(self):
num_repeats = 50
for n in [2, 16, 64]:
for _ in range(num_repeats):
v = np.random.permutation(n)
Rinv_perms, L_vec = torch_butterfly.permutation.modular_balance(v)
self.assertTrue(torch_butterfly.permutation.is_modular_balanced(L_vec))
v2 = v
for p in Rinv_perms:
v2 = v2[p]
self.assertTrue(np.all(v2 == L_vec))
lv2 = L_vec
for p in reversed(Rinv_perms):
lv2 = lv2[torch_butterfly.permutation.invert(p)]
self.assertTrue(np.all(lv2 == v))
R_perms = [perm_vec_to_mat(invert(p)) for p in reversed(Rinv_perms)]
mat = perm_vec_to_mat(v, left=False)
for p in reversed(R_perms):
mat = mat @ p.T
self.assertTrue(np.allclose(mat, perm_vec_to_mat(L_vec)))
def test_perm2butterfly_slow(self):
num_repeats = 50
for n in [2, 13, 38]:
for increasing_stride in [False, True]:
for complex in [False, True]:
for _ in range(num_repeats):
v = torch.randperm(n)
b = torch_butterfly.permutation.perm2butterfly_slow(v, complex,
increasing_stride)
input = torch.arange(n, dtype=torch.float32)
if complex:
input = input.to(torch.complex64)
self.assertTrue(torch.allclose(input[v], b(input)))
def test_perm2butterfly(self):
num_repeats = 50
for n in [2, 13, 38]:
for increasing_stride in [False, True]:
for complex in [False, True]:
for _ in range(num_repeats):
v = torch.randperm(n)
b = torch_butterfly.permutation.perm2butterfly(v, complex,
increasing_stride)
input = torch.arange(n, dtype=torch.float32)
if complex:
input = input.to(torch.complex64)
self.assertTrue(torch.allclose(input[v], b(input)))
if __name__ == "__main__":
unittest.main()
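# Reference sketch of the permutation-matrix convention assumed by the tests
# above (an illustration, not necessarily perm_vec_to_mat's exact convention,
# which also takes a `left` flag): P is built so that applying P to x matches
# fancy indexing x[v].
def _perm_vec_to_mat_ref(v):
    n = len(v)
    P = np.zeros((n, n))
    P[np.arange(n), v] = 1  # (P @ x)[i] = x[v[i]]
    return P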
# --- end of butterfly-master/tests/test_permutation.py ---
import os, sys, subprocess
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see these modules
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import math
from pathlib import Path
import pickle
import random
import numpy as np
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from
from ray.tune.schedulers import AsyncHyperBandScheduler
from tune import run as tune_run # Aliased: the Sacred automain below is also named 'run'
from butterfly import Butterfly
from butterfly.permutation import Permutation, FixedPermutation, PermutationFactor
from butterfly.utils import bitreversal_permutation
from butterfly.complex_utils import real_to_complex
from training import PytorchTrainable, TrainableMatrixFactorization
from target_matrix import named_target_matrix
N_LBFGS_STEPS = 20
N_TRIALS_TO_POLISH = 16
class TrainableBP(TrainableMatrixFactorization):
"""Product of butterfly matrices and permutation matrices.
"""
def _setup(self, config):
device = config['device']
self.device = device
size = config['size']
if isinstance(config['target_matrix'], str):
self.target_matrix = torch.tensor(named_target_matrix(config['target_matrix'], size), dtype=torch.float).to(device)
else:
self.target_matrix = torch.tensor(config['target_matrix'], dtype=torch.float).to(device)
assert self.target_matrix.shape[0] == self.target_matrix.shape[1], 'Only square matrices are supported'
assert self.target_matrix.dim() in [2, 3], 'target matrix must be 2D if real or 3D if complex'
complex = self.target_matrix.dim() == 3 or config['complex']
torch.manual_seed(config['seed'])
if config['model'] == 'B':
self.model = nn.Sequential(
FixedPermutation(torch.tensor(bitreversal_permutation(size))),
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, ortho_init=True)
).to(device)
elif config['model'] == 'BP':
self.model = nn.Sequential(
Permutation(size=size, share_logit=config['share_logit'][0]),
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, ortho_init=True)
).to(device)
elif config['model'] == 'PBT':
self.model = nn.Sequential(
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, increasing_stride=False, ortho_init=True),
Permutation(size=size, share_logit=config['share_logit'][0])
).to(device)
elif config['model'] == 'BPP':
self.model = nn.Sequential(
PermutationFactor(size=size),
Permutation(size=size, share_logit=config['share_logit'][0]),
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, ortho_init=True)
).to(device)
elif config['model'] == 'BPBP':
self.model = nn.Sequential(
Permutation(size=size, share_logit=config['share_logit'][0]),
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, ortho_init=True),
Permutation(size=size, share_logit=config['share_logit'][1]),
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, ortho_init=True)
).to(device)
elif config['model'] == 'BBT':
# param_type = 'regular' if complex else 'perm'
param_type = config['param']
self.model = nn.Sequential(
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, param=param_type, increasing_stride=False),
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, param=param_type, increasing_stride=True)
)
elif config['model'][0] == 'T' and (config['model'][1:]).isdigit():
depth = int(config['model'][1:])
param_type = config['param']
self.model = nn.Sequential(
*[
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, param=param_type, increasing_stride=False)
for _ in range(depth)
]
)
elif config['model'][0:3] == 'BBT' and (config['model'][3:]).isdigit():
depth = int(config['model'][3:])
param_type = config['param']
self.model = nn.Sequential(
*[
nn.Sequential(
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, param=param_type, increasing_stride=False),
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, param=param_type, increasing_stride=True)
)
for _ in range(depth)
]
)
elif config['model'][0] == 'B' and (config['model'][1:]).isdigit():
depth = int(config['model'][1:])
param_type = config['param']
self.model = nn.Sequential(
*[
Butterfly(in_size=size, out_size=size, bias=False, complex=complex, param=param_type, increasing_stride=True)
for _ in range(depth)
]
)
elif config['model'] == 'butterfly':
# e = int(config['model'][4:])
self.model = Butterfly(in_size=size, out_size=size, complex=complex, **config['bfargs'])
# elif config['model'][0:3] == 'ODO':
# if (config['model'][3:]).isdigit():
# width = int(config['model'][3:])
# self.model = Butterfly(in_size=size, out_size=size, bias=False, complex=False, param='odo', tied_weight=True, nblocks=0, expansion=width, diag_init='normal')
# elif config['model'][3] == 'k':
# k = int(config['model'][4:])
# self.model = Butterfly(in_size=size, out_size=size, bias=False, complex=False, param='odo', tied_weight=True, nblocks=k, diag_init='normal')
# non-butterfly transforms
# elif config['model'][0:2] == 'TL' and (config['model'][2:]).isdigit():
# rank = int(config['model'][2:])
elif config['model'][0:4] == 'rank' and (config['model'][4:]).isdigit():
rank = int(config['model'][4:])
self.model = nn.Sequential(
nn.Linear(size, rank, bias=False),
nn.Linear(rank, size, bias=False),
)
else:
assert False, f"Model {config['model']} not implemented"
self.nparameters = sum(param.nelement() for param in self.model.parameters())
print("Parameters: ", self.nparameters)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.n_epochs_per_validation = config['n_epochs_per_validation']
self.input = torch.eye(size).to(device)
if complex:
self.input = real_to_complex(self.input)
def freeze(self):
try:
for i, m in enumerate(self.model):
if isinstance(m, Permutation) or isinstance(m, PermutationFactor):
self.model[i] = FixedPermutation(m.argmax())
except TypeError: # self.model may be a single module rather than a Sequential
pass
def polish(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
the nearest local optima.
"""
# Hack: create a new instance without calling __init__, since trainable.__init__
# creates result_dir and log_dir in the wrong place (~/ray_results)
trainable_cls = TrainableBP
trainable = trainable_cls.__new__(trainable_cls)
trainable._setup(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
loss = trainable.polish(N_LBFGS_STEPS, save_to_self_model=True)
torch.save(trainable.model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
# round for permutation experiments
def proj(m):
if isinstance(m, Butterfly):
m.round_to_perm()
trainable.model.apply(proj)
loss = trainable.loss().item()
return loss
ex = Experiment('Transform_factorization')
ex.observers.append(FileStorageObserver.create('logs_new'))
slack_config_path = Path('../config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.config
def default_config():
model = 'BP'
target = 'dft' # The target matrix to factor ('dft', 'idft', 'dct', 'hadamard')
size = 8 # Size of matrix to factor, must be power of 2
complex = False # Whether to use complex factorization or real factorization
    fixed_order = True # Whether the order of the factors is fixed
    param = 'regular' # How to constrain the parameters
    b = {}  # Extra Butterfly kwargs passed through as 'bfargs' (assumed default; 'b' is captured by transform_experiment and run below)
lr_min = 1e-4
lr_max = 1e-2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nepochsvalid = 10 # Frequency of validation (polishing), in terms of epochs
nmaxepochs = 200 # Maximum number of epochs
result_dir = project_root + '/learning_transforms/results_new' # Directory to store results
cuda = torch.cuda.is_available() # Whether to use GPU
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
@ex.capture
def transform_experiment(model, target, size, complex, param, lr_min, lr_max, ntrials, nsteps, nepochsvalid, result_dir, cuda, nthreads, smoke_test, b):
# assert model in ['B', 'BP', 'PBT', 'BPP', 'BPBP', 'BBT', 'BBB'], f'Model {model} not implemented'
config={
'model': model,
'target_matrix': target,
'size': size,
'complex': complex,
# 'share_logit': sample_from(lambda spec: np.random.choice((True, False), size=2)),
'share_logit': True,
'bfargs': b,
'param': param,
# 'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(lr_min), math.log(lr_max)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
'n_epochs_per_validation': nepochsvalid,
'device': 'cuda' if cuda else 'cpu',
}
b_args = '_'.join([k+':'+str(v) for k,v in b.items()])
commit_id = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip().decode('utf-8')
experiment = RayExperiment(
# name=f'{commit_id}_{target}_factorization_{model}_{complex}_{size}_{param}',
name=f'{size}_{target}_{model}_{b_args}_c{complex}_{commit_id}',
run=TrainableBP,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0.25 if cuda else 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(model, target, size, result_dir, nmaxepochs, nthreads, cuda, b):
experiment = transform_experiment()
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
torch.set_num_threads(nthreads)
try:
with open('../config/redis_address', 'r') as f:
address = f.read().strip()
ray.init(redis_address=address)
    except Exception:  # no cluster address configured; start Ray locally
        ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
    trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False, queue_trials=True, early_stop_all_trials=True)  # run_experiments, as in the sibling scripts; calling run() here would recurse
trials = [trial for trial in trials if trial.last_result is not None]
losses = [-trial.last_result.get('negative_loss', float('-inf')) for trial in trials]
nparameters = trials[0].last_result['nparameters']
niterations = trials[0].last_result['training_iteration']
print(np.array(losses))
# Polish solutions with L-BFGS
polish_fn = ray.remote(num_gpus=0.25 if cuda else 0)(polish)
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result.get('negative_loss', float('-inf')))
n_trials = min(N_TRIALS_TO_POLISH, len(trials))
sorted_trials = sorted_trials[:n_trials]
    polished_losses = ray.get([polish_fn.remote(trial) for trial in sorted_trials])
    for i in range(n_trials):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
sorted_polished_trials = sorted(sorted_trials, key=lambda trial: -trial.last_result['polished_negative_loss'])
print(np.array([-trial.last_result['negative_loss'] for trial in sorted_polished_trials]))
print(np.array([-trial.last_result['polished_negative_loss'] for trial in sorted_polished_trials]))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
# print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
    if min(losses + polished_losses) != -sorted_polished_trials[0].last_result['polished_negative_loss']:
print("BEST LOSS", min(losses + polished_losses), "BEST POLISHED", -sorted_polished_trials[0].last_result['polished_negative_loss'])
return size, target, model, b, nparameters, niterations, -sorted_polished_trials[0].last_result['polished_negative_loss']
|
butterfly-master
|
learning_transforms/learning_transforms.py
|
import math
import operator
import functools
import torch
from torch import nn
from complex_utils import complex_mul, complex_matmul
from ops import polymatmul, ops_transpose_mult_br
from sparsemax import sparsemax
from utils import bitreversal_permutation
class HstackDiag(nn.Module):
"""Horizontally stacked diagonal matrices of size n x 2n. Each entry in a 2x2
matrix of polynomials.
"""
def __init__(self, size, deg=0, diag1=None, diag2=None):
"""
Parameters:
size: size of diagonal matrix
deg: degree of the polynomials
diag1: initialization for the diagonal, should be n x 2 x 2 x (d + 1), where d is the degree of the polynomials
diag2: initialization for the diagonal, should be n x 2 x 2 x (d + 1), where d is the degree of the polynomials
"""
super().__init__()
self.size = size
        # Explicit None checks: `diag1 or default` would call __bool__ on a multi-element tensor
        self.diag1 = diag1 if diag1 is not None else nn.Parameter(torch.randn(size, 2, 2, deg + 1))
        self.diag2 = diag2 if diag2 is not None else nn.Parameter(torch.randn(size, 2, 2, deg + 1))
assert self.diag1.shape == self.diag2.shape, 'The two diagonals must have the same shape'
self.deg = self.diag1.shape[-1] - 1
def forward(self, input_):
"""
Parameters:
input_: (b, 2 * size, 2, 2, d1)
Return:
output: (b, size, 2, 2, d1 + self.deg - 1)
"""
output = polymatmul(input_[:, :self.size], self.diag1) + polymatmul(input_[:, self.size:], self.diag2)
return output
class HstackDiagProduct(nn.Module):
"""Product of HstackDiag matrices.
"""
def __init__(self, size):
m = int(math.log2(size))
assert size == 1 << m, "size must be a power of 2"
super().__init__()
self.size = size
self.factors = nn.ModuleList([HstackDiag(size >> (i + 1), deg=(1 << i)) for i in range(m)[::-1]])
self.P_init = nn.Parameter(torch.randn(1, 2, 1, 2))
def forward(self, input_):
"""
Parameters:
input_: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
output = input_
for factor in self.factors[::-1]:
output = factor(output)
result = polymatmul(output[:, :, [1], :, :-1], self.P_init).squeeze(1).squeeze(1).squeeze(1)
return result
def test_hstackdiag_product():
size = 8
model = HstackDiagProduct(size)
# Legendre polynomials
n = size
    m = int(math.log2(n))  # use math.log2: numpy is not imported in this file
n_range = torch.arange(n, dtype=torch.float)
a = (2 * n_range + 3) / (n_range + 2)
b = torch.zeros(n)
c = -(n_range + 1) / (n_range + 2)
p0 = 1.0
p1 = (0.0, 1.0)
# Preprocessing: compute T_{i:j}, the transition matrix from p_i to p_j.
T_br = [None] * m
# Lowest level, filled with T_{i:i+1}
# n matrices, each 2 x 2, with coefficients being polynomials of degree <= 1
T_br[0] = torch.zeros(n, 2, 2, 2)
T_br[0][:, 0, 0, 1] = a
T_br[0][:, 0, 0, 0] = b
T_br[0][:, 0, 1, 0] = c
T_br[0][:, 1, 0, 0] = 1.0
br_perm = bitreversal_permutation(n)
T_br[0] = T_br[0][br_perm]
for i in range(1, m):
T_br[i] = polymatmul(T_br[i - 1][n >> i:], T_br[i - 1][:n >> i])
P_init = torch.tensor([p1, [p0, 0.0]], dtype=torch.float) # [p_1, p_0]
P_init = P_init.unsqueeze(0).unsqueeze(-2)
Tidentity = torch.eye(2).unsqueeze(0).unsqueeze(3)
model.P_init = nn.Parameter(P_init)
for i in range(m):
factor = model.factors[m - i - 1]
factor.diag1 = nn.Parameter(torch.cat((Tidentity.expand(factor.size, -1, -1, -1), torch.zeros(factor.size, 2, 2, factor.deg)), dim=-1))
factor.diag2 = nn.Parameter(T_br[i][:factor.size])
batch_size = 2
x_original = torch.randn((batch_size, size))
x = (x_original[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
output = model(x[:, br_perm])
assert output.shape == (batch_size, size)
assert torch.allclose(output, ops_transpose_mult_br(a, b, c, p0, p1, x_original))
def main():
test_hstackdiag_product()
if __name__ == '__main__':
main()
|
butterfly-master
|
learning_transforms/hstack_diag.py
|
import numpy as np
import torch
from target_matrix import named_target_matrix
def baseline_rmse(name, size, param_fn):
# dft = named_target_matrix('dft', 512)
# dft = dft.view('complex128').squeeze(-1)
# n, m = size, int(np.log(size)/np.log(2))
n = size
params = int(param_fn(n))
# sparsity = 2 * n*m # n log n
sparsity = params
mat = named_target_matrix(name, n)
# print(mat)
# sparse
entries = np.sort(mat.reshape(-1)**2)
rmse_s = np.sqrt(np.sum(entries[:-sparsity])) # /n
# low rank
u, s, v = np.linalg.svd(mat)
rank = params // (2*n)
se = np.sum(s[rank:]**2) # rank log n
rmse_lr = np.sqrt(se) # /n
return rmse_s, rmse_lr
# transforms = ['dft', 'dct', 'dst', 'convolution', 'hadamard', 'hartley', 'legendre', 'hilbert', 'randn']
# sizes = [8, 16, 32, 64, 128, 256, 512, 1024]
# bf_params = [lambda n: 2*n*np.log2(n)]
transforms = ['sparse1', 'rank1', 'butterfly', 'convolution', 'fastfood', 'randn']
bf_params = [
lambda n: 4*n*(np.log2(n)+1),
lambda n: n*(np.log2(n)+1),
lambda n: n*(np.log2(n)+1),
lambda n: 2*n*(np.log2(n)+1),
lambda n: 2*n*(np.log2(n)+1),
lambda n: n*(np.log2(n)+1),
]
sizes = [256]
print()
sparse_all_rmse = []
lr_all_rmse = []
for name, param_fn in zip(transforms, bf_params):
sparse_rmse = []
lr_rmse = []
for N in sizes:
if name == 'dft': # Calculate by hand, does not support complex
r1 = np.sqrt((N - np.log2(N)) / N)
r2 = np.sqrt(N - np.log2(N)) / N
else:
r1, r2 = baseline_rmse(name, N, param_fn)
print(f"{name:12} {r1:10.6} {r2:10.6}")
sparse_rmse.append(r1)
lr_rmse.append(r2)
sparse_all_rmse.append(sparse_rmse)
lr_all_rmse.append(lr_rmse)
import pickle
with open('sparse_rmse.pkl', 'wb') as f:
pickle.dump(np.array(sparse_all_rmse), f)
with open('lr_rmse.pkl', 'wb') as f:
pickle.dump(np.array(lr_all_rmse), f)
|
butterfly-master
|
learning_transforms/baselines.py
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from matplotlib.colors import LinearSegmentedColormap
with open('rmse.pkl', 'rb') as f:
data = pickle.load(f)
transform_names = data['names']
our_rmse = np.array(data['rmse'])
our_rmse = np.delete(our_rmse, -2, axis=0)
with open('sparse_rmse.pkl', 'rb') as f:
sparse_rmse = pickle.load(f)
sparse_rmse = np.delete(sparse_rmse, -2, axis=0)
with open('lr_rmse.pkl', 'rb') as f:
lr_rmse = pickle.load(f)
lr_rmse = np.delete(lr_rmse, -2, axis=0)
# with open('mse_robust_pca.pkl', 'rb') as f:
# sparse_lr_rmse = pickle.load(f)
# It's always an option (depending on parameter) to get just one sparse matrix, or just one low rank, so we take minimum here
# sparse_lr_rmse = np.minimum(sparse_lr_rmse, sparse_rmse)
# sparse_lr_rmse = np.minimum(sparse_lr_rmse, lr_rmse)
sparse_lr_rmse = np.minimum(sparse_rmse, lr_rmse)
# For LaTeX
print(" \\\\\n".join([" & ".join(map('{0:.1e}'.format, line)) for line in our_rmse]))
# Red-green colormap
cdict = {'red': ((0.0, 0.0, 0.0),
(1/6., 0.0, 0.0),
(1/2., 0.8, 1.0),
(5/6., 1.0, 1.0),
(1.0, 0.4, 1.0)),
'green': ((0.0, 0.0, 0.4),
(1/6., 1.0, 1.0),
(1/2., 1.0, 0.8),
(5/6., 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0),
(1/6., 0.0, 0.0),
(1/2., 0.9, 0.9),
(5/6., 0.0, 0.0),
(1.0, 0.0, 0.0))
}
cmap=LinearSegmentedColormap('rg',cdict, N=256)
rmses = [our_rmse, sparse_rmse, lr_rmse, sparse_lr_rmse]
titles = ['Butterfly', 'Sparse', 'Low rank', 'Sparse + Low rank']
first = True
fig, axes = plt.subplots(nrows=1, ncols=4)
for rmse, ax, title in zip(rmses, axes.flat, titles):
    # im = ax.imshow(np.log10(rmse), interpolation='none', vmin=-4, vmax=0, cmap=cmap)
    im = ax.imshow(np.log10(rmse), interpolation='none', vmin=-4, vmax=0, cmap='bwr')
    ax.set_title(title, fontsize=10)
    # Major ticks
    ax.set_xticks(range(8))
    ax.set_yticks(range(8))
    # Labels for major ticks
    ax.set_xticklabels(['N=8', '16', '32', '64', '128', '256', '512', '1024'], rotation=270, fontsize=8)
    if first:
        # ax.set_yticklabels(['DFT', 'DCT', 'DST', 'Conv', 'Hadamard', 'Hartley', 'Legendre', 'Hilbert', 'Randn'])
        ax.set_yticklabels(['DFT', 'DCT', 'DST', 'Conv', 'Hadamard', 'Hartley', 'Legendre', 'Randn'], fontsize=8)
        first = False
    else:
        ax.set_yticklabels([])
    # Minor ticks
    ax.set_xticks(np.arange(-.5, 8, 1), minor=True)
    ax.set_yticks(np.arange(-.5, 8, 1), minor=True)
    # Gridlines based on minor ticks
    ax.grid(which='minor', color='w', linestyle='-', linewidth=2)
fig.subplots_adjust(bottom=-0.2, wspace=0.5)
cbar_ax = fig.add_axes([0.25, 0.11, 0.5, 0.02])
cbar = fig.colorbar(im, cax=cbar_ax, orientation='horizontal', ticks=[-4, -3, -2, -1, 0])
cbar.ax.set_xticklabels(['1e-4', '1e-3', '1e-2', '1e-1', '1e0'], fontsize=8)
plt.savefig('heatmap.pdf', bbox_inches='tight')
|
butterfly-master
|
learning_transforms/heatmap.py
|
import math
import operator
import functools
import torch
from torch import nn
from butterfly.complex_utils import real_to_complex, complex_mul, complex_matmul
from factor_multiply import permutation_factor_even_odd_multiply, permutation_factor_even_odd_multiply_backward
from factor_multiply import permutation_factor_reverse_multiply, permutation_factor_reverse_multiply_backward
class PermutationFactorEvenOddMult(torch.autograd.Function):
@staticmethod
def forward(ctx, p, input):
ctx.save_for_backward(p, input)
return permutation_factor_even_odd_multiply(p, input)
@staticmethod
def backward(ctx, grad):
p, input = ctx.saved_tensors
d_p, d_input = permutation_factor_even_odd_multiply_backward(grad, p, input)
return d_p, d_input
permutation_factor_even_odd_mult = PermutationFactorEvenOddMult.apply
class PermutationFactorReverseMult(torch.autograd.Function):
@staticmethod
def forward(ctx, p, input):
ctx.save_for_backward(p, input)
return permutation_factor_reverse_multiply(p, input)
@staticmethod
def backward(ctx, grad):
p, input = ctx.saved_tensors
d_p, d_input = permutation_factor_reverse_multiply_backward(grad, p, input)
return d_p, d_input
permutation_factor_reverse_mult = PermutationFactorReverseMult.apply
def test_permutation_factor_even_odd_multiply():
import time
n = 1024
m = int(math.log2(n))
x = torch.randn(n, requires_grad=True)
sizes = [n >> i for i in range(m)]
# first = time.perf_counter()
for size in sizes:
x = x.reshape(-1, size)
p = torch.randn(3, requires_grad=True)
result_slow = ((1 - p[0]) * x.reshape(x.shape[:-1] + (2, x.shape[-1] // 2)) + p[0] * x.reshape(x.shape[:-1] + (x.shape[-1] // 2, 2)).transpose(-1, -2)).reshape(x.shape)
# start = time.perf_counter()
result = permutation_factor_even_odd_mult(p[:1], x)
# [permutation_factor_even_odd_mult(bf.ABCD, x.reshape(-1, 2, bf.ABCD.shape[-1])).reshape(x.shape) for _ in range(10000)]
# end = time.perf_counter()
# print(end - start)
assert torch.allclose(result, result_slow, atol=1e-6)
grad = torch.randn_like(x)
d_p_slow, d_x_slow = torch.autograd.grad(result_slow, (p, x), grad, retain_graph=True)
d_p, d_x = torch.autograd.grad(result, (p, x), grad, retain_graph=True)
assert torch.allclose(d_p, d_p_slow, atol=1e-6)
assert torch.allclose(d_x, d_x_slow, atol=1e-6)
# last = time.perf_counter()
# print(last - first)
def test_permutation_factor_reverse_multiply():
import time
n = 1024
m = int(math.log2(n))
x = torch.randn(n, requires_grad=True)
sizes = [n >> i for i in range(m)]
# first = time.perf_counter()
for size in sizes:
x = x.reshape(-1, size)
p = torch.randn(3, requires_grad=True)
result_slow = ((1 - p[1:]).unsqueeze(-1) * x.reshape(-1, 2, x.shape[-1] // 2) + p[1:].unsqueeze(-1) * x.reshape((-1, 2, x.shape[-1] // 2)).flip(-1)).reshape(x.shape)
# start = time.perf_counter()
result = permutation_factor_reverse_mult(p[1:], x)
# [permutation_factor_reverse_mult(bf.ABCD, x.reshape(-1, 2, bf.ABCD.shape[-1])).reshape(x.shape) for _ in range(10000)]
# end = time.perf_counter()
# print(end - start)
assert torch.allclose(result, result_slow, atol=1e-6)
grad = torch.randn_like(x)
d_p_slow, d_x_slow = torch.autograd.grad(result_slow, (p, x), grad, retain_graph=True)
d_p, d_x = torch.autograd.grad(result, (p, x), grad, retain_graph=True)
assert torch.allclose(d_p, d_p_slow, atol=1e-6)
assert torch.allclose(d_x, d_x_slow, atol=1e-6)
# last = time.perf_counter()
# print(last - first)
|
butterfly-master
|
learning_transforms/permutation_factor.py
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.patches as mpatches
plt.rcParams['font.family'] = 'serif'
rs = [1]
markers = ['o', 'v', 'D', 'p', 's', '>']
loc = 'speed_data.pkl'
data = pickle.load(open(loc,'rb'))
colors = ['red', 'orange', 'green', 'blue']
speedups_fft = data['speedup_fft']
speedups_dct = data['speedup_dct']
speedups_dst = data['speedup_dst']
speedups_bp = data['speedup_bp']
sizes = data['sizes']
lw = 3
msize = 6
print('data: ', data)
start_idx = 0
print('fft speedup: ', speedups_fft[start_idx:])
print('dct speedup: ', speedups_dct[start_idx:])
print('dst speedup: ', speedups_dst[start_idx:])
print('bp speedup: ', speedups_bp[start_idx:])
print('sizes, speedups: ', sizes.size, speedups_fft.shape)
plt.plot(sizes[start_idx:],speedups_fft[start_idx:], linewidth=lw, label='FFT',marker=markers[0],color=colors[0],
markeredgecolor=colors[0],markersize=msize)
plt.plot(sizes[start_idx:],speedups_dct[start_idx:], linewidth=lw, label='DCT',marker=markers[0],color=colors[1],
markeredgecolor=colors[1],markersize=msize)
plt.plot(sizes[start_idx:],speedups_dst[start_idx:], linewidth=lw, label='DST',marker=markers[0],color=colors[2],
markeredgecolor=colors[2],markersize=msize)
plt.plot(sizes[start_idx:],speedups_bp[start_idx:], linewidth=lw, label='BP',marker=markers[0],color=colors[3],
markeredgecolor=colors[3],markersize=msize)
plt.axhline(y=1.0, color='black',linewidth=3)
plt.xscale('log', basex=2)
plt.yscale('log')
plt.xlabel(r'$N$',fontsize=14)
# plt.ylabel("Speedup over GEMV", fontsize=14)
plt.ylabel("Speedup over dense multiply", fontsize=18)
classes = [mpatches.Patch(color=colors[0], label='FFT'),
mpatches.Patch(color=colors[1], label='DCT'),
mpatches.Patch(color=colors[2], label='DST'),
mpatches.Patch(color=colors[3], label='BP')]
plt.legend(handles=classes, ncol=4, bbox_to_anchor=(0.85, -0.15))#, loc='upper left')
plt.savefig('speed_plot.pdf', bbox_inches='tight')
|
butterfly-master
|
learning_transforms/speed_plot.py
|
import itertools
import multiprocessing as mp
import os
# Thread caps must be set before numpy (and anything else that loads MKL/OpenMP) is imported
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
import numpy as np
import cvxpy as cp
from target_matrix import named_target_matrix
ntrials = 1
# sizes = [8, 16, 32, 64, 128, 256, 512, 1024]
sizes = [8, 16, 32, 64, 128]
# sizes = [8, 16, 32]
# transform_names = ['dft', 'dct', 'dst', 'convolution', 'hadamard', 'hartley', 'legendre', 'hilbert', 'randn']
transform_names = ['dct', 'dst', 'convolution', 'hadamard', 'hartley', 'legendre', 'randn']
model = {'dft': 'BP', 'dct': 'BP', 'dst': 'BP', 'convolution': 'BPBP', 'hadamard': 'BP', 'hartley': 'BP', 'legendre': 'BP', 'randn': 'BP'}
def sparse_lowrank_mse(name_size):
name, size = name_size
print(name, size)
matrix = named_target_matrix(name, size)
M = matrix
lambda1 = cp.Parameter(nonneg=True)
lambda2 = cp.Parameter(nonneg=True)
L = cp.Variable((size, size))
S = cp.Variable((size, size))
prob = cp.Problem(cp.Minimize(cp.sum_squares(M - L - S) / size**2 + lambda1 / size * cp.norm(L, 'nuc') + lambda2 / size**2 * cp.norm(S, 1)))
result = []
for _ in range(ntrials):
l1 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
l2 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
lambda1.value = l1
lambda2.value = l2
try:
prob.solve()
nnz = (np.abs(S.value) >= 1e-7).sum()
singular_values = np.linalg.svd(L.value, compute_uv=False)
rank = (singular_values >= 1e-7).sum()
n_params = nnz + 2 * rank * size
mse = np.sum((matrix - L.value - S.value)**2) / size**2
result.append((n_params, mse))
        except Exception:  # solver failed for this (lambda1, lambda2) sample; skip it
            pass
budget = 2 * size * np.log2(size)
if model[name] == 'BPBP':
budget *= 2
eligible = [res for res in result if res[0] <= budget]
if eligible:
mse = min(m for (n_params, m) in eligible)
else:
mse = np.sum(matrix**2) / size**2
print(name, size, 'done')
return (name, size, mse)
pool = mp.Pool()
mse = pool.map(sparse_lowrank_mse, list(itertools.product(transform_names, sizes)))
import pickle
with open('mse_robust_pca_small.pkl', 'wb') as f:
pickle.dump(mse, f)
# with open('mse_robust_pca.pkl', 'rb') as f:
# mse = pickle.load(f)
|
butterfly-master
|
learning_transforms/robust_pca.py
|
import math
import multiprocessing as mp
import os
from pathlib import Path
import pickle
import random
import sys
import numpy as np
from scipy.linalg import circulant
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, run_experiments
from ray.tune.schedulers import AsyncHyperBandScheduler
from butterfly import Butterfly, ButterflyProduct
from semantic_loss import semantic_loss_exactly_one
from utils import PytorchTrainable, bitreversal_permutation
from complex_utils import complex_mul, complex_matmul
N_LBFGS_STEPS = 300
N_TRIALS_TO_POLISH = 20
class TrainableCirculantReal(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=False,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
        # Target: a random circulant matrix (seeded for reproducibility)
n = size
np.random.seed(0)
x = np.random.randn(n)
C = circulant(x)
self.target_matrix = torch.tensor(C, dtype=torch.float)
arange_ = np.arange(size)
dct_perm = np.concatenate((arange_[::2], arange_[::-2]))
br_perm = bitreversal_permutation(size)
assert config['perm'] in ['id', 'br', 'dct']
if config['perm'] == 'id':
self.perm = torch.arange(size)
elif config['perm'] == 'br':
self.perm = br_perm
elif config['perm'] == 'dct':
self.perm = torch.arange(size)[dct_perm][br_perm]
else:
assert False, 'Wrong perm in config'
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableCirculantComplex(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=True,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
n = size
np.random.seed(0)
x = np.random.randn(n)
C = circulant(x)
self.target_matrix = torch.tensor(C, dtype=torch.float)
arange_ = np.arange(size)
dct_perm = np.concatenate((arange_[::2], arange_[::-2]))
br_perm = bitreversal_permutation(size)
assert config['perm'] in ['id', 'br', 'dct']
if config['perm'] == 'id':
self.perm = torch.arange(size)
elif config['perm'] == 'br':
self.perm = br_perm
elif config['perm'] == 'dct':
self.perm = torch.arange(size)[dct_perm][br_perm]
else:
assert False, 'Wrong perm in config'
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.perm, 0]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
def polish_dct_real(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
return loss.item()
def polish_dct_complex(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm, 0], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm, 0], trainable.target_matrix)
return loss.item()
ex = Experiment('Circulant_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.named_config
def softmax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.named_config
def sparsemax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'sparsemax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.config
def fixed_order_config():
    fixed_order = True # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
size = 8 # Size of matrix to factor, must be power of 2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nmaxepochs = 200 # Maximum number of epochs
result_dir = 'results' # Directory to store results
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
@ex.capture
def circulant_experiment_real(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'perm': sample_from(lambda spec: random.choice(['id', 'br', 'dct'])),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Circulant_factorization_real_{fixed_order}_{softmax_fn}_{size}',
run=TrainableCirculantReal,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def circulant_experiment_complex(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'perm': sample_from(lambda spec: random.choice(['id', 'br', 'dct'])),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Circulant_factorization_complex_{fixed_order}_{softmax_fn}_{size}',
run=TrainableCirculantComplex,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(result_dir, nmaxepochs, nthreads):
experiment = circulant_experiment_real()
# experiment = circulant_experiment_complex()
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False)
losses = [-trial.last_result['negative_loss'] for trial in trials]
# Polish solutions with L-BFGS
pool = mp.Pool()
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result['negative_loss'])
polished_losses = pool.map(polish_dct_real, sorted_trials[:N_TRIALS_TO_POLISH])
# polished_losses = pool.map(polish_dct_complex, sorted_trials[:N_TRIALS_TO_POLISH])
pool.close()
pool.join()
for i in range(N_TRIALS_TO_POLISH):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array(losses))
print(np.sort(losses))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return min(losses + polished_losses)
|
butterfly-master
|
learning_transforms/learning_circulant.py
|
import math
import torch
from torch import nn
from butterfly.complex_utils import real_to_complex, complex_mul, complex_matmul
from factor_multiply import butterfly_factor_multiply, butterfly_factor_multiply_backward
from factor_multiply import butterfly_multiply_intermediate, butterfly_multiply_intermediate_backward
# from ABCD_mult import ABCD_mult
# Block2x2Diag is used by the tests below but was not imported in the original
# file; it is assumed to live in the local butterfly module.
from butterfly import Block2x2Diag
class ButterflyFactorMult(torch.autograd.Function):
@staticmethod
def forward(ctx, coefficients, input):
ctx.save_for_backward(coefficients, input)
return butterfly_factor_multiply(coefficients, input)
# output = torch.empty_like(input)
# ABCD_mult(coefficients.detach().numpy(), input.detach().numpy(), output.detach().numpy())
# return output
@staticmethod
def backward(ctx, grad):
coefficients, input = ctx.saved_tensors
# assert grad.shape == input.shape
# d_coefficients = torch.einsum('abc, adc -> bdc', (grad, input))
# d_input = ButterflyFactorMult.apply(coefficients.transpose(0, 1), grad)
# return d_coefficients, d_input
d_coefficients, d_input = butterfly_factor_multiply_backward(grad, coefficients, input)
# d_coefficients = torch.zeros_like(coefficients)
# d_input = torch.zeros_like(input)
# d_coefficients = (grad.permute(2, 1, 0) @ input.permute(2, 0, 1)).permute(1, 2, 0) # Extremely slow on CUDA
# d_input = butterfly_factor_multiply(coefficients.transpose(0, 1), grad)
return d_coefficients, d_input
butterfly_factor_mult = ButterflyFactorMult.apply
class ButterflyFactorMultIntermediate(torch.autograd.Function):
@staticmethod
def forward(ctx, twiddle, input):
output = butterfly_multiply_intermediate(twiddle, input)
ctx.save_for_backward(twiddle, output)
return output[-1]
@staticmethod
def backward(ctx, grad):
twiddle, output = ctx.saved_tensors
d_coefficients, d_input = butterfly_multiply_intermediate_backward(grad, twiddle, output)
return d_coefficients, d_input
butterfly_factor_mult_intermediate = ButterflyFactorMultIntermediate.apply
def test_butterfly_factor_multiply():
import time
n = 1024
batch_size = 1000
ntrials = 100
m = int(math.log2(n))
x = torch.randn(n, requires_grad=True)
sizes = [n >> i for i in range(m)]
first = time.perf_counter()
for size in sizes:
bf = Block2x2Diag(size)
x = x.view(-1, 2 * bf.ABCD.shape[-1])
# result_slow = bf(x)
start = time.perf_counter()
result = butterfly_factor_mult(bf.ABCD, x.view(-1, 2, bf.ABCD.shape[-1])).view(x.shape)
[butterfly_factor_mult(bf.ABCD, x.view(-1, 2, bf.ABCD.shape[-1])).view(x.shape) for _ in range(ntrials)]
# assert torch.allclose(result, result_slow)
grad = torch.randn_like(x)
# d_coef_slow, d_x_slow = torch.autograd.grad(result_slow, (bf.ABCD, x), grad, retain_graph=True)
# d_coef, d_x = torch.autograd.grad(result, (bf.ABCD, x), grad, retain_graph=True)
[torch.autograd.grad(result, (bf.ABCD, x), grad, retain_graph=True) for _ in range(ntrials)]
end = time.perf_counter()
print(end - start)
# assert torch.allclose(d_coef, d_coef_slow)
# assert torch.allclose(d_x, d_x_slow)
last = time.perf_counter()
print(last - first)
def test_butterfly_factor_multiply_bmm():
import time
n = 1024
batch_size = 1000
ntrials = 100
m = int(math.log2(n))
x = torch.randn(n, requires_grad=True)
sizes = [n >> i for i in range(m)]
first = time.perf_counter()
for size in sizes:
bf = Block2x2Diag(size)
ABCD = bf.ABCD.permute(2, 0, 1).clone()
x = x.view(ABCD.shape[0], 2, -1)
start = time.perf_counter()
result = ABCD @ x
[ABCD @ x for _ in range(ntrials)]
# assert torch.allclose(result, result_slow)
grad = torch.randn_like(x)
# d_coef, d_x = torch.autograd.grad(result, (ABCD, x), grad, retain_graph=True)
[torch.autograd.grad(result, (ABCD, x), grad, retain_graph=True) for _ in range(ntrials)]
end = time.perf_counter()
print(end - start)
# assert torch.allclose(d_coef, d_coef_slow)
# assert torch.allclose(d_x, d_x_slow)
last = time.perf_counter()
print(last - first)
def test_butterfly_factor_complex_multiply():
from complex_utils import complex_mul
n = 1024
m = int(math.log2(n))
x = torch.randn((n, 2), requires_grad=True)
sizes = [n >> i for i in range(m)]
for size in sizes:
bf = Block2x2Diag(size, complex=True)
x = x.view(-1, 2 * bf.ABCD.shape[-2], 2)
result_slow = (complex_mul(bf.ABCD, x.view(x.shape[:-2] + (1, 2, size // 2, 2))).sum(dim=-3)).view(x.shape)
result = butterfly_factor_mult(bf.ABCD, x.view(-1, 2, bf.ABCD.shape[-2], 2)).view(x.shape)
assert torch.allclose(result, result_slow, atol=1e-6)
grad = torch.randn_like(x)
d_coef_slow, d_x_slow = torch.autograd.grad(result_slow, (bf.ABCD, x), grad, retain_graph=True)
d_coef, d_x = torch.autograd.grad(result, (bf.ABCD, x), grad, retain_graph=True)
assert torch.allclose(d_coef, d_coef_slow, atol=1e-6)
assert torch.allclose(d_x, d_x_slow, atol=1e-6)
|
butterfly-master
|
learning_transforms/butterfly_factor.py
|
"""Compute the exact Fisher information matrix of a butterfly matrix.
For an n x n butterfly matrix, this has space complexity O(n^2 log^2 n), which is optimal, and
time complexity O(n^3 log^2 n).
The space is the bottleneck anyway.
"""
import math
from functools import partial
import numpy as np
import torch
import torch_butterfly
import jax.numpy as jnp
from jax import grad, jit, vmap
from jax import random
# Avoid printing in scientific notation
np.set_printoptions(suppress=True)
torch.set_printoptions(sci_mode=False)
def twiddle_factor_to_matrix(twiddle_factor, stride):
"""
twiddle_factor: (n // 2, 2, 2)
stride: int
Return:
(n, n)
"""
n = twiddle_factor.shape[0] * 2
assert twiddle_factor.shape == (n // 2, 2, 2)
assert stride == 1 << int(math.log2(stride)), 'stride must be a power of 2'
x = jnp.eye(n)
t = jnp.moveaxis(twiddle_factor.reshape(n // (2 * stride), stride, 2, 2), -3, -1)
y = x.reshape(n, n // (2 * stride), 1, 2, stride)
y = (t * y).sum(axis=-2).reshape(n, n)
return y.T
def twiddle_factor_perm(n, stride):
"""The indices in a n x n matrix that marks where the entries of a butterfly factors are.
"""
    # TODO: The logic here is more complicated than necessary;
    # I don't have time right now to find a simpler way.
factor = jnp.arange(1, 1 + 2 * n).reshape(n // 2, 2, 2)
matrix_flat = twiddle_factor_to_matrix(factor, stride).flatten()
nonzero_locs, = jnp.nonzero(matrix_flat)
perm = nonzero_locs[jnp.argsort(matrix_flat[nonzero_locs])]
return perm
def butterfly_multiply_single(twiddle, x, increasing_stride=True, return_intermediates=False):
"""
twiddle: (log_n, n / 2, 2, 2)
x: (n)
Return:
(n)
"""
log_n = twiddle.shape[0]
n = 1 << log_n
assert twiddle.shape == (log_n, n // 2, 2, 2)
assert x.shape == (n,)
y = x
intermediates = [y]
for idx in range(log_n):
log_stride = idx if increasing_stride else log_n - 1 - idx
stride = 1 << log_stride
t = jnp.moveaxis(twiddle[idx].reshape(n // (2 * stride), stride, 2, 2), -3, -1)
y = y.reshape(n // (2 * stride), 1, 2, stride)
y = (t * y).sum(axis=-2).reshape(n)
intermediates.append(y)
return y if not return_intermediates else jnp.stack(intermediates)
butterfly_multiply = vmap(butterfly_multiply_single, in_axes=(None, 0))
torch.manual_seed(2357)
batch_size = 3
n = 32
log_n = int(math.log2(n))
twiddle_pt = torch.randn(1, 1, log_n, n // 2, 2, 2) / math.sqrt(2)
# twiddle_pt = torch.arange(1.0, 17.0).reshape(1, 1, log_n, n // 2, 2, 2)
x_pt = torch.randn(batch_size, 1, n)
out_pt = torch_butterfly.butterfly_multiply(twiddle_pt, x_pt, increasing_stride=True).squeeze()
twiddle = jnp.array(twiddle_pt[0, 0].numpy())
x = jnp.array(x_pt[:, 0].numpy())
out = butterfly_multiply(twiddle, x)
key = random.PRNGKey(2357)
batch_size = 10000
key, key_x, key_y, key_true, key_y_t = random.split(key, 5)
x = random.normal(key_x, (batch_size, n))
true = random.normal(key_true, (n, n))
y = x @ true.T + 0.1 * random.normal(key_y, (batch_size, n))
loss = lambda twiddle, x, y: 0.5 * jnp.sum((butterfly_multiply_single(twiddle, x) - y)**2, axis=-1).mean()
factor_perms = jnp.stack([twiddle_factor_perm(n, 1 << i) for i in range(log_n)])
factor_row_perms = factor_perms // n
factor_col_perms = factor_perms % n
matrices = [twiddle_factor_to_matrix(twiddle[i], 1 << i) for i in range(log_n)]
def fisher_numerical(twiddle, x, key_y_t):
"""Compute Fisher information matrix numerically, using per-sample gradient
"""
batch_size, n = x.shape
y_t = butterfly_multiply(twiddle, x) + random.normal(key_y_t, (batch_size, n))
grad_per_sample = vmap(grad(loss, 0), (None, 0, 0))(twiddle, x, y_t)
grad_per_sample = grad_per_sample.swapaxes(-1, -2).reshape(batch_size, -1)
fisher = (grad_per_sample.T @ grad_per_sample) / batch_size
assert jnp.allclose(fisher, fisher.T)
return fisher
def fisher_exact(twiddle, x, return_factor=False):
# behind = [jnp.eye(n)]
# for i in range(log_n - 1):
# behind.append(matrices[i] @ behind[-1])
bmul_intermediate = vmap(partial(butterfly_multiply_single, return_intermediates=True),
(None, 0), 1)
behind = bmul_intermediate(twiddle, jnp.eye(n)).swapaxes(-1, -2)[:-1]
# ahead = [jnp.eye(n)]
# for i in range(1, log_n)[::-1]:
# ahead.append(ahead[-1] @ matrices[i])
# ahead = list(reversed(ahead))
bmul_t_intermediate = vmap(partial(butterfly_multiply_single, increasing_stride=False,
return_intermediates=True), (None, 0), 1)
ahead = bmul_t_intermediate(twiddle[::-1].swapaxes(-1, -2), jnp.eye(n))[:-1][::-1]
# fisher_exact_list = []
# for i in range(log_n):
# fisher_exact_row = []
# for j in range(log_n):
# if j >= i:
# Fij = jnp.kron(behind[i] @ behind[j].T, ahead[i].T @ ahead[j])
# Fij_t = Fij[factor_perms[i]][:, factor_perms[j]]
# else:
# Fij_t = fisher_exact_list[j][i].T
# fisher_exact_row.append(Fij_t)
# fisher_exact_list.append(fisher_exact_row)
# fisher_exact = jnp.block(fisher_exact_list)
# A = jnp.stack([jnp.kron(behind[i], ahead[i].T) for i in range(log_n)])
# PA = jnp.concatenate([jnp.kron(behind[i], ahead[i].T)[factor_perms[i]] for i in range(log_n)])
# PA = vmap(lambda b, a, p: jnp.kron(b, a.T)[p])(behind, ahead, factor_perms).reshape(-1, n * n)
# PA = vmap(
# lambda b, a, p: (jnp.repeat(b, n, 0)[p][:, :, None] * jnp.tile(a.T, (n, 1))[p][:, None, :])
# )(behind, ahead, factor_perms).reshape(-1, n * n)
# fisher_exact = PA @ PA.T
PA = None
# L = vmap(lambda b, p: jnp.repeat(b, n, 0)[p])(behind, factor_perms).reshape(-1, n)
L = vmap(lambda b, p: b[p])(behind, factor_row_perms).reshape(-1, n)
# R = vmap(lambda a, p: jnp.tile(a.T, (n, 1))[p])(ahead, factor_perms).reshape(-1, n)
R = vmap(lambda a, p: a.T[p])(ahead, factor_col_perms).reshape(-1, n)
fisher_exact = (L @ L.T) * (R @ R.T)
    return fisher_exact if not return_factor else (fisher_exact, PA)  # parentheses matter: 'a if c else b, d' always builds a tuple
F = fisher_numerical(twiddle, x, key_y_t)
F_exact, PA = fisher_exact(twiddle, x, return_factor=True)
print(jnp.linalg.norm(F - F_exact, 2) / jnp.linalg.norm(F, 2))
print(jnp.linalg.norm(F - F_exact, 'fro') / jnp.linalg.norm(F, 'fro'))
# for i in range(log_n):
# for j in range(log_n):
# print((i, j))
# print(jnp.nonzero(fisher_exact_list[i][j]))
def check_pinv(A, A_pinv):
AAp = A @ A_pinv
ApA = A_pinv @ A
return (jnp.linalg.norm(AAp @ A - A), jnp.linalg.norm(ApA @ A_pinv - A_pinv),
jnp.linalg.norm(AAp.T - AAp), jnp.linalg.norm(ApA.T - ApA))
# F_exact_pinv = jnp.linalg.pinv(F_exact)
# U, S, _ = jnp.linalg.svd(PA, full_matrices=False)
# # (S > 1e-3).sum()
# # This seems to have rank (log_n + 1) n
# rank = (log_n + 1) * n
# # F_svd_pinv = U[:, :rank] @ jnp.diag(1.0 / S[:rank]**2) @ U.T[:rank]
# F_svd_pinv = (U[:, :rank] / S[:rank]**2) @ U.T[:rank]
# print([float(e) for e in check_pinv(F_exact, F_exact_pinv)])
# print([float(e) for e in check_pinv(F_exact, F_svd_pinv)])
|
butterfly-master
|
learning_transforms/fisher.py
|
import torch
from torch import nn
# def semantic_loss_exactly_one(prob, dim=-1):
# """Semantic loss to encourage the multinomial probability to be "peaked",
# i.e. only one class is picked.
# The loss has the form -log sum_{i=1}^n p_i prod_{j=1, j!=i}^n (1 - p_j).
# Paper: http://web.cs.ucla.edu/~guyvdb/papers/XuICML18.pdf
# Code: https://github.com/UCLA-StarAI/Semantic-Loss/blob/master/semi_supervised/semantic.py
# Parameters:
# prob: probability of a multinomial distribution, shape (n, )
# dim: dimension to sum over
# Returns:
# semantic_loss: shape (1, )
# """
# This is probably not the most numerically stable way to implement the
# loss. Maybe it's better to compute from log softmax. The difficulty is to
# compute log(1 - p) from log(p). Pytorch's logsumexp doesn't support
# weight yet (as of Pytorch 1.0), unlike scipy's logsumexp, so we can't do
# subtraction in log scale.
# loss = -((1 - prob).log().sum(dim=dim) + (prob / (1 - prob)).sum(dim=dim).log())
# Hacky way to avoid NaN when prob is very peaked, but doesn't work because the gradient is still NaN
# loss[torch.isnan(loss)] = 0.0
# Another hacky way: clamp the result instead of return inf - inf, doesn't work either
# loss = -(torch.clamp((1 - prob).log().sum(dim=dim), min=-torch.finfo(prob.dtype).max) + torch.clamp((prob / (1 - prob)).sum(dim=dim).log(), max=torch.finfo(prob.dtype).max))
# TODO: This only works when dim=-1 and prob is 2 dimensional
# loss = torch.zeros(prob.shape[0])
# prob_not_one = torch.all(prob != 1.0, dim=-1)
# loss[prob_not_one] = -((1 - prob[prob_not_one]).log().sum(dim=dim) + (prob[prob_not_one] / (1 - prob[prob_not_one])).sum(dim=dim).log())
# return loss
def semantic_loss_exactly_one(log_prob):
"""Semantic loss to encourage the multinomial probability to be "peaked",
i.e. only one class is picked.
The loss has the form -log sum_{i=1}^n p_i prod_{j=1, j!=i}^n (1 - p_j).
Paper: http://web.cs.ucla.edu/~guyvdb/papers/XuICML18.pdf
Code: https://github.com/UCLA-StarAI/Semantic-Loss/blob/master/semi_supervised/semantic.py
Parameters:
log_prob: log probability of a multinomial distribution, shape (batch_size, n)
Returns:
semantic_loss: shape (batch_size)
"""
_, argmaxes = torch.max(log_prob, dim=-1)
# Compute log(1-p) separately for the largest probabilities, by doing
# logsumexp on the rest of the log probabilities.
log_prob_temp = log_prob.clone()
log_prob_temp[range(log_prob.shape[0]), argmaxes] = torch.tensor(float('-inf'))
log_1mprob_max = torch.logsumexp(log_prob_temp, dim=-1)
# Compute log(1-p) normally for the rest of the probabilities
log_1mprob = torch.log1p(-torch.exp(log_prob_temp))
log_1mprob[range(log_prob.shape[0]), argmaxes] = log_1mprob_max
loss = -(log_1mprob.sum(dim=-1) + torch.logsumexp(log_prob - log_1mprob, dim=-1))
return loss
def test_semantic_loss_exactly_one():
m = 5
logit = torch.randn(m)
p = nn.functional.softmax(logit, dim=-1)
# Compute manually
result = 0.0
for i in range(m):
prod = p[i].clone()
for j in range(m):
if j != i:
prod *= 1 - p[j]
result += prod
result = -torch.log(result)
result1 = -torch.logsumexp(torch.log(1 - p).sum() + torch.log(p / (1 - p)), dim=-1)
result2 = semantic_loss_exactly_one(p.unsqueeze(0)).squeeze()
assert torch.allclose(result, result1)
assert torch.allclose(result, result2)
if __name__ == '__main__':
test_semantic_loss_exactly_one()
|
butterfly-master
|
learning_transforms/semantic_loss.py
|
# encoding: utf8
"""
From Softmax to Sparsemax: A Sparse Model of Attention and Multi-Label
Classification. André F. T. Martins, Ramón Fernandez Astudillo
In: Proc. of ICML 2016, https://arxiv.org/abs/1602.02068
Code adapted from https://github.com/vene/sparse-structured-attention
and https://github.com/KrisKorrel/sparsemax-pytorch
"""
import torch
def project_simplex(v, z=1.0):
"""Project a vector v onto the simplex.
That is, return argmin_w ||w - v||^2 where w >= 0 elementwise and sum(w) = z.
Parameters:
v: Tensor of shape (batch_size, n)
z: real number
Return:
Projection of v on the simplex, along the last dimension: (batch_size, n)
"""
v_sorted, _ = v.sort(dim=-1, descending=True)
range_ = torch.arange(1.0, 1 + v.shape[-1])
cumsum_divided = (v_sorted.cumsum(dim=-1) - z) / range_
# rho = (v_sorted - cumsum_divided > 0).nonzero()[-1]
cond = (v_sorted - cumsum_divided > 0).type(v.dtype)
rho = (cond * range_).argmax(dim=-1)
tau = cumsum_divided[range(v.shape[0]), rho]
return torch.clamp(v - tau.unsqueeze(-1), min=0)
def sparsemax_grad(output, grad):
support = output > 0
support_f = support.type(grad.dtype)
s = (grad * support_f).sum(dim=-1) / support_f.sum(dim=-1)
return support_f * (grad - s.unsqueeze(-1))
# temp = (grad - s.unsqueeze(-1))[support]
# result = torch.zeros_like(grad)
# result[support] = temp
class Sparsemax(torch.autograd.Function):
@staticmethod
def forward(ctx, v):
output = project_simplex(v)
ctx.save_for_backward(output)
return output
@staticmethod
def backward(ctx, grad):
output, = ctx.saved_tensors
return sparsemax_grad(output, grad)
sparsemax = Sparsemax.apply
|
butterfly-master
|
learning_transforms/sparsemax.py
|
import math
import multiprocessing as mp
import os
from pathlib import Path
import pickle
import random
import sys
import numpy as np
from numpy.polynomial import legendre
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, run_experiments
from ray.tune.schedulers import AsyncHyperBandScheduler
from butterfly import Butterfly, ButterflyProduct
from semantic_loss import semantic_loss_exactly_one
from utils import PytorchTrainable, bitreversal_permutation
from complex_utils import complex_mul, complex_matmul
N_LBFGS_STEPS = 300
N_TRIALS_TO_POLISH = 20
class TrainableLegendreReal(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=False,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
        # Transpose so that row i of the target holds the i-th Legendre polynomial evaluated on the grid
n = size
x = np.linspace(-1, 1, n + 2)[1:-1]
E = legendre.legvander(x, n - 1).T
# E = np.zeros((n, n), dtype=np.float32)
# for i, coef in enumerate(np.eye(n)):
# E[i] = legendre.legval(x, coef)
self.target_matrix = torch.tensor(E, dtype=torch.float)
arange_ = np.arange(size)
dct_perm = np.concatenate((arange_[::2], arange_[::-2]))
br_perm = bitreversal_permutation(size)
assert config['perm'] in ['id', 'br', 'dct']
if config['perm'] == 'id':
self.perm = torch.arange(size)
elif config['perm'] == 'br':
self.perm = br_perm
elif config['perm'] == 'dct':
self.perm = torch.arange(size)[dct_perm][br_perm]
else:
assert False, 'Wrong perm in config'
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableLegendreComplex(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=True,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
n = size
x = np.linspace(-1, 1, n + 2)[1:-1]
E = legendre.legvander(x, n - 1).T
self.target_matrix = torch.tensor(E, dtype=torch.float)
arange_ = np.arange(size)
dct_perm = np.concatenate((arange_[::2], arange_[::-2]))
br_perm = bitreversal_permutation(size)
assert config['perm'] in ['id', 'br', 'dct']
if config['perm'] == 'id':
self.perm = torch.arange(size)
elif config['perm'] == 'br':
self.perm = br_perm
elif config['perm'] == 'dct':
self.perm = torch.arange(size)[dct_perm][br_perm]
else:
assert False, 'Wrong perm in config'
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.perm, 0]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
def polish_dct_real(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
return loss.item()
def polish_dct_complex(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm, 0], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm, 0], trainable.target_matrix)
return loss.item()
ex = Experiment('LegendreEval_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.named_config
def softmax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.named_config
def sparsemax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'sparsemax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.config
def fixed_order_config():
    fixed_order = True # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
size = 8 # Size of matrix to factor, must be power of 2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nmaxepochs = 200 # Maximum number of epochs
result_dir = 'results' # Directory to store results
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
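# Usage sketch (added; assumes the standard sacred CLI for this script,
# learning_legendre.py):
#   python learning_legendre.py                              # fixed-order config
#   python learning_legendre.py with softmax_config size=16
#   python learning_legendre.py with sparsemax_config smoke_test=True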
@ex.capture
def legendreeval_experiment_real(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'perm': sample_from(lambda spec: random.choice(['id', 'br', 'dct'])),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'LegendreEval_factorization_real_{fixed_order}_{softmax_fn}_{size}',
run=TrainableLegendreReal,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def legendreeval_experiment_complex(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'perm': sample_from(lambda spec: random.choice(['id', 'br', 'dct'])),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'LegendreEval_factorization_complex_{fixed_order}_{softmax_fn}_{size}',
run=TrainableLegendreComplex,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(result_dir, nmaxepochs, nthreads):
# experiment = legendreeval_experiment_real()
experiment = legendreeval_experiment_complex()
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False)
losses = [-trial.last_result['negative_loss'] for trial in trials]
# Polish solutions with L-BFGS
pool = mp.Pool()
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result['negative_loss'])
# polished_losses = pool.map(polish_dct_real, sorted_trials[:N_TRIALS_TO_POLISH])
polished_losses = pool.map(polish_dct_complex, sorted_trials[:N_TRIALS_TO_POLISH])
pool.close()
pool.join()
for i in range(N_TRIALS_TO_POLISH):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array(losses))
print(np.sort(losses))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return min(losses + polished_losses)
|
butterfly-master
|
learning_transforms/learning_legendre.py
|
import torch
from torch import nn
from torch import optim
from butterfly_factor import butterfly_factor_mult
from permutation_factor import permutation_factor_even_odd_mult, permutation_factor_reverse_mult
from butterfly import Block2x2Diag, Block2x2DiagProduct, BlockPermProduct, Block2x2DiagProductBmm
def profile_butterfly_mult():
nsteps = 10
batch_size = 100
n = 1024
B = Block2x2DiagProduct(n)
x = torch.randn(batch_size, n)
# B(x)
optimizer = optim.Adam(B.parameters(), lr=0.01)
for _ in range(nsteps):
optimizer.zero_grad()
# output = B(x)
# loss = nn.functional.mse_loss(output, x)
output = x
for factor in B.factors[::-1]:
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(x.shape)
# output = output.reshape(x.shape)
loss = output.sum()
loss.backward()
optimizer.step()
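# Reference semantics (added note, inferred from the commented-out "slow"
# path below): with twiddle = factor.ABCD of shape (2, 2, size//2) and
# input of shape (batch, 2, size//2), butterfly_factor_mult computes
#     out = (twiddle * input.view(-1, 1, 2, size//2)).sum(dim=-2)
# i.e. each pair (x0_k, x1_k) is multiplied by the 2x2 block
# [[A_k, B_k], [C_k, D_k]].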
import os
os.environ['MKL_NUM_THREADS'] = '1'
torch.set_num_threads(1)
# nsteps = 50
nsteps = 1
batch_size = 1000
# batch_size = 1
n = 1024
# n = 8
# B = Block2x2DiagProduct(n)
B = Block2x2DiagProduct(n).to('cuda')
# B = Block2x2DiagProductBmm(n).to('cuda')
# P = BlockPermProduct(n)
P = BlockPermProduct(n, complex=False).to('cuda')
model = nn.Sequential(P, B)
# model = nn.Sequential(B)
# x = torch.randn(batch_size, n, requires_grad=True)
x = torch.randn(batch_size, n, requires_grad=True).to('cuda')
# B = Block2x2DiagProduct(n, complex=True)
# x = torch.randn(batch_size, n, 2)
optimizer = optim.Adam(model.parameters(), lr=0.01)
# with torch.autograd.profiler.profile() as prof:
# # with torch.autograd.profiler.profile(use_cuda=True) as prof:
# for _ in range(nsteps):
# optimizer.zero_grad()
# output = model(x)
# # output = x
# # output_slow = x
# # for factor in B.factors[::-1]:
# # output_prev = output
# # output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(x.shape)
# # output_slow = ((factor.ABCD * output_prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(output_prev.shape)
# # print((output - output_slow).abs().max().item())
# # grad = torch.randn_like(output)
# # d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, output_prev), grad, retain_graph=True)
# # # d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, output_prev), grad, retain_graph=True)
# # d_twiddle_slow = (grad.view(-1, 2, 1, factor.size // 2) * output_prev.view(-1, 1, 2, factor.size // 2)).sum(dim=0)
# # d_input_slow = (factor.ABCD.transpose(0, 1) * grad.view(-1, 1, 2, factor.size // 2)).sum(dim=-2).view(output_prev.shape)
# # print((d_twiddle - d_twiddle_slow).abs().max().item())
# # print((d_input - d_input_slow).abs().max().item())
# output = output.view(x.shape)
# loss = nn.functional.mse_loss(output, x)
# # loss = output.sum()
# loss.backward()
# optimizer.step()
# sorted_events = torch.autograd.profiler.EventList(sorted(prof.key_averages(), key=lambda event: event.cpu_time_total, reverse=True))
# print(sorted_events)
import time
# nsteps = 1000
nsteps = 1
grad = torch.randn_like(x)
# output = x
# torch.cuda.synchronize()
# start = time.perf_counter()
# for factor in B.factors[::-1]:
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# output_fast = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(x.shape)
# # output = ((factor.ABCD * output.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(output.shape)
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# grad_fast = torch.autograd.grad(output_fast, (factor.ABCD, output), grad.view(output_fast.shape), retain_graph=True)
# # output = ((factor.ABCD * output.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(output.shape)
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# end = time.perf_counter()
# print('Total: ', end - start)
output = B(x)
output.backward(gradient=grad)
# output = x
# torch.cuda.synchronize()
# start = time.perf_counter()
# for factor in B.factors[::-1]:
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# # output_fast = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(x.shape)
# output_slow = ((factor.ABCD * output.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(output.shape)
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# # torch.cuda.synchronize()
# # start_micro = time.perf_counter()
# # for _ in range(nsteps):
# # # output_fast = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(x.shape)
# # grad_slow = (output_slow.view(-1, 2, 1, factor.size // 2) * output_slow.view(-1, 1, 2, factor.size // 2)).sum(dim=0)
# # torch.cuda.synchronize()
# # end_micro = time.perf_counter()
# # print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# end = time.perf_counter()
# print('Total: ', end - start)
# a = torch.randn(batch_size * n // 2, 4, device='cuda')
# a = B.factors[-1].ABCD * x.view(-1, 1, 2, 1)
# print(a.shape)
# print(a.stride())
# b = a[:, ::2].sum(dim=-1)
# b = a.sum(dim=0)
# output = x
# prob = torch.zeros(3, device=x.device, requires_grad=True)
# prob[0] = 0.7
# prob[1] = 0.6
# prob[2] = 0.4
# torch.cuda.synchronize()
# start = time.perf_counter()
# for factor in P.factors[::-1]:
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# output_fast = permutation_factor_even_odd_mult(prob[:1], output.view(-1, factor.size))
# # output_slow = ((1 - prob[0]) * output.view(-1, 2, factor.size // 2) + prob[0] * output.view(-1, factor.size // 2, 2).transpose(-1, -2)).view(-1, factor.size)
# # print((output_fast - output_slow).abs().max().item())
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# d_prob_fast, d_output_fast = torch.autograd.grad(output_fast, (prob, output), grad.view(output_fast.shape), retain_graph=True)
# # d_prob_slow, d_output_slow = torch.autograd.grad(output_slow, (prob, output), grad.view(output_slow.shape), retain_graph=True)
# # print((d_prob_fast))
# # print((d_prob_slow))
# # print((d_output_fast - d_output_slow).abs().max().item())
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# end = time.perf_counter()
# print('Total: ', end - start)
# torch.cuda.synchronize()
# start = time.perf_counter()
# for factor in P.factors[::-1]:
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# output_slow = ((1 - prob[0]) * output.view(-1, 2, factor.size // 2) + prob[0] * output.view(-1, factor.size // 2, 2).transpose(-1, -2)).view(-1, factor.size)
# # output = torch.add((1 - prob[0]) * output.view(-1, 2, factor.size // 2), prob[0], output.view(-1, factor.size // 2, 2).transpose(-1, -2)).view(-1, factor.size)
# # output_slow = torch.lerp(output.view(-1, 2, factor.size // 2), output.view(-1, factor.size // 2, 2).transpose(-1, -2), prob[0]).view(-1, factor.size)
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# grad_slow = torch.autograd.grad(output_slow, (prob, output), grad.view(output_slow.shape), retain_graph=True)
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# end = time.perf_counter()
# print('Total: ', end - start)
# torch.cuda.synchronize()
# start = time.perf_counter()
# for factor in P.factors[::-1]:
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# output_fast = permutation_factor_reverse_mult(prob[1:], output.view(-1, factor.size))
# # output_slow = ((1 - prob[1:]).unsqueeze(-1) * output.view(-1, 2, factor.size//2) + prob[1:].unsqueeze(-1) * output.view(-1, 2, factor.size//2).flip(-1)).view(-1, factor.size)
# # print((output_fast - output_slow).abs().max().item())
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# d_prob_fast, d_output_fast = torch.autograd.grad(output_fast, (prob, output), grad.view(output_fast.shape), retain_graph=True)
# # d_prob_slow, d_output_slow = torch.autograd.grad(output_slow, (prob, output), grad.view(output_slow.shape), retain_graph=True)
# # print((d_prob_fast))
# # print((d_prob_slow))
# # assert d_output_fast.shape == d_output_slow.shape
# # print((d_output_fast - d_output_slow).abs().max().item())
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# end = time.perf_counter()
# print('Total: ', end - start)
# torch.cuda.synchronize()
# start = time.perf_counter()
# for factor in P.factors[::-1]:
# reverse_idx = torch.arange(factor.size//2 - 1, -1, -1, device=output.device)
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# # output_slow = (((1 - prob[1:]).unsqueeze(-1) * output.view(-1, 2, factor.size//2) + prob[1:].unsqueeze(-1) * output.view(-1, 2, factor.size//2).flip(-1))).view(-1, factor.size)
# output_slow = (((1 - prob[1:]).unsqueeze(-1) * output.view(-1, 2, factor.size//2) + prob[1:].unsqueeze(-1) * output.view(-1, 2, factor.size//2)[:, :, reverse_idx])).view(-1, factor.size)
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# start_micro = time.perf_counter()
# for _ in range(nsteps):
# grad_slow = torch.autograd.grad(output_slow, (prob, output), grad.view(output_slow.shape), retain_graph=True)
# torch.cuda.synchronize()
# end_micro = time.perf_counter()
# print(f'Size {factor.size}: {end_micro - start_micro}s')
# torch.cuda.synchronize()
# end = time.perf_counter()
# print('Total: ', end - start)
|
butterfly-master
|
learning_transforms/profile.py
|
import math
import multiprocessing as mp
import os
from pathlib import Path
import pickle
import random
import sys
import numpy as np
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, run_experiments
from ray.tune.schedulers import AsyncHyperBandScheduler
from butterfly import Butterfly, ButterflyProduct
from semantic_loss import semantic_loss_exactly_one
from utils import PytorchTrainable, bitreversal_permutation
from complex_utils import complex_mul, complex_matmul
N_LBFGS_STEPS = 300
N_TRIALS_TO_POLISH = 20
class TrainableVandermondeReal(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=False,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
        n = size
np.random.seed(0)
x = np.random.randn(n)
V = np.vander(x, increasing=True)
self.target_matrix = torch.tensor(V, dtype=torch.float)
arange_ = np.arange(size)
dct_perm = np.concatenate((arange_[::2], arange_[::-2]))
br_perm = bitreversal_permutation(size)
assert config['perm'] in ['id', 'br', 'dct']
if config['perm'] == 'id':
self.perm = torch.arange(size)
elif config['perm'] == 'br':
self.perm = br_perm
elif config['perm'] == 'dct':
self.perm = torch.arange(size)[dct_perm][br_perm]
else:
assert False, 'Wrong perm in config'
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableVandermondeComplex(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=True,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
n = size
np.random.seed(0)
x = np.random.randn(n)
V = np.vander(x, increasing=True)
self.target_matrix = torch.tensor(V, dtype=torch.float)
arange_ = np.arange(size)
dct_perm = np.concatenate((arange_[::2], arange_[::-2]))
br_perm = bitreversal_permutation(size)
assert config['perm'] in ['id', 'br', 'dct']
if config['perm'] == 'id':
self.perm = torch.arange(size)
elif config['perm'] == 'br':
self.perm = br_perm
elif config['perm'] == 'dct':
self.perm = torch.arange(size)[dct_perm][br_perm]
else:
assert False, 'Wrong perm in config'
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.perm, 0]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
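# Note (added): for the complex model, matrix() has shape (n, n, 2); the
# [..., 0] indexing in _train fits only the real part to the real-valued
# Vandermonde target, leaving the imaginary part unconstrained.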
def polish_dct_real(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
return loss.item()
def polish_dct_complex(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm, 0], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm, 0], trainable.target_matrix)
return loss.item()
ex = Experiment('VandermondeEval_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.named_config
def softmax_config():
    fixed_order = False  # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.named_config
def sparsemax_config():
    fixed_order = False  # Whether the order of the factors is fixed
softmax_fn = 'sparsemax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.config
def fixed_order_config():
    fixed_order = True  # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
size = 8 # Size of matrix to factor, must be power of 2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nmaxepochs = 200 # Maximum number of epochs
result_dir = 'results' # Directory to store results
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
@ex.capture
def vandermonde_experiment_real(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'perm': sample_from(lambda spec: random.choice(['id', 'br', 'dct'])),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'VandermondeEval_factorization_real_{fixed_order}_{softmax_fn}_{size}',
run=TrainableVandermondeReal,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def vandermonde_experiment_complex(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'perm': sample_from(lambda spec: random.choice(['id', 'br', 'dct'])),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'VandermondeEval_factorization_complex_{fixed_order}_{softmax_fn}_{size}',
run=TrainableVandermondeComplex,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(result_dir, nmaxepochs, nthreads):
experiment = vandermonde_experiment_real()
# experiment = vandermonde_experiment_complex()
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False)
losses = [-trial.last_result['negative_loss'] for trial in trials]
# Polish solutions with L-BFGS
pool = mp.Pool()
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result['negative_loss'])
polished_losses = pool.map(polish_dct_real, sorted_trials[:N_TRIALS_TO_POLISH])
# polished_losses = pool.map(polish_dct_complex, sorted_trials[:N_TRIALS_TO_POLISH])
pool.close()
pool.join()
for i in range(N_TRIALS_TO_POLISH):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array(losses))
print(np.sort(losses))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return min(losses + polished_losses)
|
butterfly-master
|
learning_transforms/learning_vandermonde.py
|
import os
from timeit import default_timer as timer
import numpy as np
from scipy.fftpack import fft, dct, dst
import torch
from torch import nn
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from butterfly import Block2x2DiagProduct, BlockPermProduct
from inference import Block2x2DiagProduct_to_ABCDs, BP_mul_cy_inplace
from test_factor_multiply import twiddle_list_concat
# We limit to 1 thread for reliable speed test
os.environ['MKL_NUM_THREADS'] = '1'
torch.set_num_threads(1)
exps = np.arange(6, 14)
sizes = 1 << exps
ntrials = [100000, 100000, 1000, 100, 100, 10, 10, 10]
dense_times = np.zeros(exps.size)
fft_times = np.zeros(exps.size)
scipyfft_times = np.zeros(exps.size)
dct_times = np.zeros(exps.size)
dst_times = np.zeros(exps.size)
bp_times = np.zeros(exps.size)
for idx_n, (n, ntrial) in enumerate(zip(sizes, ntrials)):
print(n)
x = np.random.random(n).astype(np.float32)
B = Block2x2DiagProduct(n)
P = BlockPermProduct(n)
B_matrix = B(torch.eye(int(n))).t().contiguous()
B_matrix_np = B_matrix.detach().numpy()
ABCDs = Block2x2DiagProduct_to_ABCDs(B)
perm = P.argmax().detach().numpy().astype(int)
# Dense multiply
start = timer()
[B_matrix_np @ x for _ in range(ntrial)]
end = timer()
dense_times[idx_n] = (end-start) / ntrial
# FFT
start = timer()
[np.fft.fft(x) for _ in range(ntrial)]
end = timer()
fft_times[idx_n] = (end-start) / ntrial
# Scipy's FFT is about 1.5x slower than Numpy's FFT
start = timer()
[fft(x) for _ in range(ntrial)]
end = timer()
scipyfft_times[idx_n] = (end-start) / ntrial
# DCT
start = timer()
[dct(x) for _ in range(ntrial)]
end = timer()
dct_times[idx_n] = (end-start) / ntrial
# DST
start = timer()
[dst(x) for _ in range(ntrial)]
end = timer()
dst_times[idx_n] = (end-start) / ntrial
# BP
start = timer()
[BP_mul_cy_inplace(ABCDs, perm, x) for _ in range(ntrial)]
end = timer()
bp_times[idx_n] = (end-start) / ntrial
print(dense_times)
print(fft_times)
print(scipyfft_times)
print(dct_times)
print(dst_times)
print(bp_times)
# print(bp_times / fft_times)
# print(bp_times / dct_times)
# print(bp_times / dst_times)
# plt.figure()
# plt.semilogy(sizes, dense_times / fft_times, label='FFT')
# # plt.semilogy(sizes, dense_times / scipyfft_times, label='scipy FFT')
# plt.semilogy(sizes, dense_times / dct_times, label='DCT')
# plt.semilogy(sizes, dense_times / dst_times, label='DST')
# plt.semilogy(sizes, dense_times / bp_times, label='BP')
# plt.xscale('log', basex=2)
# plt.xlabel("Dimension")
# plt.ylabel("Speedup over GEMV")
# plt.legend()
# # plt.show()
# plt.savefig('speed.pdf')
data = {
'sizes': sizes,
'speedup_fft': dense_times / fft_times,
'speedup_dct': dense_times / dct_times,
'speedup_dst': dense_times / dst_times,
'speedup_bp': dense_times / bp_times,
}
# import pickle
# with open('speed_data.pkl', 'wb') as f:
# pickle.dump(data, f)
|
butterfly-master
|
learning_transforms/speed_test.py
|
import numpy as np

# Scratch checks of the displacement structure of a Vandermonde matrix.
n = 4
# x = np.random.randn(n)
x = np.arange(2, n+2)
V = np.vander(x, increasing=True)
D = np.diag(x)
D_inv = np.diag(1 / x)
Z0 = np.diag(np.ones(n-1), -1)  # down-shift matrix
# Sylvester displacement: D V - V Z0 has rank 1, with generators G, H.
G = np.array(x ** n)[:, None]
H = np.array([0, 0, 0, 1])[:, None]
assert np.allclose(D @ V - V @ Z0 - G @ H.T, 0)
# Equivalent form: V - D^{-1} V Z0 = G H^T with G = x^{n-1}.
G = np.array(x ** (n-1))[:, None]
assert np.allclose(V - D_inv @ V @ Z0 - G @ H.T, 0)
# Krylov reconstruction: V = sum_i D^{-i} G H^T Z0^i (Z0 is nilpotent).
A_power = [np.linalg.matrix_power(D_inv, i) for i in range(n)]
B_power = [np.linalg.matrix_power(Z0.T, i) for i in range(n)]
A_power_G = np.hstack([a @ G for a in A_power])
B_power_H = np.hstack([b @ H for b in B_power])
assert np.allclose(A_power_G @ B_power_H.T, V)
# A_power_G is V with its columns reversed, so V @ v can be computed from it.
v = np.random.randn(n)
result_slow = V @ v
assert np.allclose(A_power_G @ v[::-1], result_slow)
# V^T @ v as a weighted row sum.
result_slow = V.T @ v
assert np.allclose((V * v[:, None]).sum(axis=0), result_slow)
|
butterfly-master
|
learning_transforms/vandermonde.py
|
butterfly-master
|
learning_transforms/__init__.py
|
|
"""Target matrices to factor: DFT, DCT, Hadamard, convolution, Legendre, Vandermonde.
Complex complex must be converted to real matrices with 2 as the last dimension
(for Pytorch's compatibility).
"""
import math
import numpy as np
from numpy.polynomial import legendre
import scipy.linalg as LA
from scipy.fftpack import dct, dst, fft2
import scipy.sparse as sparse
from scipy.linalg import hadamard
import torch
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
from butterfly import Butterfly
# Copied from https://stackoverflow.com/questions/23869694/create-nxn-haar-matrix
def haar_matrix(n, normalized=False):
    # Allow only size n of power 2 (round up and cast to int)
    n = int(2**np.ceil(np.log2(n)))
    if n > 2:
        h = haar_matrix(n // 2)
    else:
        return np.array([[1, 1], [1, -1]])
# calculate upper haar part
h_n = np.kron(h, [1, 1])
# calculate lower haar part
if normalized:
h_i = np.sqrt(n/2)*np.kron(np.eye(len(h)), [1, -1])
else:
h_i = np.kron(np.eye(len(h)), [1, -1])
# combine parts
h = np.vstack((h_n, h_i))
return h
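# Example (added), unnormalized Haar matrix for n = 4:
#   haar_matrix(4)
#   -> [[ 1,  1,  1,  1],
#       [ 1,  1, -1, -1],
#       [ 1, -1,  0,  0],
#       [ 0,  0,  1, -1]]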
def hartley_matrix(n):
"""Matrix corresponding to the discrete Hartley transform.
https://en.wikipedia.org/wiki/Discrete_Hartley_transform
"""
range_ = np.arange(n)
indices = np.outer(range_, range_)
arg = indices * 2 * math.pi / n
return np.cos(arg) + np.sin(arg)
def hilbert_matrix(n):
"""
https://en.wikipedia.org/wiki/Hilbert_matrix
"""
range_ = np.arange(n) + 1
arg = range_[:, None] + range_ - 1
return 1.0 / arg
def krylov_construct(A, v, m):
    """Build the Krylov matrix K with rows K[i] = A^i v.
    Specialized to matrices whose nonzeros lie on the first subdiagonal:
    the diagonal and any corner entries of A are ignored.
    """
    n = v.shape[0]
    assert A.shape == (n, n)
    subd = np.diagonal(A, -1)
    K = np.zeros(shape=(m, n))
    K[0, :] = v
    for i in range(1, m):
        K[i, 1:] = subd * K[i-1, :-1]
    return K
def toeplitz_like(G, H):
    n = G.shape[0]
    r = G.shape[1]
    assert H.shape[0] == n and H.shape[1] == r
    # Z_{+1} and Z_{-1}: down-shift matrices with corner entry +1 / -1
    A1 = np.diag(np.ones(n-1), -1)
    A1[0, n-1] = 1
    A1_ = np.diag(np.ones(n-1), -1)
    A1_[0, n-1] = -1
    rank1s = [krylov_construct(A1, G[:, i], n) @ krylov_construct(A1_, H[:, i], n).T
              for i in range(r)]
    M = sum(rank1s)
    return M
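# Usage sketch (added; not part of the original file): build a Toeplitz-like
# matrix from random rank-r generators.
def _toeplitz_like_example(n=8, r=2, seed=0):
    rng = np.random.RandomState(seed)
    G = rng.randn(n, r)
    H = rng.randn(n, r)
    M = toeplitz_like(G, H)
    assert M.shape == (n, n)
    return M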
def named_target_matrix(name, size):
"""
Parameter:
name: name of the target matrix
Return:
target_matrix: (n, n) numpy array for real matrices or (n, n, 2) for complex matrices.
"""
if name == 'dft':
return LA.dft(size, scale='sqrtn')[:, :, None].view('float64')
elif name == 'idft':
return np.ascontiguousarray(LA.dft(size, scale='sqrtn').conj().T)[:, :, None].view('float64')
elif name == 'dft2':
size_sr = int(math.sqrt(size))
matrix = np.fft.fft2(np.eye(size_sr**2).reshape(-1, size_sr, size_sr), norm='ortho').reshape(-1, size_sr**2)
# matrix1d = LA.dft(size_sr, scale='sqrtn')
# assert np.allclose(np.kron(m1d, m1d), matrix)
# return matrix[:, :, None].view('float64')
from butterfly.utils import bitreversal_permutation
br_perm = bitreversal_permutation(size_sr)
br_perm2 = np.arange(size_sr**2).reshape(size_sr, size_sr)[br_perm][:, br_perm].reshape(-1)
matrix = np.ascontiguousarray(matrix[:, br_perm2])
return matrix[:, :, None].view('float64')
elif name == 'dct':
# Need to transpose as dct acts on rows of matrix np.eye, not columns
# return dct(np.eye(size), norm='ortho').T
return dct(np.eye(size)).T / math.sqrt(size)
elif name == 'dst':
return dst(np.eye(size)).T / math.sqrt(size)
elif name == 'hadamard':
return LA.hadamard(size) / math.sqrt(size)
elif name == 'hadamard2':
size_sr = int(math.sqrt(size))
matrix1d = LA.hadamard(size_sr) / math.sqrt(size_sr)
return np.kron(matrix1d, matrix1d)
elif name == 'b2':
size_sr = int(math.sqrt(size))
from butterfly import Block2x2DiagProduct
b = Block2x2DiagProduct(size_sr)
matrix1d = b(torch.eye(size_sr)).t().detach().numpy()
return np.kron(matrix1d, matrix1d)
elif name == 'convolution':
np.random.seed(0)
x = np.random.randn(size)
return LA.circulant(x) / math.sqrt(size)
elif name == 'hartley':
return hartley_matrix(size) / math.sqrt(size)
elif name == 'haar':
return haar_matrix(size, normalized=True) / math.sqrt(size)
elif name == 'legendre':
grid = np.linspace(-1, 1, size + 2)[1:-1]
return legendre.legvander(grid, size - 1).T / math.sqrt(size)
elif name == 'hilbert':
H = hilbert_matrix(size)
return H / np.linalg.norm(H, 2)
elif name == 'randn':
np.random.seed(0)
return np.random.randn(size, size) / math.sqrt(size)
elif name == 'permutation':
np.random.seed(0)
perm = np.random.permutation(size)
P = np.eye(size)[perm]
return P
elif name.startswith('rank-unnorm'):
r = int(name[11:])
np.random.seed(0)
G = np.random.randn(size, r)
H = np.random.randn(size, r)
M = G @ H.T
# M /= math.sqrt(size*r)
return M
elif name.startswith('rank'):
r = int(name[4:])
np.random.seed(0)
G = np.random.randn(size, r)
H = np.random.randn(size, r)
M = G @ H.T
M /= math.sqrt(size*r)
return M
elif name.startswith('sparse'):
s = int(name[6:])
# 2rn parameters
np.random.seed(0)
mask = sparse.random(size, size, density=s/size, data_rvs=np.ones)
M = np.random.randn(size, size) * (mask.toarray())
M /= math.sqrt(s)
return M
elif name.startswith('toeplitz'):
r = int(name[8:])
G = np.random.randn(size, r) / math.sqrt(size*r)
H = np.random.randn(size, r) / math.sqrt(size*r)
M = toeplitz_like(G, H)
return M
elif name == 'fastfood':
n = size
S = np.random.randn(n)
G = np.random.randn(n)
B = np.random.randn(n)
# P = np.arange(n)
P = np.random.permutation(n)
H = hadamard(n)
# SHGPHB
# print(H)
# print((H*B)[P,:])
# print((H @ (G[:,np.newaxis] * (H * B)[P,:])))
F = S[:,np.newaxis] * (H @ (G[:,np.newaxis] * (H * B)[P,:])) / n
return F
# x = np.random.randn(batch_size,n)
# HB = hadamard_transform(B)
# PHBx = HBx[:, P]
# HGPHBx = hadamard_transform(G*PHBx)
# return S*HGPHBx
elif name == 'butterfly':
# n (log n+1) params in the hierarchy
b = Butterfly(in_size=size, out_size=size, bias=False, tied_weight=False, param='odo', nblocks=0)
M = b(torch.eye(size))
return M.cpu().detach().numpy()
else:
assert False, 'Target matrix name not recognized or implemented'
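# Usage sketch (added): real targets come back as (n, n) arrays; complex
# targets such as 'dft' come back as (n, n, 2), with real and imaginary
# parts stacked in the last dimension.
#   named_target_matrix('dct', 8).shape   # (8, 8)
#   named_target_matrix('dft', 8).shape   # (8, 8, 2)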
|
butterfly-master
|
learning_transforms/target_matrix.py
|
import numpy as np
import torch
from torch.nn import functional as F
from numpy.polynomial import chebyshev, legendre
from utils import bitreversal_permutation
def polymatmul(A, B):
"""Batch-multiply two matrices of polynomials
Parameters:
A: (N, batch_size, n, m, d1)
B: (batch_size, m, p, d2)
Returns:
AB: (N, batch_size, n, p, d1 + d2 - 1)
"""
unsqueezed = False
if A.dim() == 4:
unsqueezed = True
A = A.unsqueeze(0)
N, batch_size, n, m, d1 = A.shape
batch_size_, m_, p, d2 = B.shape
assert batch_size == batch_size_
assert m == m_
# Naive implementation using conv1d and loop, slower but easier to understand
# Bt_flipped = B.transpose(1, 2).flip(-1)
# result = torch.stack([
# F.conv1d(A[:, i].reshape(-1, m, d1), Bt_flipped[i], padding=d2 - 1).reshape(N, n, p, -1)
# for i in range(batch_size)
# ], dim=1)
# Batched implementation using grouped convolution, faster
result = F.conv1d(A.transpose(1, 2).reshape(N * n, batch_size * m, d1),
B.transpose(1, 2).reshape(batch_size * p, m, d2).flip(-1),
padding=d2 - 1,
groups=batch_size).reshape(N, n, batch_size, p, d1 + d2 - 1).transpose(1, 2)
return result.squeeze(0) if unsqueezed else result
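# Sanity-check sketch (added; not part of the original file): for 1x1
# polynomial matrices, polymatmul reduces to ordinary polynomial
# multiplication, with coefficients stored low-to-high as in
# numpy.polynomial.
def _polymatmul_check():
    from numpy.polynomial import polynomial as P
    A = torch.randn(3, 1, 1, 4)  # batch of 3 scalar polynomials, degree 3
    B = torch.randn(3, 1, 1, 2)  # batch of 3 scalar polynomials, degree 1
    AB = polymatmul(A, B)        # shape (3, 1, 1, 5)
    expected = P.polymul(A[0, 0, 0].numpy(), B[0, 0, 0].numpy())
    assert np.allclose(AB[0, 0, 0].numpy(), expected, atol=1e-5)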
def ops_transpose_mult(a, b, c, p0, p1, v):
"""Fast algorithm to multiply P^T v where P is the matrix of coefficients of
    OPs, specified by the coefficients a, b, c, and the starting polynomials
    p0 and p1.
In particular, the recurrence is
P_{n+2}(x) = (a[n] x + b[n]) P_{n+1}(x) + c[n] P_n(x).
Parameters:
a: array of length n
b: array of length n
c: array of length n
p0: real number representing P_0(x).
p1: pair of real numbers representing P_1(x).
v: (batch_size, n)
Return:
result: P^T v.
"""
n = v.shape[-1]
m = int(np.log2(n))
assert n == 1 << m, "Length n must be a power of 2."
# Preprocessing: compute T_{i:j}, the transition matrix from p_i to p_j.
T = [None] * (m + 1)
# Lowest level, filled with T_{i:i+1}
# n matrices, each 2 x 2, with coefficients being polynomials of degree <= 1
T[0] = torch.zeros(n, 2, 2, 2)
T[0][:, 0, 0, 1] = a
T[0][:, 0, 0, 0] = b
T[0][:, 0, 1, 0] = c
T[0][:, 1, 0, 0] = 1.0
for i in range(1, m + 1):
T[i] = polymatmul(T[i - 1][1::2], T[i - 1][::2])
P_init = torch.tensor([p1, [p0, 0.0]], dtype=torch.float) # [p_1, p_0]
P_init = P_init.unsqueeze(0).unsqueeze(-2)
# Check that T is computed correctly
# These should be the polynomials P_{n+1} and P_n
# Pnp1n = polymatmul(T[m], P_init).squeeze()
# Bottom-up multiplication algorithm to avoid recursion
S = [None] * m
Tidentity = torch.eye(2).unsqueeze(0).unsqueeze(3)
S[0] = v[:, 1::2, None, None, None] * T[0][::2]
S[0][:, :, :, :, :1] += v[:, ::2, None, None, None] * Tidentity
for i in range(1, m):
S[i] = polymatmul(S[i - 1][:, 1::2], T[i][::2])
S[i][:, :, :, :, :S[i - 1].shape[-1]] += S[i - 1][:, ::2]
result = polymatmul(S[m - 1][:, :, [1], :, :n-1], P_init).squeeze(1).squeeze(1).squeeze(1)
return result
def ops_transpose_mult_br(a, b, c, p0, p1, v):
"""Fast algorithm to multiply P^T v where P is the matrix of coefficients of
    OPs, specified by the coefficients a, b, c, and the starting polynomials
    p0 and p1. Implementation with bit-reversal.
In particular, the recurrence is
P_{n+2}(x) = (a[n] x + b[n]) P_{n+1}(x) + c[n] P_n(x).
Parameters:
a: array of length n
b: array of length n
c: array of length n
p0: real number representing P_0(x).
p1: pair of real numbers representing P_1(x).
v: (batch_size, n)
Return:
result: P^T v.
"""
n = v.shape[-1]
m = int(np.log2(n))
assert n == 1 << m, "Length n must be a power of 2."
# Preprocessing: compute T_{i:j}, the transition matrix from p_i to p_j.
T_br = [None] * (m + 1)
# Lowest level, filled with T_{i:i+1}
# n matrices, each 2 x 2, with coefficients being polynomials of degree <= 1
T_br[0] = torch.zeros(n, 2, 2, 2)
T_br[0][:, 0, 0, 1] = a
T_br[0][:, 0, 0, 0] = b
T_br[0][:, 0, 1, 0] = c
T_br[0][:, 1, 0, 0] = 1.0
br_perm = bitreversal_permutation(n)
T_br[0] = T_br[0][br_perm]
for i in range(1, m + 1):
T_br[i] = polymatmul(T_br[i - 1][n >> i:], T_br[i - 1][:n >> i])
P_init = torch.tensor([p1, [p0, 0.0]], dtype=torch.float) # [p_1, p_0]
P_init = P_init.unsqueeze(0).unsqueeze(-2)
# Check that T_br is computed correctly
# These should be the polynomials P_{n+1} and P_n
# Pnp1n = polymatmul(T_br[m], P_init).squeeze()
v_br = v[:, br_perm]
# Bottom-up multiplication algorithm to avoid recursion
S_br = [None] * m
Tidentity = torch.eye(2).unsqueeze(0).unsqueeze(3)
S_br[0] = v_br[:, n//2:, None, None, None] * T_br[0][:n // 2]
S_br[0][:, :, :, :, :1] += v_br[:, :n//2, None, None, None] * Tidentity
for i in range(1, m):
S_br[i] = polymatmul(S_br[i - 1][:, (n >> (i + 1)):], T_br[i][:(n >> (i + 1))])
S_br[i][:, :, :, :, :S_br[i - 1].shape[-1]] += S_br[i - 1][:, :(n >> (i + 1))]
result = polymatmul(S_br[m - 1][:, :, [1], :, :n-1], P_init).squeeze(1).squeeze(1).squeeze(1)
return result
def chebyshev_transpose_mult_slow(v):
"""Naive multiplication P^T v where P is the matrix of coefficients of
Chebyshev polynomials.
Parameters:
v: (batch_size, n)
Return:
P^T v: (batch_size, n)
"""
n = v.shape[-1]
# Construct the coefficient matrix P for Chebyshev polynomials
P = np.zeros((n, n), dtype=np.float32)
for i, coef in enumerate(np.eye(n)):
P[i, :i + 1] = chebyshev.cheb2poly(coef)
P = torch.tensor(P)
return v @ P
def legendre_transpose_mult_slow(v):
"""Naive multiplication P^T v where P is the matrix of coefficients of
Legendre polynomials.
Parameters:
v: (batch_size, n)
Return:
P^T v: (batch_size, n)
"""
n = v.shape[-1]
# Construct the coefficient matrix P for Legendre polynomials
P = np.zeros((n, n), dtype=np.float32)
for i, coef in enumerate(np.eye(n)):
P[i, :i + 1] = legendre.leg2poly(coef)
P = torch.tensor(P)
return v @ P
def ops_transpose_mult_test():
# Trying to find memory leak
# n = 64
# batch_size = 1000
n = 8
batch_size = 2
v = torch.randn(batch_size, n)
# Chebyshev polynomials
result = ops_transpose_mult(2.0 * torch.ones(n), torch.zeros(n), -torch.ones(n), 1.0, (0.0, 1.0), v)
result_br = ops_transpose_mult_br(2.0 * torch.ones(n), torch.zeros(n), -torch.ones(n), 1.0, (0.0, 1.0), v)
result_slow = chebyshev_transpose_mult_slow(v)
assert torch.allclose(result, result_slow)
assert torch.allclose(result, result_br)
# Legendre polynomials
n_range = torch.arange(n, dtype=torch.float)
result = ops_transpose_mult((2 * n_range + 3) / (n_range + 2), torch.zeros(n), -(n_range + 1) / (n_range + 2), 1.0, (0.0, 1.0), v)
result_br = ops_transpose_mult_br((2 * n_range + 3) / (n_range + 2), torch.zeros(n), -(n_range + 1) / (n_range + 2), 1.0, (0.0, 1.0), v)
result_slow = legendre_transpose_mult_slow(v)
assert torch.allclose(result, result_slow)
assert torch.allclose(result, result_br)
if __name__ == '__main__':
ops_transpose_mult_test()
# TODO: there might be a memory leak, trying to find it here
# for _ in range(1000):
# temp = polymatmul(A, B)
|
butterfly-master
|
learning_transforms/ops.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
extensions = [
Extension('ABCD_mult', ['ABCD_mult.pyx'],
include_dirs = [numpy.get_include()],
extra_compile_args=['-O3', '-march=native']
),
]
setup(
ext_modules = cythonize(extensions),
)
|
butterfly-master
|
learning_transforms/setup.py
|
# Copied from https://github.com/ray-project/ray/blob/master/python/ray/tune/tune.py.
# We adapt to stop early if any of the trials get good validation loss, since
# all we care about is that there exists a good factorization
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import logging
import os
import time
from ray.tune.error import TuneError
from ray.tune.experiment import convert_to_experiment_list, Experiment
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial, Checkpoint, DEBUG_PRINT_INTERVAL
from ray.tune.log_sync import wait_for_log_sync
from ray.tune.trial_runner import TrialRunner
from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
FIFOScheduler, MedianStoppingRule)
from ray.tune.web_server import TuneServer
logger = logging.getLogger(__name__)
_SCHEDULERS = {
"FIFO": FIFOScheduler,
"MedianStopping": MedianStoppingRule,
"HyperBand": HyperBandScheduler,
"AsyncHyperBand": AsyncHyperBandScheduler,
}
def _make_scheduler(args):
if args.scheduler in _SCHEDULERS:
return _SCHEDULERS[args.scheduler](**args.scheduler_config)
else:
raise TuneError("Unknown scheduler: {}, should be one of {}".format(
args.scheduler, _SCHEDULERS.keys()))
def _find_checkpoint_dir(exp):
# TODO(rliaw): Make sure the checkpoint_dir is resolved earlier.
# Right now it is resolved somewhere far down the trial generation process
return os.path.join(exp.spec["local_dir"], exp.name)
def _prompt_restore(checkpoint_dir, resume):
restore = False
if TrialRunner.checkpoint_exists(checkpoint_dir):
if resume == "prompt":
msg = ("Found incomplete experiment at {}. "
"Would you like to resume it?".format(checkpoint_dir))
restore = click.confirm(msg, default=False)
if restore:
logger.info("Tip: to always resume, "
"pass resume=True to run()")
else:
logger.info("Tip: to always start a new experiment, "
"pass resume=False to run()")
elif resume:
restore = True
else:
logger.info("Tip: to resume incomplete experiments, "
"pass resume='prompt' or resume=True to run()")
else:
logger.info(
"Did not find checkpoint file in {}.".format(checkpoint_dir))
return restore
def run(run_or_experiment,
name=None,
stop=None,
config=None,
resources_per_trial=None,
num_samples=1,
local_dir=None,
upload_dir=None,
trial_name_creator=None,
loggers=None,
sync_function=None,
checkpoint_freq=0,
checkpoint_at_end=False,
export_formats=None,
max_failures=3,
restore=None,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True,
early_stop_all_trials=False):
"""Executes training.
Args:
run_or_experiment (function|class|str|Experiment): If
function|class|str, this is the algorithm or model to train.
            This may refer to the name of a built-in algorithm
(e.g. RLLib's DQN or PPO), a user-defined trainable
function or class, or the string identifier of a
trainable function or class registered in the tune registry.
If Experiment, then Tune will execute training based on
Experiment.spec.
name (str): Name of experiment.
stop (dict): The stopping criteria. The keys may be any field in
the return result of 'train()', whichever is reached first.
Defaults to empty dict.
config (dict): Algorithm-specific configuration for Tune variant
generation (e.g. env, hyperparams). Defaults to empty dict.
Custom search algorithms may ignore this.
resources_per_trial (dict): Machine resources to allocate per trial,
e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be
assigned unless you specify them here. Defaults to 1 CPU and 0
GPUs in ``Trainable.default_resource_request()``.
num_samples (int): Number of times to sample from the
hyperparameter space. Defaults to 1. If `grid_search` is
provided as an argument, the grid will be repeated
            `num_samples` times.
local_dir (str): Local dir to save training results to.
Defaults to ``~/ray_results``.
upload_dir (str): Optional URI to sync training results
to (e.g. ``s3://bucket``).
trial_name_creator (func): Optional function for generating
the trial string representation.
loggers (list): List of logger creators to be used with
each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.
See `ray/tune/logger.py`.
sync_function (func|str): Function for syncing the local_dir to
upload_dir. If string, then it must be a string template for
syncer to run. If not provided, the sync command defaults
            to standard S3 or gsutil sync commands.
checkpoint_freq (int): How many training iterations between
checkpoints. A value of 0 (default) disables checkpointing.
checkpoint_at_end (bool): Whether to checkpoint at the end of the
experiment regardless of the checkpoint_freq. Default is False.
        export_formats (list): List of formats to export at the end of
the experiment. Default is None.
max_failures (int): Try to recover a trial from its last
checkpoint at least this many times. Only applies if
checkpointing is enabled. Setting to -1 will lead to infinite
recovery retries. Defaults to 3.
restore (str): Path to checkpoint. Only makes sense to set if
running 1 trial. Defaults to None.
search_alg (SearchAlgorithm): Search Algorithm. Defaults to
BasicVariantGenerator.
scheduler (TrialScheduler): Scheduler for executing
the experiment. Choose among FIFO (default), MedianStopping,
AsyncHyperBand, and HyperBand.
with_server (bool): Starts a background Tune server. Needed for
using the Client API.
server_port (int): Port number for launching TuneServer.
verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,
1 = only status updates, 2 = status and trial results.
resume (bool|"prompt"): If checkpoint exists, the experiment will
resume from there. If resume is "prompt", Tune will prompt if
checkpoint detected.
queue_trials (bool): Whether to queue trials when the cluster does
not currently have enough resources to launch one. This should
be set to True when running on an autoscaling cluster to enable
automatic scale-up.
reuse_actors (bool): Whether to reuse actors between different trials
when possible. This can drastically speed up experiments that start
and stop actors often (e.g., PBT in time-multiplexing mode). This
requires trials to have the same resource requirements.
trial_executor (TrialExecutor): Manage the execution of trials.
raise_on_failed_trial (bool): Raise TuneError if there exists failed
trial (of ERROR state) when the experiments complete.
Returns:
List of Trial objects.
Raises:
TuneError if any trials failed and `raise_on_failed_trial` is True.
Examples:
>>> tune.run(mytrainable, scheduler=PopulationBasedTraining())
>>> tune.run(mytrainable, num_samples=5, reuse_actors=True)
>>> tune.run(
"PG",
num_samples=5,
config={
"env": "CartPole-v0",
"lr": tune.sample_from(lambda _: np.random.rand())
}
)
"""
experiment = run_or_experiment
if not isinstance(run_or_experiment, Experiment):
experiment = Experiment(
name, run_or_experiment, stop, config, resources_per_trial,
num_samples, local_dir, upload_dir, trial_name_creator, loggers,
sync_function, checkpoint_freq, checkpoint_at_end, export_formats,
max_failures, restore)
else:
logger.debug("Ignoring some parameters passed into tune.run.")
checkpoint_dir = _find_checkpoint_dir(experiment)
should_restore = _prompt_restore(checkpoint_dir, resume)
runner = None
if should_restore:
try:
runner = TrialRunner.restore(checkpoint_dir, search_alg, scheduler,
trial_executor)
except Exception:
logger.exception("Runner restore failed. Restarting experiment.")
else:
logger.info("Starting a new experiment.")
if not runner:
scheduler = scheduler or FIFOScheduler()
search_alg = search_alg or BasicVariantGenerator()
search_alg.add_configurations([experiment])
runner = TrialRunner(
search_alg,
scheduler=scheduler,
metadata_checkpoint_dir=checkpoint_dir,
launch_web_server=with_server,
server_port=server_port,
verbose=bool(verbose > 1),
queue_trials=queue_trials,
reuse_actors=reuse_actors,
trial_executor=trial_executor)
if verbose:
print(runner.debug_string(max_debug=99999))
last_debug = 0
while not runner.is_finished():
runner.step()
if time.time() - last_debug > DEBUG_PRINT_INTERVAL:
if verbose:
print(runner.debug_string())
last_debug = time.time()
if early_stop_all_trials:
# Check if any trial has good validation loss, in which case we stop all trials
should_stop = False
for trial in runner.get_trials():
try:
result = trial.last_result
if any(result[criteria] >= stop_value for criteria, stop_value in trial.stopping_criterion.items()):
should_stop = True
break
except Exception:
pass
if should_stop:
# Checkpoint all trials
for trial in runner.get_trials():
if hasattr(trial, "runner") and trial.runner:
runner.trial_executor.save(trial, storage=Checkpoint.DISK)
runner.stop_trial(trial)
break
if verbose:
print(runner.debug_string(max_debug=99999))
wait_for_log_sync()
errored_trials = []
for trial in runner.get_trials():
if trial.status != Trial.TERMINATED:
errored_trials += [trial]
if errored_trials:
if raise_on_failed_trial:
raise TuneError("Trials did not complete", errored_trials)
else:
logger.error("Trials did not complete: %s", errored_trials)
return runner.get_trials()
def run_experiments(experiments,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True,
early_stop_all_trials=False):
"""Runs and blocks until all trials finish.
Examples:
>>> experiment_spec = Experiment("experiment", my_func)
>>> run_experiments(experiments=experiment_spec)
>>> experiment_spec = {"experiment": {"run": my_func}}
>>> run_experiments(experiments=experiment_spec)
>>> run_experiments(
>>> experiments=experiment_spec,
>>> scheduler=MedianStoppingRule(...))
>>> run_experiments(
>>> experiments=experiment_spec,
>>> search_alg=SearchAlgorithm(),
>>> scheduler=MedianStoppingRule(...))
Returns:
List of Trial objects, holding data for each executed trial.
"""
    # It is important to do this here because it schematizes the
    # experiments and conducts the implicit registration.
experiments = convert_to_experiment_list(experiments)
trials = []
for exp in experiments:
trials += run(
exp,
search_alg=search_alg,
scheduler=scheduler,
with_server=with_server,
server_port=server_port,
verbose=verbose,
resume=resume,
queue_trials=queue_trials,
reuse_actors=reuse_actors,
trial_executor=trial_executor,
raise_on_failed_trial=raise_on_failed_trial,
early_stop_all_trials=early_stop_all_trials)
return trials
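# Usage sketch (added; argument values are illustrative): stop the whole
# sweep as soon as any trial reaches its stopping criterion by passing
# early_stop_all_trials=True:
#   trials = run_experiments(experiment,
#                            scheduler=AsyncHyperBandScheduler(reward_attr='negative_loss'),
#                            early_stop_all_trials=True)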
|
butterfly-master
|
learning_transforms/tune.py
|
import pickle
from pathlib import Path
import numpy as np
result_dir = 'results_new'
experiment_names = []
experiment_names += [[f'dft_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
experiment_names += [[f'dct_factorization_TrainableBPP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
experiment_names += [[f'dst_factorization_TrainableBPP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
experiment_names += [[f'convolution_factorization_TrainableBPBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
experiment_names += [[f'hadamard_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
experiment_names += [[f'hartley_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
experiment_names += [[f'legendre_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
experiment_names += [[f'randn_factorization_TrainableBP_True_{size}' for size in [8, 16, 32, 64, 128, 256, 512, 1024]]]
all_rmse = []
for experiment_names_ in experiment_names:
print(experiment_names_[0])
best_loss = []
best_polished_loss = []
for experiment_name in experiment_names_:
checkpoint_path = Path(result_dir) / experiment_name / 'trial.pkl'
with checkpoint_path.open('rb') as f:
trials = pickle.load(f)
losses = [-trial.last_result['negative_loss'] for trial in trials]
polished_losses = [-trial.last_result.get('polished_negative_loss', float('-inf')) for trial in trials]
# best_loss.append(min(losses))
best_loss.append(np.sort(losses)[0]) # to deal with NaN
best_polished_loss.append(np.sort(polished_losses)[0]) # to deal with NaN
# print(np.array(losses))
# print(np.sort(losses))
# best_trial = max(trials, key=lambda trial: trial.last_result['negative_loss'])
# train_model = best_trial._get_trainable_cls()(best_trial.config)
# train_model = TrainableHadamardFactorFixedOrder(best_trial.config)
# train_model = TrainableHadamardFactorSoftmax(best_trial.config)
# train_model = TrainableHadamardFactorSparsemax(best_trial.config)
# train_model.restore(str(Path(best_trial.logdir) / best_trial._checkpoint.value))
# model = train_model.model
# best_rmse = np.sqrt(best_loss)
# print(best_rmse)
print(np.sqrt(best_polished_loss))
all_rmse.append(np.sqrt(best_polished_loss))
print(np.array(all_rmse))
transform_names = ['DFT', 'DCT', 'DST', 'Conv', 'Hadamard', 'Hartley', 'Legendre', 'Rand']
with open('rmse.pkl', 'wb') as f:
    pickle.dump({'names': transform_names, 'rmse': all_rmse}, f)
|
butterfly-master
|
learning_transforms/print_results.py
|
import argparse
import math
import multiprocessing as mp
import os
from pathlib import Path
import pickle
import random
import sys
import numpy as np
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from
from ray.tune.schedulers import AsyncHyperBandScheduler
from tune import run_experiments
from butterfly import Butterfly, ButterflyProduct, sinkhorn, Block2x2DiagProduct, BlockPermProduct, FixedPermutation
from semantic_loss import semantic_loss_exactly_one
from training import PytorchTrainable, TrainableMatrixFactorization
from utils import bitreversal_permutation
from complex_utils import real_to_complex, complex_matmul
from target_matrix import named_target_matrix
N_LBFGS_STEPS = 300
N_LBFGS_STEPS_VALIDATION = 15
N_TRIALS_TO_POLISH = 60
class TrainableFftFactorFixedOrder(PytorchTrainable):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=True, fixed_order=True)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
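# A quick commented-out illustration of the bit-reversal permutation that the
# trainables above index the model's columns with: index i maps to the integer
# whose binary digits are those of i reversed, so for size 8 the permutation is
# [0, 4, 2, 6, 1, 5, 3, 7]. This sketch recomputes it with plain Python only.
# def bitreversal_sketch(n):
#     m = n.bit_length() - 1  # log2(n), assuming n is a power of 2
#     return [int(format(i, f'0{m}b')[::-1], 2) for i in range(n)]
# assert bitreversal_sketch(8) == [0, 4, 2, 6, 1, 5, 3, 7]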
class TrainableFftFactorSoftmax(PytorchTrainable):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=True, fixed_order=False)
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
total_loss = loss + self.semantic_loss_weight * semantic_loss.mean()
total_loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftFactorSparsemax(TrainableFftFactorFixedOrder):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=True, fixed_order=False, softmax_fn='sparsemax')
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
class TrainableFftFactorSparsemaxNoPerm(TrainableFftFactorSparsemax):
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftFactorSoftmaxNoPerm(TrainableFftFactorSoftmax):
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableRandnFactorSoftmaxNoPerm(PytorchTrainable):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=False, fixed_order=False, softmax_fn='softmax')
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
        self.target_matrix = torch.randn(size, size, requires_grad=False)
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftFactorSparsemaxPermFront(TrainableFftFactorSparsemax):
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[self.br_perm, :]
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
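# All of the trainables above share one epoch pattern: n_steps_per_epoch Adam
# steps on an MSE loss, reported as 'negative_loss' so Ray Tune can maximize
# it. A self-contained commented-out sketch of that pattern, with a plain
# parameter matrix standing in for ButterflyProduct (illustrative only):
# target = torch.randn(8, 8)
# W = nn.Parameter(torch.randn(8, 8))
# opt = optim.Adam([W], lr=1e-2)
# for _ in range(200):  # n_steps_per_epoch
#     opt.zero_grad()
#     step_loss = nn.functional.mse_loss(W, target)
#     step_loss.backward()
#     opt.step()
# print({'negative_loss': -step_loss.item()})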
def fft_factorization_fixed_order(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_fixed_order_{args.size}',
run=TrainableFftFactorFixedOrder,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_softmax(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_softmax_{args.size}',
run=TrainableFftFactorSoftmax,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'is_nan': True,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'semantic_loss_weight': sample_from(lambda spec: math.exp(random.uniform(math.log(5e-4), math.log(5e-1)))),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_sparsemax(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_sparsemax_{args.size}',
run=TrainableFftFactorSparsemax,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_sparsemax_no_perm(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_sparsemax_no_perm_{args.size}',
run=TrainableFftFactorSparsemaxNoPerm,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_softmax_no_perm(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_softmax_no_perm_{args.size}',
run=TrainableFftFactorSoftmaxNoPerm,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def randn_factorization_softmax_no_perm(argv):
    parser = argparse.ArgumentParser(description='Learn to factor random matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Randn_factorization_softmax_no_perm_{args.size}',
run=TrainableRandnFactorSoftmaxNoPerm,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_sparsemax_perm_front(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_sparsemax_perm_front_{args.size}',
run=TrainableFftFactorSparsemaxPermFront,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
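# The experiment constructors above differ only in the experiment name, the
# Trainable class, and one optional config entry. A hedged commented-out sketch
# of a single factory that could replace them (not used anywhere; the explicit
# versions are kept for readability):
# def make_factorization_experiment(name, trainable, args, extra_config=None):
#     config = {
#         'size': args.size,
#         'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
#         'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
#         'n_steps_per_epoch': args.nsteps,
#     }
#     config.update(extra_config or {})
#     return RayExperiment(
#         name=f'{name}_{args.size}', run=trainable, local_dir=args.result_dir,
#         num_samples=args.ntrials, checkpoint_at_end=True,
#         resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
#         stop={'training_iteration': 1 if args.smoke_test else 99999,
#               'negative_loss': -1e-8},
#         config=config)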
# if __name__ == '__main__':
# # experiment, args = fft_factorization_fixed_order(sys.argv[1:])
# experiment, args = fft_factorization_softmax(sys.argv[1:])
# # experiment, args = fft_factorization_sparsemax(sys.argv[1:])
# # experiment, args = fft_factorization_sparsemax_no_perm(sys.argv[1:])
# # experiment, args = fft_factorization_softmax_no_perm(sys.argv[1:])
# # experiment, args = randn_factorization_softmax_no_perm(sys.argv[1:])
# # experiment, args = fft_factorization_sparsemax_perm_front(sys.argv[1:])
# # We'll use multiple processes so disable MKL multithreading
# os.environ['MKL_NUM_THREADS'] = str(args.nthreads)
# ray.init()
# ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=args.nmaxepochs)
# trials = run_experiments(experiment, scheduler=ahb)
# losses = [-trial.last_result['negative_loss'] for trial in trials]
# print(np.array(losses))
# print(np.sort(losses))
# checkpoint_path = Path(args.result_dir) / experiment.name
# checkpoint_path.mkdir(parents=True, exist_ok=True)
# checkpoint_path /= 'trial.pkl'
# with checkpoint_path.open('wb') as f:
# pickle.dump(trials, f)
class TrainableFft(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=True,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
# br_perm = bitreversal_permutation(size)
# br_reverse = torch.tensor(list(br_perm[::-1]))
# br_reverse = torch.cat((torch.tensor(list(br_perm[:size//2][::-1])), torch.tensor(list(br_perm[size//2:][::-1]))))
# Same as [6, 2, 4, 0, 7, 3, 5, 1], which is [0, 1]^4 * [0, 2, 1, 3]^2 * [6, 4, 2, 0, 7, 5, 3, 1]
# br_reverse = torch.cat((torch.tensor(list(br_perm[:size//4][::-1])), torch.tensor(list(br_perm[size//4:size//2][::-1])), torch.tensor(list(br_perm[size//2:3*size//4][::-1])), torch.tensor(list(br_perm[3*size//4:][::-1]))))
# self.br_perm = br_reverse
# self.br_perm = torch.tensor([0, 7, 4, 3, 2, 5, 6, 1]) # Doesn't work
# self.br_perm = torch.tensor([7, 3, 0, 4, 2, 6, 5, 1]) # Doesn't work
# self.br_perm = torch.tensor([4, 0, 6, 2, 5, 1, 7, 3]) # This works, [0, 1]^4 * [2, 0, 3, 1]^2 * [0, 2, 4, 6, 1, 3, 5, 7] or [1, 0]^4 * [0, 2, 1, 3]^2 * [0, 2, 4, 6, 1, 3, 5, 7]
# self.br_perm = torch.tensor([4, 0, 2, 6, 5, 1, 3, 7]) # Doesn't work, [0, 1]^4 * [2, 0, 1, 3]^2 * [0, 2, 4, 6, 1, 3, 5, 7]
# self.br_perm = torch.tensor([1, 5, 3, 7, 0, 4, 2, 6]) # This works, [0, 1]^4 * [4, 6, 5, 7, 0, 4, 2, 6]
# self.br_perm = torch.tensor([4, 0, 6, 2, 5, 1, 3, 7]) # Doesn't work
# self.br_perm = torch.tensor([4, 0, 6, 2, 1, 5, 3, 7]) # Doesn't work
# self.br_perm = torch.tensor([0, 4, 6, 2, 1, 5, 7, 3]) # Doesn't work
# self.br_perm = torch.tensor([4, 1, 6, 2, 5, 0, 7, 3]) # This works, since it's just swapping 0 and 1
# self.br_perm = torch.tensor([5, 1, 6, 2, 4, 0, 7, 3]) # This works, since it's swapping 4 and 5
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftBlock2x2(TrainableMatrixFactorization):
def _setup(self, config):
self.target_matrix = torch.tensor(config['target_matrix'], dtype=torch.float)
assert self.target_matrix.shape[0] == self.target_matrix.shape[1], 'Only square matrices are supported'
        assert self.target_matrix.dim() in [2, 3], 'target matrix must be 2D if real or 3D if complex'
size = self.target_matrix.shape[0]
torch.manual_seed(config['seed'])
self.model = Block2x2DiagProduct(size=size, complex=True)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.n_epochs_per_validation = config['n_epochs_per_validation']
self.input = real_to_complex(torch.eye(size)[:, torch.tensor(bitreversal_permutation(size))])
class TrainableFftBlockPerm(TrainableMatrixFactorization):
def _setup(self, config):
self.target_matrix = torch.tensor(config['target_matrix'], dtype=torch.float)
assert self.target_matrix.shape[0] == self.target_matrix.shape[1], 'Only square matrices are supported'
        assert self.target_matrix.dim() in [2, 3], 'target matrix must be 2D if real or 3D if complex'
size = self.target_matrix.shape[0]
complex = self.target_matrix.dim() == 3 or config['complex']
torch.manual_seed(config['seed'])
        self.model = nn.Sequential(
            BlockPermProduct(size=size, complex=complex, share_logit=False),
            Block2x2DiagProduct(size=size, complex=complex)
        )
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.n_epochs_per_validation = config['n_epochs_per_validation']
self.input = real_to_complex(torch.eye(size))
def freeze(self):
if not isinstance(self.model[0], FixedPermutation):
self.model[0] = FixedPermutation(self.model[0].argmax(), complex=self.model[0].complex)
class TrainableFftBlockPermTranspose(TrainableMatrixFactorization):
def _setup(self, config):
self.target_matrix = torch.tensor(config['target_matrix'], dtype=torch.float)
assert self.target_matrix.shape[0] == self.target_matrix.shape[1], 'Only square matrices are supported'
        assert self.target_matrix.dim() in [2, 3], 'target matrix must be 2D if real or 3D if complex'
size = self.target_matrix.shape[0]
torch.manual_seed(config['seed'])
        # Transposing the permutation product won't capture the FFT, since we'd need
        # permutations that interleave the first half and second half (the inverse
        # of the permutation that separates the even and the odd indices).
# However, using the permutation product with increasing size will work
# since it can represent bit reversal, which is its own inverse.
self.model = nn.Sequential(
Block2x2DiagProduct(size=size, complex=True, decreasing_size=False),
BlockPermProduct(size=size, complex=True, share_logit=False, increasing_size=True),
)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.n_epochs_per_validation = config['n_epochs_per_validation']
self.input = real_to_complex(torch.eye(size))
def freeze(self):
if not isinstance(self.model[1], FixedPermutation):
self.model[1] = FixedPermutation(self.model[1].argmax(), complex=self.model[1].complex)
class TrainableFftTempAnnealing(TrainableFft):
def _train(self):
temperature = 1.0 / (0.1 * self._iteration + 1)
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix(temperature)[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
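# The schedule above sharpens the softmax over factor orderings as training
# progresses: temperature = 1 / (0.1 * iteration + 1) decays from 1.0 at
# iteration 0 to 0.5 at iteration 10 and 0.1 at iteration 90. A quick
# commented-out check of the schedule:
# for it in (0, 10, 50, 90):
#     print(it, 1.0 / (0.1 * it + 1))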
class TrainableFftLearnPerm(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=True,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'],
learn_perm=True)
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
def _train(self):
temperature = 1.0 / (0.3 * self._iteration + 1)
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix(temperature)
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -polished_loss_fft_learn_perm(self)}
def polish(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
loss = trainable.polish(N_LBFGS_STEPS, save_to_self_model=True)
torch.save(trainable.model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
return loss
def polish_fft(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
return loss.item()
def polish_fft_learn_perm(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
temperature = 1.0 / (0.3 * trainable._iteration + 1)
trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
return loss.item()
def polished_loss_fft_learn_perm(trainable):
model = trainable.model
polished_model = ButterflyProduct(size=model.size, complex=model.complex, fixed_order=True)
temperature = 1.0 / (0.3 * trainable._iteration + 1)
trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
preopt_loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS_VALIDATION):
optimizer.step(closure)
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
# return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else float('inf')
return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else 9999.0
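# The polishing functions above all rely on the same L-BFGS idiom: step() takes
# a closure that re-evaluates the loss, because L-BFGS performs several function
# evaluations per step. A self-contained commented-out toy version of the idiom
# (fits a parameter matrix to a fixed target; names are illustrative):
# toy_target = torch.randn(8, 8)
# toy_W = nn.Parameter(torch.zeros(8, 8))
# toy_opt = optim.LBFGS([toy_W])
# def toy_closure():
#     toy_opt.zero_grad()
#     toy_loss = nn.functional.mse_loss(toy_W, toy_target)
#     toy_loss.backward()
#     return toy_loss
# for _ in range(10):
#     toy_opt.step(toy_closure)
# print(nn.functional.mse_loss(toy_W, toy_target).item())  # should be near 0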
ex = Experiment('Fft_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.named_config
def softmax_config():
    fixed_order = False  # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.named_config
def sparsemax_config():
    fixed_order = False  # Whether the order of the factors is fixed
softmax_fn = 'sparsemax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.config
def fixed_order_config():
    fixed_order = True  # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
size = 8 # Size of matrix to factor, must be power of 2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nepochsvalid = 5 # Frequency of validation (polishing), in terms of epochs
nmaxepochs = 200 # Maximum number of epochs
result_dir = 'results' # Directory to store results
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
complex = True # Whether to use complex factorization or real factorization
@ex.capture
def fft_experiment(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Fft_factorization_{fixed_order}_{softmax_fn}_{size}',
run=TrainableFft,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_temp_annealing(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Fft_factorization_Temp_{fixed_order}_{softmax_fn}_{size}',
run=TrainableFftTempAnnealing,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_learn_perm(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Fft_factorization_Learnperm_{fixed_order}_{softmax_fn}_{size}',
run=TrainableFftLearnPerm,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
# 'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_block(trainable, size, ntrials, nsteps, nepochsvalid, result_dir, nthreads, smoke_test):
config={
'target_matrix': named_target_matrix('dft', size),
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
'n_epochs_per_validation': nepochsvalid,
'complex': True,
}
experiment = RayExperiment(
name=f'Fft_factorization_{trainable.__name__}_{size}',
run=trainable,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(result_dir, nmaxepochs, nthreads):
# experiment = fft_experiment()
# experiment = fft_experiment_temp_annealing()
# experiment = fft_experiment_learn_perm()
experiment = fft_experiment_block(TrainableFftBlock2x2)
# experiment = fft_experiment_block(TrainableFftBlockPerm)
# experiment = fft_experiment_block(TrainableFftBlockPermTranspose)
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False, early_stop_all_trials=True)
trials = [trial for trial in trials if trial.last_result is not None]
losses = [-trial.last_result.get('negative_loss', float('inf')) for trial in trials]
# Polish solutions with L-BFGS
pool = mp.Pool()
    sorted_trials = sorted(trials, key=lambda trial: -trial.last_result.get('negative_loss', float('-inf')))
# polished_losses = pool.map(polish_fft, sorted_trials[:N_TRIALS_TO_POLISH])
# polished_losses = pool.map(polish_fft_learn_perm, sorted_trials[:N_TRIALS_TO_POLISH])
polished_losses = pool.map(polish, sorted_trials[:N_TRIALS_TO_POLISH])
# polished_losses = [-trial.last_result['polished_negative_loss'] for trial in sorted_trials[:N_TRIALS_TO_POLISH]]
pool.close()
pool.join()
for i in range(min(N_TRIALS_TO_POLISH, len(trials))):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array(losses))
print(np.sort(losses))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return min(losses + polished_losses)
|
butterfly-master
|
learning_transforms/learning_fft.py
|
import os
import time
import numpy as np
import torch
from torch import nn
from butterfly_factor import butterfly_factor_mult_intermediate
from butterfly import Block2x2DiagProduct
from test_factor_multiply import twiddle_list_concat
exps = np.arange(6, 14)
sizes = 1 << exps
batch_size = 256
ntrials = [100000, 100000, 10000, 10000, 10000, 10000, 10000, 10000]
dense_times = np.zeros(exps.size)
fft_times = np.zeros(exps.size)
butterfly_times = np.zeros(exps.size)
for idx_n, (n, ntrial) in enumerate(zip(sizes, ntrials)):
print(n)
    B = Block2x2DiagProduct(n).to('cuda')
L = torch.nn.Linear(n, n, bias=False).to('cuda')
    x = torch.randn(batch_size, n, device='cuda', requires_grad=True)
grad = torch.randn_like(x)
    twiddle = twiddle_list_concat(B)
# Dense multiply
output = L(x) # Do it once to initialize cuBlas handle and such
torch.autograd.grad(output, (L.weight, x), grad)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = L(x)
torch.autograd.grad(output, (L.weight, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
dense_times[idx_n] = (end - start) / ntrial
# FFT
    output = torch.rfft(x, 1) # Do it once to initialize cuFFT plans and such
grad_fft = torch.randn_like(output)
torch.autograd.grad(output, x, grad_fft)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = torch.rfft(x, 1)
torch.autograd.grad(output, x, grad_fft)
torch.cuda.synchronize()
end = time.perf_counter()
fft_times[idx_n] = (end - start) / ntrial
# Butterfly
output = butterfly_factor_mult_intermediate(twiddle, x)
torch.autograd.grad(output, (twiddle, x), grad)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = butterfly_factor_mult_intermediate(twiddle, x)
torch.autograd.grad(output, (twiddle, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
butterfly_times[idx_n] = (end-start) / ntrial
print(dense_times)
print(fft_times)
print(butterfly_times)
print(dense_times / butterfly_times)
print(dense_times / fft_times)
data = {
'sizes': sizes,
'speedup_fft': dense_times / fft_times,
'speedup_butterfly': dense_times / butterfly_times,
}
import pickle
with open('speed_training_data.pkl', 'wb') as f:
pickle.dump(data, f)
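# A hedged commented-out alternative for the timing loops above, using CUDA
# events instead of host-side perf_counter (the synchronize + perf_counter
# pattern above is also fine; this is just another option):
# start_ev = torch.cuda.Event(enable_timing=True)
# end_ev = torch.cuda.Event(enable_timing=True)
# start_ev.record()
# for _ in range(ntrial):
#     output = L(x)
# end_ev.record()
# torch.cuda.synchronize()
# print(start_ev.elapsed_time(end_ev) / ntrial)  # milliseconds per iteration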
|
butterfly-master
|
learning_transforms/speed_test_training.py
|
"""Convert BP model from Pytorch to Numpy for inference.
To compile Cython extension: python setup.py build_ext --inplace
"""
import numpy as np
import torch
from torch import nn
from timeit import default_timer as timer
from butterfly import Block2x2DiagProduct
from ABCD_mult import ABCD_mult, ABCD_mult_inplace, ABCD_mult_inplace_memview, ABCD_mult_inplace_complex, ABCD_mult_inplace_generic
def butterfly_mul_np(ABCDs, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
output = input_
for ABCD in ABCDs[::-1]:
output = output.reshape(output.shape[:-1] + (-1, 1, 2, ABCD.shape[-1]))
output = (ABCD * output).sum(axis=-2).reshape(input_.shape)
return output
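# A commented-out shape walkthrough of the reshape trick above, for n = 8 and
# the largest factor (block size 4); the names here are illustrative:
# x = np.random.randn(3, 8).astype(np.float32)
# ABCD = np.random.randn(2, 2, 4).astype(np.float32)  # diagonals of [[A, B], [C, D]]
# blocks = x.reshape(3, -1, 1, 2, 4)                  # (batch, n_blocks, 1, 2, block)
# y = (ABCD * blocks).sum(axis=-2).reshape(3, 8)
# top, bot = x[:, :4], x[:, 4:]
# assert np.allclose(y[:, :4], ABCD[0, 0] * top + ABCD[0, 1] * bot, atol=1e-5)
# assert np.allclose(y[:, 4:], ABCD[1, 0] * top + ABCD[1, 1] * bot, atol=1e-5)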
# a = ABCD[..., 0][None]
# b = output.reshape(2048, 2, 1)
# b_cont = output.reshape(2048, 2).T
# c = output.reshape(2, 2048)
# %timeit a @ b
# %timeit a @ b_cont
# %timeit a @ c
def butterfly_mul_np_transpose(ABCDs_transpose, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy.
Parameters:
ABCDs_transpose: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
output = input_
for ABCD in ABCDs_transpose[::-1]:
# output = (output.reshape(-1, ABCD.shape[0], 1, 2) @ ABCD).reshape(input_.shape)
# output = (output.reshape(ABCD.shape[0], -1, 2) @ ABCD).reshape(input_.shape)
# output = (ABCD @ output.reshape(ABCD.shape[0], 2, -1)).reshape(input_.shape)
output = (ABCD @ output.reshape(ABCD.shape[0], 2, -1)).reshape(input_.shape)
        # start = timer()
        # [(ABCD @ output.reshape(ABCD.shape[0], 2, -1)).reshape(input_.shape) for _ in range(1000)]
        # end = timer()
        # print(end - start)
return output
def butterfly_mul_cy(ABCDs, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy + Cython.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
assert input_.dtype == np.float32
output = input_.copy()
buffer = np.empty(input_.size, dtype=np.float32)
for ABCD in ABCDs[::-1]:
output = output.reshape((-1, 2, ABCD.shape[-1]))
buffer = buffer.reshape(output.shape)
ABCD_mult(ABCD, output, buffer)
output, buffer = buffer, output
return output.reshape(input_.shape)
def butterfly_mul_cy_inplace(ABCDs, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy + Cython.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
assert input_.dtype == np.float32
output = input_.copy()
for ABCD in ABCDs[::-1]:
output = output.reshape((-1, 2, ABCD.shape[-1]))
ABCD_mult_inplace(ABCD, output)
# start = timer()
# [ABCD_mult_inplace(ABCD, output.reshape((-1, 2, ABCD.shape[-1]))) for _ in range(1000)]
# end = timer()
# print(end - start)
return output.reshape(input_.shape)
def butterfly_mul_cy_inplace_memview(ABCDs, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy + Cython.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
assert input_.dtype == np.float32
output = input_.copy()
for ABCD in ABCDs[::-1]:
output = output.reshape((-1, 2, ABCD.shape[-1]))
ABCD_mult_inplace_memview(ABCD, output)
return output.reshape(input_.shape)
def butterfly_mul_cy_inplace_index(ABCDs, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy + Cython.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
assert input_.dtype == np.float32
output = input_.copy()
func = ABCD_mult_inplace_generic[float]
# cython.floatcomplex, cython.doublecomplex
for ABCD in ABCDs[::-1]:
output = output.reshape((-1, 2, ABCD.shape[-1]))
func(ABCD, output)
return output.reshape(input_.shape)
def butterfly_mul_cy_inplace_complex(ABCDs, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy + Cython.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
assert input_.dtype == np.complex64
output = input_.copy()
for ABCD in ABCDs[::-1]:
output = output.reshape((-1, 2, ABCD.shape[-1]))
ABCD_mult_inplace_complex(ABCD, output)
return output.reshape(input_.shape)
def butterfly_mul_cy_inplace_generic(ABCDs, input_):
"""Product of block 2x2 diagonal matrices, implemented in Numpy + Cython.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
input_: input_ vector as numpy array, (batch_size, n)
"""
output = input_.copy()
for ABCD in ABCDs[::-1]:
output = output.reshape((-1, 2, ABCD.shape[-1]))
ABCD_mult_inplace_generic(ABCD, output)
return output.reshape(input_.shape)
def BP_mul_cy_inplace(ABCDs, perm, input_):
"""Product of block 2x2 diagonal matrices, with permutation, implemented in Numpy + Cython.
Parameters:
ABCDs: list of the ABCDs factors as used in Block2x2DiagProduct, in numpy array.
we accept real and complex.
perm: a permutation, (n, ) int numpy array
input_: input_ vector as numpy array, (batch_size, n)
"""
assert input_.dtype == np.float32
output = input_[..., perm]
for ABCD in ABCDs[::-1]:
output = output.reshape((-1, 2, ABCD.shape[-1]))
ABCD_mult_inplace(ABCD, output)
return output.reshape(input_.shape)
def Block2x2DiagProduct_to_ABCDs(model):
"""Convert a model of the type Block2x2DiagProduct into list of ABCDs factors,
ready for butterfly_mul_np.
"""
assert isinstance(model, Block2x2DiagProduct)
ABCDs = []
if not model.complex:
ABCDs = [factor.ABCD.detach().numpy() for factor in model.factors]
else:
ABCDs = [factor.ABCD.detach().numpy().view('complex64').squeeze(-1) for factor in model.factors]
return ABCDs
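# A commented-out correctness check of the conversion: the numpy product should
# match the Pytorch model on the same input (real case, small size):
# B_chk = Block2x2DiagProduct(8)
# x_chk = torch.randn(2, 8)
# out_np = butterfly_mul_np(Block2x2DiagProduct_to_ABCDs(B_chk), x_chk.detach().numpy())
# print(np.abs(out_np - B_chk(x_chk).detach().numpy()).max())  # should be ~1e-7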
# TODO: Turn these into tests
import os
os.environ['MKL_NUM_THREADS'] = '1'
n = 4096
batch_size = 1
x = torch.randn(batch_size, n)
B = Block2x2DiagProduct(n)
# B_matrix = B(torch.eye(n)).t().contiguous().detach().numpy()
B_matrix = B(torch.eye(n)).t().contiguous()
B_matrix_np = B_matrix.detach().numpy()
x_np = x.detach().numpy()
ABCDs = Block2x2DiagProduct_to_ABCDs(B)
# ABCDs_transpose = [a.T.copy() for a in ABCDs]
# TODO: need to transpose for correct result, right now I'm just testing speed
ABCDs_transpose = [a.transpose(2, 0, 1).copy() for a in ABCDs]
# %timeit B_matrix @ x.t()
# %timeit B_matrix_np @ x_np.T
# %timeit B(x)
# %timeit butterfly_mul_np(ABCDs, x_np)
# %timeit butterfly_mul_np_transpose(ABCDs_transpose, x_np)
# %timeit butterfly_mul_cy(ABCDs, x_np)
# %timeit butterfly_mul_cy_inplace(ABCDs, x_np)
# %timeit butterfly_mul_cy_inplace_memview(ABCDs, x_np)
# %timeit butterfly_mul_cy_inplace_index(ABCDs, x_np)
# %timeit butterfly_mul_cy_inplace_generic(ABCDs, x_np)
# %timeit np.fft.fft(x_np)
# x = torch.randn(batch_size, n, 2)
# x_np = x.detach().numpy().view('complex64').squeeze(-1)
# B = Block2x2DiagProduct(n, complex=True)
# ABCDs = Block2x2DiagProduct_to_ABCDs(B)
# %timeit B(x)
# %timeit butterfly_mul_cy_inplace_complex(ABCDs, x_np)
# %timeit butterfly_mul_cy_inplace_generic(ABCDs, x_np)
# np.abs(B(x).detach().numpy().view('complex64').squeeze(-1) - butterfly_mul_cy_inplace_complex(ABCDs, x_np)).max()
|
butterfly-master
|
learning_transforms/inference.py
|
import pickle
from pathlib import Path
import numpy as np
result_dir = 'results'
experiment_names = []
experiment_names += [[f'Hadamard_factorization_True_softmax_{size}' for size in [8, 16, 32, 64, 128, 256]]]
experiment_names += [[f'Hadamard_factorization_False_softmax_{size}' for size in [8, 16, 32, 64, 128, 256]]]
experiment_names += [[f'Hadamard_factorization_False_sparsemax_{size}' for size in [8, 16, 32, 64, 128, 256]]]
experiment_names += [[f'Fft_factorization_True_softmax_{size}' for size in [8, 16, 32, 64, 128, 256]]]
experiment_names += [[f'Fft_factorization_False_softmax_{size}' for size in [8, 16, 32, 64, 128]]]
experiment_names += [[f'Fft_factorization_False_sparsemax_{size}' for size in [8, 16, 32, 64, 128]]]
experiment_names += [[f'Fft_factorization_sparsemax_no_perm_{size}' for size in [8, 16, 32]]]
experiment_names += [[f'Fft_factorization_softmax_no_perm_{size}' for size in [8, 16, 32]]]
experiment_names += [[f'Randn_factorization_softmax_no_perm_{size}' for size in [8, 16, 32]]]
experiment_names += [[f'Fft_factorization_sparsemax_perm_front_{size}' for size in [8, 16, 32]]]
experiment_names += [[f'Dct_factorization_True_softmax_{size}' for size in [8, 16, 32, 64, 128]]]
experiment_names += [[f'Dct_factorization_False_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Dct_factorization_False_sparsemax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Dct_factorization_block_perm_one_extra_{size}' for size in [8, 16, 32, 64, 128]]]
experiment_names += [[f'LegendreEval_factorization_real_True_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'LegendreEval_factorization_real_False_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'LegendreEval_factorization_real_False_sparsemax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'LegendreEval_factorization_complex_True_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'LegendreEval_factorization_complex_False_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'LegendreEval_factorization_complex_False_sparsemax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Circulant_factorization_real_True_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Circulant_factorization_real_False_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Circulant_factorization_real_False_sparsemax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Circulant_factorization_complex_True_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Circulant_factorization_complex_False_softmax_{size}' for size in [8, 16, 32, 64]]]
experiment_names += [[f'Circulant_factorization_complex_False_sparsemax_{size}' for size in [8, 16, 32, 64]]]
for experiment_names_ in experiment_names:
print(experiment_names_[0])
best_loss = []
best_polished_loss = []
for experiment_name in experiment_names_:
checkpoint_path = Path(result_dir) / experiment_name / 'trial.pkl'
with checkpoint_path.open('rb') as f:
trials = pickle.load(f)
losses = [-trial.last_result['negative_loss'] for trial in trials]
polished_losses = [-trial.last_result.get('polished_negative_loss', float('-inf')) for trial in trials]
# best_loss.append(min(losses))
        best_loss.append(np.sort(losses)[0])  # min() would return NaN if any trial diverged; np.sort pushes NaNs to the end
        best_polished_loss.append(np.sort(polished_losses)[0])  # same NaN-safe min
# print(np.array(losses))
# print(np.sort(losses))
# best_trial = max(trials, key=lambda trial: trial.last_result['negative_loss'])
# train_model = best_trial._get_trainable_cls()(best_trial.config)
# train_model = TrainableHadamardFactorFixedOrder(best_trial.config)
# train_model = TrainableHadamardFactorSoftmax(best_trial.config)
# train_model = TrainableHadamardFactorSparsemax(best_trial.config)
# train_model.restore(str(Path(best_trial.logdir) / best_trial._checkpoint.value))
# model = train_model.model
best_rmse = np.sqrt(best_loss)
print(best_rmse)
print(np.sqrt(best_polished_loss))
|
butterfly-master
|
learning_transforms/fft_hadamard_analysis.py
|
import math
import operator
import functools
import torch
from torch import nn
from butterfly.complex_utils import real_to_complex, complex_mul, complex_matmul
from sparsemax import sparsemax
from butterfly.utils import bitreversal_permutation
from butterfly_factor import butterfly_factor_mult, butterfly_factor_mult_intermediate
from permutation_factor import permutation_factor_even_odd_mult, permutation_factor_reverse_mult
def sinkhorn(logit, n_iters=5):
"""Sinkhorn iterations.
Parameters:
logit: (..., n, n)
n_iters: integer
Return:
(..., n, n) matrix that's close to a doubly stochastic matrix.
"""
assert logit.dim() >= 2, 'logit must be at least a 2D tensor'
assert logit.shape[-2] == logit.shape[-1], 'logit must be a square matrix'
for _ in range(n_iters):
logit = logit - torch.logsumexp(logit, dim=-1, keepdim=True)
logit = logit - torch.logsumexp(logit, dim=-2, keepdim=True)
return torch.exp(logit)
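# A quick commented-out sanity check: after a few iterations the rows and
# columns of the Sinkhorn output should each sum to approximately 1, and a hard
# permutation can be read off with torch.argmax(P, dim=-1), which is how the
# learned-permutation polishing code in this repo extracts one.
# P = sinkhorn(torch.randn(4, 4), n_iters=20)
# print(P.sum(dim=-1))  # ~[1, 1, 1, 1]
# print(P.sum(dim=-2))  # ~[1, 1, 1, 1]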
class Butterfly(nn.Module):
"""Butterfly matrix of size n x n where only the diagonal and the k-th
subdiagonal and superdiagonal are nonzero.
"""
def __init__(self, size, diagonal=1, complex=False, diag=None, subdiag=None, superdiag=None):
"""A butterfly matrix where only the diagonal and the k-th subdiagonal
and superdiagonal are nonzero.
Parameters:
size: size of butterfly matrix
diagonal: the k-th subdiagonal and superdiagonal that are nonzero.
complex: real or complex matrix
diag: initialization for the diagonal
subdiag: initialization for the subdiagonal
superdiag: initialization for the superdiagonal
"""
super().__init__()
assert size > diagonal, 'size must be larger than diagonal'
self.size = size
self.diagonal = diagonal
self.complex = complex
self.mul_op = complex_mul if complex else operator.mul
diag_shape = (size, 2) if complex else (size, )
superdiag_shape = subdiag_shape = (size - diagonal, 2) if complex else (size - diagonal,)
if diag is None:
self.diag = nn.Parameter(torch.randn(diag_shape))
# self.diag = nn.Parameter(torch.ones(diag_shape))
else:
assert diag.shape == diag_shape, f'diag must have shape {diag_shape}'
self.diag = diag
if subdiag is None:
self.subdiag = nn.Parameter(torch.randn(subdiag_shape))
# self.subdiag = nn.Parameter(torch.ones(subdiag_shape))
else:
assert subdiag.shape == subdiag_shape, f'subdiag must have shape {subdiag_shape}'
self.subdiag = subdiag
if superdiag is None:
self.superdiag = nn.Parameter(torch.randn(superdiag_shape))
# self.superdiag = nn.Parameter(torch.ones(superdiag_shape))
else:
assert superdiag.shape == superdiag_shape, f'superdiag must have shape {superdiag_shape}'
self.superdiag = superdiag
def matrix(self):
"""Matrix form of the butterfly matrix
"""
if not self.complex:
return (torch.diag(self.diag)
+ torch.diag(self.subdiag, -self.diagonal)
+ torch.diag(self.superdiag, self.diagonal))
else: # Use torch.diag_embed (available in Pytorch 1.0) to deal with complex case.
return (torch.diag_embed(self.diag.t(), dim1=0, dim2=1)
+ torch.diag_embed(self.subdiag.t(), -self.diagonal, dim1=0, dim2=1)
+ torch.diag_embed(self.superdiag.t(), self.diagonal, dim1=0, dim2=1))
def forward(self, input):
"""
Parameters:
input: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
if not self.complex:
output = self.diag * input
output[..., self.diagonal:] += self.subdiag * input[..., :-self.diagonal]
output[..., :-self.diagonal] += self.superdiag * input[..., self.diagonal:]
else:
output = self.mul_op(self.diag, input)
output[..., self.diagonal:, :] += self.mul_op(self.subdiag, input[..., :-self.diagonal, :])
output[..., :-self.diagonal, :] += self.mul_op(self.superdiag, input[..., self.diagonal:, :])
# assert torch.allclose(output, input @ self.matrix().t())
return output
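# A commented-out check that forward agrees with the dense matrix() form in the
# real case (the same identity the assert above verifies):
# b = Butterfly(size=8, diagonal=2)
# x = torch.randn(3, 8)
# print(torch.allclose(b(x), x @ b.matrix().t(), atol=1e-6))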
class MatrixProduct(nn.Module):
"""Product of matrices. The order are chosen by softmaxes, which are learnable.
Each factor matrix must implement .matrix() function.
"""
def __init__(self, factors, n_terms=None, complex=False, fixed_order=False, softmax_fn='softmax'):
super().__init__()
self.factors = nn.ModuleList(factors)
if n_terms is None:
n_terms = len(factors)
self.n_terms = n_terms
self.complex = complex
self.matmul_op = complex_matmul if complex else operator.matmul
self.fixed_order = fixed_order
if not self.fixed_order:
assert softmax_fn in ['softmax', 'sparsemax']
self.logit = nn.Parameter(torch.randn((self.n_terms, len(factors))))
if softmax_fn == 'softmax':
self.softmax_fn = lambda logit: nn.functional.softmax(logit, dim=-1)
else:
self.softmax_fn = sparsemax
def matrix(self, temperature=1.0):
if self.fixed_order:
matrices = [factor.matrix() for factor in self.factors]
return functools.reduce(self.matmul_op, matrices)
else:
prob = self.softmax_fn(self.logit / temperature)
stack = torch.stack([factor.matrix() for factor in self.factors])
matrices = (prob @ stack.reshape(stack.shape[0], -1)).reshape((-1,) + stack.shape[1:])
# Alternative: slightly slower but easier to understand
# matrices = torch.einsum('ab, b...->a...', (prob, stack))
# return torch.chain_matmul(*matrices) ## Doesn't work for complex
return functools.reduce(self.matmul_op, matrices)
def forward(self, input, temperature=1.0):
"""
Parameters:
input: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
if self.fixed_order:
output = input
for factor in self.factors[::-1]:
output = factor(output)
return output
else:
prob = self.softmax_fn(self.logit / temperature)
output = input
for i in range(self.n_terms)[::-1]:
# output = (torch.stack([factor(output) for factor in self.factors], dim=-1) * prob[i]).sum(dim=-1)
stack = torch.stack([factor(output) for factor in self.factors])
output = (prob[i:i+1] @ stack.reshape(stack.shape[0], -1)).reshape(stack.shape[1:])
return output
class ButterflyProduct(MatrixProduct):
"""Product of butterfly matrices. The order are chosen by softmaxes, which
are learnable.
"""
def __init__(self, size, n_terms=None, complex=False, fixed_order=False, softmax_fn='softmax', learn_perm=False):
m = int(math.log2(size))
assert size == 1 << m, "size must be a power of 2"
self.size = size
factors = [Butterfly(size, diagonal=1 << i, complex=complex) for i in range(m)[::-1]]
super().__init__(factors, n_terms, complex, fixed_order, softmax_fn)
self.learn_perm = learn_perm
if learn_perm:
self.perm_logit = nn.Parameter(torch.randn((size, size)))
def matrix(self, temperature=1.0):
matrix = super().matrix(temperature)
if self.learn_perm:
perm = sinkhorn(self.perm_logit / temperature)
if not self.complex:
matrix = matrix @ perm
else:
matrix = (matrix.transpose(-1, -2) @ perm).transpose(-1, -2)
return matrix
def forward(self, input, temperature=1.0):
"""
Parameters:
input: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
if self.learn_perm:
perm = sinkhorn(self.perm_logit / temperature)
if not self.complex:
input = input @ perm.t()
else:
input = (input.transpose(-1, -2) @ perm.t()).transpose(-1, -2)
return super().forward(input, temperature)
class Block2x2Diag(nn.Module):
"""Block matrix of size n x n of the form [[A, B], [C, D]] where each of A, B,
    C, D is diagonal. This means that only the diagonal and the n//2-th
subdiagonal and superdiagonal are nonzero.
"""
def __init__(self, size, complex=False, ABCD=None, ortho_init=False):
"""
Parameters:
size: size of butterfly matrix
complex: real or complex matrix
ABCD: block of [[A, B], [C, D]], of shape (2, 2, size//2) if real or (2, 2, size//2, 2) if complex
ortho_init: whether the twiddle factors are initialized to be orthogonal (real) or unitary (complex)
"""
super().__init__()
assert size % 2 == 0, 'size must be even'
self.size = size
self.complex = complex
self.mul_op = complex_mul if complex else operator.mul
ABCD_shape = (2, 2, size // 2) if not complex else (2, 2, size // 2, 2)
scaling = 1.0 / 2 if complex else 1.0 / math.sqrt(2)
if ABCD is None:
if not ortho_init:
self.ABCD = nn.Parameter(torch.randn(ABCD_shape) * scaling)
else:
if not complex:
theta = torch.rand(size // 2) * math.pi * 2
c, s = torch.cos(theta), torch.sin(theta)
det = torch.randint(0, 2, (size // 2, ), dtype=c.dtype) * 2 - 1 # Rotation (+1) or reflection (-1)
self.ABCD = nn.Parameter(torch.stack((torch.stack((det * c, -det * s)),
torch.stack((s, c)))))
else:
# Sampling from the Haar measure on U(2) is a bit subtle.
# Using the parameterization here: http://home.lu.lv/~sd20008/papers/essays/Random%20unitary%20[paper].pdf
phi = torch.asin(torch.sqrt(torch.rand(size // 2)))
c, s = torch.cos(phi), torch.sin(phi)
alpha, psi, chi = torch.randn(3, size // 2) * math.pi * 2
A = torch.stack((c * torch.cos(alpha + psi), c * torch.sin(alpha + psi)), dim=-1)
B = torch.stack((s * torch.cos(alpha + chi), s * torch.sin(alpha + chi)), dim=-1)
C = torch.stack((-s * torch.cos(alpha - chi), -s * torch.sin(alpha - chi)), dim=-1)
D = torch.stack((c * torch.cos(alpha - psi), c * torch.sin(alpha - psi)), dim=-1)
self.ABCD = nn.Parameter(torch.stack((torch.stack((A, B)),
torch.stack((C, D)))))
else:
assert ABCD.shape == ABCD_shape, f'ABCD must have shape {ABCD_shape}'
self.ABCD = ABCD
def forward(self, input):
"""
Parameters:
input: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
if not self.complex:
# return ((self.ABCD * input.view(input.shape[:-1] + (1, 2, self.size // 2))).sum(dim=-2)).view(input.shape)
return butterfly_factor_mult(self.ABCD, input.view(-1, 2, self.size // 2)).view(input.shape)
else:
# return (self.mul_op(self.ABCD, input.view(input.shape[:-2] + (1, 2, self.size // 2, 2))).sum(dim=-3)).view(input.shape)
return butterfly_factor_mult(self.ABCD, input.view(-1, 2, self.size // 2, 2)).view(input.shape)
class Block2x2DiagProduct(nn.Module):
"""Product of block 2x2 diagonal matrices.
"""
def __init__(self, size, complex=False, decreasing_size=True, ortho_init=False):
super().__init__()
m = int(math.log2(size))
assert size == 1 << m, "size must be a power of 2"
self.size = size
self.complex = complex
sizes = [size >> i for i in range(m)] if decreasing_size else [size >> i for i in range(m)[::-1]]
self.factors = nn.ModuleList([Block2x2Diag(size_, complex=complex, ortho_init=ortho_init) for size_ in sizes])
def forward(self, input):
"""
Parameters:
input: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
output = input.contiguous()
for factor in self.factors[::-1]:
if not self.complex:
output = factor(output.view(output.shape[:-1] + (-1, factor.size))).view(output.shape)
else:
output = factor(output.view(output.shape[:-2] + (-1, factor.size, 2))).view(output.shape)
return output
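# Hedged sketch (assumes the compiled butterfly extension is available): the product
# acts linearly on the rows of its input, so applying it to the identity recovers
# the dense matrix it represents, which a matvec can then be checked against.
def _example_block2x2diagproduct_dense():
    torch.manual_seed(0)
    size = 8
    model = Block2x2DiagProduct(size)
    with torch.no_grad():
        dense = model(torch.eye(size))  # row i is model(e_i)
        x = torch.randn(3, size)
        assert torch.allclose(model(x), x @ dense, atol=1e-6)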
class Block2x2DiagProductAllinOne(nn.Module):
"""Product of block 2x2 diagonal matrices.
"""
def __init__(self, size, rank=1, complex=False, twiddle=None, ortho_init=False):
super().__init__()
m = int(math.log2(size))
assert size == 1 << m, "size must be a power of 2"
self.size = size
self.rank = rank
self.complex = complex
twiddle_shape = (rank, size - 1, 2, 2) if not complex else (rank, size - 1, 2, 2, 2)
scaling = 1.0 / 2 if complex else 1.0 / math.sqrt(2)
if twiddle is None:
if not ortho_init:
self.twiddle = nn.Parameter(torch.randn(twiddle_shape) * scaling)
else:
if not complex:
theta = torch.rand(rank, size - 1) * math.pi * 2
c, s = torch.cos(theta), torch.sin(theta)
det = torch.randint(0, 2, (rank, size - 1), dtype=c.dtype) * 2 - 1 # Rotation (+1) or reflection (-1)
self.twiddle = nn.Parameter(torch.stack((torch.stack((det * c, -det * s), dim=-1),
torch.stack((s, c), dim=-1)), dim=-1))
else:
# Sampling from the Haar measure on U(2) is a bit subtle.
# Using the parameterization here: http://home.lu.lv/~sd20008/papers/essays/Random%20unitary%20[paper].pdf
phi = torch.asin(torch.sqrt(torch.rand(rank, size - 1)))
c, s = torch.cos(phi), torch.sin(phi)
alpha, psi, chi = torch.randn(3, rank, size - 1) * math.pi * 2
A = torch.stack((c * torch.cos(alpha + psi), c * torch.sin(alpha + psi)), dim=-1)
B = torch.stack((s * torch.cos(alpha + chi), s * torch.sin(alpha + chi)), dim=-1)
C = torch.stack((-s * torch.cos(alpha - chi), -s * torch.sin(alpha - chi)), dim=-1)
D = torch.stack((c * torch.cos(alpha - psi), c * torch.sin(alpha - psi)), dim=-1)
self.twiddle = nn.Parameter(torch.stack((torch.stack((A, B), dim=-1),
torch.stack((C, D), dim=-1)), dim=-1))
else:
assert twiddle.shape == twiddle_shape, f'twiddle must have shape {twiddle_shape}'
self.twiddle = twiddle
def forward(self, input):
"""
Parameters:
input: (batch, size) if real or (batch, size, 2) if complex
Return:
output: (batch, rank * size) if real or (batch, rank * size, 2) if complex
"""
        output_shape = (input.shape[0], self.rank * input.shape[1]) if not self.complex else (input.shape[0], self.rank * input.shape[1], 2)
return butterfly_factor_mult_intermediate(self.twiddle, input).view(output_shape)
class Block2x2DiagRectangular(nn.Module):
"""Block matrix of size k n x k n of the form [[A, B], [C, D]] where each of A, B,
C, D are diagonal. This means that only the diagonal and the n//2-th
subdiagonal and superdiagonal are nonzero.
"""
def __init__(self, size, stack=1, complex=False, ABCD=None, n_blocks=1, tied_weight=True):
"""
Parameters:
            size: size of each block matrix; input has shape (stack, ..., size)
            stack: number of stacked components; output has shape (stack, ..., size)
complex: real or complex matrix
ABCD: block of [[A, B], [C, D]], of shape (stack, 2, 2, size//2) if real or (stack, 2, 2, size//2, 2) if complex
n_blocks: number of such blocks of ABCD
tied_weight: whether the weights ABCD at different blocks are tied to be the same.
"""
super().__init__()
assert size % 2 == 0, 'size must be even'
self.size = size
self.stack = stack
self.complex = complex
self.n_blocks = n_blocks
self.tied_weight = tied_weight
if tied_weight:
ABCD_shape = (stack, 2, 2, size // 2) if not complex else (stack, 2, 2, size // 2, 2)
else:
ABCD_shape = (stack, n_blocks, 2, 2, size // 2) if not complex else (stack, n_blocks, 2, 2, size // 2, 2)
scaling = 1.0 / 2 if complex else 1.0 / math.sqrt(2)
if ABCD is None:
self.ABCD = nn.Parameter(torch.randn(ABCD_shape) * scaling)
else:
assert ABCD.shape == ABCD_shape, f'ABCD must have shape {ABCD_shape}'
self.ABCD = ABCD
def forward(self, input):
"""
Parameters:
input: (stack, ..., size) if real or (stack, ..., size, 2) if complex
if not tied_weight: (stack, n_blocks, ..., size) if real or (stack, n_blocks, ..., size, 2) if complex
Return:
output: (stack, ..., size) if real or (stack, ..., size, 2) if complex
if not tied_weight: (stack, n_blocks, ..., size) if real or (stack, n_blocks, ..., size, 2) if complex
"""
if self.tied_weight:
if not self.complex:
return (self.ABCD.unsqueeze(1) * input.view(self.stack, -1, 1, 2, self.size // 2)).sum(dim=-2).view(input.shape)
else:
return complex_mul(self.ABCD.unsqueeze(1), input.view(self.stack, -1, 1, 2, self.size // 2, 2)).sum(dim=-3).view(input.shape)
else:
if not self.complex:
return (self.ABCD.unsqueeze(2) * input.view(self.stack, self.n_blocks, -1, 1, 2, self.size // 2)).sum(dim=-2).view(input.shape)
else:
return complex_mul(self.ABCD.unsqueeze(2), input.view(self.stack, self.n_blocks, -1, 1, 2, self.size // 2, 2)).sum(dim=-3).view(input.shape)
class Block2x2DiagProductRectangular(nn.Module):
"""Product of block 2x2 diagonal matrices.
"""
def __init__(self, in_size, out_size, complex=False, decreasing_size=True, tied_weight=True, bias=True):
super().__init__()
self.in_size = in_size
m = int(math.ceil(math.log2(in_size)))
self.in_size_extended = 1 << m # Will zero-pad input if in_size is not a power of 2
self.out_size = out_size
self.stack = int(math.ceil(out_size / self.in_size_extended))
self.complex = complex
self.tied_weight = tied_weight
in_sizes = [self.in_size_extended >> i for i in range(m)] if decreasing_size else [self.in_size_extended >> i for i in range(m)[::-1]]
if tied_weight:
self.factors = nn.ModuleList([Block2x2DiagRectangular(in_size_, stack=self.stack, complex=complex)
for in_size_ in in_sizes])
else:
self.factors = nn.ModuleList([Block2x2DiagRectangular(in_size_, stack=self.stack, complex=complex, n_blocks=self.in_size_extended // in_size_, tied_weight=tied_weight)
for in_size_ in in_sizes])
if bias:
if not self.complex:
self.bias = nn.Parameter(torch.Tensor(out_size))
else:
self.bias = nn.Parameter(torch.Tensor(out_size, 2))
self.reset_parameters()
def reset_parameters(self):
"""Initialize bias the same way as torch.nn.Linear."""
if hasattr(self, 'bias'):
bound = 1 / math.sqrt(self.in_size)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
Parameters:
input: (..., in_size) if real or (..., in_size, 2) if complex
Return:
output: (..., out_size) if real or (..., out_size, 2) if complex
"""
output = input.contiguous()
if self.in_size != self.in_size_extended: # Zero-pad
if not self.complex:
output = torch.cat((output, torch.zeros(output.shape[:-1] + (self.in_size_extended - self.in_size, ), dtype=output.dtype, device=output.device)), dim=-1)
else:
output = torch.cat((output, torch.zeros(output.shape[:-2] + (self.in_size_extended - self.in_size, 2), dtype=output.dtype, device=output.device)), dim=-2)
output = output.unsqueeze(0).expand((self.stack, ) + output.shape)
for factor in self.factors[::-1]:
if not self.complex:
output = factor(output.view(output.shape[:-1] + (-1, factor.size))).view(output.shape)
else:
output = factor(output.view(output.shape[:-2] + (-1, factor.size, 2))).view(output.shape)
if not self.complex:
output = output.permute(tuple(range(1, output.dim() - 1)) + (0, -1)).reshape(input.shape[:-1] + (self.stack * self.in_size_extended, ))[..., :self.out_size]
else:
output = output.permute(tuple(range(1, output.dim() - 2)) + (0, -2, -1)).reshape(input.shape[:-2] + (self.stack * self.in_size_extended, 2))[..., :self.out_size, :]
if hasattr(self, 'bias'):
output += self.bias
return output
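# Hedged sketch (plain PyTorch ops only): Block2x2DiagProductRectangular as a
# structured stand-in for nn.Linear; in_size is zero-padded up to the next power
# of 2 and stacked copies cover out_size, so arbitrary shapes are accepted.
def _example_rectangular_as_linear():
    torch.manual_seed(0)
    layer = Block2x2DiagProductRectangular(in_size=10, out_size=24)
    x = torch.randn(5, 10)
    assert layer(x).shape == (5, 24)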
class Block2x2DiagBmm(nn.Module):
"""Block matrix of size n x n of the form [[A, B], [C, D]] where each of A, B,
C, D are diagonal. This means that only the diagonal and the n//2-th
subdiagonal and superdiagonal are nonzero.
"""
def __init__(self, size, complex=False, ABCD=None):
"""
Parameters:
size: size of butterfly matrix
complex: real or complex matrix
ABCD: block of [[A, B], [C, D]], of shape (2, 2, size//2) if real or (2, 2, size//2, 2) if complex
"""
super().__init__()
assert size % 2 == 0, 'size must be even'
self.size = size
self.complex = complex
self.mul_op = complex_mul if complex else operator.mul
ABCD_shape = (size // 2, 2, 2) if not complex else (2, 2, size // 2, 2)
scaling = 1.0 / 2 if complex else 1.0 / math.sqrt(2)
if ABCD is None:
self.ABCD = nn.Parameter(torch.randn(ABCD_shape) * scaling)
else:
assert ABCD.shape == ABCD_shape, f'ABCD must have shape {ABCD_shape}'
self.ABCD = ABCD
def forward(self, input):
"""
Parameters:
input: (size, batch_size) if real or (size, batch_size, 2) if complex
Return:
output: (size, batch_size) if real or (size, batch_size, 2) if complex
"""
if not self.complex:
# return ((self.ABCD * input.view(input.shape[:-1] + (1, 2, self.size // 2))).sum(dim=-2)).view(input.shape)
# return butterfly_factor_mult(self.ABCD, input.view(-1, 2, self.size // 2)).view(input.shape)
return (self.ABCD @ input.view(self.size // 2, 2, -1)).view(input.shape)
else:
# return (self.mul_op(self.ABCD, input.view(input.shape[:-2] + (1, 2, self.size // 2, 2))).sum(dim=-3)).view(input.shape)
return butterfly_factor_mult(self.ABCD, input.view(-1, 2, self.size // 2, 2)).view(input.shape)
class Block2x2DiagProductBmm(nn.Module):
"""Product of block 2x2 diagonal matrices.
"""
def __init__(self, size, complex=False, decreasing_size=True):
super().__init__()
m = int(math.log2(size))
assert size == 1 << m, "size must be a power of 2"
self.size = size
self.complex = complex
sizes = [size >> i for i in range(m)] if decreasing_size else [size >> i for i in range(m)[::-1]]
self.factors = nn.ModuleList([Block2x2DiagBmm(size_, complex=complex) for size_ in sizes])
self.br_perm = bitreversal_permutation(size)
def forward(self, input):
"""
Parameters:
            input: (batch, size) if real or (batch, size, 2) if complex
        Return:
            output: (batch, size) if real or (batch, size, 2) if complex
"""
output = input.t()[self.br_perm]
for factor in self.factors[::-1]:
if not self.complex:
output = factor(output.view((factor.size, -1))).view(output.shape)
else:
output = factor(output.view(output.shape[:-2] + (-1, factor.size, 2))).view(output.shape)
return output[self.br_perm].t()
class BlockPerm(nn.Module):
"""Block permutation matrix of size n x n.
"""
def __init__(self, size, logit=None, complex=False):
"""
Parameters:
            size: size of permutation matrix
            logit: (3, ) nn.Parameter, containing logits for the probability of
                separating even and odd indices (logit[0]), of reversing the
                first half (logit[1]), and of reversing the second half
                (logit[2]).
            complex: real or complex input
"""
super().__init__()
assert size % 2 == 0, 'size must be even'
self.size = size
self.complex = complex
if logit is None:
self.logit = nn.Parameter(torch.randn(3))
else:
self.logit = logit
self.reverse_perm = nn.Parameter(torch.arange(self.size // 2 - 1, -1, -1), requires_grad=False)
def forward(self, input):
"""
Parameters:
input: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
prob = torch.sigmoid(self.logit)
output = input
if not self.complex:
# There's a lot of complicated logic here buried under the reshape's and unsqueeze's and so on
# First step: weighted mean of identity permutation and permutation that yields [even, odd]
# output = ((1 - prob[0]) * output.view(-1, 2, self.size // 2) + prob[0] * output.view(-1, self.size // 2, 2).transpose(-1, -2)).view(-1, self.size)
output = permutation_factor_even_odd_mult(prob[:1], output.view(-1, self.size))
# output = output.view(-1, 2, self.size // 2)
# Second step: weighted mean of identity permutation and permutation that reverses the first and the second half
# output = output.reshape(output.shape[:-1] + (2, self.size // 2))
# output = (((1 - prob[1:]).unsqueeze(-1) * output + prob[1:].unsqueeze(-1) * output.flip(-1))).reshape(output.shape[:-2] + (self.size, ))
# output = (((1 - prob[1:]).unsqueeze(-1) * output + prob[1:].unsqueeze(-1) * output[..., self.reverse_perm])).reshape(output.shape[:-2] + (self.size, ))
output = permutation_factor_reverse_mult(prob[1:], output)
# output = output.reshape(input.shape)
else:
# output = (1 - prob[0]) * output.reshape(output.shape[:-2] + (2, self.size // 2, 2)) + prob[0] * output.reshape(output.shape[:-2] + (self.size // 2, 2, 2)).transpose(-2, -3)
output = permutation_factor_even_odd_mult(prob[:1], output.view(-1, self.size))
# output = (((1 - prob[1:]).unsqueeze(-1).unsqueeze(-1) * output + prob[1:].unsqueeze(-1).unsqueeze(-1) * output.flip(-2))).reshape(output.shape[:-3] + (self.size, 2))
output = permutation_factor_reverse_mult(prob[1:], output)
return output.view(input.shape)
def argmax(self):
"""
Return:
p: (self.size, ) array of int, the most probable permutation.
"""
logit = nn.Parameter(torch.where(self.logit >= 0, torch.tensor(float('inf'), device=self.logit.device), torch.tensor(float('-inf'), device=self.logit.device)))
argmax_instance = self.__class__(self.size, logit, complex=False)
p = argmax_instance.forward(torch.arange(self.size, dtype=torch.float, device=self.logit.device)).round().long()
return p
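# Hedged sketch (assumes the compiled permutation_factor ops are available):
# argmax() hardens the three relaxed decisions at sigmoid(logit) >= 0.5 and reads
# off the resulting index vector, which is always a valid permutation.
def _example_blockperm_argmax():
    torch.manual_seed(0)
    p = BlockPerm(8).argmax()
    assert sorted(p.tolist()) == list(range(8))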
class BlockPermProduct(nn.Module):
"""Product of block permutation matrices.
"""
def __init__(self, size, complex=False, share_logit=False, increasing_size=True):
super().__init__()
m = int(math.log2(size))
assert size == 1 << m, "size must be a power of 2"
self.size = size
self.complex = complex
self.share_logit = share_logit
# We don't need the permutation with size 2 since it's always the identity
sizes = [size >> i for i in range(m - 1)[::-1]] if increasing_size else [size >> i for i in range(m - 1)]
if share_logit:
self.logit = nn.Parameter(torch.randn(3))
self.factors = nn.ModuleList([BlockPerm(size_, self.logit, complex=complex) for size_ in sizes])
else:
self.factors = nn.ModuleList([BlockPerm(size_, complex=complex) for size_ in sizes])
def forward(self, input):
"""
Parameters:
input: (..., size) if real or (..., size, 2) if complex
Return:
output: (..., size) if real or (..., size, 2) if complex
"""
output = input.contiguous()
for factor in self.factors[::-1]:
if not self.complex:
output = factor(output.view(output.shape[:-1] + (-1, factor.size))).view(output.shape)
else:
output = factor(output.view(output.shape[:-2] + (-1, factor.size, 2))).view(output.shape)
return output
def argmax(self):
"""
Return:
p: (self.size, ) array of int, the most probable permutation.
"""
p = torch.arange(self.size, device=self.factors[0].logit.device)
for factor in self.factors[::-1]:
p = p.reshape(-1, factor.size)[:, factor.argmax()].reshape(self.size)
return p
class FixedPermutation(nn.Module):
def __init__(self, permutation, complex=False):
"""Fixed permutation. Used to store argmax of BlockPerm and BlockPermProduct.
Parameter:
permutation: (n, ) tensor of ints
"""
super().__init__()
self.permutation = nn.Parameter(permutation, requires_grad=False)
self.complex = complex
def forward(self, input):
return input[..., self.permutation] if not self.complex else input[..., self.permutation, :]
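# Hedged sketch (assumes the compiled permutation_factor ops are available): after
# training, a BlockPermProduct can be frozen into a FixedPermutation so inference
# only performs a single gather.
def _example_freeze_permutation():
    torch.manual_seed(0)
    prod = BlockPermProduct(8)
    fixed = FixedPermutation(prod.argmax())
    x = torch.randn(3, 8)
    assert torch.allclose(fixed(x), x[:, fixed.permutation])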
def test_butterfly():
size = 4
diag = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5]], dtype=torch.float)
subdiag = torch.tensor([[11, 12], [12, 13], [13, 14]], dtype=torch.float)
model = Butterfly(size, diagonal=1, complex=True, diag=diag, subdiag=subdiag, superdiag=subdiag)
matrix_real = torch.tensor([[ 1., 11., 0., 0.],
[11., 2., 12., 0.],
[ 0., 12., 3., 13.],
[ 0., 0., 13., 4.]])
matrix_imag = torch.tensor([[ 2., 12., 0., 0.],
[12., 3., 13., 0.],
[ 0., 13., 4., 14.],
[ 0., 0., 14., 5.]])
assert torch.allclose(model.matrix()[..., 0], matrix_real)
assert torch.allclose(model.matrix()[..., 1], matrix_imag)
batch_size = 3
x = torch.randn((batch_size, size, 2))
prod = torch.stack((x[..., 0] @ matrix_real.t() - x[..., 1] @ matrix_imag.t(),
x[..., 0] @ matrix_imag.t() + x[..., 1] @ matrix_real.t()), dim=-1)
    assert torch.allclose(model.forward(x), prod)
    assert torch.allclose(model.forward(x), complex_matmul(x, model.matrix().transpose(0, 1)))
def test_butterfly_product():
size = 4
model = ButterflyProduct(size, complex=True)
model.logit = nn.Parameter(torch.tensor([[1.0, float('-inf')], [float('-inf'), 1.0]]))
assert torch.allclose(model.matrix(),
complex_matmul(model.factors[0].matrix(), model.factors[1].matrix()))
batch_size = 3
x = torch.randn((batch_size, size, 2))
assert torch.allclose(model.forward(x), complex_matmul(x, model.matrix().transpose(0, 1)))
def test_butterfly_fft():
# DFT matrix for n = 4
size = 4
DFT = torch.fft(real_to_complex(torch.eye(size)), 1)
P = real_to_complex(torch.tensor([[1., 0., 0., 0.],
[0., 0., 1., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.]]))
M0 = Butterfly(size,
diagonal=2,
complex=True,
diag=torch.tensor([[1.0, 0.0], [1.0, 0.0], [-1.0, 0.0], [0.0, 1.0]], requires_grad=True),
subdiag=torch.tensor([[1.0, 0.0], [1.0, 0.0]], requires_grad=True),
superdiag=torch.tensor([[1.0, 0.0], [0.0, -1.0]], requires_grad=True))
M1 = Butterfly(size,
diagonal=1,
complex=True,
diag=torch.tensor([[1.0, 0.0], [-1.0, 0.0], [1.0, 0.0], [-1.0, 0.0]], requires_grad=True),
subdiag=torch.tensor([[1.0, 0.0], [0.0, 0.0], [1.0, 0.0]], requires_grad=True),
superdiag=torch.tensor([[1.0, 0.0], [0.0, 0.0], [1.0, 0.0]], requires_grad=True))
assert torch.allclose(complex_matmul(M0.matrix(), complex_matmul(M1.matrix(), P)), DFT)
br_perm = torch.tensor(bitreversal_permutation(size))
assert torch.allclose(complex_matmul(M0.matrix(), M1.matrix())[:, br_perm], DFT)
D = complex_matmul(DFT, P.transpose(0, 1))
assert torch.allclose(complex_matmul(M0.matrix(), M1.matrix()), D)
def test_butterfly_dct():
from scipy.fftpack import dct
# DCT matrix for n = 4
size = 4
# Need to transpose as dct acts on rows of matrix np.eye, not columns
DCT = torch.tensor(dct(np.eye(size)).T, dtype=torch.float)
    M0diag = torch.tensor([[1.0, 0.0], [1.0, 0.0], [-1.0, 0.0], [0.0, 1.0]], requires_grad=True)
    M0subdiag = torch.tensor([[1.0, 0.0], [1.0, 0.0]], requires_grad=True)
    M0superdiag = torch.tensor([[1.0, 0.0], [0.0, -1.0]], requires_grad=True)
M0 = Butterfly(size, diagonal=2, complex=True, diag=M0diag, subdiag=M0subdiag, superdiag=M0superdiag)
M1 = Butterfly(size,
diagonal=1,
complex=True,
diag=torch.tensor([[1.0, 0.0], [-1.0, 0.0], [1.0, 0.0], [-1.0, 0.0]], requires_grad=True),
subdiag=torch.tensor([[1.0, 0.0], [0.0, 0.0], [1.0, 0.0]], requires_grad=True),
superdiag=torch.tensor([[1.0, 0.0], [0.0, 0.0], [1.0, 0.0]], requires_grad=True))
arange_ = np.arange(size)
dct_perm = np.concatenate((arange_[::2], arange_[::-2]))
br_perm = bitreversal_permutation(size)
perm = torch.arange(size)[dct_perm][br_perm]
arange_ = torch.arange(size, dtype=torch.float)
diag_real = 2 * torch.cos(-math.pi * arange_ / (2 * size))
diag_imag = 2 * torch.sin(-math.pi * arange_ / (2 * size))
diag = torch.stack((torch.diag(diag_real), torch.diag(diag_imag)), dim=-1)
assert torch.allclose(complex_matmul(diag, complex_matmul(M0.matrix(), M1.matrix()))[:, perm, 0], DCT)
D = torch.stack((diag_real, diag_imag), dim=-1)
DM0 = Butterfly(size,
diagonal=2,
complex=True,
diag=complex_mul(D, M0diag),
subdiag=complex_mul(D[2:], M0subdiag),
superdiag=complex_mul(D[:2], M0superdiag))
assert torch.allclose(complex_matmul(DM0.matrix(), M1.matrix())[:, perm, 0], DCT)
def test_block2x2diagproduct():
# Factorization of the DFT matrix
size = 4
model = Block2x2DiagProduct(size, complex=True)
model.factors[1].ABCD = nn.Parameter(torch.tensor([[[[1.0, 0.0]], [[1.0, 0.0]]], [[[1.0, 0.0]], [[-1.0, 0.0]]]]))
model.factors[0].ABCD = nn.Parameter(torch.tensor([[[[1.0, 0.0],
[1.0, 0.0]],
[[1.0, 0.0],
[0.0, -1.0]]],
[[[1.0, 0.0],
[1.0, 0.0]],
[[-1.0, 0.0],
[0.0, 1.0]]]]))
input = torch.stack((torch.eye(size), torch.zeros(size, size)), dim=-1)
assert torch.allclose(model(input[:, [0, 2, 1, 3]]), torch.fft(input, 1))
def test_blockpermproduct():
size = 8
input = torch.randn(3, size, 2)
perm = BlockPermProduct(size, complex=True, share_logit=True)
    # Saturate the logits so the product is exactly the bit-reversal permutation
    # (write through .data since logit is a leaf Parameter with requires_grad=True)
    perm.logit.data[0] = float('inf')
    perm.logit.data[1:] = float('-inf')
from utils import bitreversal_permutation
assert torch.allclose(perm(input), input[:, bitreversal_permutation(size)])
def main():
    test_butterfly()
    test_butterfly_product()
    test_butterfly_fft()
    test_butterfly_dct()
    test_block2x2diagproduct()
    test_blockpermproduct()
if __name__ == '__main__':
main()
|
butterfly-master
|
learning_transforms/butterfly_old.py
|
import unittest
import torch
from butterfly_factor import butterfly_factor_mult, butterfly_factor_mult_intermediate
from butterfly import Block2x2DiagProduct
from complex_utils import complex_mul
from factor_multiply import butterfly_multiply_intermediate, butterfly_multiply_intermediate_backward
def twiddle_list_concat(B: Block2x2DiagProduct):
# Assume ordering from largest size to smallest size
if not B.complex:
return torch.cat([factor.ABCD.permute(2, 0, 1) for factor in B.factors[::-1]])
else:
return torch.cat([factor.ABCD.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
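# Hedged sketch: an n-point product has factors of sizes n, n/2, ..., 2, which
# contribute n/2 + n/4 + ... + 1 = n - 1 two-by-two blocks in total, so the
# concatenated twiddle list has shape (n - 1, 2, 2) in the real case.
def _example_twiddle_concat_shape():
    n = 16
    B = Block2x2DiagProduct(n)
    assert twiddle_list_concat(B).shape == (n - 1, 2, 2)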
class ButterflyFactorTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_butterfly_factor_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n)
input_ = torch.randn(batch_size, n, requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (d_twiddle - d_twiddle_slow).abs().max().item())
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
def test_butterfly_factor_complex_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True)
input_ = torch.randn(batch_size, n, 2, requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2, 2)).view(prev.shape)
output_slow = (complex_mul(factor.ABCD, prev.view(-1, 1, 2, factor.size // 2, 2)).sum(dim=-3)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (d_twiddle - d_twiddle_slow).abs().max().item())
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_cuda(self):
batch_size = 100
n = 4096 # To test n > MAX_BLOCK_SIZE
B = Block2x2DiagProduct(n).to('cuda')
input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (factor.size, (d_twiddle - d_twiddle_slow).abs().max().item()))
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
def test_butterfly_factor_intermediate_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n)
input_ = torch.randn(batch_size, n, requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
def test_butterfly_factor_intermediate_complex_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True)
input_ = torch.randn(batch_size, n, 2, requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2, 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_intermediate_cuda(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n).to('cuda')
input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_intermediate_complex_cuda(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True).to('cuda')
input_ = torch.randn(batch_size, n, 2, device='cuda', requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2, 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
if __name__ == "__main__":
unittest.main()
# batch_size = 2
# n = 4
# B = Block2x2DiagProduct(n).to('cuda')
# # input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
# input_ = torch.arange(batch_size * n, dtype=torch.float, device='cuda', requires_grad=True).view(batch_size, n)
# output = input_
# factor = B.factors[0]
# prev = output
# output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
# output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
# grad = input_
# d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
# d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
# print(d_twiddle)
# print(d_twiddle_slow)
# print((factor.size, (d_twiddle - d_twiddle_slow).abs().max().item()))
|
butterfly-master
|
learning_transforms/test_factor_multiply.py
|
import math
import multiprocessing as mp
import os
from pathlib import Path
import pickle
import random
import numpy as np
from scipy.linalg import hadamard
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, run_experiments
from ray.tune.schedulers import AsyncHyperBandScheduler
from butterfly import Butterfly, ButterflyProduct
from semantic_loss import semantic_loss_exactly_one
from utils import PytorchTrainable
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
N_LBFGS_STEPS = 300
N_TRIALS_TO_POLISH = 20
def hadamard_test():
# Hadamard matrix for n = 4
size = 4
M0 = Butterfly(size,
diagonal=2,
diag=torch.tensor([1.0, 1.0, -1.0, -1.0], requires_grad=True),
subdiag=torch.ones(2, requires_grad=True),
superdiag=torch.ones(2, requires_grad=True))
M1 = Butterfly(size,
diagonal=1,
diag=torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True),
subdiag=torch.tensor([1.0, 0.0, 1.0], requires_grad=True),
superdiag=torch.tensor([1.0, 0.0, 1.0], requires_grad=True))
H = M0.matrix() @ M1.matrix()
assert torch.allclose(H, torch.tensor(hadamard(4), dtype=torch.float))
M = ButterflyProduct(size, fixed_order=True)
M.factors[0] = M0
M.factors[1] = M1
assert torch.allclose(M.matrix(), H)
class TrainableHadamard(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.tensor(hadamard(config['size']), dtype=torch.float)
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
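                # (Hedged reading of the helper's name and usage: semantic_loss_exactly_one
                # pushes each row of the factor-selection distribution to put its mass on
                # exactly one factor, sharpening the soft ordering.)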
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
def polish_hadamard(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
return loss.item()
ex = Experiment('Hadamard_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.named_config
def softmax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.named_config
def sparsemax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'sparsemax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.config
def fixed_order_config():
    fixed_order = True # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
size = 8 # Size of matrix to factor, must be power of 2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nmaxepochs = 200 # Maximum number of epochs
result_dir = 'results' # Directory to store results
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
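# Usage sketch (hedged; assumes sacred's standard CLI conventions):
#   python learning_hadamard.py with size=16 ntrials=10
#   python learning_hadamard.py with sparsemax_config size=16
# The named configs above (softmax_config, sparsemax_config) override the
# fixed_order defaults.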
@ex.capture
def hadamard_experiment(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Hadamard_factorization_{fixed_order}_{softmax_fn}_{size}',
run=TrainableHadamard,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(result_dir, nmaxepochs, nthreads):
experiment = hadamard_experiment()
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False)
losses = [-trial.last_result['negative_loss'] for trial in trials]
# Polish solutions with L-BFGS
pool = mp.Pool()
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result['negative_loss'])
polished_losses = pool.map(polish_hadamard, sorted_trials[:N_TRIALS_TO_POLISH])
pool.close()
pool.join()
for i in range(N_TRIALS_TO_POLISH):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array(losses))
print(np.sort(losses))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return min(losses + polished_losses)
|
butterfly-master
|
learning_transforms/learning_hadamard.py
|
import math
import multiprocessing as mp
import os
from pathlib import Path
import pickle
import random
import sys
import numpy as np
from numpy.polynomial import legendre
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, run_experiments
from ray.tune.schedulers import AsyncHyperBandScheduler
from hstack_diag import HstackDiagProduct
from utils import PytorchTrainable, bitreversal_permutation
N_LBFGS_STEPS = 300
N_TRIALS_TO_POLISH = 20
class TrainableOps(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = HstackDiagProduct(size=config['size'])
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
# Target: Legendre polynomials
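        # leg2poly converts coefficients in the Legendre basis to monomial
        # coefficients, so row i of P holds the polynomial coefficients of the
        # i-th Legendre polynomial (degree i, hence the :i + 1 slice).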
P = np.zeros((size, size), dtype=np.float64)
for i, coef in enumerate(np.eye(size)):
P[i, :i + 1] = legendre.leg2poly(coef)
self.target_matrix = torch.tensor(P)
self.br_perm = bitreversal_permutation(size)
self.input = (torch.eye(size)[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
self.input_permuted = self.input[:, self.br_perm]
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model(self.input_permuted)
loss = nn.functional.mse_loss(y.double(), self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
def polish_ops(trial):
"""Load model from checkpoint, and re-optimize using L-BFGS to find
the nearest local optimum.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = HstackDiagProduct(size=config['size'])
polished_model.factors = model.factors
polished_model.P_init = model.P_init
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
eye = torch.eye(polished_model.size)
x = (eye[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
y = polished_model(x[:, trainable.br_perm])
loss = nn.functional.mse_loss(y, trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
eye = torch.eye(polished_model.size)
x = (eye[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
y = polished_model(x[:, trainable.br_perm])
loss = nn.functional.mse_loss(y, trainable.target_matrix)
return loss.item()
ex = Experiment('Ops_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.config
def fixed_order_config():
size = 8 # Size of matrix to factor, must be power of 2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nmaxepochs = 200 # Maximum number of epochs
result_dir = 'results' # Directory to store results
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
@ex.capture
def ops_experiment(size, ntrials, nsteps, result_dir, nthreads, smoke_test):
config={
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
experiment = RayExperiment(
name=f'Ops_factorization_{size}',
run=TrainableOps,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(result_dir, nmaxepochs, nthreads):
experiment = ops_experiment()
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
os.environ['OMP_NUM_THREADS'] = str(nthreads) # For some reason we need this for OPs otherwise it'll thrash
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False)
losses = [-trial.last_result['negative_loss'] for trial in trials]
# Polish solutions with L-BFGS
pool = mp.Pool()
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result['negative_loss'])
polished_losses = pool.map(polish_ops, sorted_trials[:N_TRIALS_TO_POLISH])
pool.close()
pool.join()
for i in range(N_TRIALS_TO_POLISH):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array(losses))
print(np.sort(losses))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return min(losses + polished_losses)
# TODO: there might be a memory leak, trying to find it here
# import gc
# import operator as op
# from functools import reduce
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# print(reduce(op.mul, obj.size()) if len(obj.size()) > 0 else 0, type(obj), obj.size())
# # print(type(obj), obj.size(), obj.type())
# except:
# pass
|
butterfly-master
|
learning_transforms/learning_ops.py
|
import os
import pickle
from pathlib import Path
import numpy as np
import multiprocessing as mp
import torch
from torch import nn
from torch import optim
import ray
from butterfly import ButterflyProduct
from learning_hadamard import TrainableHadamardFactorFixedOrder, TrainableHadamardFactorSoftmax, TrainableHadamardFactorSparsemax
from learning_fft import TrainableFftFactorFixedOrder, TrainableFftFactorSoftmax, TrainableFftFactorSparsemax
N_LBFGS_STEPS = 300
N_TRIALS_TO_POLISH = 20
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = '1'
# @ray.remote
def polish_hadamard(trial):
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
# print(maxes)
# if torch.all(maxes >= 0.99):
polished_model.butterflies = nn.ModuleList([model.butterflies[argmax] for argmax in argmaxes])
# else:
# return -trial.last_result['negative_loss']
else:
polished_model.butterflies = model.butterflies
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix(), trainable.target_matrix)
return loss.item()
def polish_fft(trial):
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
# print(maxes)
# if torch.all(maxes >= 0.99):
polished_model.butterflies = nn.ModuleList([model.butterflies[argmax] for argmax in argmaxes])
# else:
# return -trial.last_result['negative_loss']
else:
polished_model.butterflies = model.butterflies
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
return loss.item()
if __name__ == '__main__':
# ray.init()
result_dir = 'results'
experiment_names = [[f'Hadamard_factorization_fixed_order_{size}' for size in [8, 16, 32, 64, 128, 256]]]
experiment_names += [[f'Hadamard_factorization_softmax_{size}' for size in [8, 16, 32, 64, 128, 256]]]
experiment_names += [[f'Hadamard_factorization_sparsemax_{size}' for size in [8, 16, 32, 64, 128]]]
experiment_names += [[f'Fft_factorization_fixed_order_{size}' for size in [8, 16, 32, 64, 128]]]
experiment_names += [[f'Fft_factorization_softmax_{size}' for size in [8, 16, 32, 64, 128]]]
experiment_names += [[f'Fft_factorization_sparsemax_{size}' for size in [8, 16, 32, 64, 128]]]
pool = mp.Pool()
for experiment_names_ in experiment_names:
# print(experiment_names_[0])
for experiment_name in experiment_names_:
print(experiment_name)
checkpoint_path = Path(result_dir) / experiment_name / 'trial.pkl'
with checkpoint_path.open('rb') as f:
trials = pickle.load(f)
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result['negative_loss'])
losses = [-trial.last_result['negative_loss'] for trial in sorted_trials]
# polished_losses = ray.get([polish.remote(trial) for trial in sorted_trials[:N_TRIALS_TO_POLISH]])
if experiment_name.startswith('Hadamard'):
polished_losses = pool.map(polish_hadamard, sorted_trials[:20])
elif experiment_name.startswith('Fft'):
polished_losses = pool.map(polish_fft, sorted_trials[:20])
else:
assert False, 'Unknown experiment'
print(np.sort(losses)[:N_TRIALS_TO_POLISH])
for i in range(N_TRIALS_TO_POLISH):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array([trial.last_result['polished_negative_loss'] for trial in sorted_trials[:N_TRIALS_TO_POLISH]]))
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
|
butterfly-master
|
learning_transforms/polish.py
|
import copy
import os
import torch
from torch import nn
from torch import optim
from ray.tune import Trainable
N_LBFGS_STEPS_VALIDATION = 15
class PytorchTrainable(Trainable):
"""Abstract Trainable class for Pytorch models, which checkpoints the model
and the optimizer.
Subclass must initialize self.model and self.optimizer in _setup.
"""
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir, "model_optimizer.pth")
state = {'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict()}
torch.save(state, checkpoint_path)
return checkpoint_path
def _restore(self, checkpoint_path):
if hasattr(self, 'device'):
checkpoint = torch.load(checkpoint_path, self.device)
else:
checkpoint = torch.load(checkpoint_path)
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
class TrainableFixedData(PytorchTrainable):
"""Abstract Trainable class for Pytorch models with fixed data.
Subclass must initialize self.model, self.optimizer, and
    self.n_steps_per_epoch in _setup, and must implement self.loss().
"""
def loss(self):
raise NotImplementedError
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
loss = self.loss()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableMatrixFactorization(TrainableFixedData):
"""Abstract Trainable class for Pytorch models that factor a target matrix.
Subclass must initialize self.model, self.optimizer,
    self.n_steps_per_epoch, self.n_epochs_per_validation, self.target_matrix,
    self.input, and self.nparameters in _setup, and may override self.freeze()
    to freeze the model (e.g., replacing soft logits with their argmax).
"""
def forward(self):
return self.model(self.input)
def loss(self):
# Take transpose since the transform acts on the rows of the input
output = self.forward().transpose(0, 1)
if self.target_matrix.dim() == 2 and output.dim() == 3: # Real target matrix, take real part
output = output[:, :, 0]
return nn.functional.mse_loss(output, self.target_matrix)
def freeze(self):
pass
def polish(self, nmaxsteps=50, patience=5, threshold=1e-10, save_to_self_model=False):
if not save_to_self_model:
model_bak = self.model
self.model = copy.deepcopy(self.model)
self.freeze()
optimizer = optim.LBFGS(filter(lambda p: p.requires_grad, self.model.parameters()))
def closure():
optimizer.zero_grad()
loss = self.loss()
loss.backward()
return loss
n_bad_steps = 0
best_loss = float('inf')
for i in range(nmaxsteps):
loss = optimizer.step(closure)
if loss.item() < best_loss - threshold:
best_loss = loss.item()
n_bad_steps = 0
else:
n_bad_steps += 1
if n_bad_steps > patience:
break
if not save_to_self_model:
self.model = model_bak
return loss.item()
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
loss = self.loss()
loss.backward()
self.optimizer.step()
loss = loss.item()
if (self._iteration + 1) % self.n_epochs_per_validation == 0:
loss = min(loss, self.polish(N_LBFGS_STEPS_VALIDATION, save_to_self_model=False))
return {'negative_loss': -loss, 'mean_loss': loss, 'nparameters': self.nparameters}
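# A minimal concrete subclass sketch (hedged): factor the DFT matrix with a complex
# butterfly product. Assumes butterfly.py and complex_utils.py from this repo are
# importable and that torch.fft is the old fft(input, signal_ndim) API; the class
# name and config keys here are illustrative only.
class _ExampleTrainableDft(TrainableMatrixFactorization):
    def _setup(self, config):
        from butterfly import Block2x2DiagProduct
        from complex_utils import real_to_complex
        torch.manual_seed(config['seed'])
        size = config['size']
        self.model = Block2x2DiagProduct(size, complex=True)
        self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
        self.n_steps_per_epoch = config['n_steps_per_epoch']
        self.n_epochs_per_validation = config['n_epochs_per_validation']
        self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
        self.input = real_to_complex(torch.eye(size))
        self.nparameters = sum(p.numel() for p in self.model.parameters())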
|
butterfly-master
|
learning_transforms/training.py
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.patches as mpatches
plt.rcParams['font.family'] = 'serif'
rs = [1]
markers = ['o', 'v', 'D', 'p', 's', '>']
loc = 'speed_training_data.pkl'
with open(loc, 'rb') as f:
    data = pickle.load(f)
colors = ['red', 'orange', 'green', 'blue']
speedups_fft = data['speedup_fft']
speedups_butterfly = data['speedup_butterfly']
sizes = data['sizes']
lw = 3
msize = 6
print('data: ', data)
start_idx = 0
print('fft speedup: ', speedups_fft[start_idx:])
print('butterfly speedup: ', speedups_butterfly[start_idx:])
print('sizes, speedups: ', sizes.size, speedups_fft.shape)
plt.plot(sizes[start_idx:], speedups_fft[start_idx:], linewidth=lw, label='FFT', marker=markers[0],
         color=colors[0], markeredgecolor=colors[0], markersize=msize)
plt.plot(sizes[start_idx:], speedups_butterfly[start_idx:], linewidth=lw, label='Butterfly', marker=markers[0],
         color=colors[3], markeredgecolor=colors[3], markersize=msize)
plt.axhline(y=1.0, color='black',linewidth=3)
plt.xscale('log', basex=2)
plt.yscale('log')
plt.xlabel(r'$N$',fontsize=14)
# plt.ylabel("Speedup over GEMM", fontsize=14)
plt.ylabel("Speedup over dense multiply", fontsize=18)
classes = [mpatches.Patch(color=colors[0], label='FFT'),
mpatches.Patch(color=colors[3], label='Butterfly')]
plt.legend(handles=classes, ncol=4, bbox_to_anchor=(0.75, -0.15))#, loc='upper left')
plt.savefig('speed_training_plot.pdf', bbox_inches='tight')
|
butterfly-master
|
learning_transforms/speed_training_plot.py
|
import argparse
import math
import multiprocessing as mp
import os
from pathlib import Path
import pickle
import random
import sys
import numpy as np
import torch
from torch import nn
from torch import optim
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, run_experiments
from ray.tune.schedulers import AsyncHyperBandScheduler
from butterfly import Butterfly, ButterflyProduct, sinkhorn, Block2x2DiagProduct, BlockPermProduct
from semantic_loss import semantic_loss_exactly_one
from utils import PytorchTrainable, bitreversal_permutation
from complex_utils import real_to_complex, complex_matmul
N_LBFGS_STEPS = 300
N_LBFGS_STEPS_VALIDATION = 15
N_TRIALS_TO_POLISH = 60
def fft_test():
# DFT matrix for n = 4
size = 4
DFT = torch.fft(real_to_complex(torch.eye(size)), 1)
P = torch.stack((torch.tensor([[1., 0., 0., 0.],
[0., 0., 1., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.]]),
torch.zeros((size, size))), dim=-1)
M0 = Butterfly(size,
diagonal=2,
complex=True,
diag=torch.tensor([[1.0, 0.0], [1.0, 0.0], [-1.0, 0.0], [0.0, 1.0]], requires_grad=True),
subdiag=torch.tensor([[1.0, 0.0], [1.0, 0.0]], requires_grad=True),
superdiag=torch.tensor([[1.0, 0.0], [0.0, -1.0]], requires_grad=True))
M1 = Butterfly(size,
diagonal=1,
complex=True,
diag=torch.tensor([[1.0, 0.0], [-1.0, 0.0], [1.0, 0.0], [-1.0, 0.0]], requires_grad=True),
subdiag=torch.tensor([[1.0, 0.0], [0.0, 0.0], [1.0, 0.0]], requires_grad=True),
superdiag=torch.tensor([[1.0, 0.0], [0.0, 0.0], [1.0, 0.0]], requires_grad=True))
assert torch.allclose(complex_matmul(M0.matrix(), complex_matmul(M1.matrix(), P)), DFT)
br_perm = torch.tensor(bitreversal_permutation(size))
assert torch.allclose(complex_matmul(M0.matrix(), M1.matrix())[:, br_perm], DFT)
D = complex_matmul(DFT, P.transpose(0, 1))
assert torch.allclose(complex_matmul(M0.matrix(), M1.matrix()), D)
class TrainableFftFactorFixedOrder(PytorchTrainable):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=True, fixed_order=True)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftFactorSoftmax(PytorchTrainable):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=True, fixed_order=False)
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
total_loss = loss + self.semantic_loss_weight * semantic_loss.mean()
total_loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftFactorSparsemax(TrainableFftFactorFixedOrder):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=True, fixed_order=False, softmax_fn='sparsemax')
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
class TrainableFftFactorSparsemaxNoPerm(TrainableFftFactorSparsemax):
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftFactorSoftmaxNoPerm(TrainableFftFactorSoftmax):
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableRandnFactorSoftmaxNoPerm(PytorchTrainable):
def _setup(self, config):
size = config['size']
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=size, complex=False, fixed_order=False, softmax_fn='softmax')
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.target_matrix = torch.rand(size, size, requires_grad=False)
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftFactorSparsemaxPermFront(TrainableFftFactorSparsemax):
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[self.br_perm, :]
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
def fft_factorization_fixed_order(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_fixed_order_{args.size}',
run=TrainableFftFactorFixedOrder,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_softmax(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_softmax_{args.size}',
run=TrainableFftFactorSoftmax,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'is_nan': True,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'semantic_loss_weight': sample_from(lambda spec: math.exp(random.uniform(math.log(5e-4), math.log(5e-1)))),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_sparsemax(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_sparsemax_{args.size}',
run=TrainableFftFactorSparsemax,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_sparsemax_no_perm(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_sparsemax_no_perm_{args.size}',
run=TrainableFftFactorSparsemaxNoPerm,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_softmax_no_perm(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_softmax_no_perm_{args.size}',
run=TrainableFftFactorSoftmaxNoPerm,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def randn_factorization_softmax_no_perm(argv):
    parser = argparse.ArgumentParser(description='Learn to factor a random matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Randn_factorization_softmax_no_perm_{args.size}',
run=TrainableRandnFactorSoftmaxNoPerm,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
def fft_factorization_sparsemax_perm_front(argv):
parser = argparse.ArgumentParser(description='Learn to factor Fft matrix')
parser.add_argument('--size', type=int, default=8, help='Size of matrix to factor, must be power of 2')
parser.add_argument('--ntrials', type=int, default=20, help='Number of trials for hyperparameter tuning')
parser.add_argument('--nsteps', type=int, default=200, help='Number of steps per epoch')
parser.add_argument('--nmaxepochs', type=int, default=200, help='Maximum number of epochs')
parser.add_argument('--result-dir', type=str, default='./results', help='Directory to store results')
parser.add_argument('--nthreads', type=int, default=1, help='Number of CPU threads per job')
parser.add_argument('--smoke-test', action='store_true', help='Finish quickly for testing')
args = parser.parse_args(argv)
experiment = RayExperiment(
name=f'Fft_factorization_sparsemax_perm_front_{args.size}',
run=TrainableFftFactorSparsemaxPermFront,
local_dir=args.result_dir,
num_samples=args.ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': args.nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if args.smoke_test else 99999,
'negative_loss': -1e-8
},
config={
'size': args.size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': args.nsteps,
},
)
return experiment, args
# if __name__ == '__main__':
# # experiment, args = fft_factorization_fixed_order(sys.argv[1:])
# experiment, args = fft_factorization_softmax(sys.argv[1:])
# # experiment, args = fft_factorization_sparsemax(sys.argv[1:])
# # experiment, args = fft_factorization_sparsemax_no_perm(sys.argv[1:])
# # experiment, args = fft_factorization_softmax_no_perm(sys.argv[1:])
# # experiment, args = randn_factorization_softmax_no_perm(sys.argv[1:])
# # experiment, args = fft_factorization_sparsemax_perm_front(sys.argv[1:])
# # We'll use multiple processes so disable MKL multithreading
# os.environ['MKL_NUM_THREADS'] = str(args.nthreads)
# ray.init()
# ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=args.nmaxepochs)
# trials = run_experiments(experiment, scheduler=ahb)
# losses = [-trial.last_result['negative_loss'] for trial in trials]
# print(np.array(losses))
# print(np.sort(losses))
# checkpoint_path = Path(args.result_dir) / experiment.name
# checkpoint_path.mkdir(parents=True, exist_ok=True)
# checkpoint_path /= 'trial.pkl'
# with checkpoint_path.open('wb') as f:
# pickle.dump(trials, f)
class TrainableFft(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=True,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'])
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
# br_perm = bitreversal_permutation(size)
# br_reverse = torch.tensor(list(br_perm[::-1]))
# br_reverse = torch.cat((torch.tensor(list(br_perm[:size//2][::-1])), torch.tensor(list(br_perm[size//2:][::-1]))))
# Same as [6, 2, 4, 0, 7, 3, 5, 1], which is [0, 1]^4 * [0, 2, 1, 3]^2 * [6, 4, 2, 0, 7, 5, 3, 1]
# br_reverse = torch.cat((torch.tensor(list(br_perm[:size//4][::-1])), torch.tensor(list(br_perm[size//4:size//2][::-1])), torch.tensor(list(br_perm[size//2:3*size//4][::-1])), torch.tensor(list(br_perm[3*size//4:][::-1]))))
# self.br_perm = br_reverse
# self.br_perm = torch.tensor([0, 7, 4, 3, 2, 5, 6, 1]) # Doesn't work
# self.br_perm = torch.tensor([7, 3, 0, 4, 2, 6, 5, 1]) # Doesn't work
# self.br_perm = torch.tensor([4, 0, 6, 2, 5, 1, 7, 3]) # This works, [0, 1]^4 * [2, 0, 3, 1]^2 * [0, 2, 4, 6, 1, 3, 5, 7] or [1, 0]^4 * [0, 2, 1, 3]^2 * [0, 2, 4, 6, 1, 3, 5, 7]
# self.br_perm = torch.tensor([4, 0, 2, 6, 5, 1, 3, 7]) # Doesn't work, [0, 1]^4 * [2, 0, 1, 3]^2 * [0, 2, 4, 6, 1, 3, 5, 7]
# self.br_perm = torch.tensor([1, 5, 3, 7, 0, 4, 2, 6]) # This works, [0, 1]^4 * [4, 6, 5, 7, 0, 4, 2, 6]
# self.br_perm = torch.tensor([4, 0, 6, 2, 5, 1, 3, 7]) # Doesn't work
# self.br_perm = torch.tensor([4, 0, 6, 2, 1, 5, 3, 7]) # Doesn't work
# self.br_perm = torch.tensor([0, 4, 6, 2, 1, 5, 7, 3]) # Doesn't work
# self.br_perm = torch.tensor([4, 1, 6, 2, 5, 0, 7, 3]) # This works, since it's just swapping 0 and 1
# self.br_perm = torch.tensor([5, 1, 6, 2, 4, 0, 7, 3]) # This works, since it's swapping 4 and 5
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix()[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
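# Minimal smoke-test sketch for the Trainable above (config keys as consumed in
# _setup; the values here are illustrative, not tuned):
#   t = TrainableFft({'size': 8, 'fixed_order': True, 'softmax_fn': 'softmax',
#                     'seed': 0, 'lr': 1e-2, 'n_steps_per_epoch': 10})
#   print(t._train()['negative_loss'])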
class TrainableFftBlock2x2(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = Block2x2DiagProduct(size=config['size'], complex=True)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.br_perm = torch.tensor(bitreversal_permutation(size))
self.input = real_to_complex(torch.eye(size))[:, self.br_perm]
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model(self.input)
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftBlockPerm(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = nn.Sequential(
BlockPermProduct(size=config['size'], complex=True, share_logit=False),
Block2x2DiagProduct(size=config['size'], complex=True)
)
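        # Unlike TrainableFftBlock2x2, the input permutation is not fixed to bit
        # reversal: BlockPermProduct learns it jointly with the butterfly factors.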
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
        self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
# self.target_matrix = size * torch.ifft(real_to_complex(torch.eye(size)))
self.input = real_to_complex(torch.eye(size))
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model(self.input)
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftBlockPermTranspose(TrainableFftBlockPerm):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = nn.Sequential(
Block2x2DiagProduct(size=config['size'], complex=True, decreasing_size=False),
BlockPermProduct(size=config['size'], complex=True, share_logit=False, increasing_size=True),
)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
self.input = real_to_complex(torch.eye(size))
def _train(self):
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model(self.input)
loss = nn.functional.mse_loss(y, self.target_matrix)
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftTempAnnealing(TrainableFft):
def _train(self):
temperature = 1.0 / (0.1 * self._iteration + 1)
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix(temperature)[:, self.br_perm]
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -loss.item()}
class TrainableFftLearnPerm(PytorchTrainable):
def _setup(self, config):
torch.manual_seed(config['seed'])
self.model = ButterflyProduct(size=config['size'],
complex=True,
fixed_order=config['fixed_order'],
softmax_fn=config['softmax_fn'],
learn_perm=True)
if (not config['fixed_order']) and config['softmax_fn'] == 'softmax':
self.semantic_loss_weight = config['semantic_loss_weight']
self.optimizer = optim.Adam(self.model.parameters(), lr=config['lr'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
size = config['size']
self.target_matrix = torch.fft(real_to_complex(torch.eye(size)), 1)
def _train(self):
temperature = 1.0 / (0.3 * self._iteration + 1)
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
y = self.model.matrix(temperature)
loss = nn.functional.mse_loss(y, self.target_matrix)
if (not self.model.fixed_order) and hasattr(self, 'semantic_loss_weight'):
semantic_loss = semantic_loss_exactly_one(nn.functional.log_softmax(self.model.logit, dim=-1))
loss += self.semantic_loss_weight * semantic_loss.mean()
loss.backward()
self.optimizer.step()
return {'negative_loss': -polished_loss_fft_learn_perm(self)}
def polish_fft(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
the nearest local optima.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
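        # Each row of prob is a distribution over candidate factors; committing to
        # the argmax at each position yields a fixed-order product to polish.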
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.br_perm], trainable.target_matrix)
return loss.item()
def polish_fft_learn_perm(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
the nearest local optima.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = ButterflyProduct(size=config['size'], complex=model.complex, fixed_order=True)
temperature = 1.0 / (0.3 * trainable._iteration + 1)
trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
return loss.item()
def polished_loss_fft_learn_perm(trainable):
model = trainable.model
polished_model = ButterflyProduct(size=model.size, complex=model.complex, fixed_order=True)
temperature = 1.0 / (0.3 * trainable._iteration + 1)
trainable.perm = torch.argmax(sinkhorn(model.perm_logit / temperature), dim=1)
if not model.fixed_order:
prob = model.softmax_fn(model.logit)
maxes, argmaxes = torch.max(prob, dim=-1)
polished_model.factors = nn.ModuleList([model.factors[argmax] for argmax in argmaxes])
else:
polished_model.factors = model.factors
preopt_loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS_VALIDATION):
optimizer.step(closure)
loss = nn.functional.mse_loss(polished_model.matrix()[:, trainable.perm], trainable.target_matrix)
# return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else float('inf')
return loss.item() if not torch.isnan(loss) else preopt_loss.item() if not torch.isnan(preopt_loss) else 9999.0
def polish_fft_block2x2(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
the nearest local optima.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
polished_model = Block2x2DiagProduct(size=config['size'], complex=model.complex)
polished_model.factors = model.factors
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model(trainable.input), trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model(trainable.input), trainable.target_matrix)
return loss.item()
def polish_fft_blockperm(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
the nearest local optima.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
perm = model[0].argmax()
polished_model = Block2x2DiagProduct(size=config['size'], complex=True)
polished_model.load_state_dict(model[1].state_dict())
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model(trainable.input[:, perm]), trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model(trainable.input[:, perm]), trainable.target_matrix)
return loss.item()
def polish_fft_blockperm_transpose(trial):
"""Load model from checkpoint, then fix the order of the factor
matrices (using the largest logits), and re-optimize using L-BFGS to find
the nearest local optima.
"""
trainable = eval(trial.trainable_name)(trial.config)
trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
model = trainable.model
config = trial.config
perm = model[1].argmax()
polished_model = Block2x2DiagProduct(size=config['size'], complex=True, decreasing_size=False)
polished_model.load_state_dict(model[0].state_dict())
optimizer = optim.LBFGS(polished_model.parameters())
def closure():
optimizer.zero_grad()
loss = nn.functional.mse_loss(polished_model(trainable.input)[:, perm], trainable.target_matrix)
loss.backward()
return loss
for i in range(N_LBFGS_STEPS):
optimizer.step(closure)
torch.save(polished_model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
loss = nn.functional.mse_loss(polished_model(trainable.input)[:, perm], trainable.target_matrix)
return loss.item()
ex = Experiment('Fft_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.named_config
def softmax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.named_config
def sparsemax_config():
    fixed_order = False # Whether the order of the factors is fixed
softmax_fn = 'sparsemax' # Whether to use softmax (+ semantic loss) or sparsemax
@ex.config
def fixed_order_config():
    fixed_order = True # Whether the order of the factors is fixed
softmax_fn = 'softmax' # Whether to use softmax (+ semantic loss) or sparsemax
size = 8 # Size of matrix to factor, must be power of 2
ntrials = 20 # Number of trials for hyperparameter tuning
nsteps = 400 # Number of steps per epoch
nmaxepochs = 200 # Maximum number of epochs
result_dir = 'results' # Directory to store results
nthreads = 1 # Number of CPU threads per job
smoke_test = False # Finish quickly for testing
@ex.capture
def fft_experiment(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Fft_factorization_{fixed_order}_{softmax_fn}_{size}',
run=TrainableFft,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_temp_annealing(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Fft_factorization_Temp_{fixed_order}_{softmax_fn}_{size}',
run=TrainableFftTempAnnealing,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_learn_perm(fixed_order, softmax_fn, size, ntrials, nsteps, result_dir, nthreads, smoke_test):
assert softmax_fn in ['softmax', 'sparsemax']
config={
'fixed_order': fixed_order,
'softmax_fn': softmax_fn,
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
if (not fixed_order) and softmax_fn == 'softmax':
config['semantic_loss_weight'] = sample_from(lambda spec: math.exp(random.uniform(math.log(5e-3), math.log(5e-1))))
experiment = RayExperiment(
name=f'Fft_factorization_Learnperm_{fixed_order}_{softmax_fn}_{size}',
run=TrainableFftLearnPerm,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
# 'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_block2x2(size, ntrials, nsteps, result_dir, nthreads, smoke_test):
config={
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
experiment = RayExperiment(
name=f'Fft_factorization_block_{size}',
run=TrainableFftBlock2x2,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_blockperm(size, ntrials, nsteps, result_dir, nthreads, smoke_test):
config={
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
experiment = RayExperiment(
name=f'Fft_factorization_block_perm_{size}',
run=TrainableFftBlockPerm,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.capture
def fft_experiment_blockperm_transpose(size, ntrials, nsteps, result_dir, nthreads, smoke_test):
config={
'size': size,
'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-4), math.log(5e-1)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'n_steps_per_epoch': nsteps,
}
experiment = RayExperiment(
name=f'Fft_factorization_block_perm_transpose_{size}',
run=TrainableFftBlockPermTranspose,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
resources_per_trial={'cpu': nthreads, 'gpu': 0},
stop={
'training_iteration': 1 if smoke_test else 99999,
'negative_loss': -1e-8
},
config=config,
)
return experiment
@ex.automain
def run(result_dir, nmaxepochs, nthreads):
# experiment = fft_experiment()
# experiment = fft_experiment_temp_annealing()
# experiment = fft_experiment_learn_perm()
# experiment = fft_experiment_block2x2()
# experiment = fft_experiment_blockperm()
experiment = fft_experiment_blockperm_transpose()
# We'll use multiple processes so disable MKL multithreading
os.environ['MKL_NUM_THREADS'] = str(nthreads)
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='negative_loss', max_t=nmaxepochs)
trials = run_experiments(experiment, scheduler=ahb, raise_on_failed_trial=False)
losses = [-trial.last_result['negative_loss'] for trial in trials]
# Polish solutions with L-BFGS
pool = mp.Pool()
sorted_trials = sorted(trials, key=lambda trial: -trial.last_result['negative_loss'])
# polished_losses = pool.map(polish_fft, sorted_trials[:N_TRIALS_TO_POLISH])
# polished_losses = pool.map(polish_fft_learn_perm, sorted_trials[:N_TRIALS_TO_POLISH])
# polished_losses = pool.map(polish_fft_block2x2, sorted_trials[:N_TRIALS_TO_POLISH])
# polished_losses = pool.map(polish_fft_blockperm, sorted_trials[:N_TRIALS_TO_POLISH])
polished_losses = pool.map(polish_fft_blockperm_transpose, sorted_trials[:N_TRIALS_TO_POLISH])
pool.close()
pool.join()
for i in range(min(N_TRIALS_TO_POLISH, len(trials))):
sorted_trials[i].last_result['polished_negative_loss'] = -polished_losses[i]
print(np.array(losses))
print(np.sort(losses))
# print(np.sort(losses)[:N_TRIALS_TO_POLISH])
print(np.sort(polished_losses))
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
    return min(losses + polished_losses)
|
butterfly-master
|
learning_transforms/old/learning_fft_old.py
|
import models
name_to_model = {
'LeNet': lambda args: models.LeNet(**args),
'AlexNet': lambda args: models.AlexNet(**args),
'MLP': lambda args: models.MLP(**args),
'ResNet18': lambda args: models.ResNet18(**args),
'PResNet18': lambda args: models.PResNet18(**args),
'Permutation': lambda args: models.TensorPermutation(32, 32, **args),
'ResNet20Original': lambda args: models.resnet20original(),
'MobileNet': lambda args: models.MobileNet(**args),
'ShuffleNet': lambda args: models.ShuffleNetG2(),
'WideResNet28': lambda args: models.WideResNet28(**args),
}
def get_model(model_config):
name = model_config['name']
    return name_to_model[name](model_config.get('args', {}))  # default to {} so **args works when no args are given
|
butterfly-master
|
cnn/model_utils.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
import torch
from cnn.models.butterfly_conv import ButterflyConv2d
from butterfly.butterfly import ButterflyBmm
from butterfly.butterfly_multiply import butterfly_conv2d
import time
nsteps = 1000
in_planes = 256
out_planes = 256
kernel_size = 3
stride = 1
batch_size = 128
f_dim = 8
padding = 1
conv1 = torch.nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=1, bias=False).to('cuda')
bfly = ButterflyConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=1, bias=False, tied_weight=False, fused_unfold=False).to('cuda')
bfly_fused = ButterflyConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=1, bias=False, tied_weight=False, fused_unfold=True).to('cuda')
x = torch.randn(batch_size, in_planes, f_dim, f_dim, requires_grad=True).to('cuda')
grad = torch.randn_like(x)
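# Note: with in_planes == out_planes, stride 1 and padding 1 (kernel 3), the output
# has the same shape as x, so a gradient sampled like x is shape-compatible.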
# Conv2d
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
output = conv1.forward(x) # Do it once to initialize cuDNN handle and such
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = conv1.forward(x)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'Conv2d forward: {end - start}s {(mem2-mem1)/1e6}MB')
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
torch.autograd.grad(output, (conv1.weight, x), grad, retain_graph=True) # Do it once
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (conv1.weight, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'Conv2d backward: {end - start}s {(mem2-mem1)/1e6}MB')
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
output = conv1.forward(x)
torch.autograd.grad(output, (conv1.weight, x), grad)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = conv1.forward(x)
torch.autograd.grad(output, (conv1.weight, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'Conv2d together: {end - start}s {(mem2-mem1)/1e6}MB')
# Butterfly Conv
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
output = bfly.forward(x)  # Warm-up pass (kernel initialization), mirroring the Conv2d timing above
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = bfly.forward(x)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'ButterflyConv2d forward: {end - start}s {(mem2-mem1)/1e6}MB')
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (bfly.twiddle, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'ButterflyConv2d backward: {end - start}s {(mem2-mem1)/1e6}MB')
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = bfly.forward(x)
torch.autograd.grad(output, (bfly.twiddle, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'ButterflyConv2d together: {end - start}s {(mem2-mem1)/1e6}MB')
# Fused-unfold butterfly
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
output = bfly_fused.forward(x)  # Warm-up pass, mirroring the Conv2d timing above
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = bfly_fused.forward(x)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'ButterflyConv2d fused-unfold forward: {end - start}s {(mem2-mem1)/1e6}MB')
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (bfly_fused.twiddle, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'ButterflyConv2d fused-unfold backward: {end - start}s {(mem2-mem1)/1e6}MB')
mem1 = torch.cuda.memory_allocated()
torch.cuda.reset_max_memory_allocated()
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = bfly_fused.forward(x)
torch.autograd.grad(output, (bfly_fused.twiddle, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
mem2 = torch.cuda.max_memory_allocated()
print(f'ButterflyConv2d fused-unfold together: {end - start}s {(mem2-mem1)/1e6}MB')
|
butterfly-master
|
cnn/benchmark_cnn.py
|
import pickle
import json
from pathlib import Path
import numpy as np
# butterfly_acc = [56.4, 65.0, 70.1, 71.2]
# butterfly_param = [0.70, 1.54, 3.62, 4.04]
# Butterfly w/ channel pooling
butterfly_smpool_acc = [54.027, 62.840, 68.418]
butterfly_smpool_param = np.array([439688, 1024808, 2597480]) / 1e6
# Butterfly w/ compressed softmax
# butterfly_smstruct_acc = [59.0, 66.1, 70.5]
# butterfly_smstruct_param = [0.60, 1.77, 5.69]
# width_acc = [51.6, 63.9, 70.9]
# width_param = [0.47, 1.33, 4.23]
width_acc = [51.971, 61.770, 67.799]
width_param = np.array([470072, 1019304, 2585560]) / 1e6
lowrank_acc = [47.248, 56.064, 62.727]
lowrank_param = np.array([426376, 997160, 2540136]) / 1e6
sparse_ind_acc = [56.145]
sparse_ind_param = np.array([439688+84992]) / 1e6
import matplotlib.pyplot as plt
plt.switch_backend('agg')
markers = ['o', 'v', 'D', 'p', 's', '>']
colors = ['red', 'orange', 'green', 'blue', 'black']
# plt.plot(butterfly_param, butterfly_acc, marker=markers[0], color=colors[0], label='Butterfly')
plt.plot(width_param, width_acc, marker=markers[2], color=colors[2], label='Reducing width')
plt.plot(butterfly_smpool_param, butterfly_smpool_acc, marker=markers[0], color=colors[0], label='Kaleidoscope')
plt.plot(lowrank_param, lowrank_acc, marker=markers[1], color=colors[1], label='Low-rank')
plt.plot(sparse_ind_param, sparse_ind_acc, marker=markers[3], color=colors[3], label='Sparse (values+indices)')
# plt.plot(butterfly_smstruct_param, butterfly_smstruct_acc, marker=markers[3], color=colors[3], label='Butterfly w/ smstruct')
# plt.plot(mobilenet_numparams / toeplitzlike_numparams[1:], all_accuracy[2, 1:], marker=markers[2], color=colors[2], label='Toeplitz-like')
# plt.plot(mobilenet_numparams / resnet_numparams, resnet_accuracy, marker=markers[3], color=colors[3], label='Resnet18')
# plt.plot(1, butterfly_acc, marker=markers[4], color=colors[4], label='MobileNet')
# plt.axhline(butterfly_acc, color='black', linewidth=3)
ax = plt.gca()
# ax.text(0.55, butterfly_acc + 0.005, 'MobileNet 3.2M', color=colors[4])
# ax.text(0.55 * mobilenet_numparams / resnet_numparams, resnet_accuracy - 0.0075, 'ResNet18 11M', color=colors[3])
# ax.text(0.55 * mobilenet_numparams / all_params[6], all_accuracy[0, 6] + 0.005, 'Structured 0.6M', color=colors[0])
# plt.xscale('log')
plt.xlabel('Number of parameters (millions)', fontsize=14)
plt.ylabel('Accuracy (%)', fontsize=14)
# plt.xticks([0.3, 1, 2, 5, 10, 25], [0.3, 1, 2, 5, 10, 25])
# plt.xticks([1, 2, 5, 10, 25], [1, 2, 5, 10, 25])
# plt.legend(['Butterfly', 'Circulant', 'Toeplitz-like'])
# plt.legend(['Butterfly', 'Reducing width'])
plt.legend()
plt.savefig('imagenet_compression.pdf', bbox_inches='tight')
|
butterfly-master
|
cnn/imagenet_analysis.py
|
import io
import argparse, shutil, time, warnings
import subprocess
from pathlib import Path
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import logging
from mobilenet_imagenet import MobileNet
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
from imagenet.dataloaders import *
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name])) + ['mobilenetv1', 'mobilenetv1_struct']
def get_parser():
parser = argparse.ArgumentParser(description='Save PyTorch ImageNet covariance of activations.')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--output-dir', type=str,
default=Path.cwd(), help='Directory to save intermediates.')
parser.add_argument('--data-backend', metavar='BACKEND', default='dali-cpu',
choices=DATA_BACKEND_CHOICES)
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('--width', default=1.0, type=float,
metavar='WIDTH', help='Width multiplier of the CNN (default 1.0)')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='Print frequency (default: 10)')
parser.add_argument('--layers', nargs='+', type=str, required=True,
                        help='Layers to save inputs and outputs for. '
                             'Use as a list: e.g. layer1 layer2 layer3. '
                             'More layers take longer.')
parser.add_argument('--model-path', type=str, required=True,
help='Path for teacher model.')
    parser.add_argument('--max_batches', type=int, help='Maximum number of batches '
                        'to collect activations for')
# parser.add_argument('--dataset', type=str, help='Dataset name for selecting train loader and augmentation.')
return parser
cudnn.benchmark = True
args = get_parser().parse_args()
os.makedirs(args.output_dir, exist_ok=True)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.info(args)
def get_teacher_intermediates(teacher_model, train_loader, layers_to_replace):
if args.max_batches is not None:
num_batches = args.max_batches
else:
num_batches = train_loader._len
# hook to capture intermediate inputs
def hook(module, input):
x, = input
b, c, h, w = x.shape
x = x.permute(0, 2, 3, 1).reshape(b * h * w, c)
if not hasattr(module, '_count'):
module._count = 1
else:
module._count += 1
# Compute the first moment E[X], averaged over batches.
current_mean = x.mean(dim=0)
if not hasattr(module, '_mean'):
module._mean = current_mean
else:
module._mean += (current_mean - module._mean) / module._count
# Compute the covariance (actually 2nd moment) E[X^T X], averaged over batches.
current_cov = (x.t() @ x) / x.shape[0]
if not hasattr(module, '_cov'):
module._cov = current_cov
else:
module._cov += (current_cov - module._cov) / module._count
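        # Both updates use the streaming-average recurrence
        #   m_k = m_{k-1} + (x_k - m_{k-1}) / k,
        # which equals the arithmetic mean over the k batches seen so far. A quick
        # check (sketch):
        #   xs = torch.randn(5); m = xs[0]
        #   for k in range(2, 6): m = m + (xs[k - 1] - m) / k
        #   assert torch.allclose(m, xs.mean())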
module_dict = dict(teacher_model.named_modules())
for layer_name in layers_to_replace:
module_dict[layer_name].register_forward_pre_hook(hook)
teacher_model.eval()
batch_time = AverageMeter()
end = time.time()
data_iter = enumerate(train_loader)
for batch_idx, (input, _) in data_iter:
input = input.cuda()
with torch.no_grad():
teacher_model(input)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.print_freq == 0:
logger.info('Batch:{0}/{1}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, num_batches, batch_time=batch_time))
        # Stop after exactly max_batches batches (batch_idx is 0-based).
        if args.max_batches is not None and batch_idx + 1 == args.max_batches:
            break
logging.info("Computed teacher intermediates. ")
saved_mean = {layer_name + '.mean': module_dict[layer_name]._mean for layer_name in layers_to_replace}
saved_cov = {layer_name: module_dict[layer_name]._cov for layer_name in layers_to_replace}
with open(f'{args.output_dir}/input_cov.pt', 'wb') as f:
torch.save({**saved_mean, **saved_cov}, f)
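    # Note: the saved covariance entries are raw second moments E[X^T X] (see the
    # comment in the hook above); the centered covariance, if needed downstream,
    # is E[X^T X] - outer(mean, mean).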
def main():
# resnet models are different for imagenet
if args.arch == 'mobilenetv1':
teacher_model = MobileNet(width_mult=args.width)
else:
teacher_model = models.__dict__[args.arch]()
print(teacher_model)
loaded_state_dict = torch.load(args.model_path)['state_dict']
# Strip names to be compatible with Pytorch 1.2, i.e. 'module.conv1.weight' -> 'conv1.weight'
loaded_state_dict = {name.replace('module.', ''): weight for name, weight in loaded_state_dict.items()}
teacher_model.load_state_dict(loaded_state_dict)
teacher_model = teacher_model.cuda()
logging.info(teacher_model)
modules = [name for name, _ in teacher_model.named_modules()]
logging.info(modules)
# filter modules to save inputs and outputs as candidates for
# butterfly replacement
# tradeoff in potentially needing to rerun v. needing to support
# loading and saving more activations
layers_to_replace = args.layers
for layer in layers_to_replace:
assert layer in modules, f"{layer} not in network"
logger.info(layers_to_replace)
# load data
if args.data_backend == 'pytorch':
get_train_loader = get_pytorch_train_loader
elif args.data_backend == 'dali-gpu':
get_train_loader = get_dali_train_loader(dali_cpu=False)
elif args.data_backend == 'dali-cpu':
get_train_loader = get_dali_train_loader(dali_cpu=True)
train_loader, train_loader_len = get_train_loader(args.data, args.batch_size, 1000, False, workers=args.workers, fp16=False)
train_loader._len = train_loader_len
logger.info('Loaded data')
get_teacher_intermediates(teacher_model, train_loader, layers_to_replace)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__': main()
|
butterfly-master
|
cnn/teacher_covariance.py
|
import torch
from collections import OrderedDict
def strip_prefix_if_present(state_dict, prefix):
keys = sorted(state_dict.keys())
if not all(key.startswith(prefix) for key in keys):
return state_dict
stripped_state_dict = OrderedDict()
for key, value in state_dict.items():
stripped_state_dict[key.replace(prefix, "")] = value
return stripped_state_dict
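# Example (sketch): strip_prefix_if_present({'module.conv1.weight': w}, 'module.')
# -> {'conv1.weight': w}; if any key lacks the prefix, the dict is returned unchanged.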
def add_prefix(state_dict, prefix):
keys = sorted(state_dict.keys())
if all(key.startswith(prefix) for key in keys):
return state_dict
stripped_state_dict = OrderedDict()
for key, value in state_dict.items():
stripped_state_dict[prefix + key] = value
return stripped_state_dict
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
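# Typical usage (sketch):
#   meter = AverageMeter()
#   meter.update(loss.item(), n=input.size(0))  # meter.avg is the running weighted average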
class data_prefetcher():
def __init__(self, loader, prefetch=True):
        self.loader, self.prefetch = iter(loader), prefetch
if prefetch:
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
def next(self):
if not self.prefetch:
            input, target = next(self.loader)
            return input.cuda(non_blocking=True), target.cuda(non_blocking=True)
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
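# Typical usage (sketch, mirroring cnn/teacher.py):
#   prefetcher = data_prefetcher(train_loader)
#   input, target = prefetcher.next()
#   while input is not None:
#       ...  # consume the batch
#       input, target = prefetcher.next()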
|
butterfly-master
|
cnn/train_utils.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import io
import argparse, shutil, time, warnings
import subprocess
from pathlib import Path
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
from models.butterfly_conv import ButterflyConv2d, ButterflyConv2dBBT
import logging
from train_utils import AverageMeter, data_prefetcher, strip_prefix_if_present
import models.resnet_imagenet as imagenet_models # only use imagenet models
# model_names = sorted(name for name in models.__dict__
# if name.islower() and not name.startswith("__")
# and callable(models.__dict__[name]))
def get_parser():
parser = argparse.ArgumentParser(description='Save PyTorch ImageNet intermediate activations.')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('--output-dir', type=str,
default=Path.cwd(), help='Directory to save intermediates.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='Print frequency (default: 10)')
parser.add_argument('--layers', type=str, required=True,
                        help='Layers to save inputs and outputs for. '
                             'Use as a list: e.g. layer1,layer2,layer3. '
                             'More layers take longer.')
parser.add_argument('--model-path', type=str, required=True,
help='Path for teacher model.')
    parser.add_argument('--max_batches', type=int, help='Maximum number of batches '
                        'to collect activations for')
parser.add_argument('--dataset', type=str, help='Dataset name for selecting train loader and augmentation.')
return parser
cudnn.benchmark = True
args = get_parser().parse_args()
os.makedirs(args.output_dir, exist_ok=True)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.info(args)
def get_size(input):
# don't include batch size
    if isinstance(input, tuple):
        assert len(input) == 1, "input tuple size greater than 1"
        input_sizes = input[0].size()[1:]
else:
input_sizes = input.size()[1:]
return input_sizes
def get_teacher_intermediates(teacher_model, train_loader, layers_to_replace):
if args.max_batches is not None:
num_batches = args.max_batches
else:
num_batches = len(train_loader)
# define hook to capture intermediate inputs/activations and intermediate outputs
# serialize outputs
# keep dictionary to io objects for different layers
teacher_inputs = {}
teacher_outputs = {}
teacher_input_size = {}
teacher_output_size = {}
# store ids of modules to names -- can't figure out
# other way to get same name of module within hook
name_map = {}
for name, module in teacher_model.named_modules():
name_map[id(module)] = name
batch_size = args.batch_size
# use memory mapping to manage large activations
def make_mmap_file(path, input_size):
view_size = torch.Size([num_batches * args.batch_size]) + input_size
# shared needs to be true for file to be created
return torch.from_file(path, size=int(np.prod(view_size)),
shared=True).view(view_size)
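    # make_mmap_file memory-maps a file via torch.from_file (created on demand with
    # shared=True), so activations far larger than RAM can be streamed to disk
    # batch by batch and reloaded later without holding everything in memory.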
batch_idx = 0
data_idx = 0
# TODO: store only inputs or outputs (otherwise we may be storing duplicate info
# if we already stored neighboring layer)
# won't cause answers to be wrong, but could be wasteful
def hook(module, input, output):
current_batch_size = output.size(0)
mod_id = id(module)
input_size = get_size(input)
output_size = get_size(output)
if mod_id not in teacher_inputs:
teacher_inputs[mod_id] = make_mmap_file(
f'{args.output_dir}/{name_map[mod_id]}_input.pt', input_size)
teacher_outputs[mod_id] = make_mmap_file(
f'{args.output_dir}/{name_map[mod_id]}_output.pt', output_size)
if mod_id not in teacher_input_size:
teacher_input_size[mod_id] = input_size
teacher_output_size[mod_id] = output_size
# save inputs to memory mapped files
# TODO: input always a length-1 tuple?
teacher_inputs[mod_id][data_idx:data_idx+current_batch_size] = input[0].cpu().detach()
teacher_outputs[mod_id][data_idx:data_idx+current_batch_size] = output.cpu().detach()
teacher_model.eval()
for name, module in teacher_model.named_modules():
if name in layers_to_replace:
module.register_forward_hook(hook)
batch_time = AverageMeter()
prefetcher = data_prefetcher(train_loader)
input, _ = prefetcher.next()
end = time.time()
while input is not None:
input = input.cuda()
teacher_model(input)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.print_freq == 0:
logger.info('Batch:{0}/{1}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, num_batches, batch_time=batch_time))
batch_idx += 1
data_idx += input.size(0) # for variable size batches
if args.max_batches is not None and batch_idx == args.max_batches:
break
input, _ = prefetcher.next()
logging.info("Computed teacher intermediates. ")
# write sizes to disk for easy loading of memory maps at a later time
for layer_id in teacher_inputs:
layer_name = name_map[layer_id]
# write sizes
with open(f'{args.output_dir}/{layer_name}_input_sz.pt', 'wb') as f:
input_size = torch.Size([data_idx]) + teacher_input_size[layer_id]
torch.save(input_size, f)
with open(f'{args.output_dir}/{layer_name}_output_sz.pt', 'wb') as f:
output_size = torch.Size([data_idx]) + teacher_output_size[layer_id]
torch.save(output_size, f)
def main():
# resnet models are different for imagenet
if args.dataset == 'imagenet':
teacher_model = imagenet_models.__dict__[args.arch]()
loaded_state_dict = torch.load(args.model_path)['state_dict']
loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.")
else:
teacher_model = models.__dict__[args.arch]()
print(teacher_model)
loaded_state_dict = torch.load(args.model_path)['model']
teacher_model.load_state_dict(loaded_state_dict)
teacher_model.cuda()
    logger.info(teacher_model)
    modules = [name for name, _ in teacher_model.named_modules()]
    logger.info(modules)
    # filter modules whose inputs and outputs we save as candidates for
    # butterfly replacement
    # trade-off: saving fewer layers risks needing to rerun this script,
    # while saving more layers costs extra disk space and I/O
layers_to_replace = [layer.strip() for layer in args.layers.split(',')]
for layer in layers_to_replace:
assert layer in modules, f"{layer} not in network"
logger.info(layers_to_replace)
# load data
# TODO: combine with dataset_utils
if args.dataset == 'imagenet':
traindir = os.path.join(args.data, 'train')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
tensor_tfm = [transforms.ToTensor(), normalize]
# train on standard ImageNet size images
train_dataset = datasets.ImageFolder(
traindir, transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
] + tensor_tfm))
elif args.dataset == 'cifar10':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = datasets.CIFAR10(root=project_root+'/data', train=True, download=True, transform=transform_train)
np_random_state = np.random.get_state() # To get exactly the same training and validation sets
np.random.seed(0)
indices = np.random.permutation(range(len(trainset)))
np.random.set_state(np_random_state)
train_dataset = torch.utils.data.Subset(trainset, indices[:45000])
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
logger.info('Loaded data')
get_teacher_intermediates(teacher_model, train_loader, layers_to_replace)
if __name__ == '__main__': main()
|
butterfly-master
|
cnn/teacher.py
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch import optim
import torch.nn.functional as F
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import dataset_utils
import model_utils
import permutation_utils as perm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# perm_path = 'sinkhorn.samples_40.epochs_400'
# perm_path = 'sinkhorn.was237'
# method = 'sinkhorn'
# perm_path = 'butterfly-samples16-anneal.63-temp.06-lr.0008'
perm_path = 'T5.2'
method = 'butterfly'
if __name__ == '__main__':
# original_dataset = {'name': 'PPCIFAR10', 'batch': 8, 'transform': 'original'}
# permuted_dataset = {'name': 'PPCIFAR10', 'batch': 8, 'transform': 'permute'}
# normalize_dataset = {'name': 'PPCIFAR10', 'batch': 8, 'transform': 'normalize'}
training_dataset = {'name': 'PPCIFAR10', 'batch': 128}
# torch.manual_seed(0)
# orig_train_loader, orig_test_loader = dataset_utils.get_dataset(original_dataset)
# torch.manual_seed(0)
# perm_train_loader, perm_test_loader = dataset_utils.get_dataset(permuted_dataset)
# torch.manual_seed(0)
# norm_train_loader, norm_test_loader = dataset_utils.get_dataset(normalize_dataset)
torch.manual_seed(0)
train_train_loader, train_test_loader = dataset_utils.get_dataset(training_dataset)
def imshow(img, name):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# plt.show()
plt.savefig(name, bbox_inches='tight')
# get some random training images
# torch.manual_seed(0)
# dataiter = iter(orig_train_loader)
# orig_images, labels = dataiter.next()
# torch.manual_seed(0)
# dataiter = iter(perm_train_loader)
# perm_images, _ = dataiter.next()
# torch.manual_seed(0)
# dataiter = iter(norm_train_loader)
# norm_images, _ = dataiter.next()
# torch.manual_seed(0)
    dataiter = iter(train_train_loader)
    train_images, _ = next(dataiter)
# show images
# imshow(torchvision.utils.make_grid(images), 'examples')
perm_path = 'saved_perms/' + perm_path
# model = model_utils.get_model({'name': 'Permutation', 'args': {'method': method, 'stochastic':True, 'param': 'logit', 'temp': 0.1}})
# # model = model_utils.get_model({'name': 'Permutation', 'args': {'method': method}})
# model.load_state_dict(torch.load(perm_path))
# New version:
saved_model = torch.load(perm_path)
model = model_utils.get_model(saved_model['args'])
model.load_state_dict(saved_model['state'])
print(device)
model.to(device)
# data, target = data.to(device), target.to(device)
# permutation_params = filter(lambda p: hasattr(p, '_is_perm_param') and p._is_perm_param, self.model.parameters())
# unstructured_params = filter(lambda p: not (hasattr(p, '_is_perm_param') and p._is_perm_param), self.model.parameters())
optimizer = optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.0)
# TODO: nsamples should be able to be passed into forward pass
for p in model.permute:
p.samples = 64
# breakpoint()
# print(model)
model.train()
# with torch.autograd.profiler.profile() as prof:
train_images = train_images.to(device)
with torch.autograd.profiler.profile(use_cuda=True) as prof:
        for it in range(100):  # renamed from `iter` to avoid shadowing the builtin used above
            print("iter", it)
            optimizer.zero_grad()  # without this, gradients accumulate across iterations
            output = model(train_images)
            H = model.entropy(p='logit')
            loss = perm.tv(output) + 1.0 * H
            loss.backward()
            optimizer.step()
# print("epoch", epoch)
# for data, target in train_train_loader:
# # data, target = data.to(device), target.to(device)
# optimizer.zero_grad()
# # output = model(data)
# H = model.entropy(p='logit')
# loss = perm.tv(output) + 1.0 * H
# loss.backward()
# optimizer.step()
# # breakpoint()
# # self.optimizer.step()
sorted_events = torch.autograd.profiler.EventList(sorted(prof.key_averages(), key=lambda event: event.cuda_time_total, reverse=True))
print(sorted_events)
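    # For a finer-grained view, the profiler object can also emit a Chrome trace
    # (open in chrome://tracing):
    # prof.export_chrome_trace('perm_profile.json')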
# all_images = torch.cat([orig_images, perm_images, norm_images, train_images, sample_output, mean_output, mle_output], dim=0)
# imshow(torchvision.utils.make_grid(all_images), 'all_examples')
|
butterfly-master
|
cnn/profile_perm.py
|
import argparse
import os
import shutil
import time
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
import imagenet.logger as log
from imagenet.smoothing import LabelSmoothing, KnowledgeDistillationLoss
from imagenet.mixup import NLLMultiLabelSmooth, MixUpWrapper
from imagenet.dataloaders import DATA_BACKEND_CHOICES
from imagenet.dataloaders import get_pytorch_train_loader, get_pytorch_val_loader
from imagenet.dataloaders import get_dali_train_loader, get_dali_val_loader
from imagenet.training import ModelAndLoss, get_optimizer, train_loop
from imagenet.training import lr_step_policy, lr_cosine_policy, lr_linear_policy
from imagenet.utils import should_backup_checkpoint, save_checkpoint
def add_parser_arguments(parser):
custom_model_names = ['mobilenetv1', 'shufflenetv1']
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name])) + custom_model_names
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--data-backend', metavar='BACKEND', default='dali-cpu',
choices=DATA_BACKEND_CHOICES)
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
    parser.add_argument('--struct', metavar='STRUCT', default='odo_4',
                        type=str,
                        help='structure for 1x1 conv (default: odo_4)')
    parser.add_argument('--softmax-struct', metavar='SMSTRUCT', default='D',
                        type=str,
                        help='structure for softmax layer (default: D)')
    parser.add_argument('--sm-pooling', metavar='SMPOOL', default=1,
                        type=int,
                        help='pooling before the softmax layer (default: 1)')
    parser.add_argument('--n-struct-layers', default=0, type=int,
                        metavar='NSL', help='number of structured layers (default: 0)')
parser.add_argument('--width', default=1.0, type=float,
metavar='WIDTH', help='Width multiplier of the CNN (default 1.0)')
parser.add_argument('--groups', default=8, type=int,
metavar='GROUPS', help='Group parameter of ShuffleNet (default 8)')
parser.add_argument('--shuffle', default='P', type=str,
metavar='SHUFFLE', help='Type of shuffle (P for usual channel shuffle, odo_1 for butterfly)')
parser.add_argument('--preact', action='store_true',
help='Whether to use pre-activation of ShuffleNet')
parser.add_argument('--distilled-param-path', default='', type=str, metavar='PATH',
help='path to distilled parameters (default: none)')
parser.add_argument('--full-model-path', default='', type=str, metavar='PATH',
help='path to full model checkpoint (default: none)')
parser.add_argument("--temperature", default=1., type=float,
help="Temperature for the softmax temperature.")
parser.add_argument("--alpha-ce", default=0.0, type=float,
help="Linear weight for the distillation loss. Must be >=0.")
parser.add_argument('-j', '--workers', default=5, type=int, metavar='N',
help='number of data loading workers (default: 5)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256) per gpu')
parser.add_argument('--optimizer-batch-size', default=-1, type=int,
metavar='N', help='size of a total batch size, for simulating bigger batches')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr-schedule', default='step', type=str, metavar='SCHEDULE', choices=['step','linear','cosine'])
parser.add_argument('--warmup', default=5, type=int,
metavar='E', help='number of warmup epochs')
parser.add_argument('--label-smoothing', default=0.0, type=float,
metavar='S', help='label smoothing')
parser.add_argument('--mixup', default=0.0, type=float,
metavar='ALPHA', help='mixup alpha')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--structured-momentum', default=0.9, type=float, metavar='M',
help='momentum for structured layers')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
    parser.add_argument('--bn-weight-decay', action='store_true',
                        help='use weight_decay on batch normalization learnable parameters (default: false)')
    parser.add_argument('--nesterov', action='store_true',
                        help='use nesterov momentum (default: false)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained-weights', default='', type=str, metavar='PATH',
help='load weights from here')
parser.add_argument('--fp16', action='store_true',
help='Run model fp16 mode.')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
parser.add_argument('--prof', type=int, default=-1,
help='Run only N iterations')
parser.add_argument('--amp', action='store_true',
help='Run model AMP (automatic mixed precision) mode.')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--seed', default=None, type=int,
help='random seed used for np and pytorch')
parser.add_argument('--gather-checkpoints', action='store_true',
help='Gather checkpoints throughout the training')
    parser.add_argument('--raport-file', default='experiment_raport.json', type=str,
                        help='file in which to store the JSON experiment report')
parser.add_argument('--final-weights', default='model.pth.tar', type=str,
help='file in which to store final model weights')
parser.add_argument('--evaluate', action='store_true', help='evaluate checkpoint/model')
parser.add_argument('--training-only', action='store_true', help='do not evaluate')
parser.add_argument('--no-checkpoints', action='store_false', dest='save_checkpoints')
parser.add_argument('--workspace', type=str, default='./')
def main(args):
exp_start_time = time.time()
global best_prec1
best_prec1 = 0
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
if args.amp and args.fp16:
print("Please use only one of the --fp16/--amp flags")
exit(1)
if args.seed is not None:
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed + args.local_rank)
torch.cuda.manual_seed(args.seed + args.local_rank)
np.random.seed(seed=args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
def _worker_init_fn(id):
np.random.seed(seed=args.seed + args.local_rank + id)
random.seed(args.seed + args.local_rank + id)
else:
def _worker_init_fn(id):
pass
if args.fp16:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
if args.static_loss_scale != 1.0:
if not args.fp16:
print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
if args.optimizer_batch_size < 0:
batch_size_multiplier = 1
else:
tbs = args.world_size * args.batch_size
if args.optimizer_batch_size % tbs != 0:
print("Warning: simulated batch size {} is not divisible by actual batch size {}".format(args.optimizer_batch_size, tbs))
        batch_size_multiplier = int(args.optimizer_batch_size / tbs)
print("BSM: {}".format(batch_size_multiplier))
pretrained_weights = None
if args.pretrained_weights:
if os.path.isfile(args.pretrained_weights):
print("=> loading pretrained weights from '{}'".format(args.pretrained_weights))
pretrained_weights = torch.load(args.pretrained_weights)
        else:
            print("=> no pretrained weights found at '{}'".format(args.pretrained_weights))
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model_state = checkpoint['state_dict']
# Strip names to be compatible with Pytorch 1.2, i.e. 'module.conv1.weight' -> 'conv1.weight'
model_state = {name.replace('module.', ''): weight for name, weight in model_state.items()}
optimizer_state = checkpoint['optimizer']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
model_state = None
optimizer_state = None
else:
model_state = None
optimizer_state = None
loss = nn.CrossEntropyLoss
if args.mixup > 0.0:
loss = lambda: NLLMultiLabelSmooth(args.label_smoothing)
elif args.label_smoothing > 0.0:
loss = lambda: LabelSmoothing(args.label_smoothing)
if args.alpha_ce > 0.0:
loss_og = loss()
loss = lambda: KnowledgeDistillationLoss(loss_og, args.temperature, args.alpha_ce)
model_and_loss = ModelAndLoss(
args.arch,
loss,
pretrained_weights=pretrained_weights,
cuda = True, fp16 = args.fp16,
width=args.width, n_struct_layers=args.n_struct_layers,
struct=args.struct, softmax_struct=args.softmax_struct, sm_pooling=args.sm_pooling,
groups=args.groups, shuffle=args.shuffle)
if args.arch == 'mobilenetv1' and args.distilled_param_path:
model_state = model_and_loss.model.mixed_model_state_dict(args.full_model_path, args.distilled_param_path)
if args.alpha_ce > 0.0:
teacher_model_and_loss = ModelAndLoss(
args.arch,
loss,
pretrained_weights=pretrained_weights,
cuda = True, fp16 = args.fp16,
width=args.width, n_struct_layers=0)
checkpoint = torch.load(args.full_model_path, map_location = lambda storage, loc: storage.cuda(args.gpu))
teacher_model_state = checkpoint['state_dict']
# Strip names to be compatible with Pytorch 1.2, i.e. 'module.conv1.weight' -> 'conv1.weight'
teacher_model_state = {name.replace('module.', ''): weight for name, weight in teacher_model_state.items()}
teacher_model_and_loss.model.load_state_dict(teacher_model_state)
model_and_loss._teacher_model = teacher_model_and_loss.model
# Create data loaders and optimizers as needed
if args.data_backend == 'pytorch':
get_train_loader = get_pytorch_train_loader
get_val_loader = get_pytorch_val_loader
elif args.data_backend == 'dali-gpu':
get_train_loader = get_dali_train_loader(dali_cpu=False)
get_val_loader = get_dali_val_loader()
elif args.data_backend == 'dali-cpu':
get_train_loader = get_dali_train_loader(dali_cpu=True)
get_val_loader = get_dali_val_loader()
train_loader, train_loader_len = get_train_loader(args.data, args.batch_size, 1000, args.mixup > 0.0, workers=args.workers, fp16=args.fp16)
if args.mixup != 0.0:
train_loader = MixUpWrapper(args.mixup, 1000, train_loader)
val_loader, val_loader_len = get_val_loader(args.data, args.batch_size, 1000, False, workers=args.workers, fp16=args.fp16)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger = log.Logger(
args.print_freq,
[
log.JsonBackend(os.path.join(args.workspace, args.raport_file), log_level=1),
log.StdOut1LBackend(train_loader_len, val_loader_len, args.epochs, log_level=0),
])
for k, v in args.__dict__.items():
logger.log_run_tag(k, v)
else:
logger = None
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
optimizer = get_optimizer(list(model_and_loss.model.named_parameters()),
args.fp16, args.lr, args.momentum, args.structured_momentum, args.weight_decay,
nesterov = args.nesterov,
bn_weight_decay = args.bn_weight_decay,
state=optimizer_state,
static_loss_scale = args.static_loss_scale,
dynamic_loss_scale = args.dynamic_loss_scale)
if args.lr_schedule == 'step':
lr_policy = lr_step_policy(args.lr, [30,60,80], 0.1, args.warmup, train_loader_len, logger=logger)
elif args.lr_schedule == 'cosine':
lr_policy = lr_cosine_policy(args.lr, args.warmup, args.epochs, train_loader_len, logger=logger)
elif args.lr_schedule == 'linear':
lr_policy = lr_linear_policy(args.lr, args.warmup, args.epochs, train_loader_len, logger=logger)
if args.amp:
model_and_loss, optimizer = amp.initialize(
model_and_loss, optimizer,
opt_level="O1",
loss_scale="dynamic" if args.dynamic_loss_scale else args.static_loss_scale)
if args.distributed:
model_and_loss.distributed()
model_and_loss.load_model_state(model_state)
train_loop(
model_and_loss, optimizer,
lr_policy,
train_loader, val_loader, args.epochs,
args.fp16, logger, should_backup_checkpoint(args), args.print_freq, use_amp=args.amp,
batch_size_multiplier = batch_size_multiplier,
start_epoch = args.start_epoch, best_prec1 = best_prec1, prof=args.prof,
skip_training = args.evaluate, skip_validation = args.training_only,
save_checkpoints=args.save_checkpoints and not args.evaluate, checkpoint_dir=args.workspace)
exp_duration = time.time() - exp_start_time
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.end()
print("Experiment ended")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
add_parser_arguments(parser)
args = parser.parse_args()
cudnn.benchmark = True
main(args)
|
butterfly-master
|
cnn/imagenet_main.py
|
import pickle
import json
from pathlib import Path
import numpy as np
butterfly_bleu = [32.99, 33.8, 34.32, 34.3, 34.23, 34.1]
lr_bleu = [30.05, 32.71, 33.6, 33.08, 34.15, 34.3]
sparse_bleu = [34.08, 34.31, 34.39, 34.49, 34.586667, 34.3]
param_fraction = np.array([9, 18, 36, 72, 108, 128]) / 128
import matplotlib.pyplot as plt
plt.switch_backend('agg')
markers = ['o', 'v', 'D', 'p', 's', '>']
colors = ['red', 'orange', 'green', 'blue', 'black']
plt.plot(param_fraction, butterfly_bleu, marker=markers[0], color=colors[0], label='Butterfly')
plt.plot(param_fraction, lr_bleu, marker=markers[1], color=colors[1], label='Low-rank')
plt.plot(np.concatenate(([4.375/128], param_fraction)), np.concatenate(([32.84666666667], sparse_bleu)), marker=markers[3], color=colors[3], linestyle=':', label='Sparse (values only)')
sparse_indices = np.concatenate(([9/128], param_fraction[:-2]*2 + 1/512))
sparse_indices_bleu = np.concatenate(([32.84666666667], sparse_bleu[:-2]))
plt.plot(sparse_indices, sparse_indices_bleu, marker=markers[3], color='purple', label='Sparse (values+indices)') # TODO make sure correct. Note last point omitted for aesthetics
# plt.plot(mobilenet_numparams / toeplitzlike_numparams[1:], all_accuracy[2, 1:], marker=markers[2], color=colors[2], label='Toeplitz-like')
# plt.plot(mobilenet_numparams / resnet_numparams, resnet_accuracy, marker=markers[3], color=colors[3], label='Resnet18')
# plt.plot(1, butterfly_bleu, marker=markers[4], color=colors[4], label='MobileNet')
# plt.axhline(butterfly_bleu, color='black', linewidth=3)
ax = plt.gca()
# ax.text(0.55, butterfly_bleu + 0.005, 'MobileNet 3.2M', color=colors[4])
# ax.text(0.55 * mobilenet_numparams / resnet_numparams, resnet_accuracy - 0.0075, 'ResNet18 11M', color=colors[3])
# ax.text(0.55 * mobilenet_numparams / all_params[6], all_accuracy[0, 6] + 0.005, 'Structured 0.6M', color=colors[0])
# plt.xscale('log')
plt.xlabel('Fraction of query/key projection params vs. standard attention', fontsize=14)
plt.ylabel('Test BLEU', fontsize=14)
# plt.xticks([0.3, 1, 2, 5, 10, 25], [0.3, 1, 2, 5, 10, 25])
# plt.xticks([1, 2, 5, 10, 25], [1, 2, 5, 10, 25])
plt.xlim([0, 1.19])
plt.ylim([29.75, 35])
# plt.legend(['Butterfly', 'Circulant', 'Toeplitz-like'])
plt.legend(['Kaleidoscope', 'Low-rank', 'Sparse (values only)', 'Sparse (values+indices)'])
plt.savefig('transformer_compression.pdf', bbox_inches='tight')
|
butterfly-master
|
cnn/transformer_analysis.py
|
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
from mobilenet_imagenet import MobileNet
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
from imagenet.dataloaders import *
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name])) + ['mobilenetv1', 'mobilenetv1_struct']
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--data-backend', metavar='BACKEND', default='dali-cpu',
choices=DATA_BACKEND_CHOICES)
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('--struct', metavar='STRUCT', default='odo_4',
                    type=str,
                    help='structure for 1x1 conv (default: odo_4)')
parser.add_argument('--softmax-struct', metavar='SMSTRUCT', default='D',
                    type=str,
                    help='structure for softmax layer (default: D)')
parser.add_argument('--n-struct-layers', default=7, type=int,
                    metavar='NSL', help='number of structured layers (default: 7)')
parser.add_argument('--width', default=1.0, type=float,
metavar='WIDTH', help='Width multiplier of the CNN (default 1.0)')
parser.add_argument('--distilled-param-path', default='', type=str, metavar='PATH',
help='path to distilled parameters (default: none)')
parser.add_argument('--full-model-path', default='', type=str, metavar='PATH',
help='path to full model checkpoint (default: none)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate. Will be scaled by <global batch size>/256: args.lr = args.lr*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--lr-multiplier', default=1.0, type=float,
metavar='LR_MULT', help='Learning rate multiplier for structured parameters')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--scaled-wd', dest='scaled_wd', action='store_true',
help='whether to scale the weight decay by the compression ratio wrt MobileNet')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--prof', default=-1, type=int,
help='Only run 10 iterations for profiling.')
parser.add_argument('--deterministic', action='store_true')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
cudnn.benchmark = True
best_prec1 = 0
args = parser.parse_args()
print("opt_level = {}".format(args.opt_level))
print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), type(args.keep_batchnorm_fp32))
print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
print("\nCUDNN VERSION: {}\n".format(torch.backends.cudnn.version()))
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.local_rank)
torch.set_printoptions(precision=10)
def main():
global best_prec1, args
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
if args.arch == 'mobilenetv1':
model = MobileNet(width_mult=args.width)
elif args.arch == 'mobilenetv1_struct':
model = MobileNet(width_mult=args.width, structure=[args.struct] * args.n_struct_layers,
softmax_structure=args.softmax_struct)
if args.distilled_param_path:
model.load_state_dict(model.mixed_model_state_dict(args.full_model_path, args.distilled_param_path))
else:
model = models.__dict__[args.arch]()
if args.local_rank == 0:
print(model)
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model = model.cuda()
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
params = list(filter(lambda p: p.requires_grad, model.parameters()))
structured_params = list(filter(lambda p: getattr(p, '_is_structured', False), params))
no_wd_params = list(filter(lambda p: getattr(p, '_no_wd', False), params))
unstructured_params = list(filter(lambda p: not (getattr(p, '_is_structured', False))
and not getattr(p, '_no_wd', False), params))
if args.scaled_wd:
# Scale by the ratio between the number of params vs number of params of MobileNet
args.weight_decay *= sum(p.numel() for p in unstructured_params) / 4.2e6
params_dict = [{'params': structured_params, 'weight_decay': 0.0, 'lr_multiplier': args.lr_multiplier},
{'params': no_wd_params, 'weight_decay': 0.0},
{'params': unstructured_params}]
optimizer = torch.optim.SGD(params_dict, args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Initialize Amp. Amp accepts either values or strings for the optional override arguments,
# for convenient interoperation with argparse.
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
# For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
# This must be done AFTER the call to amp.initialize. If model = DDP(model) is called
# before model, ... = amp.initialize(model, ...), the call to amp.initialize may alter
# the types of model's parameters in a way that disrupts or destroys DDP's allreduce hooks.
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# model = DDP(model)
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# Optionally resume from a checkpoint
if args.resume:
# Use a local scope to avoid dangling references
        def resume():
            global best_prec1  # without this, the assignment below only sets a dead local
            if os.path.isfile(args.resume):
                print("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
                args.start_epoch = checkpoint['epoch']
                best_prec1 = checkpoint['best_prec1']
# Strip names to be compatible with Pytorch 1.2, i.e. 'module.conv1.weight' -> 'conv1.weight'
# model.load_state_dict(checkpoint['state_dict'])
model.load_state_dict({name.replace('module.', ''): weight for name, weight in checkpoint['state_dict'].items()})
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
resume()
# Data loading code
    if args.arch == "inception_v3":
raise RuntimeError("Currently, inception_v3 is not supported by this example.")
# crop_size = 299
# val_size = 320 # I chose this value arbitrarily, we can adjust.
else:
crop_size = 224
val_size = 256
if args.data_backend == 'pytorch':
get_train_loader = get_pytorch_train_loader
get_val_loader = get_pytorch_val_loader
elif args.data_backend == 'dali-gpu':
get_train_loader = get_dali_train_loader(dali_cpu=False)
get_val_loader = get_dali_val_loader()
elif args.data_backend == 'dali-cpu':
get_train_loader = get_dali_train_loader(dali_cpu=True)
get_val_loader = get_dali_val_loader()
train_loader, train_loader_len = get_train_loader(args.data, args.batch_size, 1000, False, workers=args.workers, fp16=False)
val_loader, val_loader_len = get_val_loader(args.data, args.batch_size, 1000, False, workers=args.workers, fp16=False)
train_loader._len = train_loader_len
val_loader._len = val_loader_len
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
if args.local_rank == 0:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
data_iter = enumerate(train_loader)
for i, (input, target) in data_iter:
if args.prof >= 0 and i == args.prof:
print("Profiling begun at iteration {}".format(i))
torch.cuda.cudart().cudaProfilerStart()
if args.prof >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
adjust_learning_rate(optimizer, epoch, i, train_loader._len)
# compute output
if args.prof >= 0: torch.cuda.nvtx.range_push("forward")
output = model(input)
if args.prof >= 0: torch.cuda.nvtx.range_pop()
loss = criterion(output, target)
# compute gradient and do SGD step
optimizer.zero_grad()
if args.prof >= 0: torch.cuda.nvtx.range_push("backward")
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
# for param in model.parameters():
# print(param.data.double().sum().item(), param.grad.data.double().sum().item())
if args.prof >= 0: torch.cuda.nvtx.range_push("optimizer.step()")
optimizer.step()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
if i%args.print_freq == 0:
# Every print_freq iterations, check the loss, accuracy, and speed.
# For best performance, it doesn't make sense to print these metrics every
# iteration, since they incur an allreduce and some host<->device syncs.
# Measure accuracy
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
# Average loss and accuracy across processes for logging
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
# to_python_float incurs a host<->device sync
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
torch.cuda.synchronize()
batch_time.update((time.time() - end)/args.print_freq)
end = time.time()
if args.local_rank == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, train_loader._len,
args.world_size*args.batch_size/batch_time.val,
args.world_size*args.batch_size/batch_time.avg,
batch_time=batch_time,
loss=losses, top1=top1, top5=top5))
if args.prof >= 0: torch.cuda.nvtx.range_push("prefetcher.next()")
if args.prof >= 0: torch.cuda.nvtx.range_pop()
# Pop range "Body of iteration {}".format(i)
if args.prof >= 0: torch.cuda.nvtx.range_pop()
if args.prof >= 0 and i == args.prof + 10:
print("Profiling ended at iteration {}".format(i))
torch.cuda.cudart().cudaProfilerStop()
quit()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
data_iter = enumerate(val_loader)
for i, (input, target) in data_iter:
# compute output
with torch.no_grad():
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# TODO: Change timings to mirror train().
if args.local_rank == 0 and i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, val_loader._len,
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 30
if epoch >= 80:
factor = factor + 1
lr = args.lr*(0.1**factor)
"""Warmup"""
if epoch < 5:
lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)
# if(args.local_rank == 0):
# print("epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr * param_group.get('lr_multiplier', 1.0)
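# With the default base lr of 0.1 (before the global-batch-size scaling in main),
# this yields: linear warmup over epochs 0-4, then 0.1 for epochs 5-29,
# 0.01 for 30-59, 0.001 for 60-79, and 1e-4 from epoch 80 on.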
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
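# Worked example: output = [[0.1, 0.9], [0.8, 0.2]] with target = [1, 0] gives
# pred = [[1], [0]], both correct, so accuracy(output, target, topk=(1,)) -> [tensor([100.])].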
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
|
butterfly-master
|
cnn/imagenet_amp.py
|
'''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
from butterfly import Butterfly
from cnn.models.low_rank_conv import LowRankConv2d
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
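# Worked example: _make_divisible(32 * 0.75, 8) rounds 24.0 to the nearest
# multiple of 8 -> 24, and 24 >= 0.9 * 24, so 24 is returned unchanged.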
class Butterfly1x1Conv(Butterfly):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
"""
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w) if real or (batch, c, h, w, 2) if complex
Return:
output: (batch, nstack * c, h, w) if real or (batch, nstack * c, h, w, 2) if complex
"""
batch, c, h, w = input.shape
input_reshape = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c)
output = super().forward(input_reshape)
return output.view(batch, h * w, self.nstack * c).transpose(1, 2).view(batch, self.nstack * c, h, w)
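    # Usage sketch (constructor arguments as used elsewhere in this repo; the
    # output has nstack * in_channels channels):
    # conv = Butterfly1x1Conv(64, 128, bias=False, tied_weight=False,
    #                         ortho_init=True, param='odo', nblocks=1)
    # y = conv(torch.randn(8, 64, 16, 16))  # -> (8, 128, 16, 16)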
class Block(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1, structure='D'):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
self.conv1.weight._no_wd = True
self.bn1 = nn.BatchNorm2d(in_planes)
self.bn1.weight._no_wd = True
self.bn1.bias._no_wd = True
if structure == 'D':
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
elif structure.startswith('LR'):
odo_nblocks = int(structure.split('_')[1])
rank = int(odo_nblocks * math.log2(out_planes) / 2)
self.conv2 = LowRankConv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False, rank=rank)
else:
param = structure.split('_')[0]
nblocks = 0 if len(structure.split('_')) <= 1 else int(structure.split('_')[1])
self.residual = False if len(structure.split('_')) <= 2 else (structure.split('_')[2] == 'res')
# self.residual = self.residual and in_planes == out_planes
self.conv2 = Butterfly1x1Conv(in_planes, out_planes, bias=False, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks)
self.bn2 = nn.BatchNorm2d(out_planes)
self.bn2.weight._no_wd = True
self.bn2.bias._no_wd = True
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
if not getattr(self, 'residual', False):
out = F.relu(self.bn2(self.conv2(out)), inplace=True)
else:
prev = out
out = self.conv2(out)
if out.shape[1] == 2 * prev.shape[1]:
b, c, h, w = prev.shape
out = (out.reshape(b, 2, c, h, w) + prev.reshape(b, 1, c, h, w)).reshape(b, 2 * c, h, w)
else:
out = out + prev
out = F.relu(self.bn2(out), inplace=True)
return out
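    # When conv2 doubles the channel count (nstack == 2), the residual branch above
    # broadcasts the input across both stacked copies of the channels before adding.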
class MobileNet(nn.Module):
# (128,2) means conv planes=128, conv stride=2, by default conv stride=1
cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
def __init__(self, num_classes=1000, width_mult=1.0, round_nearest=8, structure=None, softmax_structure='D', sm_pooling=1):
"""
structure: list of string
"""
super(MobileNet, self).__init__()
self.width_mult = width_mult
self.round_nearest = round_nearest
self.structure = [] if structure is None else structure
self.n_structure_layer = len(self.structure)
self.structure = ['D'] * (len(self.cfg) - self.n_structure_layer) + self.structure
self.sm_pooling = sm_pooling
input_channel = _make_divisible(32 * width_mult, round_nearest)
self.conv1 = nn.Conv2d(3, input_channel, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(input_channel)
self.bn1.weight._no_wd = True
self.bn1.bias._no_wd = True
self.layers = self._make_layers(in_planes=input_channel)
self.last_channel = _make_divisible(1024 * width_mult // sm_pooling, round_nearest)
if softmax_structure == 'D':
self.linear = nn.Linear(self.last_channel, num_classes)
else:
param = softmax_structure.split('_')[0]
nblocks = 0 if len(softmax_structure.split('_')) <= 1 else int(softmax_structure.split('_')[1])
self.linear = Butterfly(self.last_channel, num_classes, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks)
def _make_layers(self, in_planes):
layers = []
for x, struct in zip(self.cfg, self.structure):
out_planes = _make_divisible((x if isinstance(x, int) else x[0]) * self.width_mult, self.round_nearest)
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(in_planes, out_planes, stride, structure=struct))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
out = self.layers(out)
out = out.mean([2, 3])
if self.sm_pooling != 1:
b, n = out.shape
out = out.reshape(b, self.sm_pooling, n // self.sm_pooling).mean(1)
out = self.linear(out)
return out
def mixed_model_state_dict(self, full_model_path, distilled_param_path):
current_state_dict_keys = self.state_dict().keys()
full_model_state_dict = torch.load(full_model_path, map_location='cpu')['state_dict']
full_model_state_dict = {name.replace('module.', ''): param for name, param in full_model_state_dict.items()}
distilled_params = torch.load(distilled_param_path, map_location='cpu')
state_dict = {name: param for name, param in full_model_state_dict.items() if name in current_state_dict_keys}
for i, struct in enumerate(self.structure):
# Only support butterfly for now
if struct.startswith('odo') or struct.startswith('regular'):
layer = f'layers.{i}.conv2'
nblocks = int(struct.split('_')[1])
structured_param = distilled_params[layer, nblocks]
state_dict.update({layer + '.' + name: param for name, param in structured_param.items()})
return state_dict
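    # Usage sketch (paths are placeholders): build a MobileNet whose butterfly
    # layers come from distilled parameters and whose remaining weights come
    # from a full-model checkpoint:
    # net = MobileNet(structure=['odo_4'] * 7)
    # net.load_state_dict(net.mixed_model_state_dict('full.pth.tar', 'distilled.pt'))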
def test():
net = MobileNet()
x = torch.randn(1,3,32,32)
y = net(x)
print(y.size())
# test()
|
butterfly-master
|
cnn/mobilenet_imagenet.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
def get_dataset(config_dataset):
if config_dataset['name'] == 'CIFAR10':
normalize = transforms.Normalize(
mean=[0.49139765, 0.48215759, 0.44653141],
std=[0.24703199, 0.24348481, 0.26158789]
)
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
normalize,
])
trainset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=True, transform=transform_train)
validset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=False, transform=transform_test)
testset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=False, download=True, transform=transform_test)
np_random_state = np.random.get_state() # To get exactly the same training and validation sets
np.random.seed(0)
indices = np.random.permutation(range(len(trainset)))
np.random.set_state(np_random_state)
trainset = torch.utils.data.Subset(trainset, indices[:45000])
# trainset = torch.utils.data.Subset(trainset, indices[:5000])
validset = torch.utils.data.Subset(validset, indices[-5000:])
trainloader = torch.utils.data.DataLoader(trainset, batch_size=config_dataset['batch'], shuffle=True, num_workers=2)
validloader = torch.utils.data.DataLoader(validset, batch_size=config_dataset['batch'], shuffle=False, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
return trainloader, validloader, testloader
elif config_dataset['name'] == 'TinyImageNet':
# Copied from https://github.com/tjmoon0104/pytorch-tiny-imagenet/blob/851b65f1843d4c9a7d0e737d4743254e0ee6c107/ResNet18_64.ipynb
normalize = transforms.Normalize(
mean=[0.4802, 0.4481, 0.3975],
std=[0.2302, 0.2265, 0.2262]
)
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]),
'val': transforms.Compose([transforms.ToTensor(), normalize,]),
'test': transforms.Compose([transforms.ToTensor(), normalize,])
}
data_dir = project_root + '/data/tiny-imagenet-200'
datasets = {x: torchvision.datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
for x in ['train', 'val','test']}
num_workers = {'train': 8, 'val': 4, 'test': 4}
dataloaders = {x: torch.utils.data.DataLoader(datasets[x], batch_size=config_dataset['batch'],
shuffle=True, num_workers=num_workers[x])
for x in ['train', 'val', 'test']}
        return dataloaders['train'], dataloaders['val'], dataloaders['test']
else:
assert False, 'Dataset not implemented'
def get_mmap_files(traindir, layer):
input_size = torch.load(f'{traindir}/{layer}_input_sz.pt')
output_size = torch.load(f'{traindir}/{layer}_output_sz.pt')
teacher_input = torch.from_file(f'{traindir}/{layer}_input.pt',
size=int(np.prod(input_size))).view(input_size)
teacher_output = torch.from_file(f'{traindir}/{layer}_output.pt',
size=int(np.prod(output_size))).view(output_size)
return teacher_input, teacher_output
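# Example (assuming cnn/teacher.py has saved activations for this layer under
# a directory such as the placeholder below):
# teacher_in, teacher_out = get_mmap_files('mobilenetv1/acts', 'layers.6.conv2')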
|
butterfly-master
|
cnn/dataset_utils.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import math
from pathlib import Path
import copy  # used by TrainableDistillCovModel.polish for deepcopy
import pickle
import random
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from
from ray.tune.schedulers import AsyncHyperBandScheduler
import model_utils
import dataset_utils
import models
from mobilenet_imagenet import MobileNet, Butterfly1x1Conv
# from models.butterfly_conv import ButterflyConv2d, ButterflyConv2dBBT
N_LBFGS_STEPS_VALIDATION = 50
class TrainableDistillCovModel(Trainable):
"""Trainable object for a Pytorch model, to be used with Ray's Hyperband tuning.
"""
def _setup(self, config):
model_args = config['model_args']
device = config['device']
self.device = device
torch.manual_seed(config['seed'])
if self.device == 'cuda':
torch.cuda.manual_seed(config['seed'])
self.layer = model_args['layer']
# Load teacher model and weight
if config['dataset'] == 'cifar10':
teacher_model = models.__dict__[config['teacher_model']]()
elif config['dataset'] == 'imagenet':
assert config['teacher_model'].startswith('mobilenetv1')
width_mult = 1.0
if len(config['teacher_model'].split('_')) >= 2:
width_mult = float(config['teacher_model'].split('_')[1])
teacher_model = MobileNet(width_mult=width_mult)
teacher_model = teacher_model.to(self.device)
loaded_state_dict = torch.load(config['teacher_model_path'], map_location=self.device)['state_dict']
# Strip names to be compatible with Pytorch 1.2, i.e. 'module.conv1.weight' -> 'conv1.weight'
loaded_state_dict = {name.replace('module.', ''): weight for name, weight in loaded_state_dict.items()}
teacher_model.load_state_dict(loaded_state_dict)
module_dict = dict(teacher_model.named_modules())
assert model_args['layer'] in module_dict, f"{model_args['layer']} not in network"
# get parameters from layer to replace to use in butterfly
teacher_module = module_dict[self.layer]
try:
in_channels = teacher_module.in_channels
out_channels = teacher_module.out_channels
# kernel_size = teacher_module.kernel_size
# stride = teacher_module.stride
# padding = teacher_module.padding
        except AttributeError:
            raise ValueError("Only convolutional layers are currently supported.")
# create butterfly for specific layer and train
if model_args['structure_type'] == 'B':
param = model_args['param']
self.residual = param.endswith('res')
param = param.replace('res', '')
self.student_module = Butterfly1x1Conv(in_channels, out_channels,
bias=False, tied_weight=model_args['tied_weight'], ortho_init=True,
param=param, nblocks=model_args['nblocks'])
self.student_module = self.student_module.to(device)
if config['objective'] == 'frob':
self.input = torch.eye(in_channels, device=self.device)
else: # config['objective'] == 'cov'
input_cov = torch.load(config['input_cov_path'], map_location='cpu')[self.layer].numpy()
Sigma, U = np.linalg.eigh(input_cov)
# Normalized so that each entry of sqrt of input_cov has magnitude about 1.0
# iid standard Gaussian has spectral norm about sqrt(in_channels) + sqrt(out_channels) (Bai-Yin's law)
# So we normalize the eigenvalues of input_cov to have size (sqrt(in_channels) + sqrt(out_channels))^2.
# Sigma *= (math.sqrt(in_channels) + math.sqrt(out_channels)) ** 2 / Sigma.max()
Sigma = Sigma.clip(0) # avoid small negative eigenvalues
self.input = torch.tensor(np.diag(np.sqrt(Sigma)) @ U.T, dtype=torch.float, device=self.device)
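            # Sanity check of the construction: with input_cov = U diag(Sigma) U^T,
            # A = diag(sqrt(Sigma)) @ U.T satisfies A.T @ A = input_cov, so feeding the
            # rows of A through the layer reproduces the second moments induced by the
            # input covariance.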
with torch.no_grad():
self.input = self.input.reshape(in_channels, in_channels, 1, 1) # To be compatible with conv2d
self.target = teacher_module(self.input)
# Normalize input so that output has MSE 1.0
self.input /= math.sqrt((self.target ** 2).mean())
self.target = teacher_module(self.input)
if config['optimizer'] == 'Adam':
self.optimizer = optim.Adam(self.student_module.parameters(), lr=config['lr'])
else:
self.optimizer = optim.SGD(self.student_module.parameters(), lr=config['lr'], momentum=config['momentum'])
self.n_steps_per_epoch = config['n_steps_per_epoch']
self.n_epochs_per_validation = config['n_epochs_per_validation']
def loss(self):
output = self.student_module(self.input)
if getattr(self, 'residual', False):
if output.shape[1] == 2 * self.input.shape[1]:
b, c, h, w = self.input.shape
output = (output.reshape(b, 2, c, h, w) + self.input.reshape(b, 1, c, h, w)).reshape(b, 2 * c, h, w)
else:
output = output + self.input
return F.mse_loss(output, self.target)
def _train(self):
self.student_module.train()
for _ in range(self.n_steps_per_epoch):
self.optimizer.zero_grad()
loss = self.loss()
loss.backward()
self.optimizer.step()
loss = loss.item()
if (self._iteration + 1) % self.n_epochs_per_validation == 0:
loss = min(loss, self.polish(N_LBFGS_STEPS_VALIDATION, save_to_self_model=True))
return {'mean_loss': loss}
def polish(self, nmaxsteps=50, patience=5, threshold=1e-8, save_to_self_model=False):
if not save_to_self_model:
student_module_bak = self.student_module
self.student_module = copy.deepcopy(self.student_module)
optimizer = optim.LBFGS(filter(lambda p: p.requires_grad, self.student_module.parameters()),
tolerance_grad=1e-7, # Pytorch 1.2 sets this too high https://github.com/pytorch/pytorch/pull/25240
line_search_fn='strong_wolfe')
def closure():
optimizer.zero_grad()
loss = self.loss()
loss.backward()
return loss
n_bad_steps = 0
best_loss = float('inf')
for i in range(nmaxsteps):
loss = optimizer.step(closure)
if loss.item() < best_loss - threshold:
best_loss = loss.item()
n_bad_steps = 0
else:
n_bad_steps += 1
# print(loss.item())
if n_bad_steps > patience:
break
if not save_to_self_model:
self.student_module = student_module_bak
return loss.item()
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir, "model_optimizer.pth")
state = {'student_module': self.student_module.state_dict(),
'optimizer': self.optimizer.state_dict()
}
torch.save(state, checkpoint_path)
return checkpoint_path
def _restore(self, checkpoint_path):
if hasattr(self, 'device'):
checkpoint = torch.load(checkpoint_path, self.device)
else:
checkpoint = torch.load(checkpoint_path)
self.student_module.load_state_dict(checkpoint['student_module'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
ex = Experiment('ImageNet covariance distillation_experiment')
ex.observers.append(FileStorageObserver.create('distill_cov_logs'))
slack_config_path = Path('../config/slack.json') # Add webhook_url there for Slack notification
# if slack_config_path.exists():
# ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.config
def default_config():
model_args = {'structure_type': 'B',
'nblocks': 1,
'param': 'odo',
'tied_weight': False,
'layer': 'layers.6.conv2'} # Arguments to be passed to the model, as a dictionary
objective = 'cov' # 'cov' means minimize wrt input covariance, 'frob' means minimize wrt identity input
optimizer = 'SGD' # Which optimizer to use, either Adam or SGD
ntrials = 20 # Number of trials for hyperparameter tuning
nmaxepochs = 100 # Maximum number of epochs
result_dir = project_root + '/cnn/distill_cov_results' # Directory to store results
cuda = torch.cuda.is_available() # Whether to use GPU
smoke_test = False # Finish quickly for testing
dataset = 'imagenet'
teacher_model = 'mobilenetv1_0.5'
teacher_model_path = project_root + '/cnn/' + teacher_model + '/checkpoint.pth.tar'
input_cov_path = project_root + '/cnn/' + teacher_model + '/input_cov.pt'
min_lr = 1e-4
max_lr = 1e-2
grace_period = 10
momentum = 0.9
nsteps = 2000 # Number of steps per epoch
nepochsvalid = nmaxepochs # Frequency of validation (polishing), in terms of epochs
@ex.capture
def distillation_experiment(model_args, objective, optimizer, ntrials, result_dir,
cuda, smoke_test, teacher_model, teacher_model_path,
input_cov_path, dataset, min_lr, max_lr, momentum,
nsteps, nepochsvalid):
# config={'objective': objective, 'optimizer': optimizer, 'lr': 0.001, 'seed': 42, 'device': 'cuda', 'model_args': dict(model_args), 'teacher_model': teacher_model, 'teacher_model_path': teacher_model_path, 'input_cov_path': input_cov_path, 'dataset': dataset, 'momentum': momentum, 'n_steps_per_epoch': nsteps, 'n_epochs_per_validation': nepochsvalid,}
assert optimizer in ['Adam', 'SGD'], 'Only Adam and SGD are supported'
config={
'objective': objective,
'optimizer': optimizer,
        # Sample the learning rate log-uniformly in [min_lr, max_lr]
        'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(min_lr), math.log(max_lr)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'device': 'cuda' if cuda else 'cpu',
        'model_args': dict(model_args),  # Need to copy since Sacred creates a read-only dict
'teacher_model': teacher_model,
'teacher_model_path': teacher_model_path,
'input_cov_path': input_cov_path,
'dataset': dataset,
'momentum': momentum,
'n_steps_per_epoch': nsteps,
'n_epochs_per_validation': nepochsvalid,
}
model_args_print = '_'.join([f'{key}_{value}' for key,value in model_args.items()])
experiment = RayExperiment(
name=f'{teacher_model}_{objective}_{model_args_print}_{optimizer}',
run=TrainableDistillCovModel,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
checkpoint_freq=1000, # Just to enable recovery with @max_failures
max_failures=-1,
resources_per_trial={'cpu': 2, 'gpu': 0.5 if cuda else 0},
stop={"training_iteration": 1 if smoke_test else 9999},
config=config,
)
return experiment
@ex.automain
def run(model_args, objective, teacher_model, result_dir, nmaxepochs, grace_period):
experiment = distillation_experiment()
try:
with open('../config/redis_address', 'r') as f:
address = f.read().strip()
ray.init(redis_address=address)
    except Exception:
ray.init()
ahb = AsyncHyperBandScheduler(metric='mean_loss', mode='min', grace_period=grace_period,
max_t=nmaxepochs)
# reduction_factor=2, brackets=3, max_t=nmaxepochs)
trials = ray.tune.run(experiment, scheduler=ahb, raise_on_failed_trial=False,
queue_trials=True, reuse_actors=True).trials
trials = [trial for trial in trials if trial.last_result is not None]
loss = [trial.last_result.get('mean_loss', float('inf')) for trial in trials]
return teacher_model, model_args, objective, min(loss)
|
butterfly-master
|
cnn/distill_cov_experiment.py
|
'''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
from butterfly.utils import twiddle_normal_to_fast_format
from cnn.mobilenet_imagenet import _make_divisible
from cnn.mobilenet_imagenet import Butterfly1x1Conv
from factor_multiply_fast import butterfly_multiply_untied_forward_fast
class HadamardTransformCuda(torch.autograd.Function):
'''The unnormalized Hadamard transform (i.e. without dividing by sqrt(2))
'''
@staticmethod
def forward(ctx, twiddle, x):
ctx.save_for_backward(twiddle)
return butterfly_multiply_untied_forward_fast(twiddle, x, True)
@staticmethod
def backward(ctx, grad):
twiddle, = ctx.saved_tensors
return None, HadamardTransformCuda.apply(twiddle, grad)
hadamard_transform_cuda = HadamardTransformCuda.apply
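# Added note: the backward pass above reuses the forward kernel because the
# Hadamard matrix is symmetric (H^T = H), so backpropagating through y = Hx
# just applies H to the incoming gradient; the twiddle is a fixed buffer and
# receives no gradient (None).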
class Hadamard(nn.Module):
def __init__(self, n):
super().__init__()
m = int(math.ceil(math.log2(n)))
self.n = n
self.extended_n = 1 << m
with torch.no_grad():
twiddle = torch.tensor([[1, 1], [1, -1]], dtype=torch.float) / math.sqrt(2)
twiddle = twiddle.reshape(1, 1, 1, 2, 2).expand((1, m, self.extended_n//2, 2, 2))
twiddle = twiddle_normal_to_fast_format(twiddle)
self.register_buffer('twiddle', twiddle)
def forward(self, x):
if self.n < self.extended_n: # Zero-pad
x = F.pad(x, (0, self.extended_n - self.n))
output = hadamard_transform_cuda(self.twiddle, x.unsqueeze(1)).squeeze(1)
        if self.n < self.extended_n:  # Truncate back to the original size
output = output[:, :self.n]
return output
class Hadamard1x1Conv(Hadamard):
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w)
Return:
output: (batch, c, h, w)
"""
batch, c, h, w = input.shape
input_reshape = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c)
output = super(Hadamard1x1Conv, self).forward(input_reshape)
return output.view(batch, h * w, c).transpose(1, 2).view(batch, c, h, w)
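# --- Added sketch, not part of the original file ---
# The reshapes in Hadamard1x1Conv.forward implement the standard trick for
# applying a per-pixel channel-mixing transform as a 1x1 convolution: fold the
# spatial positions into the batch dimension, transform over channels, then
# restore the layout. A plain-PyTorch equivalent with an explicit dense matrix
# H of shape (c, c) (function name is illustrative):
def dense_1x1_mix_sketch(input, H):
    batch, c, h, w = input.shape
    flat = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c)  # (batch*h*w, c)
    out = flat @ H.t()  # mix channels at every spatial position
    return out.view(batch, h * w, c).transpose(1, 2).view(batch, c, h, w)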
class ShuffleBlock(nn.Module):
def __init__(self, groups):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'''
N,C,H,W = x.size()
g = self.groups
return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
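# Added example: with C=6 channels and g=2 groups, ShuffleBlock permutes the
# channel order [0,1,2,3,4,5] -> [0,3,1,4,2,5]:
#   >>> x = torch.arange(6.).view(1, 6, 1, 1)
#   >>> ShuffleBlock(groups=2)(x).flatten().tolist()
#   [0.0, 3.0, 1.0, 4.0, 2.0, 5.0]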
class Bottleneck(nn.Module):
def __init__(self, in_planes, out_planes, stride, groups, grouped_conv_1st_layer=True, shuffle='P', preact=False):
super(Bottleneck, self).__init__()
self.stride = stride
self.shuffle = shuffle
self.preact = preact
mid_planes = _make_divisible(out_planes // 4, groups)
if stride == 2: # Reduce out_planes due to concat
out_planes -= in_planes
g = groups if grouped_conv_1st_layer else 1 # No grouped conv for the first layer of stage 2
self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes if not self.preact else in_planes)
if shuffle == 'P':
self.shuffle0 = nn.Identity()
self.shuffle1 = ShuffleBlock(groups=g)
elif shuffle.startswith('Hadamard'):
self.shuffle0 = nn.Identity() if shuffle.endswith('onesided') else Hadamard1x1Conv(in_planes)
self.shuffle1 = Hadamard1x1Conv(mid_planes)
else:
param = shuffle.split('_')[0]
nblocks = 0 if len(shuffle.split('_')) <= 1 else int(shuffle.split('_')[1])
if shuffle.endswith('onesided'):
self.shuffle0 = nn.Identity()
else:
self.shuffle0 = Butterfly1x1Conv(in_planes, in_planes, bias=False, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks)
self.shuffle1 = Butterfly1x1Conv(mid_planes, mid_planes, bias=False, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks)
self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
self.conv2.weight._no_wd = True
self.bn2 = nn.BatchNorm2d(mid_planes)
self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes if not self.preact else mid_planes)
self.shortcut = nn.Sequential()
if stride == 2:
self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
if not self.preact:
# out = F.relu(self.bn1(self.shuffle1(self.conv1(self.shuffle0(x)))), inplace=True)
out = F.relu(self.bn1(self.conv1(self.shuffle0(x))), inplace=True)
out = self.shuffle1(out)
out = self.bn2(self.conv2(out))
out = self.bn3(self.conv3(out))
res = self.shortcut(x)
out = F.relu(torch.cat([out,res], 1), inplace=True) if self.stride==2 else F.relu(out+res, inplace=True)
else:
out = F.relu(self.bn1(x), inplace=True)
res = self.shortcut(out)
out = self.shuffle1(self.conv1(self.shuffle0(x)))
out = self.conv2(F.relu(self.bn2(out), inplace=True))
out = self.conv3(self.bn3(out))
out = torch.cat([out,res], 1) if self.stride==2 else out+res
return out
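# Added note: with preact=False the block is post-activation (conv -> BN ->
# ReLU); with preact=True it follows the pre-activation ordering of ResNet v2
# (BN -> ReLU -> conv), which is why __init__ sizes bn1 over in_planes and bn3
# over mid_planes in that case.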
class ShuffleNet(nn.Module):
def __init__(self, num_classes=1000, groups=8, width_mult=1.0, shuffle='P', preact=False):
super(ShuffleNet, self).__init__()
num_blocks = [4, 8, 4]
groups_to_outplanes = {1: [144, 288, 576],
2: [200, 400, 800],
3: [240, 480, 960],
4: [272, 544, 1088],
8: [384, 768, 1536]}
out_planes = groups_to_outplanes[groups]
out_planes = [_make_divisible(p * width_mult, groups) for p in out_planes]
input_channel = _make_divisible(24 * width_mult, groups)
self.conv1 = nn.Conv2d(3, input_channel, kernel_size=3, stride=2, padding=1, bias=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.bn1 = nn.BatchNorm2d(input_channel)
self.in_planes = input_channel
self.stage2 = self._make_layer(out_planes[0], num_blocks[0], groups, grouped_conv_1st_layer=False, shuffle=shuffle, preact=preact)
self.stage3 = self._make_layer(out_planes[1], num_blocks[1], groups, shuffle=shuffle, preact=preact)
self.stage4 = self._make_layer(out_planes[2], num_blocks[2], groups, shuffle=shuffle, preact=preact)
self.linear = nn.Linear(out_planes[2], num_classes)
def _make_layer(self, out_planes, num_blocks, groups, grouped_conv_1st_layer=True, shuffle='P', preact=False):
layers = []
for i in range(num_blocks):
stride = 2 if i == 0 else 1
layers.append(Bottleneck(self.in_planes, out_planes, stride=stride, groups=groups,
grouped_conv_1st_layer=grouped_conv_1st_layer, shuffle=shuffle, preact=preact))
self.in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
out = self.maxpool(out)
out = self.stage2(out)
out = self.stage3(out)
out = self.stage4(out)
out = out.mean([2, 3])
out = self.linear(out)
return out
def test():
net = ShuffleNet()
x = torch.randn(1, 3, 224, 224)
y = net(x)
print(y)
# test()
|
butterfly-master
|
cnn/shufflenet_imagenet.py
|
# https://github.com/fastai/imagenet-fast/blob/master/imagenet_nv/distributed.py
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed as dist
from torch.nn.modules import Module
'''
This version of DistributedDataParallel is designed to be used in conjunction with the multiproc.py
launcher included with this example. It assumes that your run uses one process per GPU, that the
model is already on the correct device, and that torch.cuda.set_device has been used to set the
device.
Parameters are broadcast to the other processes on initialization of DistributedDataParallel,
and are allreduced at the end of the backward pass.
'''
def flat_dist_call(tensors, call, extra_args=None):
    if not hasattr(flat_dist_call, 'warn_on_half'):
        flat_dist_call.warn_on_half = True  # warn at most once across calls
buckets = {}
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
if flat_dist_call.warn_on_half:
        if 'torch.cuda.HalfTensor' in buckets:  # bucket keys are type strings from tensor.type()
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case.")
flat_dist_call.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
coalesced = _flatten_dense_tensors(bucket)
if extra_args is not None:
call(coalesced, *extra_args)
else:
call(coalesced)
        if call is dist.all_reduce:  # average only when reducing gradients, not for the initial broadcast
            coalesced /= dist.get_world_size()
for buf, synced in zip(bucket, _unflatten_dense_tensors(coalesced, bucket)):
buf.copy_(synced)
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
param_list = [param for param in self.module.state_dict().values() if torch.is_tensor(param)]
if dist._backend == dist.dist_backend.NCCL:
for param in param_list:
assert param.is_cuda, "NCCL backend only supports model parameters to be on GPU."
#broadcast parameters
flat_dist_call(param_list, dist.broadcast, (0,) )
#all reduce gradient hook
def allreduce_params():
            if not self.needs_reduction:
                return
            self.needs_reduction = False
grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]
flat_dist_call(grads, dist.all_reduce)
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
torch.autograd.Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
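# --- Added usage sketch, not part of the original file ---
# Typical one-process-per-GPU usage of this wrapper (names illustrative,
# assuming torch.distributed has already been initialized):
#
#   torch.cuda.set_device(local_rank)
#   model = DistributedDataParallel(MyModel().cuda())
#   # parameters are broadcast from rank 0 here; afterwards every backward
#   # pass queues allreduce_params(), which averages gradients across ranks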
|
butterfly-master
|
cnn/distributed.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import argparse
import torchvision.models as torch_models
import models.resnet_imagenet as models # only use imagenet models
import torch
import logging
from collections import OrderedDict
import torchvision.datasets as datasets
import time
from train_utils import AverageMeter, data_prefetcher
import train_utils
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--num_structured_layers', default=0, type=int,
help='Number of layers starting from the back that are structured')
parser.add_argument('--structure_type', default='B', type=str, choices=['B', 'BBT', 'BBTBBT'],
help='Structure of butterflies')
parser.add_argument('--nblocks', default=1, type=int, help='Number of blocks for each butterfly')
parser.add_argument('--param', default='regular', type=str, help='Parametrization of butterfly factors')
parser.add_argument('--input-dir', default='/distillation/resnet18/butterflies', type=str, help='Input directory for distilled butterflies')
parser.add_argument('--output-dir', default='.', help='Output directory for initialized resnets with butterflies.')
parser.add_argument('--random', action='store_true', help='Use randomly initialized butterflies so don\'t load weights for butterflies')
parser.add_argument('--start-epoch', type=int, help='Starting epoch for training.', default=0)
return parser
# from imagenet_experiment import get_loaders, validate
args = get_parser().parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.info(args)
os.makedirs(args.output_dir, exist_ok=True)
# initialize new model
student_model = models.__dict__['resnet18'](num_structured_layers=args.num_structured_layers,
structure_type=args.structure_type, nblocks=args.nblocks, param=args.param)
print(student_model.state_dict().keys())
checkpoint = torch.load("resnet18.pth.tar", map_location = lambda storage, loc: storage.cuda())
best_acc1 = checkpoint['best_prec1']
loaded_state_dict = train_utils.strip_prefix_if_present(checkpoint['state_dict'], prefix="module.")
if not args.random:
# add butterflies
if args.structure_type == "B":
loaded_state_dict['features.7.0.conv1.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.7.0.conv1_B_1_regular.pt"
).twiddle
loaded_state_dict['features.7.0.conv2.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.7.0.conv2_B_1_regular.pt"
).twiddle
loaded_state_dict['features.7.0.downsample.0.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.7.0.downsample.0_B_1_regular.pt"
).twiddle
loaded_state_dict['features.7.1.conv1.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.7.1.conv1_B_1_regular.pt"
).twiddle
loaded_state_dict['features.7.1.conv2.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.7.1.conv2_B_1_regular.pt"
).twiddle
# also load features.6
if args.num_structured_layers == 2:
loaded_state_dict['features.6.0.conv1.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.6.0.conv1_B_1_regular.pt"
).twiddle
loaded_state_dict['features.6.0.conv2.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.6.0.conv2_B_1_regular.pt"
).twiddle
loaded_state_dict['features.6.0.downsample.0.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.6.0.downsample.0_B_1_regular.pt"
).twiddle
loaded_state_dict['features.6.1.conv1.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.6.1.conv1_B_1_regular.pt"
).twiddle
loaded_state_dict['features.6.1.conv2.twiddle'] = torch.load(
f"{args.input_dir}/butterfly_features.6.1.conv2_B_1_regular.pt"
).twiddle
elif args.structure_type == 'BBT':
features_7_0_conv1 = torch.load(
f"{args.input_dir}/butterfly_features.7.0.conv1_BBT_{args.nblocks}_regular.pt"
)
features_7_0_conv2 = torch.load(
f"{args.input_dir}/butterfly_features.7.0.conv2_BBT_{args.nblocks}_regular.pt")
features_7_0_downsample_0 = torch.load(
f"{args.input_dir}/butterfly_features.7.0.downsample.0_BBT_{args.nblocks}_regular.pt")
features_7_1_conv1 = torch.load(
f"{args.input_dir}/butterfly_features.7.1.conv1_BBT_{args.nblocks}_regular.pt")
features_7_1_conv2 = torch.load(
f"{args.input_dir}/butterfly_features.7.1.conv2_BBT_{args.nblocks}_regular.pt")
# nblocks doubled since BBT
for i in range(args.nblocks*2):
loaded_state_dict[f'features.7.0.conv1.layers.{i}.twiddle'] = features_7_0_conv1.layers[i].twiddle
loaded_state_dict[f'features.7.0.conv2.layers.{i}.twiddle'] = features_7_0_conv2.layers[i].twiddle
loaded_state_dict[f'features.7.0.downsample.0.layers.{i}.twiddle'] = features_7_0_downsample_0.layers[i].twiddle
loaded_state_dict[f'features.7.1.conv1.layers.{i}.twiddle'] = features_7_1_conv1.layers[i].twiddle
loaded_state_dict[f'features.7.1.conv2.layers.{i}.twiddle'] = features_7_1_conv2.layers[i].twiddle
if args.num_structured_layers == 2:
features_6_0_conv1 = torch.load(
f"{args.input_dir}/butterfly_features.6.0.conv1_BBT_{args.nblocks}_regular.pt")
features_6_0_conv2 = torch.load(
f"{args.input_dir}/butterfly_features.6.0.conv2_BBT_{args.nblocks}_regular.pt")
features_6_0_downsample_0 = torch.load(
f"{args.input_dir}/butterfly_features.6.0.downsample.0_BBT_{args.nblocks}_regular.pt")
features_6_1_conv1 = torch.load(
f"{args.input_dir}/butterfly_features.6.1.conv1_BBT_{args.nblocks}_regular.pt")
features_6_1_conv2 = torch.load(
f"{args.input_dir}/butterfly_features.6.1.conv2_BBT_{args.nblocks}_regular.pt")
# nblocks doubled since BBT
for i in range(args.nblocks*2):
loaded_state_dict[f'features.6.0.conv1.layers.{i}.twiddle'] = features_6_0_conv1.layers[i].twiddle
loaded_state_dict[f'features.6.0.conv2.layers.{i}.twiddle'] = features_6_0_conv2.layers[i].twiddle
loaded_state_dict[f'features.6.0.downsample.0.layers.{i}.twiddle'] = features_6_0_downsample_0.layers[i].twiddle
loaded_state_dict[f'features.6.1.conv1.layers.{i}.twiddle'] = features_6_1_conv1.layers[i].twiddle
loaded_state_dict[f'features.6.1.conv2.layers.{i}.twiddle'] = features_6_1_conv2.layers[i].twiddle
else:
raise ValueError("Invalid structure type")
# delete replaced keys so we can use strict loading
del loaded_state_dict["features.7.0.conv1.weight"]
del loaded_state_dict["features.7.0.conv2.weight"]
del loaded_state_dict["features.7.1.conv1.weight"]
del loaded_state_dict["features.7.1.conv2.weight"]
del loaded_state_dict["features.7.0.downsample.0.weight"]
if args.num_structured_layers == 2:
del loaded_state_dict["features.6.0.conv1.weight"]
del loaded_state_dict["features.6.0.conv2.weight"]
del loaded_state_dict["features.6.1.conv1.weight"]
del loaded_state_dict["features.6.1.conv2.weight"]
del loaded_state_dict["features.6.0.downsample.0.weight"]
if not args.random:
# keep strict=True to make sure keys aren't messed up in naming and weights are actually loaded
student_model.load_state_dict(loaded_state_dict, strict=True)
else:
    # keep the randomly initialized butterfly weights (their keys are absent from the loaded state dict)
student_model.load_state_dict(loaded_state_dict, strict=False)
# add 'module.' prefix because the ImageNet training code expects it when loading model weights
student_model_dict = train_utils.add_prefix(student_model.state_dict(), prefix="module.")
state = {'state_dict': student_model_dict,
'best_acc1': 0,
'epoch': args.start_epoch}
# save model state for loading into code for fine-tuning
if args.random:
torch.save(state, f"{args.output_dir}/resnet18_butterfly_random_{args.num_structured_layers}_{args.structure_type}_{args.nblocks}_{args.param}.pth.tar")
else:
torch.save(state, f"{args.output_dir}/resnet18_butterfly_distilled_{args.num_structured_layers}_{args.structure_type}_{args.nblocks}_{args.param}.pth.tar")
|
butterfly-master
|
cnn/imagenet_model_surgery.py
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
import torch
import torchvision
import torchvision.transforms as transforms
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import dataset_utils
import model_utils
# perm_path = 'sinkhorn.samples_40.epochs_400'
# perm_path = 'sinkhorn.was237'
# method = 'sinkhorn'
# perm_path = 'butterfly-samples16-anneal.63-temp.06-lr.0008'
# perm_path = 'T5.10000.model_optimizer.pth'
perm_path = 'T5.wd.10000.model_optimizer.pth'
method = 'butterfly'
if __name__ == '__main__':
perm_path = 'saved_perms/' + perm_path
# model = model_utils.get_model({'name': 'Permutation', 'args': {'method': method, 'stochastic':True, 'param': 'logit', 'temp': 0.1}})
# # model = model_utils.get_model({'name': 'Permutation', 'args': {'method': method}})
# model.load_state_dict(torch.load(perm_path))
# New version:
# saved_model = torch.load(perm_path)
# model = model_utils.get_model(saved_model['args'])
# model.load_state_dict(saved_model['state'])
# Newer version with full model optimizer:
saved_model = torch.load(perm_path)
model = model_utils.get_model(saved_model['model']['args'])
model.load_state_dict(saved_model['model']['state'])
# extract model from resnet:
# model = model.permute
n_images = 2
original_dataset = {'name': 'PPCIFAR10', 'batch': n_images, 'transform': 'original'}
permuted_dataset = {'name': 'PPCIFAR10', 'batch': n_images, 'transform': 'permute'}
normalize_dataset = {'name': 'PPCIFAR10', 'batch': n_images, 'transform': 'normalize'}
training_dataset = {'name': 'PPCIFAR10', 'batch': n_images}
torch.manual_seed(0)
orig_train_loader, orig_test_loader = dataset_utils.get_dataset(original_dataset)
torch.manual_seed(0)
perm_train_loader, perm_test_loader = dataset_utils.get_dataset(permuted_dataset)
torch.manual_seed(0)
norm_train_loader, norm_test_loader = dataset_utils.get_dataset(normalize_dataset)
torch.manual_seed(0)
train_train_loader, train_test_loader = dataset_utils.get_dataset(training_dataset)
def imshow(img, name):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# plt.show()
plt.savefig(name, bbox_inches='tight')
# get some random training images
torch.manual_seed(0)
dataiter = iter(orig_train_loader)
orig_images, labels = dataiter.next()
torch.manual_seed(0)
dataiter = iter(perm_train_loader)
perm_images, _ = dataiter.next()
torch.manual_seed(0)
dataiter = iter(norm_train_loader)
norm_images, _ = dataiter.next()
torch.manual_seed(0)
dataiter = iter(train_train_loader)
train_images, _ = dataiter.next()
# show images
# imshow(torchvision.utils.make_grid(images), 'examples')
# TODO: nsamples should be able to be passed into forward pass
for p in model.permute:
p.samples = 1
# breakpoint()
# print(model)
model.eval()
with torch.no_grad():
# breakpoint()
sample_output = model(perm_images, perm='sample')
mean_output = model(perm_images, perm='mean')
mle_output = model(perm_images, perm='mle')
# imshow(torchvision.utils.make_grid(mean_output), 'examples_mean')
# imshow(torchvision.utils.make_grid(mle_output), 'examples_mle')
# all_images = torch.cat([orig_images, perm_images, norm_images, train_images, sample_output, mean_output, mle_output], dim=0)
all_images = torch.stack([orig_images, perm_images, mle_output], dim=0).transpose(0,1).reshape((-1, 3, 32, 32))
imshow(torchvision.utils.make_grid(all_images, nrow=3), 'all_examples.pdf')
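# Added note: the stack/transpose/reshape above interleaves the three image
# sets per sample, so make_grid(nrow=3) renders one row per example:
# (original, permuted, MLE-unpermuted).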
|
butterfly-master
|
cnn/visualize_perm.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import argparse, shutil, time, warnings
import subprocess
from datetime import datetime
from pathlib import Path
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models.resnet_imagenet as models # only use imagenet models
from distributed import DistributedDataParallel as DDP
import logging
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--save-dir', type=str, default=Path.cwd(), help='Directory to save logs and models.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='Print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--small', action='store_true', help='start with smaller images')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--dp', action='store_true', help='Use nn.DataParallel instead of fastai DistributedDataParallel.')
parser.add_argument('--sz', default=224, type=int, help='Size of transformed image.')
parser.add_argument('--decay-int', default=30, type=int, help='Decay LR by 10 every decay-int epochs')
parser.add_argument('--loss-scale', type=float, default=1,
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--prof', dest='prof', action='store_true', help='Only run a few iters for profiling.')
parser.add_argument('--dist-url', default='file://sync.file', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--distributed-init-method')
parser.add_argument('--world-size', default=1, type=int,
help='Number of GPUs to use. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
parser.add_argument('--rank', default=0, type=int,
help='Used for multi-process training. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
parser.add_argument('--num_structured_layers', default=0, type=int,
help='Number of layers starting from the back that are structured')
parser.add_argument('--structure_type', default='B', type=str, choices=['B', 'BBT', 'BBTBBT'],
help='Structure of butterflies')
parser.add_argument('--nblocks', default=1, type=int, help='Number of blocks for each butterfly')
parser.add_argument('--param', default='regular', type=str, help='Parametrization of butterfly factors')
return parser
cudnn.benchmark = True
args = get_parser().parse_args()
logging.basicConfig(
level=logging.INFO,
handlers=[
logging.FileHandler(f'{args.save_dir}/main_thread.log'),
logging.StreamHandler()
])
logger = logging.getLogger()
logger.info(args)
label = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode('ascii').strip()
logger.info(f'Git hash: {label}')
def get_loaders(traindir, valdir, use_val_sampler=True, min_scale=0.08):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
tensor_tfm = [transforms.ToTensor(), normalize]
train_dataset = datasets.ImageFolder(
traindir, transforms.Compose([
transforms.RandomResizedCrop(args.sz, scale=(min_scale, 1.0)),
transforms.RandomHorizontalFlip(),
] + tensor_tfm))
val_dataset = datasets.ImageFolder(
valdir, transforms.Compose([
transforms.Resize(int(args.sz*1.14)),
transforms.CenterCrop(args.sz),
] + tensor_tfm))
train_sampler = (torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None)
val_sampler = (torch.utils.data.distributed.DistributedSampler(val_dataset) if args.distributed else None)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=int(args.batch_size), shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=val_sampler if use_val_sampler else None)
return train_loader,val_loader,train_sampler,val_sampler
def main():
start_time = datetime.now()
args.distributed = True #args.world_size > 1
args.gpu = 0
if args.distributed:
import socket
args.gpu = args.rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
logger.info('| distributed init (rank {}): {}'.format(args.rank, args.distributed_init_method))
dist.init_process_group( backend=args.dist_backend, init_method=args.distributed_init_method,
world_size=args.world_size, rank=args.rank, )
logger.info('| initialized host {} as rank {}'.format(socket.gethostname(), args.rank))
#args.gpu = args.rank % torch.cuda.device_count()
#torch.cuda.set_device(args.gpu)
#logger.info('initializing...')
#dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size)
#logger.info('initialized')
# create model
if args.pretrained: model = models.__dict__[args.arch](pretrained=True)
else: model = models.__dict__[args.arch](num_structured_layers=args.num_structured_layers,
structure_type=args.structure_type, nblocks=args.nblocks, param=args.param)
model = model.cuda()
n_dev = torch.cuda.device_count()
logger.info('Created model')
if args.distributed: model = DDP(model)
elif args.dp:
model = nn.DataParallel(model)
args.batch_size *= n_dev
logger.info('Set up data parallel')
global structured_params
global unstructured_params
structured_params = filter(lambda p: hasattr(p, '_is_structured') and p._is_structured, model.parameters())
unstructured_params = filter(lambda p: not (hasattr(p, '_is_structured') and p._is_structured), model.parameters())
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD([{'params': structured_params, 'weight_decay': 0.0},
{'params': unstructured_params}], args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
logger.info('Created optimizer')
best_acc1 = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
else: logger.info("=> no checkpoint found at '{}'".format(args.resume))
if args.small:
traindir = os.path.join(args.data+'-sz/160', 'train')
valdir = os.path.join(args.data+'-sz/160', 'val')
args.sz = 128
else:
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
args.sz = 224
train_loader,val_loader,train_sampler,val_sampler = get_loaders(traindir, valdir, use_val_sampler=True)
logger.info('Loaded data')
    if args.evaluate: return validate(val_loader, model, criterion, args.start_epoch, start_time)
logger.info(model)
logger.info('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
logger.info('| num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
for epoch in range(args.start_epoch, args.epochs):
logger.info(f'Epoch {epoch}')
adjust_learning_rate(optimizer, epoch)
if epoch==int(args.epochs*0.4+0.5):
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
args.sz = 224
train_loader,val_loader,train_sampler,val_sampler = get_loaders( traindir, valdir)
if epoch==int(args.epochs*0.92+0.5):
args.sz=288
args.batch_size=128
train_loader,val_loader,train_sampler,val_sampler = get_loaders(
traindir, valdir, use_val_sampler=False, min_scale=0.5)
if args.distributed:
train_sampler.set_epoch(epoch)
val_sampler.set_epoch(epoch)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
train(train_loader, model, criterion, optimizer, epoch)
if args.prof: break
acc1 = validate(val_loader, model, criterion, epoch, start_time)
if args.rank == 0:
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
save_checkpoint({
'epoch': epoch + 1, 'arch': args.arch, 'state_dict': model.state_dict(),
'best_acc1': best_acc1, 'optimizer' : optimizer.state_dict(),
}, is_best)
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
class data_prefetcher():
def __init__(self, loader, prefetch=True):
self.loader,self.prefetch = iter(loader),prefetch
if prefetch:
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
def next(self):
if not self.prefetch:
input,target = next(self.loader)
return input.cuda(non_blocking=True),target.cuda(non_blocking=True)
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
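# Added note: preload() issues the next batch's host-to-device copies on a
# side CUDA stream; next() makes the current stream wait on that stream before
# handing the tensors out, so data transfer overlaps with computation.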
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
prefetcher = data_prefetcher(train_loader, prefetch=True)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
if args.prof and (i > 200): break
# measure data loading time
data_time.update(time.time() - end)
input_var = Variable(input)
target_var = Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(acc1), input.size(0))
top5.update(to_python_float(acc5), input.size(0))
loss = loss*args.loss_scale
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
input, target = prefetcher.next()
if args.rank == 0 and i % args.print_freq == 0 and i > 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion, epoch, start_time):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
end = time.time()
prefetcher = data_prefetcher(val_loader)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
target = target.cuda(non_blocking=True)
input_var = Variable(input)
target_var = Variable(target)
# compute output
with torch.no_grad():
output = model(input_var)
loss = criterion(output, target_var)
reduced_loss = reduce_tensor(loss.data)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.data, target, topk=(1, 5))
reduced_acc1 = reduce_tensor(acc1)
reduced_acc5 = reduce_tensor(acc5)
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(acc1), input.size(0))
top5.update(to_python_float(acc5), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.rank == 0 and i % args.print_freq == 0:
logger.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
input, target = prefetcher.next()
time_diff = datetime.now()-start_time
logger.info(f'Epoch {epoch}: {float(time_diff.total_seconds())} sec')
logger.info(f' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}\n')
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, os.path.join(args.save_dir, filename))
if is_best:
shutil.copyfile(os.path.join(args.save_dir, filename), f'{args.save_dir}/model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every few epochs"""
if epoch<4 : lr = args.lr/(4-epoch)
elif epoch<int(args.epochs*0.47+0.5): lr = args.lr/1
elif epoch<int(args.epochs*0.78+0.5): lr = args.lr/10
elif epoch<int(args.epochs*0.95+0.5): lr = args.lr/100
else : lr = args.lr/1000
for param_group in optimizer.param_groups: param_group['lr'] = lr
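# Added worked example: with the default --epochs 90 the schedule above is
#   epochs 0-3   : warmup, lr/4 -> lr  (lr / (4 - epoch))
#   epochs 4-41  : lr
#   epochs 42-69 : lr/10
#   epochs 70-85 : lr/100
#   epochs 86-89 : lr/1000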
def accuracy(output, target, topk=(1,)):
"""Computes the acccuracy@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__': main()
|
butterfly-master
|
cnn/imagenet_experiment.py
|
import os, sys, subprocess
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import math
from pathlib import Path
import pickle
import random
import datetime
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from, grid_search
from ray.tune.schedulers import AsyncHyperBandScheduler
import model_utils
import dataset_utils
class TrainableModel(Trainable):
"""Trainable object for a Pytorch model, to be used with Ray's Hyperband tuning.
"""
def _setup(self, config):
self.config = config
device = config['device']
self.device = device
torch.manual_seed(config['seed'])
if self.device == 'cuda':
torch.cuda.manual_seed(config['seed'])
# model
self.model = model_utils.get_model(config['model']).to(device)
self.model_args = config['model']
# count parameters
self.nparameters = sum(param.nelement() for param in self.model.parameters())
print("Parameter count: ", self.nparameters)
# dataset
self.train_loader, self.valid_loader, self.test_loader = dataset_utils.get_dataset(config['dataset'])
structured_params = filter(lambda p: hasattr(p, '_is_structured') and p._is_structured, self.model.parameters())
unstructured_params = filter(lambda p: not (hasattr(p, '_is_structured') and p._is_structured), self.model.parameters())
if config['optimizer'] == 'Adam':
self.optimizer = optim.Adam([{'params': structured_params, 'weight_decay': 0.0},
{'params': unstructured_params}],
lr=config['lr'], weight_decay=config['weight_decay'])
else:
self.optimizer = optim.SGD([{'params': structured_params, 'weight_decay': 0.0},
{'params': unstructured_params}],
lr=config['lr'], momentum=0.9, weight_decay=config['weight_decay'])
# scheduler
if config['lr_decay']['milestones'] is not None:
self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=config['lr_decay']['milestones'], gamma=config['lr_decay']['factor'])
else:
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=config['lr_decay']['period'], gamma=config['lr_decay']['factor'])
self.switch_ams = config['switch_ams']
# if self.switch_ams is not None:
# self.ams_optimizer = optim.Adam([{'params': structured_params, 'weight_decay': 0.0},
# {'params': unstructured_params}],
# lr=config['lr'], weight_decay=config['weight_decay'])
def _train_iteration(self): #TODO report train loss and acc
self.model.train()
# with torch.autograd.set_detect_anomaly(True):
for data, target in self.train_loader:
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
loss = F.cross_entropy(output, target)
loss.backward()
self.optimizer.step()
def _test(self):
self.model.eval()
valid_loss = 0.0
correct = 0
with torch.no_grad():
for data, target in self.valid_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
valid_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += (pred == target.data.view_as(pred)).long().cpu().sum()
valid_loss = valid_loss / len(self.valid_loader.dataset)
valid_accuracy = correct.item() / len(self.valid_loader.dataset)
test_loss = 0.0
correct = 0
with torch.no_grad():
for data, target in self.test_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += (pred == target.data.view_as(pred)).long().cpu().sum()
test_loss = test_loss / len(self.test_loader.dataset)
test_accuracy = correct.item() / len(self.test_loader.dataset)
return {"nparameters": self.nparameters, "mean_loss": valid_loss, "mean_accuracy": valid_accuracy, "test_loss": test_loss, "test_accuracy": test_accuracy}
def _train(self):
if self.switch_ams is not None and self._iteration == self.switch_ams:
print("Switching to AMSGrad")
structured_params = filter(lambda p: hasattr(p, '_is_structured') and p._is_structured, self.model.parameters())
unstructured_params = filter(lambda p: not (hasattr(p, '_is_structured') and p._is_structured), self.model.parameters())
self.optimizer = optim.Adam([{'params': structured_params, 'weight_decay': 0.0},
{'params': unstructured_params}],
lr=self.config['lr'], weight_decay=self.config['weight_decay'], amsgrad=True)
# self.optimizer = self.ams_optimizer
# for group in self.optimizer.param_groups:
# group['amsgrad'] = True
self._train_iteration()
metrics = self._test()
self.scheduler.step()
return metrics
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir, "model_optimizer.pth")
full_model = {
'state': self.model.state_dict(),
'args': self.model_args,
}
state = {'model': full_model,
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict()}
torch.save(state, checkpoint_path)
return checkpoint_path
def _restore(self, checkpoint_path):
if hasattr(self, 'device'):
checkpoint = torch.load(checkpoint_path, self.device)
else:
checkpoint = torch.load(checkpoint_path)
self.model = model_utils.get_model(checkpoint['model']['args'])
self.model.to(self.device)
self.model.load_state_dict(checkpoint['model']['state'])
# TODO: refactor this into an optimizer constructing helper
structured_params = filter(lambda p: hasattr(p, '_is_structured') and p._is_structured, self.model.parameters())
unstructured_params = filter(lambda p: not (hasattr(p, '_is_structured') and p._is_structured), self.model.parameters())
self.optimizer = optim.Adam([{'params': structured_params},
{'params': unstructured_params}],)
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.scheduler.load_state_dict(checkpoint['scheduler'])
self.scheduler.optimizer = self.optimizer
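# Added note: when the switch_ams epoch is reached, _train above rebuilds a
# fresh Adam optimizer with amsgrad=True, discarding the accumulated moment
# estimates; the commented-out alternative flips the amsgrad flag on the
# existing optimizer's param groups, which would keep that state.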
ex = Experiment('Cifar10_experiment')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('../config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.config
def default_config():
dataset = 'CIFAR10'
model = 'LeNet' # Name of model, see model_utils.py
args = {} # Arguments to be passed to the model, as a dictionary
optimizer = 'SGD' # Which optimizer to use, either Adam or SGD
nmaxepochs = 200 # Maximum number of epochs
use_hyperband = False
lr = {'grid': [0.025, 0.05, 0.1, 0.2]}
lr_decay = {'factor': 0.2, 'period': None, 'milestones': [int(30 * nmaxepochs / 100), int(60 * nmaxepochs / 100), int(80 * nmaxepochs / 100)]}
# lr_decay = True # Whether to use learning rate decay
lr_decay_period = 25 # Period of learning rate decay
weight_decay = False # Whether to use weight decay
ntrials = 20 # Number of trials for hyperparameter tuning
batch = 128
grace_period = 25
# decay_milestones = [int(30 * nmaxepochs / 100), int(60 * nmaxepochs / 100), int(80 * nmaxepochs / 100)]
resume_pth = None
result_dir = project_root + '/cnn/results' # Directory to store results
cuda = torch.cuda.is_available() # Whether to use GPU
smoke_test = False # Finish quickly for testing
@ex.named_config
def adam():
optimizer = 'Adam' # Which optimizer to use, either Adam or SGD
use_hyperband = True
lr = {'min': 1e-4, 'max': 1e-2, 'grid': None}
# lr_decay = False # Whether to use learning rate decay
lr_decay = None # {'factor': 0.2, 'period': 25, 'milestones': [int(30 * nmaxepochs / 100), int(60 * nmaxepochs / 100), int(80 * nmaxepochs / 100)]}}
lr_decay_period = 25 # Period of learning rate decay
weight_decay = False # Whether to use weight decay
grace_period = 100
@ex.named_config
def sgd():
# abbreviated sgd schedule for resnet
optimizer = 'SGD' # Which optimizer to use, either Adam or SGD
use_hyperband = True
lr = {'min': 2e-2, 'max': 2e-1, 'grid': None}
lr_decay = {'factor': 0.2, 'period': 25, 'milestones': None}
# lr_decay = True # Whether to use learning rate decay
# lr_decay_period = 25 # Period of learning rate decay
weight_decay = True # Whether to use weight decay
nmaxepochs = 100
@ex.capture
def cifar10_experiment(dataset, model, args, optimizer, use_hyperband, lr, lr_decay, weight_decay, ntrials, nmaxepochs, batch, resume_pth, result_dir, cuda, smoke_test):
assert optimizer in ['Adam', 'SGD'], 'Only Adam and SGD are supported'
if lr_decay is None:
lr_decay = {'factor': 1.0, 'period': 1000, 'milestones': None}
config={
'optimizer': optimizer,
'switch_ams': int(0.5 * nmaxepochs) if optimizer == 'Adam' else None,
'lr': grid_search(lr['grid']) if lr['grid'] is not None else sample_from(lambda spec: math.exp(random.uniform(math.log(lr['min']), math.log(lr['max'])))),
# 'lr_decay_factor': 0.2 if lr_decay else 1.0,
# 'lr_decay_period': lr_decay_period if lr_decay else 10000,
# 'decay_milestones': decay_milestones,
'lr_decay' : lr_decay,
'weight_decay': 5e-4 if weight_decay else 0.0,
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'device': 'cuda' if cuda else 'cpu',
'model': {'name': model, 'args': args},
'dataset': {'name': dataset, 'batch': batch},
}
smoke_str = 'smoke_' if smoke_test else '' # for easy finding and deleting unimportant logs
args_str = '_'.join([k+':'+str(v) for k,v in args.items()])
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
commit_id = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip().decode('utf-8')
experiment = RayExperiment(
name=f'{smoke_str}{dataset.lower()}_{model}_{args_str}_{optimizer}_epochs_{nmaxepochs}_{timestamp}_{commit_id}',
run=TrainableModel,
local_dir=result_dir,
num_samples=ntrials if not smoke_test else 1,
checkpoint_at_end=True,
checkpoint_freq=1000, # Just to enable recovery with @max_failures
max_failures=0,
resources_per_trial={'cpu': 4, 'gpu': 1 if cuda else 0},
stop={"training_iteration": 1 if smoke_test else nmaxepochs},
restore=resume_pth,
config=config,
)
return experiment
@ex.automain
def run(model, args, result_dir, nmaxepochs, use_hyperband, grace_period):
experiment = cifar10_experiment()
try:
with open('../config/redis_address', 'r') as f:
address = f.read().strip()
ray.init(redis_address=address)
    except Exception:
ray.init()
if use_hyperband:
if grace_period == -1: grace_period = nmaxepochs
ahb = AsyncHyperBandScheduler(reward_attr='mean_accuracy', max_t=nmaxepochs, grace_period=grace_period)
trials = ray.tune.run(experiment, scheduler=ahb, raise_on_failed_trial=False, queue_trials=True)
else:
trials = ray.tune.run(experiment, raise_on_failed_trial=False, queue_trials=True)
trials = [trial for trial in trials if trial.last_result is not None]
accuracy = [trial.last_result.get('mean_accuracy', float('-inf')) for trial in trials]
nparameters = trials[0].last_result['nparameters']
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return max(accuracy), model, nparameters, args
|
butterfly-master
|
cnn/cifar_experiment.py
|
import argparse
import os
import shutil
import time
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
import imagenet.logger as log
from imagenet.smoothing import LabelSmoothing, KnowledgeDistillationLoss
from imagenet.mixup import NLLMultiLabelSmooth, MixUpWrapper
from imagenet.dataloaders import DATA_BACKEND_CHOICES
from imagenet.dataloaders import get_pytorch_train_loader, get_pytorch_val_loader
from imagenet.dataloaders import get_dali_train_loader, get_dali_val_loader
from imagenet.training import ModelAndLoss, get_optimizer, train_loop
from imagenet.training import lr_step_policy, lr_cosine_policy, lr_linear_policy
from imagenet.training import get_input_cov, butterfly_projection_cov
from imagenet.utils import should_backup_checkpoint, save_checkpoint
def add_parser_arguments(parser):
custom_model_names = ['mobilenetv1']
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name])) + custom_model_names
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--data-backend', metavar='BACKEND', default='dali-cpu',
choices=DATA_BACKEND_CHOICES)
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
    parser.add_argument('--struct', metavar='STRUCT', default='odo_4',
                        type=str,
                        help='structure for 1x1 conv (default: odo_4)')
    parser.add_argument('--softmax-struct', metavar='SMSTRUCT', default='D',
                        type=str,
                        help='structure for softmax layer (default: D)')
    parser.add_argument('--sm-pooling', metavar='SMPOOL', default=1,
                        type=int,
                        help='pooling before the softmax layer (default: 1)')
    parser.add_argument('--n-struct-layers', default=0, type=int,
                        metavar='NSL', help='Number of structured layers (default: 0)')
parser.add_argument('--width', default=1.0, type=float,
metavar='WIDTH', help='Width multiplier of the CNN (default 1.0)')
parser.add_argument('--distilled-param-path', default='', type=str, metavar='PATH',
help='path to distilled parameters (default: none)')
parser.add_argument('--full-model-path', default='', type=str, metavar='PATH',
help='path to full model checkpoint (default: none)')
parser.add_argument("--temperature", default=1., type=float,
help="Temperature for the softmax temperature.")
parser.add_argument("--alpha-ce", default=0.0, type=float,
help="Linear weight for the distillation loss. Must be >=0.")
parser.add_argument('-j', '--workers', default=5, type=int, metavar='N',
help='number of data loading workers (default: 5)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256) per gpu')
parser.add_argument('--optimizer-batch-size', default=-1, type=int,
metavar='N', help='size of a total batch size, for simulating bigger batches')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr-schedule', default='step', type=str, metavar='SCHEDULE', choices=['step','linear','cosine'])
parser.add_argument('--warmup', default=5, type=int,
metavar='E', help='number of warmup epochs')
parser.add_argument('--label-smoothing', default=0.0, type=float,
metavar='S', help='label smoothing')
parser.add_argument('--mixup', default=0.0, type=float,
metavar='ALPHA', help='mixup alpha')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--bn-weight-decay', action='store_true',
help='use weight_decay on batch normalization learnable parameters, default: false)')
parser.add_argument('--nesterov', action='store_true',
help='use nesterov momentum, default: false)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained-weights', default='', type=str, metavar='PATH',
help='load weights from here')
parser.add_argument('--fp16', action='store_true',
help='Run model fp16 mode.')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
parser.add_argument('--prof', type=int, default=-1,
help='Run only N iterations')
parser.add_argument('--amp', action='store_true',
help='Run model AMP (automatic mixed precision) mode.')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--seed', default=None, type=int,
help='random seed used for np and pytorch')
parser.add_argument('--gather-checkpoints', action='store_true',
help='Gather checkpoints throughout the training')
parser.add_argument('--raport-file', default='experiment_raport.json', type=str,
help='file in which to store JSON experiment raport')
parser.add_argument('--final-weights', default='model.pth.tar', type=str,
help='file in which to store final model weights')
parser.add_argument('--evaluate', action='store_true', help='evaluate checkpoint/model')
parser.add_argument('--training-only', action='store_true', help='do not evaluate')
parser.add_argument('--no-checkpoints', action='store_false', dest='save_checkpoints')
parser.add_argument('--workspace', type=str, default='./')
def main(args):
exp_start_time = time.time()
global best_prec1
best_prec1 = 0
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
if args.amp and args.fp16:
print("Please use only one of the --fp16/--amp flags")
exit(1)
if args.seed is not None:
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed + args.local_rank)
torch.cuda.manual_seed(args.seed + args.local_rank)
np.random.seed(seed=args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
def _worker_init_fn(id):
np.random.seed(seed=args.seed + args.local_rank + id)
random.seed(args.seed + args.local_rank + id)
else:
def _worker_init_fn(id):
pass
if args.fp16:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
if args.static_loss_scale != 1.0:
if not args.fp16:
print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
if args.optimizer_batch_size < 0:
batch_size_multiplier = 1
else:
tbs = args.world_size * args.batch_size
if args.optimizer_batch_size % tbs != 0:
print("Warning: simulated batch size {} is not divisible by actual batch size {}".format(args.optimizer_batch_size, tbs))
        batch_size_multiplier = int(args.optimizer_batch_size / tbs)
print("BSM: {}".format(batch_size_multiplier))
pretrained_weights = None
if args.pretrained_weights:
if os.path.isfile(args.pretrained_weights):
print("=> loading pretrained weights from '{}'".format(args.pretrained_weights))
pretrained_weights = torch.load(args.pretrained_weights)
else:
print("=> no pretrained weights found at '{}'".format(args.resume))
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model_state = checkpoint['state_dict']
# Strip names to be compatible with Pytorch 1.2, i.e. 'module.conv1.weight' -> 'conv1.weight'
model_state = {name.replace('module.', ''): weight for name, weight in model_state.items()}
optimizer_state = checkpoint['optimizer']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
model_state = None
optimizer_state = None
else:
model_state = None
optimizer_state = None
loss = nn.CrossEntropyLoss
if args.mixup > 0.0:
loss = lambda: NLLMultiLabelSmooth(args.label_smoothing)
elif args.label_smoothing > 0.0:
loss = lambda: LabelSmoothing(args.label_smoothing)
if args.alpha_ce > 0.0:
loss_og = loss()
loss = lambda: KnowledgeDistillationLoss(loss_og, args.temperature, args.alpha_ce)
# Create data loaders and optimizers as needed
if args.data_backend == 'pytorch':
get_train_loader = get_pytorch_train_loader
get_val_loader = get_pytorch_val_loader
elif args.data_backend == 'dali-gpu':
get_train_loader = get_dali_train_loader(dali_cpu=False)
get_val_loader = get_dali_val_loader()
elif args.data_backend == 'dali-cpu':
get_train_loader = get_dali_train_loader(dali_cpu=True)
get_val_loader = get_dali_val_loader()
train_loader, train_loader_len = get_train_loader(args.data, args.batch_size, 1000, args.mixup > 0.0, workers=args.workers, fp16=args.fp16)
if args.mixup != 0.0:
train_loader = MixUpWrapper(args.mixup, 1000, train_loader)
train_loader._len = train_loader_len
val_loader, val_loader_len = get_val_loader(args.data, args.batch_size, 1000, False, workers=args.workers, fp16=args.fp16)
val_loader._len = val_loader_len
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger = log.Logger(
args.print_freq,
[
log.JsonBackend(os.path.join(args.workspace, args.raport_file), log_level=1),
log.StdOut1LBackend(train_loader_len, val_loader_len, args.epochs * args.n_struct_layers, log_level=0),
])
for k, v in args.__dict__.items():
logger.log_run_tag(k, v)
else:
logger = None
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
if args.lr_schedule == 'step':
lr_policy = lr_step_policy(args.lr, [15,20], 0.1, args.warmup, train_loader_len, logger=logger)
elif args.lr_schedule == 'cosine':
lr_policy = lr_cosine_policy(args.lr, args.warmup, args.epochs, train_loader_len, logger=logger)
elif args.lr_schedule == 'linear':
lr_policy = lr_linear_policy(args.lr, args.warmup, args.epochs, train_loader_len, logger=logger)
model_and_loss = ModelAndLoss(
args.arch,
loss,
pretrained_weights=pretrained_weights,
cuda = True, fp16 = args.fp16,
width=args.width, n_struct_layers=0,
struct=args.struct, softmax_struct=args.softmax_struct, sm_pooling=args.sm_pooling)
checkpoint = torch.load(args.full_model_path, map_location = lambda storage, loc: storage.cuda(args.gpu))
model_state = checkpoint['state_dict']
# Strip names to be compatible with Pytorch 1.2, i.e. 'module.conv1.weight' -> 'conv1.weight'
model_state = {name.replace('module.', ''): weight for name, weight in model_state.items()}
model_and_loss.model.load_state_dict(model_state)
if args.distributed:
model_and_loss.distributed()
teacher_model = model_and_loss.model
layers_to_replace = ['layers.12.conv2', 'layers.11.conv2', 'layers.10.conv2', 'layers.9.conv2',
'layers.8.conv2', 'layers.7.conv2', 'layers.6.conv2']
for n_struct_layers in range(1, args.n_struct_layers + 1):
print(f'Fine-tuning the {n_struct_layers}-th to last layer.')
layer_name = layers_to_replace[n_struct_layers - 1]
# if args.local_rank == 0:
# print(dict(model_and_loss.model.named_modules()).keys())
start = time.perf_counter()
# layer name seems to have 'module.' x n_struct_layers for some reason
layer_name_prefix = ''.join(['module.'] * (1 if n_struct_layers == 1 else 2))
input_cov = get_input_cov(model_and_loss.model, train_loader, [layer_name_prefix + layer_name], max_batches=100)[layer_name_prefix + layer_name]
end = time.perf_counter()
print(f'Got input_cov in {end - start}s')
teacher_module = dict(model_and_loss.model.named_modules())[layer_name_prefix + layer_name]
start = time.perf_counter()
student_module, _ = butterfly_projection_cov(teacher_module, input_cov, args.struct)
end = time.perf_counter()
print(f'Butterfly projection in {end - start}s')
prev_state_dict = model_and_loss.model.state_dict()
prev_state_dict = {name.replace('module.', ''): weight for name, weight in prev_state_dict.items()}
structured_params = student_module.state_dict()
model_and_loss = ModelAndLoss(
args.arch,
loss,
pretrained_weights=pretrained_weights,
cuda = True, fp16 = args.fp16,
width=args.width, n_struct_layers=n_struct_layers,
struct=args.struct, softmax_struct=args.softmax_struct, sm_pooling=args.sm_pooling)
current_state_dict_keys = model_and_loss.model.state_dict().keys()
state_dict = {name: param for name, param in prev_state_dict.items() if name in current_state_dict_keys}
state_dict.update({layer_name + '.' + name: param for name, param in structured_params.items()})
model_and_loss.model.load_state_dict(state_dict)
if args.distributed:
model_and_loss.distributed()
optimizer = get_optimizer(list(model_and_loss.model.named_parameters()),
args.fp16,
        args.lr, args.momentum, args.weight_decay,
nesterov = args.nesterov,
bn_weight_decay = args.bn_weight_decay,
state=optimizer_state,
static_loss_scale = args.static_loss_scale,
dynamic_loss_scale = args.dynamic_loss_scale)
if args.amp:
model_and_loss, optimizer = amp.initialize(
model_and_loss, optimizer,
opt_level="O1",
loss_scale="dynamic" if args.dynamic_loss_scale else args.static_loss_scale)
if args.distributed:
model_and_loss.distributed()
if args.alpha_ce > 0.0:
model_and_loss._teacher_model = teacher_model
model_and_loss._teacher_model.eval()
train_loop(
model_and_loss, optimizer,
lr_policy,
train_loader, val_loader, args.epochs,
args.fp16, logger, should_backup_checkpoint(args), args.print_freq, use_amp=args.amp,
batch_size_multiplier = batch_size_multiplier,
start_epoch = args.start_epoch, best_prec1 = best_prec1, prof=args.prof,
skip_training = args.evaluate, skip_validation = args.training_only,
save_checkpoints=args.save_checkpoints and not args.evaluate, checkpoint_dir=args.workspace)
exp_duration = time.time() - exp_start_time
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.end()
print("Experiment ended")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
add_parser_arguments(parser)
args = parser.parse_args()
cudnn.benchmark = True
main(args)
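# Example invocation (illustrative only; the dataset path, architecture and
# struct names below are placeholders, not verified against add_parser_arguments):
#   python imagenet_finetune.py /path/to/imagenet --arch resnet18 \
#       --n-struct-layers 3 --struct odo_1 --full-model-path full_model.pth.tar \
#       --epochs 10 --lr 0.01 --batch-size 256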
|
butterfly-master
|
cnn/imagenet_finetune.py
|
import os, sys
# project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import numpy as np
import torch
from torch import nn  # used by mse() below
# import torchvision
def listperm2matperm(listperm):
"""Converts permutation list to matrix form.
Args:
listperm: (..., n) - tensor of list permutations of the set [n].
Return:
matperm: (..., n, n) - permutation matrices,
matperm[t, i, listperm[t,i]] = 1
"""
n = listperm.size(-1)
P = torch.eye(n, dtype=torch.long, device=listperm.device)[listperm]
return P
def matperm2listperm(matperm):
"""Converts permutation matrix to its enumeration (list) form.
Args:
matperm: (..., n, n)
Returns:
listperm: (..., n) - listperm[t,i] is the index of the only non-zero entry in matperm[t, i, :]
"""
batch_size = matperm.size(0)
n = matperm.size(-1)
assert matperm.size(-2) == matperm.size(-1)
    # argmax gives, for each row, the index of its maximum entry
# _, argmax = torch.max(matperm, dim=-1, keepdim=False)
argmax = torch.argmax(matperm, dim=-1, keepdim=False)
# argmax = argmax.view(batch_size, n_objects)
return argmax
def invert_listperm(listperm):
return matperm2listperm(torch.transpose(listperm2matperm(listperm), -1, -2))
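# Quick sanity check (illustrative):
#   sigma = torch.tensor([2, 0, 1])
#   P = listperm2matperm(sigma)                # P[i, sigma[i]] = 1
#   assert torch.equal(matperm2listperm(P), sigma)
#   assert torch.equal(invert_listperm(sigma), torch.tensor([1, 2, 0]))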
def mse(perm, true):
""" perm is matrix, true is list """
return nn.functional.mse_loss(perm, listperm2matperm(true))
def nll(perm, true):
"""
perm: (n, n) or (s, n, n)
true: (n)
"""
n = true.size(-1)
# i = torch.arange(n, device=perm.device)
# j = true.to(perm.device)
# print("perm.nll:", perm.size(), true.size())
elements = perm.cpu()[..., torch.arange(n), true]
# elements = perm.cpu()[torch.arange(n), true]
nll = -torch.sum(torch.log2(elements.to(perm.device)))
    if perm.dim() == 3: # normalize by number of samples
nll = nll / perm.size(0)
# print("nll", nll)
return nll
def dist(perm1, perm2, fn='nll'):
"""
perm1: iterable of permutation tensors
each tensor can have shape (n, n) or (s, n, n)
perm2: iterable of permutation lists (n)
"""
# TODO: is the scaling of this consistent across permutations of multiple "ranks"?
loss = 0.0
# if not isinstance(perm1, tuple):
# perm1, perm2 = (perm1,), (perm2,)
if fn == 'nll':
loss_fn = nll
elif fn == 'mse':
loss_fn = mse
elif fn == 'was':
loss_fn = transport
else: assert False, f"perm.dist: fn {fn} not supported."
loss1, loss2 = 0.0, 0.0
for p1, p2 in zip(perm1, perm2):
n = p2.size(-1)
# print(p1.size(), p1.type())
# print(p2.size(), p2.type())
# print(p2, type(p2))
if fn == 'was': # temporary casework
l1, l2 = loss_fn(p1, p2)
l1_, l2_ = loss_fn(p1, n-1-p2) # reversed permutation also good
if l2_ < l2:
loss1 += l1_
loss2 += l2_
else:
loss1 += l1
loss2 += l2
else:
loss = loss + loss_fn(p1, p2)
# print(loss, loss.type())
if fn == 'was':
return loss1, loss2
else:
return loss
def entropy(p, reduction='mean'):
"""
p: (..., n, n)
    Returns: entropy per matrix, averaged or summed according to `reduction`
    Note: max entropy of a doubly-stochastic n x n matrix is n * log2(n)
"""
n = p.size(-1)
eps = 1e-10
entropy = -(p * torch.log2(eps+p)).sum(dim=[-2,-1])
if reduction is None:
return entropy
elif reduction == 'sum':
return torch.sum(entropy)
elif reduction == 'mean':
return torch.mean(entropy) # entropy / p.view(-1, n, n).size(0)
else: assert False, f"perm.entropy: reduction {reduction} not supported."
def transport(ds, p, reduction='mean'):
# TODO: figure out correct transport between "rank 2" permutations
"""
"Transport" distance between a doubly-stochastic matrix and a permutation
ds: (..., n, n)
p: (n)
Returns: avg
Note:
uniform ds has transport distance (n^2-1)/3
ds[...,i,p[i]] = 1 has transport 0
If distance raised to power p, average case becomes 2n^{p+1}/(p+1)(p+2)
Scratchwork:
true p permuted input with inp = orig[p], i.e. inp[i] = orig[p[i]]
want to restore out[i] = orig[i] = inp[pinv[i]]
model multiplies by input by some DS,
out[i] = inp[j]ds[j,i] = inp[pinv[j]]ds[pinv[j], i]
want ds[pinv[i],i] = 1, rest = 0
define this matrix as P
i.e. P[i, p[i]] = 1
what's an acceptable error? can handle
out[i] = orig[i+d] = inp[pinv[i+d]]
i.e. ds[pinv[i+d], i] = 1
i.e. ds[j, p[j]+-d] = 1
so penalization function should be cost[i,j] = f(j - p[i])
equivalent to optimal transport between rows of ds and P
"""
n = p.size(-1)
dist = torch.arange(n).repeat(n,1).t() - p.repeat(n,1) # dist[i,j] = i - p[j]
# TODO transposing dist should switch t1 and t2
# dist = torch.arange(n).repeat(n,1) - p.repeat(n,1).t() # dist[i,j] = j - p[i]
dist = torch.abs(dist).to(ds.device, dtype=torch.float)
# dist = torch.tensor(dist, dtype=torch.float, device=ds.device)
t1 = torch.sum(ds * dist, dim=[-2,-1])
t2 = torch.sum(ds.transpose(-1,-2) * dist, dim=[-2,-1])
# print("TRANSPORT: ", t1.cpu(), t2.cpu())
t = t1+t2
if reduction is None:
return t
elif reduction == 'sum':
return torch.sum(t)
elif reduction == 'mean':
# return torch.mean(t)
# QUICK DEBUG
return (torch.mean(t1), torch.mean(t2))
else: assert False, f"perm.transport: reduction {reduction} not supported."
def tv(x, norm=2, p=1, symmetric=False, reduction='mean'):
""" Image total variation
x: (b, c, w, h)
If D = (dx, dy) is the vector of differences at a given pixel,
sum up ||D||_norm^p over image
Note that reduction='mean' only averages over the batch dimension
"""
# each pixel wants all channels as part of its delta vector
x = x.transpose(-3, -2).transpose(-2, -1) # (b, w, h, c)
delta = x.new_zeros(*x.size(), 2)
if not symmetric:
dx = x[:, 1:, :, :] - x[:, :-1, :, :]
dy = x[:, :, 1:, :] - x[:, :, :-1, :]
delta[:, :-1, :, :, 0] = dx
delta[:, :, :-1, :, 1] = dy
else:
dx = x[:, 2:, :, :] - x[:, :-2, :, :]
dy = x[:, :, 2:, :] - x[:, :, :-2, :]
delta[:, 1:-1, :, :, 0] = dx / 2.0
delta[:, :, 1:-1, :, 1] = dy / 2.0
# old symmetric version (4-sided)
# delta = x.new_zeros(*x.size(), 4) # TODO do casework on symmetric to either 2 or 4?
# dx_ = x[:, :-1, :, :] - x[:, 1:, :, :]
# dy_ = x[:, :, :-1, :] - x[:, :, 1:, :]
# delta[:, 1:, :, :, 0] = torch.abs(dx_)
# delta[:, :, 1:, :, 1] = torch.abs(dy_)
delta = delta.flatten(-2, -1) # (b, w, h, 2*c [or 4*c])
if norm == p:
v = torch.sum(torch.abs(delta) ** norm, dim=-1)
else:
v = torch.norm(torch.abs(delta), dim=-1, p=norm)
if p != 1:
v = v ** p
if reduction is None:
return v
elif reduction == 'sum':
return torch.sum(v)
elif reduction == 'mean':
return torch.sum(v) / v.size(0)
else: assert False, f"perm.tv: reduction {reduction} not supported."
def tv_kernel(x, norm=2, p=1, symmetric=False, reduction='mean'):
""" Image total variation
x: (b, c, w, h)
If D = (dx, dy) is the vector of differences at a given pixel,
sum up ||D||_norm^p over image
Note that reduction='mean' only averages over the batch dimension
"""
# each pixel wants all channels as part of its delta vector
x = x.transpose(-3, -2).transpose(-2, -1) # (b, w, h, c)
if not symmetric:
delta = x.new_zeros(*x.size(), 2)
dx = x[:, 1:, :, :] - x[:, :-1, :, :]
dy = x[:, :, 1:, :] - x[:, :, :-1, :]
delta[:, :-1, :, :, 0] = dx
delta[:, :, :-1, :, 1] = dy
else:
# dx = x[:, 2:, :, :] - x[:, :-2, :, :]
# dy = x[:, :, 2:, :] - x[:, :, :-2, :]
# delta[:, 1:-1, :, :, 0] = dx / 2.0
# delta[:, :, 1:-1, :, 1] = dy / 2.0
# old symmetric version (4-sided)
delta = x.new_zeros(*x.size(), 4)
dx = x[:, 1:, :, :] - x[:, :-1, :, :]
dy = x[:, :, 1:, :] - x[:, :, :-1, :]
delta[:, :-1, :, :, 0] = dx
delta[:, :, :-1, :, 1] = dy
# dx_ = x[:, :-1, :, :] - x[:, 1:, :, :]
# dy_ = x[:, :, :-1, :] - x[:, :, 1:, :]
delta[:, 1:, :, :, 0] = -dx
delta[:, :, 1:, :, 1] = -dy
delta = delta.flatten(-2, -1) # (b, w, h, 2*c [or 4*c])
if norm == p:
v = torch.sum(torch.abs(delta) ** norm, dim=-1)
else:
v = torch.norm(delta, dim=-1, p=norm)
if p != 1:
v = v ** p
if reduction is None:
return v
elif reduction == 'sum':
return torch.sum(v)
elif reduction == 'mean':
return torch.sum(v) / v.size(0)
else: assert False, f"perm.tv: reduction {reduction} not supported."
def sample_gumbel(shape, device=torch.device('cpu')):
eps = 1e-10
U = torch.rand(shape, dtype=torch.float, device=device)
return -torch.log(eps - torch.log(U + eps))
def add_gumbel_noise(log_alpha, sample_shape=()):
"""
Args:
log_alpha: shape (N, N)
temp: temperature parameter, a float.
sample_shape: represents shape of independent draws
Returns:
log_alpha_noise: a tensor of shape [sample_shape + (N, N)]
"""
# batch = log_alpha.size(0)
n = log_alpha.size(-1)
# noise = sample_gumbel(sample_shape + log_alpha.shape)
# log_alpha_noise = log_alpha + noise.to(log_alpha.device)
noise = sample_gumbel(sample_shape + log_alpha.shape, device=log_alpha.device)
log_alpha_noise = log_alpha + noise
return log_alpha_noise
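# Gumbel-max sanity check (illustrative): taking the argmax of Gumbel-perturbed
# logits samples from softmax(log_alpha), so empirical frequencies should
# approach the underlying probabilities:
#   log_alpha = torch.log(torch.tensor([0.1, 0.3, 0.6]))
#   samples = add_gumbel_noise(log_alpha, sample_shape=(10000,)).argmax(dim=-1)
#   torch.bincount(samples).float() / 10000  # ~ [0.1, 0.3, 0.6]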
|
butterfly-master
|
cnn/permutation_utils.py
|
# modified from https://github.com/fastai/imagenet-fast/blob/master/imagenet_nv/multiproc.py
import argparse
import torch
import sys
import subprocess
from pathlib import Path
import random
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()
if '--world-size' in argslist:
argslist[argslist.index('--world-size')+1] = str(world_size)
else:
argslist.append('--world-size')
argslist.append(str(world_size))
save_dir = Path.cwd()
if '--save-dir' in argslist:
save_dir = argslist[argslist.index('--save-dir')+1]
workers = []
port = random.randint(10000, 20000)
argslist.append('--distributed-init-method')
argslist.append('tcp://localhost:{port}'.format(port=port))
for i in range(world_size):
if '--rank' in argslist:
argslist[argslist.index('--rank')+1] = str(i)
else:
argslist.append('--rank')
argslist.append(str(i))
stdout = None if i == 0 else open(f'{save_dir}/GPU_{i}.log', "w")
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
for p in workers: p.wait()
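# Example usage (illustrative; the target script and flags are placeholders):
# spawns one process per visible GPU, appending --rank/--world-size for each:
#   python multiproc.py main.py --save-dir ./runs --batch-size 256
# Rank 0 inherits stdout; every other rank logs to {save_dir}/GPU_{i}.log.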
|
butterfly-master
|
cnn/multiproc.py
|
import os, sys, subprocess
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import math
from pathlib import Path
import pickle
import random
import datetime
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from
from ray.tune.schedulers import AsyncHyperBandScheduler
import model_utils
import pdataset_utils as dataset_utils
import permutation_utils as perm
class TrainableModel(Trainable):
"""Trainable object for a Pytorch model, to be used with Ray's Hyperband tuning.
"""
def _setup(self, config):
device = config['device']
self.device = device
torch.manual_seed(config['seed'])
if self.device == 'cuda':
torch.cuda.manual_seed(config['seed'])
self.model = model_utils.get_model(config['model'])
# restore permutation
if config['restore_perm'] is not None:
# checkpoint = torch.load(config['restore_perm'], self.device)
checkpoint = torch.load(config['restore_perm'])
# don't restore args, so that you can change temp etc when plugging into end model
# TODO: implement an update_args() method for the models
# self.model.permute = model_utils.get_model(checkpoint['model']['args'])
self.model.permute.load_state_dict(checkpoint['model']['state'])
self.model.to(self.device)
self.nparameters = sum(param.nelement() for param in self.model.parameters())
self.train_loader, self.valid_loader, self.test_loader = dataset_utils.get_dataset(config['dataset'])
permutation_params = filter(lambda p: hasattr(p, '_is_perm_param') and p._is_perm_param, self.model.parameters())
unstructured_params = filter(lambda p: not (hasattr(p, '_is_perm_param') and p._is_perm_param), self.model.parameters())
if config['optimizer'] == 'Adam':
self.optimizer = optim.Adam([{'params': permutation_params, 'weight_decay': config['pwd'], 'lr': config['plr']},
{'params': unstructured_params}],
lr=config['lr'], weight_decay=config['weight_decay'])
else:
self.optimizer = optim.SGD([{'params': permutation_params, 'weight_decay': config['pwd'], 'lr': config['plr']},
{'params': unstructured_params}],
lr=config['lr'], momentum=0.9, weight_decay=config['weight_decay'])
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=config['lr_decay_period'], gamma=config['lr_decay_factor'])
#
self.unsupervised = config['unsupervised']
self.tv = config['tv']
self.anneal_entropy_factor = config['anneal_entropy'] # NOTE: restoring a model does not call the sample_from function
self.anneal_sqrt = config['anneal_sqrt']
self.entropy_p = config['entropy_p']
self.model_args = config['model']
def _train_iteration(self):
self.model.train()
inv_temp = math.sqrt(self._iteration) if self.anneal_sqrt else self._iteration
# print(f"ITERATION {self._iteration} INV TEMP {inv_temp} ANNEAL ENTROPY {self.anneal_entropy_factor}")
inv_temp *= self.anneal_entropy_factor
print(f"ITERATION {self._iteration} INV TEMP {inv_temp}")
for data, target in self.train_loader:
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
if self.unsupervised:
# p = self.model.get_permutations()
# assert p.requires_grad
# print("train_iteration REQUIRES GRAD: ", p.requires_grad)
# H = perm.entropy(p, reduction='mean')
H = self.model.entropy(p=self.entropy_p)
assert H.requires_grad
loss = perm.tv(output, norm=self.tv['norm'], p=self.tv['p'], symmetric=self.tv['sym']) + inv_temp * H
# loss = perm.tv(output, norm=self.tv['norm'], p=self.tv['p'], symmetric=self.tv['sym']) # + inv_temp * H
# print("LOSS ", loss.item())
else:
# target = target.expand(output.size()[:-1]).flatten()
# outupt = output.flatten(0, -2)
# print(output.shape, target.shape)
# assert self.model.samples == output.size(0) // target.size(0)
target = target.repeat(output.size(0) // target.size(0))
# print(output.shape, target.shape)
loss = F.cross_entropy(output, target)
# tw0 = list(self.model.permute)[0].twiddle
# tw1 = list(self.model.permute)[1].twiddle
# assert torch.all(tw0 == tw0)
# assert torch.all(tw1 == tw1)
loss.backward()
# breakpoint()
self.optimizer.step()
# tw0 = list(self.model.permute)[0].twiddle
# tw1 = list(self.model.permute)[1].twiddle
# assert torch.all(tw0 == tw0)
# assert torch.all(tw1 == tw1)
def _test(self):
self.model.eval()
test_loss = 0.0
correct = 0.0
total_samples = 0
if self.unsupervised:
mean_loss = 0.0
mle_loss = 0.0
with torch.no_grad():
for data, target in self.valid_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
total_samples += output.size(0)
if self.unsupervised:
test_loss += perm.tv(output, reduction='sum').item()
                    mean_output = self.model(data, perm='mean')
                    mean_loss += perm.tv(mean_output, reduction='sum').item()
                    mle_output = self.model(data, perm='mle')
                    mle_loss += perm.tv(mle_output, reduction='sum').item()
else:
target = target.repeat(output.size(0)//target.size(0))
test_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += (pred == target.data.view_as(pred)).long().cpu().sum().item()
if self.unsupervised:
true = self.valid_loader.true_permutation[0]
p = self.model.get_permutations() # (rank, sample, n, n)
# p0 = p[0]
# elements = p0[..., torch.arange(len(true)), true]
# print("max in true perm elements", elements.max(dim=-1)[0])
# print(p0)
sample_ent = perm.entropy(p, reduction='mean')
sample_nll = perm.dist(p, self.valid_loader.true_permutation, fn='nll')
# sample_was = perm.dist(p, self.valid_loader.true_permutation, fn='was')
sample_was1, sample_was2 = perm.dist(p, self.valid_loader.true_permutation, fn='was')
mean = self.model.get_permutations(perm='mean') # (rank, n, n)
# print("MEAN PERMUTATION", mean)
mean_ent = perm.entropy(mean, reduction='mean')
mean_nll = perm.dist(mean, self.valid_loader.true_permutation, fn='nll')
# mean_was = perm.dist(mean, self.valid_loader.true_permutation, fn='was')
mean_was1, mean_was2 = perm.dist(mean, self.valid_loader.true_permutation, fn='was')
mean_was1_abs, mean_was2_abs = torch.abs(682.-mean_was1), torch.abs(682.-mean_was2)
unif = torch.ones_like(mean) / mean.size(-1)
# mean_unif_dist = nn.functional.mse_loss(mean, unif, reduction='sum')
mean_unif_dist = torch.sum((mean-unif)**2) / mean.size(0)
mle = self.model.get_permutations(perm='mle') # (rank, n, n)
# mle_ent = perm.entropy(mle, reduction='mean')
# mle_nll = perm.dist(mle, self.valid_loader.true_permutation, fn='nll')
# mle_was = perm.dist(mle, self.valid_loader.true_permutation, fn='was')
mle_was1, mle_was2 = perm.dist(mle, self.valid_loader.true_permutation, fn='was')
H = self.model.entropy(p=self.entropy_p)
# TODO calculate average case wasserstein automatically in terms of rank/dims and power p
return {
"sample_loss": test_loss / total_samples,
"sample_ent": sample_ent.item(),
"sample_nll": sample_nll.item(),
# "sample_was": sample_was.item(),
"sample_was1": sample_was1.item(),
"sample_was2": sample_was2.item(),
"mean_loss": mean_loss / total_samples,
"mean_ent": mean_ent.item(),
"mean_nll": mean_nll.item(),
"mean_was1": mean_was1.item(),
"mean_was2": mean_was2.item(),
"neg_was2": 682.0-mean_was2.item(),
"mle_loss": mle_loss / total_samples,
# "mle_ent": mle_ent.item(),
# "mle_nll": mle_nll.item(),
"mle_was1": mle_was1.item(),
"mle_was2": mle_was2.item(),
# "mean_accuracy": 682.0-mean_was2.item(),
# "neg_sample_loss": -test_loss / total_samples,
"mean_unif_dist": mean_unif_dist.item(),
# "mean_was1_abs": mean_was1_abs.item(),
# "mean_was2_abs": mean_was2_abs.item(),
"model_ent": H.item(),
# "neg_ent_floor": -int(mean_ent.item()),
"neg_ent": -H.item(),
}
# test_loss = test_loss / len(self.valid_loader.dataset)
# accuracy = correct / len(self.valid_loader.dataset)
test_loss = test_loss / total_samples
accuracy = correct / total_samples
return {"mean_loss": test_loss, "mean_accuracy": accuracy}
def _train(self):
# self.scheduler.step()
# self._train_iteration()
# return self._test()
self._train_iteration()
metrics = self._test()
self.scheduler.step()
return metrics
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir, "model_optimizer.pth")
full_model = {
'state': self.model.state_dict(),
'args': self.model_args,
}
state = {'model': full_model,
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict()}
torch.save(state, checkpoint_path)
# model_path = os.path.join(checkpoint_dir, "saved_model.pth")
# torch.save(full_model, model_path)
# model_args = os.path.join(checkpoint_dir, "saved_model.args")
# torch.save(self.model_args, model_args)
return checkpoint_path
def _restore(self, checkpoint_path):
if hasattr(self, 'device'):
checkpoint = torch.load(checkpoint_path, self.device)
else:
checkpoint = torch.load(checkpoint_path)
# saved_model = torch.load(checkpoint_path + '.args')
# self.model = model_utils.get_model(saved_model['args'])
self.model = model_utils.get_model(checkpoint['model']['args'])
self.model.to(self.device)
self.model.load_state_dict(checkpoint['model']['state'])
permutation_params = filter(lambda p: hasattr(p, '_is_perm_param') and p._is_perm_param, self.model.parameters())
unstructured_params = filter(lambda p: not (hasattr(p, '_is_perm_param') and p._is_perm_param), self.model.parameters())
self.optimizer = optim.Adam([{'params': permutation_params},
{'params': unstructured_params}],)
# self.optimizer = optim.Adam([{'params': permutation_params, 'weight_decay': 0.0, 'lr': config['plr']},
# {'params': unstructured_params}],
# lr=config['lr'], weight_decay=config['weight_decay'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
# self.optimizer.param_groups[1].update({'weight_decay': 0.0})
# self.optimizer.param_groups[0].update({'params': permutation_params})
# self.optimizer.param_groups[1].update({'params': unstructured_params})
# self.scheduler = optim.lr_scheduler.StepLR(self.optimizer)
self.scheduler.load_state_dict(checkpoint['scheduler'])
self.scheduler.optimizer = self.optimizer
ex = Experiment('Permutation_experiment')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('../config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.config
def default_config():
dataset = 'PPCIFAR10'
model = 'PResNet18' # Name of model, see model_utils.py
args = {} # Arguments to be passed to the model, as a dictionary
optimizer = 'Adam' # Which optimizer to use, either Adam or SGD
lr_decay = True # Whether to use learning rate decay
lr_decay_period = 50 # Period of learning rate decay
plr_min = 1e-4
plr_max = 1e-3
weight_decay = True # Whether to use weight decay
pwd = True
pwd_min = 1e-4
pwd_max = 5e-4
ntrials = 20 # Number of trials for hyperparameter tuning
nmaxepochs = 200 # Maximum number of epochs
unsupervised = False
batch = 128
tv_norm = 2
tv_p = 1
tv_sym = None
anneal_ent_min = 0.0
anneal_ent_max = 0.0
anneal_sqrt = True
entropy_p = None
temp_min = None
temp_max = None
restore_perm = None
resume_pth = None
result_dir = project_root + '/cnn/results' # Directory to store results
cuda = torch.cuda.is_available() # Whether to use GPU
smoke_test = False # Finish quickly for testing
@ex.named_config
def sgd():
optimizer = 'SGD' # Which optimizer to use, either Adam or SGD
lr_decay = True # Whether to use learning rate decay
lr_decay_period = 25 # Period of learning rate decay
weight_decay = True # Whether to use weight decay
@ex.capture
def cifar10_experiment(dataset, model, args, optimizer, nmaxepochs, lr_decay, lr_decay_period, plr_min, plr_max, weight_decay, pwd, pwd_min, pwd_max, ntrials, result_dir, cuda, smoke_test, unsupervised, batch, tv_norm, tv_p, tv_sym, temp_min, temp_max, anneal_ent_min, anneal_ent_max, anneal_sqrt, entropy_p, restore_perm, resume_pth): # TODO clean up and set min,max to pairs/dicts
assert optimizer in ['Adam', 'SGD'], 'Only Adam and SGD are supported'
assert restore_perm is None or resume_pth is None # If we're fully resuming training from the checkpoint, no point in restoring any part of the model
if restore_perm is not None:
restore_perm = '/dfs/scratch1/albertgu/learning-circuits/cnn/saved_perms/' + restore_perm
print("Restoring permutation from", restore_perm)
args_rand = args.copy()
if temp_min is not None and temp_max is not None:
args_rand['temp'] = sample_from(lambda spec: math.exp(random.uniform(math.log(temp_min), math.log(temp_max))))
# args_rand['samples'] = sample_from(lambda _: np.random.choice((8,16)))
# args_rand['sig'] = sample_from(lambda _: np.random.choice(('BT1', 'BT4')))
tv = {'norm': tv_norm, 'p': tv_p}
    if tv_sym == 'true':
        tv['sym'] = sample_from(lambda _: np.random.choice((True,)))
    elif tv_sym == 'false':
        tv['sym'] = sample_from(lambda _: np.random.choice((False,)))
    elif tv_sym == 'random':
        tv['sym'] = sample_from(lambda _: np.random.choice((True, False)))
    else:
        assert tv_sym is None, 'tv_sym must be true, false, or random'
tv['sym'] = False
if anneal_ent_max == 0.0:
anneal_entropy = 0.0
else:
        anneal_entropy = sample_from(lambda _: math.exp(random.uniform(math.log(anneal_ent_min), math.log(anneal_ent_max))))
name_smoke_test = 'smoke_' if smoke_test else '' # for easy finding and deleting unimportant logs
name_args = '_'.join([k+':'+str(v) for k,v in args.items()])
config={
'optimizer': optimizer,
# 'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(2e-5), math.log(1e-2)) if optimizer == 'Adam'
'lr': 2e-4 if optimizer == 'Adam' else math.exp(random.uniform(math.log(0.025), math.log(0.2))),
'plr': sample_from(lambda spec: math.exp(random.uniform(math.log(plr_min), math.log(plr_max)))),
# 'lr_decay_factor': sample_from(lambda spec: random.choice([0.1, 0.2])) if lr_decay else 1.0,
'lr_decay_factor': 0.2 if lr_decay else 1.0,
'lr_decay_period': lr_decay_period,
# 'weight_decay': sample_from(lambda spec: math.exp(random.uniform(math.log(1e-6), math.log(5e-4)))) if weight_decay else 0.0,
'weight_decay': 5e-4 if weight_decay else 0.0,
'pwd': sample_from(lambda spec: math.exp(random.uniform(math.log(pwd_min), math.log(pwd_max)))) if pwd else 0.0,
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'device': 'cuda' if cuda else 'cpu',
'model': {'name': model, 'args': args_rand},
# 'model': {'name': model, 'args': args.update({'temp': sample_from(lambda spec: math.exp(random.uniform(math.log(temp_min), math.log(temp_max))))})},
'dataset': {'name': dataset, 'batch': batch},
'unsupervised': unsupervised,
# 'tv': {'norm': tv_norm, 'p': tv_p, 'sym': tv_sym},
# 'tv': {'norm': tv_norm, 'p': tv_p, 'sym': sample_from(lambda _: np.random.choice((True,False)))},
'tv': tv if unsupervised else None,
# 'anneal_entropy': anneal_entropy,
# 'anneal_entropy': sample_from(lambda _: random.uniform(anneal_ent_min, anneal_ent_max)),
'anneal_entropy': 0.0 if anneal_ent_max==0.0 else sample_from(lambda _: math.exp(random.uniform(math.log(anneal_ent_min), math.log(anneal_ent_max)))),
'anneal_sqrt': anneal_sqrt,
'entropy_p': entropy_p,
'restore_perm': restore_perm,
}
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
commit_id = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip().decode('utf-8')
stopping_criteria = {"training_iteration": 1 if smoke_test else nmaxepochs}
if unsupervised: # TODO group all the unsupervised casework together
stopping_criteria.update({'model_ent': 200, 'neg_ent': -5.0})
experiment = RayExperiment(
# name=f'pcifar10_{model}_{args}_{optimizer}_lr_decay_{lr_decay}_weight_decay_{weight_decay}',
name=f'{name_smoke_test}{dataset.lower()}_{model}_{name_args}_{optimizer}_epochs_{nmaxepochs}_plr_{plr_min}-{plr_max}_{timestamp}_{commit_id}',
# name=f'{dataset.lower()}_{model}_{args_orig}_{optimizer}_epochs_{nmaxepochs}_lr_decay_{lr_decay}_plr_{plr_min}-{plr_max}_tvsym_{tv_sym}_{timestamp}_{commit_id}',
run=TrainableModel,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
checkpoint_freq=500, # Just to enable recovery with @max_failures
max_failures=0,
# resources_per_trial={'cpu': 4, 'gpu': 0.5 if cuda else 0},
resources_per_trial={'cpu': 4, 'gpu': 1 if cuda else 0},
# stop={"training_iteration": 1 if smoke_test else nmaxepochs, 'model_ent': 200, 'neg_ent': -5.0},
stop=stopping_criteria,
# stop={"training_iteration": 1 if smoke_test else nmaxepochs},
restore=resume_pth,
config=config,
)
return experiment
@ex.automain
def run(model, result_dir, nmaxepochs, unsupervised):
experiment = cifar10_experiment()
try:
with open('../config/redis_address', 'r') as f:
address = f.read().strip()
# ray.init(redis_address=address, temp_dir='/tmp/ray2/')
ray.init(redis_address=address)
except:
ray.init()
if unsupervised:
ahb = AsyncHyperBandScheduler(reward_attr='neg_was2', max_t=nmaxepochs, grace_period=nmaxepochs, reduction_factor=2, brackets=1)
else:
ahb = AsyncHyperBandScheduler(reward_attr='mean_accuracy', max_t=nmaxepochs)
trials = ray.tune.run(
experiment, scheduler=ahb,
raise_on_failed_trial=False, queue_trials=True,
# with_server=True, server_port=4321,
)
trials = [trial for trial in trials if trial.last_result is not None]
accuracy = [trial.last_result.get('mean_accuracy', float('-inf')) for trial in trials]
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return max(accuracy)
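# Example invocation (illustrative; sacred's `with` syntax overrides the
# config values defined in default_config above):
#   python permuted_experiment.py with dataset=PPCIFAR10 model=PResNet18 \
#       optimizer=SGD ntrials=5 smoke_test=True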
|
butterfly-master
|
cnn/permuted_experiment.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import math
import numpy as np
def bitreversal_permutation(n):
"""Return the bit reversal permutation used in FFT.
Parameter:
n: integer, must be a power of 2.
Return:
perm: bit reversal permutation, numpy array of size n
"""
m = int(math.log2(n))
assert n == 1 << m, 'n must be a power of 2'
perm = np.arange(n).reshape(n, 1)
for i in range(m):
n1 = perm.shape[0] // 2
perm = np.hstack((perm[:n1], perm[n1:]))
return torch.tensor(perm.squeeze(0))
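# Example (illustrative): repeatedly interleaving the halves yields the
# standard FFT bit-reversal order,
#   bitreversal_permutation(8)  # -> tensor([0, 4, 2, 6, 1, 5, 3, 7])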
def get_dataset(config_dataset):
if config_dataset['name'] in ['CIFAR10', 'PCIFAR10', 'PPCIFAR10']:
if config_dataset['name'] == 'PCIFAR10':
# fix permutation
rng_state = torch.get_rng_state()
torch.manual_seed(0)
true_perm = torch.randperm(1024)
torch.set_rng_state(rng_state)
permutation_transforms = [transforms.Lambda(lambda x: x.view(-1,1024)[:,true_perm].view(-1,32,32))]
elif config_dataset['name'] == 'PPCIFAR10':
# rng_state = torch.get_rng_state()
# torch.manual_seed(0)
# true_perm1 = torch.randperm(32)
# true_perm2 = torch.randperm(32)
true_perm1 = bitreversal_permutation(32)
true_perm2 = bitreversal_permutation(32)
# torch.set_rng_state(rng_state)
def fn(x):
# dumb hack because torch doesn't support multiple LongTensor indexing
return x.transpose(-1,-2)[...,true_perm2].transpose(-1,-2)[...,true_perm1]
permutation_transforms = [transforms.Lambda(fn)]
else:
permutation_transforms = []
normalize = transforms.Normalize(
mean=[0.49139765, 0.48215759, 0.44653141],
std=[0.24703199, 0.24348481, 0.26158789]
)
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
normalize
] + permutation_transforms)
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
normalize
] + permutation_transforms)
if 'transform' in config_dataset and config_dataset['transform'] == 'original':
trainset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=True, transform=transforms.ToTensor())
validset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=False, transform=transforms.ToTensor())
elif 'transform' in config_dataset and config_dataset['transform'] == 'permute':
transforms_ = transforms.Compose([transforms.ToTensor()] + permutation_transforms)
trainset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=True, transform=transforms_)
validset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=False, transform=transforms_)
elif 'transform' in config_dataset and config_dataset['transform'] == 'normalize':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
normalize
])
trainset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=True, transform=transform_train)
validset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=False, transform=transform_train)
else:
trainset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=True, transform=transform_train)
validset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=True, download=False, transform=transform_test)
testset = torchvision.datasets.CIFAR10(root=project_root+'/data', train=False, download=True, transform=transform_test)
np_random_state = np.random.get_state() # To get exactly the same training and validation sets
np.random.seed(0)
indices = np.random.permutation(range(len(trainset)))
np.random.set_state(np_random_state)
trainset = torch.utils.data.Subset(trainset, indices[:45000])
# trainset = torch.utils.data.Subset(trainset, indices[:5000])
validset = torch.utils.data.Subset(validset, indices[-5000:])
trainloader = torch.utils.data.DataLoader(trainset, batch_size=config_dataset['batch'], shuffle=True, num_workers=4)
validloader = torch.utils.data.DataLoader(validset, batch_size=config_dataset['batch'], shuffle=False, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=config_dataset['batch'], shuffle=False, num_workers=4)
if config_dataset['name'] == 'PCIFAR10':
# trainloader.true_permutation = true_perm
validloader.true_permutation = true_perm
testloader.true_permutation = true_perm
elif config_dataset['name'] == 'PPCIFAR10':
# trainloader.true_permutation1 = true_perm1
# trainloader.true_permutation2 = true_perm2
validloader.true_permutation = (true_perm1, true_perm2)
testloader.true_permutation = (true_perm1, true_perm2)
return trainloader, validloader, testloader
else:
assert False, 'Dataset not implemented'
|
butterfly-master
|
cnn/pdataset_utils.py
|
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
# Add to $PYTHONPATH in addition to sys.path so that ray workers can see
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
import math
from pathlib import Path
import pickle
import random
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
import ray
from ray.tune import Trainable, Experiment as RayExperiment, sample_from
from ray.tune.schedulers import AsyncHyperBandScheduler
import model_utils
import dataset_utils
import models.resnet_imagenet as imagenet_models # only use imagenet models
import models
from models.butterfly_conv import ButterflyConv2d, ButterflyConv2dBBT
from models.low_rank_conv import LowRankConv2d
from models.sparse_conv import SparseConv2d
class TrainableModel(Trainable):
"""Trainable object for a Pytorch model, to be used with Ray's Hyperband tuning.
"""
def _setup(self, config):
model_args = config['model']['args']
device = config['device']
self.device = device
torch.manual_seed(config['seed'])
if self.device == 'cuda':
torch.cuda.manual_seed(config['seed'])
self.layer = model_args['layer']
# make butterfly
if config['dataset'] == 'cifar10':
teacher_model = models.__dict__[config['teacher_model']]()
elif config['dataset'] == 'imagenet':
teacher_model = imagenet_models.__dict__[config['teacher_model']]()
modules = set([name for name, _ in teacher_model.named_modules()])
assert model_args['layer'] in modules, f"{model_args['layer']} not in network"
# get parameters from layer to replace to use in butterfly
for name, module in teacher_model.named_modules():
if name == model_args['layer']:
try:
in_channels = module.in_channels
out_channels = module.out_channels
kernel_size = module.kernel_size
stride = module.stride
padding = module.padding
                except AttributeError:
raise ValueError("Only convolutional layers currently supported.")
# create butterfly for specific layer and train
if model_args['structure_type'] == 'B':
structured_layer = ButterflyConv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
bias=False, tied_weight=bool(model_args['tied_weight']), ortho_init=bool(model_args['ortho_init']),
param=model_args['param'], nblocks=model_args['nblocks'],
expansion=model_args['expansion'], diag_init=model_args['diag_init'])
elif model_args['structure_type'] == 'LR':
assert out_channels >= in_channels, "Out channels < in channels"
rank = model_args.get('rank', int(math.log2(out_channels))
if model_args['nblocks'] == 0
else model_args['nblocks'] * 2 * int(math.log2(out_channels)))
structured_layer = LowRankConv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
bias=False, rank=rank)
elif model_args['structure_type'] == 'sparse':
structured_layer = SparseConv2d(nparams=model_args['nparams'],
layer=model_args['layer'], pretrained=config['pretrained'],
dataset=config['dataset'], model=config['teacher_model'],
device=device, in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
bias=False)
self.model = structured_layer.to(device)
def load_teacher(traindir):
teacher_input, teacher_output = dataset_utils.get_mmap_files(traindir, self.layer)
train = torch.utils.data.TensorDataset(teacher_input, teacher_output)
train_loader = torch.utils.data.DataLoader(train, batch_size=256, shuffle=True,
num_workers=config['workers'], pin_memory=True)
return train_loader
# load activations
self.train_loader = load_teacher(config['train_dir'])
if config['optimizer'] == 'Adam':
self.optimizer = optim.Adam(structured_layer.parameters(), lr=config['lr'])
else:
self.optimizer = optim.SGD(structured_layer.parameters(), lr=config['lr'], momentum=config['momentum'])
def _train_iteration(self):
self.model.train()
for data, target in self.train_loader:
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
loss = F.mse_loss(output, target)
loss.backward()
self.optimizer.step()
def _test(self):
self.model.eval()
loss = 0.0
with torch.no_grad():
for data, target in self.train_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss += F.mse_loss(output, target).item()
training_loss = loss / len(self.train_loader)
return {"mean_loss": training_loss, "inverse_loss": 1/training_loss}
def _train(self):
self._train_iteration()
return self._test()
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir, "model_optimizer.pth")
state = {'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict()
}
torch.save(state, checkpoint_path)
return checkpoint_path
def _restore(self, checkpoint_path):
if hasattr(self, 'device'):
checkpoint = torch.load(checkpoint_path, self.device)
else:
checkpoint = torch.load(checkpoint_path)
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
ex = Experiment('Distillation_experiment')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('../config/slack.json') # Add webhook_url there for Slack notification
if slack_config_path.exists():
ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
@ex.config
def default_config():
model = 'resnet18' # Name of model, see model_utils.py
model_args = {'structure_type': 'B',
'nblocks': 0,
'param': 'regular',
'diag_init': 'one',
'expansion': 1,
'tied_weight': 0,
'ortho_init': 1} # Arguments to be passed to the model, as a dictionary
optimizer = 'SGD' # Which optimizer to use, either Adam or SGD
ntrials = 100 # Number of trials for hyperparameter tuning
nmaxepochs = 10 # Maximum number of epochs
result_dir = project_root + '/cnn/ray_results' # Directory to store results
train_dir = '/distillation/imagenet/activations'
cuda = torch.cuda.is_available() # Whether to use GPU
smoke_test = False # Finish quickly for testing
workers = 4
dataset = 'imagenet'
teacher_model = 'resnet18'
min_lr = 1e-4
    max_lr = 1
    grace_period = 2
    momentum = 0.9
    pretrained = None
@ex.capture
def distillation_experiment(model, model_args, optimizer,
ntrials, result_dir, train_dir, workers, cuda, smoke_test, teacher_model,
dataset, min_lr, max_lr, momentum, pretrained):
assert optimizer in ['Adam', 'SGD'], 'Only Adam and SGD are supported'
config={
'optimizer': optimizer,
        'lr': sample_from(lambda spec: math.exp(random.uniform(math.log(min_lr), math.log(max_lr)))),
'seed': sample_from(lambda spec: random.randint(0, 1 << 16)),
'device': 'cuda' if cuda else 'cpu',
'model': {'name': model, 'args': model_args},
'teacher_model': teacher_model,
'train_dir': train_dir,
'workers': workers,
'dataset': dataset,
'momentum': momentum,
'pretrained': pretrained
}
model_args_print = '_'.join([f'{key}_{value}' for key,value in model_args.items()])
experiment = RayExperiment(
name=f'{model}_{model_args_print}_{optimizer}',
run=TrainableModel,
local_dir=result_dir,
num_samples=ntrials,
checkpoint_at_end=True,
checkpoint_freq=1000, # Just to enable recovery with @max_failures
max_failures=-1,
resources_per_trial={'cpu': 4, 'gpu': 1 if cuda else 0},
stop={"training_iteration": 1 if smoke_test else 9999},
config=config,
)
return experiment
@ex.automain
def run(model, result_dir, nmaxepochs, grace_period):
experiment = distillation_experiment()
try:
with open('../config/redis_address', 'r') as f:
address = f.read().strip()
ray.init(redis_address=address)
except:
ray.init()
ahb = AsyncHyperBandScheduler(reward_attr='inverse_loss', grace_period=grace_period, reduction_factor=2, brackets=3, max_t=nmaxepochs)
trials = ray.tune.run(experiment, scheduler=ahb, raise_on_failed_trial=False, queue_trials=True)
trials = [trial for trial in trials if trial.last_result is not None]
loss = [trial.last_result.get('mean_loss', float('inf')) for trial in trials]
checkpoint_path = Path(result_dir) / experiment.name
checkpoint_path.mkdir(parents=True, exist_ok=True)
checkpoint_path /= 'trial.pkl'
with checkpoint_path.open('wb') as f:
pickle.dump(trials, f)
ex.add_artifact(str(checkpoint_path))
return min(loss)
|
butterfly-master
|
cnn/distill_experiment.py
|
'''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
def __init__(self, groups=2):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'''
N, C, H, W = x.size()
g = self.groups
return x.view(N, g, C//g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)
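# Example (illustrative): with N=1, C=4, g=2, channels [0, 1, 2, 3] are
# regrouped to [0, 2, 1, 3], interleaving the two groups:
#   x = torch.arange(4.).view(1, 4, 1, 1)
#   ShuffleBlock(groups=2)(x).flatten()  # -> tensor([0., 2., 1., 3.])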
class SplitBlock(nn.Module):
def __init__(self, ratio):
super(SplitBlock, self).__init__()
self.ratio = ratio
def forward(self, x):
c = int(x.size(1) * self.ratio)
return x[:, :c, :, :], x[:, c:, :, :]
class BasicBlock(nn.Module):
def __init__(self, in_channels, split_ratio=0.5):
super(BasicBlock, self).__init__()
self.split = SplitBlock(split_ratio)
in_channels = int(in_channels * split_ratio)
self.conv1 = nn.Conv2d(in_channels, in_channels,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels, in_channels,
kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
self.bn2 = nn.BatchNorm2d(in_channels)
self.conv3 = nn.Conv2d(in_channels, in_channels,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(in_channels)
self.shuffle = ShuffleBlock()
def forward(self, x):
x1, x2 = self.split(x)
out = F.relu(self.bn1(self.conv1(x2)))
out = self.bn2(self.conv2(out))
out = F.relu(self.bn3(self.conv3(out)))
out = torch.cat([x1, out], 1)
out = self.shuffle(out)
return out
class DownBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(DownBlock, self).__init__()
mid_channels = out_channels // 2
# left
self.conv1 = nn.Conv2d(in_channels, in_channels,
kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels, mid_channels,
kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(mid_channels)
# right
self.conv3 = nn.Conv2d(in_channels, mid_channels,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(mid_channels)
self.conv4 = nn.Conv2d(mid_channels, mid_channels,
kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
self.bn4 = nn.BatchNorm2d(mid_channels)
self.conv5 = nn.Conv2d(mid_channels, mid_channels,
kernel_size=1, bias=False)
self.bn5 = nn.BatchNorm2d(mid_channels)
self.shuffle = ShuffleBlock()
def forward(self, x):
# left
out1 = self.bn1(self.conv1(x))
out1 = F.relu(self.bn2(self.conv2(out1)))
# right
out2 = F.relu(self.bn3(self.conv3(x)))
out2 = self.bn4(self.conv4(out2))
out2 = F.relu(self.bn5(self.conv5(out2)))
# concat
out = torch.cat([out1, out2], 1)
out = self.shuffle(out)
return out
class ShuffleNetV2(nn.Module):
def __init__(self, net_size):
super(ShuffleNetV2, self).__init__()
out_channels = configs[net_size]['out_channels']
num_blocks = configs[net_size]['num_blocks']
self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.in_channels = 24
self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels[3])
self.linear = nn.Linear(out_channels[3], 10)
def _make_layer(self, out_channels, num_blocks):
layers = [DownBlock(self.in_channels, out_channels)]
for i in range(num_blocks):
layers.append(BasicBlock(out_channels))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
# out = F.max_pool2d(out, 3, stride=2, padding=1)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn2(self.conv2(out)))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
configs = {
0.5: {
'out_channels': (48, 96, 192, 1024),
'num_blocks': (3, 7, 3)
},
1: {
'out_channels': (116, 232, 464, 1024),
'num_blocks': (3, 7, 3)
},
1.5: {
'out_channels': (176, 352, 704, 1024),
'num_blocks': (3, 7, 3)
},
2: {
'out_channels': (224, 488, 976, 2048),
'num_blocks': (3, 7, 3)
}
}
def test():
net = ShuffleNetV2(net_size=0.5)
x = torch.randn(3, 3, 32, 32)
y = net(x)
print(y.shape)
# test()
|
butterfly-master
|
cnn/models/shufflenetv2.py
|
import torch
from torch import nn
import torch.nn.functional as F
from butterfly import Butterfly
from butterfly.butterfly import ButterflyBmm
from butterfly.butterfly_multiply import butterfly_mult_conv2d, butterfly_mult_conv2d_svd, bbt_mult_conv2d
import math
class Butterfly1x1Conv(Butterfly):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
"""
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w) if real or (batch, c, h, w, 2) if complex
Return:
output: (batch, nstack * c, h, w) if real or (batch, nstack * c, h, w, 2) if complex
"""
# TODO: Only doing real for now
batch, c, h, w = input.shape
input_reshape = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c)
output = super().forward(input_reshape)
return output.view(batch, h * w, self.nstack * c).transpose(1, 2).view(batch, self.nstack * c, h, w)
class ButterflyConv2d(ButterflyBmm):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
Parameters:
in_channels: size of input
out_channels: size of output
kernel_size: int or (int, int)
stride: int or (int, int)
        padding: int or (int, int)
dilation: int or (int, int)
**kwargs: args to ButterflyBmm, see Butterfly class
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, fused_unfold=False, **kwargs):
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
self.stride = (stride, stride) if isinstance(stride, int) else stride
self.padding = (padding, padding) if isinstance(padding, int) else padding
self.dilation = (dilation, dilation) if isinstance(dilation, int) else dilation
self.fused_unfold = fused_unfold
super().__init__(in_channels, out_channels, self.kernel_size[0] * self.kernel_size[1],
complex=False, **kwargs)
# Don't need bias for each of 9 matrices, only one bias is enough
if self.bias is not None:
self.bias_conv = nn.Parameter(self.bias[0].clone())
self.bias = None
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w) if real or (batch, c, h, w, 2) if complex
Return:
output: (batch, nstack * c, h, w) if real or (batch, nstack * c, h, w, 2) if complex
"""
# TODO: Only doing real for now
batch, c, h, w = input.shape
h_out = (h + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1
        w_out = (w + 2 * self.padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) // self.stride[1] + 1
# if not (self.fused_unfold and self.stride == (1, 1) and self.kernel_size[0] == self.kernel_size[1]
# and self.padding[0] == self.padding[1] and self.dilation == (1, 1) and c <= 1024 and input.is_cuda):
if True: # Not using fused unfolding for now to quickly try variants of butterfly
# unfold input into patches and call batch matrix multiply
input_patches = F.unfold(input, self.kernel_size, self.dilation, self.padding, self.stride).view(
batch, c, self.kernel_size[0] * self.kernel_size[1], h_out * w_out)
input = input_patches.permute(0, 3, 2, 1).reshape(batch * h_out * w_out, self.kernel_size[0] * self.kernel_size[1], c)
output = super().forward(input)
else:
batch_out = batch * h_out * w_out
if self.param == 'regular':
if self.nblocks == 0:
output = butterfly_mult_conv2d(self.twiddle, input, self.kernel_size[0],
self.padding[0], self.increasing_stride)
else:
output = bbt_mult_conv2d(self.twiddle, input, self.kernel_size[0], self.padding[0])
elif self.param == 'ortho':
c, s = torch.cos(self.twiddle), torch.sin(self.twiddle)
twiddle = torch.stack((torch.stack((c, -s), dim=-1),
torch.stack((s, c), dim=-1)), dim=-2)
                output = butterfly_mult_conv2d(twiddle, input, self.kernel_size[0],
                                               self.padding[0], self.increasing_stride)
elif self.param == 'svd':
with torch.no_grad(): # Projected SGD
self.twiddle[..., 1, :].clamp_(min=1 / self.max_gain_per_factor, max=self.max_gain_per_factor)
output = butterfly_mult_conv2d_svd(self.twiddle, input, self.kernel_size[0],
self.padding[0], self.increasing_stride)
output = super().post_process(input, output)
# combine matrix batches
output = output.mean(dim=1)
if hasattr(self, 'bias_conv'):
output = output + self.bias_conv
return output.view(batch, h_out * w_out, self.out_channels).transpose(1, 2).view(batch, self.out_channels, h_out, w_out)
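# Added illustration: the unfold path above is the standard im2col trick. A
# minimal sketch checking that unfold + matmul reproduces F.conv2d for a dense
# weight (assumes only torch; not specific to butterfly matrices):
def _demo_unfold_conv():
    batch, c_in, c_out, k, h, w = 2, 4, 6, 3, 8, 8
    x = torch.randn(batch, c_in, h, w)
    weight = torch.randn(c_out, c_in, k, k)
    ref = F.conv2d(x, weight, padding=1)
    patches = F.unfold(x, k, padding=1)      # (batch, c_in*k*k, h*w)
    out = weight.view(c_out, -1) @ patches   # (batch, c_out, h*w)
    assert torch.allclose(ref, out.view(batch, c_out, h, w), atol=1e-4)
# _demo_unfold_conv()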
class ButterflyConv2dBBT(nn.Module):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
Parameters:
in_channels: size of input
out_channels: size of output
kernel_size: int or (int, int)
stride: int or (int, int)
        padding: int or (int, int)
dilation: int or (int, int)
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
nblocks: number of BBT blocks in the product
tied_weight: whether the weights in the butterfly factors are tied.
If True, will have 4N parameters, else will have 2 N log N parameters (not counting bias)
increasing_stride: whether to multiply with increasing stride (e.g. 1, 2, ..., n/2) or
decreasing stride (e.g., n/2, n/4, ..., 1).
Note that this only changes the order of multiplication, not how twiddle is stored.
In other words, twiddle[@log_stride] always stores the twiddle for @stride.
ortho_init: whether the weight matrix should be initialized to be orthogonal/unitary.
param: The parameterization of the 2x2 butterfly factors, either 'regular' or 'ortho' or 'svd'.
'ortho' and 'svd' only support real, not complex.
max_gain: (only for svd parameterization) controls the maximum and minimum singular values
of the whole BB^T matrix (not of each factor).
For example, max_gain=10.0 means that the singular values are in [0.1, 10.0].
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True,
tied_weight=True, nblocks=1, ortho_init=False, param='regular', max_gain=10.0):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
self.stride = (stride, stride) if isinstance(stride, int) else stride
self.padding = (padding, padding) if isinstance(padding, int) else padding
self.dilation = (dilation, dilation) if isinstance(dilation, int) else dilation
self.nblocks = nblocks
max_gain_per_block = max_gain ** (1 / (2 * nblocks))
# layers = [ButterflyConv2d(in_channels, out_channels, self.kernel_size,
# self.stride, self.padding, self.dilation, bias=False,
# tied_weight=tied_weight, increasing_stride=False,
# ortho_init=ortho_init, param=param,
# max_gain=max_gain_per_block),
# ButterflyBmm(out_channels, out_channels,
# self.kernel_size[0] * self.kernel_size[1],
# False, bias if 0 == nblocks - 1 else False,
# tied_weight, increasing_stride=True,
# ortho_init=ortho_init, param=param,
# max_gain=max_gain_per_block)]
# for i in range(nblocks - 1):
layers = []
for i in range(nblocks):
layers.append(ButterflyBmm(in_channels if i == 0 else out_channels,
out_channels, self.kernel_size[0] *
self.kernel_size[1], False, False,
tied_weight, increasing_stride=False,
ortho_init=ortho_init, param=param,
max_gain=max_gain_per_block))
layers.append(ButterflyBmm(out_channels, out_channels,
self.kernel_size[0] *
self.kernel_size[1], False, bias if i == nblocks - 1 else False,
tied_weight, increasing_stride=True,
ortho_init=ortho_init, param=param,
max_gain=max_gain_per_block))
self.layers = nn.Sequential(*layers)
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w) if real or (batch, c, h, w, 2) if complex
Return:
output: (batch, nstack * c, h, w) if real or (batch, nstack * c, h, w, 2) if complex
"""
# TODO: Only doing real for now
batch, c, h, w = input.shape
h_out = (h + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1
        w_out = (w + 2 * self.padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) // self.stride[1] + 1
input_patches = F.unfold(input, self.kernel_size, self.dilation, self.padding, self.stride).view(batch, c, self.kernel_size[0] * self.kernel_size[1], h_out * w_out)
input_reshape = input_patches.permute(0, 3, 2, 1).reshape(batch * h_out * w_out, self.kernel_size[0] * self.kernel_size[1], c)
output = self.layers(input_reshape).mean(dim=1)
# output = self.layers(input).mean(dim=1)
return output.view(batch, h_out * w_out, self.out_channels).transpose(1, 2).view(batch, self.out_channels, h_out, w_out)
class ButterflyConv2dBBTBBT(nn.Module):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
Parameters:
in_channels: size of input
out_channels: size of output
kernel_size: int or (int, int)
stride: int or (int, int)
        padding: int or (int, int)
dilation: int or (int, int)
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
tied_weight: whether the weights in the butterfly factors are tied.
If True, will have 4N parameters, else will have 2 N log N parameters (not counting bias)
increasing_stride: whether to multiply with increasing stride (e.g. 1, 2, ..., n/2) or
decreasing stride (e.g., n/2, n/4, ..., 1).
Note that this only changes the order of multiplication, not how twiddle is stored.
In other words, twiddle[@log_stride] always stores the twiddle for @stride.
ortho_init: whether the weight matrix should be initialized to be orthogonal/unitary.
param: The parameterization of the 2x2 butterfly factors, either 'regular' or 'ortho' or 'svd'.
'ortho' and 'svd' only support real, not complex.
max_gain: (only for svd parameterization) controls the maximum and minimum singular values
of the whole BB^T BB^T matrix (not of each factor).
For example, max_gain=10.0 means that the singular values are in [0.1, 10.0].
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True,
tied_weight=True, ortho_init=False, param='regular', max_gain=10.0):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
self.stride = (stride, stride) if isinstance(stride, int) else stride
self.padding = (padding, padding) if isinstance(padding, int) else padding
self.dilation = (dilation, dilation) if isinstance(dilation, int) else dilation
self.layers = nn.Sequential(
ButterflyBmm(in_channels, out_channels, self.kernel_size[0] * self.kernel_size[1], False, False, tied_weight, increasing_stride=False, ortho_init=ortho_init, param=param, max_gain=max_gain ** (1 / 4)),
ButterflyBmm(out_channels, out_channels, self.kernel_size[0] * self.kernel_size[1], False, False, tied_weight, increasing_stride=True, ortho_init=ortho_init, param=param, max_gain=max_gain ** (1 / 4)),
ButterflyBmm(out_channels, out_channels, self.kernel_size[0] * self.kernel_size[1], False, False, tied_weight, increasing_stride=False, ortho_init=ortho_init, param=param, max_gain=max_gain ** (1 / 4)),
ButterflyBmm(out_channels, out_channels, self.kernel_size[0] * self.kernel_size[1], bias, False, tied_weight, increasing_stride=True, ortho_init=ortho_init, param=param, max_gain=max_gain ** (1 / 4))
)
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w) if real or (batch, c, h, w, 2) if complex
Return:
output: (batch, nstack * c, h, w) if real or (batch, nstack * c, h, w, 2) if complex
"""
# TODO: Only doing real for now
batch, c, h, w = input.shape
h_out = (h + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1
        w_out = (w + 2 * self.padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) // self.stride[1] + 1
input_patches = F.unfold(input, self.kernel_size, self.dilation, self.padding, self.stride).view(batch, c, self.kernel_size[0] * self.kernel_size[1], h_out * w_out)
input_reshape = input_patches.permute(0, 3, 2, 1).reshape(batch * h_out * w_out, self.kernel_size[0] * self.kernel_size[1], c)
output = self.layers(input_reshape).mean(dim=1)
return output.view(batch, h_out * w_out, self.out_channels).transpose(1, 2).view(batch, self.out_channels, h_out, w_out)
|
butterfly-master
|
cnn/models/butterfly_conv.py
|
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file are heavily influenced by [2],
which is implemented for ImageNet and doesn't have option A for the identity
shortcut. Moreover, most implementations on the web are copy-pasted from
torchvision's resnet and have the wrong number of parameters.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
numbers of layers and parameters:
name      | layers | params
ResNet20  |   20   | 0.27M
ResNet32  |   32   | 0.46M
ResNet44  |   44   | 0.66M
ResNet56  |   56   | 0.85M
ResNet110 |  110   | 1.7M
ResNet1202|  1202  | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention
the author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
__all__ = ['ResNetOriginal', 'resnet20original', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
        init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlockOriginal(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlockOriginal, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
                For CIFAR10, the ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
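# Added illustration of the option-A shortcut above. A minimal sketch, assuming
# only torch: subsample spatially by 2 with strided slicing, then zero-pad the
# channel dimension (F.pad pads trailing dims first, so the (0,0,0,0,p,p)
# tuple pads channels).
def _demo_option_a():
    x = torch.randn(1, 16, 32, 32)
    planes = 32
    y = F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4))
    assert y.shape == (1, 16 + 2 * (planes // 4), 16, 16)  # (1, 32, 16, 16)
# _demo_option_a()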
class ResNetOriginal(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNetOriginal, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet20original():
return ResNetOriginal(BasicBlockOriginal, [3, 3, 3])
def resnet32():
return ResNetOriginal(BasicBlockOriginal, [5, 5, 5])
def resnet44():
return ResNetOriginal(BasicBlockOriginal, [7, 7, 7])
def resnet56():
return ResNetOriginal(BasicBlockOriginal, [9, 9, 9])
def resnet110():
return ResNetOriginal(BasicBlockOriginal, [18, 18, 18])
def resnet1202():
return ResNetOriginal(BasicBlockOriginal, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith('resnet'):
print(net_name)
test(globals()[net_name]())
print()
|
butterfly-master
|
cnn/models/resnet_original.py
|
'''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, n1x1, kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, n3x3red, kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
# 1x1 conv -> 5x5 conv branch
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, n5x5red, kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1,y2,y3,y4], 1)
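# Added illustration: the concatenated output has n1x1 + n3x3 + n5x5 +
# pool_planes channels, which is how the in_planes of each following Inception
# module below is derived (e.g. 64 + 128 + 32 + 32 = 256 after a3).
def _demo_inception_channels():
    m = Inception(192, 64, 96, 128, 16, 32, 32)
    y = m(torch.randn(1, 192, 8, 8))
    assert y.shape[1] == 64 + 128 + 32 + 32  # 256
# _demo_inception_channels()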
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.linear = nn.Linear(1024, 10)
def forward(self, x):
out = self.pre_layers(x)
out = self.a3(out)
out = self.b3(out)
out = self.maxpool(out)
out = self.a4(out)
out = self.b4(out)
out = self.c4(out)
out = self.d4(out)
out = self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def test():
net = GoogLeNet()
x = torch.randn(1,3,32,32)
y = net(x)
print(y.size())
# test()
|
butterfly-master
|
cnn/models/googlenet.py
|
'''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
def __init__(self, vgg_name):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Linear(512, 10)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
def test():
net = VGG('VGG11')
x = torch.randn(2,3,32,32)
y = net(x)
print(y.size())
# test()
|
butterfly-master
|
cnn/models/vgg.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1)
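# Added illustration: a Fire module squeezes to squeeze_planes channels, then
# expands through the two branches and concatenates, so its output has
# expand1x1_planes + expand3x3_planes channels. A minimal check:
def _demo_fire_channels():
    m = Fire(96, 16, 64, 64)
    assert m(torch.randn(1, 96, 8, 8)).shape[1] == 64 + 64
# _demo_fire_channels()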
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=1000):
super(SqueezeNet, self).__init__()
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
        # Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d((1, 1))
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
init.normal_(m.weight, mean=0.0, std=0.01)
else:
init.kaiming_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
def squeezenet1_0(pretrained=False, **kwargs):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNet(version=1.0, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0']))
return model
def squeezenet1_1(pretrained=False, **kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNet(version=1.1, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))
return model
|
butterfly-master
|
cnn/models/squeezenet.py
|
'''SENet in PyTorch.
SENet (Squeeze-and-Excitation Networks) won the ImageNet-2017 classification
challenge. Paper: Hu et al., arXiv:1709.01507.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes)
)
# SE layers
self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1) # Use nn.Conv2d instead of nn.Linear
self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Squeeze
w = F.avg_pool2d(out, out.size(2))
w = F.relu(self.fc1(w))
        w = torch.sigmoid(self.fc2(w))
# Excitation
out = out * w # New broadcasting feature from v0.2!
out += self.shortcut(x)
out = F.relu(out)
return out
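# Added illustration of the squeeze-and-excitation step above: the squeeze
# global-average-pools to a (N, C, 1, 1) descriptor and the excitation rescales
# each channel by a sigmoid gate in [0, 1]. A minimal shape sketch:
def _demo_se_gate():
    out = torch.randn(2, 64, 8, 8)
    w = F.avg_pool2d(out, out.size(2))   # squeeze: (2, 64, 1, 1)
    w = torch.sigmoid(w)                 # per-channel gate
    assert (out * w).shape == out.shape  # excitation broadcasts over H, W
# _demo_se_gate()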
class PreActBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
)
# SE layers
self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
# Squeeze
w = F.avg_pool2d(out, out.size(2))
w = F.relu(self.fc1(w))
        w = torch.sigmoid(self.fc2(w))
# Excitation
out = out * w
out += shortcut
return out
class SENet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(SENet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def SENet18():
return SENet(PreActBlock, [2,2,2,2])
def test():
net = SENet18()
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
|
butterfly-master
|
cnn/models/senet.py
|
import math
import torch
from torch import nn
import torch.nn.functional as F
class LowRankConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, rank=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
self.stride = (stride, stride) if isinstance(stride, int) else stride
self.padding = (padding, padding) if isinstance(padding, int) else padding
self.dilation = (dilation, dilation) if isinstance(dilation, int) else dilation
self.rank = rank
self.G = nn.Parameter(torch.Tensor(self.kernel_size[0] * self.kernel_size[1], self.rank, self.in_channels))
self.H = nn.Parameter(torch.Tensor(self.kernel_size[0] * self.kernel_size[1], self.out_channels, self.rank))
if bias:
self.bias = nn.Parameter(torch.Tensor(self.out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
# Identical initialization to torch.nn.Linear
fan_in, fan_out = self.in_channels, self.out_channels
nn.init.uniform_(self.G, -1 / math.sqrt(fan_in), 1 / math.sqrt(fan_in))
nn.init.uniform_(self.H, -1 / math.sqrt(self.rank), 1 / math.sqrt(self.rank))
if self.bias is not None:
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x):
M = torch.bmm(self.H, self.G).permute(1, 2, 0).reshape(
self.out_channels, self.in_channels, *self.kernel_size)
return F.conv2d(x, M, self.bias, self.stride, self.padding, self.dilation)
# def forward(self, x):
# batch, c, h, w = x.shape
# c_out = self.out_channels
# h_out = (h + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1
    #     w_out = (w + 2 * self.padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) // self.stride[1] + 1
# # unfold x into patches and call batch matrix multiply
# input_patches = F.unfold(x, self.kernel_size, self.dilation, self.padding, self.stride).view(
# batch, c, self.kernel_size[0] * self.kernel_size[1], h_out * w_out)
# x = input_patches.permute(2, 0, 3, 1).reshape(self.kernel_size[0] * self.kernel_size[1], batch * h_out * w_out, c)
# output = x @ self.G.transpose(1, 2)
# output = output @ self.H.transpose(1, 2)
# # combine matrix batches
# output = output.mean(dim=0).view(batch, h_out * w_out, c_out).transpose(1, 2).view(batch, c_out, h_out, w_out)
# if self.bias is not None:
# output = output + self.bias.unsqueeze(-1).unsqueeze(-1)
# return output
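# Added illustration: with this factorization each kernel position stores
# G (rank x in_channels) and H (out_channels x rank), so the parameter count is
# k*k * rank * (in_channels + out_channels) versus k*k * in_channels *
# out_channels for a dense conv. A minimal check:
def _demo_low_rank_params():
    m = LowRankConv2d(64, 64, kernel_size=3, padding=1, bias=False, rank=4)
    n_params = sum(p.numel() for p in m.parameters())
    assert n_params == 9 * 4 * (64 + 64)  # 4608, vs 9 * 64 * 64 = 36864 dense
# _demo_low_rank_params()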
|
butterfly-master
|
cnn/models/low_rank_conv.py
|
'''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4*growth_rate)
self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out,x], 1)
return out
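# Added illustration: each Bottleneck concatenates its growth_rate new feature
# maps onto the input, so channels grow by growth_rate per block; this is the
# arithmetic behind the num_planes bookkeeping in DenseNet.__init__ below.
def _demo_bottleneck_growth():
    m = Bottleneck(24, growth_rate=12)
    assert m(torch.randn(1, 24, 8, 8)).shape[1] == 24 + 12
# _demo_bottleneck_growth()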
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2*growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
num_planes += nblocks[0]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
num_planes += nblocks[1]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3]*growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear = nn.Linear(num_planes, num_classes)
def _make_dense_layers(self, block, in_planes, nblock):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out = F.avg_pool2d(F.relu(self.bn(out)), 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DenseNet121():
return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)
def DenseNet169():
return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)
def DenseNet201():
return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)
def DenseNet161():
return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)
def densenet_cifar():
return DenseNet(Bottleneck, [6,12,24,16], growth_rate=12)
def test():
net = densenet_cifar()
x = torch.randn(1,3,32,32)
y = net(x)
print(y)
# test()
|
butterfly-master
|
cnn/models/densenet.py
|
from .vgg import *
from .dpn import *
from .lenet import *
from .senet import *
from .pnasnet import *
from .densenet import *
from .googlenet import *
from .shufflenet import *
from .shufflenetv2 import *
from .resnet import *
from .resnet_original import *
from .resnext import *
from .preact_resnet import *
from .mobilenet import *
from .mobilenetv2 import *
from .presnet import *
from .wide_resnet import *
|
butterfly-master
|
cnn/models/__init__.py
|
import math
import torch
from torch import nn
from butterfly.complex_utils import complex_mul
class CirculantLinear(nn.Module):
def __init__(self, size, nstack=1):
super().__init__()
self.size = size
self.nstack = nstack
init_stddev = math.sqrt(1. / self.size)
c = torch.randn(nstack, size) * init_stddev
self.c_f = nn.Parameter(torch.rfft(c, 1))
self.c_f._is_structured = True # Flag to avoid weight decay
def forward(self, input):
"""
Parameters:
input: (batch, size)
Return:
output: (batch, nstack * size)
"""
batch = input.shape[0]
input_f = torch.rfft(input, 1)
prod = complex_mul(self.c_f, input_f.unsqueeze(1))
return torch.irfft(prod, 1, signal_sizes=(self.size, )).view(batch, self.nstack * self.size)
class Circulant1x1Conv(CirculantLinear):
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w)
Return:
output: (batch, nstack * c, h, w)
"""
batch, c, h, w = input.shape
input_reshape = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c)
output = super().forward(input_reshape)
return output.view(batch, h * w, self.nstack * c).transpose(1, 2).view(batch, self.nstack * c, h, w)
# Code below is for testing different implementations of circulant multiply
import cupy as cp  # needed by the raw-cupy multiplies in CirculantMult below
from butterfly.complex_utils import *
class CirculantMult(torch.autograd.Function):
@staticmethod
def forward(ctx, c, x):
n = x.shape[-1]
x_f = torch.rfft(x, 1)
c_f = torch.rfft(c, 1)
ctx.save_for_backward(c_f, x_f)
# prod = complex_mul(c_f, x_f)
# prod = cupy2torch((torch2cupy(c_f).view('complex64') * torch2cupy(x_f).view('complex64')).view('float32'))
prod = torch.empty_like(x_f)
cp.multiply(torch2cupy(c_f).view('complex64'), torch2cupy(x_f).view('complex64'), out=torch2cupy(prod).view('complex64'))
return torch.irfft(prod, 1, signal_sizes=(n, ))
@staticmethod
def backward(ctx, grad):
n = grad.shape[-1]
c_f, x_f = ctx.saved_tensors
grad_f = torch.rfft(grad, 1)
# dx_f = complex_mul(grad_f, conjugate(c_f))
grad_f_cp = torch2cupy(grad_f).view('complex64')
# dx_f = cupy2torch((torch2cupy(c_f).view('complex64').conj() * grad_f_cp).view('float32'))
dx_f = torch.empty_like(x_f)
cp.multiply(torch2cupy(c_f).view('complex64').conj(), grad_f_cp, out=torch2cupy(dx_f).view('complex64'))
# dc_f = complex_mul(grad_f, conjugate(x_f)).sum(dim=0)
# dc_f = cupy2torch((torch2cupy(x_f).view('complex64').conj() * grad_f_cp).view('float32')).sum(dim=0)
dc_f = torch.empty_like(x_f)
cp.multiply(torch2cupy(x_f).view('complex64').conj(), grad_f_cp, out=torch2cupy(dc_f).view('complex64'))
dc_f = dc_f.sum(dim=0)
# t1 = torch2cupy(x_f).view('complex64').conj().squeeze(-1)
# t2 = grad_f_cp.squeeze(-1)
# temp = (t1.T[:, np.newaxis] @ t2.T[..., np.newaxis]).squeeze()
dx = torch.irfft(dx_f, 1, signal_sizes=(n, ))
dc = torch.irfft(dc_f, 1, signal_sizes=(n, ))
return dc, dx
circulant_custom_backward = CirculantMult.apply
def circulant_fft(c, x):
n = x.shape[-1]
x_f = torch.rfft(x, 1)
c_f = torch.rfft(c, 1)
prod = complex_mul(c_f, x_f)
return torch.irfft(prod, 1, signal_sizes=(n, ))
def circulant_indexing(c, x):
n = x.shape[-1]
a = torch.arange(n, device=c.device)
b = -a
indices = a + b.unsqueeze(-1)
C = c[indices]
return x @ C
def anticirculant_as_strided(c, x):
n = x.shape[-1]
c_ext = torch.cat((c, c), dim=-1)
C = c_ext.as_strided((n, n), (1, 1))
return x @ C.contiguous().t()
def circulant_as_strided(c, x):
n = x.shape[-1]
reverse_idx = torch.arange(n - 1, -1, -1, device=c.device)
c_rev = c[reverse_idx]
c_ext = torch.cat((c_rev, c_rev), dim=-1)
C = c_ext.as_strided((n, n), (1, 1))[:, reverse_idx]
return x @ C
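# Added illustration: the implementations above all compute the same circulant
# product y[k] = sum_i x[i] * c[(k - i) mod n]. A minimal CPU equivalence check
# (the FFT variant uses torch.rfft, so it assumes the older torch version the
# rest of this file already targets):
def _check_circulant_impls():
    n = 8
    c = torch.randn(n)
    x = torch.randn(4, n)
    ref = circulant_indexing(c, x)
    assert torch.allclose(circulant_as_strided(c, x), ref, atol=1e-5)
    assert torch.allclose(circulant_fft(c, x), ref, atol=1e-4)
# _check_circulant_impls()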
if __name__ == '__main__':
import time
nsteps = 1000
n = 512
batch_size = 128
x = torch.randn(batch_size, n, device='cuda', requires_grad=True)
c = torch.randn(n, device='cuda', requires_grad=True)
grad = torch.randn_like(x)
output = circulant_fft(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_fft(c, x)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Circulant_fft forward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Circulant_fft backward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_fft(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Circulant_fft together: {end - start}s')
output = circulant_custom_backward(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_custom_backward(c, x)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Circulant_custom_backward forward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Circulant_custom_backward backward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_custom_backward(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Circulant_custom_backward together: {end - start}s')
output = circulant_indexing(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_indexing(c, x)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'circulant_indexing forward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'circulant_indexing backward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_indexing(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'circulant_indexing together: {end - start}s')
output = circulant_as_strided(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_as_strided(c, x)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'circulant_as_strided forward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'circulant_as_strided backward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = circulant_as_strided(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'circulant_as_strided together: {end - start}s')
output = anticirculant_as_strided(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = anticirculant_as_strided(c, x)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'anticirculant_as_strided forward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'anticirculant_as_strided backward: {end - start}s')
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(nsteps):
output = anticirculant_as_strided(c, x)
torch.autograd.grad(output, (c, x), grad, retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'anticirculant_as_strided together: {end - start}s')
|
butterfly-master
|
cnn/models/circulant1x1conv.py
|
'''ResNeXt in PyTorch.
See the paper "Aggregated Residual Transformations for Deep Neural Networks" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
'''Grouped convolution block.'''
expansion = 2
def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
super(Block, self).__init__()
group_width = cardinality * bottleneck_width
self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(group_width)
self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn2 = nn.BatchNorm2d(group_width)
self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*group_width)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*group_width:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*group_width)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNeXt(nn.Module):
def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
super(ResNeXt, self).__init__()
self.cardinality = cardinality
self.bottleneck_width = bottleneck_width
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(num_blocks[0], 1)
self.layer2 = self._make_layer(num_blocks[1], 2)
self.layer3 = self._make_layer(num_blocks[2], 2)
# self.layer4 = self._make_layer(num_blocks[3], 2)
self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes)
def _make_layer(self, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
# Increase bottleneck_width by 2 after each stage.
self.bottleneck_width *= 2
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
# out = self.layer4(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNeXt29_2x64d():
return ResNeXt(num_blocks=[3,3,3], cardinality=2, bottleneck_width=64)
def ResNeXt29_4x64d():
return ResNeXt(num_blocks=[3,3,3], cardinality=4, bottleneck_width=64)
def ResNeXt29_8x64d():
return ResNeXt(num_blocks=[3,3,3], cardinality=8, bottleneck_width=64)
def ResNeXt29_32x4d():
return ResNeXt(num_blocks=[3,3,3], cardinality=32, bottleneck_width=4)
def test_resnext():
net = ResNeXt29_2x64d()
x = torch.randn(1,3,32,32)
y = net(x)
print(y.size())
# test_resnext()
|
butterfly-master
|
cnn/models/resnext.py
|
'''LeNet in PyTorch.'''
import sys, os, subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
from butterfly import Butterfly
from butterfly.butterfly_multiply import butterfly_mult_untied
# import baselines.toeplitz as toeplitz
# import structure.layer as sl  # uncomment to enable the 'toeplitz' method below
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class LeNet(nn.Module):
def __init__(self, method='linear', **kwargs):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5, padding=2)
self.conv2 = nn.Conv2d(6, 16, 5, padding=2)
# print(method, tied_weight, kwargs)
if method == 'linear':
self.fc = nn.Linear(1024, 1024)
elif method == 'butterfly':
self.fc = Butterfly(1024, 1024, bias=True, **kwargs)
# self.fc = Butterfly(1024, 1024, tied_weight=False, bias=False, param='regular', nblocks=0)
# self.fc = Butterfly(1024, 1024, tied_weight=False, bias=False, param='odo', nblocks=1)
elif method == 'low-rank':
self.fc = nn.Sequential(nn.Linear(1024, kwargs['rank'], bias=False), nn.Linear(kwargs['rank'], 1024))
elif method == 'toeplitz':
self.fc = sl.ToeplitzLikeC(layer_size=1024, bias=True, **kwargs)
else: assert False, f"method {method} not supported"
# self.bias = nn.Parameter(torch.zeros(1024))
self.logits = nn.Linear(1024, 10)
def forward(self, x):
out = F.relu(self.conv1(x))
out = F.max_pool2d(out, 2)
out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = F.relu(self.fc(out))
# out = out + self.bias
out = self.logits(out)
return out
class MLP(nn.Module):
def __init__(self, method='linear', **kwargs):
super().__init__()
if method == 'linear':
make_layer = lambda name: self.add_module(name, nn.Linear(1024, 1024, bias=True))
elif method == 'butterfly':
make_layer = lambda name: self.add_module(name, Butterfly(1024, 1024, bias=True, **kwargs))
# self.fc = Butterfly(1024, 1024, tied_weight=False, bias=False, param='regular', nblocks=0)
# self.fc = Butterfly(1024, 1024, tied_weight=False, bias=False, param='odo', nblocks=1)
elif method == 'low-rank':
make_layer = lambda name: self.add_module(name, nn.Sequential(nn.Linear(1024, kwargs['rank'], bias=False), nn.Linear(kwargs['rank'], 1024, bias=True)))
elif method == 'toeplitz':
make_layer = lambda name: self.add_module(name, sl.ToeplitzLikeC(layer_size=1024, bias=True, **kwargs))
else: assert False, f"method {method} not supported"
# self.fc10 = make_layer()
# self.fc11 = make_layer()
# self.fc12 = make_layer()
# self.fc2 = make_layer()
make_layer('fc10')
make_layer('fc11')
make_layer('fc12')
make_layer('fc2')
make_layer('fc3')
self.logits = nn.Linear(1024, 10)
def forward(self, x):
x = x.view(-1, 3, 1024)
x = self.fc10(x[:,0,:]) + self.fc11(x[:,1,:]) + self.fc12(x[:,2,:])
x = F.relu(x)
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.logits(x)
return x
class AlexNet(nn.Module):
def __init__(self, num_classes=10, dropout=False, method='linear', tied_weight=False, **kwargs):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
# nn.ReLU(inplace=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
# nn.ReLU(inplace=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
# nn.ReLU(inplace=True),
nn.ReLU(),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
# nn.ReLU(inplace=True),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
# nn.ReLU(inplace=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.dropout = nn.Dropout() if dropout else nn.Identity()
self.features_size = 256 * 4 * 4
self.fc1 = nn.Linear(self.features_size, self.features_size)
if method == 'linear':
self.fc = nn.Linear(self.features_size, self.features_size, bias=False)
elif method == 'butterfly':
self.fc = Butterfly(self.features_size, self.features_size, tied_weight=tied_weight, bias=False, **kwargs)
# self.fc = Butterfly(self.features_size, self.features_size, tied_weight=False, bias=False, param='regular', nblocks=0)
# self.fc = Butterfly(self.features_size, self.features_size, tied_weight=False, bias=False, param='odo', nblocks=1)
elif method == 'low-rank':
self.fc = nn.Sequential(nn.Linear(self.features_size, kwargs['rank'], bias=False), nn.Linear(kwargs['rank'], self.features_size, bias=False))
else: assert False, f"method {method} not supported"
self.bias = nn.Parameter(torch.zeros(self.features_size))
self.fc2 = nn.Linear(4096, 4096)
# self.fc2 = nn.Identity()
self.classifier = nn.Sequential(
# nn.Dropout(),
# self.dropout,
# self.fc1,
# nn.ReLU(),
# nn.Dropout(),
self.dropout,
self.fc2,
nn.ReLU(),
nn.Linear(self.features_size, num_classes),
)
def forward(self, x):
x = self.features(x)
# print("HELLO ", x.size())
x = x.view(-1, self.features_size)
x = self.dropout(x)
        x = F.relu(self.fc1(x) + self.bias)
x = self.classifier(x)
return x
|
butterfly-master
|
cnn/models/lenet.py
|
'''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .butterfly_conv import Butterfly1x1Conv, ButterflyConv2d
from .circulant1x1conv import Circulant1x1Conv
from .toeplitzlike1x1conv import Toeplitzlike1x1Conv
from .low_rank_conv import LowRankConv2d
class Block(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1, is_structured=False, structure_type='B',
nblocks=0, param='regular'):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
if is_structured:
# if structure_type == 'B':
# self.conv2 = Butterfly1x1Conv(in_planes, out_planes, bias=False, tied_weight=False, ortho_init=True)
if structure_type == 'B':
self.conv2 = ButterflyConv2d(in_planes, out_planes,
kernel_size=1, bias=False, nblocks=nblocks, tied_weight=False,
ortho_init=True, param=param, stride=1, padding=0)
elif structure_type == 'Circulant' and out_planes % in_planes == 0:
self.conv2 = Circulant1x1Conv(in_planes, out_planes // in_planes)
elif structure_type == 'Toeplitzlike' and out_planes % in_planes == 0:
self.conv2 = Toeplitzlike1x1Conv(in_planes, out_planes // in_planes)
elif structure_type == 'LR':
# Low rank should match the number of parameters of butterfly
rank = int(math.log2(out_planes)) if nblocks == 0 else nblocks * 2 * int(math.log2(out_planes))
self.conv2 = LowRankConv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0,
bias=False, rank=rank)
else:
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
return out
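# Added illustration: the depthwise + pointwise pair above is where MobileNet
# saves parameters: 3*3*C (depthwise) + C*C_out (pointwise) versus 3*3*C*C_out
# for a full convolution. A minimal count, assuming only torch:
def _demo_separable_params():
    c_in, c_out = 64, 128
    dw = nn.Conv2d(c_in, c_in, 3, padding=1, groups=c_in, bias=False)
    pw = nn.Conv2d(c_in, c_out, 1, bias=False)
    n_sep = sum(p.numel() for p in list(dw.parameters()) + list(pw.parameters()))
    n_full = c_in * c_out * 9
    assert n_sep == 9 * c_in + c_in * c_out < n_full  # 8768 vs 73728
# _demo_separable_params()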
class MobileNet(nn.Module):
# (128,2) means conv planes=128, conv stride=2, by default conv stride=1
cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
def __init__(self, num_classes=10, num_structured_layers=0, structure_type='B', nblocks=0, param='regular'):
assert structure_type in ['B', 'LR', 'Circulant', 'Toeplitzlike']
assert num_structured_layers <= len(self.cfg)
super(MobileNet, self).__init__()
self.structure_type = structure_type
self.param = param
self.nblocks = nblocks
self.is_structured = [False] * (len(self.cfg) - num_structured_layers) + [True] * num_structured_layers
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.linear = nn.Linear(1024, num_classes)
def _make_layers(self, in_planes):
layers = []
for x, is_structured in zip(self.cfg, self.is_structured):
out_planes = x if isinstance(x, int) else x[0]
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(in_planes, out_planes, stride, is_structured, structure_type=self.structure_type,
param=self.param, nblocks=self.nblocks))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.avg_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def test():
net = MobileNet()
x = torch.randn(1,3,32,32)
y = net(x)
print(y.size())
# test()
|
butterfly-master
|
cnn/models/mobilenet.py
|
'''PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SepConv(nn.Module):
'''Separable Convolution.'''
def __init__(self, in_planes, out_planes, kernel_size, stride):
super(SepConv, self).__init__()
self.conv1 = nn.Conv2d(in_planes, out_planes,
kernel_size, stride,
padding=(kernel_size-1)//2,
bias=False, groups=in_planes)
self.bn1 = nn.BatchNorm2d(out_planes)
def forward(self, x):
return self.bn1(self.conv1(x))
class CellA(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(CellA, self).__init__()
self.stride = stride
self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
if stride==2:
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(out_planes)
def forward(self, x):
y1 = self.sep_conv1(x)
y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
if self.stride==2:
y2 = self.bn1(self.conv1(y2))
return F.relu(y1+y2)
class CellB(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(CellB, self).__init__()
self.stride = stride
# Left branch
self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)
# Right branch
self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)
if stride==2:
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(out_planes)
# Reduce channels
self.conv2 = nn.Conv2d(2*out_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
# Left branch
y1 = self.sep_conv1(x)
y2 = self.sep_conv2(x)
# Right branch
y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
if self.stride==2:
y3 = self.bn1(self.conv1(y3))
y4 = self.sep_conv3(x)
# Concat & reduce channels
b1 = F.relu(y1+y2)
b2 = F.relu(y3+y4)
y = torch.cat([b1,b2], 1)
return F.relu(self.bn2(self.conv2(y)))
class PNASNet(nn.Module):
def __init__(self, cell_type, num_cells, num_planes):
super(PNASNet, self).__init__()
self.in_planes = num_planes
self.cell_type = cell_type
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_planes)
self.layer1 = self._make_layer(num_planes, num_cells=6)
self.layer2 = self._downsample(num_planes*2)
self.layer3 = self._make_layer(num_planes*2, num_cells=6)
self.layer4 = self._downsample(num_planes*4)
self.layer5 = self._make_layer(num_planes*4, num_cells=6)
self.linear = nn.Linear(num_planes*4, 10)
def _make_layer(self, planes, num_cells):
layers = []
for _ in range(num_cells):
layers.append(self.cell_type(self.in_planes, planes, stride=1))
self.in_planes = planes
return nn.Sequential(*layers)
def _downsample(self, planes):
layer = self.cell_type(self.in_planes, planes, stride=2)
self.in_planes = planes
return layer
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = F.avg_pool2d(out, 8)
out = self.linear(out.view(out.size(0), -1))
return out
def PNASNetA():
return PNASNet(CellA, num_cells=6, num_planes=44)
def PNASNetB():
return PNASNet(CellB, num_cells=6, num_planes=32)
def test():
net = PNASNetB()
x = torch.randn(1,3,32,32)
y = net(x)
print(y)
# test()
|
butterfly-master
|
cnn/models/pnasnet.py
|
'''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
def __init__(self, groups):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'''
N,C,H,W = x.size()
g = self.groups
return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
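# Added illustration: a concrete channel-shuffle trace. With C=6 and g=2 the
# channel order (0,1,2,3,4,5) becomes (0,3,1,4,2,5), interleaving the groups.
def _demo_channel_shuffle():
    x = torch.arange(6.).view(1, 6, 1, 1)
    y = ShuffleBlock(groups=2)(x)
    assert y.view(-1).tolist() == [0., 3., 1., 4., 2., 5.]
# _demo_channel_shuffle()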
class Bottleneck(nn.Module):
def __init__(self, in_planes, out_planes, stride, groups):
super(Bottleneck, self).__init__()
self.stride = stride
mid_planes = out_planes//4
g = 1 if in_planes==24 else groups
self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes)
self.shuffle1 = ShuffleBlock(groups=g)
self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
self.bn2 = nn.BatchNorm2d(mid_planes)
self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 2:
self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.shuffle1(out)
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
res = self.shortcut(x)
out = F.relu(torch.cat([out,res], 1)) if self.stride==2 else F.relu(out+res)
return out
class ShuffleNet(nn.Module):
def __init__(self, cfg):
super(ShuffleNet, self).__init__()
out_planes = cfg['out_planes']
num_blocks = cfg['num_blocks']
groups = cfg['groups']
self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.in_planes = 24
self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
self.linear = nn.Linear(out_planes[2], 10)
def _make_layer(self, out_planes, num_blocks, groups):
layers = []
for i in range(num_blocks):
stride = 2 if i == 0 else 1
cat_planes = self.in_planes if i == 0 else 0
layers.append(Bottleneck(self.in_planes, out_planes-cat_planes, stride=stride, groups=groups))
self.in_planes = out_planes
return nn.Sequential(*layers)
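    # Note (added commentary): the first block of each stage has a stride-2
    # concatenating shortcut, so it only produces out_planes - in_planes new
    # channels; later blocks use additive shortcuts and output the full out_planes.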
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ShuffleNetG2():
cfg = {
'out_planes': [200,400,800],
'num_blocks': [4,8,4],
'groups': 2
}
return ShuffleNet(cfg)
def ShuffleNetG3():
cfg = {
'out_planes': [240,480,960],
'num_blocks': [4,8,4],
'groups': 3
}
return ShuffleNet(cfg)
def test():
net = ShuffleNetG2()
x = torch.randn(1,3,32,32)
y = net(x)
print(y)
# test()
|
butterfly-master
|
cnn/models/shufflenet.py
|
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from cnn.models.butterfly_conv import ButterflyConv2d, ButterflyConv2dBBT
from cnn.models.low_rank_conv import LowRankConv2d
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, is_structured=False, structure_type='B', **kwargs):
super(BasicBlock, self).__init__()
nblocks = kwargs.get('nblocks', 0)
if is_structured:
if structure_type == 'B':
self.conv1 = ButterflyConv2d(in_planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False, ortho_init=True, **kwargs)
elif structure_type == 'LR':
# Low rank should match the number of parameters of butterfly
rank = kwargs.get('rank', int(math.log2(planes)) if nblocks == 0 else nblocks * 2 * int(math.log2(planes)))
self.conv1 = LowRankConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False, rank=rank)
else:
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
if is_structured:
if structure_type == 'B':
self.conv2 = ButterflyConv2d(planes, planes, kernel_size=3, stride=1, padding=1,
bias=False, ortho_init=True, **kwargs)
elif structure_type == 'LR':
rank = kwargs.get('rank', int(math.log2(planes)) if nblocks == 0 else nblocks * 2 * int(math.log2(planes)))
self.conv2 = LowRankConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, rank=rank)
else:
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
if is_structured:
if structure_type == 'B':
conv = ButterflyConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride,
bias=False, ortho_init=True, **kwargs)
elif structure_type == 'LR':
rank = kwargs.get('rank', int(math.log2(self.expansion * planes)) if nblocks == 0 else nblocks * 2 * int(math.log2(self.expansion * planes)))
conv = LowRankConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False, rank=rank)
else:
conv = nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
self.shortcut = nn.Sequential(
conv,
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, num_structured_layers=0, structure_type='B', **kwargs):
assert num_structured_layers <= 4
assert structure_type in ['B', 'LR']
super(ResNet, self).__init__()
self.is_structured = [False] * (4 - num_structured_layers) + [True] * num_structured_layers
self.butterfly_expansion = kwargs.pop('expansion', [0] * 4)
self.rank = kwargs.pop('rank', [-1] * 4)
if isinstance(self.butterfly_expansion, int):
self.butterfly_expansion = [self.butterfly_expansion] * 4
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, is_structured=self.is_structured[0])
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, is_structured=self.is_structured[1])
# Only stacking butterflies in the 3rd layer for now
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, is_structured=self.is_structured[2],
structure_type=structure_type, expansion=self.butterfly_expansion[2],
**{**kwargs, **({'rank': self.rank[2]} if structure_type=='LR' else {})})
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, is_structured=self.is_structured[3],
structure_type=structure_type, expansion=self.butterfly_expansion[3],
**{**kwargs, **({'rank': self.rank[3]} if structure_type=='LR' else {})})
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride, is_structured, structure_type='B', **kwargs):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, is_structured,
structure_type=structure_type, **kwargs))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18(num_structured_layers=0, structure_type='B', **kwargs):
return ResNet(BasicBlock, [2,2,2,2], num_structured_layers=num_structured_layers,
structure_type=structure_type, **kwargs)
def ResNet34():
return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
return ResNet(Bottleneck, [3,8,36,3])
def test():
net = ResNet18()
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
# class BasicButterflyConv2d(nn.Module):
# expansion = 1
# def __init__(self, in_planes, planes):
# # stride=1
# super().__init__()
# self.conv1 = ButterflyConv2d(in_planes, planes, kernel_size=3, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(planes)
# self.conv2 = ButterflyConv2d(planes, planes, kernel_size=3, padding=1, bias=False)
# self.bn2 = nn.BatchNorm2d(planes)
# self.shortcut = nn.Sequential()
# if in_planes != self.expansion*planes:
# self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, bias=False),
# nn.BatchNorm2d(self.expansion*planes)
# )
# def forward(self, x):
# out = F.relu(self.bn1(self.conv1(x)))
# out = self.bn2(self.conv2(out))
# out += self.shortcut(x)
# out = F.relu(out)
# return out
# class ButterflyNet(nn.Module):
# def __init__(self, block, num_blocks, num_classes=10):
# super().__init__()
# self.in_planes = 64
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(64)
# self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
# self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
# self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
# self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
# self.linear = nn.Linear(512*block.expansion, num_classes)
# def _make_layer(self, block, planes, num_blocks, stride):
# strides = [stride] + [1]*(num_blocks-1)
# layers = []
# for stride in strides:
# if stride > 1:
# layers.append(block(self.in_planes, planes, stride))
# else:
# layers.append(BasicButterflyConv2d(self.in_planes, planes))
# self.in_planes = planes * block.expansion
# return nn.Sequential(*layers)
# def forward(self, x):
# out = F.relu(self.bn1(self.conv1(x)))
# out = self.layer1(out)
# out = self.layer2(out)
# out = self.layer3(out)
# out = self.layer4(out)
# out = F.avg_pool2d(out, 4)
# out = out.view(out.size(0), -1)
# out = self.linear(out)
# return out
# def ButterflyNet18():
# return ButterflyNet(BasicBlock, [2,2,2,2])
# x = torch.randn(100, 256, 8, 8, device='cuda')
# w = torch.randn(256, 256, 1, 1, device='cuda')
# res = F.conv2d(x, w, padding=0)
# x_reshape = x.view(100, 256, 8 * 8).transpose(1, 2).reshape(-1, 256)
# w_reshape = w.view(256, 256).t()
# res_mm = x_reshape @ w_reshape
# res_mm = res_mm.view(100, 64, 256).transpose(1, 2).view(100, 256, 8, 8)
# assert torch.allclose(res, res_mm)
|
butterfly-master
|
cnn/models/resnet.py
|
'''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PreActResNet18():
return PreActResNet(PreActBlock, [2,2,2,2])
def PreActResNet34():
return PreActResNet(PreActBlock, [3,4,6,3])
def PreActResNet50():
return PreActResNet(PreActBottleneck, [3,4,6,3])
def PreActResNet101():
return PreActResNet(PreActBottleneck, [3,4,23,3])
def PreActResNet152():
return PreActResNet(PreActBottleneck, [3,8,36,3])
def test():
net = PreActResNet18()
    y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
|
butterfly-master
|
cnn/models/preact_resnet.py
|
'''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = out + self.shortcut(x) if self.stride==1 else out
return out
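    # Note (added commentary): this is the inverted-residual pattern; the skip
    # connection is only used at stride 1, with a 1x1 projection shortcut
    # covering the in_planes != out_planes case.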
class MobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
self.linear = nn.Linear(1280, num_classes)
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1]*(num_blocks-1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.relu(self.bn2(self.conv2(out)))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
# out = F.avg_pool2d(out, 4)
out = F.adaptive_avg_pool2d(out, 1)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def test():
net = MobileNetV2()
x = torch.randn(2,3,32,32)
y = net(x)
print(y.size())
# test()
|
butterfly-master
|
cnn/models/mobilenetv2.py
|
import math
import numpy as np
import torch
from torch import nn
from butterfly.complex_utils import complex_mul, conjugate
def toeplitz_krylov_transpose_multiply(v, u, f=0.0):
"""Multiply Krylov(Z_f, v_i)^T @ u.
Parameters:
v: (nstack, rank, n)
u: (batch_size, n)
f: real number
Returns:
product: (batch, nstack, rank, n)
"""
_, n = u.shape
_, _, n_ = v.shape
assert n == n_, 'u and v must have the same last dimension'
if f != 0.0: # cycle version
# Computing the roots of f
mod = abs(f) ** (torch.arange(n, dtype=u.dtype, device=u.device) / n)
if f > 0:
arg = torch.stack((torch.ones(n, dtype=u.dtype, device=u.device),
torch.zeros(n, dtype=u.dtype, device=u.device)), dim=-1)
else: # Find primitive roots of -1
angles = torch.arange(n, dtype=u.dtype, device=u.device) / n * np.pi
arg = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)
eta = mod[:, np.newaxis] * arg
eta_inverse = (1.0 / mod)[:, np.newaxis] * conjugate(arg)
u_f = torch.ifft(eta_inverse * u[..., np.newaxis], 1)
v_f = torch.fft(eta * v.unsqueeze(-1), 1)
uv_f = complex_mul(u_f.unsqueeze(1).unsqueeze(1), v_f)
uv = torch.fft(uv_f, 1)
# We only need the real part of complex_mul(eta, uv)
return eta[..., 0] * uv[..., 0] - eta[..., 1] * uv[..., 1]
else:
u_f = torch.rfft(torch.cat((u.flip(1), torch.zeros_like(u)), dim=-1), 1)
v_f = torch.rfft(torch.cat((v, torch.zeros_like(v)), dim=-1), 1)
uv_f = complex_mul(u_f.unsqueeze(1).unsqueeze(1), v_f)
return torch.irfft(uv_f, 1, signal_sizes=(2 * n, ))[..., :n].flip(3)
def toeplitz_krylov_multiply(v, w, f=0.0):
"""Multiply \sum_i Krylov(Z_f, v_i) @ w_i.
Parameters:
v: (nstack, rank, n)
w: (batch_size, nstack, rank, n)
f: real number
Returns:
product: (batch, nstack, n)
"""
_, nstack, rank, n = w.shape
nstack_, rank_, n_ = v.shape
assert n == n_, 'w and v must have the same last dimension'
assert rank == rank_, 'w and v must have the same rank'
assert nstack == nstack_, 'w and v must have the same nstack'
if f != 0.0: # cycle version
# Computing the roots of f
mod = abs(f) ** (torch.arange(n, dtype=w.dtype, device=w.device) / n)
if f > 0:
arg = torch.stack((torch.ones(n, dtype=w.dtype, device=w.device),
torch.zeros(n, dtype=w.dtype, device=w.device)), dim=-1)
else: # Find primitive roots of -1
angles = torch.arange(n, dtype=w.dtype, device=w.device) / n * np.pi
arg = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)
eta = mod[:, np.newaxis] * arg
eta_inverse = (1.0 / mod)[:, np.newaxis] * conjugate(arg)
w_f = torch.fft(eta * w[..., np.newaxis], 1)
v_f = torch.fft(eta * v[..., np.newaxis], 1)
wv_sum_f = complex_mul(w_f, v_f).sum(dim=2)
wv_sum = torch.ifft(wv_sum_f, 1)
# We only need the real part of complex_mul(eta_inverse, wv_sum)
        return eta_inverse[..., 0] * wv_sum[..., 0] - eta_inverse[..., 1] * wv_sum[..., 1]
else:
w_f = torch.rfft(torch.cat((w, torch.zeros_like(w)), dim=-1), 1)
v_f = torch.rfft(torch.cat((v, torch.zeros_like(v)), dim=-1), 1)
wv_sum_f = complex_mul(w_f, v_f).sum(dim=2)
return torch.irfft(wv_sum_f, 1, signal_sizes=(2 * n, ))[..., :n]
def toeplitz_mult(G, H, x, cycle=True):
"""Multiply \sum_i Krylov(Z_f, G_i) @ Krylov(Z_f, H_i) @ x.
Parameters:
G: Tensor of shape (nstack, rank, n)
H: Tensor of shape (nstack, rank, n)
x: Tensor of shape (batch_size, n)
cycle: whether to use f = (1, -1) or f = (0, 0)
Returns:
product: Tensor of shape (batch_size, nstack, n)
"""
# f = (1,-1) if cycle else (1,1)
f = (1, -1) if cycle else (0, 0)
transpose_out = toeplitz_krylov_transpose_multiply(H, x, f[1])
return toeplitz_krylov_multiply(G, transpose_out, f[0])
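# Added reference sketch (not part of the original file): explicit dense
# construction of Krylov(Z_f, v) = [v, Z_f v, ..., Z_f^{n-1} v] for a single
# vector v, useful for checking the FFT-based routines above on small n.
# Z_f is the f-unit-circulant shift: (Z_f x)_0 = f * x_{n-1}, (Z_f x)_i = x_{i-1}.
def krylov_construct(f, v):
    n = v.shape[0]
    K = torch.zeros(n, n, dtype=v.dtype, device=v.device)
    K[:, 0] = v
    for i in range(1, n):
        K[1:, i] = K[:-1, i - 1]    # shift the previous column down by one
        K[0, i] = f * K[-1, i - 1]  # wrap-around entry scaled by f
    return K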
class ToeplitzlikeLinear(nn.Module):
def __init__(self, in_size, out_size, rank=4, bias=True, corner=False):
super().__init__()
self.in_size = in_size
self.out_size = out_size
self.nstack = int(math.ceil(out_size / self.in_size))
self.rank = rank
assert not corner, 'corner not currently supported'
self.corner = corner
init_stddev = math.sqrt(1. / (rank * in_size))
self.G = nn.Parameter(torch.randn(self.nstack, rank, in_size) * init_stddev)
self.H = nn.Parameter(torch.randn(self.nstack, rank, in_size) * init_stddev)
self.G._is_structured = True # Flag to avoid weight decay
self.H._is_structured = True
self.register_buffer('reverse_idx', torch.arange(in_size - 1, -1, -1))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_size))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""Initialize bias the same way as torch.nn.Linear."""
if self.bias is not None:
bound = 1 / math.sqrt(self.in_size)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
Parameters:
input: (batch, *, in_size)
Return:
output: (batch, *, out_size)
"""
u = input.view(np.prod(input.size()[:-1]), input.size(-1))
batch = u.shape[0]
# output = toeplitz_mult(self.G, self.H, input, self.corner)
# return output.reshape(batch, self.nstack * self.size)
n = self.in_size
v = self.H
# u_f = torch.rfft(torch.cat((u.flip(1), torch.zeros_like(u)), dim=-1), 1)
u_f = torch.rfft(torch.cat((u[:, self.reverse_idx], torch.zeros_like(u)), dim=-1), 1)
v_f = torch.rfft(torch.cat((v, torch.zeros_like(v)), dim=-1), 1)
uv_f = complex_mul(u_f.unsqueeze(1).unsqueeze(1), v_f)
# transpose_out = torch.irfft(uv_f, 1, signal_sizes=(2 * n, ))[..., :n].flip(3)
transpose_out = torch.irfft(uv_f, 1, signal_sizes=(2 * n, ))[..., self.reverse_idx]
v = self.G
w = transpose_out
w_f = torch.rfft(torch.cat((w, torch.zeros_like(w)), dim=-1), 1)
v_f = torch.rfft(torch.cat((v, torch.zeros_like(v)), dim=-1), 1)
wv_sum_f = complex_mul(w_f, v_f).sum(dim=2)
output = torch.irfft(wv_sum_f, 1, signal_sizes=(2 * n, ))[..., :n]
output = output.reshape(batch, self.nstack * self.in_size)[:, :self.out_size]
if self.bias is not None:
output = output + self.bias
return output.view(*input.size()[:-1], self.out_size)
def extra_repr(self):
return 'in_size={}, out_size={}, bias={}, rank={}, corner={}'.format(
self.in_size, self.out_size, self.bias is not None, self.rank, self.corner
)
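# Usage sketch (added commentary, shapes illustrative):
#   layer = ToeplitzlikeLinear(in_size=64, out_size=128, rank=4)
#   y = layer(torch.randn(32, 64))   # -> (32, 128)
# G and H together hold 2 * nstack * rank * in_size parameters, versus
# in_size * out_size for a dense nn.Linear.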
class Toeplitzlike1x1Conv(ToeplitzlikeLinear):
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w)
Return:
output: (batch, nstack * c, h, w)
"""
# TODO: this is for old code with square Toeplitzlike, need to be updated
batch, c, h, w = input.shape
input_reshape = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c)
output = super().forward(input_reshape)
return output.view(batch, h * w, self.nstack * c).transpose(1, 2).view(batch, self.nstack * c, h, w)
|
butterfly-master
|
cnn/models/toeplitzlike1x1conv.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys
import numpy as np
from cnn.models.butterfly_conv import ButterflyConv2d
from cnn.models.low_rank_conv import LowRankConv2d
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1, structure_type=None, **kwargs):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
if structure_type == 'B':
self.conv1 = ButterflyConv2d(in_planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True, ortho_init=True, **kwargs)
elif structure_type == 'LR':
# Low rank should match the number of parameters of butterfly
rank = kwargs.get('rank', 1)
self.conv1 = LowRankConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=True, rank=rank)
else:
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
if structure_type == 'B':
self.conv2 = ButterflyConv2d(planes, planes, kernel_size=3, stride=1, padding=1,
bias=True, ortho_init=True, **kwargs)
elif structure_type == 'LR':
rank = kwargs.get('rank', 1)
self.conv2 = LowRankConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=True, rank=rank)
else:
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if structure_type == 'B':
conv = ButterflyConv2d(in_planes, planes, kernel_size=1, stride=stride,
bias=True, ortho_init=True, **kwargs)
elif structure_type == 'LR':
rank = kwargs.get('rank', 1)
conv = LowRankConv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True, rank=rank)
else:
conv = nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True)
self.shortcut = nn.Sequential(
conv
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class Wide_ResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes, structure_type=None, **kwargs):
super(Wide_ResNet, self).__init__()
self.in_planes = 16
        assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
n = (depth-4)//6
k = widen_factor
# print('| Wide-Resnet %dx%d' %(depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3,nStages[0])
self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2, structure_type=structure_type, **kwargs)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride, structure_type=None, **kwargs):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride, structure_type, **kwargs))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
if __name__ == '__main__':
net=Wide_ResNet(28, 8, 0.0, 10)
y = net(Variable(torch.randn(1,3,32,32)))
print(y.size())
def WideResNet28(structure_type=None, **kwargs):
return Wide_ResNet(28, 2, 0.0, 10, structure_type=structure_type, **kwargs)
|
butterfly-master
|
cnn/models/wide_resnet.py
|
# from https://github.com/fastai/imagenet-fast/blob/master/imagenet_nv/models/layers.py
import torch
from torch import nn
class AdaptiveConcatPool2d(nn.Module):
def __init__(self, sz=None):
super().__init__()
sz = sz or (1,1)
self.ap = nn.AdaptiveAvgPool2d(sz)
self.mp = nn.AdaptiveMaxPool2d(sz)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
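    # Note (added commentary): concatenating max- and avg-pooled features
    # doubles the channel dimension, so a following linear layer must expect
    # 2*C input features.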
class Lambda(nn.Module):
def __init__(self, f): super().__init__(); self.f=f
def forward(self, x): return self.f(x)
class Flatten(nn.Module):
def __init__(self): super().__init__()
def forward(self, x): return x.view(x.size(0), -1)
|
butterfly-master
|
cnn/models/layers.py
|
'''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
super(Bottleneck, self).__init__()
self.out_planes = out_planes
self.dense_depth = dense_depth
self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)
self.shortcut = nn.Sequential()
if first_layer:
self.shortcut = nn.Sequential(
nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_planes+dense_depth)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
x = self.shortcut(x)
d = self.out_planes
out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1)
out = F.relu(out)
return out
class DPN(nn.Module):
def __init__(self, cfg):
super(DPN, self).__init__()
in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.last_planes = 64
self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 10)
def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for i,stride in enumerate(strides):
layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
self.last_planes = out_planes + (i+2) * dense_depth
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DPN26():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (2,2,2,2),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def DPN92():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (3,4,20,3),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def test():
net = DPN92()
x = torch.randn(1,3,32,32)
y = net(x)
print(y)
# test()
|
butterfly-master
|
cnn/models/dpn.py
|
# modified from https://github.com/fastai/imagenet-fast/blob/master/imagenet_nv/models/resnet.py
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from .layers import Flatten
from .butterfly_conv import ButterflyConv2d, ButterflyConv2dBBT
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def butterfly3x3(in_planes, planes, stride=1, structure_type='B', nblocks=1,
param='regular'):
if structure_type == 'B':
bfly = ButterflyConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False, tied_weight=False, ortho_init=True, param=param)
elif structure_type == 'BBT':
bfly = ButterflyConv2dBBT(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False, nblocks=nblocks, tied_weight=False, ortho_init=True, param=param)
else:
raise ValueError("Structure type isn't supported.")
return bfly
def butterfly1x1(in_planes, planes, stride=1, structure_type='B', nblocks=1,
param='regular'):
if structure_type == 'B':
bfly = ButterflyConv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False, tied_weight=False, ortho_init=True, param=param)
elif structure_type == 'BBT':
bfly = ButterflyConv2dBBT(in_planes, planes, kernel_size=1, stride=stride, bias=False, nblocks=nblocks, tied_weight=False, ortho_init=True, param=param)
else:
raise ValueError("Structure type isn't supported.")
return bfly
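# Usage sketch (added commentary, shapes illustrative): these factories are
# drop-in replacements for the corresponding dense convolutions, e.g.
#   conv = butterfly3x3(64, 128, stride=2, structure_type='BBT', nblocks=2)
#   out = conv(torch.randn(1, 64, 16, 16))   # -> (1, 128, 8, 8)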
def bn1(planes):
m = nn.BatchNorm1d(planes)
m.weight.data.fill_(1)
m.bias.data.zero_()
return m
def bn(planes, init_zero=False):
m = nn.BatchNorm2d(planes)
m.weight.data.fill_(0 if init_zero else 1)
m.bias.data.zero_()
return m
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, is_structured=False, structure_type='B', nblocks=1,
param='regular'):
super().__init__()
if is_structured:
self.conv1 = butterfly3x3(inplanes, planes, stride=stride, structure_type=structure_type,
nblocks=nblocks, param=param)
else:
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = bn(planes)
self.relu = nn.ReLU(inplace=True)
if is_structured:
self.conv2 = butterfly3x3(planes, planes, structure_type=structure_type,
nblocks=nblocks, param=param)
else:
self.conv2 = conv3x3(planes, planes)
self.bn2 = bn(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
if self.downsample is not None: residual = self.downsample(x)
out = self.conv1(x)
out = self.relu(out)
out = self.bn1(out)
out = self.conv2(out)
out += residual
out = self.relu(out)
out = self.bn2(out)
return out
class BottleneckFinal(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = bn(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = bn(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = bn(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
if self.downsample is not None: residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out += residual
out = self.bn3(out)
out = self.relu(out)
return out
class BottleneckZero(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = bn(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = bn(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = bn(planes * 4, init_zero=True)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
if self.downsample is not None: residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = bn(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = bn(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = bn(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
if self.downsample is not None: residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, k=1, vgg_head=False,
num_structured_layers=0, structure_type='B', nblocks=1, param='regular'):
assert num_structured_layers <= 4
assert structure_type in ['B', 'BBT', 'BBTBBT']
super().__init__()
self.is_structured = [False] * (4 - num_structured_layers) + [True] * num_structured_layers
self.inplanes = 64
features = [nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
, bn(64) , nn.ReLU(inplace=True) , nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
, self._make_layer(block, int(64*k), layers[0], is_structured=self.is_structured[0],
structure_type=structure_type, nblocks=nblocks, param=param)
, self._make_layer(block, int(128*k), layers[1], stride=2, is_structured=self.is_structured[1],
structure_type=structure_type, nblocks=nblocks, param=param)
# Only stacking butterflies in the 3rd layer for now
, self._make_layer(block, int(256*k), layers[2], stride=2, is_structured=self.is_structured[2],
structure_type=structure_type, nblocks=nblocks, param=param)
, self._make_layer(block, int(512*k), layers[3], stride=2, is_structured=self.is_structured[3],
structure_type=structure_type, nblocks=nblocks, param=param)]
out_sz = int(512*k) * block.expansion
if vgg_head:
features += [nn.AdaptiveAvgPool2d(3), Flatten()
, nn.Linear(out_sz*3*3, 4096), nn.ReLU(inplace=True), bn1(4096), nn.Dropout(0.25)
, nn.Linear(4096, 4096), nn.ReLU(inplace=True), bn1(4096), nn.Dropout(0.25)
, nn.Linear(4096, num_classes)]
else: features += [nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(out_sz, num_classes)]
self.features = nn.Sequential(*features)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
def _make_layer(self, block, planes, blocks, stride=1, is_structured=False,
structure_type='B', nblocks=1, param='regular'):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if is_structured:
downsample = nn.Sequential(
butterfly1x1(self.inplanes, planes * block.expansion, stride=stride, structure_type=structure_type,
nblocks=nblocks, param=param),
bn(planes * block.expansion),
)
else:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
bn(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample,
is_structured=is_structured, structure_type=structure_type,
nblocks=nblocks, param=param))
self.inplanes = planes * block.expansion
for i in range(1, blocks): layers.append(block(self.inplanes, planes,
is_structured=is_structured, structure_type=structure_type,
nblocks=nblocks, param=param))
return nn.Sequential(*layers)
def forward(self, x): return self.features(x)
# resnet50 does not currently support structure
# def resnet50(**kwargs):
#     model = ResNet(Bottleneck, [3, 4, 6, 3])
#     return model
def resnet18(num_structured_layers=0, structure_type='B', nblocks=1, param='regular'):
model = ResNet(BasicBlock, [2, 2, 2, 2], num_structured_layers=num_structured_layers,
structure_type=structure_type, nblocks=nblocks, param=param)
return model
|
butterfly-master
|
cnn/models/resnet_imagenet.py
|
import os, sys
import math
import random
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
os.environ['PYTHONPATH'] = project_root + ":" + os.environ.get('PYTHONPATH', '')
from butterfly import Butterfly
from butterfly.butterfly_multiply import butterfly_mult_untied
import permutation_utils as perm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
# 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class PResNet(nn.Module):
def __init__(self, block=BasicBlock, layers=[2,2,2,2], num_classes=10, zero_init_residual=False, **perm_args):
super().__init__()
self.block = block
self.layers = layers
self.num_classes = num_classes
self.zero_init_residual = zero_init_residual
self.permute = TensorPermutation(32, 32, **perm_args)
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(self.block, 64, self.layers[0])
self.layer2 = self._make_layer(self.block, 128, self.layers[1], stride=2)
self.layer3 = self._make_layer(self.block, 256, self.layers[2], stride=2)
self.layer4 = self._make_layer(self.block, 512, self.layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * self.block.expansion, 512 * self.block.expansion)
self.logits = nn.Linear(512 * self.block.expansion, self.num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
# print(x.size())
# print(x)
batch = x.size(0)
x = self.permute(x)
x = x.view(-1, 3, 32, 32)
# print(x.size())
x = self.conv1(x)
# print(x.size())
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
# print(x.size())
x = self.layer2(x)
# print(x.size())
x = self.layer3(x)
# print(x.size())
x = self.layer4(x)
# print(x.size())
# x = self.avgpool(x)
x = F.avg_pool2d(x, 4)
# print(x.size())
x = x.view(x.size(0), -1)
# print(x.size())
# print(x.size())
# x = F.relu(self.fc(x))
x = self.logits(x)
# print(x.size())
# x = x.view(-1, batch, self.num_classes)
return x
class TensorPermutation(nn.Module):
def __init__(self, w, h, method='identity', rank=2, train=True, **kwargs):
super().__init__()
self.w = w
self.h = h
if method == 'linear':
self.perm_type = LinearPermutation
elif method == 'butterfly':
self.perm_type = ButterflyPermutation
elif method == 'identity':
self.perm_type = IdentityPermutation
else:
assert False, f"Permutation method {method} not supported."
self.rank = rank
if self.rank == 1:
self.permute = nn.ModuleList([self.perm_type(w*h, **kwargs)])
elif self.rank == 2:
self.permute = nn.ModuleList([self.perm_type(w, **kwargs), self.perm_type(h, **kwargs)])
# self.permute2 = self.perm_type(h, **kwargs)
else:
assert False, "prank must be 1 or 2"
# TODO: maybe it makes sense to set ._is_perm_param here
# if stochastic:
# self.perm_fn = self.perm_type.sample_soft_perm
# else:
# self.perm_fn =self.perm_type.mean_perm
# elif acqfn == 'mean':
# self.perm_fn =self.perm_type.mean_perm
# elif acqfn == 'sample':
# self.perm_fn = self.perm_type.sample_soft_perm
# else:
# assert False, f"Permutation acquisition function {acqfn} not supported."
        if not train:
for p in self.parameters():
p.requires_grad = False
def forward(self, x, perm=None):
if perm is None:
perm_fn = self.perm_type.generate_perm
elif perm == 'mean':
perm_fn = self.perm_type.mean_perm
elif perm == 'mle':
perm_fn = self.perm_type.mle_perm
elif perm == 'sample':
perm_fn = self.perm_type.sample_perm
else: assert False, f"Permutation type {perm} not supported."
if self.rank == 1:
perm = perm_fn(self.permute[0])
x = x.view(-1, self.w*self.h)
x = x @ perm
x = x.view(-1, 3, self.w, self.h) # TODO make this channel agnostic
elif self.rank == 2:
x = x.transpose(-1, -2)
perm2 = perm_fn(self.permute[1])
x = x @ perm2.unsqueeze(-3).unsqueeze(-3) # unsqueeze to explicitly call matmul, can use einsum too
x = x.transpose(-1, -2)
perm1 = perm_fn(self.permute[0])
x = x @ perm1.unsqueeze(-3).unsqueeze(-3)
# collapse samples with batch
x = x.view(-1, 3, self.w, self.h)
return x
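    # Note (added commentary): in the rank-2 case the learned permutation
    # factors into separate permutations of the two spatial axes, applied in
    # forward via transposes and right-multiplications.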
def get_permutations(self, perm=None):
if perm is None:
perm_fn = self.perm_type.generate_perm
elif perm == 'mean':
perm_fn = self.perm_type.mean_perm
elif perm == 'mle':
perm_fn = self.perm_type.mle_perm
elif perm == 'sample':
perm_fn = self.perm_type.sample_perm
else: assert False, f"Permutation type {perm} not supported."
# return shape (rank, s, n, n)
perms = torch.stack([perm_fn(p) for p in self.permute], dim=0)
# print("get_permutations:", perms.shape)
return perms
def entropy(self, p):
ents = torch.stack([perm.entropy(p) for perm in self.permute], dim=0) # (rank,)
return torch.mean(ents)
class Permutation(nn.Module):
def forward(self, x, samples=1):
soft_perms = self.sample_soft_perm((samples, x.size(0)))
return x.unsqueeze(0) @ soft_perms
def mean_perm(self):
pass
def sample_soft_perm(self, sample_shape=()):
""" Return soft permutation of shape sample_shape + (size, size) """
pass
class IdentityPermutation(Permutation):
def __init__(self, size):
super().__init__()
self.size = size
def generate_perm(self):
return torch.eye(self.size, device=device)
def mean_perm(self):
return torch.eye(self.size, device=device)
def mle_perm(self):
return torch.eye(self.size, device=device)
def sample_perm(self):
return torch.eye(self.size, device=device)
class LinearPermutation(Permutation):
def __init__(self, size):
super().__init__()
self.size = size
self.W = nn.Parameter(torch.empty(size, size))
self.W.is_perm_param = True
nn.init.kaiming_uniform_(self.W)
def generate_perm(self):
return self.W
def mean_perm(self):
return self.W
def mle_perm(self):
return self.W
def sample_perm(self):
return self.W
# def hard_perm(self):
# return self.W
# def sample_soft_perm(self, sample_shape=()):
# return self.W.view(*([1]*len(sample_shape)), size, size)
class ButterflyPermutation(Permutation):
def __init__(self, size, sig='BT1', param='ortho2', stochastic=False, temp=1.0, samples=1, sample_method='gumbel', hard=False):
super().__init__()
self.size = size
self.sig = sig
self.param = param
self.stochastic = stochastic # TODO align this block
self.temp = temp
self.samples = samples
self.sample_method = sample_method
self.hard = hard
self.m = int(math.ceil(math.log2(size)))
assert size == (1<<self.m), "ButterflyPermutation: Only power of 2 supported."
if self.stochastic:
self.mean_temp = 1.0
self.sample_temp = temp
if hard:
self.generate_fn = self.sample_hard_perm
else:
self.generate_fn = self.sample_soft_perm # add this attr for efficiency (avoid casing in every call to generate())
# self.sample_method = 'gumbel'
else:
self.mean_temp = temp
self.generate_fn = self.mean_perm
# no sample_temp; soft perm shouldn't be called in the non-stochastic case
self.hard_temp = 0.02
self.hard_iters = int(1./self.hard_temp)
# assume square matrices so 'nstack' is always 1
if sig[:2] == 'BT' and (sig[2:]).isdigit(): # TODO: empty number indicates 1
depth = int(sig[2:])
self.twiddle_core_shape = (2*depth, 1, self.m, self.size//2)
self.strides = [0,1] * depth # 1 for increasing, 0 for decreasing
elif sig[0] == 'B' and (sig[1:]).isdigit():
depth = int(sig[1:])
self.twiddle_core_shape = (depth, 1, self.m, self.size//2)
self.strides = [1] * depth # 1 for increasing, 0 for decreasing
elif sig[0] == 'T' and (sig[1:]).isdigit():
depth = int(sig[1:])
self.twiddle_core_shape = (depth, 1, self.m, self.size//2)
self.strides = [0] * depth # 1 for increasing, 0 for decreasing
else:
assert False, f"ButterflyPermutation: signature {sig} not supported."
# self.twiddle has shape (depth, 1, log n, n/2)
self.depth = self.twiddle_core_shape[0]
margin = 1e-3
# sample from [margin, 1-margin]
init = (1-2*margin)*(torch.rand(self.twiddle_core_shape)) + margin
if self.param == 'ds':
self.twiddle = nn.Parameter(init)
elif self.param == 'logit':
# self.twiddle = nn.Parameter(torch.rand(self.twiddle_core_shape)*2-1)
init = perm.sample_gumbel(self.twiddle_core_shape) - perm.sample_gumbel(self.twiddle_core_shape)
# init_temp = random.uniform(0.2, 0.4)
# init_temp = random.uniform(0.5, )
init_temp = 1.0 / self.depth
# init_temp = random.uniform(0.1, 0.2)
# init_temp = 0.2
self.twiddle = nn.Parameter(init / init_temp)
# self.twiddle = nn.Parameter(init)
# self.twiddle = nn.Parameter(torch.log(init / (1.-init)))
# logits = torch.log(init / (1.-init))
# self.twiddle = nn.Parameter( logits / temp)
# breakpoint()
elif param == 'ortho2':
# TODO change initialization for this type
# self.twiddle = nn.Parameter(torch.rand(self.twiddle_core_shape) * 2*math.pi)
self.twiddle = nn.Parameter(torch.acos(torch.sqrt(init)))
else:
assert False, f"ButterflyPermutation: Parameter type {self.param} not supported."
self.twiddle._is_perm_param = True
def entropy(self, p=None):
""" TODO: How does this compare to the matrix entropy of the expanded mean matrix? """
if p == 'logit':
assert self.param=='logit'
def binary_ent(p):
eps = 1e-10
return -(p * torch.log2(eps+p) + (1-p)*torch.log2(1-p+eps))
_twiddle = self.map_twiddle(self.twiddle)
ent1 = torch.sum(binary_ent(_twiddle))
return ent1
# could be better to not map at all
            # Unreachable alternative kept for reference (dead code after the
            # early return above): a logit-space form that avoids mapping twiddle.
            # x = torch.exp(-self.twiddle)
            # ent2 = torch.log2(1. + x) + self.twiddle * (x/(1.+x))
            # ent2 = torch.sum(ent2)
            # print(ent1-ent2)
            # return ent2
if p is None:
perms = self.generate_perm()
elif p == 'mean':
perms = self.mean_perm()
elif p == 'mle':
perms = self.mle_perm()
elif p == 'sample':
perms = self.sample_perm()
else: assert False, f"Permutation type {p} not supported."
return perm.entropy(perms, reduction='mean')
def generate_perm(self):
""" Generate (a batch of) permutations for training """
# TODO add the extra dimension even with mean for consistency
return self.generate_fn()
def map_twiddle(self, twiddle): # TODO static
if self.param=='ds':
return twiddle
elif self.param=='logit':
return 1.0/(1.0 + torch.exp(-twiddle))
elif self.param=='ortho2':
return torch.cos(twiddle)**2
else:
assert False, f"Unreachable"
def compute_perm(self, twiddle, strides, squeeze=True):
"""
# twiddle: (depth, 1, log n, n/2)
twiddle: (depth, samples, log n, n/2)
strides: (depth,) bool
Returns: (samples, n, n)
"""
samples = twiddle.size(1)
# print("compute_perm twiddle REQUIRES GRAD: ", twiddle.requires_grad)
P = torch.eye(self.size, device=twiddle.device).unsqueeze(1).repeat((1,samples,1)) # (n, s, n) : put samples in the 'nstack' parameter of butterfly_mult
# print("compute_perm REQUIRES GRAD: ", P.requires_grad)
for t, stride in zip(twiddle, strides):
twiddle_factor_mat = torch.stack((torch.stack((t, 1-t), dim=-1),
torch.stack((1-t, t), dim=-1)), dim=-2) # TODO efficiency by stacking other order?
P = butterfly_mult_untied(twiddle_factor_mat, P, stride, self.training)
# print("REQUIRES GRAD: ", P.requires_grad)
P = P.transpose(0, 1) # (s, n, n)
return P.squeeze() if squeeze else P
# return P.view(self.size, self.size) # (n, n)
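    # Note (added commentary): each 2x2 factor [[t, 1-t], [1-t, t]] assembled
    # in compute_perm is a doubly stochastic convex combination of the identity
    # (t=1) and the swap (t=0), so a product of log n butterfly layers is a
    # differentiable relaxation of a hard permutation.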
def mean_perm(self):
# TODO isn't scaling mean by temperature
# print("mean_perm twiddle REQUIRES GRAD: ", self.twiddle.requires_grad)
_twiddle = self.map_twiddle(self.twiddle)
p = self.compute_perm(_twiddle, self.strides)
# print("mean_perm REQUIRES GRAD: ", p.requires_grad)
return p
def mle_perm(self):
_twiddle = self.map_twiddle(self.twiddle)
hard_twiddle = torch.where(_twiddle > 0.5, torch.tensor(1.0, device=_twiddle.device), torch.tensor(0.0, device=_twiddle.device))
p = self.compute_perm(hard_twiddle, self.strides)
return p
def sample_perm(self, sample_shape=()):
if self.stochastic:
return self.sample_soft_perm()
else:
return self.sample_hard_perm()
def sample_soft_perm(self, sample_shape=()):
sample_shape = (self.samples,)
if self.param == 'logit':
# # TODO use pytorch's gumbel distribution...
# assert torch.all(self.twiddle == self.twiddle), "NANS FOUND"
# logits = torch.stack((self.twiddle, torch.zeros_like(self.twiddle)), dim=-1) # (depth, 1, log n, n/2, 2)
# assert torch.all(logits == logits), "NANS FOUND"
# logits_noise = perm.add_gumbel_noise(logits, sample_shape)
# assert torch.all(logits_noise == logits_noise), "NANS FOUND"
# sample_twiddle = torch.softmax(logits_noise / self.sample_temp, dim=-1)[..., 0] # shape (s, depth, 1, log n, n/2)
# assert torch.all(sample_twiddle == sample_twiddle), "NANS FOUND"
logits = torch.stack((self.twiddle, torch.zeros_like(self.twiddle)), dim=-1) # (depth, 1, log n, n/2, 2)
shape = logits.size()
# noise = perm.sample_gumbel((logits.size(0), self.samples)+logits.size()[2:])
# logits_noise = logits + noise.to(logits.device) # (d, s, log n, n/2, 2)
noise = perm.sample_gumbel((logits.size(0), self.samples)+logits.size()[2:], device=logits.device)
logits_noise = logits + noise # (d, s, log n, n/2, 2)
sample_twiddle = torch.softmax(logits_noise / self.sample_temp, dim=-1)[..., 0] # (depth, s, log n, n/2)
perms = self.compute_perm(sample_twiddle, self.strides, squeeze=False)
return perms
else: # TODO make this case batched over samples too
_twiddle = self.map_twiddle(self.twiddle)
if self.sample_method == 'gumbel':
# TODO: Can't take log!! multiply by exponential instead
logits = torch.stack((torch.log(_twiddle), torch.log(1.-_twiddle)), dim=-1) # (depth, 1, log n, n/2, 2)
logits_noise = perm.add_gumbel_noise(logits, sample_shape) # alternate way of doing this: sample one uniform parameter instead of two gumbel
sample_twiddle = torch.softmax(logits_noise / self.sample_temp, dim=-1)[..., 0] # shape (s, depth, 1, log n, n/2)
elif self.sample_method == 'uniform':
                r = torch.rand(_twiddle.size(), device=_twiddle.device)
_twiddle = _twiddle - r
sample_twiddle = 1.0 / (1.0 + torch.exp(-_twiddle / self.sample_temp))
else: assert False, "sample_method {self.sample_method} not supported"
perms = torch.stack([self.compute_perm(twiddle, self.strides) for twiddle in sample_twiddle], dim=0) # (s, n, n)
return perms
def sample_hard_perm(self, sample_shape=()):
sample_shape = (self.samples,)
_twiddle = self.map_twiddle(self.twiddle)
r = torch.rand(_twiddle.size(), device=_twiddle.device)
_twiddle = _twiddle - r
# sample_twiddle = 1.0 / (1.0 + torch.exp(-_twiddle / self.sample_temp))
# hard_twiddle = torch.where(_twiddle>0, torch.tensor(1.0, device=_twiddle.device), torch.tensor(0.0, device=_twiddle.device)) # shape (s, depth, 1, log n, n/2)
sample_twiddle = _twiddle.repeat(*sample_shape, *([1]*_twiddle.dim())) # TODO try expand
hard_twiddle = torch.where(sample_twiddle>0,
torch.ones_like(sample_twiddle),
torch.zeros_like(sample_twiddle)
) # shape (s, depth, 1, log n, n/2)
# print("HARD_TWIDDLE SHAPE", hard_twiddle.shape)
# sample_twiddle = _twiddle.expand(sample_shape+_twiddle.shape)
sample_twiddle.data = hard_twiddle # straight through estimator
if self.training: assert sample_twiddle.requires_grad
# TODO can make this a lot faster
perms = torch.stack([self.compute_perm(twiddle, self.strides) for twiddle in sample_twiddle], dim=0) # (s, n, n)
return perms
# logits = torch.stack((torch.log(tw), torch.zeros_like(tw)), dim=-1) # (depth, 1, log n, n/2, 2)
# logits_noise = perm.add_gumbel_noise(logits, sample_shape) # alternate way of doing this: sample one uniform parameter instead of two gumbel
# logits_noise = logits_noise[..., 0] - logits_noise[..., 1]
# sample_twiddle = torch.where(logits_noise>0, torch.tensor(1.0, device=_twiddle.device), torch.tensor(0.0, device=_twiddle.device)) # shape (s, depth, 1, log n, n/2)
# return sample_twiddle
def PResNet18(pretrained=False, **kwargs):
model = PResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def PResNet34(pretrained=False, **kwargs):
model = PResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def PResNet50(pretrained=False, **kwargs):
model = PResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def PResNet101(pretrained=False, **kwargs):
model = PResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def PResNet152(pretrained=False, **kwargs):
model = PResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
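# --- Hedged usage sketch (not part of the original file) ---
# These factories mirror torchvision's layer counts (18/34 use BasicBlock,
# 50/101/152 use Bottleneck). Assuming PResNet's constructor matches the
# calls above, a model is built as e.g.:
#   model = PResNet18()
#   logits = model(images)  # images: a (N, 3, H, W) float tensor
# The `pretrained` flag is accepted for interface compatibility but ignored.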
|
butterfly-master
|
cnn/models/presnet.py
|
import torch
import torch.nn as nn
import numpy as np
def mixup(alpha, num_classes, data, target):
    # Convex combination of a batch with a randomly permuted copy of itself.
    # `target` must be 2D (one-hot / soft labels) since it is indexed as
    # target[perm, :]; `num_classes` is unused but kept for signature parity.
    with torch.no_grad():
        bs = data.size(0)
        c = np.random.beta(alpha, alpha)
        perm = torch.randperm(bs).cuda()
        md = c * data + (1 - c) * data[perm, :]
        mt = c * target + (1 - c) * target[perm, :]
        return md, mt
class MixUpWrapper(object):
def __init__(self, alpha, num_classes, dataloader):
self.alpha = alpha
self.dataloader = dataloader
self.num_classes = num_classes
def mixup_loader(self, loader):
for input, target in loader:
i, t = mixup(self.alpha, self.num_classes, input, target)
yield i, t
def __iter__(self):
return self.mixup_loader(self.dataloader)
class NLLMultiLabelSmooth(nn.Module):
def __init__(self, smoothing = 0.0):
super(NLLMultiLabelSmooth, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
if self.training:
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim = -1)
nll_loss = -logprobs * target
nll_loss = nll_loss.sum(-1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
else:
return torch.nn.functional.cross_entropy(x, target)
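# --- Hedged usage sketch (not part of the original file) ---
# mixup() indexes target[perm, :], so it expects one-hot (2D float) targets,
# which NLLMultiLabelSmooth then consumes as soft labels. mixup() calls
# .cuda() internally, so this sketch only runs when a GPU is present.
if torch.cuda.is_available():
    data = torch.randn(8, 3, 32, 32, device='cuda')
    target = torch.eye(10, device='cuda')[torch.randint(0, 10, (8,), device='cuda')]  # one-hot rows
    mixed_data, mixed_target = mixup(0.2, 10, data, target)
    loss = NLLMultiLabelSmooth(smoothing=0.1)(torch.randn(8, 10, device='cuda'), mixed_target)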
|
butterfly-master
|
cnn/imagenet/mixup.py
|
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
DATA_BACKEND_CHOICES = ['pytorch']
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
DATA_BACKEND_CHOICES.append('dali-gpu')
DATA_BACKEND_CHOICES.append('dali-cpu')
except ImportError:
print("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = True)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.ImageDecoderRandomCrop(device=dali_device, output_type=types.RGB,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
else:
dali_device = "gpu"
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
self.decode = ops.ImageDecoderRandomCrop(device="mixed", output_type=types.RGB, device_memory_padding=211025920, host_memory_padding=140544512,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability = 0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror = rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = False)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.res = ops.Resize(device = "gpu", resize_shorter = size)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper(object):
def gen_wrapper(dalipipeline, num_classes, one_hot):
for data in dalipipeline:
input = data[0]["data"]
target = data[0]["label"].squeeze().cuda().long()
if one_hot:
target = expand(num_classes, torch.float, target)
yield input, target
dalipipeline.reset()
def __init__(self, dalipipeline, num_classes, one_hot):
self.dalipipeline = dalipipeline
self.num_classes = num_classes
self.one_hot = one_hot
def __iter__(self):
return DALIWrapper.gen_wrapper(self.dalipipeline, self.num_classes, self.one_hot)
def get_dali_train_loader(dali_cpu=False):
def gdtl(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
traindir = os.path.join(data_path, 'train')
pipe = HybridTrainPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = traindir, crop = 224, dali_cpu=dali_cpu)
pipe.build()
train_loader = DALIClassificationIterator(pipe, size = int(pipe.epoch_size("Reader") / world_size))
return DALIWrapper(train_loader, num_classes, one_hot), int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdtl
def get_dali_val_loader():
def gdvl(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
valdir = os.path.join(data_path, 'val')
pipe = HybridValPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = valdir,
crop = 224, size = 256)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size = int(pipe.epoch_size("Reader") / world_size))
return DALIWrapper(val_loader, num_classes, one_hot), int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdvl
def fast_collate(batch):
    imgs = [img[0] for img in batch]
    targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
    w = imgs[0].size[0]
    h = imgs[0].size[1]
    tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8)
    for i, img in enumerate(imgs):
        nump_array = np.asarray(img, dtype=np.uint8)
        if nump_array.ndim < 3:
            nump_array = np.expand_dims(nump_array, axis=-1)
        nump_array = np.rollaxis(nump_array, 2)  # HWC -> CHW
        tensor[i] += torch.from_numpy(nump_array)
    return tensor, targets
def expand(num_classes, dtype, tensor):
e = torch.zeros(tensor.size(0), num_classes, dtype=dtype, device=torch.device('cuda'))
e = e.scatter(1, tensor.unsqueeze(1), 1.0)
return e
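# Hedged illustration (not part of the original file): expand() one-hot
# encodes class indices via scatter, e.g.
#   expand(4, torch.float, torch.tensor([1, 3], device='cuda'))
#   -> [[0., 1., 0., 0.],
#       [0., 0., 0., 1.]]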
class PrefetchedWrapper(object):
def prefetched_loader(loader, num_classes, fp16, one_hot):
mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
if fp16:
mean = mean.half()
std = std.half()
stream = torch.cuda.Stream()
first = True
# Stage the next batch's host-to-device copy on a side stream so it overlaps
# with the training step running on the current batch.
for next_input, next_target in loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda(non_blocking=True)
next_target = next_target.cuda(non_blocking=True)
if fp16:
next_input = next_input.half()
if one_hot:
next_target = expand(num_classes, torch.half, next_target)
else:
next_input = next_input.float()
if one_hot:
next_target = expand(num_classes, torch.float, next_target)
next_input = next_input.sub_(mean).div_(std)
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __init__(self, dataloader, num_classes, fp16, one_hot):
self.dataloader = dataloader
self.fp16 = fp16
self.epoch = 0
self.one_hot = one_hot
self.num_classes = num_classes
def __iter__(self):
if (self.dataloader.sampler is not None and
isinstance(self.dataloader.sampler,
torch.utils.data.distributed.DistributedSampler)):
self.dataloader.sampler.set_epoch(self.epoch)
self.epoch += 1
return PrefetchedWrapper.prefetched_loader(self.dataloader, self.num_classes, self.fp16, self.one_hot)
def get_pytorch_train_loader(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
traindir = os.path.join(data_path, 'train')
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
]))
if torch.distributed.is_initialized():
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate, drop_last=True)
return PrefetchedWrapper(train_loader, num_classes, fp16, one_hot), len(train_loader)
def get_pytorch_val_loader(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
valdir = os.path.join(data_path, 'val')
val_dataset = datasets.ImageFolder(
valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
]))
if torch.distributed.is_initialized():
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
val_sampler = None
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=val_sampler,
batch_size=batch_size, shuffle=False,
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True,
collate_fn=fast_collate)
return PrefetchedWrapper(val_loader, num_classes, fp16, one_hot), len(val_loader)
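# --- Hedged usage sketch (not part of the original file) ---
# All four loader factories share one calling convention, so the DALI and
# PyTorch backends are interchangeable (the path below is an illustrative
# assumption):
#   train_loader, steps = get_pytorch_train_loader('/data/imagenet', 256, 1000,
#                                                  one_hot=True)
#   for input, target in train_loader:
#       pass  # input: normalized CUDA float tensor; target: one-hot if requested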
|
butterfly-master
|
cnn/imagenet/dataloaders.py
|
import random
import json
from collections import OrderedDict
class IterationMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.last = 0
def record(self, val, n = 1):
self.last = val
def get_val(self):
return None
def get_last(self):
return self.last
class EpochMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
def record(self, val, n = 1):
self.val = val
def get_val(self):
return self.val
def get_last(self):
return None
class AverageMeter(object):
def __init__(self, ret_last=True, ret_val=True):
self.reset()
self.ret_last = ret_last
self.ret_val = ret_val
def reset(self):
self.n = 0
self.val = 0
self.last = 0
def record(self, val, n = 1):
self.last = val
self.n += n
self.val += val * n
def get_val(self):
if self.ret_val:
if self.n == 0:
return 0.0
return self.val / self.n
else:
return None
def get_last(self):
if self.ret_last:
return self.last
else:
return None
class RunningMeter(object):
def __init__(self, decay):
    self.decay = decay
    self.reset()  # initialize val/last before the first record() call
def reset(self):
self.val = 0
self.last = 0
def record(self, val, n = 1):
self.last = val
decay = 1 - ((1 - self.decay) ** n)
self.val = (1 - decay) * self.val + decay * val
def get_val(self):
return self.val
def get_last(self):
return self.last
class Logger(object):
def __init__(self, print_interval, backends, verbose=False):
self.epoch = -1
self.iteration = -1
self.val_iteration = -1
self.metrics = OrderedDict()
self.backends = backends
self.print_interval = print_interval
self.verbose = verbose
def log_run_tag(self, name, val):
for b in self.backends:
b.log_run_tag(name, val)
def register_metric(self, metric_name, meter, log_level=0):
if self.verbose:
print("Registering metric: {}".format(metric_name))
self.metrics[metric_name] = {'meter' : meter, 'level' : log_level}
def log_metric(self, metric_name, val, n=1):
self.metrics[metric_name]['meter'].record(val, n=n)
def start_iteration(self, val=False):
if val:
self.val_iteration += 1
else:
self.iteration += 1
def end_iteration(self, val=False):
it = self.val_iteration if val else self.iteration
if (it % self.print_interval == 0):
for b in self.backends:
if val:
b.log_iteration_metric('val.it', it)
else:
b.log_iteration_metric('it', it)
for n, m in [(n, m) for n, m in self.metrics.items() if m['level'] <= b.level and n.startswith('val') == val]:
mv = m['meter'].get_last()
if mv is not None:
b.log_iteration_metric(n, mv)
b.log_end_iteration()
def start_epoch(self):
self.epoch += 1
self.iteration = 0
self.val_iteration = 0
for b in self.backends:
b.log_epoch_metric('ep', self.epoch)
for n, m in [(n, m) for n, m in self.metrics.items() if m['level'] <= b.level]:
m['meter'].reset()
def end_epoch(self):
for b in self.backends:
for n, m in [(n, m) for n, m in self.metrics.items() if m['level'] <= b.level]:
mv = m['meter'].get_val()
if mv is not None:
b.log_epoch_metric(n, mv)
b.log_end_epoch()
def end(self):
for b in self.backends:
b.end()
def iteration_generator_wrapper(self, gen, val = False):
for g in gen:
self.start_iteration(val = val)
yield g
self.end_iteration(val = val)
def epoch_generator_wrapper(self, gen):
for g in gen:
self.start_epoch()
yield g
self.end_epoch()
class JsonBackend(object):
def __init__(self, filename, log_level=0):
self.level = log_level
self.filename = filename
self.json_log = OrderedDict([
('run' , OrderedDict()),
('epoch', OrderedDict()),
('iter' , OrderedDict()),
('event', OrderedDict()),
])
def log_run_tag(self, name, val):
self.json_log['run'][name] = val
def log_end_epoch(self):
pass
def log_end_iteration(self):
pass
def log_epoch_metric(self, name, val):
if not name in self.json_log['epoch'].keys():
self.json_log['epoch'][name] = []
self.json_log['epoch'][name].append(val)
if name != 'ep':
if name in self.json_log['iter'].keys():
self.json_log['iter'][name].append([])
else:
if not 'it' in self.json_log['iter'].keys():
self.json_log['iter']['it'] = []
self.json_log['iter']['it'].append([])
def log_iteration_metric(self, name, val):
if not (name in self.json_log['iter'].keys()):
self.json_log['iter'][name] = [[]]
self.json_log['iter'][name][-1].append(val)
def end(self):
    with open(self.filename, 'w') as f:
        json.dump(self.json_log, f)
class StdOut1LBackend(object):
def __init__(self, iters, val_iters, epochs, log_level=0):
self.level = log_level
self.iteration = 0
self.total_iterations = iters
self.total_val_iterations = val_iters
self.epoch = 0
self.total_epochs = epochs
self.iteration_metrics = {}
self.epoch_metrics = {}
self.mode = 'train'
def log_run_tag(self, name, val):
print("{} : {}".format(name, val))
def log_end_epoch(self):
print("Summary Epoch: {}/{};\t{}".format(
self.epoch, self.total_epochs,
"\t".join(["{} : {:.3f}".format(m,v) for m, v in self.epoch_metrics.items()])))
self.epoch_metrics = {}
def log_end_iteration(self):
md = "Validation" if self.mode == 'val' else ""
ti = self.total_val_iterations if self.mode == 'val' else self.total_iterations
print("Epoch: {}/{} {} Iteration: {}/{};\t{}".format(
self.epoch, self.total_epochs, md, self.iteration, ti,
"\t".join(["{} : {:.3f}".format(m,v) for m, v in self.iteration_metrics.items()])))
self.iteration_metrics = {}
def log_epoch_metric(self, name, value):
if name == 'ep':
self.epoch = value
self.iteration = 0
else:
self.epoch_metrics[name] = value
def log_iteration_metric(self, name, value):
if name == 'it' or name == 'val.it':
self.mode = 'train' if name == 'it' else 'val'
self.iteration = value
else:
self.iteration_metrics[name] = value
def end(self):
pass
class StdOutBackend(object):
def __init__(self, iters, epochs, log_level=0):
self.level = log_level
self.iteration = 0
self.epoch = 0
def log_run_tag(self, name, val):
print("{} : {}".format(name, val))
def log_end_epoch(self):
pass
def log_end_iteration(self):
pass
def log_epoch_metric(self, name, value):
if name == 'ep':
self.epoch = value
self.iteration = 0
else:
print("Summary Epoch: {}; {} = {:.3f}".format(self.epoch, name, value))
def log_iteration_metric(self, name, value):
if name == 'it' or name == 'val.it':
self.iteration = value
else:
print("Epoch: {} Iteration: {}; {} = {:.3f}".format(self.epoch, self.iteration, name, value))
def end(self):
pass
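# --- Hedged usage sketch (not part of the original file) ---
# Wiring a Logger with both backends; the metric name, interval, and output
# path are illustrative assumptions.
if __name__ == '__main__':
    logger = Logger(print_interval=1,
                    backends=[StdOutBackend(iters=2, epochs=1),
                              JsonBackend('/tmp/log.json')])
    logger.register_metric('train.loss', AverageMeter())
    logger.start_epoch()
    for step in range(2):
        logger.start_iteration()
        logger.log_metric('train.loss', 0.5 / (step + 1))
        logger.end_iteration()
    logger.end_epoch()
    logger.end()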
|
butterfly-master
|
cnn/imagenet/logger.py
|
import math
import torch
import torch.nn as nn
import numpy as np
__all__ = ['ResNet', 'build_resnet', 'resnet_versions', 'resnet_configs']
# ResNetBuilder {{{
class ResNetBuilder(object):
def __init__(self, version, config):
self.config = config
self.L = sum(version['layers'])
self.M = version['block'].M
def conv(self, kernel_size, in_planes, out_planes, stride=1):
    # Only the 3x3 case goes through the configurable conv factory (which may
    # substitute a structured replacement); other kernel sizes stay dense.
if kernel_size == 3:
conv = self.config['conv'](
in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
elif kernel_size == 1:
conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
elif kernel_size == 5:
conv = nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,
padding=2, bias=False)
elif kernel_size == 7:
conv = nn.Conv2d(in_planes, out_planes, kernel_size=7, stride=stride,
padding=3, bias=False)
else:
return None
if self.config['nonlinearity'] == 'relu':
nn.init.kaiming_normal_(conv.weight,
mode=self.config['conv_init'],
nonlinearity=self.config['nonlinearity'])
return conv
def conv3x3(self, in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
c = self.conv(3, in_planes, out_planes, stride=stride)
return c
def conv1x1(self, in_planes, out_planes, stride=1):
    """1x1 convolution (no padding)"""
    c = self.conv(1, in_planes, out_planes, stride=stride)
    return c
def conv7x7(self, in_planes, out_planes, stride=1):
"""7x7 convolution with padding"""
c = self.conv(7, in_planes, out_planes, stride=stride)
return c
def conv5x5(self, in_planes, out_planes, stride=1):
"""5x5 convolution with padding"""
c = self.conv(5, in_planes, out_planes, stride=stride)
return c
def batchnorm(self, planes, last_bn=False):
bn = nn.BatchNorm2d(planes)
gamma_init_val = 0 if last_bn and self.config['last_bn_0_init'] else 1
nn.init.constant_(bn.weight, gamma_init_val)
nn.init.constant_(bn.bias, 0)
return bn
def activation(self):
return self.config['activation']()
# ResNetBuilder }}}
# BasicBlock {{{
class BasicBlock(nn.Module):
M = 2
expansion = 1
def __init__(self, builder, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = builder.conv3x3(inplanes, planes, stride)
self.bn1 = builder.batchnorm(planes)
self.relu = builder.activation()
self.conv2 = builder.conv3x3(planes, planes)
self.bn2 = builder.batchnorm(planes, last_bn=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.bn1 is not None:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.bn2 is not None:
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# BasicBlock }}}
# Bottleneck {{{
class Bottleneck(nn.Module):
M = 3
expansion = 4
def __init__(self, builder, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = builder.conv1x1(inplanes, planes)
self.bn1 = builder.batchnorm(planes)
self.conv2 = builder.conv3x3(planes, planes, stride=stride)
self.bn2 = builder.batchnorm(planes)
self.conv3 = builder.conv1x1(planes, planes * self.expansion)
self.bn3 = builder.batchnorm(planes * self.expansion, last_bn=True)
self.relu = builder.activation()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# Bottleneck }}}
# ResNet {{{
class ResNet(nn.Module):
def __init__(self, builder, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = builder.conv7x7(3, 64, stride=2)
self.bn1 = builder.batchnorm(64)
self.relu = builder.activation()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(builder, block, 64, layers[0])
self.layer2 = self._make_layer(builder, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(builder, block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(builder, block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, builder, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
dconv = builder.conv1x1(self.inplanes, planes * block.expansion,
stride=stride)
dbn = builder.batchnorm(planes * block.expansion)
if dbn is not None:
downsample = nn.Sequential(dconv, dbn)
else:
downsample = dconv
layers = []
layers.append(block(builder, self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(builder, self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
if self.bn1 is not None:
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
# ResNet }}}
resnet_configs = {
'classic' : {
'conv' : nn.Conv2d,
'conv_init' : 'fan_out',
'nonlinearity' : 'relu',
'last_bn_0_init' : False,
'activation' : lambda: nn.ReLU(inplace=True),
},
'fanin' : {
'conv' : nn.Conv2d,
'conv_init' : 'fan_in',
'nonlinearity' : 'relu',
'last_bn_0_init' : False,
'activation' : lambda: nn.ReLU(inplace=True),
},
}
resnet_versions = {
'resnet18' : {
'net' : ResNet,
'block' : BasicBlock,
'layers' : [2, 2, 2, 2],
'num_classes' : 1000,
},
'resnet34' : {
'net' : ResNet,
'block' : BasicBlock,
'layers' : [3, 4, 6, 3],
'num_classes' : 1000,
},
'resnet50' : {
'net' : ResNet,
'block' : Bottleneck,
'layers' : [3, 4, 6, 3],
'num_classes' : 1000,
},
'resnet101' : {
'net' : ResNet,
'block' : Bottleneck,
'layers' : [3, 4, 23, 3],
'num_classes' : 1000,
},
'resnet152' : {
'net' : ResNet,
'block' : Bottleneck,
'layers' : [3, 8, 36, 3],
'num_classes' : 1000,
},
}
def build_resnet(version, config, model_state=None):
version = resnet_versions[version]
config = resnet_configs[config]
builder = ResNetBuilder(version, config)
print("Version: {}".format(version))
print("Config: {}".format(config))
model = version['net'](builder,
version['block'],
version['layers'],
version['num_classes'])
return model
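# --- Hedged usage sketch (not part of the original file) ---
# Building a plain ResNet-50 from the registries above and checking the
# output shape on a dummy batch.
if __name__ == '__main__':
    model = build_resnet('resnet50', 'classic')
    out = model(torch.randn(2, 3, 224, 224))
    assert out.shape == (2, 1000)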
|
butterfly-master
|
cnn/imagenet/resnet.py
|
import os
import time
import shutil
import numpy as np
import torch
import torch.distributed as dist
def should_backup_checkpoint(args):
def _sbc(epoch):
return args.gather_checkpoints and (epoch < 10 or epoch % 10 == 0)
return _sbc
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', checkpoint_dir='./', backup_filename=None):
if (not torch.distributed.is_initialized()) or torch.distributed.get_rank() == 0:
filename = os.path.join(checkpoint_dir, filename)
print("SAVING {}".format(filename))
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(checkpoint_dir, 'model_best.pth.tar'))
if backup_filename is not None:
shutil.copyfile(filename, os.path.join(checkpoint_dir, backup_filename))
def timed_generator(gen):
start = time.time()
for g in gen:
end = time.time()
t = end - start
yield g, t
start = time.time()
def timed_function(f):
def _timed_function(*args, **kwargs):
start = time.time()
ret = f(*args, **kwargs)
return ret, time.time() - start
return _timed_function
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
return rt
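# --- Hedged usage sketch (not part of the original file) ---
# accuracy() returns one top-k precision percentage (a 1-element tensor) per
# requested k; timed_function wraps a callable to also return elapsed seconds.
if __name__ == '__main__':
    logits = torch.randn(16, 10)
    labels = torch.randint(0, 10, (16,))
    top1, top5 = accuracy(logits, labels, topk=(1, 5))
    total, elapsed = timed_function(torch.sum)(logits)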
|
butterfly-master
|
cnn/imagenet/utils.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
class KnowledgeDistillationLoss(nn.Module):
"""
Loss with knowledge distillation.
"""
def __init__(self, original_loss, temperature=1.0, alpha_ce=0.5):
super().__init__()
self.original_loss = original_loss
self.temperature = temperature
self.alpha_ce = alpha_ce
def forward(self, s_logit, t_logit, target):
# Adapted from https://github.com/huggingface/pytorch-transformers/blob/master/examples/distillation/distiller.py
# Scaled by temperature^2 to balance the soft and hard loss
# See https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py
# or https://github.com/stanford-futuredata/lit-code/blob/master/cifar10/distillation_loss.py
loss_kd = F.kl_div(F.log_softmax(s_logit / self.temperature, dim=-1),
F.softmax(t_logit / self.temperature, dim=-1),
reduction='batchmean') * (self.temperature)**2
loss_og = self.original_loss(s_logit, target)
return (1 - self.alpha_ce) * loss_og + self.alpha_ce * loss_kd
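# --- Hedged usage sketch (not part of the original file) ---
# Distillation interpolates between the original (smoothed) loss on hard
# labels and a temperature-scaled KL term against the teacher's logits.
if __name__ == '__main__':
    criterion = KnowledgeDistillationLoss(LabelSmoothing(smoothing=0.1),
                                          temperature=4.0, alpha_ce=0.5)
    s_logit, t_logit = torch.randn(8, 10), torch.randn(8, 10)
    target = torch.randint(0, 10, (8,))
    loss = criterion(s_logit, t_logit, target)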
|
butterfly-master
|
cnn/imagenet/smoothing.py
|