python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Megatron tokenizers."""
from abc import ABC
from abc import abstractmethod
from .bert_tokenization import FullTokenizer as FullBertTokenizer
from .gpt2_tokenization import GPT2Tokenizer
def build_tokenizer(args):
"""Initialize tokenizer."""
if args.rank == 0:
print('> building {} tokenizer ...'.format(args.tokenizer_type),
flush=True)
# Select and instantiate the tokenizer.
if args.tokenizer_type == 'BertWordPieceLowerCase':
assert args.vocab_file is not None
tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file,
lower_case=True,
vocab_extra_ids=args.vocab_extra_ids)
elif args.tokenizer_type == 'BertWordPieceCase':
assert args.vocab_file is not None
tokenizer = _BertWordPieceTokenizer(vocab_file=args.vocab_file,
lower_case=False,
vocab_extra_ids=args.vocab_extra_ids)
elif args.tokenizer_type == 'GPT2BPETokenizer':
assert args.vocab_file is not None
assert args.merge_file is not None
tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
elif args.tokenizer_type == 'SentencePieceTokenizer':
assert args.tokenizer_model is not None
tokenizer = _SentencePieceTokenizer(args.tokenizer_model, vocab_extra_ids=args.vocab_extra_ids)
elif args.tokenizer_type == 'GPTSentencePieceTokenizer':
assert args.tokenizer_model is not None
tokenizer = _GPTSentencePieceTokenizer(args.tokenizer_model)
elif args.tokenizer_type == 'Llama2Tokenizer':
assert args.tokenizer_model is not None
tokenizer = _Llama2Tokenizer(args.tokenizer_model)
elif args.tokenizer_type == 'NullTokenizer':
assert args.vocab_size is not None
tokenizer = _NullTokenizer(args.vocab_size)
else:
raise NotImplementedError('{} tokenizer is not '
'implemented.'.format(args.tokenizer_type))
# Add vocab size (if not already set from a checkpoint).
if getattr(args, "padded_vocab_size", None) is None:
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size,
args)
return tokenizer
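# Usage sketch (illustrative, not part of Megatron): build_tokenizer only reads a
# handful of argument attributes, so outside the regular Megatron argument parser
# it can be driven with a minimal namespace; the helper name and field values
# below are assumptions for the example.
def _example_build_null_tokenizer():
    from argparse import Namespace
    args = Namespace(rank=0, tokenizer_type='NullTokenizer', vocab_size=1000,
                     make_vocab_size_divisible_by=128,
                     tensor_model_parallel_size=1)
    tokenizer = build_tokenizer(args)  # also sets args.padded_vocab_size
    return tokenizer, args.padded_vocab_size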
def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still having GPU friendly size."""
after = orig_vocab_size
multiple = args.make_vocab_size_divisible_by * \
args.tensor_model_parallel_size
while (after % multiple) != 0:
after += 1
if args.rank == 0:
print(' > padded vocab (size: {}) with {} dummy tokens '
'(new size: {})'.format(
orig_vocab_size, after - orig_vocab_size, after), flush=True)
return after
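# Worked example (illustrative numbers): with a 50,257-entry GPT-2 vocab,
# make_vocab_size_divisible_by=128 and tensor_model_parallel_size=8, the
# multiple is 128 * 8 = 1024, so the vocab is padded from 50,257 to 51,200
# (943 dummy tokens) and every tensor-parallel partition holds 6,400 entries.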
class AbstractTokenizer(ABC):
"""Abstract class for tokenizer."""
def __init__(self, name):
self.name = name
super().__init__()
@property
@abstractmethod
def vocab_size(self):
pass
@property
@abstractmethod
def vocab(self):
"""Dictionary from vocab text token to id token."""
pass
@property
@abstractmethod
def inv_vocab(self):
"""Dictionary from vocab id token to text token."""
pass
@abstractmethod
def tokenize(self, text):
pass
def detokenize(self, token_ids):
raise NotImplementedError('detokenizer is not implemented for {} '
'tokenizer'.format(self.name))
@property
def cls(self):
raise NotImplementedError('CLS is not provided for {} '
'tokenizer'.format(self.name))
@property
def sep(self):
raise NotImplementedError('SEP is not provided for {} '
'tokenizer'.format(self.name))
@property
def pad(self):
raise NotImplementedError('PAD is not provided for {} '
'tokenizer'.format(self.name))
@property
def eod(self):
raise NotImplementedError('EOD is not provided for {} '
'tokenizer'.format(self.name))
@property
def mask(self):
raise NotImplementedError('MASK is not provided for {} '
'tokenizer'.format(self.name))
class _BertWordPieceTokenizer(AbstractTokenizer):
"""Original BERT wordpiece tokenizer."""
def __init__(self, vocab_file, lower_case=True, vocab_extra_ids=0):
if lower_case:
name = 'BERT Lower Case'
else:
name = 'BERT Upper Case'
super().__init__(name)
self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case)
self.cls_id = self.tokenizer.vocab['[CLS]']
self.sep_id = self.tokenizer.vocab['[SEP]']
self.pad_id = self.tokenizer.vocab['[PAD]']
self.mask_id = self.tokenizer.vocab['[MASK]']
self._additional_special_tokens = []
# (dsachan) Add BOS and EOS tokens
SPECIAL_TOKENS = {'eos_token': '[EOS]',
'bos_token': '[BOS]'}
self._bos_token = '[BOS]'
self.add_token(self._bos_token)
self._bos_token_id = self.vocab.get(self._bos_token)
self._eos_token = '[EOS]'
self.add_token(self._eos_token)
self._eos_token_id = self.vocab.get(self._eos_token)
# (dsachan) Add additional special tokens
# These can be used as sentinel tokens in T5 model inputs
additional_special_tokens = []
additional_special_tokens.extend(
["<extra_id_{}>".format(i) for i in range(vocab_extra_ids)])
self.add_additional_special_tokens(additional_special_tokens)
def add_token(self, token):
if token not in self.vocab:
self.inv_vocab[self.vocab_size] = token
# self.vocab_size comes from len(vocab)
# and it will increase as we add elements
self.vocab[token] = self.vocab_size
def add_additional_special_tokens(self, tokens_list):
setattr(self, "additional_special_tokens", tokens_list)
for value in tokens_list:
self.add_token(value)
@property
def vocab_size(self):
return self.tokenizer.vocab_size()
@property
def vocab(self):
return self.tokenizer.vocab
@property
def inv_vocab(self):
return self.tokenizer.inv_vocab
def tokenize(self, text):
text_tokens = self.tokenizer.tokenize(text)
return self.tokenizer.convert_tokens_to_ids(text_tokens)
def decode(self, ids):
tokens = self.tokenizer.convert_ids_to_tokens(ids)
return self.tokenizer.convert_tokens_to_string(tokens)
def decode_token_ids(self, token_ids):
tokens = self.tokenizer.convert_ids_to_tokens(token_ids)
exclude_list = ['[PAD]', '[CLS]']
non_pads = [t for t in tokens if t not in exclude_list]
result = ""
for s in non_pads:
if s.startswith("##"):
result += s[2:]
else:
result += " " + s
return result
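    # Example of the "##" merging above (tokens are illustrative): the ids for
    # ['[CLS]', 'un', '##believ', '##able', '[SEP]'] decode to " unbelievable [SEP]":
    # '[CLS]'/'[PAD]' entries are dropped, '##' pieces are glued onto the previous
    # piece, and remaining tokens are joined with a leading space.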
@property
def cls(self):
return self.cls_id
@property
def sep(self):
return self.sep_id
@property
def pad(self):
return self.pad_id
@property
def mask(self):
return self.mask_id
@property
def bos_token(self):
""" Beginning of sentence token id """
return self._bos_token
@property
def eos_token(self):
""" End of sentence token id """
return self._eos_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings)."""
return self._additional_special_tokens
@property
def bos_token_id(self):
""" Id of the beginning of sentence token in the vocabulary."""
return self._bos_token_id
@property
def eos_token_id(self):
""" Id of the end of sentence token in the vocabulary."""
return self._eos_token_id
@property
def additional_special_tokens_ids(self):
""" Ids of all the additional special tokens in the vocabulary (list of integers)."""
return [self.vocab.get(token) for token in self._additional_special_tokens]
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
class _GPT2BPETokenizer(AbstractTokenizer):
"""Original GPT2 BPE tokenizer."""
def __init__(self, vocab_file, merge_file):
name = 'GPT2 BPE'
super().__init__(name)
self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace',
special_tokens=[], max_len=None)
self.eod_id = self.tokenizer.encoder['<|endoftext|>']
@property
def vocab_size(self):
return len(self.tokenizer.encoder)
@property
def vocab(self):
return self.tokenizer.encoder
@property
def inv_vocab(self):
return self.tokenizer.decoder
def tokenize(self, text):
return self.tokenizer.encode(text)
def detokenize(self, token_ids):
return self.tokenizer.decode(token_ids)
@property
def eod(self):
return self.eod_id
class _SentencePieceTokenizer(AbstractTokenizer):
"""SentencePieceTokenizer-Megatron wrapper"""
def __init__(self, model_file, vocab_extra_ids=0):
name = 'SentencePieceTokenizer'
super().__init__(name)
import sentencepiece
self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=model_file)
        self._initialize(vocab_extra_ids)
def _populate_vocab(self):
self._vocab = {}
self._inv_vocab = {}
for i in range(len(self.tokenizer)):
t = self.tokenizer.id_to_piece(i)
self._inv_vocab[i] = t
self._vocab[t] = i
    def _initialize(self, vocab_extra_ids):
self._populate_vocab()
self._special_tokens = {}
self._inv_special_tokens = {}
self._t5_tokens = []
def _add_special_token(t):
if t not in self._vocab:
next_id = len(self._vocab)
self._vocab[t] = next_id
self._inv_vocab[next_id] = t
self._special_tokens[t] = self._vocab[t]
self._inv_special_tokens[self._vocab[t]] = t
_add_special_token('<CLS>')
self._cls_id = self._vocab['<CLS>']
_add_special_token('<SEP>')
self._sep_id = self._vocab['<SEP>']
_add_special_token('<EOD>')
self._eod_id = self._vocab['<EOD>']
_add_special_token('<MASK>')
self._mask_id = self._vocab['<MASK>']
pad_id = self.tokenizer.pad_id()
try:
pad_token = self.tokenizer.id_to_piece(pad_id)
except IndexError:
pad_token = '<PAD>'
_add_special_token(pad_token)
self._pad_id = self._vocab[pad_token]
bos_id = self.tokenizer.bos_id()
try:
bos_token = self.tokenizer.id_to_piece(bos_id)
except IndexError:
bos_token = '<BOS>'
_add_special_token(bos_token)
self._bos_id = self._vocab[bos_token]
eos_id = self.tokenizer.eos_id()
try:
eos_token = self.tokenizer.id_to_piece(eos_id)
except IndexError:
eos_token = '<EOS>'
_add_special_token(eos_token)
self._eos_id = self._vocab[eos_token]
for i in range(vocab_extra_ids):
t = "<extra_id_{}>".format(i)
_add_special_token(t)
self._t5_tokens += [t]
@property
def vocab_size(self):
return len(self._vocab)
@property
def vocab(self):
return self._vocab
@property
def inv_vocab(self):
return self._inv_vocab
@property
def decoder(self):
return self._inv_vocab
@property
def encoder(self):
return self._vocab
# From:
# https://github.com/NVIDIA/NeMo/blob/c8fa217e811d60d11d014827c7f3845ff6c99ae7/nemo/collections/common/tokenizers/sentencepiece_tokenizer.py#L89
def tokenize(self, text):
ids = []
idx = 0
while 1:
indices = {}
for token in self._special_tokens:
try:
indices[token] = text[idx:].index(token)
except ValueError:
continue
if len(indices) == 0:
break
next_token = min(indices, key=indices.get)
next_idx = idx + indices[next_token]
ids.extend(self.tokenizer.encode_as_ids(text[idx:next_idx]))
ids.append(self._special_tokens[next_token])
idx = next_idx + len(next_token)
ids.extend(self.tokenizer.encode_as_ids(text[idx:]))
return ids
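    # Sketch of the splitting above (input is illustrative): for
    # "question <extra_id_0> answer", the loop encodes "question " with
    # SentencePiece, appends the reserved id of '<extra_id_0>' directly, then
    # encodes " answer", so registered special tokens bypass the SentencePiece model.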
# From:
# https://github.com/NVIDIA/NeMo/blob/c8fa217e811d60d11d014827c7f3845ff6c99ae7/nemo/collections/common/tokenizers/sentencepiece_tokenizer.py#L125
def detokenize(self, ids):
text = ""
last_i = 0
for i, id in enumerate(ids):
if id in self._inv_special_tokens:
text += self.tokenizer.decode_ids(ids[last_i:i]) + " "
text += self._inv_special_tokens[id] + " "
last_i = i + 1
text += self.tokenizer.decode_ids(ids[last_i:])
return text
@property
def cls(self):
return self._cls_id
@property
def sep(self):
return self._sep_id
@property
def pad(self):
return self._pad_id
@property
def bos_token_id(self):
return self._bos_id
@property
def bos(self):
return self._bos_id
@property
def eod(self):
return self._eod_id
@property
def eos_token_id(self):
return self._eos_id
@property
def eos(self):
return self._eos_id
@property
def mask(self):
return self._mask_id
@property
def additional_special_tokens_ids(self):
return [self.vocab[k] for k in self._t5_tokens]
class _GPTSentencePieceTokenizer(_SentencePieceTokenizer):
"""SentencePieceTokenizer-Megatron wrapper"""
def __init__(self, model_file,):
super().__init__(model_file, vocab_extra_ids=0)
    def _initialize(self, vocab_extra_ids):
self._populate_vocab()
self._pad_id = self.tokenizer.pad_id()
self._bos_id = self.tokenizer.bos_id()
self._eos_id = self.tokenizer.eos_id()
def tokenize(self, text):
return self.tokenizer.encode_as_ids(text)
def detokenize(self, ids):
return self.tokenizer.decode_ids(ids)
@property
def cls(self):
return -1
@property
def sep(self):
return -1
@property
def mask(self):
return -1
@property
def eod(self):
return self._eos_id
@property
def additional_special_tokens_ids(self):
return None
class _Llama2Tokenizer(_SentencePieceTokenizer):
"""SentencePieceTokenizer-Megatron wrapper"""
def __init__(self, model_file,):
super().__init__(model_file, vocab_extra_ids=0)
    def _initialize(self, vocab_extra_ids):
self._populate_vocab()
# BOS / EOS token IDs
self.n_words: int = self.tokenizer.vocab_size()
self.bos_id: int = self.tokenizer.bos_id()
self.eos_id: int = self.tokenizer.eos_id()
self.pad_id: int = self.tokenizer.pad_id()
assert self.tokenizer.vocab_size() == self.tokenizer.get_piece_size()
def tokenize(self, s: str, bos=True, eos=False):
'''Default args for text completion, not chat/dialog.'''
assert type(s) is str
t = self.tokenizer.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
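    # Example (ids are illustrative): with bos_id=1 and eos_id=2,
    # tokenize("hi", bos=True, eos=True) returns [1, *piece_ids, 2], while the
    # defaults (bos=True, eos=False) match plain text-completion prompts.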
def detokenize(self, ids):
return self.tokenizer.decode_ids(ids)
@property
def cls(self):
return -1
@property
def sep(self):
return -1
@property
def mask(self):
return -1
@property
def eod(self):
return self.eos_id
@property
def additional_special_tokens_ids(self):
return None
class _NullTokenizer:
def __init__(self, vocab_size):
vocab_size = int(vocab_size)
self._eos_id = vocab_size
self.vocab_size = vocab_size+1
def tokenize(self, text):
return [int(x) for x in text.split(' ')]
def detokenize(self, ids):
text = [str(x) for x in ids]
return ' '.join(text)
@property
def cls(self):
return -1
@property
def sep(self):
return -1
@property
def mask(self):
return -1
@property
def eod(self):
return self._eos_id
@property
def additional_special_tokens_ids(self):
return None
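# Usage sketch (illustrative, not part of Megatron): _NullTokenizer treats the
# "text" as an already tokenized, space-separated id stream; the id equal to
# vocab_size is reserved for end-of-document.
def _null_tokenizer_example():
    tok = _NullTokenizer(10)
    assert tok.tokenize("3 1 4") == [3, 1, 4]
    assert tok.detokenize([3, 1, 4]) == "3 1 4"
    assert tok.eod == 10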
| Megatron-LM-master | megatron/tokenizer/tokenizer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import pathlib
import subprocess
from torch.utils import cpp_extension
# Setting this param to a list has a problem of generating different
# compilation commands (with different order of architectures) and
# leading to recompilation of fused kernels. Set it to an empty string
# to avoid recompilation and assign arch flags explicitly in
# extra_cuda_cflags below.
os.environ["TORCH_CUDA_ARCH_LIST"] = ""
def load(args):
# Check if cuda 11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(
cpp_extension.CUDA_HOME
)
if int(bare_metal_major) >= 11:
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
if int(bare_metal_minor) >= 8:
cc_flag.append('-gencode')
cc_flag.append('arch=compute_90,code=sm_90')
# Build path
srcpath = pathlib.Path(__file__).parent.absolute()
buildpath = srcpath / "build"
_create_build_dir(buildpath)
# Helper function to build the kernels.
def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
return cpp_extension.load(
name=name,
sources=sources,
build_directory=buildpath,
extra_cflags=[
"-O3",
],
extra_cuda_cflags=[
"-O3",
"-gencode",
"arch=compute_70,code=sm_70",
"--use_fast_math",
]
+ extra_cuda_flags
+ cc_flag,
verbose=(args.rank == 0),
)
def _get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output(
[cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
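# Parsing sketch (example output, may vary by CUDA release): `nvcc -V` typically
# ends with a line like "Cuda compilation tools, release 11.8, V11.8.89"; after
# splitting on whitespace the token following "release" is "11.8,", giving
# bare_metal_major "11" and bare_metal_minor "8".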
def _create_build_dir(buildpath):
try:
os.mkdir(buildpath)
except OSError:
if not os.path.isdir(buildpath):
print(f"Creation of the build directory {buildpath} failed")
| Megatron-LM-master | megatron/fused_kernels/__init__.py |
| Megatron-LM-master | megatron/fused_kernels/tests/__init__.py |
import math
import torch
from torch.nn import LayerNorm
from megatron.model.enums import AttnMaskType
from megatron.model.fused_layer_norm import MixedFusedLayerNorm
from megatron.model.fused_softmax import FusedScaleMaskSoftmax
from megatron.model.utils import attention_mask_func
from megatron.fused_kernels import load
def test_load_fused_kernels():
try:
import fused_layer_norm_cuda
import scaled_masked_softmax_cuda
import scaled_upper_triang_masked_softmax_cuda
import torch
print("[Success] load_fused_kernels")
except ImportError as e:
print("[Fail] load_fused_kernels")
raise e
def test_fused_softmax():
bert = BertModel.from_pretrained("bert-base-cased").cuda().half()
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
test_text = (
"Hello. How are you? I am fine thank you and you? yes Good. "
"hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32
)
tokens = tokenizer(
[test_text] * 4,
return_tensors="pt",
)
embedding_output = bert.embeddings(
input_ids=tokens["input_ids"].cuda(),
position_ids=None,
token_type_ids=tokens["token_type_ids"].cuda(),
inputs_embeds=None,
past_key_values_length=0,
)
# (bsz, 1, 1, seq_len)
mask = bert.get_extended_attention_mask(
attention_mask=tokens["attention_mask"].cuda(),
input_shape=tokens["input_ids"].shape,
device=bert.device,
)
# (bsz, 1, seq_len, seq_len)
mask = mask.repeat(1, 1, mask.size()[-1], 1)
attention = bert.encoder.layer[0].attention.self
key_layer = attention.transpose_for_scores(attention.key(embedding_output))
query_layer = attention.transpose_for_scores(attention.query(embedding_output))
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores /= math.sqrt(key_layer.size()[-1])
fused_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.padding,
scaled_masked_softmax_fusion=True,
)
.cuda()
.half()
)
fused_softmax_output = fused_softmax(
attention_scores,
(mask != 0),
)
torch_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.padding,
scaled_masked_softmax_fusion=False,
)
.cuda()
.half()
)
torch_softmax_output = torch_softmax(
attention_scores,
(mask != 0),
)
test_result = (fused_softmax_output - torch_softmax_output).abs()
while test_result.dim() != 1:
test_result = test_result.mean(dim=-1)
diff = test_result.mean(dim=-1)
if diff <= 1e-3:
print(
f"\n[Success] test_fused_softmax"
f"\n > mean_difference={diff}"
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}"
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
else:
print(
f"\n[Fail] test_fused_softmax"
f"\n > mean_difference={diff}, "
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, "
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
def test_fused_upper_triangle_mask_softmax():
gpt = GPT2Model.from_pretrained("gpt2").cuda().half()
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
test_text = (
"Hello. How are you? I am fine thank you and you? yes Good. "
"hi hi hi hi hi hi hi" # 24
)
tokens = tokenizer(
[test_text] * 4,
return_tensors="pt",
)
attention_mask = tokens["attention_mask"].cuda()
attention_mask = attention_mask.view(attention_mask.size(0), -1)
attention_mask = attention_mask[:, None, None, :]
attention_mask = (1.0 - attention_mask) * -10000.0
attention_mask = attention_mask.repeat(1, 1, attention_mask.size()[-1], 1)
attn = gpt.h[0]
hidden_states = gpt.wte(tokens["input_ids"].cuda())
q, k, v = attn.attn.c_attn(hidden_states).split(768, dim=-1)
q = attn.attn._split_heads(q, attn.attn.num_heads, attn.attn.head_dim)
k = attn.attn._split_heads(k, attn.attn.num_heads, attn.attn.head_dim)
attn_weights = torch.matmul(q, k.transpose(-1, -2))
sq, sk = q.size(-2), k.size(-2)
causal_mask = attn.attn.bias[:, :, sk - sq : sk, :sk].bool()
total_mask = ~(causal_mask & (attention_mask == 0))
"""
tensor([[[[False, True, True, ..., True, True, True],
[False, False, True, ..., True, True, True],
[False, False, False, ..., True, True, True],
...,
[False, False, False, ..., False, True, True],
[False, False, False, ..., False, False, True],
[False, False, False, ..., False, False, False]]]
"""
fused_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.causal,
scaled_masked_softmax_fusion=True,
)
.cuda()
.half()
)
fused_softmax_output = fused_softmax(
attn_weights,
total_mask,
)
torch_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.causal,
scaled_masked_softmax_fusion=False,
)
.cuda()
.half()
)
torch_softmax_output = torch_softmax(
attn_weights,
total_mask,
)
test_result = (fused_softmax_output - torch_softmax_output).abs()
while test_result.dim() != 1:
test_result = test_result.mean(dim=-1)
diff = test_result.mean(dim=-1)
if diff <= 1e-3:
print(
f"\n[Success] test_fused_upper_triangle_mask_softmax"
f"\n > mean_difference={diff}"
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}"
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
else:
print(
f"\n[Fail] test_fused_upper_triangle_mask_softmax"
f"\n > mean_difference={diff}, "
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, "
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
def test_layer_norm():
bert = BertModel.from_pretrained("bert-base-cased").cuda().half()
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
test_text = (
"Hello. How are you? I am fine thank you and you? yes Good. "
"hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32
)
tokens = tokenizer(
[test_text] * 4,
return_tensors="pt",
)
# [bsz, seq_len, d_model]
embedding_output = (
bert.embeddings(
input_ids=tokens["input_ids"].cuda(),
position_ids=None,
token_type_ids=tokens["token_type_ids"].cuda(),
inputs_embeds=None,
past_key_values_length=0,
)
.cuda()
.half()
)
fused_layernorm_layer = (
MixedFusedLayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half()
)
torch_layernorm_layer = (
LayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half()
)
fused_output = fused_layernorm_layer(embedding_output)
torch_output = torch_layernorm_layer(embedding_output)
test_result = (fused_output - torch_output).abs()
while test_result.dim() != 1:
test_result = test_result.mean(dim=-1)
diff = test_result.mean(dim=-1)
if diff <= 1e-3:
print(
f"\n[Success] test_layer_norm"
f"\n > mean_difference={diff}"
f"\n > fused_values={fused_output[-1][-1][:5].tolist()}"
f"\n > torch_values={torch_output[-1][-1][:5].tolist()}"
)
else:
print(
f"\n[Fail] test_layer_norm"
f"\n > mean_difference={diff}, "
f"\n > fused_values={fused_output[-1][-1][:5].tolist()}, "
f"\n > torch_values={torch_output[-1][-1][:5].tolist()}"
)
def attention_mask_func(attention_scores, attention_mask):
attention_scores.masked_fill_(attention_mask, -10000.0)
return attention_scores
def forward_torch_softmax(input, mask, scale):
input = input * scale
mask_output = attention_mask_func(input, mask) if mask is not None else input
probs = torch.nn.Softmax(dim=-1)(mask_output)
return probs
def test_masked_softmax_forward():
import scaled_masked_softmax_cuda
batch = 2
attn = 16
scale_t = torch.tensor([1.0])
for qlen in [128, 256, 1024, 2048, 4096]:
for klen in [128, 256, 1024, 2048]:
inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0')
masks = torch.randint(0, 2, (batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0')
softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item())
softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item())
error = (softmax_results_torch - softmax_results).abs().max()
assert error < 1e-3
def test_masked_softmax_backward():
import scaled_masked_softmax_cuda
batch = 2
attn = 16
scale_t = torch.tensor([1.0])
for qlen in [128, 256, 1024, 2048, 4096]:
for klen in [128, 256, 1024, 2048]:
inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0')
backward = torch.rand_like(inputs, dtype=torch.float16, device='cuda:0')
masks = torch.randint(0, 2, (batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0')
softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item())
back_grad = scaled_masked_softmax_cuda.backward(backward, softmax_results, scale_t[0].item())
inputs.requires_grad = True
softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item())
softmax_results_torch.backward(backward)
error = (back_grad - inputs.grad).abs().max()
assert error < 1e-3
def test_allmasked_softmax_forward():
import scaled_masked_softmax_cuda
batch = 2
attn = 16
scale_t = torch.tensor([1.0])
for qlen in [128, 256, 1024, 2048, 4096]:
for klen in [128, 256, 1024, 2048]:
inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0')
masks = torch.ones((batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0')
softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item())
softmax_results_torch = torch.zeros_like(inputs)
error = (softmax_results_torch - softmax_results).abs().max()
assert error == 0.0
def test_allmasked_softmax_backward():
import scaled_masked_softmax_cuda
batch = 2
attn = 16
scale_t = torch.tensor([1.0])
for qlen in [128, 256, 1024, 2048, 4096]:
for klen in [128, 256, 1024, 2048]:
inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0')
backward = torch.rand_like(inputs, dtype=torch.float16, device='cuda:0')
masks = torch.ones((batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0')
softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item())
back_grad = scaled_masked_softmax_cuda.backward(backward, softmax_results, scale_t[0].item())
inputs.requires_grad = True
softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item())
softmax_results_torch.backward(backward)
error = (back_grad - inputs.grad).abs().max()
assert error < 1e-3
if __name__ == "__main__":
try:
from transformers import BertTokenizer, GPT2Tokenizer
from transformers.models.bert.modeling_bert import BertModel
from transformers.models.gpt2.modeling_gpt2 import GPT2Model
import transformers
transformers.logging.set_verbosity(
transformers.logging.FATAL,
)
except:
print("\n[Fail] Please install `transformers` package to test fused kernels\n")
exit(-1)
load()
test_masked_softmax_forward()
test_masked_softmax_backward()
test_allmasked_softmax_forward()
test_allmasked_softmax_backward()
test_load_fused_kernels()
test_fused_softmax()
test_fused_upper_triangle_mask_softmax()
test_layer_norm()
| Megatron-LM-master | megatron/fused_kernels/tests/test_fused_kernels.py |
import os
import torch
import sys
from megatron import get_args, print_rank_0, get_tokenizer
from megatron.core import mpu
from megatron.checkpointing import fix_query_key_value_ordering
from megatron.checkpointing import get_checkpoint_tracker_filename
from megatron.checkpointing import get_checkpoint_name
from megatron.model.bert_model import bert_position_ids
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import get_language_model
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
def get_model_provider(only_query_model=False, only_context_model=False,
biencoder_shared_query_context_model=False):
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
        print_rank_0('building Biencoder model ...')
model = biencoder_model_provider(only_query_model=only_query_model,
only_context_model = only_context_model,
biencoder_shared_query_context_model = \
biencoder_shared_query_context_model,
pre_process=pre_process, post_process=post_process)
return model
return model_provider
def biencoder_model_provider(only_query_model=False,
only_context_model=False,
biencoder_shared_query_context_model=False,
pre_process=True,
post_process=True):
"""Build the model."""
assert mpu.get_tensor_model_parallel_world_size() == 1 and \
mpu.get_pipeline_model_parallel_world_size() == 1, \
"Model parallel size > 1 not supported for ICT"
print_rank_0('building BiEncoderModel...')
# simpler to just keep using 2 tokentypes since
# the LM we initialize with has 2 tokentypes
model = BiEncoderModel(
num_tokentypes=2,
parallel_output=False,
only_query_model=only_query_model,
only_context_model=only_context_model,
biencoder_shared_query_context_model=\
biencoder_shared_query_context_model,
pre_process=pre_process,
post_process=post_process)
return model
class BiEncoderModel(MegatronModule):
"""Bert-based module for Biencoder model."""
def __init__(self,
num_tokentypes=1,
parallel_output=True,
only_query_model=False,
only_context_model=False,
biencoder_shared_query_context_model=False,
pre_process=True,
post_process=True):
super(BiEncoderModel, self).__init__()
args = get_args()
bert_kwargs = dict(
num_tokentypes=num_tokentypes,
parallel_output=parallel_output,
pre_process=pre_process,
post_process=post_process)
self.biencoder_shared_query_context_model = \
biencoder_shared_query_context_model
assert not (only_context_model and only_query_model)
self.use_context_model = not only_query_model
self.use_query_model = not only_context_model
self.biencoder_projection_dim = args.biencoder_projection_dim
if self.biencoder_shared_query_context_model:
self.model = PretrainedBertModel(**bert_kwargs)
self._model_key = 'shared_model'
self.query_model, self.context_model = self.model, self.model
else:
if self.use_query_model:
# this model embeds (pseudo-)queries - Embed_input in the paper
self.query_model = PretrainedBertModel(**bert_kwargs)
self._query_key = 'query_model'
if self.use_context_model:
# this model embeds evidence blocks - Embed_doc in the paper
self.context_model = PretrainedBertModel(**bert_kwargs)
self._context_key = 'context_model'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
# this is just a placeholder and will be needed when model
# parallelism will be used
# self.language_model.set_input_tensor(input_tensor)
return
def forward(self, query_tokens, query_attention_mask, query_types,
context_tokens, context_attention_mask, context_types):
"""Run a forward pass for each of the models and
return the respective embeddings."""
if self.use_query_model:
query_logits = self.embed_text(self.query_model,
query_tokens,
query_attention_mask,
query_types)
else:
raise ValueError("Cannot embed query without the query model.")
if self.use_context_model:
context_logits = self.embed_text(self.context_model,
context_tokens,
context_attention_mask,
context_types)
else:
raise ValueError("Cannot embed block without the block model.")
return query_logits, context_logits
@staticmethod
def embed_text(model, tokens, attention_mask, token_types):
"""Embed a batch of tokens using the model"""
logits = model(tokens,
attention_mask,
token_types)
return logits
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""Save dict with state dicts of each of the models."""
state_dict_ = {}
if self.biencoder_shared_query_context_model:
state_dict_[self._model_key] = \
self.model.state_dict_for_save_checkpoint(
prefix=prefix, keep_vars=keep_vars)
else:
if self.use_query_model:
state_dict_[self._query_key] = \
self.query_model.state_dict_for_save_checkpoint(
prefix=prefix, keep_vars=keep_vars)
if self.use_context_model:
state_dict_[self._context_key] = \
self.context_model.state_dict_for_save_checkpoint(
prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Load the state dicts of each of the models"""
if self.biencoder_shared_query_context_model:
print_rank_0("Loading shared query-context model")
self.model.load_state_dict(state_dict[self._model_key], \
strict=strict)
else:
if self.use_query_model:
print_rank_0("Loading query model")
self.query_model.load_state_dict( \
state_dict[self._query_key], strict=strict)
if self.use_context_model:
print_rank_0("Loading context model")
self.context_model.load_state_dict( \
state_dict[self._context_key], strict=strict)
def init_state_dict_from_bert(self):
"""Initialize the state from a pretrained BERT model
on iteration zero of ICT pretraining"""
args = get_args()
if args.bert_load is None:
print_rank_0("bert-load argument is None")
return
tracker_filename = get_checkpoint_tracker_filename(args.bert_load)
if not os.path.isfile(tracker_filename):
raise FileNotFoundError("Could not find BERT checkpoint")
with open(tracker_filename, 'r') as f:
iteration = int(f.read().strip())
assert iteration > 0
checkpoint_name = get_checkpoint_name(args.bert_load, iteration, False)
if mpu.get_data_parallel_rank() == 0:
print('global rank {} is loading BERT checkpoint {}'.format(
torch.distributed.get_rank(), checkpoint_name))
# Load the checkpoint.
try:
state_dict = torch.load(checkpoint_name, map_location='cpu')
except ModuleNotFoundError:
from megatron.fp16_deprecated import loss_scaler
# For backward compatibility.
print_rank_0(' > deserializing using the old code structure ...')
sys.modules['fp16.loss_scaler'] = sys.modules[
'megatron.fp16_deprecated.loss_scaler']
sys.modules['megatron.fp16.loss_scaler'] = sys.modules[
'megatron.fp16_deprecated.loss_scaler']
state_dict = torch.load(checkpoint_name, map_location='cpu')
sys.modules.pop('fp16.loss_scaler', None)
sys.modules.pop('megatron.fp16.loss_scaler', None)
except BaseException:
print_rank_0('could not load the BERT checkpoint')
sys.exit()
checkpoint_version = state_dict.get('checkpoint_version', 0)
# load the LM state dict into each model
model_dict = state_dict['model']['language_model']
if self.biencoder_shared_query_context_model:
self.model.language_model.load_state_dict(model_dict)
fix_query_key_value_ordering(self.model, checkpoint_version)
else:
if self.use_query_model:
self.query_model.language_model.load_state_dict(model_dict)
# give each model the same ict_head to begin with as well
if self.biencoder_projection_dim > 0:
query_proj_state_dict = \
self.state_dict_for_save_checkpoint()\
[self._query_key]['projection_enc']
fix_query_key_value_ordering(self.query_model, checkpoint_version)
if self.use_context_model:
self.context_model.language_model.load_state_dict(model_dict)
if self.query_model is not None and \
self.biencoder_projection_dim > 0:
self.context_model.projection_enc.load_state_dict\
(query_proj_state_dict)
fix_query_key_value_ordering(self.context_model, checkpoint_version)
class PretrainedBertModel(MegatronModule):
"""BERT-based encoder for queries or contexts used for
learned information retrieval."""
def __init__(self, num_tokentypes=2,
parallel_output=True, pre_process=True, post_process=True):
super(PretrainedBertModel, self).__init__()
args = get_args()
tokenizer = get_tokenizer()
self.pad_id = tokenizer.pad
self.biencoder_projection_dim = args.biencoder_projection_dim
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
init_method = init_method_normal(args.init_method_std)
scaled_init_method = scaled_init_method_normal(
args.init_method_std, args.num_layers)
self.language_model, self._language_model_key = get_language_model(
num_tokentypes=num_tokentypes,
add_pooler=False,
encoder_attn_mask_type=AttnMaskType.padding,
init_method=init_method,
scaled_init_method=scaled_init_method,
pre_process=self.pre_process,
post_process=self.post_process)
if args.biencoder_projection_dim > 0:
self.projection_enc = get_linear_layer(args.hidden_size,
args.biencoder_projection_dim,
init_method)
self._projection_enc_key = 'projection_enc'
def forward(self, input_ids, attention_mask, tokentype_ids=None):
extended_attention_mask = attention_mask.unsqueeze(1)
#extended_attention_mask = bert_extended_attention_mask(attention_mask)
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids)
# This mask will be used in average-pooling and max-pooling
pool_mask = (input_ids == self.pad_id).unsqueeze(2)
# Taking the representation of the [CLS] token of BERT
pooled_output = lm_output[0, :, :]
        # Ensure the pooled output matches the language model output dtype.
        pooled_output = pooled_output.to(lm_output.dtype)
# Output.
if self.biencoder_projection_dim:
pooled_output = self.projection_enc(pooled_output)
return pooled_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(
prefix=prefix, keep_vars=keep_vars)
if self.biencoder_projection_dim > 0:
state_dict_[self._projection_enc_key] = \
self.projection_enc.state_dict(prefix=prefix,
keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
print_rank_0("loading pretrained weights")
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.biencoder_projection_dim > 0:
print_rank_0("loading projection head weights")
self.projection_enc.load_state_dict(
state_dict[self._projection_enc_key], strict=strict)
| Megatron-LM-master | megatron/model/biencoder_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""This code is copied fron NVIDIA apex:
https://github.com/NVIDIA/apex
with some changes. """
import numbers
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
import importlib
from megatron.core.utils import make_viewless_tensor
try:
from apex.contrib.layer_norm.layer_norm import FastLayerNormFN
HAVE_PERSIST_LAYER_NORM = True
except:
HAVE_PERSIST_LAYER_NORM = False
try:
from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction
except:
FusedLayerNormAffineFunction = None
global fused_layer_norm_cuda
fused_layer_norm_cuda = None
class MixedFusedLayerNorm(torch.nn.Module):
def __init__(self, normalized_shape, eps=1e-5,
no_persist_layer_norm=True,
sequence_parallel=False,
apply_layernorm_1p=False):
super(MixedFusedLayerNorm, self).__init__()
self.apply_layernorm_1p = apply_layernorm_1p
global fused_layer_norm_cuda
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
# List of hiddens sizes supported in the persistent layer norm kernel
# If the hidden size is not supported, fall back to the non-persistent
# kernel.
persist_ln_hidden_sizes = [1024, 1536, 2048, 2304, 3072, 3840, 4096,
5120, 6144, 8192, 10240, 12288, 12800, 15360, 16384, 18432, 20480,
24576, 25600, 30720, 32768, 40960, 49152, 65536]
if normalized_shape not in persist_ln_hidden_sizes or \
not HAVE_PERSIST_LAYER_NORM:
no_persist_layer_norm = True
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
self.reset_parameters()
self.no_persist_layer_norm = no_persist_layer_norm
self.sequence_parallel = sequence_parallel
# set sequence parallelism flag on weight and bias parameters
setattr(self.weight, 'sequence_parallel', self.sequence_parallel)
setattr(self.bias, 'sequence_parallel', self.sequence_parallel)
def reset_parameters(self):
if self.apply_layernorm_1p:
init.zeros_(self.weight)
init.zeros_(self.bias)
else:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
weight = self.weight + 1 if self.apply_layernorm_1p else self.weight
if self.no_persist_layer_norm:
assert FusedLayerNormAffineFunction is not None, \
"FusedLayerNormAffineFunction is not available, please install apex from https://github.com/NVIDIA/apex"
return FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.normalized_shape, self.eps)
else:
output = FastLayerNormFN.apply(input, weight, self.bias, self.eps)
# Apex's fast layer norm function outputs a 'view' tensor (i.e., has
# a populated '_base' field). This will result in schedule.py's
# deallocate_output_tensor() throwing an error, so a viewless tensor is
# created to prevent this.
output = make_viewless_tensor(inp = output,
requires_grad = input.requires_grad,
keep_graph = True)
return output
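    # Note on apply_layernorm_1p (assumed rationale, not stated in the original
    # comments): the gain is stored zero-centered (initialized to 0 in
    # reset_parameters) and shifted by +1 only at forward time, so weight decay
    # regularizes the effective gain toward 1 rather than toward 0.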
| Megatron-LM-master | megatron/model/fused_layer_norm.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Multiple choice model."""
import torch
from megatron import get_args, print_rank_last
from megatron.model.enums import AttnMaskType
from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids
from megatron.model.language_model import get_language_model
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
class MultipleChoice(MegatronModule):
def __init__(self,
config,
num_tokentypes=2,
pre_process=True,
post_process=True):
super(MultipleChoice, self).__init__(share_embeddings_and_output_weights=False)
args = get_args()
self.pre_process = pre_process
self.post_process = post_process
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=True,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process)
# Multi-choice head.
if self.post_process:
self.multichoice_dropout = torch.nn.Dropout(args.hidden_dropout)
            self.multichoice_head = get_linear_layer(args.hidden_size, 1,
                                                     init_method_normal(args.init_method_std))
self._multichoice_head_key = 'multichoice_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, model_input, attention_mask, tokentype_ids=None):
# [batch, choices, sequence] --> [batch * choices, sequence] -->
# transformer --> [batch, choices] --> softmax
# Ensure the shape is [batch-size, choices, sequence]
assert len(attention_mask.shape) == 3
num_choices = attention_mask.shape[1]
# Reshape and treat choice dimension the same as batch.
attention_mask = attention_mask.view(-1, attention_mask.size(-1))
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = model_input
# Do the same as attention_mask for input_ids, tokentype_ids
assert len(input_ids.shape) == 3
assert len(tokentype_ids.shape) == 3
input_ids = input_ids.view(-1, input_ids.size(-1))
tokentype_ids = tokentype_ids.view(-1, tokentype_ids.size(-1))
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process:
_, pooled_output = lm_output
multichoice_output = self.multichoice_dropout(pooled_output)
multichoice_logits = self.multichoice_head(multichoice_output)
# Reshape back to separate choices.
multichoice_logits = multichoice_logits.view(-1, num_choices)
return multichoice_logits
return lm_output
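    # Shape sketch (sizes are illustrative): a batch of 8 questions with 4
    # choices and 128-token sequences flows as [8, 4, 128] -> [32, 128] through
    # the transformer, the head produces [32, 1] logits, and the final view
    # restores [8, 4] for a softmax over choices.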
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process:
state_dict_[self._multichoice_head_key] \
= self.multichoice_head.state_dict(prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
if self._multichoice_head_key in state_dict:
self.multichoice_head.load_state_dict(
state_dict[self._multichoice_head_key], strict=strict)
else:
print_rank_last('***WARNING*** could not find {} in the checkpoint, '
'initializing to random'.format(
self._multichoice_head_key))
| Megatron-LM-master | megatron/model/multiple_choice.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Transformer based language model."""
import torch
import torch.nn.functional as F
from megatron import get_args
from megatron.core import mpu, tensor_parallel
from megatron.core.enums import ModelType
from megatron.core.models.common.rotary_pos_embedding import RotaryEmbedding
from .enums import AttnMaskType, LayerType
from .module import MegatronModule
from .transformer import ParallelTransformer
from .utils import get_linear_layer
from .utils import init_method_normal, scaled_init_method_normal
def parallel_lm_logits(input_, word_embeddings_weight, parallel_output,
bias=None):
"""LM logits using word embedding weights."""
args = get_args()
# Parallel logits.
if args.async_tensor_model_parallel_allreduce or\
args.sequence_parallel:
input_parallel = input_
model_parallel = mpu.get_tensor_model_parallel_world_size() > 1
async_grad_allreduce = args.async_tensor_model_parallel_allreduce and \
model_parallel and not args.sequence_parallel
else:
input_parallel = tensor_parallel.copy_to_tensor_model_parallel_region(input_)
async_grad_allreduce = False
# Matrix multiply.
logits_parallel = tensor_parallel.linear_with_grad_accumulation_and_async_allreduce(
input=input_parallel,
weight=word_embeddings_weight,
bias=bias,
gradient_accumulation_fusion=args.gradient_accumulation_fusion,
async_grad_allreduce=async_grad_allreduce,
sequence_parallel=args.sequence_parallel)
# Gather if needed.
if parallel_output:
return logits_parallel
return tensor_parallel.gather_from_tensor_model_parallel_region(logits_parallel)
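# Usage note (sketch): with parallel_output=True each tensor-parallel rank keeps
# only its shard of the vocabulary dimension (suitable for a vocab-parallel
# cross-entropy loss), while parallel_output=False gathers the full
# [s, b, padded_vocab] logits on every rank, e.g. for sampling at inference.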
def get_language_model(config, num_tokentypes, add_pooler,
encoder_attn_mask_type,
add_encoder=True,
add_decoder=False,
decoder_attn_mask_type=AttnMaskType.causal,
pre_process=True, post_process=True):
"""Build language model and return along with the key to save."""
args = get_args()
if config.init_method is None:
config.init_method = init_method_normal(config.init_method_std)
if config.output_layer_init_method is None:
config.output_layer_init_method = scaled_init_method_normal(config.init_method_std,
config.num_layers)
# Language model.
language_model = TransformerLanguageModel(
config,
encoder_attn_mask_type,
num_tokentypes=num_tokentypes,
add_encoder=add_encoder,
add_decoder=add_decoder,
decoder_attn_mask_type=decoder_attn_mask_type,
add_pooler=add_pooler,
pre_process=pre_process,
post_process=post_process
)
# key used for checkpoints.
language_model_key = 'language_model'
return language_model, language_model_key
class Pooler(MegatronModule):
"""Pooler layer.
Pool hidden states of a specific token (for example start of the
sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, hidden_size, init_method):
super(Pooler, self).__init__()
args = get_args()
self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
self.sequence_parallel = args.sequence_parallel
def forward(self, hidden_states, sequence_index=0):
# hidden_states: [s, b, h]
# sequence_index: index of the token to pool.
# gather data along sequence dimensions
# same pooler is run on all tensor parallel nodes
if self.sequence_parallel:
hidden_states = tensor_parallel.gather_from_sequence_parallel_region(
hidden_states,
tensor_parallel_output_grad=False)
pooled = hidden_states[sequence_index, :, :]
pooled = self.dense(pooled)
pooled = torch.tanh(pooled)
return pooled
class Embedding(MegatronModule):
"""Language model embeddings.
Arguments:
hidden_size: hidden size
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
init_method: weight initialization method
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
embedding_weights_in_fp32: casts word embedding weights to
fp32 before sampling. Required to
maintain reproducibility when
training in bf16.
"""
def __init__(self,
hidden_size,
vocab_size,
max_sequence_length,
embedding_dropout_prob,
config,
num_tokentypes=0,
embedding_weights_in_fp32=False):
super(Embedding, self).__init__()
self.hidden_size = hidden_size
self.init_method = config.init_method
self.num_tokentypes = num_tokentypes
args = get_args()
# Word embeddings (parallel).
self.embedding_weights_in_fp32 = embedding_weights_in_fp32
self.params_dtype = args.params_dtype
self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
vocab_size, self.hidden_size, config=config, init_method=config.init_method)
self._word_embeddings_key = 'word_embeddings'
# Position embedding (serial).
self.add_position_embedding = args.position_embedding_type == 'learned_absolute'
if self.add_position_embedding:
self.position_embeddings = torch.nn.Embedding(
max_sequence_length, self.hidden_size)
self._position_embeddings_key = 'position_embeddings'
# Initialize the position embeddings.
if args.perform_initialization:
self.init_method(self.position_embeddings.weight)
# Token type embedding.
# Add this as an optional field that can be added through
# method call so we can load a pretrain model without
# token types and add them as needed.
self._tokentype_embeddings_key = 'tokentype_embeddings'
if self.num_tokentypes > 0:
self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes,
self.hidden_size)
# Initialize the token-type embeddings.
if args.perform_initialization:
self.init_method(self.tokentype_embeddings.weight)
else:
self.tokentype_embeddings = None
self.fp32_residual_connection = args.fp32_residual_connection
self.sequence_parallel = args.sequence_parallel
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
def zero_parameters(self):
"""Zero out all parameters in embedding."""
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
if self.add_position_embedding:
self.position_embeddings.weight.data.fill_(0)
self.position_embeddings.weight.shared = True
if self.num_tokentypes > 0:
self.tokentype_embeddings.weight.data.fill_(0)
self.tokentype_embeddings.weight.shared = True
def add_tokentype_embeddings(self, num_tokentypes):
"""Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding.
"""
if self.tokentype_embeddings is not None:
raise Exception('tokentype embeddings is already initialized')
if torch.distributed.get_rank() == 0:
print('adding embedding for {} tokentypes'.format(num_tokentypes),
flush=True)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes,
self.hidden_size)
# Initialize the token-type embeddings.
args = get_args()
self.init_method(self.tokentype_embeddings.weight)
def forward(self, input_ids, position_ids, tokentype_ids=None):
# Embeddings.
if self.embedding_weights_in_fp32:
self.word_embeddings = self.word_embeddings.to(torch.float32)
words_embeddings = self.word_embeddings(input_ids)
if self.embedding_weights_in_fp32:
words_embeddings = words_embeddings.to(self.params_dtype)
self.word_embeddings = self.word_embeddings.to(self.params_dtype)
if self.add_position_embedding:
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
else:
embeddings = words_embeddings
if tokentype_ids is not None:
assert self.tokentype_embeddings is not None
embeddings = embeddings + self.tokentype_embeddings(tokentype_ids)
else:
assert self.tokentype_embeddings is None
        # Data format change to avoid explicit transposes: [b s h] --> [s b h].
embeddings = embeddings.transpose(0, 1).contiguous()
        # If the input flag for fp32 residual connection is set, convert to float.
if self.fp32_residual_connection:
embeddings = embeddings.float()
# Dropout.
if self.sequence_parallel:
embeddings = tensor_parallel.scatter_to_sequence_parallel_region(embeddings)
with tensor_parallel.get_cuda_rng_tracker().fork():
embeddings = self.embedding_dropout(embeddings)
else:
embeddings = self.embedding_dropout(embeddings)
return embeddings
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._word_embeddings_key] \
= self.word_embeddings.state_dict(prefix=prefix,
keep_vars=keep_vars)
if self.add_position_embedding:
state_dict_[self._position_embeddings_key] \
= self.position_embeddings.state_dict(prefix=prefix,
keep_vars=keep_vars)
if self.num_tokentypes > 0:
state_dict_[self._tokentype_embeddings_key] \
= self.tokentype_embeddings.state_dict(prefix=prefix,
keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Word embedding.
if self._word_embeddings_key in state_dict:
state_dict_ = state_dict[self._word_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'word_embeddings' in key:
state_dict_[key.split('word_embeddings.')[1]] \
= state_dict[key]
self.word_embeddings.load_state_dict(state_dict_, strict=strict)
# Position embedding.
if self.add_position_embedding:
if self._position_embeddings_key in state_dict:
state_dict_ = state_dict[self._position_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'position_embeddings' in key:
state_dict_[key.split('position_embeddings.')[1]] \
= state_dict[key]
self.position_embeddings.load_state_dict(state_dict_, strict=strict)
# Tokentype embedding.
if self.num_tokentypes > 0:
state_dict_ = {}
if self._tokentype_embeddings_key in state_dict:
state_dict_ = state_dict[self._tokentype_embeddings_key]
else:
# for backward compatibility.
for key in state_dict.keys():
if 'tokentype_embeddings' in key:
state_dict_[key.split('tokentype_embeddings.')[1]] \
= state_dict[key]
if len(state_dict_.keys()) > 0:
self.tokentype_embeddings.load_state_dict(state_dict_,
strict=strict)
else:
print('***WARNING*** expected tokentype embeddings in the '
'checkpoint but could not find it', flush=True)
class TransformerLanguageModel(MegatronModule):
"""Transformer language model.
Arguments:
transformer_hparams: transformer hyperparameters
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
"""
def __init__(self,
config,
encoder_attn_mask_type,
num_tokentypes=0,
add_encoder=True,
add_decoder=False,
decoder_attn_mask_type=AttnMaskType.causal,
add_pooler=False,
pre_process=True,
post_process=True):
args = get_args()
# TODO: passing share_embeddings_and_output_weights=False will not work correctly for T5 and embeddings will not be synced. Fix later for T5.
if args.untie_embeddings_and_output_weights: assert not add_decoder
super(TransformerLanguageModel, self).__init__(share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights)
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = config.hidden_size
self.num_tokentypes = num_tokentypes
self.init_method = config.init_method
self.add_encoder = add_encoder
self.encoder_attn_mask_type = encoder_attn_mask_type
self.add_decoder = add_decoder
self.decoder_attn_mask_type = decoder_attn_mask_type
self.add_pooler = add_pooler
self.encoder_hidden_state = None
self.add_retriever = args.retro_add_retriever
self.untie_embeddings_and_output_weights = args.untie_embeddings_and_output_weights
# Embeddings.
if self.pre_process:
self.embedding = Embedding(self.hidden_size,
args.padded_vocab_size,
args.max_position_embeddings,
args.hidden_dropout,
config,
self.num_tokentypes,
args.embedding_weights_in_fp32)
self._embedding_key = 'embedding'
# Rotary positional embeddings
self.use_rotary_position_embeddings = \
args.position_embedding_type == 'rope'
if self.use_rotary_position_embeddings:
self.seq_length = args.seq_length
rotary_dim = args.hidden_size // args.num_attention_heads \
if args.kv_channels is None else args.kv_channels
if args.rotary_percent < 1.0:
rotary_dim = int(rotary_dim * args.rotary_percent)
# partial rotary embeddings, which is better than full rotary
# Wang and Komatsuzaki et al
# https://github.com/kingoflolz/mesh-transformer-jax/
self.rotary_pos_emb = RotaryEmbedding(
rotary_dim,
seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor
)
# Encoder (usually set to True, False if part of an encoder-decoder
# architecture and in encoder-only stage).
if self.add_encoder:
self.encoder = ParallelTransformer(
config,
model_type=args.model_type if not args.retro_add_retriever \
else ModelType.retro_decoder,
self_attn_mask_type=self.encoder_attn_mask_type,
pre_process=self.pre_process,
post_process=self.post_process,
)
self._encoder_key = 'encoder'
else:
self.encoder = None
# Decoder (usually set to False, True if part of an encoder-decoder
# architecture and in decoder-only stage).
if self.add_decoder:
self.decoder = ParallelTransformer(
config,
model_type=args.model_type,
layer_type=LayerType.decoder,
self_attn_mask_type=self.decoder_attn_mask_type,
pre_process=self.pre_process,
post_process=self.post_process)
self._decoder_key = 'decoder'
else:
self.decoder = None
if self.post_process:
# Pooler.
if self.add_pooler:
self.pooler = Pooler(self.hidden_size, self.init_method)
self._pooler_key = 'pooler'
if self.untie_embeddings_and_output_weights:
self.output_layer = tensor_parallel.ColumnParallelLinear(
args.hidden_size,
args.padded_vocab_size,
config=config,
init_method=self.init_method,
bias=False) # Setting bias to False always to keep it consistent with embedding tying that also does not have a bias.
self._output_layer_key = 'output_layer'
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
if self.add_encoder and self.add_decoder:
assert len(input_tensor) == 1, \
'input_tensor should only be length 1 for stage with both encoder and decoder'
self.encoder.set_input_tensor(input_tensor[0])
elif self.add_encoder:
assert len(input_tensor) == 1, \
'input_tensor should only be length 1 for stage with only encoder'
self.encoder.set_input_tensor(input_tensor[0])
elif self.add_decoder:
if len(input_tensor) == 2:
self.decoder.set_input_tensor(input_tensor[0])
self.encoder_hidden_state = input_tensor[1]
elif len(input_tensor) == 1:
self.decoder.set_input_tensor(None)
self.encoder_hidden_state = input_tensor[0]
else:
raise Exception('input_tensor must have either length 1 or 2')
else:
raise Exception('Stage must have at least either encoder or decoder')
def forward(self, enc_input_ids, enc_position_ids, enc_attn_mask,
dec_input_ids=None, dec_position_ids=None, dec_attn_mask=None,
retriever_input_ids=None,
retriever_position_ids=None,
retriever_attn_mask=None,
enc_dec_attn_mask=None, tokentype_ids=None,
inference_params=None,
pooling_sequence_index=0,
enc_hidden_states=None, output_enc_hidden=False):
# Encoder embedding.
if self.pre_process:
encoder_input = self.embedding(enc_input_ids, enc_position_ids,
tokentype_ids=tokentype_ids)
else:
encoder_input = None
# Retriever embedding.
if self.add_retriever and self.pre_process:
retriever_input = self.embedding(retriever_input_ids,
retriever_position_ids,
tokentype_ids=tokentype_ids)
else:
retriever_input = None
# Rotary positional embeddings
rotary_pos_emb = None
if self.use_rotary_position_embeddings:
if inference_params is not None:
rotary_pos_emb = \
self.rotary_pos_emb(inference_params.max_sequence_length)
else:
rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
# Run encoder.
if enc_hidden_states is None:
if self.encoder is not None:
encoder_output = self.encoder(
encoder_input,
enc_attn_mask,
retriever_input=retriever_input,
retriever_attn_mask=retriever_attn_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb)
else:
encoder_output = self.encoder_hidden_state
else:
encoder_output = enc_hidden_states.to(encoder_input.dtype)
if self.post_process:
if self.add_pooler:
pooled_output = self.pooler(encoder_output,
pooling_sequence_index)
# output_enc_hidden refers to when we just need the encoder's
# output. For example, it is helpful to compute
# similarity between two sequences by average pooling
if not self.add_decoder or output_enc_hidden:
if self.add_pooler and self.post_process:
return encoder_output, pooled_output
else:
return encoder_output
# Decoder embedding.
if self.pre_process:
decoder_input = self.embedding(dec_input_ids,
dec_position_ids)
else:
decoder_input = None
# Run decoder.
decoder_output = self.decoder(
decoder_input,
dec_attn_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb)
if self.add_pooler and self.post_process:
return decoder_output, encoder_output, pooled_output
else:
return decoder_output, encoder_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load."""
state_dict_ = {}
if self.pre_process:
state_dict_[self._embedding_key] \
= self.embedding.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.add_encoder:
state_dict_[self._encoder_key] \
= self.encoder.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process:
if self.add_pooler:
state_dict_[self._pooler_key] \
= self.pooler.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.untie_embeddings_and_output_weights:
state_dict_[self._output_layer_key] \
= self.output_layer.state_dict(prefix=prefix, keep_vars=keep_vars)
if self.add_decoder:
state_dict_[self._decoder_key] \
= self.decoder.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Embedding.
if self.pre_process:
if self._embedding_key in state_dict:
state_dict_ = state_dict[self._embedding_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if '_embeddings' in key:
state_dict_[key] = state_dict[key]
self.embedding.load_state_dict(state_dict_, strict=strict)
# Encoder.
if self.add_encoder:
if self._encoder_key in state_dict:
state_dict_ = state_dict[self._encoder_key]
# For backward compatibility.
elif 'transformer' in state_dict:
state_dict_ = state_dict['transformer']
else:
# For backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'transformer.' in key:
state_dict_[key.split('transformer.')[1]] = state_dict[key]
# For backward compatibility.
state_dict_self_attention = {}
for key in state_dict_.keys():
if '.attention.' in key:
state_dict_self_attention[key.replace(".attention.",
".self_attention.")] = state_dict_[key]
else:
state_dict_self_attention[key] = state_dict_[key]
state_dict_ = state_dict_self_attention
self.encoder.load_state_dict(state_dict_, strict=strict)
# Pooler.
if self.post_process:
if self.add_pooler:
assert 'pooler' in state_dict, \
'could not find data for pooler in the checkpoint'
self.pooler.load_state_dict(state_dict[self._pooler_key],
strict=strict)
if self.untie_embeddings_and_output_weights:
assert 'output_layer' in state_dict, \
'could not find data for output_layer in the checkpoint'
self.output_layer.load_state_dict(state_dict[self._output_layer_key],
strict=strict)
# Decoder.
if self.add_decoder:
assert 'decoder' in state_dict, \
'could not find data for decoder in the checkpoint'
self.decoder.load_state_dict(state_dict[self._decoder_key],
strict=strict)
|
Megatron-LM-master
|
megatron/model/language_model.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import enum
class LayerType(enum.Enum):
encoder = 1
decoder = 2
retro_encoder = 3
retro_decoder = 4
retro_decoder_with_retriever = 5
class AttnType(enum.Enum):
self_attn = 1
cross_attn = 2
class AttnMaskType(enum.Enum):
padding = 1
causal = 2
# For backward compatibility with old model checkpoints
from megatron.core.enums import ModelType
|
Megatron-LM-master
|
megatron/model/enums.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
import torch.nn as nn
from megatron.model.enums import AttnMaskType
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs the following three operations in sequence:
1. Scale the tensor.
2. Apply upper triangular mask (typically used in gpt models).
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, scale):
import scaled_upper_triang_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_upper_triang_masked_softmax_cuda.forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_upper_triang_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_upper_triang_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None
class ScaledMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs the following three operations in sequence:
1. Scale the tensor.
2. Apply the mask.
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, mask, scale):
import scaled_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
class ScaledSoftmax(torch.autograd.Function):
"""
Fused operation which performs the following two operations in sequence:
1. Scale the tensor.
2. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, scale):
import scaled_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_softmax_cuda.forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
class FusedScaleMaskSoftmax(nn.Module):
"""
fused operation: scaling + mask + softmax
Arguments:
input_in_fp16: flag to indicate if the input is in fp16 data format.
input_in_bf16: flag to indicate if the input is in bf16 data format.
attn_mask_type: attention mask type (pad or causal)
scaled_masked_softmax_fusion: flag to indicate whether to use the fused softmax kernel
mask_func: mask function to be applied.
softmax_in_fp32: if true, softmax is performed at fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def __init__(
self,
input_in_fp16,
input_in_bf16,
attn_mask_type,
scaled_masked_softmax_fusion,
mask_func,
softmax_in_fp32,
scale,
):
super(FusedScaleMaskSoftmax, self).__init__()
self.input_in_fp16 = input_in_fp16
self.input_in_bf16 = input_in_bf16
assert not (
self.input_in_fp16 and self.input_in_bf16
), "both fp16 and bf16 flags cannot be active at the same time."
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
self.attn_mask_type = attn_mask_type
self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.scale = scale
assert (
self.scale is None or softmax_in_fp32
), "softmax should be in fp32 when scaled"
def forward(self, input, mask):
# [b, np, sq, sk]
assert input.dim() == 4
if self.is_kernel_available(mask, *input.size()):
return self.forward_fused_softmax(input, mask)
else:
return self.forward_torch_softmax(input, mask)
def is_kernel_available(self, mask, b, np, sq, sk):
attn_batches = b * np
if (
self.scaled_masked_softmax_fusion # user wants to fuse
and self.input_in_float16 # input must be fp16
and 16 < sk <= 16384 # sk must be 16 ~ 16384
and sq % 4 == 0 # sq must be divisible by 4
and sk % 4 == 0 # sk must be divisible by 4
and attn_batches % 4 == 0 # np * b must be divisible by 4
):
if 0 <= sk <= 16384:
batch_per_block = self.get_batch_per_block(sq, sk, b, np)
if self.attn_mask_type == AttnMaskType.causal:
if attn_batches % batch_per_block == 0:
return True
else:
if sq % batch_per_block == 0:
return True
return False
def forward_fused_softmax(self, input, mask):
b, np, sq, sk = input.size()
scale = self.scale if self.scale is not None else 1.0
if self.attn_mask_type == AttnMaskType.causal:
assert sq == sk, "causal mask is only for self attention"
# input is 3D tensor (attn_batches, sq, sk)
input = input.view(-1, sq, sk)
probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)
return probs.view(b, np, sq, sk)
else:
# input is 4D tensor (b, np, sq, sk)
if mask is not None:
return ScaledMaskedSoftmax.apply(input, mask, scale)
else:
return ScaledSoftmax.apply(input, scale)
def forward_torch_softmax(self, input, mask):
if self.input_in_float16 and self.softmax_in_fp32:
input = input.float()
if self.scale is not None:
input = input * self.scale
mask_output = self.mask_func(input, mask) if mask is not None else input
probs = torch.nn.Softmax(dim=-1)(mask_output)
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = probs.half()
else:
probs = probs.bfloat16()
return probs
@staticmethod
def get_batch_per_block(sq, sk, b, np):
import scaled_masked_softmax_cuda
return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np)
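# The snippet below is an illustrative addition, not part of the original file.
# It sketches the unfused fallback path (forward_torch_softmax above) with plain
# PyTorch: scale, mask-fill with -10000.0 (mirroring attention_mask_func used
# elsewhere in this repo), then a softmax over the key dimension.
def _illustrate_torch_softmax_fallback():
    import torch
    scores = torch.randn(2, 4, 8, 8)                  # [b, np, sq, sk]
    mask = torch.zeros(2, 1, 8, 8, dtype=torch.bool)  # True = masked out
    scores = scores * 1.0                             # optional scale
    scores = scores.masked_fill(mask, -10000.0)
    probs = torch.nn.Softmax(dim=-1)(scores)
    assert probs.shape == (2, 4, 8, 8)
    return probs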
|
Megatron-LM-master
|
megatron/model/fused_softmax.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Classification model."""
import torch
from megatron import get_args, print_rank_last
from megatron.model.enums import AttnMaskType
from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids
from megatron.model.language_model import get_language_model
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
class Classification(MegatronModule):
def __init__(self,
config,
num_classes,
num_tokentypes=2,
pre_process=True,
post_process=True):
super().__init__(config=config, share_embeddings_and_output_weights=False)
args = get_args()
self.num_classes = num_classes
self.pre_process = pre_process
self.post_process = post_process
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=True,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process)
# Multi-choice head.
if self.post_process:
self.classification_dropout = torch.nn.Dropout(args.hidden_dropout)
self.classification_head = get_linear_layer(args.hidden_size,
self.num_classes,
config.init_method)
self._classification_head_key = 'classification_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, model_input, attention_mask, tokentype_ids=None):
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = model_input
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process:
_, pooled_output = lm_output
classification_output = self.classification_dropout(pooled_output)
classification_logits = self.classification_head(classification_output)
# Reshape back to separate choices.
classification_logits = classification_logits.view(-1, self.num_classes)
return classification_logits
return lm_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process:
state_dict_[self._classification_head_key] \
= self.classification_head.state_dict(prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
if self._classification_head_key in state_dict:
self.classification_head.load_state_dict(
state_dict[self._classification_head_key], strict=strict)
else:
print_rank_last('***WARNING*** could not find {} in the checkpoint, '
'initializing to random'.format(
self._classification_head_key))
|
Megatron-LM-master
|
megatron/model/classification.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""BERT model."""
import torch
from megatron import get_args
from megatron.core import tensor_parallel
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits
from megatron.model.language_model import get_language_model
from megatron.model.utils import get_norm
from megatron.model.utils import openai_gelu, erf_gelu
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
def bert_extended_attention_mask(attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = (extended_attention_mask < 0.5)
return extended_attention_mask
def bert_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
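# Illustrative, self-contained example (not part of the original file) of the two
# helpers above: a 2D padding mask [b, s] becomes a boolean [b, 1, s, s] mask in
# which True marks positions that must be hidden from attention, and position ids
# are simply 0..s-1 broadcast over the batch.
def _illustrate_bert_mask_and_positions():
    import torch
    attention_mask = torch.tensor([[1, 1, 1, 0],
                                   [1, 1, 0, 0]])      # [b=2, s=4]
    b1s = attention_mask.unsqueeze(1)                  # [b, 1, s]
    bs1 = attention_mask.unsqueeze(2)                  # [b, s, 1]
    extended = (b1s * bs1).unsqueeze(1) < 0.5          # [b, 1, s, s], True = masked
    position_ids = torch.arange(4).unsqueeze(0).expand_as(attention_mask)
    assert extended.shape == (2, 1, 4, 4) and position_ids.shape == (2, 4)
    return extended, position_ids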
class BertLMHead(MegatronModule):
"""Masked LM head for Bert
Arguments:
config: TransformerConfig object
mpu_vocab_size: model parallel size of vocabulary.
parallel_output: whether the output logits are kept distributed or gathered.
"""
def __init__(self, mpu_vocab_size, config, parallel_output):
super().__init__(config=config)
args = get_args()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
tensor_parallel.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
self.parallel_output = parallel_output
self.dense = get_linear_layer(config.hidden_size, config.hidden_size, config.init_method)
setattr(self.dense.weight, 'sequence_parallel', config.sequence_parallel)
setattr(self.dense.bias, 'sequence_parallel', config.sequence_parallel)
self.norm = get_norm(config)
self.gelu = torch.nn.functional.gelu
if args.openai_gelu:
self.gelu = openai_gelu
elif args.onnx_safe:
self.gelu = erf_gelu
def forward(self, hidden_states, word_embeddings_weight):
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.norm(hidden_states)
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
def load_state_dict(self, state_dict, strict=True):
"""Customize load."""
# Handle renaming layernorm -> norm in component names
state_dict_ = {}
for key in state_dict.keys():
newkey = key.replace("layernorm", "norm")
state_dict_[newkey] = state_dict[key]
super().load_state_dict(state_dict_, strict)
def post_language_model_processing(lm_output, pooled_output,
lm_head, binary_head,
lm_labels,
logit_weights,
fp16_lm_cross_entropy):
# Output.
lm_logits = lm_head(
lm_output, logit_weights)
binary_logits = None
if binary_head is not None:
binary_logits = binary_head(pooled_output)
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0,1).contiguous(), binary_logits
else:
# [b s] => [s b]
lm_labels = lm_labels.transpose(0,1).contiguous()
# lm_logits : [s, b, h] and lm_labels: [s, b]
if fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
# [s, b] => [b s]
lm_loss = lm_loss.transpose(0,1).contiguous()
return lm_loss, binary_logits
class BertModel(MegatronModule):
"""Bert Language model."""
def __init__(self,
config,
num_tokentypes=2,
add_binary_head=True,
parallel_output=True,
pre_process=True,
post_process=True):
super().__init__(config=config)
args = get_args()
# TODO this option is not yet implemented in BERT
assert args.untie_embeddings_and_output_weights is False
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.add_binary_head = add_binary_head
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.return_embeddings = args.output_bert_embeddings
if self.return_embeddings:
assert self.post_process and self.add_binary_head
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=self.add_binary_head,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings()
if self.post_process:
self.lm_head = BertLMHead(self.shared_embedding_or_output_weight().size(0), config, parallel_output)
self._lm_head_key = 'lm_head'
self.binary_head = None
if self.add_binary_head:
self.binary_head = get_linear_layer(config.hidden_size, 2,
config.init_method)
self._binary_head_key = 'binary_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, bert_model_input, attention_mask,
tokentype_ids=None, lm_labels=None):
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = bert_model_input
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process and self.add_binary_head:
lm_output, pooled_output = lm_output
# Return pooled output (e.g., when computing Bert embeddings).
if self.return_embeddings:
# Sum attention mask.
embeddings = torch.transpose(lm_output, 0, 1)
masks = torch.sum(attention_mask, dim=1)
# Collect masked embeddings.
output = torch.zeros(
size=(embeddings.shape[0], embeddings.shape[2]),
dtype=torch.float32,
device=torch.cuda.current_device())
for i, (embedding, mask) in enumerate(zip(embeddings, masks)):
output[i, :] = torch.mean(embedding[1: mask - 1], dim=0)
return output
else:
pooled_output = None
if self.post_process:
return post_language_model_processing(lm_output, pooled_output,
self.lm_head, self.binary_head,
lm_labels,
self.shared_embedding_or_output_weight(),
self.fp16_lm_cross_entropy)
else:
return lm_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process and self.add_binary_head:
state_dict_[self._binary_head_key] \
= self.binary_head.state_dict(prefix=prefix, keep_vars=keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
self.lm_head.load_state_dict(
state_dict[self._lm_head_key], strict=strict)
if self.post_process and self.add_binary_head:
self.binary_head.load_state_dict(
state_dict[self._binary_head_key], strict=strict)
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
|
Megatron-LM-master
|
megatron/model/bert_model.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .fused_layer_norm import MixedFusedLayerNorm as LayerNorm
from .rms_norm import RMSNorm
from .distributed import DistributedDataParallel
from .bert_model import BertModel
from .gpt_model import GPTModel
from .t5_model import T5Model
from .language_model import get_language_model
from .module import Float16Module
|
Megatron-LM-master
|
megatron/model/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
###### BIAS GELU FUSION/ NO AUTOGRAD ################
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, bias, y):
x = bias + y
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
return ff*g
class GeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input, bias):
ctx.save_for_backward(input, bias)
return bias_gelu(bias, input)
@staticmethod
def backward(ctx, grad_output):
input, bias = ctx.saved_tensors
tmp = bias_gelu_back(grad_output, bias, input)
return tmp, tmp
bias_gelu_impl = GeLUFunction.apply
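# Illustrative self-check (not part of the original file): the tanh formula used in
# bias_gelu above should match PyTorch's own tanh-approximated GELU. Assumes a
# PyTorch version that supports F.gelu(..., approximate='tanh').
def _illustrate_bias_gelu_matches_torch():
    import torch
    import torch.nn.functional as F
    x = torch.randn(16)
    bias = torch.zeros(16)
    approx = bias_gelu(bias, x)
    reference = F.gelu(x + bias, approximate='tanh')
    assert torch.allclose(approx, reference, atol=1e-5)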
|
Megatron-LM-master
|
megatron/model/fused_bias_gelu.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import math
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Dict, List
import torch
from megatron.core import mpu
from .module import MegatronModule
class MemoryBuffer:
def __init__(self, numel: int, numel_padded: int, dtype: torch.dtype):
self.numel = numel
self.numel_padded = numel_padded
self.dtype = dtype
self.data = torch.zeros(
self.numel_padded,
dtype=self.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
def zero(self):
"""Reset the buffer to zero."""
self.data.zero_()
def get(self, shape: torch.Size, start_index: int) -> torch.Tensor:
"""Return a tensor with the input `shape` as a view into the
1-D data starting at `start_index`."""
end_index = start_index + shape.numel()
assert end_index <= self.numel, 'Requested tensor is out of buffer range'
buffer_tensor = self.data[start_index:end_index]
buffer_tensor = buffer_tensor.view(shape)
return buffer_tensor
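# Tiny illustration (not part of the original file) of the aliasing that
# MemoryBuffer.get relies on: the returned tensor is a view into the flat buffer,
# so writes through the view are visible in the underlying storage.
def _illustrate_buffer_view_aliasing():
    import torch
    flat = torch.zeros(12)
    view = flat[0:6].view(torch.Size([2, 3]))  # shares storage with `flat`
    view.fill_(1.0)
    assert flat[:6].sum().item() == 6.0 and flat[6:].sum().item() == 0.0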
class Bucket:
"""
Bucket to all-reduce gradients for a set of parameters asynchronously. Provides
functionality to register when params in the bucket have grads available, and
automatically launches an asynchronous all_reduce when _all_ params in the bucket
have grads available.
"""
def __init__(
self,
params: List[torch.nn.Parameter],
data: torch.Tensor,
data_parallel_group: torch.distributed.ProcessGroup,
overlap_grad_reduce: bool,
):
# State for bookkeeping: params is the set of parameters this bucket is
# responsible for, params_with_grad is the set of parameters with grads
# available.
self.params_list = params
self.params = set(params)
self.params_with_grad = set()
self.data = data
self.data_parallel_group = data_parallel_group
self.overlap_grad_reduce = overlap_grad_reduce
self.data_parallel_size = torch.distributed.get_world_size(group=data_parallel_group)
self.reset()
def reset(self):
self.params_with_grad = set()
self.allreduce_handle = None
self.allreduce_issued = False
def all_reduce(self):
assert (
self.allreduce_handle is None and not self.allreduce_issued
), 'Should not have multiple all-reduces in flight at once'
self.data /= self.data_parallel_size
self.allreduce_handle = torch.distributed.all_reduce(
self.data, group=self.data_parallel_group, async_op=self.overlap_grad_reduce
) # Use async_op only when overlap_grad_reduce is True.
self.allreduce_issued = True
def set(self, param: torch.nn.Parameter):
assert param in self.params, 'Param is not in the bucket'
assert param not in self.params_with_grad, 'Cannot set grad twice'
assert self.overlap_grad_reduce, 'set() should be called only when overlapping grad reduce'
self.params_with_grad.add(param)
# If all params in bucket have grads available, issue all-reduce.
if len(self.params_with_grad) == len(self.params):
self.all_reduce()
def done(self):
# If not overlapping grad reduce, issue synchronous all-reduce here.
if not self.overlap_grad_reduce:
self.all_reduce()
return
assert self.allreduce_handle is not None and self.allreduce_issued, (
f'All-reduce is not issued for this bucket, '
f'only {len(self.params_with_grad)}/{len(self.params)} params with grad'
)
self.allreduce_handle.wait()
class GradBuffer(MemoryBuffer):
"""
Groups gradients into a contiguous buffer, and then breaks them into buckets with
roughly bucket_size parameters each.
"""
def __init__(
self,
numel: int,
numel_padded: int,
dtype: torch.dtype,
params: List[torch.nn.Parameter],
data_parallel_group: torch.distributed.ProcessGroup,
bucket_size: int,
param_to_name: Dict[torch.nn.Parameter, str],
overlap_grad_reduce: bool,
):
super(GradBuffer, self).__init__(numel, numel_padded, dtype)
self.buckets = []
self.param_to_bucket = {}
self.overlap_grad_reduce = overlap_grad_reduce
self.is_last_microbatch = True
# Check that params are unique.
unique_params = set()
for param in params:
assert param not in unique_params
unique_params.add(param)
del unique_params
# Helper function to create new bucket, add it to list of buckets, and
# also update param->bucket mapping.
def set_bucket_(
bucket_params: List[torch.nn.Parameter], data_start_index: int, data_end_index: int
):
# Get appropriate view into global GradBuffer.
bucket_data = self.get(
torch.Size([data_end_index - data_start_index]), data_start_index
)
bucket = Bucket(bucket_params, bucket_data, data_parallel_group, overlap_grad_reduce)
self.buckets.append(bucket)
for bucket_param in bucket_params:
self.param_to_bucket[bucket_param] = bucket
# Map the grads to the buffer and bucket them.
data_start_index = 0
bucket_data_start_index = data_start_index
bucket_params = set()
# Iterate through parameters in reverse order to roughly follow backprop order.
for param in params[::-1]:
# Skip parameters that don't require gradients.
if not param.requires_grad:
continue
this_numel = param.data.nelement()
data_end_index = data_start_index + this_numel
param.main_grad = self.get(param.data.shape, data_start_index)
bucket_params.add(param)
# If we have enough elements already, form a new bucket.
# If bucket_size is None, accumulate everything into a single bucket.
if bucket_size is not None:
if (data_end_index - bucket_data_start_index) >= bucket_size:
set_bucket_(bucket_params, bucket_data_start_index, data_end_index)
bucket_data_start_index = data_end_index
bucket_params = set()
data_start_index = data_end_index
# Add remaining params to a new bucket.
if len(bucket_params) > 0:
set_bucket_(bucket_params, bucket_data_start_index, data_end_index)
if not overlap_grad_reduce:
assert len(bucket_params) == len(
params
), 'All params should be in one bucket when overlap_grad_reduce is False'
# Print buckets.
if torch.distributed.get_rank() == 0:
print('> buckets for gradient all-reduce:')
for index, bucket in enumerate(self.buckets):
print(f' params for bucket {index+1}')
numel = 0
for param in bucket.params:
numel += param.data.nelement()
print(f' {param_to_name[param]}')
print(f' total number of elements: {numel}')
def reset(self):
"""Set the data to zero and reset all buckets."""
self.zero()
for bucket in self.buckets:
bucket.reset()
self.is_last_microbatch = True
def done(self):
"""Wait for all buckets' all-reductions to complete."""
for bucket in self.buckets:
bucket.done()
def mark_grad_as_done(self, param: torch.nn.Parameter):
"""
When the number of microbatches is greater than 1, we only want
to register grads when processing the last microbatch and
overlap_grad_reduce is True.
"""
assert (
self.overlap_grad_reduce
), 'mark_grad_as_done() should only be called when overlap_grad_reduce is True'
if self.is_last_microbatch:
bucket = self.param_to_bucket[param]
bucket.set(param)
class DistributedDataParallelBase(MegatronModule, ABC):
"""Abstract class for DDP."""
def __init__(self, module):
super(DistributedDataParallelBase, self).__init__()
# Keep a pointer to the model.
self.module = module
@abstractmethod
def allreduce_gradients(self):
pass
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def state_dict(self, prefix='', keep_vars=False):
return self.module.state_dict(prefix=prefix, keep_vars=keep_vars)
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
return self.module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars)
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
class DistributedDataParallel(DistributedDataParallelBase):
"""
DDP wrapper which stores grads in contiguous buffers. Also has option of
overlapping all-reduce with computation by breaking up full model's
gradients into smaller buckets and running all-reduce on each bucket
asynchronously.
This class:
- has the potential to reduce memory fragmentation.
- provides the option to do the gradient accumulation
in a type other than the params type (e.g., fp32).
Arguments:
module: input model.
data_parallel_group: data-parallel group.
accumulate_allreduce_grads_in_fp32: if true do the gradient accumulation
and the gradient all-reduce in float32.
overlap_grad_reduce: if true, overlap all-reduce with computation by
breaking up grads into buckets. If false, single synchronous all-reduce
is used instead.
"""
def __init__(
self,
module: torch.nn.Module,
data_parallel_group: torch.distributed.ProcessGroup,
accumulate_allreduce_grads_in_fp32: bool,
overlap_grad_reduce: bool,
bucket_size: int = 40000000,
):
super(DistributedDataParallel, self).__init__(module)
# Use a single bucket (bucket_size=None) if overlap_grad_reduce is False.
self.overlap_grad_reduce = overlap_grad_reduce
if not self.overlap_grad_reduce:
bucket_size = None
self.module = module
self.grad_buffers = {}
self.grad_buffer_param_index_map = {}
self.param_to_grad_buffer = {}
# Group parameters by their gradient type.
grad_dtype_to_params = {}
grad_dtype_to_numel = {}
param_to_name = {}
for name, param in self.module.named_parameters():
if param.requires_grad:
param.grad_added_to_main_grad = False
param_to_name[param] = name
dtype = torch.float if accumulate_allreduce_grads_in_fp32 else param.dtype
params = grad_dtype_to_params.get(dtype, [])
params.append(param)
grad_dtype_to_params[dtype] = params
# Calculate number of elements per dtype.
grad_dtype_to_numel[dtype] = (
grad_dtype_to_numel.get(dtype, 0) + param.data.nelement()
)
# Allocate the grad buffers and map the grads.
# The grad buffer under the hood creates buckets as appropriate, depending on
# whether overlap_grad_reduce is True or not.
data_parallel_size = torch.distributed.get_world_size(group=data_parallel_group)
for dtype, params in grad_dtype_to_params.items():
# Pad so size is divisible by the data parallel size.
numel = grad_dtype_to_numel[dtype]
numel_padded = int(math.ceil(numel / data_parallel_size)) * data_parallel_size
self.grad_buffers[dtype] = GradBuffer(
numel,
numel_padded,
dtype,
params,
data_parallel_group,
bucket_size,
param_to_name,
self.overlap_grad_reduce,
)
# Parameters are laid out in the corresponding grad_buffer in reverse
# order, so count indices from the back.
index = grad_dtype_to_numel[dtype]
for param in params:
self.param_to_grad_buffer[param] = self.grad_buffers[dtype]
if dtype not in self.grad_buffer_param_index_map:
self.grad_buffer_param_index_map[dtype] = {}
index -= param.data.nelement()
self.grad_buffer_param_index_map[dtype][param] = (
index,
index + param.data.nelement(),
)
# Register backward hook.
# Accumulation functions for the gradients need to be stored so they
# don't go out of scope.
self.grad_accs = []
for param in self.module.parameters():
if param.requires_grad:
# Expand so we get access to grad_fn.
param_tmp = param.expand_as(param)
# Get the gradient accumulator function.
grad_acc = param_tmp.grad_fn.next_functions[0][0]
grad_acc.register_hook(self._make_param_hook(param, self.param_to_grad_buffer))
self.grad_accs.append(grad_acc)
def _make_param_hook(
self, param: torch.nn.Parameter, param_to_grad_buffer: Dict[torch.nn.Parameter, GradBuffer]
):
"""Create the all-reduce hook for backprop."""
def param_hook(*unused):
if param.requires_grad:
if self.overlap_grad_reduce:
assert (
param.grad is not None
), 'param.grad being None is not safe when overlap_grad_reduce is True'
if param.grad is not None and not param.grad_added_to_main_grad:
param.main_grad.add_(param.grad.data)
param.grad = None
if self.overlap_grad_reduce:
param_to_grad_buffer[param].mark_grad_as_done(param)
return param_hook
@contextmanager
def no_sync(self):
"""Context manager that turns off gradient synchronization."""
for grad_buffer in self.grad_buffers.values():
grad_buffer.is_last_microbatch = False
try:
yield
finally:
for grad_buffer in self.grad_buffers.values():
grad_buffer.is_last_microbatch = True
def zero_grad_buffer(self):
"""Set the grad buffer data to zero. Needs to be called at the
beginning of each iteration."""
for param in self.module.parameters():
if param.requires_grad:
param.grad_added_to_main_grad = False
for grad_buffer in self.grad_buffers.values():
grad_buffer.reset()
def broadcast_params(self):
"""Sync params across all DP ranks."""
for param in self.module.parameters():
torch.distributed.broadcast(
param.data,
src=mpu.get_data_parallel_src_rank(),
group=mpu.get_data_parallel_group(),
)
def allreduce_gradients(self):
"""
Reduce gradients across data parallel ranks.
When overlap_grad_reduce is set to True, waits for asynchronous all-reduces
to complete.
When overlap_grad_reduce is set to False, calls synchronous
all-reduce.
"""
for grad_buffer in self.grad_buffers.values():
grad_buffer.done()
|
Megatron-LM-master
|
megatron/model/distributed.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""T5 model."""
import torch
from megatron import get_args
from megatron.core import tensor_parallel
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits, get_language_model
from megatron.model import LayerNorm
from megatron.model.utils import (
openai_gelu,
get_linear_layer
)
from .module import MegatronModule
def t5_extended_attention_mask(attention_mask_list):
def attn_mask_postprocess(attn_mask):
# [b, 1, s, s]
extended_attention_mask = attn_mask.unsqueeze(1)
return extended_attention_mask
return [attn_mask_postprocess(attn_mask) for attn_mask in attention_mask_list]
def t5_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
class T5LMHead(MegatronModule):
"""Masked LM head for T5
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
parallel_output: whether the output logits are kept distributed or gathered.
"""
def __init__(self, mpu_vocab_size, parallel_output):
super(T5LMHead, self).__init__()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
self.bias.model_parallel = True
self.bias.partition_dim = 0
self.bias.stride = 1
self.parallel_output = parallel_output
def forward(self, hidden_states, word_embeddings_weight):
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
class T5Model(MegatronModule):
"""T5 Language model."""
def __init__(self,
config,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True,
add_encoder=True,
add_decoder=True):
super().__init__(config=config)
args = get_args()
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.add_encoder = add_encoder
self.add_decoder = add_decoder
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=False,
add_encoder=add_encoder,
add_decoder=add_decoder,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings()
if self.post_process and self.add_decoder:
self.lm_head = T5LMHead(
self.shared_embedding_or_output_weight().size(0),
parallel_output)
self._lm_head_key = 'lm_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, encoder_input_ids, decoder_input_ids, encoder_attn_mask,
decoder_attn_mask, encoder_decoder_attn_mask,
tokentype_ids=None, lm_labels=None, enc_hidden_states=None):
# Converting the attention masks to proper parameter settings
encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask = t5_extended_attention_mask(
[encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask])
encoder_position_ids = t5_position_ids(encoder_input_ids)
decoder_position_ids = t5_position_ids(decoder_input_ids)
lm_output = self.language_model(encoder_input_ids,
encoder_position_ids,
encoder_attn_mask,
decoder_input_ids,
decoder_position_ids,
decoder_attn_mask,
encoder_decoder_attn_mask,
tokentype_ids=tokentype_ids,
enc_hidden_states=enc_hidden_states)
if self.post_process and self.add_decoder:
decoder_output, encoder_output = lm_output
# Output. [s, b, h]
lm_logits = self.lm_head(decoder_output,
self.shared_embedding_or_output_weight())
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0,1).contiguous()
else:
# [b s] => [s b]
lm_labels = lm_labels.transpose(0,1).contiguous()
if self.fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
# [s b] => [b s]
lm_loss = lm_loss.transpose(0,1).contiguous()
return lm_loss
elif self.add_decoder and not self.add_encoder:
decoder_output, encoder_output = lm_output
return decoder_output
else:
encoder_output = lm_output
return encoder_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process and self.add_decoder:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process and self.add_decoder:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(prefix=prefix,
keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process and self.add_decoder:
self.lm_head.load_state_dict(state_dict[self._lm_head_key],
strict=strict)
# Load word embeddings.
if self.post_process and not self.pre_process and self.add_decoder:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
|
Megatron-LM-master
|
megatron/model/t5_model.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Utilities for models."""
import math
import torch
from megatron import get_args
from megatron.model import LayerNorm, RMSNorm
def init_method_normal(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
def scaled_init_method_normal(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
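# Worked example (illustrative, values are hypothetical): with sigma=0.02 and
# num_layers=24 the scaled standard deviation is 0.02 / sqrt(2 * 24) ~= 0.00289,
# i.e. deeper stacks get a proportionally smaller output-layer init.
def _illustrate_scaled_init_std():
    import math
    sigma, num_layers = 0.02, 24
    std = sigma / math.sqrt(2.0 * num_layers)
    assert abs(std - 0.0028868) < 1e-6
    return std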
def attention_mask_func(attention_scores, attention_mask):
attention_scores.masked_fill_(attention_mask, -10000.0)
return attention_scores
def get_linear_layer(rows, columns, init_method):
"""Simple linear layer with weight initialization."""
layer = torch.nn.Linear(rows, columns)
if get_args().perform_initialization:
init_method(layer.weight)
with torch.no_grad():
layer.bias.zero_()
return layer
@torch.jit.script
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
(1.0 + 0.044715 * x * x)))
def openai_gelu(x):
return gelu_impl(x)
# This is actually the Python equivalent of torch.nn.functional.gelu(), with type hints for the ONNX exporter
@torch.jit.script
def erf_gelu(x):
return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype)+torch.ones_like(x).to(dtype=x.dtype))
def get_norm(config):
args = get_args()
if args.normalization == "LayerNorm":
return LayerNorm(
config.hidden_size,
eps=config.layernorm_epsilon,
no_persist_layer_norm=not config.persist_layer_norm,
sequence_parallel=config.sequence_parallel,
apply_layernorm_1p=args.apply_layernorm_1p)
elif args.normalization == "RMSNorm":
return RMSNorm(args.hidden_size, args.norm_epsilon)
else:
raise Exception(f"unsupported norm type '{args.normalization}'.")
|
Megatron-LM-master
|
megatron/model/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Transformer."""
from contextlib import nullcontext
import math
import numpy as np
import torch
import torch.nn.functional as F
from typing import Optional
from megatron import get_timers, get_args, get_retro_args, core, get_num_microbatches
from .module import MegatronModule
from megatron.core import mpu, tensor_parallel
from megatron.core.enums import ModelType
from megatron.model.enums import AttnMaskType, LayerType, AttnType
from megatron.model.fused_softmax import FusedScaleMaskSoftmax
from megatron.model.fused_bias_gelu import bias_gelu_impl
from megatron.core.models.common.rotary_pos_embedding import apply_rotary_pos_emb
from megatron.model.utils import attention_mask_func, openai_gelu, erf_gelu, get_norm
try:
from einops import rearrange
except ImportError:
rearrange = None
try:
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
except ImportError:
try:
from flash_attn.flash_attn_interface import flash_attn_varlen_func as flash_attn_unpadded_func
except ImportError:
flash_attn_unpadded_func = None
""" We use the following notation throughout this file:
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
l: number of layers
Transformer takes input of size [s, b, h] and returns a
tensor of the same size. We use the following arguments:
hyperparameters: transformer hyperparameters
"""
class DropPath(MegatronModule):
"""Drop paths (Stochastic Depth) per sample
(when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=0.):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, hidden_state):
if self.drop_prob == 0. or not self.training:
return hidden_state
keep_prob = 1 - self.drop_prob
# work with diff dim tensors, not just 2D ConvNets
# hidden_state: [s, b, h]
shape = (1,) + (hidden_state.shape[1],) + (1,) * (hidden_state.ndim - 2)
random_tensor = keep_prob + \
torch.rand(shape, dtype=hidden_state.dtype, device=hidden_state.device)
random_tensor.floor_() # binarize
output = hidden_state.div(keep_prob) * random_tensor
return output
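# Self-contained sketch (not part of the original file) of the stochastic-depth
# masking above: each sample in the batch either keeps or drops the whole residual
# branch, and surviving samples are rescaled by 1 / keep_prob.
def _illustrate_drop_path_mask():
    import torch
    keep_prob = 0.5
    hidden_state = torch.ones(3, 4, 8)                        # [s, b, h]
    shape = (1, hidden_state.shape[1], 1)                     # one value per batch sample
    random_tensor = (keep_prob + torch.rand(shape)).floor_()  # 0. or 1. per sample
    output = hidden_state.div(keep_prob) * random_tensor
    # every batch column is either all zeros or all 2.0 (= 1 / keep_prob)
    assert set(output.unique().tolist()) <= {0.0, 2.0}
    return output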
class ParallelMLP(MegatronModule):
"""MLP.
MLP will take the input with hidden size h, project it to 4*h
hidden dimensions, perform a nonlinear transformation, and project the
state back into h hidden dimensions.
"""
def __init__(self, config):
super(ParallelMLP, self).__init__()
args = get_args()
self.add_bias = config.add_bias_linear
ffn_hidden_size = config.ffn_hidden_size
if config.gated_linear_unit:
ffn_hidden_size *= 2
# Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
self.dense_h_to_4h = tensor_parallel.ColumnParallelLinear(
config.hidden_size,
ffn_hidden_size,
config=config,
init_method=config.init_method,
bias=self.add_bias,
gather_output=False,
skip_bias_add=True,
)
self.bias_gelu_fusion = False
self.activation_func = None
self.swiglu = args.swiglu
if args.openai_gelu:
self.activation_func = openai_gelu
elif args.onnx_safe:
self.activation_func = erf_gelu
elif args.swiglu:
def swiglu(x):
x = torch.chunk(x, 2, dim=-1)
return F.silu(x[0]) * x[1]
self.activation_func = swiglu
elif args.squared_relu:
def squared_relu(x):
return torch.pow(F.relu(x), 2)
self.activation_func = squared_relu
else:
self.bias_gelu_fusion = args.bias_gelu_fusion
self.activation_func = F.gelu
# Project back to h.
self.dense_4h_to_h = tensor_parallel.RowParallelLinear(
config.ffn_hidden_size,
config.hidden_size,
config=config,
init_method=config.output_layer_init_method,
bias=self.add_bias,
input_is_parallel=True
)
def forward(self, hidden_states):
# [s, b, 4hp]
intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
if self.bias_gelu_fusion:
assert self.add_bias is True
assert self.activation_func == F.gelu
intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel)
else:
if bias_parallel is not None:
intermediate_parallel = intermediate_parallel + bias_parallel
intermediate_parallel = self.activation_func(intermediate_parallel)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output, output_bias
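# Standalone illustration (not part of the original file) of the swiglu activation
# wired up above: with gated_linear_unit the first projection doubles its output
# width, and the activation chunks that output in two and gates one half with the other.
def _illustrate_swiglu():
    import torch
    import torch.nn.functional as F
    x = torch.randn(5, 2 * 16)          # doubled width, as from dense_h_to_4h
    a, b = torch.chunk(x, 2, dim=-1)
    y = F.silu(a) * b
    assert y.shape == (5, 16)
    return y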
class SwitchMLP(MegatronModule):
"""
Routes input to one of N MLP "experts"
"""
def __init__(self, config):
super(SwitchMLP, self).__init__()
args = get_args()
self.router = torch.nn.Linear(config.hidden_size, args.num_experts)
self.experts = torch.nn.ModuleList()
for i in range(args.num_experts):
self.experts.append(ParallelMLP(config))
def forward(self, hidden_states):
# hidden_states: [s, b, h]
s = hidden_states.size(0)
b = hidden_states.size(1)
h = hidden_states.size(2)
route = self.router(hidden_states)
route = torch.nn.functional.softmax(route, dim=2)
max_prob, max_ind = torch.max(route, dim=2)
max_prob = torch.unsqueeze(max_prob, 2) # [s b 1]
# TODO (rprenger): this could be made easier to read
# Converting [s, b, h] to [s*b, h].
# Each vector could be routed differently
hidden_states = hidden_states.view(-1, hidden_states.size(2)) # [s*b h]
max_prob = max_prob.view(-1, max_prob.size(2)) # [s*b 1]
max_ind = max_ind.view(-1) # [s*b]
output_total = torch.empty_like(hidden_states)
output_bias_total = torch.empty_like(hidden_states)
#TODO (rprenger) This does each expert in serial, but it could be parallelized
for expert_num, expert in enumerate(self.experts):
local_indices = (max_ind == expert_num).nonzero()
hidden = hidden_states[local_indices,:]
output, output_bias = expert(hidden)
if output_bias is not None:
output_bias = output_bias.expand_as(output)
output_bias_total[local_indices,:] = output_bias
output_total[local_indices,:] = output
output_total = output_total*max_prob
output_total = output_total.view(s, b, h)
if output_bias is not None:
output_bias_total = output_bias_total*max_prob
output_bias_total = output_bias_total.view(s, b, h)
else:
output_bias_total = None
return output_total, output_bias_total
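# Minimal, self-contained sketch (not part of the original file) of the top-1
# routing performed in SwitchMLP.forward above, with plain torch.nn.Linear layers
# standing in for the parallel experts; the toy_* names are made up for illustration.
def _illustrate_top1_routing():
    import torch
    import torch.nn.functional as F
    s, b, h, num_experts = 4, 2, 8, 3
    hidden = torch.randn(s, b, h)
    toy_router = torch.nn.Linear(h, num_experts)
    toy_experts = torch.nn.ModuleList(torch.nn.Linear(h, h) for _ in range(num_experts))
    with torch.no_grad():
        route = F.softmax(toy_router(hidden), dim=2)
        max_prob, max_ind = torch.max(route, dim=2)        # [s, b]
        flat = hidden.view(-1, h)                          # [s*b, h]
        flat_ind = max_ind.view(-1)                        # [s*b]
        out = torch.empty_like(flat)
        for expert_num, expert in enumerate(toy_experts):
            idx = (flat_ind == expert_num).nonzero(as_tuple=True)[0]
            if idx.numel() > 0:
                out[idx] = expert(flat[idx])
        out = out * max_prob.view(-1, 1)
    return out.view(s, b, h)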
class CoreAttention(MegatronModule):
def __init__(self, layer_number, config,
attn_mask_type=AttnMaskType.padding):
super(CoreAttention, self).__init__()
self.fp16 = config.fp16
self.bf16 = config.bf16
self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
if self.apply_query_key_layer_scaling:
self.attention_softmax_in_fp32 = True
self.layer_number = max(1, layer_number)
self.attn_mask_type = attn_mask_type
self.sequence_parallel = config.sequence_parallel
projection_size = config.kv_channels * config.num_attention_heads
# Per attention head and per partition values.
world_size = mpu.get_tensor_model_parallel_world_size()
self.hidden_size_per_partition = core.utils.divide(projection_size,
world_size)
self.hidden_size_per_attention_head = core.utils.divide(
projection_size, config.num_attention_heads)
self.num_attention_heads_per_partition = core.utils.divide(
config.num_attention_heads, world_size)
coeff = None
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
if self.apply_query_key_layer_scaling:
coeff = self.layer_number
self.norm_factor *= coeff
self.scale_mask_softmax = FusedScaleMaskSoftmax(
self.fp16, self.bf16,
self.attn_mask_type,
config.masked_softmax_fusion,
attention_mask_func,
self.attention_softmax_in_fp32,
coeff)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs for different numbers of parallel partitions, but
# on average it should not be partition dependent.
self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
def forward(self, query_layer, key_layer,
value_layer, attention_mask):
# ===================================
# Raw attention scores. [b, np, s, s]
# ===================================
# [b, np, sq, sk]
output_size = (query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0))
# [sq, b, np, hn] -> [sq, b * np, hn]
query_layer = query_layer.reshape(output_size[2],
output_size[0] * output_size[1], -1)
# [sk, b, np, hn] -> [sk, b * np, hn]
key_layer = key_layer.view(output_size[3],
output_size[0] * output_size[1], -1)
# preallocating input tensor: [b * np, sq, sk]
matmul_input_buffer = mpu.get_global_memory_buffer().get_tensor(
(output_size[0]*output_size[1], output_size[2], output_size[3]),
query_layer.dtype, "mpu")
# Raw attention scores. [b * np, sq, sk]
matmul_result = torch.baddbmm(
matmul_input_buffer,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0, alpha=(1.0/self.norm_factor))
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(*output_size)
# ===========================
# Attention probs and dropout
# ===========================
# attention scores and attention mask [b, np, sq, sk]
attention_probs = self.scale_mask_softmax(attention_scores,
attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
if not self.sequence_parallel:
with tensor_parallel.get_cuda_rng_tracker().fork():
attention_probs = self.attention_dropout(attention_probs)
else:
attention_probs = self.attention_dropout(attention_probs)
# =========================
# Context layer. [sq, b, hp]
# =========================
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
# context layer shape: [b, np, sq, hn]
output_size = (value_layer.size(1),
value_layer.size(2),
query_layer.size(0),
value_layer.size(3))
# change view [sk, b * np, hn]
value_layer = value_layer.view(value_layer.size(0),
output_size[0] * output_size[1], -1)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(output_size[0] * output_size[1],
output_size[2], -1)
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
# change view [b, np, sq, hn]
context_layer = context_layer.view(*output_size)
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = context_layer.size()[:-2] + \
(self.hidden_size_per_partition,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
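# Shape-only sketch (not part of the original file) of the baddbmm score
# computation above: query/key are folded to [sq, b*np, hn] / [sk, b*np, hn] and
# the batched matmul produces raw scores of shape [b*np, sq, sk].
def _illustrate_attention_score_shapes():
    import torch
    b, np_, sq, sk, hn = 2, 4, 8, 8, 16
    q = torch.randn(sq, b * np_, hn)
    k = torch.randn(sk, b * np_, hn)
    buf = torch.zeros(b * np_, sq, sk)
    scores = torch.baddbmm(buf, q.transpose(0, 1), k.transpose(0, 1).transpose(1, 2),
                           beta=0.0, alpha=1.0 / hn ** 0.5)
    assert scores.shape == (b * np_, sq, sk)
    return scores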
class FlashSelfAttention(torch.nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
device=None, dtype=None):
super().__init__()
assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
'e.g., with pip install flash-attn')
assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, q, k, v):
"""Implements the multihead softmax attention.
Arguments
---------
q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
"""
assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q,k,v)))
assert all((i.is_cuda for i in (q,k,v)))
batch_size, seqlen_q = q.shape[0], q.shape[1]
seqlen_k = k.shape[1]
q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]]
cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32,
device=q.device)
if self.training:
# during training q,k,v always have same seqlen
assert seqlen_k == seqlen_q
is_causal = self.causal
cu_seqlens_k = cu_seqlens_q
dropout_p = self.dropout_p
else:
# turn off FA causal mask after first inference autoregressive iteration
# only on first autoregressive step q,k,v have same seqlen
is_causal = seqlen_q == seqlen_k
cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32,
device=q.device)
dropout_p = 0
output = flash_attn_unpadded_func(
q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k,
dropout_p,
softmax_scale=self.softmax_scale, causal=is_causal
)
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
return output
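# --- Editor's sketch (not part of Megatron-LM) ---
# Layout of the cumulative-sequence-length tensors consumed by the unpadded
# flash-attention kernel when every sample in the batch has the same length,
# as in the forward() above. Purely illustrative; no flash-attn import needed.
def _example_cu_seqlens(batch_size=3, seqlen=5):
    import torch
    cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen,
                              dtype=torch.int32)
    # tensor([ 0,  5, 10, 15], dtype=torch.int32): token i of sample j sits at
    # row cu_seqlens[j] + i of the flattened (b s) dimension.
    return cu_seqlens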
class ParallelAttention(MegatronModule):
"""Parallel self-attention layer abstract class.
Self-attention layer takes input with size [s, b, h]
and returns output of the same size.
"""
def __init__(self, config, layer_number,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding):
super(ParallelAttention, self).__init__()
args = get_args()
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.params_dtype = config.params_dtype
self.sequence_parallel = config.sequence_parallel
self.group_query_attention = args.group_query_attention
self.num_query_groups = args.num_query_groups
query_projection_size = config.kv_channels * config.num_attention_heads
if self.group_query_attention:
kv_projection_size = args.kv_channels * args.num_query_groups
else:
kv_projection_size = args.kv_channels * args.num_attention_heads
self.use_flash_attn = args.use_flash_attn \
and attention_type == AttnType.self_attn \
and self.attn_mask_type == AttnMaskType.causal
if self.use_flash_attn:
if flash_attn_unpadded_func is None:
raise ImportError('FlashAttention is not installed, please install with '
'pip install flash-attn')
assert attention_type == AttnType.self_attn, ('FlashAttention code path only supports '
'self-attention for now')
assert self.attn_mask_type == AttnMaskType.causal, ('FlashAttention code path only '
'supports causal mask for now')
if rearrange is None:
raise ImportError('einops is not installed, please install with pip install einops')
# Per attention head and per partition values.
world_size = mpu.get_tensor_model_parallel_world_size()
self.hidden_size_per_attention_head = core.utils.divide(
query_projection_size, config.num_attention_heads)
self.num_attention_heads_per_partition = core.utils.divide(
config.num_attention_heads, world_size)
if self.group_query_attention:
if args.num_query_groups % world_size != 0:
raise NotImplementedError('Currently the num_query_groups should be '
'a multiple of the tensor parallel size')
self.num_query_groups_per_partition = core.utils.divide(
args.num_query_groups, world_size)
else:
self.num_query_groups_per_partition = self.num_attention_heads_per_partition
# Strided linear layer.
if attention_type == AttnType.self_attn:
self.query_key_value = tensor_parallel.ColumnParallelLinear(
config.hidden_size,
query_projection_size + 2 * kv_projection_size,
config=config,
init_method=config.init_method,
bias=args.add_bias_linear,
gather_output=False)
else:
assert attention_type == AttnType.cross_attn
if self.group_query_attention:
raise NotImplementedError("Grouped query attention not implemented for cross-attention.")
assert query_projection_size == kv_projection_size
self.query = tensor_parallel.ColumnParallelLinear(
config.hidden_size,
query_projection_size,
config=config,
init_method=config.init_method,
bias=config.add_bias_linear,
gather_output=False)
self.key_value = tensor_parallel.ColumnParallelLinear(
config.hidden_size,
2 * kv_projection_size,
config=config,
init_method=config.init_method,
bias=config.add_bias_linear,
gather_output=False)
self.core_attention = CoreAttention(self.layer_number, config,
self.attn_mask_type)
self.checkpoint_core_attention = config.recompute_granularity == 'selective'
if self.use_flash_attn:
self.core_attention_flash = FlashSelfAttention(
causal=True, attention_dropout=config.attention_dropout
)
# Output.
self.dense = tensor_parallel.RowParallelLinear(
query_projection_size,
config.hidden_size,
config=config,
init_method=config.output_layer_init_method,
bias=args.add_bias_linear,
input_is_parallel=True,
skip_bias_add=True)
def _checkpointed_attention_forward(self, query_layer, key_layer,
value_layer, attention_mask,
rotary_pos_emb=None):
"""Forward method with activation checkpointing."""
def custom_forward(*inputs):
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
output_ = self.core_attention(query_layer, key_layer,
value_layer, attention_mask)
return output_
q_pos_emb, k_pos_emb = (None, None) if rotary_pos_emb is None \
else rotary_pos_emb
hidden_states = tensor_parallel.checkpoint(
custom_forward,
False, query_layer, key_layer, value_layer, attention_mask,
q_pos_emb, k_pos_emb)
return hidden_states
def _allocate_memory(self, inference_max_sequence_len, batch_size, num_attention_heads):
return torch.empty(
inference_max_sequence_len,
batch_size,
num_attention_heads,
self.hidden_size_per_attention_head,
dtype=self.params_dtype,
device=torch.cuda.current_device())
def forward(self, hidden_states, attention_mask,
encoder_output=None, inference_params=None,
rotary_pos_emb=None):
# hidden_states: [sq, b, h]
# =================================================
# Pre-allocate memory for key-values for inference.
# =================================================
is_first_step = False
if inference_params:
if self.layer_number not in inference_params.key_value_memory_dict:
inf_max_seq_len = inference_params.max_sequence_length
inf_max_batch_size = inference_params.max_batch_size
inference_key_memory = self._allocate_memory(
inf_max_seq_len, inf_max_batch_size,
self.num_query_groups_per_partition)
inference_value_memory = self._allocate_memory(
inf_max_seq_len, inf_max_batch_size,
self.num_query_groups_per_partition)
inference_params.key_value_memory_dict[self.layer_number] = (
inference_key_memory, inference_value_memory)
is_first_step = True
else:
inference_key_memory, inference_value_memory = \
inference_params.key_value_memory_dict[self.layer_number]
# =====================
# Query, Key, and Value
# =====================
if self.attention_type == AttnType.self_attn:
# Attention heads [sq, b, h] --> [sq, b, ng * (np/ng + 2) * hn]
mixed_x_layer, _ = self.query_key_value(hidden_states)
# [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
self.num_query_groups_per_partition,
(
(self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)
* self.hidden_size_per_attention_head
),
)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
# [sq, b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn]
(query_layer,
key_layer,
value_layer) = torch.split(
mixed_x_layer,
[
(
self.num_attention_heads_per_partition // self.num_query_groups_per_partition
* self.hidden_size_per_attention_head
),
self.hidden_size_per_attention_head,
self.hidden_size_per_attention_head
],
dim=3)
# [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]
query_layer = query_layer.view(query_layer.size(0), query_layer.size(1), -1, self.hidden_size_per_attention_head)
else:
# Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
mixed_kv_layer, _ = self.key_value(encoder_output)
# [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
new_tensor_shape = mixed_kv_layer.size()[:-1] + \
(self.num_attention_heads_per_partition,
2 * self.hidden_size_per_attention_head)
mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
# [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
(key_layer,
value_layer) = tensor_parallel.split_tensor_along_last_dim(mixed_kv_layer, 2)
# Attention head [sq, b, h] --> [sq, b, hp]
query_layer, _ = self.query(hidden_states)
# [sq, b, hp] --> [sq, b, np, hn]
new_tensor_shape = query_layer.size()[:-1] + \
(self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head)
query_layer = query_layer.view(*new_tensor_shape)
# ==================================
# Adjust key and value for inference
# ==================================
# duplicate the pos_emb for self attention
if rotary_pos_emb is not None:
if isinstance(rotary_pos_emb, tuple):
rotary_pos_emb = rotary_pos_emb
else:
rotary_pos_emb = ((rotary_pos_emb,) * 2)
if inference_params:
batch_start = inference_params.batch_size_offset
batch_end = batch_start + key_layer.size(1)
assert batch_end <= inference_key_memory.size(1)
sequence_start = inference_params.sequence_len_offset
sequence_end = sequence_start + key_layer.size(0)
assert sequence_end <= inference_key_memory.size(0)
# Copy key and values.
inference_key_memory[sequence_start:sequence_end,
batch_start:batch_end, ...] = key_layer
inference_value_memory[sequence_start:sequence_end,
batch_start:batch_end, ...] = value_layer
key_layer = inference_key_memory[
:sequence_end, batch_start:batch_end, ...]
value_layer = inference_value_memory[
:sequence_end, batch_start:batch_end, ...]
# adjust the key rotary positional embedding
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
# need to cross check this condition during inference
# if not set_inference_key_value_memory:
if not is_first_step:
# In inference, we compute one token at a time.
# Select the correct positional embedding
# (only the last token in the sequence)
q_pos_emb = q_pos_emb[sequence_end - 1 : sequence_end]
else:
# In the first forward pass of inference,
# we use the entire provided prefix.
# q_pos_emb here has the rope embeddings of the entire
# prefix + to-be-generated output so
# we slice to just the prefix.
q_pos_emb = q_pos_emb[:sequence_end, :, :, :]
k_pos_emb = k_pos_emb[:sequence_end, :, :, :]
rotary_pos_emb = (q_pos_emb, k_pos_emb)
# ==================================
# core attention computation
# ==================================
# expand the key_layer and value_layer [sk, b, ng, hn] -> [sk, b, np, hn]
key_layer = key_layer.repeat_interleave(
self.num_attention_heads_per_partition // self.num_query_groups_per_partition,
dim = 2
)
value_layer = value_layer.repeat_interleave(
self.num_attention_heads_per_partition // self.num_query_groups_per_partition,
dim = 2
)
# apply relative positional encoding (rotary embedding)
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb)
key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb)
# TODO, can apply positional embedding to value_layer so it has
# absolute positional embedding.
# otherwise, only relative positional embedding takes effect
# value_layer = apply_rotary_pos_emb(value_layer, k_pos_emb)
if not self.use_flash_attn:
if self.checkpoint_core_attention:
context_layer = self._checkpointed_attention_forward(
query_layer, key_layer, value_layer, attention_mask)
else:
context_layer = self.core_attention(
query_layer, key_layer, value_layer, attention_mask)
else:
q, k, v = [rearrange(x, 's b ... -> b s ...').contiguous()
for x in (query_layer, key_layer, value_layer)]
if not self.sequence_parallel:
with tensor_parallel.get_cuda_rng_tracker().fork():
context_layer = self.core_attention_flash(q, k, v)
else:
context_layer = self.core_attention_flash(q, k, v)
context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous()
# =================
# Output. [sq, b, h]
# =================
output, bias = self.dense(context_layer)
return output, bias
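# --- Editor's sketch (not part of Megatron-LM) ---
# With grouped-query attention, only ng key/value heads are stored per tensor-
# parallel partition; repeat_interleave (as used in forward() above) expands
# them to match the np query heads before core attention. Toy sizes only.
def _example_gqa_expand():
    import torch
    sk, b, ng, hn = 6, 2, 2, 4
    np_ = 8  # query heads per partition; must be a multiple of ng
    key = torch.randn(sk, b, ng, hn)
    expanded = key.repeat_interleave(np_ // ng, dim=2)
    assert expanded.shape == (sk, b, np_, hn)
    return expanded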
def bias_dropout_add(x, bias, residual, prob, training):
# type: (Tensor, Optional[Tensor], Tensor, float, bool) -> Tensor
if bias is not None:
x = x + bias
out = torch.nn.functional.dropout(x, p=prob, training=training)
out = residual + out
return out
def get_bias_dropout_add(training):
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
@torch.jit.script
def bias_dropout_add_fused_train(x: torch.Tensor,
bias: Optional[torch.Tensor],
residual: torch.Tensor,
prob: float) -> torch.Tensor:
return bias_dropout_add(x, bias, residual, prob, True)
@torch.jit.script
def bias_dropout_add_fused_inference(x: torch.Tensor,
bias: Optional[torch.Tensor],
residual: torch.Tensor,
prob: float) -> torch.Tensor:
return bias_dropout_add(x, bias, residual, prob, False)
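# --- Editor's sketch (not part of Megatron-LM) ---
# With dropout disabled (inference), the jit-fused variant and the plain helper
# above compute the same x + bias -> dropout -> + residual result; a quick
# equivalence check on toy tensors.
def _example_bias_dropout_add_equivalence():
    import torch
    x, bias, residual = torch.randn(4, 3), torch.randn(3), torch.randn(4, 3)
    out_plain = bias_dropout_add(x, bias, residual, 0.1, False)
    out_fused = bias_dropout_add_fused_inference(x, bias, residual, 0.1)
    assert torch.allclose(out_plain, out_fused)
    return out_fused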
class ParallelTransformerLayer(MegatronModule):
"""A single transformer layer.
Transformer layer takes input with size [s, b, h] and returns an
output of the same size.
"""
def __init__(self, config,
layer_number, layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
drop_path_rate=0.):
# retriever=None):
args = get_args()
super(ParallelTransformerLayer, self).__init__()
self.layer_number = layer_number
self.layer_type = layer_type
self.apply_residual_connection_post_norm \
= config.apply_residual_connection_post_layernorm
self.bf16 = config.bf16
self.fp32_residual_connection = config.fp32_residual_connection
# Normalize the input data.
self.input_norm = get_norm(config)
# Self attention.
self.self_attention = ParallelAttention(
config,
layer_number,
attention_type=AttnType.self_attn,
attn_mask_type=self_attn_mask_type)
self.hidden_dropout = config.hidden_dropout
self.bias_dropout_fusion = config.bias_dropout_fusion
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else None
# Normalize the attention output
self.post_attention_norm = get_norm(config)
# Cross attention.
if self.layer_type in (LayerType.decoder,
LayerType.retro_decoder,
LayerType.retro_decoder_with_retriever,
LayerType.retro_encoder):
self.inter_attention = ParallelAttention(
config,
layer_number,
attention_type=AttnType.cross_attn)
# Normalize the attention output.
self.post_inter_attention_norm = get_norm(config)
# MLP
if args.num_experts is not None:
self.mlp = SwitchMLP(config)
else:
self.mlp = ParallelMLP(config)
# Set bias+dropout+add fusion grad_enable execution handler.
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)
self.bias_dropout_add_exec_handler = \
nullcontext if use_nvfuser else torch.enable_grad
if args.retro_add_retriever:
retro_args = get_retro_args()
self.retro_num_neighbors = args.retro_num_neighbors
self.retro_chunk_length = retro_args.retro_gpt_chunk_length
self.retro_retrieved_length = retro_args.retro_gpt_retrieved_length
# Retriever (bi-directional transformer with cross attention)
if layer_type == LayerType.retro_decoder_with_retriever:
self.retriever = ParallelTransformer(
config=config,
model_type=ModelType.retro_encoder,
self_attn_mask_type=AttnMaskType.padding,
pre_process=True,
post_process=False,
)
self._retriever_key = 'retriever'
else:
self.retriever = None
def default_decoder_cross_attention(self,
encoder_output,
enc_dec_attn_mask,
norm_input,
norm_output,
bias_dropout_add_func):
'''Cross attention for a standard encoder-decoder model.'''
# Attention.
attention_output, attention_bias = \
self.inter_attention(norm_output,
enc_dec_attn_mask,
encoder_output=encoder_output)
# Residual connection.
if self.apply_residual_connection_post_norm:
residual = norm_output
else:
residual = norm_input
if attention_bias is not None:
attention_bias = attention_bias.expand_as(residual)
# Bias-dropout-add.
with self.bias_dropout_add_exec_handler():
norm_input = bias_dropout_add_func(
attention_output,
attention_bias,
residual,
self.hidden_dropout)
# Normalize.
norm_output = self.post_inter_attention_norm(norm_input)
return norm_input, norm_output
def retro_encoder_cross_attention(self,
retriever_output,
norm_input,
norm_output,
bias_dropout_add_func):
"""Cross attention for Retro encoder.
Notation:
ns : Sequence length.
bs : Batch size.
d : Hidden size.
l : Number of chunks per sample (i.e., seq_length/chunk_length).
k : Number of neighbors.
r : Number of retrieved tokens (neighbors + continuation).
"""
ns, bs, d = norm_output.shape # [r, bs * l * k, d]
# Divide sequence dimension into chunks.
chunked_outputs = norm_output.reshape(self.retro_retrieved_length,
-1,
self.retro_num_neighbors,
d)
chunked_outputs_before_norm = \
norm_input.reshape(self.retro_retrieved_length, -1,
self.retro_num_neighbors, d) # [r, bs*l, k, d]
# Per-chunk attention.
norm_inputs = []
norm_outputs = []
for k in range(self.retro_num_neighbors):
# Attention.
chunked_output = chunked_outputs[:,:,k].contiguous()
attention_output, attention_bias = \
self.inter_attention(
chunked_output, # Q (neighbor embedding)
None,
encoder_output=retriever_output) # K, V (hidden act)
# Residual connection.
if self.apply_residual_connection_post_norm:
residual = chunked_output
else:
residual = chunked_outputs_before_norm[:,:,k]
# Re-enable torch grad to enable fused optimization.
with torch.enable_grad():
norm_input = bias_dropout_add_func(
attention_output,
None if attention_bias is None else attention_bias.expand_as(residual),
residual,
self.hidden_dropout)
norm_inputs.append(norm_input)
# Layer norm.
norm_output = self.post_inter_attention_norm(norm_input)
norm_outputs.append(norm_output)
# Concatenate layer norms.
# norm_input : [r, k * bs * l, d]
# norm_output : [r, k * bs * l, d]
norm_input = torch.stack(norm_inputs, dim=1).reshape(ns, bs, d)
norm_output = torch.stack(norm_outputs, dim=1).reshape(ns, bs, d)
return norm_input, norm_output
def retro_decoder_cross_attention(self,
retriever_input,
retriever_output,
retriever_attn_mask,
norm_input,
norm_output,
inference_params,
bias_dropout_add_func):
"""Cross attention for Retro decoder.
Notation:
ns : Sequence length.
bs : Batch size.
d : Hidden size.
l : Number of chunks per sample (i.e., seq_length/chunk_length).
m : Number of tokens per chunk.
k : Number of neighbors.
r : Number of retrieved tokens (neighbors + continuation).
"""
ns, bs, d = norm_output.shape
l = int(np.ceil(ns / self.retro_chunk_length))
# Retrieve neighbors.
if self.layer_type == LayerType.retro_decoder_with_retriever:
first_ns = ns % self.retro_chunk_length
if first_ns > 0:
raise Exception("test this case.")
first_chunk, rest_chunk = \
norm_output[:first_ns], norm_output[first_ns:]
first_chunk = torch.nn.functional.pad(
first_chunk,
(0, 0, 0, 0, 0, self.retro_chunk_length - first_ns),
'constant',
0)
chunked_output = \
torch.cat((first_chunk, rest_chunk), dim=0) # [l * m, bs, d]
else:
chunked_output = norm_output # [l * m, bs, d]
chunked_output = chunked_output \
.reshape(l, self.retro_chunk_length, bs, d) \
.permute(1, 2, 0, 3) \
.reshape(self.retro_chunk_length, bs * l, d) \
.contiguous()
# Get Encoder Output
retriever_output = self.retriever(
hidden_states=retriever_input,
attention_mask=retriever_attn_mask,
retriever_output=chunked_output,
retriever_attn_mask=retriever_attn_mask,
inference_params=inference_params) # [r, k * bs * l , d]
retriever_output = retriever_output.reshape(
self.retro_retrieved_length * self.retro_num_neighbors, bs * l, d) # [r * k, bs * l, d]
# Chunks.
pad = (ns - 1) % self.retro_chunk_length
attending_chunks = norm_output[pad:]
padded_chunks = torch.nn.functional.pad(
attending_chunks,
(0, 0, 0, 0, 0, self.retro_chunk_length - 1),
'constant', 0)
padded_chunked_output = padded_chunks \
.reshape(l, self.retro_chunk_length, bs, d) \
.permute(1, 2, 0, 3)
padded_chunked_output = padded_chunked_output.reshape(
self.retro_chunk_length, bs * l, d).contiguous()
# Encoder output.
attention_output, attention_bias = \
self.inter_attention(padded_chunked_output,
None,
encoder_output=retriever_output)
# Residual connection.
if self.apply_residual_connection_post_norm:
residual = norm_output
else:
residual = norm_input
# Re-enable torch grad to enable fused optimization.
with torch.enable_grad():
norm_input = bias_dropout_add_func(
attention_output,
None if attention_bias is None else attention_bias.expand_as(attention_output),
torch.zeros_like(attention_output),
self.hidden_dropout)
norm_input = norm_input \
.reshape(self.retro_chunk_length, bs, l, d) \
.permute(2, 0, 1, 3) # [l, m, bs, d]
norm_input = norm_input.reshape(self.retro_chunk_length * l, bs, d)
norm_input = torch.nn.functional.pad(
norm_input,
(0, 0, 0, 0, pad, 0),
'constant', 0)[:ns] # [ns, b, d]
norm_input = norm_input + residual
# Layer norm post the decoder attention
norm_output = self.post_inter_attention_norm(norm_input)
return retriever_output, norm_input, norm_output
def forward(self, hidden_states, attention_mask,
encoder_output=None, enc_dec_attn_mask=None,
retriever_input=None,
retriever_output=None,
retriever_attn_mask=None,
inference_params=None,
rotary_pos_emb=None):
# hidden_states: [s, b, h]
# Layer norm at the beginning of the transformer layer.
norm_output = self.input_norm(hidden_states)
# Self attention.
attention_output, attention_bias = \
self.self_attention(
norm_output,
attention_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb)
# Residual connection.
if self.apply_residual_connection_post_norm:
residual = norm_output
else:
residual = hidden_states
if self.drop_path is None:
# jit scripting for a nn.module (with dropout) is not
# triggering the fusion kernel. For now, we use two
# different nn.functional routines to account for varying
# dropout semantics during training and inference phases.
if self.bias_dropout_fusion:
if self.training:
bias_dropout_add_func = bias_dropout_add_fused_train
else:
bias_dropout_add_func = bias_dropout_add_fused_inference
else:
bias_dropout_add_func = get_bias_dropout_add(self.training)
if attention_bias is not None:
attention_bias = attention_bias.expand_as(residual)
with self.bias_dropout_add_exec_handler():
norm_input = bias_dropout_add_func(
attention_output,
attention_bias,
residual,
self.hidden_dropout)
else:
out = torch.nn.functional.dropout(attention_output + attention_bias,
p=self.hidden_dropout,
training=self.training)
norm_input = residual + self.drop_path(out)
# Layer norm post the self attention.
norm_output = self.post_attention_norm(norm_input)
# Cross attention.
if self.layer_type == LayerType.encoder:
pass
elif self.layer_type == LayerType.decoder:
norm_input, norm_output = \
self.default_decoder_cross_attention(
encoder_output,
enc_dec_attn_mask,
norm_input,
norm_output,
bias_dropout_add_func)
elif self.layer_type == LayerType.retro_encoder:
norm_input, norm_output = \
self.retro_encoder_cross_attention(
retriever_output,
norm_input,
norm_output,
bias_dropout_add_func)
elif self.layer_type in (LayerType.retro_decoder,
LayerType.retro_decoder_with_retriever):
retriever_output, norm_input, norm_output = \
self.retro_decoder_cross_attention(
retriever_input,
retriever_output,
retriever_attn_mask,
norm_input,
norm_output,
inference_params,
bias_dropout_add_func)
else:
raise Exception("Unsupported layer type, '%s'." %
self.layer_type.name)
# MLP.
mlp_output, mlp_bias = self.mlp(norm_output)
# Second residual connection.
if self.apply_residual_connection_post_norm:
residual = norm_output
else:
residual = norm_input
if self.drop_path is None:
if mlp_bias is not None:
mlp_bias = mlp_bias.expand_as(residual)
with self.bias_dropout_add_exec_handler():
output = bias_dropout_add_func(
mlp_output,
mlp_bias,
residual,
self.hidden_dropout)
# Jit compiled function creates 'view' tensor. This tensor
# potentially gets saved in the MPU checkpoint function context,
# which rejects view tensors. While making a viewless tensor here
# won't result in memory savings (like the data loader, or
# p2p_communication), it serves to document the origin of this
# 'view' tensor.
output = core.utils.make_viewless_tensor(inp = output,
requires_grad = output.requires_grad,
keep_graph = True)
else:
if mlp_bias is not None:
mlp_output = mlp_output + mlp_bias
out = torch.nn.functional.dropout(mlp_output,
p=self.hidden_dropout,
training=self.training)
output = residual + self.drop_path(out)
if self.layer_type == LayerType.retro_decoder_with_retriever:
return output, retriever_output
else:
return output
class NoopTransformerLayer(MegatronModule):
"""A single 'no-op' transformer layer.
The sole purpose of this layer is for when a standalone embedding layer
is used (i.e., args.standalone_embedding_stage == True). In this case,
zero transformer layers are assigned when pipeline rank == 0. Additionally,
when virtual pipeline rank >= 1, zero total model parameters are created
(virtual rank 0 contains the input embedding). This results in the model's
input and output tensors being the same, which causes an error when
performing certain memory optimizations on the output tensor (e.g.,
deallocating it). Thus, this layer disconnects the input from the output
via a clone. Since ranks containing a no-op layer are generally under-
utilized (both compute and memory), there's no worry of any performance
degradation.
"""
def __init__(self, layer_number):
super().__init__()
self.layer_number = layer_number
def forward(self, hidden_states, attention_mask,
encoder_output=None, enc_dec_attn_mask=None,
inference_params=None):
return hidden_states.clone()
def _get_num_layers(args, model_type, is_decoder=False):
"""Compute the number of transformer layers resident on the current rank."""
is_encoder_and_decoder_model = (model_type == ModelType.encoder_and_decoder)
if model_type == ModelType.retro_encoder:
num_layers = args.retro_encoder_layers
elif mpu.get_pipeline_model_parallel_world_size() > 1:
if is_encoder_and_decoder_model:
assert args.pipeline_model_parallel_split_rank is not None
# When a standalone embedding stage is used, a rank is taken from
# the encoder's ranks, to be used for the encoder's embedding
# layer. This way, the rank referenced by the 'split rank' remains
# the same whether or not a standalone embedding stage is used.
num_ranks_in_encoder = (
args.pipeline_model_parallel_split_rank - 1
if args.standalone_embedding_stage else
args.pipeline_model_parallel_split_rank
)
num_ranks_in_decoder = args.transformer_pipeline_model_parallel_size - num_ranks_in_encoder
assert args.encoder_num_layers % num_ranks_in_encoder == 0, \
'encoder_num_layers (%d) must be divisible by number of ranks given to encoder (%d)' % (args.encoder_num_layers, num_ranks_in_encoder)
assert args.decoder_num_layers % num_ranks_in_decoder == 0, \
'decoder_num_layers (%d) must be divisible by number of ranks given to decoder (%d)' % (args.decoder_num_layers, num_ranks_in_decoder)
if mpu.is_pipeline_stage_before_split():
num_layers = (
0
if args.standalone_embedding_stage
and mpu.get_pipeline_model_parallel_rank() == 0 else
args.encoder_num_layers // num_ranks_in_encoder
)
else:
num_layers = args.decoder_num_layers // num_ranks_in_decoder
else:
assert args.num_layers == args.encoder_num_layers
assert args.num_layers % args.transformer_pipeline_model_parallel_size == 0, \
'num_layers must be divisible by transformer_pipeline_model_parallel_size'
# When a standalone embedding stage is used, all transformer layers
# are divided among pipeline rank >= 1, while on pipeline rank 0,
# ranks either contain the input embedding layer (virtual pp rank 0),
# or no layers at all (virtual pp rank >= 1).
num_layers = (
0
if args.standalone_embedding_stage
and mpu.get_pipeline_model_parallel_rank() == 0 else
args.num_layers // args.transformer_pipeline_model_parallel_size
)
else:
if not is_decoder:
num_layers = args.encoder_num_layers
else:
num_layers = args.decoder_num_layers
return num_layers
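# --- Editor's sketch (not part of Megatron-LM) ---
# The encoder/decoder split above is plain integer division over the pipeline
# ranks assigned to each half; the numbers below are hypothetical.
def _example_layer_split(encoder_num_layers=12, decoder_num_layers=12,
                         pipeline_split_rank=2, pipeline_world_size=4):
    num_ranks_in_encoder = pipeline_split_rank
    num_ranks_in_decoder = pipeline_world_size - num_ranks_in_encoder
    return (encoder_num_layers // num_ranks_in_encoder,   # 6 layers per encoder rank
            decoder_num_layers // num_ranks_in_decoder)   # 6 layers per decoder rank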
def _get_layer_type(model_type, default_layer_type, retro_layer_numbers,
layer_number):
args = get_args()
if args.retro_add_retriever and layer_number in retro_layer_numbers:
if model_type == ModelType.retro_decoder:
return LayerType.retro_decoder_with_retriever \
if layer_number == retro_layer_numbers[0] \
else LayerType.retro_decoder
elif model_type == ModelType.retro_encoder:
return LayerType.retro_encoder
else:
raise Exception("Unsupported model type, '%s'." % model_type)
else:
return default_layer_type
class ParallelTransformer(MegatronModule):
"""Transformer class."""
def __init__(self, config,
model_type, layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
post_norm=True,
pre_process=True,
post_process=True,
drop_path_rate=0.0):
super(ParallelTransformer, self).__init__()
args = get_args()
self.layer_type = layer_type
self.model_type = model_type
self.bf16 = config.bf16
self.fp32_residual_connection = config.fp32_residual_connection
self.post_norm = post_norm
self.pre_process = pre_process
self.post_process = post_process
self.input_tensor = None
self.drop_path_rate = drop_path_rate
self.transformer_impl = args.transformer_impl
self.retro_add_retriever = args.retro_add_retriever
# Store activation checkpointing flag.
self.recompute_granularity = config.recompute_granularity
self.recompute_method = config.recompute_method
self.recompute_num_layers = config.recompute_num_layers
self.distribute_saved_activations = \
config.distribute_saved_activations and not config.sequence_parallel
self.sequence_parallel = config.sequence_parallel
# Transformer Engine Init.
self.transformer_engine_v_0_10 = False
self.transformer_engine_v_0_11 = False
self.transformer_engine_v_0_8 = False
if self.transformer_impl == 'transformer_engine':
global transformer_engine
import transformer_engine
from importlib.metadata import version
from pkg_resources import packaging
te_version = packaging.version.Version(version("transformer-engine"))
if te_version >= packaging.version.Version("0.8.0"):
self.transformer_engine_v_0_8 = True
if te_version >= packaging.version.Version("0.10.0"):
self.transformer_engine_v_0_10 = True
if te_version >= packaging.version.Version("0.11.0"):
self.transformer_engine_v_0_11 = True
del version, packaging
assert not args.squared_relu, "TransformerEngine does not support squared relu activation."
self.use_fp8 = args.fp8 is not None
self.fp8_recipe = None
self.fp8_group = None
if self.use_fp8:
assert args.transformer_impl == 'transformer_engine', \
'transformer-engine required for fp8 training and inference'
self.fp8_group = mpu.get_amax_reduction_group()
if args.fp8 == "e4m3":
fp8_format = transformer_engine.common.recipe.Format.E4M3
elif args.fp8 == "hybrid":
fp8_format = transformer_engine.common.recipe.Format.HYBRID
else:
raise ValueError("The DelayedScaling recipe only supports E4M3 and HYBRID formats.")
self.fp8_recipe = transformer_engine.common.recipe.DelayedScaling(
margin=args.fp8_margin,
interval=args.fp8_interval,
fp8_format=fp8_format,
amax_history_len=args.fp8_amax_history_len,
amax_compute_algo=args.fp8_amax_compute_algo,
override_linear_precision=(False, False, not args.fp8_wgrad),
)
self.num_microbatches_in_previous_step = -1
self.microbatch_count = 0
self.checkpoint_core_attention = config.recompute_granularity == 'selective'
# Number of layers.
self.num_layers = _get_num_layers(args, model_type,
layer_type==LayerType.decoder)
self.drop_path_rates = [
rate.item() for rate in
torch.linspace(0, self.drop_path_rate, config.num_layers)]
self.retro_layer_numbers = None
if model_type == ModelType.retro_decoder:
retro_layer_start = 6 if config.num_layers <= 15 else 9
self.retro_layer_numbers = \
np.arange(retro_layer_start, args.num_layers + 1, 3).tolist()
if model_type == ModelType.retro_encoder:
self.retro_layer_numbers = [1]
# Transformer layers.
if args.retro_add_retriever:
assert self.recompute_granularity != 'full', \
"Full recompute not supported for Retro."
assert args.transformer_impl == 'local', \
"Transformer engine does not support Retro layers."
def build_layer(layer_number):
if args.transformer_impl == 'local':
current_layer_type = _get_layer_type(
model_type, layer_type, self.retro_layer_numbers,
layer_number)
return ParallelTransformerLayer(
config,
layer_number,
layer_type=current_layer_type,
self_attn_mask_type=self_attn_mask_type,
drop_path_rate=self.drop_path_rates[layer_number - 1])
else:
# Some of these arguments are only available in newer TE versions, so gate them on the detected version.
extra_transformer_engine_kwargs = {}
if self.transformer_engine_v_0_8:
extra_transformer_engine_kwargs["bias"] = args.add_bias_linear
if self.transformer_engine_v_0_10:
extra_transformer_engine_kwargs["activation"] = "swiglu" if args.swiglu else "gelu"
if self.transformer_engine_v_0_11:
extra_transformer_engine_kwargs["normalization"] = args.normalization
return transformer_engine.pytorch.TransformerLayer(
config.hidden_size,
config.ffn_hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.layernorm_epsilon,
hidden_dropout=config.hidden_dropout,
attention_dropout=config.attention_dropout,
init_method=config.init_method,
output_layer_init_method=config.output_layer_init_method,
layer_number=layer_number,
kv_channels=config.kv_channels,
self_attn_mask_type=self_attn_mask_type.name,
tp_group=mpu.get_tensor_model_parallel_group(),
get_rng_state_tracker=tensor_parallel.get_cuda_rng_tracker,
fuse_wgrad_accumulation=config.gradient_accumulation_fusion,
apply_query_key_layer_scaling=config.apply_query_key_layer_scaling,
attention_softmax_in_fp32=config.attention_softmax_in_fp32,
seq_length=args.seq_length,
micro_batch_size=args.micro_batch_size,
sequence_parallel=config.sequence_parallel,
params_dtype=config.params_dtype,
apply_residual_connection_post_layernorm=config.apply_residual_connection_post_layernorm,
output_layernorm=False,
layer_type="encoder",
drop_path_rate=self.drop_path_rates[layer_number - 1],
set_parallel_mode=True,
fuse_qkv_params=True,
**extra_transformer_engine_kwargs)
if config.virtual_pipeline_model_parallel_size is not None:
assert config.num_layers % config.virtual_pipeline_model_parallel_size == 0, \
'num_layers_per_stage must be divisible by ' \
'virtual_pipeline_model_parallel_size'
assert args.model_type != ModelType.encoder_and_decoder
# Number of layers in each model chunk is the number of layers in the stage,
# divided by the number of model chunks in a stage.
self.num_layers = self.num_layers // config.virtual_pipeline_model_parallel_size
# With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0] [2] [4] [6]
# Stage 1: [1] [3] [5] [7]
# With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0, 1] [4, 5]
# Stage 1: [2, 3] [6, 7]
offset = mpu.get_virtual_pipeline_model_parallel_rank() * (
config.num_layers // config.virtual_pipeline_model_parallel_size) + \
(mpu.get_pipeline_model_parallel_rank() * self.num_layers)
else:
# Each stage gets a contiguous set of layers.
if args.model_type == ModelType.encoder_and_decoder and \
mpu.get_pipeline_model_parallel_world_size() > 1:
pipeline_rank = mpu.get_pipeline_model_parallel_rank()
if layer_type == LayerType.encoder:
offset = pipeline_rank * self.num_layers
else:
num_ranks_in_enc = args.pipeline_model_parallel_split_rank
offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers
else:
offset = mpu.get_pipeline_model_parallel_rank() * self.num_layers
if self.num_layers == 0:
# When a standalone embedding stage is used (e.g.,
# args.standalone_embedding_stage == True), virtual pipeline ranks
# on pipeline rank 0 will have zero transformer layers assigned to
them. This results in the model's input and output tensors being
# the same, which will cause failure for certain output tensor
# optimizations (e.g., pipeline output deallocation). To remedy
# this, we assign a 'no-op' layer on these ranks, which will
# disconnect the input tensor from the output tensor.
self.num_layers = 1
self.layers = torch.nn.ModuleList([ NoopTransformerLayer(1) ])
else:
self.layers = torch.nn.ModuleList(
[build_layer(i + 1 + offset) for i in range(self.num_layers)])
# Update dropout rate for Retro encoder.
if model_type == ModelType.retro_encoder:
for layer in self.layers:
if layer.self_attention.use_flash_attn:
layer.self_attention.core_attention_flash.dropout_p = \
    args.retro_encoder_attention_dropout
else:
layer.self_attention.core_attention.attention_dropout.p =\
args.retro_encoder_attention_dropout
layer.hidden_dropout = args.retro_encoder_hidden_dropout
if self.post_process and self.post_norm:
# Final layer norm before output.
self.final_norm = get_norm(config)
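# --- Editor's sketch (not part of Megatron-LM) ---
# Layer-offset arithmetic for the interleaved (virtual pipeline) schedule,
# reproducing the 8-layer / 2-stage / 2-virtual-stage assignment commented in
# __init__ above; pure arithmetic with hypothetical ranks.
def _example_interleaved_offsets(total_layers=8, pp_size=2, vpp_size=2):
    layers_per_chunk = total_layers // pp_size // vpp_size            # 2
    assignment = {}
    for pp_rank in range(pp_size):
        for vpp_rank in range(vpp_size):
            offset = vpp_rank * (total_layers // vpp_size) + pp_rank * layers_per_chunk
            assignment[(pp_rank, vpp_rank)] = list(range(offset, offset + layers_per_chunk))
    # {(0, 0): [0, 1], (0, 1): [4, 5], (1, 0): [2, 3], (1, 1): [6, 7]}
    return assignment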
def _get_layer(self, layer_number):
return self.layers[layer_number]
def _checkpointed_forward(self, hidden_states, attention_mask,
encoder_output, enc_dec_attn_mask,
rotary_pos_emb, is_first_microbatch):
"""Forward method with activation checkpointing."""
def custom(start, end):
def custom_forward(*args, **kwargs):
x_, *args = args
for index in range(start, end):
layer = self._get_layer(index)
x_ = layer(x_, *args, **kwargs)
return x_
return custom_forward
te_forward_kwargs = {}
if self.transformer_impl == 'transformer_engine':
te_forward_kwargs['is_first_microbatch'] = is_first_microbatch
if self.transformer_engine_v_0_10:
te_forward_kwargs['rotary_pos_emb'] = rotary_pos_emb
if self.recompute_method == 'uniform':
# Uniformly divide the total number of Transformer layers and
# checkpoint the input activation of each divided chunk.
# This reduces memory usage by storing only each chunk's input activation
# and re-computing the rest in the backward pass.
l = 0
while l < self.num_layers:
if self.transformer_impl == 'transformer_engine':
hidden_states = transformer_engine.pytorch.checkpoint(
custom(l, l + self.recompute_num_layers),
self.distribute_saved_activations,
tensor_parallel.get_cuda_rng_tracker,
mpu.get_tensor_model_parallel_group(),
hidden_states, attention_mask, encoder_output,
enc_dec_attn_mask, **te_forward_kwargs)
else:
hidden_states = tensor_parallel.checkpoint(
custom(l, l + self.recompute_num_layers),
self.distribute_saved_activations,
hidden_states, attention_mask,
encoder_output, enc_dec_attn_mask,
None, None, None, None, rotary_pos_emb)
l += self.recompute_num_layers
elif self.recompute_method == 'block':
# Checkpoint the input activation of only a set number of individual
# Transformer layers and skip the rest.
# This makes fuller use of device memory and avoids redundant re-computation
# for the remaining (non-checkpointed) layers.
for l in range(self.num_layers):
if l < self.recompute_num_layers:
if self.transformer_impl == 'transformer_engine':
hidden_states = transformer_engine.pytorch.checkpoint(
custom(l, l + 1),
self.distribute_saved_activations,
tensor_parallel.get_cuda_rng_tracker,
mpu.get_tensor_model_parallel_group(),
hidden_states, attention_mask, encoder_output,
enc_dec_attn_mask, **te_forward_kwargs)
else:
hidden_states = tensor_parallel.checkpoint(
custom(l, l + 1),
self.distribute_saved_activations,
hidden_states, attention_mask,
encoder_output, enc_dec_attn_mask,
None, None, None, None, rotary_pos_emb)
else:
if self.transformer_impl == 'transformer_engine':
hidden_states = custom(l, l + 1)(
hidden_states, attention_mask, encoder_output,
enc_dec_attn_mask, **te_forward_kwargs)
else:
hidden_states = custom(l, l + 1)(
hidden_states, attention_mask,
encoder_output, enc_dec_attn_mask,
None, None, None, None, rotary_pos_emb)
else:
raise ValueError("Invalid activation recompute method.")
return hidden_states
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(self, hidden_states, attention_mask,
encoder_output=None, enc_dec_attn_mask=None,
retriever_input=None,
retriever_output=None,
retriever_attn_mask=None,
inference_params=None,
rotary_pos_emb=None):
# hidden_states: [s, b, h]
# Checks.
if inference_params:
assert self.recompute_granularity is None, \
'inference does not work with activation checkpointing'
if not self.pre_process:
# See set_input_tensor()
hidden_states = self.input_tensor
# Viewless tensor.
# - We only need to create a viewless tensor in the case of micro batch
# size (mbs) == 1, since in this case, 'hidden_states.transpose()'
# above creates a view tensor, and '.contiguous()' is a pass-through.
# For mbs >= 2, '.contiguous()' creates a new tensor, eliminating
# the need to make it viewless.
#
# However, we don't explicitly check mbs == 1 here because
# make_viewless_tensor() has negligible overhead when its input
# is already viewless.
#
# - For the 'else' case above, calling make_viewless_tensor() here is
# likely redundant, since p2p_communication.py (likely originator)
# already creates viewless tensors. That said, make_viewless_tensor()
# is called here to be future-proof and corner-case-proof.
hidden_states = core.utils.make_viewless_tensor(
hidden_states,
requires_grad=True,
keep_graph=True,
)
# RNG context.
if self.sequence_parallel:
rng_context = tensor_parallel.get_cuda_rng_tracker().fork()
else:
rng_context = nullcontext()
# Forward layers.
with rng_context:
# The fp8_autocast context manager is a no-op when enabled=False.
# The if...else serves to short circuit name resolution for fp8_autocast
with transformer_engine.pytorch.fp8_autocast(
enabled=self.use_fp8,
fp8_recipe=self.fp8_recipe,
fp8_group=self.fp8_group
) if self.use_fp8 else nullcontext():
# Determine if the current iteration is first microbatch
if self.num_microbatches_in_previous_step != get_num_microbatches():
self.microbatch_count = 0 # Reset count on new batch size rampup interval
self.num_microbatches_in_previous_step = get_num_microbatches()
is_first_microbatch = self.microbatch_count % get_num_microbatches() == 0
# Forward pass.
if self.recompute_granularity == 'full':
hidden_states = self._checkpointed_forward(hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
is_first_microbatch)
else:
forward_kwargs = {
'encoder_output': encoder_output,
'enc_dec_attn_mask': enc_dec_attn_mask,
'inference_params': inference_params,
}
if self.transformer_impl == 'transformer_engine':
forward_kwargs['is_first_microbatch'] = is_first_microbatch
forward_kwargs['checkpoint_core_attention'] = self.checkpoint_core_attention
if self.transformer_engine_v_0_10:
forward_kwargs['rotary_pos_emb'] = rotary_pos_emb
else:
forward_kwargs['rotary_pos_emb'] = rotary_pos_emb
forward_kwargs['retriever_input'] = retriever_input
forward_kwargs['retriever_output'] = retriever_output
forward_kwargs['retriever_attn_mask'] = retriever_attn_mask
for index in range(self.num_layers):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states,
attention_mask,
**forward_kwargs)
# First Retro decoder layer returns both hidden_states
# and retriever_output. Make retriever_output available
# to subsequent Retro layers.
if isinstance(hidden_states, tuple):
assert len(hidden_states) == 2
hidden_states, retriever_output = hidden_states
forward_kwargs["retriever_output"] = retriever_output
# Skip counter update for eval and activation checkpointing
if torch.is_grad_enabled() and self.training:
self.microbatch_count += 1
# Final layer norm.
if self.post_process and self.post_norm:
hidden_states = self.final_norm(hidden_states)
return hidden_states
def load_state_dict(self, state_dict, strict=True):
"""Customize load."""
# Handle renaming layernorm -> norm in component names
state_dict_ = {}
for key in state_dict.keys():
newkey = key.replace("layernorm", "norm")
state_dict_[newkey] = state_dict[key]
super().load_state_dict(state_dict_, strict)
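# --- Editor's sketch (not part of Megatron-LM) ---
# The customized load above only rewrites checkpoint keys that still use the
# older "layernorm" component names; the keys shown are hypothetical.
def _example_rename_layernorm_keys():
    old = {'layers.0.input_layernorm.weight': 1.0,
           'layers.0.post_attention_layernorm.bias': 0.0}
    return {k.replace('layernorm', 'norm'): v for k, v in old.items()}
    # -> {'layers.0.input_norm.weight': 1.0,
    #     'layers.0.post_attention_norm.bias': 0.0}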
|
Megatron-LM-master
|
megatron/model/transformer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""GPT-2 model."""
import torch
from megatron import get_args
from megatron.core import tensor_parallel
from .module import MegatronModule
from .enums import AttnMaskType
from .language_model import parallel_lm_logits
from .language_model import get_language_model
def post_language_model_processing(lm_output, labels, logit_weights,
parallel_output,
fp16_lm_cross_entropy):
# Output. Format [s b h]
output = parallel_lm_logits(
lm_output,
logit_weights,
parallel_output)
if labels is None:
# [s b h] => [b s h]
return output.transpose(0,1).contiguous()
else:
# [b s] => [s b]
labels = labels.transpose(0,1).contiguous()
if fp16_lm_cross_entropy:
assert output.dtype == torch.half
loss = tensor_parallel.vocab_parallel_cross_entropy(output, labels)
else:
loss = tensor_parallel.vocab_parallel_cross_entropy(output.float(), labels)
# [s b] => [b, s]
loss = loss.transpose(0,1).contiguous()
return loss
class GPTModel(MegatronModule):
"""GPT-2 Language model."""
def __init__(self,
config,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True):
args = get_args()
super().__init__(config=config, share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights)
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.untie_embeddings_and_output_weights = args.untie_embeddings_and_output_weights
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=False,
encoder_attn_mask_type=AttnMaskType.causal,
pre_process=self.pre_process,
post_process=self.post_process)
if not args.untie_embeddings_and_output_weights:
self.initialize_word_embeddings()
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, input_ids, position_ids, attention_mask,
retriever_input_ids=None,
retriever_position_ids=None,
retriever_attn_mask=None,
labels=None, tokentype_ids=None, inference_params=None):
lm_output = self.language_model(
input_ids,
position_ids,
attention_mask,
retriever_input_ids=retriever_input_ids,
retriever_position_ids=retriever_position_ids,
retriever_attn_mask=retriever_attn_mask,
inference_params=inference_params)
if self.post_process:
return post_language_model_processing(
lm_output, labels,
self.language_model.output_layer.weight if self.untie_embeddings_and_output_weights else self.shared_embedding_or_output_weight(),
self.parallel_output,
self.fp16_lm_cross_entropy)
else:
return lm_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(
prefix=prefix, keep_vars=keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process and not self.untie_embeddings_and_output_weights:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(prefix=prefix,
keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Load word_embeddings.
if self.post_process and not self.pre_process and not self.untie_embeddings_and_output_weights:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
if self._language_model_key in state_dict:
state_dict = state_dict[self._language_model_key]
self.language_model.load_state_dict(state_dict, strict=strict)
|
Megatron-LM-master
|
megatron/model/gpt_model.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Megatron Module"""
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from megatron import get_args
from megatron.core import mpu, tensor_parallel
_FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
_HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
_BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor)
def param_is_not_shared(param):
return not hasattr(param, 'shared') or not param.shared
class MegatronModule(torch.nn.Module):
"""Megatron specific extensions of torch Module with support
for pipelining."""
def __init__(self, config=None, share_embeddings_and_output_weights=True):
super(MegatronModule, self).__init__()
self.config = config
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""Use this function to override the state dict for
saving checkpoints."""
return self.state_dict(prefix=prefix, keep_vars=keep_vars)
def shared_embedding_or_output_weight(self):
if self.pre_process:
return self.language_model.embedding.word_embeddings.weight
else:
if not self.share_embeddings_and_output_weights:
raise Exception('shared_embedding_or_output_weight() called for last '
'stage, but share_embeddings_and_output_weights is false')
return self.word_embeddings.weight
def initialize_word_embeddings(self):
args = get_args()
if not self.share_embeddings_and_output_weights:
raise Exception('initialize_word_embeddings() was called but '
'share_embeddings_and_output_weights is false')
# This function just initializes the word embeddings in the final stage
# when we are using pipeline parallelism. Nothing to do if we aren't
# using pipeline parallelism.
if args.pipeline_model_parallel_size == 1:
return
# Parameters are shared between the word embeddings layers, and the
# heads at the end of the model. In a pipelined setup with more than
# one stage, the initial embedding layer and the head are on different
# workers, so we do the following:
# 1. Create a second copy of word_embeddings on the last stage, with
# initial parameters of 0.0.
# 2. Do an all-reduce between the first and last stage to ensure that
# the two copies of word_embeddings start off with the same
# parameter values.
# 3. In the training loop, all-reduce the grads of the two word_embeddings
#    layers before the weight update, so that every applied update is the
#    same on both stages.
if mpu.is_pipeline_last_stage() and not self.pre_process:
assert not mpu.is_pipeline_first_stage()
self._word_embeddings_for_head_key = 'word_embeddings_for_head'
# set word_embeddings weights to 0 here, then copy first
# stage's weights using all_reduce below.
self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
args.padded_vocab_size, self.config.hidden_size,
config=self.config, init_method=self.config.init_method)
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
# Zero out initial weights for decoder embedding.
# NOTE: We don't currently support T5 with the interleaved schedule.
if not mpu.is_pipeline_first_stage(ignore_virtual=True) and \
self.pre_process:
self.language_model.embedding.zero_parameters()
if not torch.distributed.is_initialized():
if not getattr(MegatronModule, "embedding_warning_printed", False):
print("WARNING! Distributed processes aren't initialized, so "
"word embeddings in the last layer are not initialized. "
"If you are just manipulating a model this is fine, but "
"this needs to be handled manually. If you are training "
"something is definitely wrong.")
MegatronModule.embedding_warning_printed = True
return
# Ensure that first and last stages have the same initial parameter
# values.
if mpu.is_rank_in_embedding_group():
torch.distributed.all_reduce(self.shared_embedding_or_output_weight().data,
group=mpu.get_embedding_group())
# Ensure that encoder(first stage) and decoder(split stage) position
# embeddings have the same initial parameter values
# NOTE: We don't currently support T5 with the interleaved schedule.
if mpu.is_rank_in_position_embedding_group() and \
args.pipeline_model_parallel_split_rank is not None:
# TODO: Support tokentype embedding.
self.language_model.embedding.cuda()
position_embeddings = self.language_model.embedding.position_embeddings
torch.distributed.all_reduce(position_embeddings.weight.data,
group=mpu.get_position_embedding_group())
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val`
is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
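# --- Editor's sketch (not part of Megatron-LM) ---
# conversion_helper walks nested tuples/lists, applies the conversion to every
# leaf, and preserves the container types; toy callable below.
def _example_conversion_helper():
    nested = (1.0, [2.0, (3.0, 4.0)])
    doubled = conversion_helper(nested, lambda v: v * 2)
    assert doubled == (2.0, [4.0, (6.0, 8.0)])
    return doubled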
def fp32_to_float16(val, float16_convertor):
"""Convert fp32 `val` to fp16/bf16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, _FLOAT_TYPES):
val = float16_convertor(val)
return val
return conversion_helper(val, half_conversion)
def float16_to_fp32(val):
"""Convert fp16/bf16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)):
val = val.float()
return val
return conversion_helper(val, float_conversion)
class Float16Module(MegatronModule):
def __init__(self, module, args):
super(Float16Module, self).__init__()
if args.fp16:
self.add_module('module', module.half())
def float16_convertor(val):
return val.half()
elif args.bf16:
self.add_module('module', module.bfloat16())
def float16_convertor(val):
return val.bfloat16()
else:
raise Exception('should not be here')
self.float16_convertor = float16_convertor
def set_input_tensor(self, input_tensor):
return self.module.set_input_tensor(input_tensor)
def forward(self, *inputs, **kwargs):
if mpu.is_pipeline_first_stage():
inputs = fp32_to_float16(inputs, self.float16_convertor)
outputs = self.module(*inputs, **kwargs)
if mpu.is_pipeline_last_stage():
outputs = float16_to_fp32(outputs)
return outputs
def state_dict(self, prefix='', keep_vars=False):
return self.module.state_dict(prefix=prefix, keep_vars=keep_vars)
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
return self.module.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
|
Megatron-LM-master
|
megatron/model/module.py
|
import os
import torch
from megatron import get_args, print_rank_0
from megatron.checkpointing import get_checkpoint_tracker_filename, get_checkpoint_name
from megatron.model import BertModel
from .module import MegatronModule
from megatron.core import mpu
from megatron.model.enums import AttnMaskType
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.language_model import get_language_model
from megatron.model.utils import scaled_init_method_normal
from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids
def general_ict_model_provider(only_query_model=False, only_block_model=False):
"""Build the model."""
args = get_args()
assert args.ict_head_size is not None, \
"Need to specify --ict-head-size to provide an ICTBertModel"
assert mpu.get_tensor_model_parallel_world_size() == 1 and mpu.get_pipeline_model_parallel_world_size() == 1, \
"Model parallel size > 1 not supported for ICT"
print_rank_0('building ICTBertModel...')
# simpler to just keep using 2 tokentypes since the LM we initialize with has 2 tokentypes
model = ICTBertModel(
ict_head_size=args.ict_head_size,
num_tokentypes=2,
parallel_output=True,
only_query_model=only_query_model,
only_block_model=only_block_model)
return model
class ICTBertModel(MegatronModule):
"""Bert-based module for Inverse Cloze task."""
def __init__(self,
ict_head_size,
num_tokentypes=1,
parallel_output=True,
only_query_model=False,
only_block_model=False):
super(ICTBertModel, self).__init__()
bert_kwargs = dict(
ict_head_size=ict_head_size,
num_tokentypes=num_tokentypes,
parallel_output=parallel_output
)
assert not (only_block_model and only_query_model)
self.use_block_model = not only_query_model
self.use_query_model = not only_block_model
if self.use_query_model:
# this model embeds (pseudo-)queries - Embed_input in the paper
self.query_model = IREncoderBertModel(**bert_kwargs)
self._query_key = 'question_model'
if self.use_block_model:
# this model embeds evidence blocks - Embed_doc in the paper
self.block_model = IREncoderBertModel(**bert_kwargs)
self._block_key = 'context_model'
def forward(self, query_tokens, query_attention_mask, block_tokens, block_attention_mask):
"""Run a forward pass for each of the models and return the respective embeddings."""
query_logits = self.embed_query(query_tokens, query_attention_mask)
block_logits = self.embed_block(block_tokens, block_attention_mask)
return query_logits, block_logits
def embed_query(self, query_tokens, query_attention_mask):
"""Embed a batch of tokens using the query model"""
if self.use_query_model:
query_types = torch.cuda.LongTensor(*query_tokens.shape).fill_(0)
query_ict_logits, _ = self.query_model.forward(query_tokens, query_attention_mask, query_types)
return query_ict_logits
else:
raise ValueError("Cannot embed query without query model.")
def embed_block(self, block_tokens, block_attention_mask):
"""Embed a batch of tokens using the block model"""
if self.use_block_model:
block_types = torch.cuda.LongTensor(*block_tokens.shape).fill_(0)
block_ict_logits, _ = self.block_model.forward(block_tokens, block_attention_mask, block_types)
return block_ict_logits
else:
raise ValueError("Cannot embed block without block model.")
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""Save dict with state dicts of each of the models."""
state_dict_ = {}
if self.use_query_model:
state_dict_[self._query_key] \
= self.query_model.state_dict_for_save_checkpoint(
prefix=prefix, keep_vars=keep_vars)
if self.use_block_model:
state_dict_[self._block_key] \
= self.block_model.state_dict_for_save_checkpoint(
prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Load the state dicts of each of the models"""
if self.use_query_model:
print("Loading ICT query model", flush=True)
self.query_model.load_state_dict(
state_dict[self._query_key], strict=strict)
if self.use_block_model:
print("Loading ICT block model", flush=True)
self.block_model.load_state_dict(
state_dict[self._block_key], strict=strict)
def init_state_dict_from_bert(self):
"""Initialize the state from a pretrained BERT model on iteration zero of ICT pretraining"""
args = get_args()
tracker_filename = get_checkpoint_tracker_filename(args.bert_load)
if not os.path.isfile(tracker_filename):
raise FileNotFoundError("Could not find BERT load for ICT")
with open(tracker_filename, 'r') as f:
iteration = int(f.read().strip())
assert iteration > 0
checkpoint_name = get_checkpoint_name(args.bert_load, iteration, False)
if mpu.get_data_parallel_rank() == 0:
print('global rank {} is loading checkpoint {}'.format(
torch.distributed.get_rank(), checkpoint_name))
try:
state_dict = torch.load(checkpoint_name, map_location='cpu')
except BaseException:
raise ValueError("Could not load checkpoint")
# load the LM state dict into each model
model_dict = state_dict['model']['language_model']
self.query_model.language_model.load_state_dict(model_dict)
self.block_model.language_model.load_state_dict(model_dict)
# give each model the same ict_head to begin with as well
query_ict_head_state_dict = self.state_dict_for_save_checkpoint()[self._query_key]['ict_head']
self.block_model.ict_head.load_state_dict(query_ict_head_state_dict)
class IREncoderBertModel(MegatronModule):
"""BERT-based encoder for queries or blocks used for learned information retrieval."""
def __init__(self, ict_head_size, num_tokentypes=2, parallel_output=True):
super(IREncoderBertModel, self).__init__()
args = get_args()
self.ict_head_size = ict_head_size
self.parallel_output = parallel_output
init_method = init_method_normal(args.init_method_std)
scaled_init_method = scaled_init_method_normal(args.init_method_std,
args.num_layers)
self.language_model, self._language_model_key = get_language_model(
num_tokentypes=num_tokentypes,
add_pooler=True,
encoder_attn_mask_type=AttnMaskType.padding,
init_method=init_method,
scaled_init_method=scaled_init_method)
self.ict_head = get_linear_layer(args.hidden_size, ict_head_size, init_method)
self._ict_head_key = 'ict_head'
def forward(self, input_ids, attention_mask, tokentype_ids=None):
extended_attention_mask = bert_extended_attention_mask(
attention_mask, next(self.language_model.parameters()).dtype)
position_ids = bert_position_ids(input_ids)
lm_output, pooled_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids)
# Output.
ict_logits = self.ict_head(pooled_output)
return ict_logits, None
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
state_dict_[self._ict_head_key] \
= self.ict_head.state_dict(prefix=prefix,
keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
self.ict_head.load_state_dict(
state_dict[self._ict_head_key], strict=strict)
|
Megatron-LM-master
|
megatron/model/realm_model.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import torch
from torch import nn
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
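# Editorial sketch, not part of the original file: a minimal, hypothetical usage
# example of RMSNorm, normalizing the last dimension of a hidden-state tensor.
def _example_rms_norm_usage():
    norm = RMSNorm(dim=8)
    x = torch.randn(4, 16, 8)   # (batch, sequence, hidden)
    y = norm(x)                 # same shape; each vector is scaled by 1/RMS, then by `weight`
    assert y.shape == x.shape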
|
Megatron-LM-master
|
megatron/model/rms_norm.py
|
# Copyright (c) 2021 Microsoft
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# Modified by Chunyuan Li (chunyl@microsoft.com)
# Swin Transformer
# --------------------------------------------------------
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
import torch.distributed as dist
from torch.nn.init import trunc_normal_
from megatron.model.transformer import DropPath
from megatron import get_args
from megatron.model import LayerNorm
import numpy as np
from math import sqrt
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None,
out_features=None, act_layer=nn.GELU, drop=0.):
super(Mlp, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class WindowAttention(nn.Module):
r"""Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super(WindowAttention, self).__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2 Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
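        # Editorial note, not part of the original file: a worked example for a 2x2
        # window. The 4 tokens have (h, w) coords (0,0), (0,1), (1,0), (1,1); relative
        # offsets are shifted to start at 0 and the row offset is scaled by
        # 2*Ww-1 = 3, so relative_position_index becomes
        #     [[4, 3, 1, 0],
        #      [5, 4, 2, 1],
        #      [7, 6, 4, 3],
        #      [8, 7, 5, 4]]
        # i.e. each entry indexes one of the (2*Wh-1)*(2*Ww-1) = 9 rows of the bias table.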
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0).type(attn.type())
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn_out = attn
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn_out
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
@staticmethod
def compute_macs(module, input, output):
B, N, C = input[0].shape
module.__flops__ += module.flops(N) * B
class SwinTransformerBlock(nn.Module):
r"""Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=(self.window_size, self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.H = input_resolution[0]
self.W = input_resolution[1]
self.attn_mask_dict = {}
def create_attn_mask(self, H, W):
# calculate attention mask for SW-MSA
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
img_mask = torch.zeros((1, Hp, Wp, 1)) # 1 Hp Wp 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
def forward(self, x):
B, L, C = x.shape
H = int(sqrt(L))
W = H
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
if H in self.attn_mask_dict.keys():
attn_mask = self.attn_mask_dict[H]
else:
self.attn_mask_dict[H] = self.create_attn_mask(self.H, self.W).to(x.device)
attn_mask = self.attn_mask_dict[H]
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows, attn = self.attn(x_windows, attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size} mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r"""Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
""" Forward function.
Args:
            x: Input feature, tensor size (B, H*W, C); the spatial size (H, W)
               is inferred from L = H*W assuming a square feature map.
"""
B, L, C = x.shape
H = int(sqrt(L))
W = H
x = x.view(B, H, W, C)
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
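# Editorial sketch, not part of the original file: an illustrative, hypothetical check
# of how PatchMerging halves the spatial resolution and doubles the channel dimension.
def _example_patch_merging_shapes():
    merge = PatchMerging(input_resolution=(8, 8), dim=16)
    x = torch.randn(2, 8 * 8, 16)   # (B, H*W, C)
    y = merge(x)                    # (B, (H/2)*(W/2), 2*C)
    assert y.shape == (2, 16, 32)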
class BasicLayer(nn.Module):
"""A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
x, _ = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def forward_with_features(self, x):
fea = []
for blk in self.blocks:
x, _ = blk(x)
fea.append(x)
if self.downsample is not None:
x = self.downsample(x)
return x, fea
def forward_with_attention(self, x):
attns = []
for blk in self.blocks:
x, attn = blk(x)
attns.append(attn)
if self.downsample is not None:
x = self.downsample(x)
return x, attns
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
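# Editorial sketch, not part of the original file: a hypothetical shape check for
# PatchEmbed with the default 4x4 patches on a 224x224 RGB image.
def _example_patch_embed_shapes():
    embed = PatchEmbed(img_size=224, patch_size=4, in_chans=3, embed_dim=96)
    x = torch.randn(1, 3, 224, 224)
    tokens = embed(x)               # (1, (224/4)*(224/4), 96) = (1, 3136, 96)
    assert tokens.shape == (1, 56 * 56, 96)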
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size.
patch_size (int | tuple(int)): Patch size.
in_chans (int): Number of input channels.
num_classes (int): Number of classes for classification head.
embed_dim (int): Embedding dimension.
depths (tuple(int)): Depth of Swin Transformer layers.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate.
drop_path_rate (float): Stochastic depth rate.
norm_layer (nn.Module): normalization layer.
ape (bool): If True, add absolute position embedding to the patch embedding.
patch_norm (bool): If True, add normalization after patch embedding.
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
# todo: to be implemented
return {'relative_position_bias_table'}
def forward(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x_region = self.norm(x) # B L C
x = self.avgpool(x_region.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward_feature_maps(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x_grid = self.norm(x) # B L C
x = self.avgpool(x_grid.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x, x_grid
def forward_selfattention(self, x, n=1):
        # n=1 returns the last layer's attention map; otherwise return attention maps from all layers
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
if n==1:
return self.forward_last_selfattention(x)
else:
return self.forward_all_selfattention(x)
def forward_last_selfattention(self, x):
for i, layer in enumerate(self.layers):
if i < len(self.layers) - 1:
x = layer(x)
else:
x, attns = layer.forward_with_attention(x)
return attns[-1]
def forward_all_selfattention(self, x):
attn_out = []
for layer in self.layers:
x, attns = layer.forward_with_attention(x)
attn_out += attns
return attn_out
def forward_return_n_last_blocks(self, x, n=1, return_patch_avgpool=False, depth=[]):
num_blks = sum(depth)
start_idx = num_blks - n
sum_cur = 0
for i, d in enumerate(depth):
sum_cur_new = sum_cur + d
if start_idx >= sum_cur and start_idx < sum_cur_new:
start_stage = i
start_blk = start_idx - sum_cur
sum_cur = sum_cur_new
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
# we will return the averaged token features from the `n` last blocks
# note: there is no [CLS] token in Swin Transformer
output = []
s = 0
for i, layer in enumerate(self.layers):
x, fea = layer.forward_with_features(x)
if i >= start_stage:
for x_ in fea[start_blk:]:
if i == len(self.layers)-1: # use the norm in the last stage
x_ = self.norm(x_)
x_avg = torch.flatten(self.avgpool(x_.transpose(1, 2)), 1) # B C
# print(f'Stage {i}, x_avg {x_avg.shape}')
output.append(x_avg)
start_blk = 0
return torch.cat(output, dim=-1)
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
if dist.get_rank() == 0:
print(f"GFLOPs layer_{i}: {layer.flops() / 1e9}")
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
def init_weights(self, pretrained='', pretrained_layers=[], verbose=True):
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location='cpu')
logging.info(f'=> loading pretrained model {pretrained}')
model_dict = self.state_dict()
pretrained_dict = {
k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
k.split('.')[0] in pretrained_layers
                    or pretrained_layers[0] == '*'
or 'relative_position_index' not in k
or 'attn_mask' not in k
)
if need_init:
if verbose:
logging.info(f'=> init {k} from {pretrained}')
if 'relative_position_bias_table' in k and v.size() != model_dict[k].size():
relative_position_bias_table_pretrained = v
relative_position_bias_table_current = model_dict[k]
L1, nH1 = relative_position_bias_table_pretrained.size()
L2, nH2 = relative_position_bias_table_current.size()
if nH1 != nH2:
logging.info(f"Error in loading {k}, passing")
else:
if L1 != L2:
logging.info(
'=> load_pretrained: resized variant: {} to {}'
.format((L1, nH1), (L2, nH2))
)
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2),
mode='bicubic')
v = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)
if 'absolute_pos_embed' in k and v.size() != model_dict[k].size():
absolute_pos_embed_pretrained = v
absolute_pos_embed_current = model_dict[k]
_, L1, C1 = absolute_pos_embed_pretrained.size()
_, L2, C2 = absolute_pos_embed_current.size()
                        if C1 != C2:
logging.info(f"Error in loading {k}, passing")
else:
if L1 != L2:
logging.info(
'=> load_pretrained: resized variant: {} to {}'
.format((1, L1, C1), (1, L2, C2))
)
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(-1, S1, S1, C1)
absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(0, 3, 1, 2)
absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(
absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')
v = absolute_pos_embed_pretrained_resized.permute(0, 2, 3, 1).flatten(1, 2)
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
def freeze_pretrained_layers(self, frozen_layers=[]):
for name, module in self.named_modules():
if (
name.split('.')[0] in frozen_layers
or '.'.join(name.split('.')[0:2]) in frozen_layers
                or (len(frozen_layers) > 0 and frozen_layers[0] == '*')
):
for _name, param in module.named_parameters():
param.requires_grad = False
logging.info(
'=> set param {} requires grad to False'
.format(name)
)
for name, param in self.named_parameters():
if (
name.split('.')[0] in frozen_layers
                or (len(frozen_layers) > 0 and frozen_layers[0] == '*')
and param.requires_grad is True
):
param.requires_grad = False
logging.info(
'=> set param {} requires grad to False'
.format(name)
)
return self
def get_swin(is_teacher=False):
args = get_args()
if args.swin_backbone_type == "tiny":
embed_dim = 96
depths = [2, 2, 6, 2]
num_heads = [3, 6, 12, 24]
drop_path_rate = 0.1
elif args.swin_backbone_type == 'h3':
embed_dim = 384
depths = [2, 2, 18, 2]
num_heads = [6, 12, 24, 48]
drop_path_rate = 0.2
else:
embed_dim = 128
depths = [2, 2, 18, 2]
num_heads = [4, 8, 16, 32]
drop_path_rate = 0.2
swin = SwinTransformer(
img_size=224,
in_chans=3,
num_classes=1000,
patch_size=4,
embed_dim=embed_dim,
depths=depths,
num_heads=num_heads,
window_size=7,
mlp_ratio=4,
qkv_bias=True,
drop_rate=0,
attn_drop_rate=0,
drop_path_rate=(0.0 if is_teacher else drop_path_rate),
norm_layer=partial(LayerNorm, eps=1e-6),
ape=False,
patch_norm=True,
)
return swin
|
Megatron-LM-master
|
megatron/model/vision/esvit_swin_backbone.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Vision Transformer(VIT) model."""
import torch
from torch.nn.init import trunc_normal_
from megatron import get_args
from megatron.model.utils import get_linear_layer
from megatron.model.vision.vit_backbone import VitBackbone, VitMlpHead
from megatron.model.vision.mit_backbone import mit_b3_avg
from megatron.model.module import MegatronModule
class VitClassificationModel(MegatronModule):
"""Vision Transformer Model."""
def __init__(self, config, num_classes, finetune=False,
pre_process=True, post_process=True):
super(VitClassificationModel, self).__init__()
args = get_args()
self.config = config
self.hidden_size = args.hidden_size
self.num_classes = num_classes
self.finetune = finetune
self.pre_process = pre_process
self.post_process = post_process
self.backbone = VitBackbone(
config=config,
pre_process=self.pre_process,
post_process=self.post_process,
single_token_output=True
)
if self.post_process:
if not self.finetune:
self.head = VitMlpHead(config, self.hidden_size, self.num_classes)
else:
self.head = get_linear_layer(
self.hidden_size,
self.num_classes,
torch.nn.init.zeros_
)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.backbone.set_input_tensor(input_tensor)
def forward(self, input):
hidden_states = self.backbone(input)
if self.post_process:
hidden_states = self.head(hidden_states)
return hidden_states
class MitClassificationModel(MegatronModule):
"""Mix vision Transformer Model."""
def __init__(self, num_classes,
pre_process=True, post_process=True):
super(MitClassificationModel, self).__init__()
args = get_args()
self.hidden_size = args.hidden_size
self.num_classes = num_classes
self.backbone = mit_b3_avg()
self.head = torch.nn.Linear(512, num_classes)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, torch.nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, torch.nn.Linear) and m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
pass
def forward(self, input):
hidden_states = self.backbone(input)
hidden_states = self.head(hidden_states)
return hidden_states
|
Megatron-LM-master
|
megatron/model/vision/classification.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
import apex
import einops
import torch
import torch.nn.functional as F
from megatron import get_args, print_rank_0
from megatron.model.utils import get_linear_layer
from megatron.model.vision.vit_backbone import VitBackbone
from megatron.model.module import MegatronModule
from megatron.model.vision.mit_backbone import mit_b3
from megatron.model.vision.utils import resize
class VitInpaintingModel(MegatronModule):
def __init__(self, config, pre_process=True, post_process=True):
super(VitInpaintingModel, self).__init__()
args = get_args()
self.config = config
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = config.hidden_size
self.backbone = VitBackbone(
config=config,
pre_process=self.pre_process,
post_process=self.post_process,
class_token=False,
)
self.patch_dim = args.patch_dim
self.img_h = args.img_h
self.img_w = args.img_w
self.seq_length = args.seq_length
# full mask
if self.post_process:
self.linear_decoder = get_linear_layer(
self.hidden_size,
self.backbone.flatten_dim,
torch.nn.init.zeros_
)
def set_input_tensor(self, input_tensor):
self.backbone.set_input_tensor(input_tensor)
def forward(self, input):
hidden_states = self.backbone(input)
if not self.post_process:
return hidden_states
decoded_output = self.linear_decoder(hidden_states)
output = einops.rearrange(
decoded_output,
"b (h w) (p1 p2 c) -> b c (h p1) (w p2)",
p1=self.patch_dim,
p2=self.patch_dim,
h=self.img_h//self.patch_dim,
w=self.img_w//self.patch_dim,
)
return output
class MLP(torch.nn.Module):
"""
Linear Embedding
"""
def __init__(self, input_dim=2048, embed_dim=768):
super().__init__()
self.proj = torch.nn.Linear(input_dim, embed_dim)
def forward(self, x):
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class MitInpaintingModel(MegatronModule):
"""Mix vision Transformer Model."""
def __init__(self, pre_process=True, post_process=True):
super(MitInpaintingModel, self).__init__()
self.pre_process = pre_process
self.post_process = post_process
args = get_args()
self.patch_dim = args.patch_dim
self.img_h = args.img_h
self.img_w = args.img_w
self.flatten_dim = self.patch_dim * self.patch_dim * 3
self.backbone = mit_b3()
self.in_channels = [64, 128, 320, 512]
self.embedding_dim = 768
c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels
self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=self.embedding_dim)
self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=self.embedding_dim)
self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=self.embedding_dim)
self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=self.embedding_dim)
self.conv_fuse = torch.nn.Conv2d(self.embedding_dim*4, self.embedding_dim, 1, 1, bias=False)
self.norm = apex.parallel.SyncBatchNorm(self.embedding_dim)
self.dropout = torch.nn.Dropout2d(0.1)
self.linear_pred = torch.nn.Conv2d(self.embedding_dim, self.flatten_dim, kernel_size=1)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
pass
def forward(self, input):
c1, c2, c3, c4 = self.backbone(input)
n, _, h, w = c4.shape
_c4 = self.linear_c4(c4).permute(0, 2, 1).reshape(n, -1, c4.shape[2], c4.shape[3])
_c4 = resize(_c4, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c3 = self.linear_c3(c3).permute(0, 2, 1).reshape(n, -1, c3.shape[2], c3.shape[3])
_c3 = resize(_c3, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c2 = self.linear_c2(c2).permute(0, 2, 1).reshape(n, -1, c2.shape[2], c2.shape[3])
_c2 = resize(_c2, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c1 = self.linear_c1(c1).permute(0, 2, 1).reshape(n, -1, c1.shape[2], c1.shape[3])
_c = torch.cat([_c4, _c3, _c2, _c1], dim=1)
_c = self.conv_fuse(_c)
x = self.norm(_c)
x = F.relu(x, inplace=True)
x = self.dropout(x)
x = self.linear_pred(x)
output = einops.rearrange(
x,
"b (c p1 p2) h w -> b c (h p1) (w p2)",
p1=self.patch_dim,
p2=self.patch_dim,
h=self.img_h//self.patch_dim,
w=self.img_w//self.patch_dim,
)
return output
|
Megatron-LM-master
|
megatron/model/vision/inpainting.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Vision Transformer(VIT) model."""
import math
import einops
import torch
import apex
import torch.nn.functional as F
from megatron import get_args
from megatron.model.transformer import ParallelTransformer
from megatron.model.utils import (
get_linear_layer,
init_method_normal,
scaled_init_method_normal,
)
from megatron.model.module import MegatronModule
CLASS_TOKEN_LENGTH = 8
class VitMlpHead(MegatronModule):
"""Pooler layer.
Pool hidden states of a specific token (for example start of the
sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, config, hidden_size, num_classes):
super(VitMlpHead, self).__init__()
self.config = config
self.dense_in = torch.nn.Linear(hidden_size, hidden_size)
self.relu = torch.nn.ReLU()
self.dense_out = torch.nn.Linear(hidden_size, num_classes)
torch.nn.init.constant_(self.dense_out.bias, -10)
def forward(self, hidden_states):
# hidden_states: [b, 1, h]
# sequence_index: index of the token to pool.
dense_in_result = self.dense_in(hidden_states)
tanh_result = torch.tanh(dense_in_result)
dense_out_result = self.dense_out(tanh_result)
return dense_out_result
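# Editorial sketch, not part of the original file: a hypothetical standalone use of
# VitMlpHead; `config` is only stored by the head, so None is passed for illustration.
def _example_vit_mlp_head():
    head = VitMlpHead(config=None, hidden_size=8, num_classes=3)
    pooled = torch.randn(2, 1, 8)   # [b, 1, h]
    logits = head(pooled)           # [b, 1, num_classes]
    assert logits.shape == (2, 1, 3)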
def isPerfectSquare(x):
if(x >= 0):
sr = math.sqrt(x)
return (int(sr) * int(sr) == x)
return False
def twod_interpolate_position_embeddings_hook(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
args = get_args()
num_patches_per_dim_h = args.img_h // args.patch_dim
num_patches_per_dim_w = args.img_w // args.patch_dim
num_patches = num_patches_per_dim_h * num_patches_per_dim_w
hidden_size = args.hidden_size
key = prefix + "weight"
assert key in state_dict
if key in state_dict:
input_param = state_dict[key]
input_seq_len = input_param.shape[0]
assert(isPerfectSquare(input_seq_len) or isPerfectSquare(input_seq_len - CLASS_TOKEN_LENGTH))
input_has_class_token = not isPerfectSquare(input_seq_len)
num_tok_input = input_seq_len - CLASS_TOKEN_LENGTH if input_has_class_token else input_seq_len
num_tok_output = num_patches
output_has_class_token = args.class_token_present
# update input_param and load it to state_dict[key]
if input_has_class_token:
input_param_tok = input_param[:CLASS_TOKEN_LENGTH, :]
input_param_grid = input_param[CLASS_TOKEN_LENGTH:, :]
else:
input_param_tok = torch.zeros(CLASS_TOKEN_LENGTH, hidden_size)
input_param_grid = input_param
assert input_param.shape[1] == hidden_size
if num_tok_input != num_tok_output:
gs_input = int(math.sqrt(num_tok_input))
gs_new = (num_patches_per_dim_h, num_patches_per_dim_w)
input_param_grid = input_param_grid.transpose(0, 1).contiguous()
input_param_grid = input_param_grid.reshape(
(1, -1, gs_input, gs_input)
)
input_param_grid = input_param_grid.float()
scale_factor = (gs_new[0] / gs_input, gs_new[1] / gs_input)
input_param_grid = F.interpolate(
input_param_grid, scale_factor=scale_factor, mode="bilinear"
)
input_param_grid = input_param_grid.half()
input_param_grid = input_param_grid.reshape((-1, num_tok_output))
input_param_grid = input_param_grid.transpose(0, 1).contiguous()
assert input_param_grid.shape[1] == hidden_size
input_param = input_param_grid
assert (
input_param.shape[0] == num_tok_output
and input_param.shape[1] == hidden_size
)
if output_has_class_token:
input_param = torch.cat((input_param_tok, input_param), dim=0)
state_dict[key] = input_param
class VitBackbone(MegatronModule):
"""Vision Transformer Model."""
def __init__(self,
config,
pre_process=True,
post_process=True,
class_token=True,
single_token_output=False,
post_layer_norm=True,
drop_path_rate=0.0):
super(VitBackbone, self).__init__(share_embeddings_and_output_weights=False)
args = get_args()
self.config = config
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.pre_process = pre_process
self.post_process = post_process
self.class_token = class_token
self.post_layer_norm = post_layer_norm
self.hidden_size = args.hidden_size
self.patch_dim = args.patch_dim
self.img_h = args.img_h
self.img_w = args.img_w
self.micro_batch_size = args.micro_batch_size
self.single_token_output = single_token_output
self.drop_path_rate = drop_path_rate
assert self.img_h % self.patch_dim == 0
assert self.img_w % self.patch_dim == 0
self.num_patches_per_dim_h = self.img_h // self.patch_dim
self.num_patches_per_dim_w = self.img_w // self.patch_dim
self.num_patches = self.num_patches_per_dim_h * self.num_patches_per_dim_w
self.seq_length = self.num_patches + (CLASS_TOKEN_LENGTH if self.class_token else 0)
self.flatten_dim = self.patch_dim * self.patch_dim * args.num_channels
self.input_tensor = None
self.position_ids = None
if self.pre_process:
# cls_token
if self.class_token:
self.cls_token = torch.nn.Parameter(
torch.randn(1, CLASS_TOKEN_LENGTH, self.hidden_size)
)
torch.nn.init.zeros_(self.cls_token)
self.position_ids = torch.arange(self.seq_length).expand(1, -1).cuda()
# Linear encoder
self.linear_encoder = torch.nn.Linear(
self.flatten_dim, self.hidden_size
)
# embedding
self.position_embeddings = torch.nn.Embedding(
self.seq_length, self.hidden_size
)
init_method_normal(args.init_method_std)(
self.position_embeddings.weight
)
args.class_token_present = self.class_token
self.position_embeddings._register_load_state_dict_pre_hook(
twod_interpolate_position_embeddings_hook
)
self.embedding_dropout = torch.nn.Dropout(args.hidden_dropout)
# Transformer
self.transformer = ParallelTransformer(
config,
model_type=args.model_type,
pre_process=self.pre_process,
post_process=self.post_process,
post_layer_norm=self.post_layer_norm,
drop_path_rate=self.drop_path_rate
)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.transformer.set_input_tensor(input_tensor)
def forward(self, input):
if self.pre_process:
rearranged_input = einops.rearrange(
input,
"b c (h p1) (w p2) -> b (h w) (p1 p2 c)",
p1=self.patch_dim,
p2=self.patch_dim,
)
assert rearranged_input.dtype == torch.half
encoder_output = self.linear_encoder(rearranged_input)
concatenated_tokens = encoder_output
if self.class_token:
cls_tokens = self.cls_token.expand(encoder_output.shape[0], -1, -1)
concatenated_tokens = torch.cat((cls_tokens, encoder_output), dim=1)
token_embeddings = concatenated_tokens + \
self.position_embeddings(self.position_ids[:, :concatenated_tokens.shape[1]])
# [b, s, h] => [s, b, h]
token_embeddings = token_embeddings.transpose(0, 1).contiguous()
hidden_states = self.embedding_dropout(token_embeddings)
else:
hidden_states = input
hidden_states = self.transformer(hidden_states, None)
if self.post_process:
# [s b h] => [b s h]
if self.single_token_output:
hidden_states = hidden_states[0]
else:
hidden_states = hidden_states.transpose(0, 1).contiguous()
return hidden_states
|
Megatron-LM-master
|
megatron/model/vision/vit_backbone.py
|
import torch.nn.functional as F
import torch
from megatron import print_rank_0, get_args
from megatron.core import mpu
from megatron.data.vit_dataset import ClassificationTransform
from megatron.data.image_folder import ImageFolder
_FEATURE_BANK = None
def build_data_loader(dataset, drop_last=True, shuffle=False):
"""Data loader. Note that batch-size is the local (per GPU) batch-size."""
# Sampler.
args = get_args()
micro_batch_size = 16
num_workers = args.num_workers
world_size = mpu.get_data_parallel_world_size()
rank = mpu.get_data_parallel_rank()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank,
drop_last=drop_last, shuffle=shuffle
)
# Data loader. Note that batch size is the per GPU batch size.
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=micro_batch_size,
sampler=sampler,
shuffle=False,
num_workers=num_workers,
drop_last=not drop_last,
pin_memory=True,
)
return data_loader
def compute_feature_bank(model):
args = get_args()
global _FEATURE_BANK
feature_bank = []
feature_label = []
train_ds = ImageFolder(
root=args.data_path[0],
transform=ClassificationTransform((args.img_h, args.img_w), train=False),
data_per_class_fraction=1.0
)
classes = len(train_ds.classes)
dataloader = build_data_loader(train_ds)
for m in model:
m.eval()
with torch.no_grad():
for i, batch in enumerate(dataloader):
images = batch[0].cuda().contiguous()
labels = batch[1].cuda().contiguous()
student_feature, teacher_feature = model[0](images)
feature = F.normalize(teacher_feature.float(), dim=1)
feature_bank.append(feature)
feature_label.append(labels)
for m in model:
m.train()
# [N', D]
feature_bank = torch.cat(feature_bank, dim=0).contiguous()
feature_label = torch.cat(feature_label, dim=0).contiguous()
feature_banks = [torch.zeros_like(feature_bank)
for i in range(mpu.get_data_parallel_world_size())]
torch.distributed.all_gather(feature_banks,
feature_bank,
group=mpu.get_data_parallel_group())
assert torch.all(torch.eq(feature_banks[mpu.get_data_parallel_rank()],
feature_bank))
feature_labels = [torch.zeros_like(feature_label)
for i in range(mpu.get_data_parallel_world_size())]
torch.distributed.all_gather(feature_labels,
feature_label,
group=mpu.get_data_parallel_group())
# [D, N]
feature_banks = torch.cat(feature_banks, dim=0).t().contiguous()
# [N]
feature_labels = torch.cat(feature_labels, dim=0).contiguous()
print_rank_0("feature_banks size is {}".format(feature_banks.size()))
print_rank_0("feature labels size is {}".format(feature_labels.size()))
_FEATURE_BANK = (feature_banks, feature_labels, classes)
def get_feature_bank():
global _FEATURE_BANK
assert _FEATURE_BANK is not None
return _FEATURE_BANK
# knn monitor as in InstDisc https://arxiv.org/abs/1805.01978
# implementation follows http://github.com/zhirongw/lemniscate.pytorch and
# https://github.com/leftthomas/SimCLR
def knn_predict(feature, feature_bank, feature_labels, classes, knn_k, knn_t):
# compute cos similarity between each feature vector and feature bank ---> [B, N]
sim_matrix = torch.mm(feature, feature_bank)
# [B, K]
sim_weight, sim_indices = sim_matrix.topk(k=knn_k, dim=-1)
# [B, K]
sim_labels = torch.gather(feature_labels.expand(feature.size(0), -1),
dim=-1,
index=sim_indices)
sim_weight = (sim_weight / knn_t).exp()
# counts for each class
one_hot_label = torch.zeros(feature.size(0) * knn_k,
classes,
device=sim_labels.device)
# [B*K, C]
one_hot_label = one_hot_label.scatter(dim=-1,
index=sim_labels.view(-1, 1),
value=1.0)
# weighted score ---> [B, C]
pred_scores = torch.sum(
one_hot_label.view(feature.size(0), -1, classes) * sim_weight.unsqueeze(dim=-1),
dim=1)
pred_labels = pred_scores.argsort(dim=-1, descending=True)
return pred_labels
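# Editorial sketch, not part of the original file: a hypothetical, self-contained
# illustration of knn_predict on random data; shapes follow the comments above.
def _example_knn_predict():
    num_classes, bank_size, feat_dim = 10, 100, 32
    feature = F.normalize(torch.randn(4, feat_dim), dim=1)                # [B, D]
    feature_bank = F.normalize(torch.randn(feat_dim, bank_size), dim=0)   # [D, N]
    feature_labels = torch.randint(0, num_classes, (bank_size,))          # [N]
    pred = knn_predict(feature, feature_bank, feature_labels,
                       classes=num_classes, knn_k=5, knn_t=0.1)
    # pred[:, 0] holds the top-1 predicted class for each of the 4 query features.
    assert pred.shape == (4, num_classes)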
|
Megatron-LM-master
|
megatron/model/vision/knn_monitor.py
|
import warnings
import torch
import torch.nn.functional as F
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
            if output_h > input_h or output_w > input_w:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
                        'the output would be more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
if isinstance(size, torch.Size):
size = tuple(int(x) for x in size)
return F.interpolate(input, size, scale_factor, mode, align_corners)
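# Editorial sketch, not part of the original file: hypothetical usage of resize(),
# upsampling a feature map to a fixed spatial size as the inpainting head does.
def _example_resize_usage():
    x = torch.randn(1, 64, 16, 16)
    y = resize(x, size=(64, 64), mode='bilinear', align_corners=False)
    assert y.shape == (1, 64, 64, 64)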
|
Megatron-LM-master
|
megatron/model/vision/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this source tree.
# copied from https://github.com/facebookresearch/dino/blob/main/main_dino.py
# reworked/refactored some parts to make it run in Megatron.
import math
import apex
import einops
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn.init import trunc_normal_
from megatron import get_args, print_rank_0
from megatron.model.utils import get_linear_layer
from megatron.model.vision.vit_backbone import VitBackbone
from megatron.model.module import MegatronModule
from megatron.model.vision.mit_backbone import mit_b5_avg
from megatron.model.vision.esvit_swin_backbone import get_swin
class DINOLoss(torch.nn.Module):
def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
center_momentum=0.9):
super().__init__()
self.student_temp = student_temp
self.center_momentum = center_momentum
self.ncrops = ncrops
self.register_buffer("center", torch.zeros(1, out_dim))
        # we apply a warm-up for the teacher temperature because
        # too high a temperature makes the training unstable at the beginning
self.teacher_temp_schedule = np.concatenate((
np.linspace(warmup_teacher_temp,
teacher_temp, warmup_teacher_temp_epochs),
np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
))
self.teacher_temp = teacher_temp
def forward(self, student_output, teacher_output, iteration):
"""
Cross-entropy between softmax outputs of the teacher
and student network.
"""
args = get_args()
student_out = student_output / self.student_temp
student_out = student_out.chunk(self.ncrops)
epoch = iteration // args.iter_per_epoch
# teacher centering and sharpening
temp = self.teacher_temp_schedule[epoch]
teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
teacher_out = teacher_out.detach().chunk(2)
total_loss = 0
n_loss_terms = 0
for iq, q in enumerate(teacher_out):
for v in range(len(student_out)):
if v == iq:
# we skip cases where student and teacher operate on the same view
continue
loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)
total_loss += loss.mean()
n_loss_terms += 1
total_loss /= n_loss_terms
self.update_center(teacher_output)
return total_loss
@torch.no_grad()
def update_center(self, teacher_output):
"""
Update center used for teacher output.
"""
batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
torch.distributed.all_reduce(batch_center)
batch_center = batch_center / (len(teacher_output) * torch.distributed.get_world_size())
self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
class DINOHead(torch.nn.Module):
def __init__(self, in_dim, out_dim, norm_last_layer=True, nlayers=3):
super().__init__()
args = get_args()
hidden_dim = args.dino_head_hidden_size
bottleneck_dim = args.dino_bottleneck_size
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = torch.nn.Linear(in_dim, bottleneck_dim)
else:
layers = [torch.nn.Linear(in_dim, hidden_dim)]
layers.append(torch.nn.GELU())
for _ in range(nlayers - 2):
layers.append(torch.nn.Linear(hidden_dim, hidden_dim))
layers.append(torch.nn.GELU())
layers.append(torch.nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = torch.nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = torch.nn.utils.weight_norm(torch.nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, torch.nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, torch.nn.Linear) and m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = torch.nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
class MultiCropWrapper(MegatronModule):
"""
Perform forward pass separately on each resolution input.
    Inputs of the same resolution are grouped and passed through the backbone
    in a single forward call, so the number of backbone forward passes equals
    the number of distinct resolutions used. The output features are then
    concatenated and the head is run once on the concatenated features.
"""
def __init__(self, backbone, head):
super(MultiCropWrapper, self).__init__()
# disable layers dedicated to ImageNet labels classification
#backbone.fc, backbone.head = torch.nn.Identity(), torch.nn.Identity()
self.backbone = backbone
self.head = head
def forward(self, x):
# convert to list
if not isinstance(x, list):
x = [x]
idx_crops = torch.cumsum(torch.unique_consecutive(
torch.tensor([inp.shape[-1] for inp in x]),
return_counts=True,
)[1], 0)
start_idx = 0
for end_idx in idx_crops:
_out = self.backbone(torch.cat(x[start_idx: end_idx]))
if start_idx == 0:
output = _out
else:
output = torch.cat((output, _out))
start_idx = end_idx
# Run the head forward on the concatenated features.
if self.training:
return self.head(output)
else:
return output
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep,
warmup_epochs=0, start_warmup_value=0):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_epochs > 0:
warmup_schedule = \
np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = final_value + 0.5 * (base_value - final_value) \
* (1 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
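# Editorial sketch, not part of the original file: a hypothetical illustration of
# cosine_scheduler(), e.g. for the teacher-momentum schedule used further below.
def _example_cosine_scheduler():
    sched = cosine_scheduler(base_value=0.996, final_value=1.0,
                             epochs=10, niter_per_ep=100, warmup_epochs=0)
    assert len(sched) == 10 * 100
    # sched[iteration] is looked up once per step, as in DINOPretrainModel.update_momentum.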
def get_student_backbone_and_num_features(config, pre_process=True, post_process=True):
args = get_args()
if args.vision_backbone_type == 'vit':
student = VitBackbone(config,
pre_process=pre_process,
post_process=post_process,
drop_path_rate=0.1,
single_token_output=True)
num_features = args.hidden_size
elif args.vision_backbone_type == 'mit':
student = mit_b5_avg(drop_path_rate=0.1)
num_features = 512
elif args.vision_backbone_type == 'swin':
student = get_swin()
num_features = student.num_features
else:
raise Exception('{} vision backbone is not supported.'.format(
args.vision_backbone_type))
return student, num_features
def get_teacher_backbone_and_num_features(config, pre_process=True, post_process=True):
args = get_args()
if args.vision_backbone_type == 'vit':
teacher = VitBackbone(config,
pre_process=pre_process,
post_process=post_process,
single_token_output=True)
num_features = args.hidden_size
elif args.vision_backbone_type == 'mit':
teacher = mit_b5_avg(drop_path_rate=0.0)
num_features = 512
elif args.vision_backbone_type == 'swin':
teacher = get_swin(is_teacher=True)
num_features = teacher.num_features
else:
raise Exception('{} vision backbone is not supported.'.format(
args.vision_backbone_type))
return teacher, num_features
class DINOPretrainModel(MegatronModule):
def __init__(self, config, pre_process=True, post_process=True):
super(DINOPretrainModel, self).__init__()
args = get_args()
self.config = config
self.out_dim = 65536
self.dino_loss = DINOLoss(
self.out_dim,
args.dino_local_crops_number + 2,
args.dino_warmup_teacher_temp,
args.dino_teacher_temp,
args.dino_warmup_teacher_temp_epochs,
300,
)
self.pre_process = pre_process
self.post_process = post_process
self.momentum_teacher = 0.996
student_backbone, num_features = \
get_student_backbone_and_num_features(config, pre_process, post_process)
self.student = MultiCropWrapper(
student_backbone,
DINOHead(num_features, self.out_dim,
norm_last_layer=args.dino_norm_last_layer)
)
self.momentum_schedule = cosine_scheduler(
self.momentum_teacher, 1,
args.train_iters // args.iter_per_epoch,
args.iter_per_epoch
)
teacher_backbone, num_features = \
get_teacher_backbone_and_num_features(config, pre_process, post_process)
self.teacher = MultiCropWrapper(
teacher_backbone,
DINOHead(num_features, self.out_dim)
)
self.teacher.load_state_dict(self.student.state_dict())
for p in self.teacher.parameters():
if hasattr(p, "requires_grad") and p.requires_grad is not None:
p.requires_grad = False
def set_input_tensor(self, tensor):
pass
def forward(self, input):
student_output = None
if self.training:
student_output = self.student(input)
teacher_output = self.teacher(input[:2])
else:
teacher_output = self.teacher(input)
return student_output, teacher_output
def cancel_gradients_last_layer(self, iteration):
args = get_args()
epoch = iteration // args.iter_per_epoch
if epoch < args.dino_freeze_last_layer:
for n, p in self.student.named_parameters():
if "last_layer" in n:
p.grad = None
def update_momentum(self, iteration):
with torch.no_grad():
m = self.momentum_schedule[iteration]
for param_q, param_k in zip(self.student.parameters(), self.teacher.parameters()):
param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)
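# Sketch of how these hooks are typically wired into a DINO training step
# (`model`, `optimizer` and `iteration` are illustrative names, not part of this file):
#   student_output, teacher_output = model(images)
#   loss = model.dino_loss(student_output, teacher_output, iteration // args.iter_per_epoch)
#   loss.backward()
#   model.cancel_gradients_last_layer(iteration)
#   optimizer.step()
#   model.update_momentum(iteration)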
|
Megatron-LM-master
|
megatron/model/vision/dino.py
|
# Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from torch.nn.init import trunc_normal_
from megatron.model.transformer import DropPath
from megatron.model import LayerNorm
class Mlp(nn.Module):
def __init__(self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = self.fc1(x)
x = self.dwconv(x, H, W)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
proj_drop=0.,
sr_ratio=1):
super().__init__()
        assert dim % num_heads == 0, f"dim {dim} must be divisible by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = LayerNorm(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
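# Spatial-reduction attention, shape sketch (illustrative sizes): with sr_ratio=8 and a
# 56x56 feature map (N = 3136 tokens), keys/values are computed from the 7x7 reduced map
# (49 tokens), so each head's attention matrix is 3136x49 rather than 3136x3136.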
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
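# Shape sketch (illustrative input size): a 224x224 image through a patch_size=7,
# stride=4 embedding yields H = W = 56, so x has shape (B, 56 * 56, embed_dim).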
class MixVisionTransformer(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], output_avg=False):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.output_avg = output_avg
# patch_embed
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# transformer encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[0])
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
self.norm2 = norm_layer(embed_dims[1])
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
self.norm3 = norm_layer(embed_dims[2])
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm4 = norm_layer(embed_dims[3])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def freeze_patch_emb(self):
self.patch_embed1.requires_grad = False
def forward_features(self, x):
B = x.shape[0]
outs = []
# stage 1
x, H, W = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x = blk(x, H, W)
x = self.norm1(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 2
x, H, W = self.patch_embed2(x)
for i, blk in enumerate(self.block2):
x = blk(x, H, W)
x = self.norm2(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 3
x, H, W = self.patch_embed3(x)
for i, blk in enumerate(self.block3):
x = blk(x, H, W)
x = self.norm3(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 4
x, H, W = self.patch_embed4(x)
for i, blk in enumerate(self.block4):
x = blk(x, H, W)
x = self.norm4(x)
if not self.output_avg:
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
return outs
def forward(self, x):
x = self.forward_features(x)
if self.output_avg:
x = x[3].mean(dim=1)
return x
class DWConv(nn.Module):
def __init__(self, dim=768):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, x, H, W):
B, N, C = x.shape
x = x.transpose(1, 2).view(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2).transpose(1, 2)
return x
class mit_b0(MixVisionTransformer):
def __init__(self, **kwargs):
super(mit_b0, self).__init__(
patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class mit_b1(MixVisionTransformer):
def __init__(self, **kwargs):
super(mit_b1, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class mit_b2(MixVisionTransformer):
def __init__(self, **kwargs):
super(mit_b2, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class mit_b3(MixVisionTransformer):
def __init__(self, **kwargs):
super(mit_b3, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class mit_b3_avg(MixVisionTransformer):
def __init__(self, drop_path_rate=0.1, **kwargs):
super(mit_b3_avg, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=drop_path_rate, output_avg=True)
class mit_b4(MixVisionTransformer):
def __init__(self, **kwargs):
super(mit_b4, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class mit_b5(MixVisionTransformer):
def __init__(self, **kwargs):
super(mit_b5, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class mit_b5_avg(MixVisionTransformer):
def __init__(self, drop_path_rate=0.1, **kwargs):
super(mit_b5_avg, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=drop_path_rate, output_avg=True)
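# Usage sketch (assumed 224x224 input; not part of this file):
#   model = mit_b5_avg(drop_path_rate=0.1)
#   feats = model(torch.randn(2, 3, 224, 224))  # output_avg=True -> pooled (2, 512) features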
|
Megatron-LM-master
|
megatron/model/vision/mit_backbone.py
|
# Copyright (c) 2021 Microsoft
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# Swin Transformer
# --------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from math import sqrt
from megatron import get_args
from functools import partial
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None,
out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
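# Shape round-trip sketch (illustrative sizes): with window_size=7 and x of shape
# (B, 56, 56, C), window_partition returns (64*B, 7, 7, C) windows and
# window_reverse(windows, 7, 56, 56) recovers (B, 56, 56, C).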
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.H = input_resolution[0]
self.W = input_resolution[1]
self.attn_mask_dict = {}
def create_attn_mask(self, H, W):
# calculate attention mask for SW-MSA
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
img_mask = torch.zeros((1, Hp, Wp, 1)) # 1 Hp Wp 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
def forward(self, x):
B, L, C = x.shape
H = int(sqrt(L))
W = H
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            # Build the shifted-window attention mask once per resolution and cache it.
            if H in self.attn_mask_dict:
                attn_mask = self.attn_mask_dict[H]
            else:
                attn_mask = self.create_attn_mask(H, W).to(x.device)
                self.attn_mask_dict[H] = attn_mask
        else:
            shifted_x = x
            attn_mask = None
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
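# Shape sketch (illustrative sizes): for input_resolution=(56, 56) and dim=96, an input
# of shape (B, 3136, 96) is merged to (B, 784, 192): each 2x2 neighbourhood is
# concatenated to 4*dim channels and then linearly reduced to 2*dim.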
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
x_b4_ds = x
if self.downsample is not None:
x = self.downsample(x)
return x_b4_ds, x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3,
norm_layer=partial(nn.LayerNorm, eps=1e-6), ape=False, patch_norm=True,
use_checkpoint=False, output_avg=False, **kwargs):
super().__init__()
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
self.img_size = to_2tuple(img_size)
self.patch_size = to_2tuple(patch_size)
self.output_avg = output_avg
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
h = self.img_size[0] // self.patch_size[0]
w = self.img_size[1] // self.patch_size[1]
outs = []
for i, layer in enumerate(self.layers):
px, x = layer(x)
b, n, c = px.shape
if i != len(self.layers) - 1 or not self.output_avg:
px = px.permute(0, 2, 1).contiguous()
px = px.reshape(b, c, h, w)
            # The feature map is halved after each stage by the PatchMerging downsample.
h, w = h//2, w//2
outs.append(px)
if self.output_avg:
return outs[-1].mean(dim=1)
return outs
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
def get_swin(drop_path_rate=0.3, output_avg=False):
args = get_args()
window_size = 7
embed_dim = 128
depths = [2, 2, 18, 2]
num_heads = [4, 8, 16, 32]
swin = SwinTransformer(
img_size=(args.img_h, args.img_w,),
in_chans=3,
patch_size=args.patch_dim,
embed_dim=embed_dim,
depths=depths,
num_heads=num_heads,
window_size=window_size,
drop_path_rate=drop_path_rate,
output_avg=output_avg,
)
return swin
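# Sketch (assuming args.img_h = args.img_w = 224 and args.patch_dim = 4): this builds a
# Swin-B style backbone whose num_features is embed_dim * 2**(num_layers - 1)
# = 128 * 8 = 1024; callers such as the DINO backbone helpers read swin.num_features.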
|
Megatron-LM-master
|
megatron/model/vision/swin_backbone.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Blendable dataset."""
import hashlib
import os
import time
import numpy as np
import torch
from megatron import print_rank_0
from megatron.core import mpu
class BlendableDataset(torch.utils.data.Dataset):
def __init__(self, datasets, weights, size, *,
data_cache_path=None):
self.datasets = datasets
num_datasets = len(datasets)
assert num_datasets == len(weights)
self.size = size
# Normalize weights.
weights = np.array(weights, dtype=np.float64)
sum_weights = np.sum(weights)
assert sum_weights > 0.0
weights /= sum_weights
        # Build indices.
def _build_indices():
start_time = time.time()
assert num_datasets < 32767
            # Dataset index is a 16-bit integer to allow at least 2^15 datasets.
# PyTorch isn't happy casting numpy uint16 to a Torch Tensor,
# so we use int16 although a dataset_index can never be negative.
dataset_index = np.zeros(self.size, dtype=np.int16)
dataset_sample_index = np.zeros(self.size, dtype=np.int64)
from megatron.data import helpers
helpers.build_blending_indices(dataset_index, dataset_sample_index,
weights, num_datasets, self.size,
torch.distributed.get_rank() == 0)
print_rank_0('> elapsed time for building blendable dataset indices: '
'{:.2f} (sec)'.format(time.time() - start_time))
return dataset_index, dataset_sample_index
desc = "Blendable dataset\n\n"
desc += "Datasets:\n"
for dataset in datasets:
desc += dataset.desc + "\n\n"
desc += f"Weights: {weights}\n"
desc += f"Size: {size}\n"
self.desc = desc
if data_cache_path:
desc_hash = hashlib.md5(desc.encode('utf-8')).hexdigest()
desc_path = os.path.join(data_cache_path, desc_hash + ".dsc")
index_path = os.path.join(data_cache_path, desc_hash + "_index.npy")
sample_index_path = os.path.join(data_cache_path, desc_hash + "_sample_index.npy")
cache_hit = os.path.isfile(index_path) and os.path.isfile(sample_index_path)
cache_success = True
if torch.distributed.get_rank() == 0 and not cache_hit:
print(' > WARNING: could not find index map files for blendable'
' dataset, building indices on rank 0 ...', flush=True)
dataset_index, dataset_sample_index = _build_indices()
try:
os.makedirs(os.path.dirname(index_path), exist_ok=True)
with open(desc_path, 'wt') as fd:
fd.write(desc)
np.save(index_path, dataset_index, allow_pickle=True)
np.save(sample_index_path, dataset_sample_index,
allow_pickle=True)
except OSError:
print(f'There was an error trying to create the data cache directory ({data_cache_path})')
print('or a file in it. This is set with the --data-cache-path argument. Please')
print('ensure you have write access to this directory or specify one that you do have')
print('write access to.')
cache_success = False
counts = torch.cuda.LongTensor([cache_success])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
if counts[0].item() != (
torch.distributed.get_world_size() //
torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group())):
print_rank_0("Data index creation unsuccessful, exiting.")
exit()
# Load on all ranks.
print_rank_0(f'> loading blendable dataset index: {index_path}')
self.dataset_index = np.load(index_path, allow_pickle=True, mmap_mode='r')
assert self.dataset_index.size == self.size
print_rank_0(f'> loading blendable dataset sample index: {sample_index_path}')
self.dataset_sample_index = np.load(sample_index_path, allow_pickle=True, mmap_mode='r')
assert self.dataset_sample_index.size == self.size
else:
self.dataset_index, self.dataset_sample_index = _build_indices()
# Check size
_ = self.__getitem__(self.size - 1)
try:
_ = self.__getitem__(self.size)
            raise RuntimeError('BlendableDataset size is improperly bounded')
except IndexError:
pass
print_rank_0('> size of blendable dataset: '
'{} samples'.format(self.size))
def __len__(self):
return self.size
def __getitem__(self, idx):
dataset_idx = self.dataset_index[idx]
sample_idx = self.dataset_sample_index[idx]
return {
"dataset_idx" : dataset_idx,
**self.datasets[dataset_idx][sample_idx],
}
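# Behaviour sketch (hypothetical weights): with two datasets weighted [0.7, 0.3] and
# size=1000, build_blending_indices assigns roughly 700 global indices to dataset 0 and
# 300 to dataset 1; __getitem__ then maps a global index to (dataset_idx, sample_idx).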
|
Megatron-LM-master
|
megatron/data/blendable_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""BERT Style dataset."""
import numpy as np
import torch
from megatron import (
get_args,
get_tokenizer,
mpu,
print_rank_0
)
from megatron.data.dataset_utils import (
get_samples_mapping,
get_a_and_b_segments,
truncate_segments,
create_tokens_and_tokentypes,
create_masked_lm_predictions
)
class BertDataset(torch.utils.data.Dataset):
def __init__(self, name, indexed_dataset, data_prefix,
num_epochs, max_num_samples, masked_lm_prob,
max_seq_length, short_seq_prob, seed, binary_head):
# Params to store.
self.name = name
self.seed = seed
self.masked_lm_prob = masked_lm_prob
self.max_seq_length = max_seq_length
self.binary_head = binary_head
# Dataset.
self.indexed_dataset = indexed_dataset
# Build the samples mapping.
self.samples_mapping = get_samples_mapping(self.indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
self.max_seq_length - 3, # account for added tokens
short_seq_prob,
self.seed,
self.name,
self.binary_head)
# Vocab stuff.
tokenizer = get_tokenizer()
self.vocab_id_list = list(tokenizer.inv_vocab.keys())
self.vocab_id_to_token_dict = tokenizer.inv_vocab
self.cls_id = tokenizer.cls
self.sep_id = tokenizer.sep
self.mask_id = tokenizer.mask
self.pad_id = tokenizer.pad
def __len__(self):
return self.samples_mapping.shape[0]
def __getitem__(self, idx):
start_idx, end_idx, seq_length = self.samples_mapping[idx]
sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32))
return build_training_sample(sample, seq_length,
self.max_seq_length, # needed for padding
self.vocab_id_list,
self.vocab_id_to_token_dict,
self.cls_id, self.sep_id,
self.mask_id, self.pad_id,
self.masked_lm_prob, np_rng,
self.binary_head)
def build_training_sample(sample,
target_seq_length, max_seq_length,
vocab_id_list, vocab_id_to_token_dict,
cls_id, sep_id, mask_id, pad_id,
masked_lm_prob, np_rng, binary_head):
"""Biuld training sample.
Arguments:
        sample: A list of sentences in which each sentence is a list of token ids.
target_seq_length: Desired sequence length.
max_seq_length: Maximum length of the sequence. All values are padded to
this length.
vocab_id_list: List of vocabulary ids. Used to pick a random id.
vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
cls_id: Start of example id.
sep_id: Separator id.
mask_id: Mask token id.
pad_id: Padding token id.
masked_lm_prob: Probability to mask tokens.
        np_rng: Random number generator. Note that this rng state should be
             numpy and not python since python randint is inclusive for
             the upper bound whereas the numpy one is exclusive.
"""
if binary_head:
# We assume that we have at least two sentences in the sample
assert len(sample) > 1
assert target_seq_length <= max_seq_length
# Divide sample into two segments (A and B).
if binary_head:
tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample,
np_rng)
else:
tokens_a = []
for j in range(len(sample)):
tokens_a.extend(sample[j])
tokens_b = []
is_next_random = False
# Truncate to `target_sequence_length`.
max_num_tokens = target_seq_length
truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a),
len(tokens_b), max_num_tokens, np_rng)
    # Build tokens and tokentypes.
tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b,
cls_id, sep_id)
# Masking.
max_predictions_per_seq = masked_lm_prob * max_num_tokens
(tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob,
cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng)
# Padding.
tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \
= pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
masked_labels, pad_id, max_seq_length)
train_sample = {
'text': tokens_np,
'types': tokentypes_np,
'labels': labels_np,
'is_random': int(is_next_random),
'loss_mask': loss_mask_np,
'padding_mask': padding_mask_np,
'truncated': int(truncated)}
return train_sample
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
masked_labels, pad_id, max_seq_length):
"""Pad sequences and convert them to numpy."""
# Some checks.
num_tokens = len(tokens)
padding_length = max_seq_length - num_tokens
    assert padding_length >= 0, \
        f"num_tokens ({num_tokens}) is greater than " \
        f"max_seq_length ({max_seq_length})."
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [pad_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
    # Labels and loss mask.
labels = [-1] * max_seq_length
loss_mask = [0] * max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
labels_np = np.array(labels, dtype=np.int64)
loss_mask_np = np.array(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
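# Worked example (toy ids, not from a real tokenizer): with tokens=[101, 7, 8, 102],
# pad_id=0, max_seq_length=6, masked_positions=[2] and masked_labels=[9]:
#   tokens_np       = [101, 7, 8, 102, 0, 0]
#   padding_mask_np = [1, 1, 1, 1, 0, 0]
#   labels_np       = [-1, -1, 9, -1, -1, -1]
#   loss_mask_np    = [0, 0, 1, 0, 0, 0]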
|
Megatron-LM-master
|
megatron/data/bert_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""GPT style dataset."""
import hashlib
import os
import time
import numpy as np
import torch
from megatron import print_rank_0
from megatron.core import mpu
from megatron.data.blendable_dataset import BlendableDataset
from megatron.data.dataset_utils import get_datasets_weights_and_num_samples
from megatron.data.dataset_utils import get_train_valid_test_split_
from megatron.data.indexed_dataset import MMapIndexedDataset
def build_train_valid_test_datasets(data_prefix, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup,
train_data_prefix=None,
valid_data_prefix=None,
test_data_prefix=None,
return_doc_ids=False, *,
data_cache_path=None):
"""Build train, valid, and test datasets."""
if data_prefix:
print_rank_0("Single data path provided for train, valid & test")
# Single dataset.
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(data_prefix[0],
splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup,
data_cache_path=data_cache_path)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix,
train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
train_num_samples, valid_num_samples, test_num_samples = map(
sum,
zip(*datasets_train_valid_test_num_samples)
)
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
prefixes[i], splits_string,
datasets_train_valid_test_num_samples[i],
seq_length, seed, skip_warmup,
return_doc_ids,
data_cache_path=data_cache_path)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights, train_num_samples,
data_cache_path=data_cache_path)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights, valid_num_samples,
data_cache_path=data_cache_path)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights, test_num_samples,
data_cache_path=data_cache_path)
return (blending_train_dataset, blending_valid_dataset,
blending_test_dataset)
else:
print_rank_0("Separate data paths provided for train, valid & test. Split string will be ignored.")
train_dataset, valid_dataset, test_dataset = None, None, None
# Single dataset.
if train_data_prefix is not None:
train_dataset = build_dataset("train", train_data_prefix,
splits_string,
train_valid_test_num_samples[0],
seq_length, seed, skip_warmup,
data_cache_path=data_cache_path)
if valid_data_prefix is not None:
valid_dataset = build_dataset("valid", valid_data_prefix,
splits_string,
train_valid_test_num_samples[1],
seq_length, seed, False,
data_cache_path=data_cache_path)
if test_data_prefix is not None:
test_dataset = build_dataset("test", test_data_prefix,
splits_string,
train_valid_test_num_samples[2],
seq_length, seed, False,
data_cache_path=data_cache_path)
return (train_dataset, valid_dataset, test_dataset)
def _build_train_valid_test_datasets(data_prefix, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup,
return_doc_ids=False, *,
data_cache_path=None):
"""Build train, valid, and test datasets."""
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix,
skip_warmup)
total_num_of_documents = indexed_dataset.sizes.shape[0]
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
print_rank_0(' > dataset split:')
def print_split_stats(name, index):
print_rank_0(' {}:'.format(name))
print_rank_0(' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1],
splits[index + 1] - splits[index]))
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
documents = np.arange(start=splits[index], stop=splits[index + 1],
step=1, dtype=np.int32)
dataset = GPTDataset(name, data_prefix, documents, indexed_dataset,
splits_string,
train_valid_test_num_samples[index],
seq_length, seed,
return_doc_ids,
data_cache_path=data_cache_path)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def build_dataset(dataset_name, data_prefix,
splits_string, num_samples,
seq_length, seed, skip_warmup,
*,
data_cache_path=None):
dataset = None
if len(data_prefix) == 1:
dataset = _build_dataset(dataset_name, data_prefix[0],
splits_string, num_samples, seq_length,
seed, skip_warmup,
data_cache_path=data_cache_path)
else:
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix, num_samples)
prefixes, weights, dataset_num_samples = output
num_samples = sum(dataset_num_samples)
# Build individual datasets.
datasets = []
for i in range(len(prefixes)):
ds = _build_dataset(dataset_name, prefixes[i],
splits_string, dataset_num_samples[i],
seq_length, seed, skip_warmup,
data_cache_path=data_cache_path)
if ds:
datasets.append(ds)
if datasets:
dataset = BlendableDataset(datasets, weights, num_samples,
data_cache_path=data_cache_path)
return dataset
def _build_dataset(dataset_name, data_prefix, splits_string,
num_samples, seq_length, seed, skip_warmup,
*,
data_cache_path=None):
"""
Build dataset. This method is called when individual
train, valid, test datasets are provided
"""
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix,
skip_warmup)
total_num_of_documents = indexed_dataset.sizes.shape[0]
print_rank_0(' {}:'.format(dataset_name))
print_rank_0(' document indices in [0, {}) total of {} '
'documents'.format(total_num_of_documents, total_num_of_documents))
documents = np.arange(start=0, stop=total_num_of_documents,
step=1, dtype=np.int32)
dataset = GPTDataset(dataset_name, data_prefix, documents, indexed_dataset,
splits_string, num_samples, seq_length, seed,
data_cache_path=data_cache_path)
return dataset
def get_indexed_dataset_(data_prefix, skip_warmup):
"""Build indexed dataset."""
print_rank_0(' > building dataset index ...')
start_time = time.time()
indexed_dataset = MMapIndexedDataset(data_prefix, skip_warmup=skip_warmup)
print_rank_0(' > finished creating indexed dataset in {:4f} '
'seconds'.format(time.time() - start_time))
print_rank_0(' number of documents: {}'.format(
indexed_dataset.sizes.shape[0]))
return indexed_dataset
class GPTDataset(torch.utils.data.Dataset):
def __init__(self, name, data_prefix, documents, indexed_dataset,
splits_string, num_samples, seq_length, seed,
return_doc_ids=False, *,
data_cache_path=None):
self.name = name
self.indexed_dataset = indexed_dataset
self.return_doc_ids = return_doc_ids
# Checks
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
# Build index mappings.
self.doc_idx, self.sample_idx, self.shuffle_idx, self.desc, self.desc_hash = \
_build_index_mappings(self.name, data_prefix,
documents, self.indexed_dataset.sizes,
splits_string, num_samples, seq_length, seed,
data_cache_path=data_cache_path)
def __len__(self):
        # -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
return self.sample_idx.shape[0] - 1
def __getitem__(self, idx):
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
doc_ids = []
if doc_index_f == doc_index_l:
doc_ids.append(self.doc_idx[doc_index_f])
sample = self.indexed_dataset.get(self.doc_idx[doc_index_f],
offset=offset_f,
length=offset_l - offset_f + 1)
else:
# Otherwise, get the rest of the initial document.
doc_ids.append(self.doc_idx[doc_index_f])
sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f],
offset=offset_f)]
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
doc_ids.append(self.doc_idx[i])
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
# And finally add the relevant portion of last document.
doc_ids.append(self.doc_idx[doc_index_l])
sample_list.append(self.indexed_dataset.get(
self.doc_idx[doc_index_l],
length=offset_l + 1))
sample = np.concatenate(sample_list)
if self.return_doc_ids: # for retro preprocessing
return {'text': np.array(sample, dtype=np.int64),
'doc_ids': np.array(doc_ids, dtype=np.int64)}
else:
return {'text': np.array(sample, dtype=np.int64)}
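# Layout sketch: sample_idx[i] holds (position into doc_idx, token offset) for the first
# token of sample i, and sample i spans tokens up to and including the offset stored in
# sample_idx[i + 1], which is why a sample can cross document boundaries above.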
def _build_index_mappings(name, data_prefix, documents, sizes,
splits_string, num_samples, seq_length, seed,
*,
data_cache_path):
"""Build doc-idx, sample-idx, and shuffle-idx.
doc-idx: is an array (ordered) of documents to be used in training.
sample-idx: is the start document index and document offset for each
training sample.
shuffle-idx: maps the sample index into a random index into sample-idx.
"""
# Number of tokens in each epoch and number of required epochs.
tokens_per_epoch = _num_tokens(documents, sizes)
num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
# rng state
np_rng = np.random.RandomState(seed=seed)
# Filename of the index mappings.
desc = "GPT Dataset\n\n"
desc += f"Data prefix {data_prefix}\n"
desc += f"Dataset name {name}\n"
desc += f"Number of samples {num_samples}\n"
desc += f"Sequence length {seq_length}\n"
desc += f"Random seed {seed}\n"
desc += f"Split {splits_string}\n"
desc_hash = hashlib.md5(desc.encode('utf-8')).hexdigest()
desc_filename = desc_hash + ".dsc"
doc_idx_filename = desc_hash + '_doc_idx.npy'
sample_idx_filename = desc_hash + '_sample_idx.npy'
shuffle_idx_filename = desc_hash + '_shuffle_idx.npy'
    # Look for the cache in the main data dir first to avoid unnecessary
    # duplication, then look in data-cache-path if specified.
    # If nothing is found, use the last path looked in.
build_indices = True
prefixes = [os.path.join(os.path.dirname(data_prefix), 'index-cache')]
if data_cache_path is not None:
prefixes.append(data_cache_path)
for prefix in prefixes:
idx_path = {
'desc': os.path.join(prefix, desc_filename),
'doc': os.path.join(prefix, doc_idx_filename),
'sample': os.path.join(prefix, sample_idx_filename),
'shuffle': os.path.join(prefix, shuffle_idx_filename)
}
for f in idx_path.values():
if not os.path.isfile(f):
break
else:
# Found our files!
build_indices = False
break
data_cache_dir = os.path.dirname(idx_path['desc'])
data_cache_success = True
# Build the indexed mapping if not exist.
if build_indices and torch.distributed.get_rank() == 0:
print_rank_0(' > WARNING: could not find index map files, building '
'the indices on rank 0 ...')
        # For the last epoch, decide whether to include the entire epoch
        # in the global shuffle or not.
        # If we need only one epoch, then separating out the last epoch
        # does not mean anything.
if num_epochs == 1:
separate_last_epoch = False
print(' > only one epoch required, setting '
'separate_last_epoch to False', flush=True)
else:
# Get the number of samples for the last epoch
num_samples_from_epochs_minus_one = (
(num_epochs - 1) * tokens_per_epoch - 1) // seq_length
last_epoch_num_samples = num_samples - \
num_samples_from_epochs_minus_one
assert last_epoch_num_samples >= 0, \
'last epoch number of samples should be non-negative.'
num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
assert last_epoch_num_samples <= (num_samples_per_epoch + 1), \
'last epoch number of samples exceeded max value.'
# If we have less than 80% of the samples for the last epoch,
            # separate out the epoch and treat it differently.
# Note: the 80% number is just based on common sense and can
# be adjusted if needed.
separate_last_epoch = (last_epoch_num_samples <
int(0.80 * num_samples_per_epoch))
if separate_last_epoch:
string = ' > last epoch number of samples ({}) is smaller '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to True'
else:
string = ' > last epoch number of samples ({}) is larger '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to False'
print(string.format(last_epoch_num_samples,
num_samples_per_epoch), flush=True)
try:
os.makedirs(data_cache_dir, exist_ok=True)
# description
with open(idx_path['desc'], 'wt') as fd:
fd.write(desc)
# doc-idx.
start_time = time.time()
doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
separate_last_epoch)
np.save(idx_path['doc'], doc_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save doc-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# sample-idx.
start_time = time.time()
# Use C++ implementation for speed.
# First compile and then import.
from megatron.data import helpers
assert doc_idx.dtype == np.int32
assert sizes.dtype == np.int32
sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch)
np.save(idx_path['sample'], sample_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save sample-idx mapping '
                         '(seconds): {:4f}'.format(time.time() - start_time))
# shuffle-idx.
start_time = time.time()
            # -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
if separate_last_epoch:
num_samples_ = num_samples_from_epochs_minus_one
else:
num_samples_ = sample_idx.shape[0] - 1
shuffle_idx = _build_shuffle_idx(num_samples_,
sample_idx.shape[0] - 1, np_rng)
np.save(idx_path['shuffle'], shuffle_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save shuffle-idx mapping'
                         ' (seconds): {:4f}'.format(time.time() - start_time))
except OSError:
print(f'There was an error trying to create the data cache directory ({data_cache_dir})')
print('or a file in it. This defaults to a directory "index-cache" within the directory')
print('the data files are in and can be set with the --data-cache-path argument. Please')
print('ensure you have write access to this directory or specify one that you do have')
print('write access to.')
data_cache_success = False
counts = torch.cuda.LongTensor([data_cache_success])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
if counts[0].item() != (
torch.distributed.get_world_size() //
torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group())):
print_rank_0("Data index creation unsuccessful, exiting.")
exit()
# Load mappings.
start_time = time.time()
print_rank_0(f" > loading doc-idx mapping from {idx_path['doc']}")
doc_idx = np.load(idx_path['doc'], allow_pickle=True, mmap_mode='r')
print_rank_0(f" > loading sample-idx mapping from {idx_path['sample']}")
sample_idx = np.load(idx_path['sample'], allow_pickle=True, mmap_mode='r')
print_rank_0(f" > loading shuffle-idx mapping from {idx_path['shuffle']}")
shuffle_idx = np.load(idx_path['shuffle'], allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
sample_idx.shape[0]))
print_rank_0(' total number of epochs: {}'.format(num_epochs))
return doc_idx, sample_idx, shuffle_idx, desc, desc_hash
def _num_tokens(documents, sizes):
"""Total number of tokens in the dataset."""
return np.sum(sizes[documents])
def _num_epochs(tokens_per_epoch, seq_length, num_samples):
"""Based on number of samples and sequence lenght, calculate how many
epochs will be needed."""
num_epochs = 0
total_tokens = 0
while True:
num_epochs += 1
total_tokens += tokens_per_epoch
# -1 is because we need to retrieve seq_length + 1 token each time
# but the last token will overlap with the first token of the next
# sample except for the last sample.
if ((total_tokens - 1) // seq_length) >= num_samples:
return num_epochs
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
"""Build an array with length = number-of-epochs * number-of-dcuments.
Each index is mapped to a corresponding document."""
if not separate_last_epoch or num_epochs == 1:
doc_idx = np.mgrid[0:num_epochs, 0:len(documents)][1]
doc_idx[:] = documents
doc_idx = doc_idx.reshape(-1)
doc_idx = doc_idx.astype(np.int32)
np_rng.shuffle(doc_idx)
return doc_idx
doc_idx_first = _build_doc_idx(documents, num_epochs-1, np_rng, False)
doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
return np.concatenate((doc_idx_first, doc_idx_last))
def _build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch):
"""Sample index mapping is a 2D array with sizes
[number-of-samples + 1, 2] where [..., 0] contains
the index into `doc_idx` and [..., 1] is the
starting offset in that document."""
# Total number of samples. For -1 see comments in `_num_epochs`.
num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length
sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32)
# Index into sample_idx.
sample_index = 0
# Index into doc_idx.
doc_idx_index = 0
    # Beginning offset for each document.
doc_offset = 0
# Start with first document and no offset.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
while sample_index <= num_samples:
# Start with a fresh sequence.
remaining_seq_length = seq_length + 1
while remaining_seq_length != 0:
# Get the document length.
doc_id = doc_idx[doc_idx_index]
doc_length = sizes[doc_id] - doc_offset
# And add it to the current sequence.
remaining_seq_length -= doc_length
# If we have more than a full sequence, adjust offset and set
# remaining length to zero so we return from the while loop.
# Note that -1 here is for the same reason we have -1 in
# `_num_epochs` calculations.
if remaining_seq_length <= 0:
doc_offset += (remaining_seq_length + doc_length - 1)
remaining_seq_length = 0
else:
                # Otherwise, start from the beginning of the next document.
doc_idx_index += 1
doc_offset = 0
# Record the sequence.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
return sample_idx
def _build_shuffle_idx(num_samples, total_size, np_rng):
"""Build the range [0, size) and shuffle."""
print(' > building shuffle index with split [0, {}) and [{}, {}) '
'...'.format(num_samples, num_samples, total_size), flush=True)
dtype_ = np.uint32
if total_size >= (np.iinfo(np.uint32).max - 1):
dtype_ = np.int64
shuffle_idx_first = np.arange(start=0, stop=num_samples,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_first)
if num_samples == total_size:
return shuffle_idx_first
shuffle_idx_last = np.arange(start=num_samples, stop=total_size,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_last)
return np.concatenate((shuffle_idx_first, shuffle_idx_last))
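# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). A minimal, self-contained example showing how the
# doc-idx, sample-idx, and shuffle-idx mappings built above compose. All
# sizes, seeds, and sequence lengths below are made-up toy values.
def _example_index_mappings():
    np_rng = np.random.RandomState(seed=1234)
    documents = np.arange(4, dtype=np.int32)          # four toy documents
    sizes = np.array([7, 11, 5, 9], dtype=np.int32)   # tokens per document
    seq_length, num_samples = 8, 10
    tokens_per_epoch = _num_tokens(documents, sizes)
    num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
    doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
                             separate_last_epoch=False)
    # Pure-Python fallback shown here; the production path uses the C++ helpers.
    sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
                                   num_epochs, tokens_per_epoch)
    shuffle_idx = _build_shuffle_idx(sample_idx.shape[0] - 1,
                                     sample_idx.shape[0] - 1, np_rng)
    # Sample i is read from [sample_idx[j], sample_idx[j + 1]) with
    # j = shuffle_idx[i]; each sample_idx row is (doc position, token offset).
    j = shuffle_idx[0]
    print('sample 0 -> doc positions', (sample_idx[j][0], sample_idx[j + 1][0]),
          'offsets', (sample_idx[j][1], sample_idx[j + 1][1]))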
|
Megatron-LM-master
|
megatron/data/gpt_dataset.py
|
import itertools
import os
import pickle
import shutil
import numpy as np
import torch
from megatron import get_args
from megatron.core import mpu
def detach(tensor):
return tensor.detach().cpu().numpy()
class OpenRetreivalDataStore(object):
"""
Serializable data structure for holding data for blocks --
embeddings and necessary metadata for Retriever
"""
def __init__(self, embedding_path=None, load_from_path=True, rank=None):
self.embed_data = dict()
if embedding_path is None:
args = get_args()
embedding_path = args.embedding_path
rank = args.rank
self.embedding_path = embedding_path
self.rank = rank
if load_from_path:
self.load_from_file()
block_data_name = os.path.splitext(self.embedding_path)[0]
self.temp_dir_name = block_data_name + '_tmp'
def state(self):
return {
'embed_data': self.embed_data,
}
def clear(self):
"""
Clear the embedding data structures to save memory.
The metadata ends up getting used, and is also much smaller in
dimensionality so it isn't really worth clearing.
"""
self.embed_data = dict()
def load_from_file(self):
"""Populate members from instance saved to file"""
if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0:
print("\n> Unpickling BlockData", flush=True)
state_dict = pickle.load(open(self.embedding_path, 'rb'))
if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0:
print(">> Finished unpickling BlockData\n", flush=True)
self.embed_data = state_dict['embed_data']
def add_block_data(self, row_id, block_embeds, allow_overwrite=False):
"""
Add data for set of blocks
:param row_id: 1D array of unique int ids for the blocks
:param block_embeds: 2D array of embeddings of the blocks
In the case of retriever this will be [start_idx, end_idx, doc_idx]
"""
for idx, embed in zip(row_id, block_embeds):
if not allow_overwrite and idx in self.embed_data:
raise ValueError("Unexpectedly tried to overwrite block data")
self.embed_data[idx] = np.float16(embed)
def save_shard(self):
"""
        Save the block data that was created in this process
"""
if not os.path.isdir(self.temp_dir_name):
os.makedirs(self.temp_dir_name, exist_ok=True)
# save the data for each shard
with open('{}/{}.pkl'.format(self.temp_dir_name, self.rank), 'wb') \
as writer:
pickle.dump(self.state(), writer)
def merge_shards_and_save(self):
        # Combine all the shards made using save_shard
shard_names = os.listdir(self.temp_dir_name)
seen_own_shard = False
for fname in os.listdir(self.temp_dir_name):
shard_rank = int(os.path.splitext(fname)[0])
if shard_rank == self.rank:
seen_own_shard = True
continue
with open('{}/{}'.format(self.temp_dir_name, fname), 'rb') as f:
data = pickle.load(f)
old_size = len(self.embed_data)
shard_size = len(data['embed_data'])
# add the shard's data and check to make sure there
# is no overlap
self.embed_data.update(data['embed_data'])
assert len(self.embed_data) == old_size + shard_size
assert seen_own_shard
# save the consolidated shards and remove temporary directory
with open(self.embedding_path, 'wb') as final_file:
pickle.dump(self.state(), final_file)
shutil.rmtree(self.temp_dir_name, ignore_errors=True)
print("Finished merging {} shards for a total of {} embeds".format(
len(shard_names), len(self.embed_data)), flush=True)
class FaissMIPSIndex(object):
"""
    Wrapper object for a BlockData that performs similarity search via FAISS under the hood
"""
def __init__(self, embed_size, embed_data=None, use_gpu=False):
self.embed_size = embed_size
self.embed_data = embed_data
self.use_gpu = use_gpu
self.mips_index = None
self._set_mips_index()
def _set_mips_index(self):
"""
Create a Faiss Flat index with inner product as the metric
to search against
"""
try:
import faiss
except ImportError:
raise Exception("Error: Please install faiss to use FaissMIPSIndex")
if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0:
print("\n> Building index", flush=True)
cpu_index = faiss.IndexFlatIP(self.embed_size)
if self.use_gpu:
# create resources and config for GpuIndex
config = faiss.GpuMultipleClonerOptions()
config.shard = True
config.useFloat16 = True
gpu_index = faiss.index_cpu_to_all_gpus(cpu_index, co=config)
self.mips_index = faiss.IndexIDMap(gpu_index)
if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0:
print(">> Initialized index on GPU", flush=True)
else:
# CPU index supports IDs so wrap with IDMap
self.mips_index = faiss.IndexIDMap(cpu_index)
if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0:
print(">> Initialized index on CPU", flush=True)
# if we were constructed with a BlockData, then automatically load it
# when the FAISS structure is built
if self.embed_data is not None:
self.add_embed_data(self.embed_data)
def reset_index(self):
"""Delete existing index and create a new"""
del self.mips_index
# reset the block data so that _set_block_index will reload it as well
if self.embed_data is not None:
embed_data_path = self.embed_data.embedding_path
del self.embed_data
self.embed_data = OpenRetreivalDataStore(embed_data_path)
self._set_mips_index()
def update_index(self):
"""Delete existing index and create a new"""
del self.mips_index
# reset the block data so that _set_mips_index will reload it as well
if self.embed_data is not None:
self.embed_data.load_from_file()
self._set_mips_index()
def add_embed_data(self, all_embed_data):
"""Add the embedding of each block to the underlying FAISS index"""
# this assumes the embed_data is a dict : {int: np.array<float>}
block_indices, block_embeds = zip(*all_embed_data.embed_data.items())
# the embeddings have to be entered in as float32 even though the math
# internally is done with float16.
embeds_arr = np.float32(np.array(block_embeds))
indices_arr = np.array(block_indices)
# we no longer need the embedding data since it's in the index now
all_embed_data.clear()
self.mips_index.add_with_ids(embeds_arr, indices_arr)
if not mpu.model_parallel_is_initialized() or mpu.get_data_parallel_rank() == 0:
print(">>> Finished adding block data to index", flush=True)
def search_mips_index(self, query_embeds, top_k, reconstruct=True):
"""
Get the top-k blocks by the index distance metric.
:param reconstruct: if True: return a [num_queries x k x embed_dim]
array of blocks
if False: return [num_queries x k] array of
distances, and another for indices
"""
query_embeds = np.float32(detach(query_embeds))
if reconstruct:
# get the vectors themselves
top_k_block_embeds = self.mips_index.search_and_reconstruct(\
query_embeds, top_k)
return top_k_block_embeds
else:
# get distances and indices of closest vectors
distances, block_indices = self.mips_index.search(query_embeds, top_k)
return distances, block_indices
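# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). Assumes the `faiss` package is installed and that
# Megatron's model parallelism is not initialized, so the rank-0 prints above
# run unconditionally. The path, sizes, and random data are made up.
def _example_faiss_mips_index():
    embed_size = 16
    index = FaissMIPSIndex(embed_size=embed_size, use_gpu=False)
    # Fake data store holding a handful of random block embeddings.
    store = OpenRetreivalDataStore(embedding_path='/tmp/example_embeds.pkl',
                                   load_from_path=False, rank=0)
    store.add_block_data(row_id=np.arange(8),
                         block_embeds=np.random.randn(8, embed_size))
    index.add_embed_data(store)
    # Query with two random vectors and fetch the top-3 closest blocks.
    queries = torch.randn(2, embed_size)
    distances, block_indices = index.search_mips_index(queries, top_k=3,
                                                       reconstruct=False)
    print(distances.shape, block_indices.shape)        # (2, 3), (2, 3)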
|
Megatron-LM-master
|
megatron/data/realm_index.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Dataloaders."""
import random
import torch
import numpy as np
from torch.utils.data import Dataset
from megatron import get_args
from megatron.core import mpu
def build_pretraining_data_loader(dataset, consumed_samples):
"""Buld dataloader given an input dataset."""
if dataset is None:
return None
args = get_args()
# Megatron sampler
if args.dataloader_type == 'single':
batch_sampler = MegatronPretrainingSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=args.micro_batch_size,
data_parallel_rank=mpu.get_data_parallel_rank(),
data_parallel_size=mpu.get_data_parallel_world_size())
elif args.dataloader_type == 'cyclic':
batch_sampler = MegatronPretrainingRandomSampler(
dataset,
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=args.micro_batch_size,
data_parallel_rank=mpu.get_data_parallel_rank(),
data_parallel_size=mpu.get_data_parallel_world_size(),
data_sharding=args.data_sharding)
else:
raise Exception('{} dataloader type is not supported.'.format(
args.dataloader_type))
# Torch dataloader.
return torch.utils.data.DataLoader(dataset,
batch_sampler=batch_sampler,
num_workers=args.num_workers,
pin_memory=True)
class MegatronPretrainingSampler:
def __init__(self, total_samples, consumed_samples, micro_batch_size,
data_parallel_rank, data_parallel_size, drop_last=True):
# Keep a copy of input params for later use.
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.micro_batch_times_data_parallel_size = \
self.micro_batch_size * data_parallel_size
self.drop_last = drop_last
# Sanity checks.
assert self.total_samples > 0, \
'no sample to consume: {}'.format(self.total_samples)
assert self.consumed_samples < self.total_samples, \
'no samples left to consume: {}, {}'.format(self.consumed_samples,
self.total_samples)
assert self.micro_batch_size > 0
assert data_parallel_size > 0
assert self.data_parallel_rank < data_parallel_size, \
'data_parallel_rank should be smaller than data size: {}, ' \
'{}'.format(self.data_parallel_rank, data_parallel_size)
def __len__(self):
return self.total_samples
def get_start_end_idx(self):
start_idx = self.data_parallel_rank * self.micro_batch_size
end_idx = start_idx + self.micro_batch_size
return start_idx, end_idx
def __iter__(self):
batch = []
        # The last batch will be dropped unless drop_last is set to False
for idx in range(self.consumed_samples, self.total_samples):
batch.append(idx)
if len(batch) == self.micro_batch_times_data_parallel_size:
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
batch = []
        # Check the last partial batch and see whether drop_last is set
if len(batch) > 0 and not self.drop_last:
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
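# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). Toy values showing how each data-parallel rank
# receives its own slice of every global batch.
def _example_pretraining_sampler():
    sampler = MegatronPretrainingSampler(total_samples=12,
                                         consumed_samples=0,
                                         micro_batch_size=2,
                                         data_parallel_rank=1,
                                         data_parallel_size=3)
    # A global batch spans micro_batch_size * data_parallel_size = 6 sample
    # indices; rank 1 sees elements [2:4] of each global batch.
    for micro_batch in sampler:
        print(micro_batch)     # [2, 3] then [8, 9]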
class RandomSeedDataset(Dataset):
def __init__(self, dataset):
args = get_args()
self.base_seed = args.seed
self.curr_seed = args.seed
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def set_epoch(self, epoch):
self.curr_seed = self.base_seed + epoch
def __getitem__(self, idx):
seed = idx + self.curr_seed
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
return self.dataset[idx]
class MegatronPretrainingRandomSampler:
def __init__(self, dataset, total_samples, consumed_samples, micro_batch_size,
data_parallel_rank, data_parallel_size, data_sharding):
# Keep a copy of input params for later use.
self.dataset = dataset
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.data_parallel_size = data_parallel_size
self.data_sharding = data_sharding
self.micro_batch_times_data_parallel_size = \
self.micro_batch_size * data_parallel_size
self.last_batch_size = \
self.total_samples % self.micro_batch_times_data_parallel_size
# Sanity checks.
assert self.total_samples > 0, \
'no sample to consume: {}'.format(self.total_samples)
assert self.micro_batch_size > 0
assert data_parallel_size > 0
assert self.data_parallel_rank < data_parallel_size, \
'data_parallel_rank should be smaller than data size: {}, ' \
'{}'.format(self.data_parallel_rank, data_parallel_size)
def __len__(self):
return self.total_samples
def __iter__(self):
active_total_samples = self.total_samples - self.last_batch_size
self.epoch = self.consumed_samples // active_total_samples
current_epoch_samples = self.consumed_samples % active_total_samples
assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0
if isinstance(self.dataset, RandomSeedDataset):
self.dataset.set_epoch(self.epoch)
# data sharding and random sampling
if self.data_sharding:
bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) \
* self.micro_batch_size
bucket_offset = current_epoch_samples // self.data_parallel_size
start_idx = self.data_parallel_rank * bucket_size
g = torch.Generator()
g.manual_seed(self.epoch)
random_idx = torch.randperm(bucket_size, generator=g).tolist()
idx_range = [start_idx + x for x in random_idx[bucket_offset:]]
else:
full_bucket_size = (self.total_samples // self.micro_batch_size) \
* self.micro_batch_size
full_bucket_offset = current_epoch_samples
g = torch.Generator()
g.manual_seed(self.epoch)
idx_range_total = \
torch.randperm(full_bucket_size, generator=g).tolist()
idx_range_active = idx_range_total[full_bucket_offset:]
idx_range = idx_range_active[self.data_parallel_rank::self.data_parallel_size]
batch = []
# Last batch if not complete will be dropped.
for idx in idx_range:
batch.append(idx)
if len(batch) == self.micro_batch_size:
self.consumed_samples += self.micro_batch_times_data_parallel_size
yield batch
batch = []
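# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). Toy values only; with data_sharding=True each
# data-parallel rank shuffles within its own contiguous bucket of samples.
def _example_random_sampler():
    sampler = MegatronPretrainingRandomSampler(dataset=None,
                                               total_samples=12,
                                               consumed_samples=0,
                                               micro_batch_size=2,
                                               data_parallel_rank=0,
                                               data_parallel_size=3,
                                               data_sharding=True)
    for micro_batch in sampler:
        print(micro_batch)     # two micro-batches drawn from this rank's bucket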
|
Megatron-LM-master
|
megatron/data/data_samplers.py
|
from . import indexed_dataset
|
Megatron-LM-master
|
megatron/data/__init__.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, and NVIDIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Most of the code here has been copied from:
# https://github.com/google-research/albert/blob/master/create_pretraining_data.py
# with some modifications.
import math
import os
import time
import collections
import numpy as np
import torch
from megatron import (
get_args,
print_rank_0
)
from megatron.core import mpu
from megatron.data.blendable_dataset import BlendableDataset
from megatron.data.indexed_dataset import MMapIndexedDataset
DSET_TYPE_BERT = 'standard_bert'
DSET_TYPE_ICT = 'ict'
DSET_TYPE_T5 = 't5'
DSET_TYPE_MULTIMODAL = 'multimodal'
DSET_TYPES = [DSET_TYPE_BERT, DSET_TYPE_ICT, DSET_TYPE_T5, DSET_TYPE_MULTIMODAL]
def get_datasets_weights_and_num_samples(data_prefix,
train_valid_test_num_samples):
# The data prefix should be in the format of:
# weight-1, data-prefix-1, weight-2, data-prefix-2, ..
assert len(data_prefix) % 2 == 0
num_datasets = len(data_prefix) // 2
weights = [0]*num_datasets
prefixes = [0]*num_datasets
for i in range(num_datasets):
weights[i] = float(data_prefix[2*i])
prefixes[i] = (data_prefix[2*i+1]).strip()
# Normalize weights
weight_sum = 0.0
for weight in weights:
weight_sum += weight
assert weight_sum > 0.0
weights = [weight / weight_sum for weight in weights]
    # Add 0.5% (the 1.005 factor) so in case the blending dataset does
# not uniformly distribute the number of samples, we still have
# samples left to feed to the network.
if isinstance(train_valid_test_num_samples, list):
datasets_train_valid_test_num_samples = []
for weight in weights:
datasets_train_valid_test_num_samples.append(
[int(math.ceil(val * weight * 1.005))
for val in train_valid_test_num_samples])
else:
# Used when separate dataset files are provided for train,
# valid and test
datasets_train_valid_test_num_samples = [
int(math.ceil(train_valid_test_num_samples * weight * 1.005))
for weight in weights]
return prefixes, weights, datasets_train_valid_test_num_samples
def compile_helper():
"""Compile helper function ar runtime. Make sure this
is invoked on a single process."""
import os
import subprocess
path = os.path.abspath(os.path.dirname(__file__))
ret = subprocess.run(['make', '-C', path])
if ret.returncode != 0:
print("Making C++ dataset helpers module failed, exiting.")
import sys
sys.exit(1)
def get_a_and_b_segments(sample, np_rng):
"""Divide sample into a and b segments."""
# Number of sentences in the sample.
n_sentences = len(sample)
# Make sure we always have two sentences.
assert n_sentences > 1, 'make sure each sample has at least two sentences.'
# First part:
# `a_end` is how many sentences go into the `A`.
a_end = 1
if n_sentences >= 3:
        # Note that randint in numpy is exclusive of the upper bound.
a_end = np_rng.randint(1, n_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
# Second part:
tokens_b = []
for j in range(a_end, n_sentences):
tokens_b.extend(sample[j])
# Random next:
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
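# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). The nested lists stand in for tokenized sentences.
def _example_a_and_b_segments():
    np_rng = np.random.RandomState(seed=0)
    sample = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]   # three toy "sentences"
    tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, np_rng)
    # tokens_a holds the first a_end sentences and tokens_b the rest; half
    # the time the two segments are swapped and is_next_random is True.
    print(tokens_a, tokens_b, is_next_random)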
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
"""Truncates a pair of sequences to a maximum sequence length."""
#print(len_a, len_b, max_num_tokens)
assert len_a > 0
if len_a + len_b <= max_num_tokens:
return False
while len_a + len_b > max_num_tokens:
if len_a > len_b:
len_a -= 1
tokens = tokens_a
else:
len_b -= 1
tokens = tokens_b
if np_rng.random() < 0.5:
del tokens[0]
else:
tokens.pop()
return True
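# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). Shows in-place truncation down to a toy budget.
def _example_truncate_segments():
    np_rng = np.random.RandomState(seed=0)
    tokens_a, tokens_b = [1, 2, 3, 4], [5, 6, 7]
    truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a),
                                  len(tokens_b), max_num_tokens=5,
                                  np_rng=np_rng)
    # The longer segment loses tokens (randomly from the front or back)
    # until the pair fits within max_num_tokens.
    print(truncated, tokens_a, tokens_b)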
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
"""Merge segments A and B, add [CLS] and [SEP] and build tokentypes."""
tokens = []
tokentypes = []
# [CLS].
tokens.append(cls_id)
tokentypes.append(0)
# Segment A.
for token in tokens_a:
tokens.append(token)
tokentypes.append(0)
# [SEP].
tokens.append(sep_id)
tokentypes.append(0)
# Segment B.
for token in tokens_b:
tokens.append(token)
tokentypes.append(1)
if tokens_b:
# [SEP].
tokens.append(sep_id)
tokentypes.append(1)
return tokens, tokentypes
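# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). The [CLS]/[SEP] ids below are made up; real ids
# come from the tokenizer.
def _example_tokens_and_tokentypes():
    cls_id, sep_id = 101, 102
    tokens, tokentypes = create_tokens_and_tokentypes([11, 12], [21, 22, 23],
                                                      cls_id, sep_id)
    print(tokens)      # [101, 11, 12, 102, 21, 22, 23, 102]
    print(tokentypes)  # [0, 0, 0, 0, 1, 1, 1, 1]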
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def is_start_piece(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
    # WordPieces, the first token does not have any marker and any subsequent
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not piece.startswith("##")
def create_masked_lm_predictions(tokens,
vocab_id_list, vocab_id_to_token_dict,
masked_lm_prob,
cls_id, sep_id, mask_id,
max_predictions_per_seq,
np_rng,
max_ngrams=3,
do_whole_word_mask=True,
favor_longer_ngram=False,
do_permutation=False,
geometric_dist=False,
masking_style="bert"):
"""Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens."""
cand_indexes = []
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
for (i, token) in enumerate(tokens):
if token == cls_id or token == sep_id:
token_boundary[i] = 1
continue
        # Whole Word Masking means that we mask all of the wordpieces
        # corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (do_whole_word_mask and len(cand_indexes) >= 1 and
not is_start_piece(vocab_id_to_token_dict[token])):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(vocab_id_to_token_dict[token]):
token_boundary[i] = 1
output_tokens = list(tokens)
masked_lm_positions = []
masked_lm_labels = []
if masked_lm_prob == 0:
return (output_tokens, masked_lm_positions,
masked_lm_labels, token_boundary)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
if not geometric_dist:
# Note(mingdachen):
        # By default, we set the probabilities to favor shorter ngram sequences.
pvals = 1. / np.arange(1, max_ngrams + 1)
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::-1]
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx:idx + n])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
(masked_lms, masked_spans) = ([], [])
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
n = np_rng.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
else:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# Note(mingdachen):
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
if masking_style == "bert":
# 80% of the time, replace with [MASK]
if np_rng.random() < 0.8:
masked_token = mask_id
else:
# 10% of the time, keep original
if np_rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))]
elif masking_style == "t5":
masked_token = mask_id
else:
raise ValueError("invalid value of masking style")
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_spans.append(MaskedLmInstance(
index=index_set,
label=[tokens[index] for index in index_set]))
assert len(masked_lms) <= num_to_predict
np_rng.shuffle(ngram_indexes)
select_indexes = set()
if do_permutation:
for cand_index_set in ngram_indexes:
if len(select_indexes) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes or index in select_indexes:
continue
n = np.random.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
index_set = sum(cand_index_set[n - 1], [])
n -= 1
while len(select_indexes) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(select_indexes) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes or index in select_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
select_indexes.add(index)
assert len(select_indexes) <= num_to_predict
select_indexes = sorted(select_indexes)
permute_indexes = list(select_indexes)
np_rng.shuffle(permute_indexes)
orig_token = list(output_tokens)
for src_i, tgt_i in zip(select_indexes, permute_indexes):
output_tokens[src_i] = orig_token[tgt_i]
masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
# Sort the spans by the index of the first span
masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans)
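# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). Uses a tiny made-up vocabulary so the masking
# routine above can be exercised without a real tokenizer.
def _example_masked_lm_predictions():
    vocab_id_list = list(range(10))
    vocab_id_to_token_dict = {i: 'tok_{}'.format(i) for i in range(10)}
    cls_id, sep_id, mask_id = 0, 1, 2
    tokens = [cls_id, 5, 6, 7, 8, 9, sep_id]
    np_rng = np.random.RandomState(seed=123)
    (output_tokens, masked_positions, masked_labels,
     token_boundary, masked_spans) = create_masked_lm_predictions(
        tokens, vocab_id_list, vocab_id_to_token_dict,
        masked_lm_prob=0.3, cls_id=cls_id, sep_id=sep_id, mask_id=mask_id,
        max_predictions_per_seq=2, np_rng=np_rng)
    # Roughly 30% of the non-special tokens (capped at 2 here) are replaced,
    # mostly by mask_id, and recorded as (position, original label) pairs.
    print(output_tokens, masked_positions, masked_labels)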
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
masked_labels, pad_id, max_seq_length):
"""Pad sequences and convert them to numpy."""
# Some checks.
num_tokens = len(tokens)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [pad_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
    # Labels and loss mask.
labels = [-1] * max_seq_length
loss_mask = [0] * max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
labels_np = np.array(labels, dtype=np.int64)
loss_mask_np = np.array(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
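# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). The token/pad ids are made up for illustration.
def _example_pad_and_convert():
    tokens = [101, 5, 2, 7, 102]       # toy ids; 2 stands in for [MASK]
    tokentypes = [0, 0, 0, 0, 0]
    masked_positions = [2]             # position that was masked
    masked_labels = [6]                # original id at that position
    (tokens_np, tokentypes_np, labels_np,
     padding_mask_np, loss_mask_np) = pad_and_convert_to_numpy(
        tokens, tokentypes, masked_positions, masked_labels,
        pad_id=0, max_seq_length=8)
    print(tokens_np)        # [101   5   2   7 102   0   0   0]
    print(labels_np)        # [ -1  -1   6  -1  -1  -1  -1  -1]
    print(loss_mask_np)     # [0 0 1 0 0 0 0 0]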
def build_train_valid_test_datasets_with_prefixes(train_valid_test_num_samples,
max_seq_length,
seed,
skip_warmup,
train_data_prefix=None,
valid_data_prefix=None,
test_data_prefix=None,
binary_head=False,
max_seq_length_dec=None,
dataset_type='standard_bert'):
print_rank_0("Separate data paths provided for train, valid & test.")
train_dataset, valid_dataset, test_dataset = None, None, None
# Single dataset.
if train_data_prefix is not None:
train_dataset = build_dataset("train", train_data_prefix,
train_valid_test_num_samples[0],
max_seq_length, seed, skip_warmup,
binary_head, max_seq_length_dec,
dataset_type=dataset_type)
if valid_data_prefix is not None:
valid_dataset = build_dataset("valid", valid_data_prefix,
train_valid_test_num_samples[1],
max_seq_length, seed, False,
binary_head, max_seq_length_dec,
dataset_type=dataset_type)
if test_data_prefix is not None:
test_dataset = build_dataset("test", test_data_prefix,
train_valid_test_num_samples[2],
max_seq_length, seed, False,
binary_head, max_seq_length_dec,
dataset_type=dataset_type)
return (train_dataset, valid_dataset, test_dataset)
def build_train_valid_test_datasets(data_prefix, splits_string,
train_valid_test_num_samples,
max_seq_length, seed,
skip_warmup, binary_head=False,
max_seq_length_dec=None,
dataset_type='standard_bert'):
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(data_prefix[0],
splits_string,
train_valid_test_num_samples,
max_seq_length, seed,
skip_warmup,
binary_head,
max_seq_length_dec,
dataset_type=dataset_type)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix,
train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
train_num_samples, valid_num_samples, test_num_samples = map(
sum,
zip(*datasets_train_valid_test_num_samples)
)
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
prefixes[i], splits_string,
datasets_train_valid_test_num_samples[i],
max_seq_length, seed, skip_warmup, binary_head,
max_seq_length_dec, dataset_type=dataset_type)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights, train_num_samples)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights, valid_num_samples)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights, test_num_samples)
return (blending_train_dataset, blending_valid_dataset,
blending_test_dataset)
def _build_train_valid_test_datasets(data_prefix, splits_string,
train_valid_test_num_samples,
max_seq_length, seed,
skip_warmup, binary_head,
max_seq_length_dec,
dataset_type='standard_bert'):
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix,
dataset_type,
skip_warmup)
    # Get start and end indices of train/valid/test into doc-idx
    # Note that doc-idx is designed to be num-docs + 1 so we can
    # easily iterate over it.
total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
print_rank_0(' > dataset split:')
def print_split_stats(name, index):
print_rank_0(' {}:'.format(name))
print_rank_0(' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1],
splits[index + 1] - splits[index]))
start_index = indexed_dataset.doc_idx[splits[index]]
end_index = indexed_dataset.doc_idx[splits[index + 1]]
print_rank_0(' sentence indices in [{}, {}) total of {} '
'sentences'.format(start_index, end_index,
end_index - start_index))
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_split_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
# Get the pointer to the original doc-idx so we can set it later.
doc_idx_ptr = indexed_dataset.get_doc_idx()
# Slice the doc-idx
start_index = splits[index]
# Add +1 so we can index into the dataset to get the upper bound.
end_index = splits[index + 1] + 1
# New doc_idx view.
indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
dataset = build_dataset(
name, data_prefix,
train_valid_test_num_samples[index], max_seq_length,
seed, skip_warmup, binary_head, max_seq_length_dec,
dataset_type, indexed_dataset)
# Set the original pointer so dataset remains the main dataset.
indexed_dataset.set_doc_idx(doc_idx_ptr)
# Checks.
assert indexed_dataset.doc_idx[0] == 0
assert indexed_dataset.doc_idx.shape[0] == \
(total_num_of_documents + 1)
return dataset
train_dataset = build_split_dataset(0, 'train')
valid_dataset = build_split_dataset(1, 'valid')
test_dataset = build_split_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def build_dataset(name, data_prefix, max_num_samples,
max_seq_length, seed, skip_warmup, binary_head,
max_seq_length_dec, dataset_type='standard_bert',
indexed_dataset=None):
from megatron.data.bert_dataset import BertDataset
from megatron.data.ict_dataset import ICTDataset
from megatron.data.t5_dataset import T5Dataset
from megatron.data.multimodal_dataset import MultiModalDataset
if dataset_type not in DSET_TYPES:
raise ValueError("Invalid dataset_type: ", dataset_type)
if indexed_dataset is None:
indexed_dataset = get_indexed_dataset_(data_prefix,
dataset_type,
skip_warmup)
kwargs = dict(
name=name,
data_prefix=data_prefix,
num_epochs=None,
max_num_samples=max_num_samples,
max_seq_length=max_seq_length,
seed=seed,
)
if dataset_type == DSET_TYPE_ICT:
args = get_args()
title_dataset = get_indexed_dataset_(
args.titles_data_path,
dataset_type,
skip_warmup)
dataset = ICTDataset(
block_dataset=indexed_dataset,
title_dataset=title_dataset,
query_in_block_prob=args.query_in_block_prob,
use_one_sent_docs=args.use_one_sent_docs,
binary_head=binary_head,
**kwargs
)
elif dataset_type == DSET_TYPE_T5:
args = get_args()
dataset = T5Dataset(
indexed_dataset=indexed_dataset,
masked_lm_prob=args.mask_prob,
max_seq_length_dec=max_seq_length_dec,
short_seq_prob=args.short_seq_prob,
**kwargs
)
elif dataset_type == DSET_TYPE_BERT:
args = get_args()
dataset = BertDataset(
indexed_dataset=indexed_dataset,
masked_lm_prob=args.mask_prob,
short_seq_prob=args.short_seq_prob,
binary_head=binary_head,
**kwargs
)
elif dataset_type == DSET_TYPE_MULTIMODAL:
args = get_args()
dataset = MultiModalDataset(
name=name,
data_prefix=data_prefix,
indexed_dataset=indexed_dataset,
num_samples=max_num_samples,
seq_length=max_seq_length,
seed=seed,
img_h=args.img_h,
img_w=args.img_w,
)
else:
raise NotImplementedError("Dataset type not fully implemented.")
return dataset
def get_indexed_dataset_(data_prefix, dataset_type, skip_warmup):
print_rank_0(' > building dataset index ...')
start_time = time.time()
multimodal = dataset_type == DSET_TYPE_MULTIMODAL
indexed_dataset = MMapIndexedDataset(data_prefix, skip_warmup, multimodal)
assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
print_rank_0(' > finished creating indexed dataset in {:4f} '
'seconds'.format(time.time() - start_time))
print_rank_0(' > indexed dataset stats:')
print_rank_0(' number of documents: {}'.format(
indexed_dataset.doc_idx.shape[0] - 1))
print_rank_0(' number of sentences: {}'.format(
indexed_dataset.sizes.shape[0]))
return indexed_dataset
def get_train_valid_test_split_(splits_string, size):
""" Get dataset splits from comma or '/' separated string list."""
splits = []
if splits_string.find(',') != -1:
splits = [float(s) for s in splits_string.split(',')]
elif splits_string.find('/') != -1:
splits = [float(s) for s in splits_string.split('/')]
else:
splits = [float(splits_string)]
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
splits_sum = sum(splits)
assert splits_sum > 0.0
splits = [split / splits_sum for split in splits]
splits_index = [0]
for index, split in enumerate(splits):
splits_index.append(splits_index[index] +
int(round(split * float(size))))
diff = splits_index[-1] - size
for index in range(1, len(splits_index)):
splits_index[index] -= diff
assert len(splits_index) == 4
assert splits_index[-1] == size
return splits_index
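# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). A "949,50,1" split over 1000 documents yields
# cumulative document boundaries for train/valid/test.
def _example_train_valid_test_split():
    splits_index = get_train_valid_test_split_('949,50,1', 1000)
    print(splits_index)    # [0, 949, 999, 1000]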
def get_samples_mapping(indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
max_seq_length,
short_seq_prob,
seed,
name,
binary_head):
"""Get a list that maps a sample index to a starting sentence index, end sentence index, and length"""
if not num_epochs:
if not max_num_samples:
raise ValueError("Need to specify either max_num_samples "
"or num_epochs")
num_epochs = np.iinfo(np.int32).max - 1
if not max_num_samples:
max_num_samples = np.iinfo(np.int64).max - 1
# Filename of the index mapping
indexmap_filename = data_prefix
indexmap_filename += '_{}_indexmap'.format(name)
if num_epochs != (np.iinfo(np.int32).max - 1):
indexmap_filename += '_{}ep'.format(num_epochs)
if max_num_samples != (np.iinfo(np.int64).max - 1):
indexmap_filename += '_{}mns'.format(max_num_samples)
indexmap_filename += '_{}msl'.format(max_seq_length)
indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
indexmap_filename += '_{}s'.format(seed)
indexmap_filename += '.npy'
# Build the indexed mapping if not exist.
if torch.distributed.get_rank() == 0 and \
not os.path.isfile(indexmap_filename):
print(' > WARNING: could not find index map file {}, building '
'the indices on rank 0 ...'.format(indexmap_filename))
# Make sure the types match the helpers input types.
assert indexed_dataset.doc_idx.dtype == np.int64
assert indexed_dataset.sizes.dtype == np.int32
# Build samples mapping
verbose = torch.distributed.get_rank() == 0
start_time = time.time()
print_rank_0(' > building samples index mapping for {} ...'.format(
name))
# First compile and then import.
from megatron.data import helpers
samples_mapping = helpers.build_mapping(
indexed_dataset.doc_idx,
indexed_dataset.sizes,
num_epochs,
max_num_samples,
max_seq_length,
short_seq_prob,
seed,
verbose,
2 if binary_head else 1)
        print_rank_0(' > done building samples index mapping')
np.save(indexmap_filename, samples_mapping, allow_pickle=True)
print_rank_0(' > saved the index mapping in {}'.format(
indexmap_filename))
# Make sure all the ranks have built the mapping
    print_rank_0(' > elapsed time to build and save samples mapping '
                 '(seconds): {:4f}'.format(
                     time.time() - start_time))
    # This should be a barrier but the nccl barrier assumes
    # device_index=rank, which is not the case for the
    # model-parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
assert counts[0].item() == (
torch.distributed.get_world_size() //
torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()))
# Load indexed dataset.
print_rank_0(' > loading indexed mapping from {}'.format(
indexmap_filename))
start_time = time.time()
samples_mapping = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
samples_mapping.shape[0]))
return samples_mapping
|
Megatron-LM-master
|
megatron/data/dataset_utils.py
|
# BSD 3-Clause License
#
# Copyright (c) Soumith Chintala 2016,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# code taken from
# https://github.com/pytorch/vision/blob/main/torchvision/datasets/folder.py
# added support for classes_fraction and data_per_class_fraction
from torchvision.datasets import VisionDataset
from PIL import Image
import os
import os.path
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
import numpy as np
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def is_image_file(filename: str) -> bool:
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
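# --- Illustrative usage sketch (added for exposition; not part of the
# original file). IMG_EXTENSIONS is defined further down in this module,
# so is_image_file only resolves it at call time.
def _example_extension_checks():
    print(has_file_allowed_extension('photo.JPG', ('.jpg', '.png')))   # True
    print(is_image_file('notes.txt'))                                  # False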
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
data_per_class_fraction: float,
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
"""Generates a list of samples of a form (path_to_sample, class).
Args:
directory (str): root dataset directory
class_to_idx (Dict[str, int]): dictionary mapping class name to class index
extensions (optional): A list of allowed extensions.
Either extensions or is_valid_file should be passed. Defaults to None.
is_valid_file (optional): A function that takes path of a file
and checks if the file is a valid file
            (used to check for corrupt files) both extensions and
is_valid_file should not be passed. Defaults to None.
Raises:
ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
Returns:
List[Tuple[str, int]]: samples of a form (path_to_sample, class)
"""
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
local_instances = []
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = path, class_index
local_instances.append(item)
instances.extend(local_instances[0:int(len(local_instances) * data_per_class_fraction)])
return instances
class DatasetFolder(VisionDataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/[...]/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/[...]/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
is_valid_file (callable, optional): A function that takes path of a file
            and checks if the file is a valid file (used to check for corrupt files)
both extensions and is_valid_file should not be passed.
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(
self,
root: str,
loader: Callable[[str], Any],
extensions: Optional[Tuple[str, ...]] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
classes_fraction=1.0,
data_per_class_fraction=1.0,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> None:
super(DatasetFolder, self).__init__(root, transform=transform,
target_transform=target_transform)
self.classes_fraction = classes_fraction
self.data_per_class_fraction = data_per_class_fraction
classes, class_to_idx = self._find_classes(self.root)
samples = self.make_dataset(self.root,
class_to_idx,
self.data_per_class_fraction,
extensions,
is_valid_file)
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
if extensions is not None:
msg += "Supported extensions are: {}".format(",".join(extensions))
raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
self.total = len(samples)
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
@staticmethod
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
data_per_class_fraction: float,
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
return make_dataset(directory,
class_to_idx,
data_per_class_fraction,
extensions=extensions,
is_valid_file=is_valid_file)
def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]:
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
all_classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes = all_classes[0:int(len(all_classes) * self.classes_fraction)]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
curr_index = index
for x in range(self.total):
try:
path, target = self.samples[curr_index]
sample = self.loader(path)
break
except Exception as e:
curr_index = np.random.randint(0, self.total)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self) -> int:
return len(self.samples)
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path: str) -> Any:
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/[...]/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/[...]/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
is_valid_file (callable, optional): A function that takes path of an Image file
            and checks if the file is a valid file (used to check for corrupt files)
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
classes_fraction=1.0,
data_per_class_fraction=1.0,
loader: Callable[[str], Any] = default_loader,
is_valid_file: Optional[Callable[[str], bool]] = None,
):
super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
transform=transform,
target_transform=target_transform,
classes_fraction=classes_fraction,
data_per_class_fraction=data_per_class_fraction,
is_valid_file=is_valid_file)
self.imgs = self.samples
|
Megatron-LM-master
|
megatron/data/image_folder.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Wikipedia dataset from DPR code for ORQA."""
from abc import ABC
import csv
import numpy as np
import random
import torch
from torch.utils.data import Dataset
from megatron import print_rank_0, get_args, get_tokenizer
from megatron.core import tensor_parallel
from megatron.data.biencoder_dataset_utils import make_attention_mask
def get_open_retrieval_wiki_dataset():
args = get_args()
tokenizer = get_tokenizer()
dataset = OpenRetrievalEvidenceDataset('2018 Wikipedia from DPR codebase',
'evidence',
args.evidence_data_path,
tokenizer,
args.retriever_seq_length)
return dataset
def get_open_retrieval_batch(data_iterator):
# Items and their type.
keys = ['row_id', 'context', 'context_mask', 'context_types',
'context_pad_mask']
datatype = torch.int64
# Broadcast data.
data = None if data_iterator is None else next(data_iterator)
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
row_id = data_b['row_id'].long()
context = data_b['context'].long()
# TODO: make the context mask a binary one
context_mask = (data_b['context_mask'] < 0.5)
context_types = data_b['context_types'].long()
context_pad_mask = data_b['context_pad_mask'].long()
return row_id, context, context_mask, context_types, context_pad_mask
def build_tokens_types_paddings_from_text(row, tokenizer, max_seq_length):
"""Build token types and paddings, trim if needed, and pad if needed."""
title_ids = tokenizer.tokenize(row['title'])
context_ids = tokenizer.tokenize(row['text'])
    # Append the title of the context at the front
extended_context_ids = title_ids + [tokenizer.sep_id] + context_ids
context_ids, context_types, context_pad_mask = \
build_tokens_types_paddings_from_ids(extended_context_ids,
max_seq_length, tokenizer.cls, tokenizer.sep, tokenizer.pad)
return context_ids, context_types, context_pad_mask
# noinspection DuplicatedCode
def build_tokens_types_paddings_from_ids(text_ids, max_seq_length,
cls_id, sep_id, pad_id):
"""Build token types and paddings, trim if needed, and pad if needed."""
enc_ids = []
tokentypes_enc = []
# [CLS].
enc_ids.append(cls_id)
tokentypes_enc.append(0)
# A.
len_src = len(text_ids)
enc_ids.extend(text_ids)
tokentypes_enc.extend([0] * len_src)
# Cap the size.
if len(enc_ids) > max_seq_length - 1:
enc_ids = enc_ids[0: max_seq_length - 1]
tokentypes_enc = tokentypes_enc[0: max_seq_length - 1]
# [SEP].
enc_ids.append(sep_id)
tokentypes_enc.append(0)
num_tokens_enc = len(enc_ids)
# Padding.
padding_length = max_seq_length - len(enc_ids)
if padding_length > 0:
enc_ids.extend([pad_id] * padding_length)
tokentypes_enc.extend([pad_id] * padding_length)
pad_mask = ([1] * num_tokens_enc) + ([0] * padding_length)
pad_mask = np.array(pad_mask, dtype=np.int64)
return enc_ids, tokentypes_enc, pad_mask
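# --- Illustrative usage sketch (added for exposition; not part of the
# original Megatron file). The [CLS]/[SEP]/[PAD] ids are made up; real ids
# come from the tokenizer.
def _example_tokens_types_paddings_from_ids():
    enc_ids, tokentypes_enc, pad_mask = build_tokens_types_paddings_from_ids(
        text_ids=[10, 11, 12], max_seq_length=8,
        cls_id=101, sep_id=102, pad_id=0)
    print(enc_ids)         # [101, 10, 11, 12, 102, 0, 0, 0]
    print(tokentypes_enc)  # [0, 0, 0, 0, 0, 0, 0, 0]
    print(pad_mask)        # [1 1 1 1 1 0 0 0]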
def build_sample(row_id, context_ids, context_types, context_pad_mask):
"""Convert to numpy and return a sample consumed by the batch producer."""
context_ids = np.array(context_ids, dtype=np.int64)
context_types = np.array(context_types, dtype=np.int64)
context_mask = make_attention_mask(context_ids, context_ids)
sample = ({
'row_id': row_id,
'context': context_ids,
'context_mask': context_mask,
'context_types': context_types,
'context_pad_mask': context_pad_mask
})
return sample
class OpenRetrievalEvidenceDataset(ABC, Dataset):
"""Open Retrieval Evidence dataset class."""
def __init__(self, task_name, dataset_name, datapath, tokenizer,
max_seq_length):
# Store inputs.
self.task_name = task_name
self.dataset_name = dataset_name
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
print_rank_0(' > building {} dataset for {}:'.format(self.task_name,
self.dataset_name))
# Process the files.
print_rank_0(datapath)
self.samples, self.id2text = self.process_samples_from_single_path(
datapath)
args = get_args()
if args.sample_rate < 1: # subsample
k = int(len(self.samples) * args.sample_rate)
self.samples = random.sample(self.samples, k)
print_rank_0(' >> total number of samples: {}'.format(
len(self.samples)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
row = self.samples[idx]
context_ids, context_types, context_pad_mask = \
build_tokens_types_paddings_from_text(row, self.tokenizer,
self.max_seq_length)
sample = build_sample(row['doc_id'],
context_ids,
context_types,
context_pad_mask)
return sample
@staticmethod
def process_samples_from_single_path(filename):
print_rank_0(' > Processing {} ...'.format(filename))
total = 0
rows = []
id2text = {}
with open(filename) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
next(reader, None) # skip the headers
for row in reader:
# file format: doc_id, doc_text, title
doc_id = int(row[0])
text = row[1]
title = row[2]
rows.append({'doc_id': doc_id,
'text': text,
'title': title})
assert doc_id not in id2text
id2text[doc_id] = (text, title)
total += 1
if total % 100000 == 0:
print_rank_0(' > processed {} rows so far ...'.format(
total))
print_rank_0(' >> processed {} samples.'.format(len(rows)))
return rows, id2text
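# Minimal usage sketch (illustrative, not part of the original module): write a
# tiny evidence TSV in the expected "doc_id<TAB>doc_text<TAB>title" layout
# (with a header row, which the reader skips) and parse it back with the
# static helper above. The file contents are made up for the example.
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        tsv_path = os.path.join(tmpdir, 'toy_evidence.tsv')
        with open(tsv_path, 'w') as f:
            f.write('id\ttext\ttitle\n')
            f.write('1\tParis is the capital of France.\tParis\n')
            f.write('2\tThe Nile is a river in Africa.\tNile\n')
        rows, id2text = \
            OpenRetrievalEvidenceDataset.process_samples_from_single_path(tsv_path)
        assert len(rows) == 2
        assert id2text[1] == ('Paris is the capital of France.', 'Paris')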
|
Megatron-LM-master
|
megatron/data/orqa_wiki_dataset.py
|
"""AutoAugment data augmentation policy for ImageNet.
-- Begin license text.
MIT License
Copyright (c) 2018 Philip Popien
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-- End license text.
Code adapted from https://github.com/DeepVoltaire/AutoAugment.
This module implements the fixed AutoAugment data augmentation policy for ImageNet provided in
Appendix A, Table 9 of reference [1]. It does not include any of the search code for augmentation
policies.
Reference:
[1] https://arxiv.org/abs/1805.09501
"""
import random
import numpy as np
from PIL import Image
from PIL import ImageEnhance
from PIL import ImageOps
_MAX_LEVEL = 10 # Maximum integer strength of an augmentation, if applicable.
class ImageNetPolicy:
"""Definition of an ImageNetPolicy.
Implements a fixed AutoAugment data augmentation policy targeted at
ImageNet training by randomly applying at runtime one of the 25 pre-defined
data augmentation sub-policies provided in Reference [1].
Usage example as a Pytorch Transform:
>>> transform=transforms.Compose([transforms.Resize(256),
>>> ImageNetPolicy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
"""Initialize an ImageNetPolicy.
Args:
fillcolor (tuple): RGB color components of the color to be used for
filling when needed (default: (128, 128, 128), which
corresponds to gray).
"""
# Instantiate a list of sub-policies.
# Each entry of the list is a SubPolicy which consists of
# two augmentation operations,
# each of those parametrized as operation, probability, magnitude.
# Those two operations are applied sequentially on the image upon call.
self.policies = [
SubPolicy("posterize", 0.4, 8, "rotate", 0.6, 9, fillcolor),
SubPolicy("solarize", 0.6, 5, "autocontrast", 0.6, 5, fillcolor),
SubPolicy("equalize", 0.8, 8, "equalize", 0.6, 3, fillcolor),
SubPolicy("posterize", 0.6, 7, "posterize", 0.6, 6, fillcolor),
SubPolicy("equalize", 0.4, 7, "solarize", 0.2, 4, fillcolor),
SubPolicy("equalize", 0.4, 4, "rotate", 0.8, 8, fillcolor),
SubPolicy("solarize", 0.6, 3, "equalize", 0.6, 7, fillcolor),
SubPolicy("posterize", 0.8, 5, "equalize", 1.0, 2, fillcolor),
SubPolicy("rotate", 0.2, 3, "solarize", 0.6, 8, fillcolor),
SubPolicy("equalize", 0.6, 8, "posterize", 0.4, 6, fillcolor),
SubPolicy("rotate", 0.8, 8, "color", 0.4, 0, fillcolor),
SubPolicy("rotate", 0.4, 9, "equalize", 0.6, 2, fillcolor),
SubPolicy("equalize", 0.0, 7, "equalize", 0.8, 8, fillcolor),
SubPolicy("invert", 0.6, 4, "equalize", 1.0, 8, fillcolor),
SubPolicy("color", 0.6, 4, "contrast", 1.0, 8, fillcolor),
SubPolicy("rotate", 0.8, 8, "color", 1.0, 2, fillcolor),
SubPolicy("color", 0.8, 8, "solarize", 0.8, 7, fillcolor),
SubPolicy("sharpness", 0.4, 7, "invert", 0.6, 8, fillcolor),
SubPolicy("shearX", 0.6, 5, "equalize", 1.0, 9, fillcolor),
SubPolicy("color", 0.4, 0, "equalize", 0.6, 3, fillcolor),
SubPolicy("equalize", 0.4, 7, "solarize", 0.2, 4, fillcolor),
SubPolicy("solarize", 0.6, 5, "autocontrast", 0.6, 5, fillcolor),
SubPolicy("invert", 0.6, 4, "equalize", 1.0, 8, fillcolor),
SubPolicy("color", 0.6, 4, "contrast", 1.0, 8, fillcolor),
SubPolicy("equalize", 0.8, 8, "equalize", 0.6, 3, fillcolor),
]
def __call__(self, img):
"""Define call method for ImageNetPolicy class."""
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
"""Define repr method for ImageNetPolicy class."""
return "ImageNetPolicy"
class SubPolicy:
"""Definition of a SubPolicy.
A SubPolicy consists of two augmentation operations,
each of those parametrized as operation, probability, magnitude.
The two operations are applied sequentially on the image upon call.
"""
def __init__(
self,
operation1,
probability1,
magnitude_idx1,
operation2,
probability2,
magnitude_idx2,
fillcolor,
):
"""Initialize a SubPolicy.
Args:
            operation1 (str): Key specifying the first augmentation operation.
                There are fourteen supported keys altogether (see
                supported_ops below for the full list).
            probability1 (float): Probability within [0., 1.] of applying the
                first augmentation operation.
            magnitude_idx1 (int): Integer specifying the strength of the first
                operation as an index further used to derive the magnitude from
                a range of possible values.
            operation2 (str): Key specifying the second augmentation operation.
            probability2 (float): Probability within [0., 1.] of applying the
                second augmentation operation.
            magnitude_idx2 (int): Integer specifying the strength of the
                second operation as an index further used to derive the
                magnitude from a range of possible values.
fillcolor (tuple): RGB color components of the color to be used for
filling.
Returns:
"""
# List of supported operations for operation1 and operation2.
supported_ops = [
"shearX",
"shearY",
"translateX",
"translateY",
"rotate",
"color",
"posterize",
"solarize",
"contrast",
"sharpness",
"brightness",
"autocontrast",
"equalize",
"invert",
]
assert (operation1 in supported_ops) and (
operation2 in supported_ops
), "SubPolicy:one of oper1 or oper2 refers to an unsupported operation."
assert (
0.0 <= probability1 <= 1.0 and 0.0 <= probability2 <= 1.0
), "SubPolicy: prob1 and prob2 should be within [0., 1.]."
assert (
isinstance(magnitude_idx1, int) and 0 <= magnitude_idx1 <= 10
), "SubPolicy: idx1 should be specified as an integer within [0, 10]."
assert (
isinstance(magnitude_idx2, int) and 0 <= magnitude_idx2 <= 10
), "SubPolicy: idx2 should be specified as an integer within [0, 10]."
# Define a dictionary where each key refers to a specific type of
# augmentation and the corresponding value is a range of ten possible
# magnitude values for that augmentation.
num_levels = _MAX_LEVEL + 1
ranges = {
"shearX": np.linspace(0, 0.3, num_levels),
"shearY": np.linspace(0, 0.3, num_levels),
"translateX": np.linspace(0, 150 / 331, num_levels),
"translateY": np.linspace(0, 150 / 331, num_levels),
"rotate": np.linspace(0, 30, num_levels),
"color": np.linspace(0.0, 0.9, num_levels),
"posterize": np.round(np.linspace(8, 4, num_levels), 0).astype(
np.int32
),
"solarize": np.linspace(256, 0, num_levels), # range [0, 256]
"contrast": np.linspace(0.0, 0.9, num_levels),
"sharpness": np.linspace(0.0, 0.9, num_levels),
"brightness": np.linspace(0.0, 0.9, num_levels),
"autocontrast": [0]
* num_levels, # This augmentation doesn't use magnitude parameter.
"equalize": [0]
* num_levels, # This augmentation doesn't use magnitude parameter.
"invert": [0]
* num_levels, # This augmentation doesn't use magnitude parameter.
}
def rotate_with_fill(img, magnitude):
"""Define rotation transformation with fill.
The input image is first rotated, then it is blended together with
a gray mask of the same size. Note that fillcolor as defined
elsewhere in this module doesn't apply here.
Args:
magnitude (float): rotation angle in degrees.
Returns:
rotated_filled (PIL Image): rotated image with gray filling for
disoccluded areas unveiled by the rotation.
"""
rotated = img.convert("RGBA").rotate(magnitude)
rotated_filled = Image.composite(
rotated, Image.new("RGBA", rotated.size, (128,) * 4), rotated
)
return rotated_filled.convert(img.mode)
# Define a dictionary of augmentation functions where each key refers
# to a specific type of augmentation and the corresponding value defines
# the augmentation itself using a lambda function.
# pylint: disable=unnecessary-lambda
func_dict = {
"shearX": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC,
fillcolor=fillcolor,
),
"shearY": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC,
fillcolor=fillcolor,
),
"translateX": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(
1,
0,
magnitude * img.size[0] * random.choice([-1, 1]),
0,
1,
0,
),
fillcolor=fillcolor,
),
"translateY": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(
1,
0,
0,
0,
1,
magnitude * img.size[1] * random.choice([-1, 1]),
),
fillcolor=fillcolor,
),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
1 + magnitude * random.choice([-1, 1])
),
"posterize": lambda img, magnitude: ImageOps.posterize(
img, magnitude
),
"solarize": lambda img, magnitude: ImageOps.solarize(
img, magnitude
),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(
img
).enhance(1 + magnitude * random.choice([-1, 1])),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(
img
).enhance(1 + magnitude * random.choice([-1, 1])),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(
img
).enhance(1 + magnitude * random.choice([-1, 1])),
"autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
"equalize": lambda img, magnitude: ImageOps.equalize(img),
"invert": lambda img, magnitude: ImageOps.invert(img),
}
# Store probability, function and magnitude of the first augmentation
# for the sub-policy.
self.probability1 = probability1
self.operation1 = func_dict[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
# Store probability, function and magnitude of the second augmentation
# for the sub-policy.
self.probability2 = probability2
self.operation2 = func_dict[operation2]
self.magnitude2 = ranges[operation2][magnitude_idx2]
def __call__(self, img):
"""Define call method for SubPolicy class."""
# Randomly apply operation 1.
if random.random() < self.probability1:
img = self.operation1(img, self.magnitude1)
# Randomly apply operation 2.
if random.random() < self.probability2:
img = self.operation2(img, self.magnitude2)
return img
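# Minimal usage sketch (illustrative, not part of the original module): apply
# the fixed ImageNet policy to a synthetic gray image. Any RGB PIL image works
# the same way; the 64x64 size here is an arbitrary choice for the example.
if __name__ == "__main__":
    toy_image = Image.new("RGB", (64, 64), color=(128, 128, 128))
    policy = ImageNetPolicy()
    augmented = policy(toy_image)
    print(policy, "produced an image of size", augmented.size)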
|
Megatron-LM-master
|
megatron/data/autoaugment.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from PIL import Image, UnidentifiedImageError
import numpy as np
import io
import torch
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
from torchvision.transforms import Compose, ToTensor, Normalize, ToPILImage, RandomResizedCrop, Resize
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(img_h, img_w):
return Compose([
ToPILImage(),
RandomResizedCrop((img_h, img_w), scale=(0.5, 1.0), interpolation=BICUBIC),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
class MultiModalDataset(torch.utils.data.Dataset):
def __init__(self, name, data_prefix, indexed_dataset,
num_samples, seq_length, seed, img_h, img_w):
self.name = name
self.indexed_dataset = indexed_dataset
self.doc_idx = indexed_dataset.get_doc_idx()
self.visual_transform = _transform(img_h, img_w)
def __len__(self):
return self.indexed_dataset.sizes.shape[0]
def __getitem__(self, idx):
text_sample, mode = self.indexed_dataset.get(self.doc_idx[idx])
assert mode == 0
img_sample, mode = self.indexed_dataset.get(self.doc_idx[idx]+1)
assert mode == 1
img_pad = img_sample[0].item()
xs = img_sample[1:].tobytes(order='C')
xs = xs[:len(xs)-img_pad]
img_sample = np.array(Image.open(io.BytesIO(xs)))
img_sample = self.visual_transform(img_sample).reshape(-1)
return {'text': np.array(text_sample, dtype=np.int64),
'img': np.array(img_sample, dtype=np.float32)}
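# Minimal usage sketch (illustrative, not part of the original module): run the
# CLIP-style visual transform on a random uint8 HWC image. The 256x256 input
# and 224x224 crop sizes are arbitrary choices for the example.
if __name__ == "__main__":
    dummy_img = np.random.randint(0, 255, size=(256, 256, 3), dtype=np.uint8)
    visual_transform = _transform(img_h=224, img_w=224)
    out = visual_transform(dummy_img)
    print(out.shape)  # expected: torch.Size([3, 224, 224])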
|
Megatron-LM-master
|
megatron/data/multimodal_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import random
import numpy as np
import torch
import torchvision.transforms as T
from torchvision import datasets
from megatron import get_args
from megatron.data.image_folder import ImageFolder
from megatron.data.autoaugment import ImageNetPolicy
from megatron.data.data_samplers import RandomSeedDataset
from PIL import Image, ImageFilter, ImageOps
class GaussianBlur(object):
"""
Apply Gaussian Blur to the PIL image.
"""
def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
self.prob = p
self.radius_min = radius_min
self.radius_max = radius_max
def __call__(self, img):
do_it = random.random() <= self.prob
if not do_it:
return img
return img.filter(
ImageFilter.GaussianBlur(
radius=random.uniform(self.radius_min, self.radius_max)
)
)
class Solarization(object):
"""
Apply Solarization to the PIL image.
"""
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
class ClassificationTransform():
def __init__(self, image_size, train=True):
args = get_args()
assert args.fp16 or args.bf16
self.data_type = torch.half if args.fp16 else torch.bfloat16
if train:
self.transform = T.Compose([
T.RandomResizedCrop(image_size),
T.RandomHorizontalFlip(),
T.ColorJitter(0.4, 0.4, 0.4, 0.1),
ImageNetPolicy(),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
T.ConvertImageDtype(self.data_type)
])
else:
self.transform = T.Compose([
T.Resize(image_size),
T.CenterCrop(image_size),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
T.ConvertImageDtype(self.data_type)
])
def __call__(self, input):
output = self.transform(input)
return output
class InpaintingTransform():
def __init__(self, image_size, train=True):
args = get_args()
self.mask_factor = args.mask_factor
self.mask_type = args.mask_type
self.image_size = image_size
self.patch_size = args.patch_dim
self.mask_size = int(self.mask_factor*(image_size[0]/self.patch_size)*(image_size[1]/self.patch_size))
self.train = train
assert args.fp16 or args.bf16
self.data_type = torch.half if args.fp16 else torch.bfloat16
if self.train:
self.transform = T.Compose([
T.RandomResizedCrop(self.image_size),
T.RandomHorizontalFlip(),
T.ColorJitter(0.4, 0.4, 0.4, 0.1),
ImageNetPolicy(),
T.ToTensor(),
T.ConvertImageDtype(self.data_type)
])
else:
self.transform = T.Compose([
T.Resize(self.image_size, interpolation=2),
T.CenterCrop(self.image_size),
T.ToTensor(),
T.ConvertImageDtype(self.data_type)
])
    def gen_mask(self, image_size, mask_size, mask_type, patch_size):
        # Output: a (H, W) float tensor with ones marking the masked patches.
action_list = [[0, 1], [0, -1], [1, 0], [-1, 0]]
assert image_size[0] == image_size[1]
img_size_patch = image_size[0] // patch_size
# drop masked patches
mask = torch.zeros((image_size[0], image_size[1]), dtype=torch.float)
if mask_type == 'random':
x = torch.randint(0, img_size_patch, ())
y = torch.randint(0, img_size_patch, ())
for i in range(mask_size):
r = torch.randint(0, len(action_list), ())
x = torch.clamp(x + action_list[r][0], min=0, max=img_size_patch - 1)
y = torch.clamp(y + action_list[r][1], min=0, max=img_size_patch - 1)
x_offset = x * patch_size
y_offset = y * patch_size
mask[x_offset:x_offset+patch_size, y_offset:y_offset+patch_size] = 1
else:
assert mask_type == 'row'
count = 0
for x in reversed(range(img_size_patch)):
for y in reversed(range(img_size_patch)):
if (count < mask_size):
count += 1
x_offset = x * patch_size
y_offset = y * patch_size
mask[x_offset:x_offset+patch_size, y_offset:y_offset+patch_size] = 1
return mask
def __call__(self, input):
trans_input = self.transform(input)
mask = self.gen_mask(self.image_size, self.mask_size,
self.mask_type, self.patch_size)
mask = mask.unsqueeze(dim=0)
return trans_input, mask
class DinoTransform(object):
def __init__(self, image_size, train=True):
args = get_args()
self.data_type = torch.half if args.fp16 else torch.bfloat16
flip_and_color_jitter = T.Compose([
T.RandomHorizontalFlip(p=0.5),
T.RandomApply(
[T.ColorJitter(brightness=0.4, contrast=0.4,
saturation=0.2, hue=0.1)],
p=0.8
),
T.RandomGrayscale(p=0.2),
])
if args.fp16 or args.bf16:
normalize = T.Compose([
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
T.ConvertImageDtype(self.data_type)
])
else:
normalize = T.Compose([
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
# first global crop
scale_const = 0.4
self.global_transform1 = T.Compose([
T.RandomResizedCrop(image_size,
scale=(scale_const, 1),
interpolation=Image.BICUBIC),
flip_and_color_jitter,
GaussianBlur(1.0),
normalize
])
# second global crop
self.global_transform2 = T.Compose([
T.RandomResizedCrop(image_size,
scale=(scale_const, 1),
interpolation=Image.BICUBIC),
flip_and_color_jitter,
GaussianBlur(0.1),
Solarization(0.2),
normalize
])
# transformation for the local small crops
self.local_crops_number = args.dino_local_crops_number
self.local_transform = T.Compose([
T.RandomResizedCrop(args.dino_local_img_size,
scale=(0.05, scale_const),
interpolation=Image.BICUBIC),
flip_and_color_jitter,
GaussianBlur(p=0.5),
normalize
])
def __call__(self, image):
crops = []
crops.append(self.global_transform1(image))
crops.append(self.global_transform2(image))
for _ in range(self.local_crops_number):
crops.append(self.local_transform(image))
return crops
def build_train_valid_datasets(data_path, image_size=224):
args = get_args()
if args.vision_pretraining_type == 'classify':
train_transform = ClassificationTransform(image_size)
val_transform = ClassificationTransform(image_size, train=False)
elif args.vision_pretraining_type == 'inpaint':
train_transform = InpaintingTransform(image_size, train=False)
val_transform = InpaintingTransform(image_size, train=False)
elif args.vision_pretraining_type == 'dino':
train_transform = DinoTransform(image_size, train=True)
val_transform = ClassificationTransform(image_size, train=False)
else:
        raise Exception('{} vision pretraining type is not supported.'.format(
            args.vision_pretraining_type))
# training dataset
train_data_path = data_path[0] if len(data_path) <= 2 else data_path[2]
train_data = ImageFolder(
root=train_data_path,
transform=train_transform,
classes_fraction=args.classes_fraction,
data_per_class_fraction=args.data_per_class_fraction
)
train_data = RandomSeedDataset(train_data)
# validation dataset
val_data_path = data_path[1]
val_data = ImageFolder(
root=val_data_path,
transform=val_transform
)
val_data = RandomSeedDataset(val_data)
return train_data, val_data
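# Minimal usage sketch (illustrative, not part of the original module): the
# GaussianBlur and Solarization wrappers can be exercised on any PIL image
# without Megatron arguments; a synthetic 64x64 image is used here, and the
# probabilities are set to 1.0 so both augmentations always fire.
if __name__ == "__main__":
    toy_image = Image.new("RGB", (64, 64), color=(200, 100, 50))
    blurred = GaussianBlur(p=1.0, radius_min=0.5, radius_max=1.5)(toy_image)
    solarized = Solarization(p=1.0)(blurred)
    print(blurred.size, solarized.size)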
|
Megatron-LM-master
|
megatron/data/vit_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""T5 Style dataset."""
import collections
import numpy as np
import torch
from megatron import get_tokenizer
from megatron.data.dataset_utils import (
create_masked_lm_predictions,
get_samples_mapping
)
class T5Dataset(torch.utils.data.Dataset):
def __init__(self, name, indexed_dataset, data_prefix,
num_epochs, max_num_samples, masked_lm_prob,
max_seq_length, max_seq_length_dec,
short_seq_prob, seed):
# Params to store.
self.name = name
self.seed = seed
self.masked_lm_prob = masked_lm_prob
self.max_seq_length = max_seq_length
self.max_seq_length_dec = max_seq_length_dec
# Dataset.
self.indexed_dataset = indexed_dataset
# Build the samples mapping.
self.samples_mapping = get_samples_mapping(self.indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
self.max_seq_length - 2, # account for added tokens
short_seq_prob,
self.seed,
self.name,
False)
# Vocab stuff.
tokenizer = get_tokenizer()
self.vocab_id_list = list(tokenizer.inv_vocab.keys())
self.vocab_id_to_token_dict = tokenizer.inv_vocab
self.cls_id = tokenizer.cls
self.sep_id = tokenizer.sep
self.mask_id = tokenizer.mask
self.pad_id = tokenizer.pad
self.bos_id = tokenizer.bos_token_id
self.eos_id = tokenizer.eos_token_id
self.sentinel_tokens = tokenizer.additional_special_tokens_ids
assert len(self.sentinel_tokens) > 0, "Provide the argument --vocab-extra-ids 100 to the script"
def __len__(self):
return self.samples_mapping.shape[0]
def __getitem__(self, idx):
start_index, end_index, seq_length = self.samples_mapping[idx]
sample = []
for index in range(start_index, end_index):
sample.append(self.indexed_dataset[index])
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
np_rng = np.random.RandomState(seed=(self.seed + idx))
return build_training_sample(sample, seq_length,
self.max_seq_length, # needed for padding
self.max_seq_length_dec,
self.vocab_id_list,
self.vocab_id_to_token_dict,
self.cls_id, self.sep_id,
self.mask_id, self.pad_id,
self.masked_lm_prob, np_rng,
self.bos_id, self.eos_id,
self.sentinel_tokens)
def build_training_sample(sample, target_seq_length,
max_seq_length, max_seq_length_dec,
vocab_id_list, vocab_id_to_token_dict,
cls_id, sep_id, mask_id, pad_id,
masked_lm_prob, np_rng, bos_id=None,
eos_id=None, sentinel_tokens=None):
"""Build training sample.
Arguments:
sample: A list of sentences in which each sentence is a list token ids.
target_seq_length: Desired sequence length.
max_seq_length: Maximum length of the sequence. All values are padded to
this length.
vocab_id_list: List of vocabulary ids. Used to pick a random id.
vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
cls_id: Start of example id.
sep_id: Separator id.
mask_id: Mask token id.
pad_id: Padding token id.
masked_lm_prob: Probability to mask tokens.
        np_rng: Random number generator. Note that this rng state should be
            numpy and not python since python randint is inclusive for
            the upper bound whereas the numpy one is exclusive.
bos_id: start of decoder example id
eos_id: end of generation id
sentinel_tokens: unique value to be substituted for every replaced span
"""
assert target_seq_length <= max_seq_length
# flatten sentences into one list
tokens = [token for sentence in sample for token in sentence]
# Truncate to `target_sequence_length`.
max_num_tokens = target_seq_length
truncated = len(tokens) > max_num_tokens
tokens = tokens[:max_num_tokens]
# Masking.
max_predictions_per_seq = masked_lm_prob * max_num_tokens
(tokens, masked_positions, masked_labels, _, masked_spans) = create_masked_lm_predictions(
tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob,
cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng,
max_ngrams=10, geometric_dist=True, masking_style="t5")
# Padding.
tokens_enc, tokens_dec_in, labels, enc_mask, \
dec_mask, enc_dec_mask, loss_mask \
= pad_and_convert_to_numpy(tokens, masked_positions,
masked_labels, pad_id, max_seq_length,
max_seq_length_dec, masked_spans,
bos_id, eos_id, sentinel_tokens)
train_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'truncated': int(truncated),
'enc_mask': enc_mask,
'dec_mask': dec_mask,
'enc_dec_mask': enc_dec_mask,
}
return train_sample
def pad_and_convert_to_numpy(tokens, masked_positions,
masked_labels, pad_id,
max_seq_length, max_seq_length_dec,
masked_spans=None, bos_id=None,
eos_id=None, sentinel_tokens=None):
"""Pad sequences and convert them to numpy."""
sentinel_tokens = collections.deque(sentinel_tokens)
t5_input = []
(t5_decoder_in, t5_decoder_out) = ([bos_id], [])
(start_index, end_index) = (0, None)
for span in masked_spans:
flag = sentinel_tokens.popleft()
# Append the same tokens in decoder input and output
t5_decoder_in.append(flag)
t5_decoder_in.extend(span.label)
t5_decoder_out.append(flag)
t5_decoder_out.extend(span.label)
end_index = span.index[0]
t5_input.extend(tokens[start_index: end_index])
t5_input.append(flag)
# the next start index is the token after the last span token
start_index = span.index[-1] + 1
# Add <eos> token to the t5_decoder_out
t5_decoder_out.append(eos_id)
# Add the remaining tokens to the t5 input
t5_input.extend(tokens[start_index:])
# assert (len(t5_input) - len(masked_spans)) + \
# (len(t5_decoder_in) - (len(masked_spans) + 1)) == len(tokens)
# Some checks.
# Encoder-side padding mask.
num_tokens = len(t5_input)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0
assert len(masked_positions) == len(masked_labels)
# Tokens..
filler = [pad_id] * padding_length
tokens_enc = np.array(t5_input + filler, dtype=np.int64)
# Decoder-side padding mask.
num_tokens_dec = len(t5_decoder_in)
padding_length_dec = max_seq_length_dec - num_tokens_dec
assert padding_length_dec >= 0
filler_dec = [pad_id] * padding_length_dec
tokens_dec_in = np.array(t5_decoder_in + filler_dec, dtype=np.int64)
# Create attention masks
enc_mask = make_attention_mask(tokens_enc, tokens_enc)
enc_dec_mask = make_attention_mask(tokens_dec_in, tokens_enc)
dec_mask = make_attention_mask(tokens_dec_in, tokens_dec_in)
dec_mask = dec_mask * make_history_mask(tokens_dec_in)
# Labels mask.
labels = t5_decoder_out + ([-1] * padding_length_dec)
labels = np.array(labels, dtype=np.int64)
# Loss mask
loss_mask = ([1] * num_tokens_dec) + ([0] * padding_length_dec)
loss_mask = np.array(loss_mask, dtype=np.int64)
return tokens_enc, tokens_dec_in, labels, enc_mask, \
dec_mask, enc_dec_mask, loss_mask
def make_attention_mask(source_block, target_block):
"""
Returns a 2-dimensional (2-D) attention mask
:param source_block: 1-D array
:param target_block: 1-D array
"""
mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)
mask = mask.astype(np.int64)
# (source_length, target_length)
return mask
def make_attention_mask_3d(source_block, target_block):
"""
Returns a 3-dimensional (3-D) attention mask
:param source_block: 1-D array
:param target_block: 1-D array
"""
mask = (target_block[:, None, :] >= 1) * (source_block[:, :, None] >= 1)
# (batch, source_length, target_length)
# mask = mask.astype(np.int64)
return mask
def make_history_mask(block):
length = block.shape[0]
arange = np.arange(length)
history_mask = (arange[None, ] <= arange[:, None])
history_mask = history_mask.astype(np.int64)
return history_mask
def make_history_mask_3d(block):
batch, length = block.shape
arange = torch.arange(length, device=block.device)
history_mask = (arange[None, ] <= arange[:, None])[None, ]
history_mask = history_mask.expand(batch, length, length)
return history_mask
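# Minimal usage sketch (illustrative, not part of the original module): the
# masking helpers operate on plain integer arrays, so they can be inspected
# without building a full T5 sample. Token id 0 is treated as padding by the
# ">= 1" test, which is why the last position is masked out below; the other
# ids are made up.
if __name__ == "__main__":
    toy_ids = np.array([101, 7, 8, 102, 0], dtype=np.int64)  # 0 = pad
    attn_mask = make_attention_mask(toy_ids, toy_ids)
    causal_mask = make_history_mask(toy_ids)
    print(attn_mask.shape, attn_mask[0])  # (5, 5), row [1 1 1 1 0]
    print(causal_mask.shape)              # (5, 5) lower-triangular mask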
|
Megatron-LM-master
|
megatron/data/t5_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Essentially re-written in entirety
import os
import shutil
import struct
from enum import Enum
from functools import lru_cache
from itertools import accumulate
from types import TracebackType
from typing import List, Optional, Tuple, Type, Union
import numpy as np
import torch
from megatron import print_rank_0
_INDEX_HEADER = b"MMIDIDX\x00\x00"
class DType(Enum):
uint8 = 1
int8 = 2
int16 = 3
int32 = 4
int64 = 5
float64 = 6
float32 = 7
uint16 = 8
@classmethod
def code_from_dtype(cls, value: Type[np.number]) -> int:
return cls[value.__name__].value
@classmethod
def dtype_from_code(cls, value: int) -> Type[np.number]:
return getattr(np, cls(value).name)
@staticmethod
def size(key: Union[int, Type[np.number]]) -> int:
if isinstance(key, int):
return DType.dtype_from_code(key)().itemsize
elif np.number in key.__mro__:
return key().itemsize
else:
raise ValueError
@staticmethod
def optimal_dtype(cardinality: int) -> Type[np.number]:
if cardinality is not None and cardinality < 65500:
return np.uint16
else:
return np.int32
class _IndexWriter(object):
"""
Object class to write the index file i.e. <data-path>.idx
"""
def __init__(self, path: str, dtype: Type[np.number]) -> None:
self.path = path
self.dtype = dtype
def __enter__(self) -> "_IndexWriter":
self.idx_path = open(self.path, "wb")
# fixed, vestigial practice
self.idx_path.write(_INDEX_HEADER)
# fixed, vestigial practice
self.idx_path.write(struct.pack("<Q", 1))
# the numeric code for the dtype
self.idx_path.write(struct.pack("<B", DType.code_from_dtype(self.dtype)))
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
self.idx_path.close()
def write(
self,
sequence_lengths: List[int],
sequence_modes: Optional[List[int]],
document_indices: List[int],
) -> None:
sequence_pointers = self._sequence_pointers(sequence_lengths)
# the number of sequences in the dataset
sequence_count = len(sequence_lengths)
self.idx_path.write(struct.pack("<Q", sequence_count))
# the number of documents in the dataset
document_count = len(document_indices)
self.idx_path.write(struct.pack("<Q", document_count))
# the number of tokens per sequence
sequence_lengths = np.array(sequence_lengths, dtype=np.int32)
self.idx_path.write(sequence_lengths.tobytes(order="C"))
del sequence_lengths
# the byte offsets for all sequences
sequence_pointers = np.array(sequence_pointers, dtype=np.int64)
self.idx_path.write(sequence_pointers.tobytes(order="C"))
del sequence_pointers
# the sequence indices marking the end of each document
document_indices = np.array(document_indices, dtype=np.int64)
self.idx_path.write(document_indices.tobytes(order="C"))
# the mode per sequence
if sequence_modes is not None:
            sequence_modes = np.array(sequence_modes, dtype=np.int8)
            self.idx_path.write(sequence_modes.tobytes(order="C"))
del sequence_modes
def _sequence_pointers(self, sequence_lengths: List[int]) -> List[int]:
itemsize = DType.size(self.dtype)
curr_ptr = 0
list_ptr = []
for length in sequence_lengths:
list_ptr.append(curr_ptr)
curr_ptr += length * itemsize
return list_ptr
class _IndexReader(object):
"""
Object class to read the index file i.e. <data-path>.idx
"""
def __init__(self, path: str, multimodal: bool) -> None:
with open(path, "rb") as stream:
header = stream.read(9)
assert header == _INDEX_HEADER, f"bad header, cannot read: {path}"
version = struct.unpack("<Q", stream.read(8))[0]
assert version == 1, f"bad version, cannot read: {path}"
code = struct.unpack("<B", stream.read(1))[0]
self._dtype = DType.dtype_from_code(code)
self._dtype_size = DType.size(self._dtype)
self._sequence_count = struct.unpack("<Q", stream.read(8))[0]
self._document_count = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
self._multimodal = multimodal
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
print_rank_0(" reading sequence lengths...")
self._sequence_lengths = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._sequence_count, offset=offset
)
print_rank_0(" reading sequence pointers...")
self._sequence_pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._sequence_count,
offset=offset + self._sequence_lengths.nbytes,
)
print_rank_0(" reading document indices...")
self._document_indices = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._document_count,
offset=offset + self._sequence_lengths.nbytes + self._sequence_pointers.nbytes,
)
self._sequence_modes = None
if self._multimodal:
print_rank_0(" reading sequence modes...")
self._sequence_modes = np.frombuffer(
self._bin_buffer,
dtype=np.int8,
                count=self._sequence_count,
offset=offset
+ self._sequence_lengths.nbytes
+ self._sequence_pointers.nbytes
+ self._document_indices.nbytes,
)
def __del__(self) -> None:
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
def __len__(self) -> int:
return self._sequence_count
@lru_cache(maxsize=8)
def __getitem__(self, i: int) -> Tuple[np.int32, np.int64, Optional[np.int8]]:
return (
self._sequence_pointers[i],
self._sequence_lengths[i],
self._sequence_modes[i] if self._multimodal else None,
)
@property
def dtype(self) -> Type[np.number]:
return self._dtype
@property
def sizes(self) -> np.ndarray:
return self._sequence_lengths
@property
def doc_idx(self) -> np.ndarray:
return self._document_indices
@property
def modes(self) -> np.ndarray:
return self._sequence_modes
class MMapIndexedDataset(torch.utils.data.Dataset):
def __init__(self, path: str, skip_warmup: bool = False, multimodal: bool = False) -> None:
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._multimodal = multimodal
self._do_init(path, skip_warmup, multimodal)
def __getstate__(self) -> str:
return self._path
def __setstate__(self, path: str) -> None:
self._do_init(path, skip_warmup=True, multimodal=False)
def __del__(self) -> None:
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self) -> int:
return len(self._index)
def __getitem__(self, idx: Union[int, np.integer, slice]) -> np.ndarray:
if isinstance(idx, (int, np.integer)):
sequence_pointer, sequence_length, sequence_mode = self._index[idx]
sequence = np.frombuffer(
self._bin_buffer,
dtype=self._index.dtype,
count=sequence_length,
offset=sequence_pointer,
)
return (sequence, sequence_mode) if sequence_mode is not None else sequence
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sequence_lengths = self._index._sequence_lengths[idx]
sequence_modes = self._index._sequence_modes[idx] if self._multimodal else None
sequence_offsets = list(accumulate(sequence_lengths))
sequences = np.split(
np.frombuffer(
self._bin_buffer,
dtype=self._index.dtype,
count=sum(sequence_lengths),
offset=self._index._sequence_pointers[start],
),
sequence_offsets[:-1],
)
return (sequences, sequence_modes) if sequence_modes is not None else sequences
else:
raise TypeError("Unexpected type received for idx: {}".format(type(idx)))
def _do_init(self, path: str, skip_warmup: bool, multimodal: bool) -> None:
self._path = path
if not skip_warmup:
print_rank_0(" warming up index mmap file...")
self.warmup_mmap_file(get_idx_path(self._path))
self._index = _IndexReader(get_idx_path(self._path), multimodal)
if not skip_warmup:
print_rank_0(" warming up data mmap file...")
self.warmup_mmap_file(get_bin_path(self._path))
print_rank_0(" creating np buffer of mmap...")
self._bin_buffer_mmap = np.memmap(get_bin_path(self._path), mode="r", order="C")
print_rank_0(" creating memory view of np buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def get(self, idx: int, offset: int = 0, length: Optional[int] = None) -> np.ndarray:
"""Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
sequence_pointer, sequence_length, sequence_mode = self._index[idx]
if length is None:
length = sequence_length - offset
sequence_pointer += offset * DType.size(self._index.dtype)
sequence = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=length, offset=sequence_pointer
)
return (sequence, sequence_mode) if sequence_mode is not None else sequence
@property
def sizes(self) -> np.ndarray:
return self._index.sizes
@property
def doc_idx(self) -> np.ndarray:
return self._index._document_indices
def get_doc_idx(self) -> np.ndarray:
return self._index._document_indices
def set_doc_idx(self, doc_idx: np.ndarray) -> None:
self._index._document_indices = doc_idx
def modes(self) -> np.ndarray:
return self._index.modes
@property
def supports_prefetch(self) -> bool:
return False
@staticmethod
def exists(path_prefix: str) -> bool:
return os.path.exists(get_idx_path(path_prefix)) and os.path.exists(
get_bin_path(path_prefix)
)
@staticmethod
def warmup_mmap_file(path: str) -> None:
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDatasetBuilder(object):
def __init__(
self, bin_path: str, dtype: Type[np.number] = np.int32, multimodal: bool = False
) -> None:
self._data_file = open(bin_path, "wb")
self._dtype = dtype
self._multimodal = multimodal
self._sequence_lengths = []
self._document_indices = [0]
self._sequence_modes = [] if self._multimodal else None
def add_item(self, tensor: torch.Tensor, mode: int = 0) -> None:
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order="C"))
self._sequence_lengths.append(np_array.size)
if self._multimodal:
self._sequence_modes.append(mode)
def add_doc(
self, tensor: torch.Tensor, lengths: List[int], modes: Optional[List[int]] = None
) -> None:
np_array = np.array(tensor, dtype=self._dtype)
self._data_file.write(np_array.tobytes(order="C"))
self._sequence_lengths.extend(lengths)
self._document_indices.append(len(self._sequence_lengths))
if self._multimodal:
            self._sequence_modes.extend(modes if modes is not None else [0] * len(lengths))
def end_document(self) -> None:
self._document_indices.append(len(self._sequence_lengths))
def merge_file_(self, path_prefix: str) -> None:
# Concatenate index
index = _IndexReader(get_idx_path(path_prefix), multimodal=self._multimodal)
assert index.dtype == self._dtype
offset = len(self._sequence_lengths)
self._sequence_lengths.extend(index.sizes)
self._document_indices.extend((offset + index.doc_idx)[1:])
if self._multimodal:
self._sequence_modes.extend(index._sequence_modes)
# Concatenate data
with open(get_bin_path(path_prefix), "rb") as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, idx_path: str) -> None:
self._data_file.close()
with _IndexWriter(idx_path, self._dtype) as writer:
writer.write(self._sequence_lengths, self._sequence_modes, self._document_indices)
def get_idx_path(path_prefix: str) -> str:
return path_prefix + ".idx"
def get_bin_path(path_prefix: str) -> str:
return path_prefix + ".bin"
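# Minimal usage sketch (illustrative, not part of the original module): build a
# tiny two-document dataset with MMapIndexedDatasetBuilder, then read it back
# with MMapIndexedDataset. The files live in a temporary directory and the
# token ids are made up.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        prefix = os.path.join(tmpdir, "toy_dataset")
        builder = MMapIndexedDatasetBuilder(get_bin_path(prefix), dtype=np.int32)
        builder.add_item(torch.tensor([1, 2, 3], dtype=torch.int32))
        builder.end_document()
        builder.add_item(torch.tensor([4, 5], dtype=torch.int32))
        builder.end_document()
        builder.finalize(get_idx_path(prefix))

        dataset = MMapIndexedDataset(prefix)
        print(len(dataset))         # 2 sequences
        print(dataset[0].tolist())  # [1, 2, 3]
        print(dataset.doc_idx)      # [0 1 2]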
|
Megatron-LM-master
|
megatron/data/indexed_dataset.py
|
import os
import time
import numpy as np
import torch
from megatron import get_args, get_tokenizer, print_rank_0
from megatron.core import mpu, tensor_parallel
from megatron.data.dataset_utils import create_masked_lm_predictions, pad_and_convert_to_numpy
def get_one_epoch_dataloader(dataset, micro_batch_size=None):
"""Specifically one epoch to be used in an indexing job."""
args = get_args()
world_size = mpu.get_data_parallel_world_size()
rank = mpu.get_data_parallel_rank()
if micro_batch_size is None:
micro_batch_size = args.micro_batch_size
global_batch_size = micro_batch_size * world_size
num_workers = args.num_workers
sampler = torch.utils.data.SequentialSampler(dataset)
# importantly, drop_last must be False to get all the data.
assert False, 'DistributedBatchSampler deprecated, change the implementation'
from megatron.data.samplers import DistributedBatchSampler
batch_sampler = DistributedBatchSampler(sampler,
batch_size=global_batch_size,
drop_last=False,
rank=rank,
world_size=world_size)
return torch.utils.data.DataLoader(dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=True)
def get_ict_batch(data_iterator):
# Items and their type.
keys = ['query_tokens', 'query_pad_mask',
'block_tokens', 'block_pad_mask', 'block_data']
datatype = torch.int64
# Broadcast data.
if data_iterator is None:
data = None
else:
data = next(data_iterator)
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
query_tokens = data_b['query_tokens'].long()
query_pad_mask = data_b['query_pad_mask'].long()
block_tokens = data_b['block_tokens'].long()
block_pad_mask = data_b['block_pad_mask'].long()
block_indices = data_b['block_data'].long()
return query_tokens, query_pad_mask,\
block_tokens, block_pad_mask, block_indices
def join_str_list(str_list):
"""Join a list of strings, handling spaces appropriately"""
result = ""
for s in str_list:
if s.startswith("##"):
result += s[2:]
else:
result += " " + s
return result
class BlockSampleData(object):
"""A struct for fully describing a fixed-size block of data as used in REALM
:param start_idx: for first sentence of the block
:param end_idx: for last sentence of the block (may be partially truncated in sample construction)
:param doc_idx: the index of the document from which the block comes in the original indexed dataset
:param block_idx: a unique integer identifier given to every block.
"""
def __init__(self, start_idx, end_idx, doc_idx, block_idx):
self.start_idx = start_idx
self.end_idx = end_idx
self.doc_idx = doc_idx
self.block_idx = block_idx
def as_array(self):
return np.array([self.start_idx, self.end_idx, self.doc_idx, self.block_idx]).astype(np.int64)
def as_tuple(self):
return self.start_idx, self.end_idx, self.doc_idx, self.block_idx
class BlockSamplesMapping(object):
def __init__(self, mapping_array):
# make sure that the array is compatible with BlockSampleData
assert mapping_array.shape[1] == 4
self.mapping_array = mapping_array
def __len__(self):
return self.mapping_array.shape[0]
def __getitem__(self, idx):
"""Get the data associated with an indexed sample."""
sample_data = BlockSampleData(*self.mapping_array[idx])
return sample_data
def get_block_samples_mapping(block_dataset, title_dataset, data_prefix, num_epochs,
max_num_samples, max_seq_length, seed, name, use_one_sent_docs=False):
"""Get samples mapping for a dataset over fixed size blocks. This function also requires
a dataset of the titles for the source documents since their lengths must be taken into account.
:return: samples_mapping (BlockSamplesMapping)
"""
if not num_epochs:
if not max_num_samples:
raise ValueError("Need to specify either max_num_samples "
"or num_epochs")
num_epochs = np.iinfo(np.int32).max - 1
if not max_num_samples:
max_num_samples = np.iinfo(np.int64).max - 1
# Filename of the index mapping
indexmap_filename = data_prefix
indexmap_filename += '_{}_indexmap'.format(name)
if num_epochs != (np.iinfo(np.int32).max - 1):
indexmap_filename += '_{}ep'.format(num_epochs)
if max_num_samples != (np.iinfo(np.int64).max - 1):
indexmap_filename += '_{}mns'.format(max_num_samples)
indexmap_filename += '_{}msl'.format(max_seq_length)
indexmap_filename += '_{}s'.format(seed)
if use_one_sent_docs:
indexmap_filename += '_1sentok'
indexmap_filename += '.npy'
# Build the indexed mapping if not exist.
if mpu.get_data_parallel_rank() == 0 and \
not os.path.isfile(indexmap_filename):
print(' > WARNING: could not find index map file {}, building '
'the indices on rank 0 ...'.format(indexmap_filename))
# Make sure the types match the helpers input types.
assert block_dataset.doc_idx.dtype == np.int64
assert block_dataset.sizes.dtype == np.int32
# Build samples mapping
verbose = torch.distributed.get_rank() == 0
start_time = time.time()
print_rank_0(' > building samples index mapping for {} ...'.format(
name))
from megatron.data import helpers
mapping_array = helpers.build_blocks_mapping(
block_dataset.doc_idx,
block_dataset.sizes,
title_dataset.sizes,
num_epochs,
max_num_samples,
max_seq_length - 3, # account for added tokens
seed,
verbose,
use_one_sent_docs)
print_rank_0(' > done building samples index mapping')
np.save(indexmap_filename, mapping_array, allow_pickle=True)
print_rank_0(' > saved the index mapping in {}'.format(
indexmap_filename))
# Make sure all the ranks have built the mapping
print_rank_0(' > elapsed time to build and save samples mapping '
'(seconds): {:4f}'.format(
time.time() - start_time))
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
assert counts[0].item() == torch.distributed.get_world_size(
group=mpu.get_data_parallel_group())
# Load indexed dataset.
print_rank_0(' > loading indexed mapping from {}'.format(
indexmap_filename))
start_time = time.time()
mapping_array = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r')
samples_mapping = BlockSamplesMapping(mapping_array)
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
mapping_array.shape[0]))
return samples_mapping
|
Megatron-LM-master
|
megatron/data/realm_dataset_utils.py
|
import itertools
import random
import numpy as np
from torch.utils.data import Dataset
from megatron import get_tokenizer
from megatron import get_args
from megatron.data.dataset_utils import get_indexed_dataset_
from megatron.data.realm_dataset_utils import get_block_samples_mapping
def make_attention_mask(source_block, target_block):
"""
Returns a 2-dimensional (2-D) attention mask
:param source_block: 1-D array
:param target_block: 1-D array
"""
mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)
mask = mask.astype(np.int64)
# (source_length, target_length)
return mask
def get_ict_dataset(use_titles=True, query_in_block_prob=1):
"""Get a dataset which uses block samples mappings to get ICT/block indexing data (via get_block())
rather than for training, since it is only built with a single epoch sample mapping.
"""
args = get_args()
block_dataset = get_indexed_dataset_(args.data_path, 'mmap', True)
titles_dataset = get_indexed_dataset_(args.titles_data_path, 'mmap', True)
kwargs = dict(
name='full',
block_dataset=block_dataset,
title_dataset=titles_dataset,
data_prefix=args.data_path,
num_epochs=1,
max_num_samples=None,
max_seq_length=args.seq_length,
seed=1,
query_in_block_prob=query_in_block_prob,
use_titles=use_titles,
use_one_sent_docs=args.use_one_sent_docs
)
dataset = ICTDataset(**kwargs)
return dataset
class ICTDataset(Dataset):
"""Dataset containing sentences and their blocks for an inverse cloze task."""
def __init__(self, name, block_dataset, title_dataset, data_prefix,
num_epochs, max_num_samples, max_seq_length, query_in_block_prob,
seed, use_titles=True, use_one_sent_docs=False, binary_head=False):
self.name = name
self.seed = seed
self.max_seq_length = max_seq_length
self.query_in_block_prob = query_in_block_prob
self.block_dataset = block_dataset
self.title_dataset = title_dataset
self.rng = random.Random(self.seed)
self.use_titles = use_titles
self.use_one_sent_docs = use_one_sent_docs
self.samples_mapping = get_block_samples_mapping(
block_dataset, title_dataset, data_prefix, num_epochs,
max_num_samples, max_seq_length, seed, name, use_one_sent_docs)
self.tokenizer = get_tokenizer()
self.vocab_id_list = list(self.tokenizer.inv_vocab.keys())
self.vocab_id_to_token_list = self.tokenizer.inv_vocab
self.cls_id = self.tokenizer.cls
self.sep_id = self.tokenizer.sep
self.mask_id = self.tokenizer.mask
self.pad_id = self.tokenizer.pad
def __len__(self):
return len(self.samples_mapping)
def __getitem__(self, idx):
"""Get an ICT example of a pseudo-query and the block of text from which it was extracted"""
sample_data = self.samples_mapping[idx]
start_idx, end_idx, doc_idx, block_idx = sample_data.as_tuple()
if self.use_titles:
title = self.title_dataset[int(doc_idx)]
title_pad_offset = 3 + len(title)
else:
title = None
title_pad_offset = 2
block = [self.block_dataset[i] for i in range(start_idx, end_idx)]
assert len(block) > 1 or self.use_one_sent_docs or self.query_in_block_prob == 1
# randint() is inclusive for Python rng
rand_sent_idx = self.rng.randint(0, len(block) - 1)
# keep the query in the context query_in_block_prob fraction of the time.
if self.rng.random() < self.query_in_block_prob:
query = block[rand_sent_idx].copy()
else:
query = block.pop(rand_sent_idx)
# still need to truncate because blocks are concluded when
# the sentence lengths have exceeded max_seq_length.
query = query[:self.max_seq_length - 2]
block = list(itertools.chain(*block))[:self.max_seq_length - title_pad_offset]
query_tokens, query_pad_mask = self.concat_and_pad_tokens(query)
context_tokens, context_pad_mask = self.concat_and_pad_tokens(block, title)
query_mask = make_attention_mask(query_tokens, query_tokens)
context_mask = make_attention_mask(context_tokens, context_tokens)
block_data = sample_data.as_array()
sample = {
'query_tokens': query_tokens,
'query_mask': query_mask,
'query_pad_mask': query_pad_mask,
'context_tokens': context_tokens,
'context_mask': context_mask,
'context_pad_mask': context_pad_mask,
'block_data': block_data,
}
return sample
def get_block(self, start_idx, end_idx, doc_idx):
"""Get the IDs for an evidence block plus the title of the corresponding document"""
block = [self.block_dataset[i] for i in range(start_idx, end_idx)]
title = self.title_dataset[int(doc_idx)]
block = list(itertools.chain(*block))[:self.max_seq_length - (3 + len(title))]
block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title)
return block_tokens, block_pad_mask
def get_null_block(self):
"""Get empty block and title - used in REALM pretraining"""
block, title = [], []
block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title)
return block_tokens, block_pad_mask
def concat_and_pad_tokens(self, tokens, title=None):
"""Concat with special tokens and pad sequence to self.max_seq_length"""
tokens = list(tokens)
if title is None:
tokens = [self.cls_id] + tokens + [self.sep_id]
else:
title = list(title)
tokens = [self.cls_id] + title + [self.sep_id] + tokens + [self.sep_id]
assert len(tokens) <= self.max_seq_length
num_pad = self.max_seq_length - len(tokens)
pad_mask = [1] * len(tokens) + [0] * num_pad
tokens += [self.pad_id] * num_pad
return np.array(tokens), np.array(pad_mask)
|
Megatron-LM-master
|
megatron/data/ict_dataset.py
|
import os
import time
import numpy as np
import torch
from megatron import get_args, get_tokenizer, print_rank_0
from megatron.core import mpu, tensor_parallel
from megatron.data.dataset_utils import create_masked_lm_predictions, \
pad_and_convert_to_numpy
from megatron.data.data_samplers import MegatronPretrainingSampler
def make_attention_mask(source_block, target_block):
"""
Returns a 2-dimensional (2-D) attention mask
:param source_block: 1-D array
:param target_block: 1-D array
"""
mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)
mask = mask.astype(np.int64)
# (source_length, target_length)
return mask
def get_one_epoch_dataloader(dataset, micro_batch_size=None):
"""Specifically one epoch to be used in an indexing job."""
args = get_args()
if micro_batch_size is None:
micro_batch_size = args.micro_batch_size
num_workers = args.num_workers
    # Use Megatron's sampler with consumed_samples set to 0, since this
    # dataloader is only used for evaluation and we do not intend to resume
    # halfway. Also set drop_last to False so the final (possibly partial)
    # batch is kept.
batch_sampler = MegatronPretrainingSampler(
total_samples=len(dataset),
consumed_samples=0,
micro_batch_size=args.micro_batch_size,
data_parallel_rank=mpu.get_data_parallel_rank(),
data_parallel_size=mpu.get_data_parallel_world_size(),
drop_last=False)
return torch.utils.data.DataLoader(dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=True)
def get_ict_batch(data_iterator):
# Items and their type.
keys = ['query_tokens', 'query_mask',
'context_tokens', 'context_mask', 'block_data']
datatype = torch.int64
# Broadcast data.
if data_iterator is None:
data = None
else:
data = next(data_iterator)
data_b = tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
query_tokens = data_b['query_tokens'].long()
query_mask = data_b['query_mask'] < 0.5
context_tokens = data_b['context_tokens'].long()
context_mask = data_b['context_mask'] < 0.5
block_indices = data_b['block_data'].long()
return query_tokens, query_mask,\
context_tokens, context_mask, block_indices
def join_str_list(str_list):
"""Join a list of strings, handling spaces appropriately"""
result = ""
for s in str_list:
if s.startswith("##"):
result += s[2:]
else:
result += " " + s
return result
class BlockSampleData(object):
"""A struct for fully describing a fixed-size block of data as used in REALM
:param start_idx: for first sentence of the block
:param end_idx: for last sentence of the block (may be partially truncated in sample construction)
:param doc_idx: the index of the document from which the block comes in the original indexed dataset
:param block_idx: a unique integer identifier given to every block.
"""
def __init__(self, start_idx, end_idx, doc_idx, block_idx):
self.start_idx = start_idx
self.end_idx = end_idx
self.doc_idx = doc_idx
self.block_idx = block_idx
def as_array(self):
return np.array([self.start_idx, self.end_idx, self.doc_idx, self.block_idx]).astype(np.int64)
def as_tuple(self):
return self.start_idx, self.end_idx, self.doc_idx, self.block_idx
class BlockSamplesMapping(object):
def __init__(self, mapping_array):
# make sure that the array is compatible with BlockSampleData
assert mapping_array.shape[1] == 4
self.mapping_array = mapping_array
def __len__(self):
return self.mapping_array.shape[0]
def __getitem__(self, idx):
"""Get the data associated with an indexed sample."""
sample_data = BlockSampleData(*self.mapping_array[idx])
return sample_data
def get_block_samples_mapping(block_dataset, title_dataset, data_prefix, num_epochs,
max_num_samples, max_seq_length, seed, name, use_one_sent_docs=False):
"""Get samples mapping for a dataset over fixed size blocks. This function also requires
a dataset of the titles for the source documents since their lengths must be taken into account.
:return: samples_mapping (BlockSamplesMapping)
"""
if not num_epochs:
if not max_num_samples:
raise ValueError("Need to specify either max_num_samples "
"or num_epochs")
num_epochs = np.iinfo(np.int32).max - 1
if not max_num_samples:
max_num_samples = np.iinfo(np.int64).max - 1
# Filename of the index mapping
indexmap_filename = data_prefix
indexmap_filename += '_{}_indexmap'.format(name)
if num_epochs != (np.iinfo(np.int32).max - 1):
indexmap_filename += '_{}ep'.format(num_epochs)
if max_num_samples != (np.iinfo(np.int64).max - 1):
indexmap_filename += '_{}mns'.format(max_num_samples)
indexmap_filename += '_{}msl'.format(max_seq_length)
indexmap_filename += '_{}s'.format(seed)
if use_one_sent_docs:
indexmap_filename += '_1sentok'
indexmap_filename += '.npy'
# Build the indexed mapping if not exist.
if mpu.get_data_parallel_rank() == 0 and \
not os.path.isfile(indexmap_filename):
print(' > WARNING: could not find index map file {}, building '
'the indices on rank 0 ...'.format(indexmap_filename))
# Make sure the types match the helpers input types.
assert block_dataset.doc_idx.dtype == np.int64
assert block_dataset.sizes.dtype == np.int32
# Build samples mapping
verbose = torch.distributed.get_rank() == 0
start_time = time.time()
print_rank_0(' > building samples index mapping for {} ...'.format(
name))
from megatron.data import helpers
mapping_array = helpers.build_blocks_mapping(
block_dataset.doc_idx,
block_dataset.sizes,
title_dataset.sizes,
num_epochs,
max_num_samples,
max_seq_length - 3, # account for added tokens
seed,
verbose,
use_one_sent_docs)
print_rank_0(' > done building samples index mapping')
np.save(indexmap_filename, mapping_array, allow_pickle=True)
print_rank_0(' > saved the index mapping in {}'.format(
indexmap_filename))
# Make sure all the ranks have built the mapping
print_rank_0(' > elapsed time to build and save samples mapping '
'(seconds): {:4f}'.format(
time.time() - start_time))
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
assert counts[0].item() == torch.distributed.get_world_size(
group=mpu.get_data_parallel_group())
# Load indexed dataset.
print_rank_0(' > loading indexed mapping from {}'.format(
indexmap_filename))
start_time = time.time()
mapping_array = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r')
samples_mapping = BlockSamplesMapping(mapping_array)
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
mapping_array.shape[0]))
return samples_mapping
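# Minimal usage sketch (illustrative, not part of the original module):
# join_str_list glues WordPiece pieces back together, merging "##" suffix
# pieces with the preceding token. Note that the result keeps a leading space.
if __name__ == "__main__":
    pieces = ["mega", "##tron", "language", "model"]
    print(repr(join_str_list(pieces)))  # ' megatron language model'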
|
Megatron-LM-master
|
megatron/data/biencoder_dataset_utils.py
|
# This file isn't really a formal automated test, it's just a place to
# put some code used during development and manual testing of
# indexed_dataset.
import argparse
import os
import sys
import torch
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, "../../../"))
from megatron.data import indexed_dataset
from megatron.tokenizer import build_tokenizer
def test_indexed_dataset(args):
ds = indexed_dataset.MMapIndexedDataset(args.data)
tokenizer = build_tokenizer(args)
print(len(ds.doc_idx))
print(len(ds))
print(ds.doc_idx[-1])
if ds.supports_prefetch:
# just prefetch the whole thing in test (so assume it is small)
ds.prefetch(range(len(ds)))
if args.count > len(ds.doc_idx) - 1:
args.count = len(ds.doc_idx) - 1
for i in range(args.count):
start = ds.doc_idx[i]
end = ds.doc_idx[i + 1]
ids = ds[start:end]
print(f"Document {i}:")
print("--------------")
for s in ids:
assert len(s) > 0
l = s.data.tolist()
text = tokenizer.detokenize(l)
print(text)
print("---")
def test_indexed_dataset_get(args):
ds = indexed_dataset.MMapIndexedDataset(args.data)
tokenizer = build_tokenizer(args)
size = ds.sizes[0]
print(f"size: {size}")
full = ds.get(0)
print(full)
# print(tokenizer.detokenize(full.data.tolist()))
print("---")
end = ds.get(0, offset=size - 10)
print(end)
# print(tokenizer.detokenize(end.data.tolist()))
start = ds.get(0, length=10)
print(start)
# print(tokenizer.detokenize(start.data.tolist()))
part = ds.get(0, offset=2, length=8)
print(part)
# print(tokenizer.detokenize(part.data.tolist()))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, help='prefix to data files')
parser.add_argument('--count', type=int, default=10,
help='Number of samples/documents to print')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True,
choices=['BertWordPieceLowerCase',
'GPT2BPETokenizer'],
help='What type of tokenizer to use.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file (if necessary).')
parser.add_argument('--epochs', type=int, default=5,
help='Number of epochs to plan for')
parser.add_argument('--max-num-samples', type=int, default=None,
help='Maximum number of samples to plan for')
parser.add_argument('--masked-lm-prob', type=float, default=0.15,
help='probability of masking tokens')
parser.add_argument('--seq-length', type=int, default=512,
help='maximum sequence length')
parser.add_argument('--short-seq-prob', type=float, default=0.1,
help='probability of creating a short sequence')
parser.add_argument('--seed', type=int, default=1234,
help='random seed')
args = parser.parse_args()
args.rank = 0
args.make_vocab_size_divisible_by = 128
    args.tensor_model_parallel_size = 1
    args.vocab_extra_ids = 0
test_indexed_dataset_get(args)
if __name__ == "__main__":
main()
|
Megatron-LM-master
|
megatron/data/test/test_indexed_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""For backward compatibility, we need the class definitions to deserialize."""
class LossScaler:
def __init__(self, scale=1):
self.cur_scale = scale
class DynamicLossScaler:
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000,
min_scale=1,
delayed_shift=1,
consecutive_hysteresis=False):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
self.min_scale = min_scale
self.delayed_shift = delayed_shift
self.cur_hysteresis = delayed_shift
self.consecutive_hysteresis = consecutive_hysteresis
|
Megatron-LM-master
|
megatron/fp16_deprecated/loss_scaler.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import atexit
import copy
import io
import os
import re
import subprocess
import sys
import tempfile
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
__version__ = '0.0.6'
REQUIRED_PACKAGES = [
'tensorflow >= 2.1.0',
]
path = os.path.dirname(os.path.realpath(__file__))
class CMakeExtension(Extension):
def __init__(self, name, cmake_path, sources, **kwargs):
super(CMakeExtension, self).__init__(name, sources=sources, **kwargs)
self.cmake_path = cmake_path
ext_modules = []
ext_modules.append(
CMakeExtension(
name="structured_sparsity",
cmake_path=os.path.join(path, "atex", "structured_sparsity"),
sources=[],
)
)
ext_modules.append(
CMakeExtension(
name="nv_norms",
cmake_path=os.path.join(path, "atex", "nv_norms"),
sources=[],
)
)
def get_cmake_bin():
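    # Locate a usable CMake binary. If the system CMake is missing or older
    # than 3.18, pip-install cmake~=3.18 into a temporary directory and return
    # a small shell wrapper that invokes it with the proper PYTHONPATH.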
cmake_bin = "cmake"
try:
out = subprocess.check_output([cmake_bin, "--version"])
except OSError:
cmake_installed_version = LooseVersion("0.0")
else:
cmake_installed_version = LooseVersion(
re.search(r"version\s*([\d.]+)", out.decode()).group(1)
)
if cmake_installed_version < LooseVersion("3.18.0"):
print(
"Could not find a recent CMake to build Transformer Engine. "
"Attempting to install CMake 3.18 to a temporary location via pip.",
flush=True,
)
cmake_temp_dir = tempfile.TemporaryDirectory(prefix="nvte-cmake-tmp")
atexit.register(cmake_temp_dir.cleanup)
try:
_ = subprocess.check_output(
["pip", "install", "--target", cmake_temp_dir.name, "cmake~=3.18.0"]
)
except Exception:
raise RuntimeError(
"Failed to install temporary CMake. "
"Please update your CMake to 3.18+."
)
cmake_bin = os.path.join(cmake_temp_dir.name, "bin", "run_cmake")
with io.open(cmake_bin, "w") as f_run_cmake:
f_run_cmake.write(
f"#!/bin/sh\nPYTHONPATH={cmake_temp_dir.name} {os.path.join(cmake_temp_dir.name, 'bin', 'cmake')} \"$@\""
)
os.chmod(cmake_bin, 0o755)
return cmake_bin
class CMakeBuildExtension(build_ext, object):
def __init__(self, *args, **kwargs) -> None:
super(CMakeBuildExtension, self).__init__(*args, **kwargs)
def build_extensions(self) -> None:
print("Building CMake extensions!")
self.cmake_bin = get_cmake_bin()
for extension in self.extensions:
self.build_cmake(extension)
def build_cmake(self, extension) -> None:
config = "Debug" if self.debug else "Release"
ext_name = extension.name
build_dir = self.get_ext_fullpath(ext_name).replace(
self.get_ext_filename(ext_name), ""
)
build_dir = os.path.abspath(build_dir)
cmake_args = [
"-DCMAKE_BUILD_TYPE=" + config,
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(config.upper(), build_dir),
]
try:
import ninja
except ImportError:
pass
else:
cmake_args.append("-GNinja")
cmake_args = cmake_args
cmake_build_args = ["--config", config]
cmake_build_dir = os.path.join(self.build_temp, ext_name, config)
if not os.path.exists(cmake_build_dir):
os.makedirs(cmake_build_dir)
config_and_build_commands = [
[self.cmake_bin, extension.cmake_path] + cmake_args,
[self.cmake_bin, "--build", "."] + cmake_build_args,
]
print(f"Running CMake in {cmake_build_dir}:")
for command in config_and_build_commands:
print(" ".join(command))
sys.stdout.flush()
# Config and build the extension
try:
for command in config_and_build_commands:
subprocess.check_call(command, cwd=cmake_build_dir)
except OSError as e:
raise RuntimeError("CMake failed: {}".format(str(e)))
setup(
name="atex",
version=__version__,
packages=find_packages(),
    description=('tensorflow-nv-norms provides fused layer/instance normalization ops for TensorFlow'),
ext_modules=ext_modules,
cmdclass={"build_ext": CMakeBuildExtension},
author='NVIDIA',
author_email='kaixih@nvidia.com',
install_requires=REQUIRED_PACKAGES,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow custom op machine learning',
)
|
atex-release
|
setup.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from . import nv_norms
from . import structured_sparsity
|
atex-release
|
atex/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import copy
import argparse
import time
from statistics import mean
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
import numpy as np
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
SAVEDMODEL_PATH = "exported_model"
def load_and_convert(path, precision):
""" Load a saved model and convert it to FP32 or FP16. Return a converter """
params = copy.deepcopy(trt.DEFAULT_TRT_CONVERSION_PARAMS)
params = params._replace(
precision_mode=(
trt.TrtPrecisionMode.FP16
if precision.lower() == "fp16" else
trt.TrtPrecisionMode.FP32
),
max_workspace_size_bytes=2 << 32, # 8,589,934,592 bytes
maximum_cached_engines=100,
minimum_segment_size=3,
allow_build_at_runtime=True,
)
import pprint
print("%" * 85)
pprint.pprint(params)
print("%" * 85)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=path,
conversion_params=params,
use_dynamic_shape=False,
)
return converter
if __name__ == "__main__":
BATCH_SIZE = 32 # This number will depend on the size of your dataset.
INFERENCE_STEPS = 2000
WARMUP_STEPS = 150
parser = argparse.ArgumentParser(prog='mydaemon')
feature_parser = parser.add_mutually_exclusive_group(required=True)
feature_parser.add_argument('--use_native_tensorflow', dest="use_tftrt", help="help", action='store_false')
feature_parser.add_argument('--use_tftrt_model', dest="use_tftrt", action='store_true')
args = parser.parse_args()
print("\n=========================================")
print("Inference using: {} ...".format(
"TF-TRT" if args.use_tftrt else "Native Tensorflow")
)
print("=========================================\n")
time.sleep(2)
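    # dataloader_fn below builds a synthetic input pipeline: a single random
    # image is batched, cached, and repeated forever, which is sufficient for a
    # pure throughput benchmark (no accuracy is measured).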
def dataloader_fn(batch_size):
dataset = tf.data.Dataset.from_tensor_slices(np.random.uniform(size=(1, 224, 224, 3)).astype(np.float32))
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
dataset = dataset.take(count=1) # loop over 1 batch
dataset = dataset.cache()
dataset = dataset.repeat()
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
if args.use_tftrt:
converter = load_and_convert(
os.path.join(SAVEDMODEL_PATH),
precision="fp16",
)
xx = converter.convert()
converter.save(
os.path.join(SAVEDMODEL_PATH, "converted")
)
root = tf.saved_model.load(os.path.join(SAVEDMODEL_PATH, "converted"))
else:
root = tf.saved_model.load(SAVEDMODEL_PATH)
infer_fx = root.signatures['serving_default']
try:
output_tensorname = list(infer_fx.structured_outputs.keys())[0]
except AttributeError:
# Output tensor doesn't have a name, index 0
output_tensorname = 0
ds = dataloader_fn(
batch_size=BATCH_SIZE
)
iterator = iter(ds)
@tf.function
def infer_step(batch_x):
return infer_fx(batch_x)[output_tensorname]
try:
step_times = list()
for step in range(1, INFERENCE_STEPS + 1):
input_batch = iterator.get_next()
if step % 100 == 0:
print("Processing step: %04d ..." % step)
start_t = time.time()
probs = infer_step(input_batch).numpy()
step_time = time.time() - start_t
if step >= WARMUP_STEPS:
step_times.append(step_time)
except tf.errors.OutOfRangeError:
pass
avg_step_time = mean(step_times)
print("\nAverage step time: %.1f msec" % (avg_step_time * 1e3))
print("Average throughput: %d samples/sec" % (
BATCH_SIZE / avg_step_time
))
|
atex-release
|
atex/structured_sparsity/tftrt_infer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from . import tf_asp
|
atex-release
|
atex/structured_sparsity/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import inspect
import numpy as np
import os
import tempfile
import tensorflow as tf
from atex.structured_sparsity import tf_asp
import shutil
from tensorflow.keras import layers, optimizers
from tensorflow.python.platform import test
def GetSingleLayerConfigs():
"""Gets all valid tests for single layer model.
Returns:
All the test configs as tuples of (layer_name, input_shape, output_dim).
"""
layers = ["Dense", "Dense", "Dense", "Dense","Dense", "Dense",
"Conv2D", "Conv2D"]
input_shapes = [(32,), (64,), (128,), (127,), (126,), (125,),
(64, 64, 32), (32, 32, 64)]
output_dims = [8, 16, 32, 8, 8, 8,
16, 32]
for l, i, o in zip(layers, input_shapes, output_dims):
yield l, i, o
def GetConvNetModel():
"""Gets an example convnet model. """
input1 = layers.Input(shape=(28, 28, 16))
conv1_1 = layers.Conv2D(32, (3, 3), padding='same', use_bias=False,
name="conv1_1")
conv1_2 = layers.Conv2D(32, (3, 3), padding='same', use_bias=False,
name="conv1_2")
conv2_1 = layers.Conv2D(32, (3, 3), padding='same', use_bias=False,
name="conv2_1")
conv3 = layers.Conv2D(32, (3, 3), padding='same', use_bias=False,
name="conv3")
add1 = layers.Add(name="add1")
batch_norm1 = layers.BatchNormalization(
beta_initializer='uniform', gamma_initializer='uniform',
moving_mean_initializer='uniform',
moving_variance_initializer='uniform', name="batch_norm1")
batch_norm2 = layers.BatchNormalization(
beta_initializer='uniform', gamma_initializer='uniform',
moving_mean_initializer='uniform',
moving_variance_initializer='uniform', name="batch_norm2")
batch_norm3 = layers.BatchNormalization(
beta_initializer='uniform', gamma_initializer='uniform',
moving_mean_initializer='uniform',
moving_variance_initializer='uniform', name="batch_norm3")
relu1 = layers.ReLU(name="relu1")
y1_1 = conv1_1(input1)
y1_1 = batch_norm1(y1_1)
y1_1 = relu1(y1_1)
y1_2 = conv1_2(y1_1)
y1_2 = batch_norm2(y1_2)
y2_1 = conv2_1(input1)
y2_1 = batch_norm3(y2_1)
y2 = add1([y2_1, y1_2])
output1 = conv3(y2)
model = tf.keras.models.Model(inputs=input1, outputs=output1)
return model
def GetInferlistModel(layer_names, input_shape, output_dim):
"""Gets a sequential model with given layers.
Args:
    layer_names: A list of supported layer names. The first and last names
      should both be conv* or dense*. The middle names should be batch_norm*,
      bias_add*, or relu*.
input_shape: A tuple of integers representing the input shape.
output_dim: An integer representing the output dim of layers.
Returns:
A sequential model.
Raises:
A value error if unsupported names are encountered.
"""
model = tf.keras.Sequential(name="sequential")
if len(layer_names) == 0:
raise ValueError("The layer_names could not be empty.")
model.add(layers.Input(shape=input_shape))
for i, layer_name in enumerate(layer_names):
if layer_name.startswith("conv"):
if i + 1 < len(layer_names) and layer_names[i + 1].startswith("bias_add"):
use_bias=True
else:
use_bias=False
model.add(layers.Conv2D(output_dim, 3, padding='same', use_bias=use_bias,
name=layer_name))
elif layer_name.startswith("dense"):
if i + 1 < len(layer_names) and layer_names[i + 1].startswith("bias_add"):
use_bias=True
else:
use_bias=False
model.add(layers.Dense(output_dim, use_bias=use_bias, name=layer_name))
elif layer_name.startswith("bias_add"):
continue
elif layer_name.startswith("batch_norm"):
model.add(layers.BatchNormalization(
beta_initializer='uniform', gamma_initializer='uniform',
moving_mean_initializer='uniform',
moving_variance_initializer='uniform', name=layer_name))
elif layer_name.startswith("relu"):
model.add(layers.ReLU(name=layer_name))
else:
raise ValueError(
"The layer_names contains unsupported layer_name: %s" % layer_name)
return model
def GetInferlistModelConfigs():
"""Gets all valid tests for inferlist model.
"""
layers = [
("conv1", "batch_norm1", "conv2"),
("conv1", "bias_add", "conv2"),
("dense1", "bias_add", "dense2"),
("conv1", "bias_add", "batch_norm1", "batch_norm2", "relu1", "conv2"),
("dense1", "bias_add", "relu1", "dense2"),
("dense1", "bias_add", "dense2"),
("dense1", "bias_add", "dense2"),
]
expected_logs = [
("Permute-C: node_name=sequential/conv2/Conv2D",
"Permute-K: node_name=sequential/batch_norm1/FusedBatchNormV3",
"Permute-K: node_name=sequential/conv1/Conv2D"),
("Permute-C: node_name=sequential/conv2/Conv2D",
"Permute-K: node_name=sequential/conv1/BiasAdd",
"Permute-K: node_name=sequential/conv1/Conv2D"),
("Permute-C: node_name=sequential/dense2/MatMul",
"Permute-K: node_name=sequential/dense1/BiasAdd",
"Permute-K: node_name=sequential/dense1/MatMul"),
("Permute-C: node_name=sequential/conv2/Conv2D",
"Permute-K: node_name=sequential/batch_norm2/FusedBatchNormV3",
"Permute-K: node_name=sequential/batch_norm1/FusedBatchNormV3",
"Permute-K: node_name=sequential/conv1/BiasAdd",
"Permute-K: node_name=sequential/conv1/Conv2D"),
("Permute-C: node_name=sequential/dense2/MatMul",
"Permute-K: node_name=sequential/dense1/BiasAdd",
"Permute-K: node_name=sequential/dense1/MatMul"),
("Permute-C: node_name=sequential/dense2/MatMul",
"Permute-K: node_name=sequential/dense1/BiasAdd",
"Permute-K: node_name=sequential/dense1/MatMul"),
("Permute-C: node_name=sequential/dense2/MatMul",
"Permute-K: node_name=sequential/dense1/BiasAdd",
"Permute-K: node_name=sequential/dense1/MatMul"),
]
input_shapes = [
(28, 28, 16),
(28, 28, 16),
(64,),
(28, 28, 16),
(64,),
(1024,),
(32,),
]
output_dims = [32, 32, 32, 32, 32, 512, 16]
devices = ["GPU", "GPU", "GPU", "GPU", "GPU", "GPU", "CPU"]
for c, l, i, o, d in zip(layers, expected_logs, input_shapes,
output_dims, devices):
yield c, l, i, o, d
class TfAspOptimizerTest(test.TestCase):
def _CheckMask(self, mask):
"""Checks if every 4 values contain 2 zeros. """
mask_ndims = len(mask.shape)
    # For Dense the mask has shape (I, O); for Conv2D it has shape (H, W, I, O).
    # We transpose and reshape to (-1, I) for easier access, since the pruning
    # is along the I dim.
if mask_ndims == 2:
mask = tf.transpose(mask)
elif mask_ndims == 4:
mask = tf.transpose(mask, perm=[0, 1, 3, 2])
mask = tf.reshape(mask, shape=(-1, mask.shape[-1]))
result = True
ngroups = mask.shape[1] // 4
for row in range(mask.shape[0]):
for col in range(0, ngroups * 4, 4):
one_mask = mask[row, col:col+4]
result = result and (tf.math.reduce_sum(one_mask) == 2)
if ngroups * 4 < mask.shape[1]:
one_mask = mask[row, ngroups*4:]
result = result and (tf.math.reduce_sum(one_mask) <= 2)
self.assertEqual(result, True)
def testPrunedSparsitySingleLayer(self):
for layer, input_shape, output_dim in GetSingleLayerConfigs():
input_1 = layers.Input(shape=input_shape)
if layer == "Dense":
layer_1 = layers.Dense(output_dim, name="dense_1")
elif layer == "Conv2D":
layer_1 = layers.Conv2D(output_dim, (3, 3), name="conv_1")
output_1 = layer_1(input_1)
model = tf.keras.models.Model(inputs=input_1, outputs=output_1)
opt = optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
opt = tf_asp.AspOptimizerWrapperV2(
opt, model, permute=False, padding=True,
plot_to_file=inspect.currentframe().f_code.co_name)
if layer == "Dense":
batched_shape = (5,) + input_shape
elif layer == "Conv2D":
batched_shape = (5,) + input_shape
x = tf.random.normal(shape=batched_shape)
# Run the train step once to trigger the mask compute.
with tf.GradientTape(persistent=True) as tape:
y = model(x)
loss = tf.reduce_mean(y)
grads = tape.gradient(loss, model.variables)
opt.apply_gradients(zip(grads, model.variables))
mask = opt.get_slot(layer_1.kernel, "mask")
self._CheckMask(mask)
def testMasksCanBeUpdatedOnlyOnceInTrainLoop(self):
model = GetConvNetModel()
opt = optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
opt = tf_asp.AspOptimizerWrapperV2(
opt, model, permute=False, padding=True,
plot_to_file=inspect.currentframe().f_code.co_name)
@tf.function
def train_step(x):
with tf.GradientTape() as tape:
y = model(x)
loss = tf.reduce_sum(y)
grads = tape.gradient(loss, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
return loss
x_train = tf.random.normal(shape=(100, 28, 28, 16))
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(10)
masks_ref = []
masks = []
for step, x in enumerate(train_dataset):
loss = train_step(x)
      # Initial train step creates and updates the masks. Following train steps
      # shouldn't change the masks.
if step == 0:
for layer in model.layers:
if isinstance(layer, layers.Conv2D):
masks_ref.append(tf.identity(opt.get_slot(layer.kernel, "mask")))
if step == 10:
for layer in model.layers:
if isinstance(layer, layers.Conv2D):
masks.append(opt.get_slot(layer.kernel, "mask"))
break
for mask_ref, mask in zip(masks_ref, masks):
self.assertAllEqual(mask_ref, mask)
def testMasksCanBeUpdatedOnlyOnceInModelFit(self):
model = GetConvNetModel()
opt = optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
opt = tf_asp.AspOptimizerWrapperV2(
opt, model, permute=False, padding=True,
plot_to_file=inspect.currentframe().f_code.co_name)
model.compile(optimizer=opt, loss="mse")
x_train = tf.random.normal(shape=(100, 28, 28, 16))
y_train = tf.random.normal(shape=(100, 28, 28, 32))
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(10)
    # Initial train step creates and updates the masks.
model.fit(train_dataset, epochs=1, steps_per_epoch=1)
masks_ref = []
for layer in model.layers:
if isinstance(layer, layers.Conv2D):
masks_ref.append(tf.identity(opt.get_slot(layer.kernel, "mask")))
# Following train steps shouldn't change the masks.
model.fit(train_dataset, initial_epoch=1, epochs=10, steps_per_epoch=1)
masks = []
for layer in model.layers:
if isinstance(layer, layers.Conv2D):
masks.append(opt.get_slot(layer.kernel, "mask"))
for mask_ref, mask in zip(masks_ref, masks):
self.assertAllEqual(mask_ref, mask)
def testInnerOptimizerWithIncreasingIterations(self):
model = GetConvNetModel()
inner_opt = optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
opt = tf_asp.AspOptimizerWrapperV2(
inner_opt, model, permute=False, padding=True,
plot_to_file=inspect.currentframe().f_code.co_name)
@tf.function
def train_step(x):
with tf.GradientTape() as tape:
y = model(x)
loss = tf.reduce_sum(y)
grads = tape.gradient(loss, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
return loss
x_train = tf.random.normal(shape=(100, 28, 28, 16))
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(10)
for step, x in enumerate(train_dataset):
loss = train_step(x)
self.assertEqual(opt.iterations, inner_opt.iterations)
def testInnerOptimizerHyperparameters(self):
model = GetConvNetModel()
inner_opt = optimizers.legacy.Adam(learning_rate=0.2)
opt = tf_asp.AspOptimizerWrapperV2(
inner_opt, model, permute=False, padding=True)
# Check all hyperparameters in inner_opt._hyper
for attr in inner_opt._hyper:
self.assertEqual(getattr(inner_opt, attr), getattr(opt, attr))
# Check all setattr of any optimizer can affect both.
opt.beta_1 = 0.5
inner_opt.beta_2 = 0.6
self.assertEqual(inner_opt.beta_1, opt.beta_1)
self.assertEqual(inner_opt.beta_2, opt.beta_2)
# Check non-hyperparams.
self.assertTrue(hasattr(inner_opt, 'epsilon'))
self.assertFalse(hasattr(opt, 'epsilon'))
def _CheckPermuteLogs(self, model, expected_logs, input_shapes,
expected_num=None,
search_device='GPU',
search_time_limit=5,
logger_level=tf_asp.SHOW_PERMUTATION_MORE_INFO,
logger_capture_level=tf_asp.SHOW_PERMUTATION_MORE_INFO,
plot_to_file='test.png'):
tf.get_logger().setLevel(logger_level)
if not isinstance(input_shapes, list):
input_shapes = [input_shapes]
inputs = []
for input_shape in input_shapes:
inputs.append(tf.random.normal(shape=(10,) + input_shape))
if len(inputs) == 1:
inputs = inputs[0]
expected = model(inputs)
opt = optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
# The permute is triggered during the init stage of the ASP wrapper.
with self.assertLogs(level=logger_capture_level) as cm:
opt = tf_asp.AspOptimizerWrapperV2(
opt, model, permute=True, padding=True,
search_device=search_device,
search_time_limit=search_time_limit,
input_shapes=(None,) + input_shape,
plot_to_file=plot_to_file)
matches = []
for log in cm.output:
for expected_log in expected_logs:
matches.append(expected_log in log)
if expected_num:
self.assertEqual(sum(matches), expected_num)
else:
self.assertEqual(sum(matches), len(expected_logs))
result = model(inputs)
self.assertAllClose(expected, result, rtol=1e-2, atol=1e-2)
def testPermuteGraphWithConvNet(self):
model = GetConvNetModel()
expected_logs = (
"Permute-C: node_name=model/conv1_2/Conv2D",
"Permute-C: node_name=model/conv3/Conv2D",
"Permute-K: node_name=model/batch_norm1/FusedBatchNormV3",
"Permute-K: node_name=model/batch_norm2/FusedBatchNormV3",
"Permute-K: node_name=model/batch_norm3/FusedBatchNormV3",
"Permute-K: node_name=model/conv1_1/Conv2D",
"Permute-K: node_name=model/conv1_2/Conv2D",
"Permute-K: node_name=model/conv2_1/Conv2D")
self._CheckPermuteLogs(model, expected_logs, (28, 28, 16),
plot_to_file=inspect.currentframe().f_code.co_name)
def testPermuteGraphWithComplexSiblings(self):
input1 = layers.Input(shape=(28, 28, 16))
conv1_1 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv1_1")
conv1_2 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv1_2")
conv1_3 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv1_3")
conv2_1 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv2_1")
conv2_2 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv2_2")
conv3_1 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv3_1")
conv3_2 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv3_2")
conv4_1 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv4_1")
conv4_2 = layers.Conv2D(32, (3, 3), padding='same', activation='relu',
bias_initializer='uniform', name="conv4_2")
add1 = layers.Add(name="add1")
add2 = layers.Add(name="add2")
y1_1 = conv1_1(input1)
y1_2 = conv1_2(y1_1)
y1_3 = conv1_3(y1_2)
y1 = add1([y1_2, y1_3])
y2_1 = conv2_1(input1)
y2_2 = conv2_2(y2_1)
y3_1 = conv3_1(y2_2)
y3_2 = conv3_2(y3_1)
y2 = add2([y2_2, y3_2, y1])
y4_1 = conv4_1(y2)
output1 = conv4_2(y4_1)
model = tf.keras.models.Model(inputs=input1, outputs=output1)
expected_logs = (
"Permute-Siblings: model/conv2_2/Conv2D",
"Permute-Siblings: model/conv1_2/Conv2D",
"Permute-Siblings: model/conv1_3/Conv2D,model/conv4_1/Conv2D," \
"model/conv3_1/Conv2D",
"Permute-Siblings: model/conv3_2/Conv2D",
"Permute-Siblings: model/conv4_2/Conv2D")
self._CheckPermuteLogs(model, expected_logs, (28, 28, 16),
plot_to_file=inspect.currentframe().f_code.co_name)
def testPermuteGraphWithInferlistOps(self):
for i, (layers, expected_log, input_shape, output_dim, device) in \
enumerate(GetInferlistModelConfigs()):
model = GetInferlistModel(layers, input_shape, output_dim)
self._CheckPermuteLogs(
model, expected_log, input_shape, search_device=device,
plot_to_file=inspect.currentframe().f_code.co_name + str(i))
def testPermuteGraphWithInferlistOpAndNoEndingAllowlistOps(self):
tf.get_logger().setLevel(tf_asp.SHOW_PERMUTATION_INFO)
    # The conv2 has two upstream branches: conv1 and batch_norm1. The
    # batch_norm1 branch does not end with any allowlist nodes, which makes the
    # graph unsupported for permutation, even though the conv1 branch is fine.
shape1 = (28, 28, 128)
shape2 = (28, 28, 16)
x1 = layers.Input(shape=shape1)
x2 = layers.Input(shape=shape2)
conv1 = layers.Conv2D(16, 2, padding='same', use_bias=False, name="conv1")
batch_norm1 = layers.BatchNormalization(name='batch_norm1')
conv2 = layers.Conv2D(8, 2, padding='same', use_bias=False, name="conv2")
add1 = layers.Add(name="add1")
y1 = conv1(x1)
y2 = batch_norm1(x2)
y3 = add1([y1, y2])
y4 = conv2(y3)
model = tf.keras.models.Model(inputs=[x1, x2], outputs=y4)
expected_log = ["0/2 variables (Conv2D or MatMul) are permuted!"]
self._CheckPermuteLogs(model, expected_log, [shape1, shape2],
plot_to_file=inspect.currentframe().f_code.co_name)
def testPermuteGraphWithClearlistOpAndNoEndingAllowlistOps(self):
tf.get_logger().setLevel(tf_asp.SHOW_PERMUTATION_INFO)
    # The conv2 has two upstream branches: conv1 and relu1. The relu1 branch
    # does not end with any allowlist nodes, which makes the graph unsupported
    # for permutation, even though the conv1 branch is fine.
shape1 = (28, 28, 128)
shape2 = (28, 28, 16)
x1 = layers.Input(shape=shape1)
x2 = layers.Input(shape=shape2)
conv1 = layers.Conv2D(16, 2, padding='same', use_bias=False, name="conv1")
relu1 = layers.ReLU(name='relu1')
conv2 = layers.Conv2D(8, 2, padding='same', use_bias=False, name="conv2")
add1 = layers.Add(name="add1")
y1 = conv1(x1)
y2 = relu1(x2)
y3 = add1([y1, y2])
y4 = conv2(y3)
model = tf.keras.models.Model(inputs=[x1, x2], outputs=y4)
expected_log = ["0/2 variables (Conv2D or MatMul) are permuted!"]
self._CheckPermuteLogs(model, expected_log, [shape1, shape2],
plot_to_file=inspect.currentframe().f_code.co_name)
def testPermuteGraphWithUnsupportedOps(self):
tf.get_logger().setLevel(tf_asp.SHOW_PERMUTATION_INFO)
# The conv2 has two upstream branches: conv1_1 and conv1_2. The conv1_1
# branch contains an unsupported "Reshape" op, making this graph
# unsupported for permutation, though the conv1_2 branch looks good.
shape1 = (28, 28, 128)
shape2 = (15, 57, 128)
x1 = layers.Input(shape=shape1)
x2 = layers.Input(shape=shape2)
conv1_1 = layers.Conv2D(16, 2, padding='same', use_bias=False,
name="conv1_1")
conv1_2 = layers.Conv2D(16, 2, padding='valid', use_bias=False,
name="conv2_1")
conv2 = layers.Conv2D(8, 2, padding='same', use_bias=False, name="conv2")
add1 = layers.Add(name='add1')
y1_1 = conv1_1(x1)
old_shape = y1_1.shape
new_shape = (-1, old_shape[1] // 2, old_shape[2] * 2, old_shape[3])
y1_1 = tf.reshape(y1_1, shape=new_shape)
y1_2 = conv1_2(x2)
y1 = add1([y1_1, y1_2])
y2 = conv2(y1)
model = tf.keras.models.Model(inputs=[x1, x2], outputs=y2)
expected_log = ["0/3 variables (Conv2D or MatMul) are permuted!"]
self._CheckPermuteLogs(model, expected_log, [shape1, shape2],
plot_to_file=inspect.currentframe().f_code.co_name)
def testAutomaticalSkipPermutation(self):
# TODO(kaixih):
self.skipTest("The second run cannot skip the permutation. Need to debug.")
tf.get_logger().setLevel(tf_asp.SHOW_PERMUTATION_INFO)
input_shape = (512,)
model = GetInferlistModel(["dense1", "bias_add", "dense2"],
input_shape=input_shape, output_dim=128)
opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
# We wrap the optimizer for the first time to trigger the permutation.
with self.assertLogs(level=tf_asp.SHOW_PERMUTATION_INFO) as cm:
opt = tf_asp.AspOptimizerWrapperV2(
opt, model, permute=True, padding=True,
plot_to_file=inspect.currentframe().f_code.co_name)
expected_log = "1/2 variables (Conv2D or MatMul) are permuted!"
matches = []
for log in cm.output:
matches.append(expected_log in log)
self.assertIn(True, matches)
@tf.function
def train_step(x):
with tf.GradientTape() as tape:
y = model(x)
loss = tf.reduce_sum(y)
grads = tape.gradient(loss, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
return loss
x_train = tf.random.normal(shape=(100,) + input_shape)
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(10)
    # These train steps trigger the pruning of the weights. Note that we run
    # only one train step so the updated weights do not blow up, since the
    # model has no activations.
for step, x in enumerate(train_dataset):
loss = train_step(x)
if step == 0:
break
# We wrap the optimizer again to trigger another round of permutation.
# However, the actual permutation should be automatically skipped, since the
# weights have already been pruned and the permutation won't improve the
# magnitude.
with self.assertLogs(level=tf_asp.SHOW_PERMUTATION_MORE_INFO) as cm:
opt = tf_asp.AspOptimizerWrapperV2(
opt, model, permute=True, padding=True,
plot_to_file=inspect.currentframe().f_code.co_name)
expected_log = "0/2 variables (Conv2D or MatMul) are permuted!"
matches = []
for log in cm.output:
matches.append(expected_log in log)
self.assertIn(True, matches)
def testPermuteWithSubclassedModel(self):
class SubclassedModel(tf.keras.Model):
def __init__(self, name):
super(SubclassedModel, self).__init__(name=name)
self.conv1_1 = layers.Conv2D(32, (3, 3), padding='same', use_bias=False,
name="conv1_1")
self.conv1_2 = layers.Conv2D(32, (3, 3), padding='same', use_bias=False,
name="conv1_2")
self.batch_norm1 = layers.BatchNormalization(
beta_initializer='uniform', gamma_initializer='uniform',
moving_mean_initializer='uniform',
moving_variance_initializer='uniform', name="batch_norm1")
def call(self, x):
y1_1 = self.conv1_1(x)
y1_1 = self.batch_norm1(y1_1)
y1_2 = self.conv1_2(y1_1)
return y1_2
model = SubclassedModel(name='subclassed')
input_shape = (12, 12, 16)
model.build(input_shape=(None,) + input_shape)
expected_logs = [
"Permute-C: node_name=subclassed/conv1_2/Conv2D",
"Permute-K: node_name=subclassed/batch_norm1/FusedBatchNormV3",
"Permute-K: node_name=subclassed/conv1_1/Conv2D"
]
self._CheckPermuteLogs(model, expected_logs, input_shape,
plot_to_file=inspect.currentframe().f_code.co_name)
def testPermuteMixedApisWithBrokenInferlistOp(self):
# A simple model of Conv2D->BatchNorm->Conv2D. Since the first Conv2D is not
# from a keras layer, none of the layers would be permuted.
class SubclassedModel(tf.keras.Model):
def __init__(self, name):
super(SubclassedModel, self).__init__(name=name)
v_init = tf.random_normal_initializer()
self.filter = tf.Variable(
initial_value=v_init(shape=(3, 3, 16, 32), dtype='float32'),
trainable=True)
self.conv_layer = layers.Conv2D(
32, (3, 3), padding='same', use_bias=False, name="conv_layer")
self.batch_norm_layer = layers.BatchNormalization(
name="batch_norm_layer")
def call(self, x):
y = tf.nn.conv2d(x, self.filter, (1, 1), 'SAME')
y = self.batch_norm_layer(y)
return self.conv_layer(y)
model = SubclassedModel(name='subclassed')
input_shape = (12, 12, 16)
model.build(input_shape=(None,) + input_shape)
expected_log = ['0/1 variables (Conv2D or MatMul) are permuted!']
self._CheckPermuteLogs(model, expected_log, input_shape,
plot_to_file=inspect.currentframe().f_code.co_name)
def testPermuteMixedApisWithEmptyInferlistOp(self):
# A simple model of Conv2D->BatchNorm->Conv2D. Since the BatchNorm is not
# from a keras layer, none of the layers would be permuted.
class SubclassedModel(tf.keras.Model):
def __init__(self, name):
super(SubclassedModel, self).__init__(name=name)
v_init = tf.random_normal_initializer()
self.conv2d_1 = layers.Conv2D(
32, (3, 3), padding='same', use_bias=False, name="conv2d_1")
self.conv2d_2 = layers.Conv2D(
32, (3, 3), padding='same', use_bias=False, name="conv2d_2")
self.scale = tf.Variable(
initial_value=v_init(shape=(32,), dtype='float32'), trainable=True)
self.offset = tf.Variable(
initial_value=v_init(shape=(32,), dtype='float32'), trainable=True)
def call(self, x):
y = self.conv2d_1(x)
y, _, _ = tf.compat.v1.nn.fused_batch_norm(y, self.scale, self.offset)
return self.conv2d_2(y)
model = SubclassedModel(name='subclassed')
input_shape = (12, 12, 16)
model.build(input_shape=(None,) + input_shape)
expected_log = ['0/2 variables (Conv2D or MatMul) are permuted!']
self._CheckPermuteLogs(model, expected_log, input_shape,
plot_to_file=inspect.currentframe().f_code.co_name)
def testPermuteMixedPrecision(self):
class SubclassedModel(tf.keras.Model):
def __init__(self, name):
super(SubclassedModel, self).__init__(name=name)
self.conv2d_1 = layers.Conv2D(
32, (3, 3), padding='same', use_bias=False, name="conv2d_1")
self.batch_norm_1 = layers.BatchNormalization(name="batch_norm_1")
self.conv2d_2 = layers.Conv2D(
32, (3, 3), padding='same', use_bias=False, name="conv2d_2")
self.relu_1 = layers.ReLU(name='relu_1')
def call(self, x):
y = self.conv2d_1(x)
y = self.batch_norm_1(y)
y = self.relu_1(y)
return self.conv2d_2(y)
    # When the mixed precision policy is used, many Cast ops are inserted after
    # ReadVariableOp, producing patterns like ReadVariableOp->Cast->Conv2D that
    # we should treat as skippable.
tf.keras.mixed_precision.set_global_policy('mixed_float16')
model = SubclassedModel(name='subclassed')
input_shape = (12, 12, 16)
model.build(input_shape=(None,) + input_shape)
expected_log = ['1/2 variables (Conv2D or MatMul) are permuted!']
self._CheckPermuteLogs(model, expected_log, input_shape,
plot_to_file=inspect.currentframe().f_code.co_name)
tf.keras.mixed_precision.set_global_policy('float32')
def testPermuteMixedPrecisionOptimizerOrder(self):
class SubclassedModel(tf.keras.Model):
def __init__(self, name):
super(SubclassedModel, self).__init__(name=name)
self.conv2d_1 = layers.Conv2D(
32, (3, 3), padding='same', use_bias=False, name="conv2d_1")
self.batch_norm_1 = layers.BatchNormalization(name="batch_norm_1")
self.conv2d_2 = layers.Conv2D(
32, (3, 3), padding='same', use_bias=False, name="conv2d_2")
self.relu_1 = layers.ReLU(name='relu_1')
def call(self, x):
y = self.conv2d_1(x)
y = self.batch_norm_1(y)
y = self.relu_1(y)
return self.conv2d_2(y)
    # When the mixed precision policy is used, many Cast ops are inserted after
    # ReadVariableOp, producing patterns like ReadVariableOp->Cast->Conv2D that
    # we should treat as skippable.
tf.keras.mixed_precision.set_global_policy('mixed_float16')
model = SubclassedModel(name='subclassed')
input_shape = (12, 12, 16)
model.build(input_shape=(None,) + input_shape)
opt = optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
# The permute is triggered during the init stage of the ASP wrapper.
opt = tf_asp.AspOptimizerWrapperV2(
opt, model, permute=True, padding=True,
search_device='GPU', input_shapes=(None,) + input_shape)
    # The LossScaleOptimizer needs to be the outermost (last) wrapper.
    # TODO(kaixih): Consider whether this ordering requirement can be relaxed.
opt = tf.keras.mixed_precision.LossScaleOptimizer(opt, dynamic=False,
initial_scale=128.0)
model.compile(optimizer=opt, loss="mse")
x_train = tf.random.normal(shape=(100, 12, 12, 16))
y_train = tf.random.normal(shape=(100, 12, 12, 32))
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(10)
model.fit(train_dataset, epochs=1, steps_per_epoch=1)
tf.keras.mixed_precision.set_global_policy('float32')
def testSameVariableNames(self):
input_shape = (16, 16, 64)
inner_model = tf.keras.Sequential(name='b')
inner_model.add(layers.Conv2D(16, (3, 3), padding='same',
input_shape=input_shape,
name='b/c/conv2d_1'))
model = tf.keras.Sequential(name='a')
model.add(inner_model)
model.add(layers.Conv2D(32, (3, 3), name='b/c/conv2d_1'))
expected_logs = [
"Permute-C: node_name=a/b/c/conv2d_1/Conv2D",
"Permute-K: node_name=a/b/b/c/conv2d_1/BiasAdd",
"Permute-K: node_name=a/b/b/c/conv2d_1/Conv2D"]
self._CheckPermuteLogs(model, expected_logs, input_shape)
def testSameOpNames(self):
input_shape = (100, 100, 64)
inner_model1 = tf.keras.Sequential(name='a/b')
inner_model1.add(layers.Conv2D(64, (3, 3), padding='same',
input_shape=input_shape,
name='c/conv2d_1'))
inner_model2 = tf.keras.Sequential(name='a')
inner_model2.add(layers.Conv2D(16, (3, 3), padding='same',
input_shape=input_shape,
name='b/c/conv2d_1'))
model = tf.keras.Sequential(name='n')
model.add(inner_model1)
model.add(inner_model2)
expected_logs = [
"Failed to distinguish variables for op_name=n/a/b/c/conv2d_1/Conv2D,",
"Failed to distinguish variables for op_name=n/a/b/c/conv2d_1/Conv2D_1,",
"Failed to distinguish variables for op_name=n/a/b/c/conv2d_1/BiasAdd,",
"Failed to distinguish variables for op_name=n/a/b/c/conv2d_1/BiasAdd_1,",
]
self._CheckPermuteLogs(model, expected_logs, input_shape)
def testUnsupportedSavedModel(self):
input_shape = (16, 16, 64)
conv_1 = layers.Conv2D(16, (3, 3), padding='same', name='conv_1')
conv_2 = layers.Conv2D(32, (3, 3), padding='same', name='conv_2')
input1 = layers.Input(shape=input_shape)
output1 = conv_1(input1)
output1 = conv_2(output1)
model = tf.keras.models.Model(inputs=input1, outputs=output1)
expected_logs = [
"Permute-C: node_name=model/conv_2/Conv2D",
"Permute-K: node_name=model/conv_1/BiasAdd",
"Permute-K: node_name=model/conv_1/Conv2D"]
self._CheckPermuteLogs(model, expected_logs, input_shape)
try:
tmpdir = tempfile.mkdtemp()
tf.saved_model.save(model, tmpdir)
loaded = tf.saved_model.load(tmpdir)
finally:
shutil.rmtree(tmpdir)
with self.assertRaisesRegex(
ValueError, '`model` can only be a `tf.keras.Model` instance.'):
self._CheckPermuteLogs(loaded, [], input_shape)
infer = loaded.signatures['serving_default']
with self.assertRaisesRegex(
ValueError, '`model` can only be a `tf.keras.Model` instance.'):
self._CheckPermuteLogs(infer, [], input_shape)
def testLargeInputDim(self):
model = tf.keras.Sequential(name="seq")
input_shape = (512, )
model.add(layers.Dense(512, input_shape=input_shape))
model.add(layers.Dense(2064))
model.add(layers.Dense(512))
expected_logs = [
"[TF-ASP] Finally swap"]
self._CheckPermuteLogs(model, expected_logs, input_shape)
if __name__ == "__main__":
test.main()
|
atex-release
|
atex/structured_sparsity/tf_asp_optimizer_test.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# WARNING:tensorflow:[TF-ASP] Allowlist is used: (Dense, Conv2D, )
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: dense_2 (type=Dense, shape=(128, 8))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_2 (type=Conv2D, shape=(3, 3, 16, 32))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_3 (type=Conv2D, shape=(3, 3, 32, 64))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_4 (type=Conv2D, shape=(3, 3, 64, 128))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_5 (type=Conv2D, shape=(3, 3, 128, 128))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_6 (type=Conv2D, shape=(3, 3, 128, 128))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_7 (type=Conv2D, shape=(3, 3, 128, 128))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_8 (type=Conv2D, shape=(3, 3, 128, 128))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_9 (type=Conv2D, shape=(3, 3, 128, 128))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_10 (type=Conv2D, shape=(7, 7, 128, 32))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_11 (type=Conv2D, shape=(7, 7, 32, 16))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: conv2d_12 (type=Conv2D, shape=(7, 7, 16, 8))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: dense (type=Dense, shape=(69192, 200))
# WARNING:tensorflow:[TF-ASP] Pruning list accepts the "kernel" variable from layer: dense_1 (type=Dense, shape=(200, 128))
# _________________________________________________________________
# Layer (type) Output Shape Param #
# =================================================================
# conv2d (Conv2D) (None, 222, 222, 8) 224
# _________________________________________________________________
# conv2d_1 (Conv2D) (None, 220, 220, 16) 1168
# _________________________________________________________________
# conv2d_2 (Conv2D) (None, 218, 218, 32) 4640
# _________________________________________________________________
# conv2d_3 (Conv2D) (None, 216, 216, 64) 18496
# _________________________________________________________________
# conv2d_4 (Conv2D) (None, 216, 216, 128) 73856
# _________________________________________________________________
# conv2d_5 (Conv2D) (None, 216, 216, 128) 147584
# _________________________________________________________________
# conv2d_6 (Conv2D) (None, 216, 216, 128) 147584
# _________________________________________________________________
# conv2d_7 (Conv2D) (None, 216, 216, 128) 147584
# _________________________________________________________________
# conv2d_8 (Conv2D) (None, 216, 216, 128) 147584
# _________________________________________________________________
# conv2d_9 (Conv2D) (None, 216, 216, 128) 147584
# _________________________________________________________________
# conv2d_10 (Conv2D) (None, 210, 210, 32) 200736
# _________________________________________________________________
# conv2d_11 (Conv2D) (None, 204, 204, 16) 25104
# _________________________________________________________________
# conv2d_12 (Conv2D) (None, 198, 198, 8) 6280
# _________________________________________________________________
# conv2d_13 (Conv2D) (None, 192, 192, 4) 1572
# _________________________________________________________________
# conv2d_14 (Conv2D) (None, 186, 186, 2) 394
# _________________________________________________________________
# flatten (Flatten) (None, 69192) 0
# _________________________________________________________________
# dense (Dense) (None, 200) 13838600
# _________________________________________________________________
# dense_1 (Dense) (None, 128) 25728
# _________________________________________________________________
# dense_2 (Dense) (None, 8) 1032
# =================================================================
# Total params: 14,935,750
# Trainable params: 14,935,750
# Non-trainable params: 0
# _________________________________________________________________
# signature_def['serving_default']:
# The given SavedModel SignatureDef contains the following input(s):
# inputs['conv2d_input'] tensor_info:
# dtype: DT_FLOAT
# shape: (-1, 224, 224, 3)
# name: serving_default_conv2d_input:0
# The given SavedModel SignatureDef contains the following output(s):
# outputs['dense_2'] tensor_info:
# dtype: DT_FLOAT
# shape: (-1, 8)
# name: StatefulPartitionedCall:0
# Method name is: tensorflow/serving/predict
import tensorflow as tf
from tensorflow.keras import layers, optimizers, models
# ASP Step 1: Import the sparsity optimizer
from atex.structured_sparsity import tf_asp
tf.get_logger().setLevel(tf_asp.SHOW_PERMUTATION_INFO)
model = tf.keras.Sequential()
# Filter (3, 3, 3, 8): Skip pruning: input dim.
model.add(layers.Conv2D(8, (3, 3), padding='valid', activation="relu", input_shape=(224, 224, 3)))
# Filter (3, 3, 8, 16): Skip pruning: input dim.
model.add(layers.Conv2D(16, (3, 3), padding='valid', activation="relu"))
model.add(layers.Conv2D(32, (3, 3), padding='valid', activation="relu"))
model.add(layers.Conv2D(64, (3, 3), padding='valid', activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation="relu"))
model.add(layers.Conv2D(32, (7, 7), padding='valid', activation="relu"))
model.add(layers.Conv2D(16, (7, 7), padding='valid', activation="relu"))
model.add(layers.Conv2D(8, (7, 7), padding='valid', activation="relu"))
# Filter (7, 7, 8, 4): Skip pruning: input/output dim.
model.add(layers.Conv2D(4, (7, 7), padding='valid', activation="relu"))
# Filter (7, 7, 4, 2): Skip pruning: input/output dim.
model.add(layers.Conv2D(2, (7, 7), padding='valid', activation="relu"))
model.add(layers.Flatten())
model.add(layers.Dense(200, activation="relu"))
model.add(layers.Dense(128, activation="relu"))
# Filter (128, 8): Skip pruning: output dim.
model.add(layers.Dense(8, activation="sigmoid"))
model.summary()
#print("Init variables:", model.variables)
x = tf.random.uniform(shape=(32, 224, 224, 3))
opt = optimizers.legacy.SGD(learning_rate=0.2, momentum=1.0)
# ASP Step 2: Use AspOptimizerWrapper to wrap the existing optimizer.
opt = tf_asp.AspOptimizerWrapperV2(opt, model, padding=True,
plot_to_file='main.png')
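# The wrapper registers a "mask" slot for every eligible Dense/Conv2D kernel;
# the 2:4 masks are computed on the first train step and stay fixed afterwards
# (see tf_asp_optimizer_test.py).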
@tf.function
def train_step(x):
with tf.GradientTape(persistent=True) as tape:
y = model(x)
loss = tf.reduce_mean(y)
grads = tape.gradient(loss, model.variables)
opt.apply_gradients(zip(grads, model.variables))
return loss
for i in range(3):
loss = train_step(x)
#print("Updated variables (masked):", model.variables)
export_savedmodel = True
if export_savedmodel:
save_format = "exported_model"
model.save(save_format)
print(f"The model is saved to {save_format}")
new_model = models.load_model(save_format)
new_model.summary()
result_checked = True
for ref, new in zip(model.variables, new_model.variables):
checked = tf.math.reduce_all(tf.math.equal(ref, new))
if not checked:
#print("Issue with:", new)
result_checked = False
print("Loaded Model checking:", "Passed" if result_checked else "Failed")
tf_asp.check_pruned_layers(model)
|
atex-release
|
atex/structured_sparsity/main.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers, optimizers
from tensorflow.python.platform import tf_logging
from itertools import permutations
# A PoC optimizer wrapper to perform pruning with masks.
class AspOptimizerWrapper(optimizers.Optimizer):
def __init__(self, optimizer, model, denylist=None, allowlist=None,
padding=False, name=None, **kwargs):
super(AspOptimizerWrapper, self).__init__(name, **kwargs)
self._optimizer = optimizer
self._padding = padding
self._set_eligible_set(model, denylist=denylist, allowlist=allowlist)
# This is a 6x4 matrix to store all possible 2:4 patterns.
self._patterns = tf.convert_to_tensor(
list(set(permutations([0., 0., 1., 1.]))))
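    # set(permutations([0, 0, 1, 1])) yields the 6 distinct ways of keeping
    # exactly 2 out of 4 consecutive weights, e.g. (1, 1, 0, 0), (1, 0, 1, 0).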
self._set_hyper("learning_rate", optimizer.learning_rate)
self._set_hyper("decay", optimizer.decay)
def _prepare(self, var_list):
return self._optimizer._prepare(var_list)
def _create_slots(self, var_list):
self._optimizer._create_slots(var_list)
for var in var_list:
if var.ref() in self._eligible_set:
self.add_slot(var, "mask")
def _m4n2_1d(self, matrix):
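    # For every group of 4 entries, score each 2:4 pattern by how much absolute
    # magnitude it keeps (|matrix| @ patterns^T) and take the argmax, so the two
    # largest-magnitude entries in each group survive.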
m, n = 4, 2
mat = tf.math.abs(tf.reshape(matrix, shape=(-1, m)))
pmax = tf.math.argmax(tf.linalg.matmul(mat, tf.transpose(self._patterns)),
axis=1)
mask = tf.gather(self._patterns, pmax)
mask = tf.reshape(mask, shape=matrix.shape)
return mask
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
apply_gradients_op = super(AspOptimizerWrapper, self).apply_gradients(
grads_and_vars, name, experimental_aggregate_gradients)
# Normally self._optimizer.iterations is incremented in
# self._optimizer.apply_gradients(). Since that is not called, we increment
# it here instead.
with tf.control_dependencies([apply_gradients_op]):
return self._optimizer.iterations.assign_add(1)
def _resource_apply_dense(self, grad, var, apply_state):
if not var.ref() in self._eligible_set:
return self._optimizer._resource_apply_dense(grad, var, apply_state)
# The masks are only updated before the first step.
mask = self.get_slot(var, "mask")
def update_mask():
# Conv2D stores a 4D filter weight and Dense stores a 2D kernel weight.
# For Conv2D, the filter is in the shape of (H, W, I, O) and we need to
# permute it to (H*W*O, I) and prune it along I. For Dense, the kernel is
# in shape of (I, O) and we need to permute it to (O, I) and prune it
# along I.
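      # Example: a Conv2D filter of shape (3, 3, 16, 32) is transposed to
      # (3, 3, 32, 16) and reshaped to (288, 16); a Dense kernel of shape (64, 8)
      # is transposed to (8, 64). Pruning then zeroes 2 of every 4 entries along
      # the trailing (input) dimension.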
if var.shape.rank == 2:
matrix = tf.transpose(var, perm=[1, 0])
orig_input_dim = matrix.shape[1]
m = 4
padding_size = m - orig_input_dim % m
if self._padding and padding_size != 0:
matrix = tf.pad(matrix, [[0, 0], [0, padding_size]], "CONSTANT")
elif var.shape.rank == 4:
matrix = tf.transpose(var, perm=[0, 1, 3, 2])
permuted_shape = matrix.shape
matrix = tf.reshape(matrix, shape=(-1, matrix.shape[-1]))
new_mask = self._m4n2_1d(matrix)
if var.shape.rank == 2:
if self._padding and padding_size != 0:
new_mask = new_mask[:, :orig_input_dim]
new_mask = tf.transpose(new_mask, perm=[1, 0])
elif var.shape.rank == 4:
new_mask = tf.reshape(new_mask, shape=permuted_shape)
new_mask = tf.transpose(new_mask, perm=[0, 1, 3, 2])
update_mask_op = mask.assign(new_mask)
with tf.control_dependencies([update_mask_op]):
return tf.identity(mask)
updated_mask = tf.cond(self._iterations == 0, update_mask,
lambda: tf.identity(mask))
with tf.control_dependencies([updated_mask]):
opt_op = self._optimizer._resource_apply_dense(grad, var, apply_state)
with tf.control_dependencies([opt_op]):
new_var = tf.math.multiply(var, mask)
return var.assign(new_var)
def _resource_apply_sparse(self, grad, var, indices, apply_state):
return self._optimizer._resource_apply_sparse(grad, var, indices,
apply_state)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,
**kwargs):
return self._optimizer._resource_apply_sparse_duplicate_indices(
grad, handle, indices, **kwargs)
def get_config(self):
return self._optimizer.get_config()
# For both Dense and Conv2D, we need to make sure the output dim is a
# multiple of 8 and the input dim is a multiple of 16 to use the
# sparse tensor cores. For Dense layer, the kernel is a 2D matrix with
# (I, O). For Conv2D layer, since the filter is in the format of HWIO,
# the implicit GEMM will view it as a matrix of (H*W*I, O). In such
  # case, we simply apply a conservative restriction by requiring I to be a
  # multiple of 16.
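  # Example: a Dense kernel of shape (128, 8) is eligible (O=8 is a multiple of
  # 8 and I=128 a multiple of 16), whereas a Conv2D filter of shape (3, 3, 8, 16)
  # is skipped because its input dim 8 is not a multiple of 16.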
def _check_valid_layer(self, layer):
if (not isinstance(layer, layers.Dense) and
not isinstance(layer, layers.Conv2D)):
return False
if layer.kernel.shape[-1] % 8 == 0:
# Padding mode only supports padding the input dim in Dense layer.
if self._padding:
if isinstance(layer, layers.Dense):
return True
if (isinstance(layer, layers.Conv2D) and
layer.kernel.shape[2] % 16 == 0):
return True
else:
if (isinstance(layer, layers.Dense) and
layer.kernel.shape[0] % 16 == 0):
return True
if (isinstance(layer, layers.Conv2D) and
layer.kernel.shape[2] % 16 == 0):
return True
return False
def _set_eligible_set(self, model, denylist, allowlist):
if denylist and allowlist:
raise ValueError("denylist and allowlist cannot be both defined.")
if not denylist and not allowlist:
allowlist = [layers.Dense, layers.Conv2D]
target_list = allowlist if allowlist else denylist
list_msg = "("
for layer_def in target_list:
list_msg += "%s, " % layer_def.__name__
list_msg += ")"
def layer_to_name(layer):
type_name = "Unknown"
if isinstance(layer, layers.Dense):
type_name = "Dense"
elif isinstance(layer, layers.Conv2D):
type_name = "Conv2D"
return type_name
eligible_set = set()
model_layers = model.submodules
if allowlist:
tf_logging.warn("[TF-ASP] Allowlist is used: %s" % list_msg)
for layer in model_layers:
if layer.__class__ in allowlist and self._check_valid_layer(layer):
eligible_set.add(layer.kernel.ref())
tf_logging.warn(
"[TF-ASP] Pruning list accepts the \"kernel\" variable from "
"layer: %s (type=%s, shape=%s)" % (layer.name,
layer_to_name(layer),
layer.kernel.shape))
else:
assert(denylist)
tf_logging.warn("[TF-ASP] Denylist is used: %s" % list_msg)
for layer in model_layers:
if layer.__class__ not in denylist and self._check_valid_layer(layer):
eligible_set.add(layer.kernel.ref())
tf_logging.warn(
"[TF-ASP] Pruning list accepts the \"kernel\" variable from "
"layer: %s (type=%s, shape=%s)" % (layer.name,
layer_to_name(layer),
layer.kernel.shape))
self._eligible_set = eligible_set
|
atex-release
|
atex/structured_sparsity/tf_asp/tf_asp_optimizer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import numpy as np
import time
import ctypes
import math
import subprocess
### support for searching on the GPU
gpus_tested = False
gpus_found = 0
E = None
def set_cpu_device():
global gpus_tested, gpus_found
gpus_tested = True
gpus_found = 0
def set_gpu_device():
global gpus_tested, gpus_found
gpus_tested = False
gpus_found = 0
def use_gpu(initial_override = True):
global gpus_tested, gpus_found, E
if not gpus_tested:
if not initial_override:
gpus_tested = True
return False, None
try:
gpus_found = str(subprocess.check_output(["nvidia-smi", "-L"])).count('UUID')
except:
gpus_found = 0
gpus_tested = True
if gpus_found > 0:
E = ctypes.cdll.LoadLibrary(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
"../../../libstructured_sparsity.so"))
print(f"Found {gpus_found} gpus and kernels in {E}")
return gpus_found > 0 and E is not None, E
##############################################################################################
# pruning utilities
##############################################################################################
## apply 2:4 to some matrix
def apply_2_to_4(matrix):
for row in range(matrix.shape[0]):
for col in range(0,matrix.shape[1],4):
ix = np.argsort(np.abs(matrix[row,col:col+4]))
matrix[row,col+ix[0]] = 0.0
matrix[row,col+ix[1]] = 0.0
return matrix
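# Example: the row [0.1, -0.9, 0.3, -0.2] keeps its two largest magnitudes and
# becomes [0.0, -0.9, 0.3, 0.0].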
## find the sum of magnitudes if 2:4 were applied to a matrix
def sum_after_2_to_4(matrix):
cur_sum = 0.0
use_cuda, E = use_gpu()
if not use_cuda:
start_time = time.perf_counter()
for row in range(matrix.shape[0]):
for col in range(0,matrix.shape[1],4):
ix = np.argsort(np.abs(matrix[row,col:col+4]))
cur_sum += abs(matrix[row,col+ix[2]])
cur_sum += abs(matrix[row,col+ix[3]])
np_elapsed = time.perf_counter() - start_time
else:
matrix = matrix.astype(np.float32)
cuda_sum = np.zeros((1), dtype=np.float32)
start_time = time.perf_counter()
matrix_view = np.copy(matrix).flatten()
blocks = max(int(matrix.shape[1]/4/2), 1)
threads = min(max(math.ceil(matrix.shape[0]/4), 1), 1024)
result = E.run_subset_sum_after_2_to_4(ctypes.c_void_p(matrix_view.ctypes.data),
ctypes.c_uint(matrix.shape[0]),
ctypes.c_uint(matrix.shape[1]),
ctypes.c_uint(0),
ctypes.c_uint(matrix.shape[1]),
ctypes.c_uint(blocks),
ctypes.c_uint(threads),
ctypes.c_void_p(cuda_sum.ctypes.data)
)
cuda_elapsed = time.perf_counter() - start_time
cur_sum = cuda_sum[0]
return cur_sum
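## illustration only (assumed sketch): on the CPU fallback path,
## sum_after_2_to_4() equals the total absolute magnitude that survives
## apply_2_to_4()
def _example_sum_after_2_to_4():
    set_cpu_device()  # force the NumPy path for this sketch
    toy = np.array([[0.1, -0.9, 0.5, 0.2],
                    [1.0, -0.1, 0.3, -0.7]], dtype=np.float32)
    kept_magnitude = sum_after_2_to_4(np.copy(toy))
    pruned = apply_2_to_4(np.copy(toy))
    assert abs(kept_magnitude - np.sum(np.abs(pruned))) < 1e-5
    set_gpu_device()  # allow GPU re-detection for subsequent calls
    return kept_magnitude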
## try swapping columns and tracking magnitude after pruning
def try_swap(matrix, dst, src):
src_base = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_base = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap
matrix[...,[src,dst]] = matrix[...,[dst,src]]
# check the Nx4 slices of the swapped columns
src_sum = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_sum = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap back
matrix[...,[src,dst]] = matrix[...,[dst,src]]
return src_sum + dst_sum, (src_sum + dst_sum) - (src_base + dst_base)
def unstructured_prune(matrix, sparsity):
shp = matrix.shape
matrix = matrix.flatten()
ix = np.argsort(matrix)
ix = ix[:int(len(ix)*sparsity)]
matrix[ix] = 0.0
matrix = np.reshape(matrix, shp)
return matrix
## exhaustively search an entire matrix on the GPU
def try_permutations_on_matrix(matrix, permutations):
use_cuda, E = use_gpu()
assert(use_cuda)
matrix = np.copy(matrix)
matrix = matrix.astype(np.float32)
matrix_view = np.copy(matrix).flatten()
permutations_view = np.copy(np.asarray(permutations)).astype(np.uint32).flatten()
stripe_groups = np.asarray([[s for s in range(int(matrix.shape[1]/4))]]).astype(np.uint32)
stripe_groups_view = stripe_groups.flatten()
blocks = 1 #len(stripe_groups[0])
threads = 32
improvement = np.zeros((1), dtype=np.float32)
permutation = np.zeros((1), dtype=np.uint32)
result = E.run_check_permutations(ctypes.c_void_p(matrix_view.ctypes.data), # matrix
ctypes.c_uint(matrix.shape[0]), # rows
ctypes.c_uint(matrix.shape[1]), # cols
ctypes.c_void_p(stripe_groups_view.ctypes.data),# stripe groups
ctypes.c_uint(len(stripe_groups[0])), # group width
ctypes.c_uint(len(stripe_groups)), # num groups
ctypes.c_void_p(permutations_view.ctypes.data), # permutations
ctypes.c_uint(len(permutations)), # num permutations
ctypes.c_void_p(improvement.ctypes.data), # improvement
ctypes.c_void_p(permutation.ctypes.data), # winning permutation index
ctypes.c_uint(blocks),
ctypes.c_uint(threads))
return improvement[0], permutations[permutation[0]]
## magnitude improvement from the naive 2:4 matrix / how much was lost by naive 2:4 compared to SOL
def efficacy(SOL_lost_magnitude, base_lost_magnitude, cur_lost_magnitude):
if base_lost_magnitude == SOL_lost_magnitude:
eff = 1.0
else:
eff = (base_lost_magnitude - cur_lost_magnitude) / (base_lost_magnitude - SOL_lost_magnitude)
return eff
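## illustration only: an assumed worked example of efficacy(); if naive 2:4
## pruning loses magnitude 20.0, the best achievable (SOL) loss is 10.0, and
## the current permutation loses 12.0, the permutation recovered
## (20 - 12) / (20 - 10) = 0.8 of the possible improvement
def _example_efficacy():
    assert abs(efficacy(10.0, 20.0, 12.0) - 0.8) < 1e-9
    # when naive pruning already matches the SOL, efficacy is defined as 1.0
    assert efficacy(10.0, 10.0, 10.0) == 1.0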
## find the magnitude if the rows of a matrix were pruned independently, without structure
def magnitude_after_pruning_rows(matrix, rate=0.5):
magnitude = 0.
cols = matrix.shape[1]
for r in range(matrix.shape[0]):
rowVals = matrix[r]
rowVals = np.sort(np.abs(rowVals))
magnitude += np.sum(rowVals[int(cols*rate):])
return magnitude
##############################################################################################
# permutation utilities
##############################################################################################
## find the permutation needed to make matrix A look like matrix B
def find_permutation(A, B):
permutation = []
for col in range(A.shape[1]):
Avals = A[...,col]
for bcol in range(B.shape[1]):
if np.all(Avals - B[...,bcol] == np.zeros(Avals.shape)):
permutation.append(bcol)
break
return permutation
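## illustration only (assumed sketch): find_permutation() returns, for each
## column of A, the index of the matching column in B, so indexing B with the
## result reconstructs A
def _example_find_permutation():
    A = np.arange(12, dtype=np.float32).reshape(3, 4)
    B = A[:, [2, 0, 3, 1]]
    perm = find_permutation(A, B)
    assert np.array_equal(B[:, perm], A)
    return perm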
########################################
# reasonable method to find distance between permutations
# this is used to generate permutations "between" two other permutations to divide efficacy space
#######################################
## separate a flat permutation array into its groups of four and sort within each group to
## put the output into a canonical order: if two permutations have the same groups in the same positions, they will appear identical
def make_grouped(A):
groups = []
for x in range(0,len(A),4):
group = []
for c in range(4):
group.append(A[x+c])
group = np.sort(group)
groups.append(group)
return groups
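## illustration only (assumed sketch): two orderings that describe the same
## grouping compare equal after make_grouped() canonicalization
def _example_make_grouped():
    a = make_grouped([3, 1, 0, 2, 7, 5, 6, 4])
    b = make_grouped([0, 1, 2, 3, 4, 5, 6, 7])
    assert all(np.array_equal(x, y) for x, y in zip(a, b))
    return a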
## given two permutations, find the groups they have in common
def common_groups(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
# convert to sets to take the intersection
As = set(tuple(Ag[g]) for g in range(len(Ag)))
Bs = set(tuple(Bg[g]) for g in range(len(Bg)))
common = As.intersection(Bs)
# flatten
C = []
for s in common:
for v in s:
C.append(v)
# group
return make_grouped(C)
## given two permutations, remove the groups that are common between them
def remove_common_groups(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
# convert to sets to take set difference
As = set(tuple(Ag[g]) for g in range(len(Ag)))
Bs = set(tuple(Bg[g]) for g in range(len(Bg)))
Ad = As - Bs
Bd = Bs - As
# turn the differences back into flat arrays
A = []
for s in Ad:
for v in s:
A.append(v)
B = []
for s in Bd:
for v in s:
B.append(v)
# group to put into canonical order, re-flatten
A = make_grouped(A)
B = make_grouped(B)
A = [item for sublist in A for item in sublist]
B = [item for sublist in B for item in sublist]
return A,B
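## illustration only (assumed sketch): common_groups() keeps the groups of four
## that two permutations share, and remove_common_groups() strips them, leaving
## only the groups whose membership still differs
def _example_group_set_ops():
    A = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    B = [0, 1, 2, 3, 4, 5, 6, 8, 7, 9, 10, 11]
    shared = common_groups(A, B)
    assert len(shared) == 1  # only the group {0, 1, 2, 3} is shared
    A_diff, B_diff = remove_common_groups(A, B)
    assert len(A_diff) == 8 and len(B_diff) == 8
    assert 0 not in A_diff and 0 not in B_diff
    return shared, A_diff, B_diff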
## given two permutations, find which elements in B need to go where to look like A
def group_differences(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
wrong_entries = []
#for g,group in enumerate(Bg):
for g in range(len(Bg)):
group = Bg[g]
for i in range(len(group)):
val = group[i]
if val not in Ag[g]:
group_in_a = int(np.where(A == val)[0][0] / 4)
wrong_entries.append((val, g, group_in_a))
return wrong_entries
## (val, cur_group, desired_group) ==> dict[(cur_group, desired_group)] = [vals]
def dictify(wrong_entries):
result = {}
for entry in wrong_entries:
key = (entry[1], entry[2])
if key in result:
result[key].append(entry[0])
else:
result[key] = [entry[0]]
return result
## move groups of B to where they best match A's groups
def move_groups_to_match(B, A, debug=False):
Ag = make_grouped(A)
Bg = make_grouped(B)
new_Bg = [[] for g in range(len(Ag))]
wrong_entry_dict = dictify(group_differences(A, B))
if debug:
print(f"MGTM:\n\tAg: {Ag}\n\tBg: {Bg}\n\tWED: {wrong_entry_dict}")
moved_groups = []
keys_to_del = []
# move triples to the right spot
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(wrong_entry_dict[k]) == 3:
new_Bg[k[1]] = Bg[k[0]]
moved_groups.append(k[0])
keys_to_del.append(k)
if debug:
print(f"MGTM: moved triple {wrong_entry_dict[k]} from group {k[0]} to group {k[1]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# move doubles
for k in wrong_entry_dict.keys():
# if we've already moved the group to which this key belongs, remove it
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(wrong_entry_dict[k]) == 2:
if len(new_Bg[k[1]]) == 0: # move it to its requested destination if possible
new_Bg[k[1]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: moved double {wrong_entry_dict[k]} from group {k[0]} to its preferred group {k[1]}")
elif len(new_Bg[k[0]]) == 0: # otherwise leave it where it is (if possible)
new_Bg[k[0]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: left double {wrong_entry_dict[k]} where it was in group {k[0]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# move singles
# try to leave things where they are to prevent oscillating
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(new_Bg[k[1]]) == 0: # requested destination
new_Bg[k[1]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: moved single {wrong_entry_dict[k]} from group {k[0]} to its preferred group {k[1]}")
elif len(new_Bg[k[0]]) == 0:
new_Bg[k[0]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: left group {wrong_entry_dict[k]} where it was in group {k[0]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# put what's left where it'll fit
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
for dst in range(len(new_Bg)):
if len(new_Bg[dst]) == 0:
new_Bg[dst] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: put group {wrong_entry_dict[k]} where it found a spot in group {dst}")
break
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
assert(len(wrong_entry_dict) == 0)
Agsize = sum( [ len(group) for group in Ag] )
Bgsize = sum( [ len(group) for group in new_Bg] )
assert(Agsize == Bgsize)
new_B = [item for sublist in new_Bg for item in sublist]
return new_B
## swap two permutation entries and put the permutation into unique order
def swap_and_correct(permutation, src, tgt):
permutation[src],permutation[tgt] = permutation[tgt],permutation[src]
grouped = make_grouped(permutation)
grouped = [item for sublist in grouped for item in sublist]
return grouped
## make a swap that will move B in the direction of A
num_diffs = 0
def move_permutation_towards(B, A, debug=False):
global num_diffs
B = move_groups_to_match(B, A, debug)
wrong_entries = group_differences(A, B)
num_diffs = len(wrong_entries)
# nothing to do, early out
if len(wrong_entries) == 0:
if debug:
print("MPT: early out")
return B
if debug:
print(f"MPT: checking {len(wrong_entries)} diffs: {wrong_entries}")
# look for a group of three wrong entries that want to do the same thing
entry_dict = dictify(wrong_entries)
for k in entry_dict.keys():
entry = entry_dict[k]
if len(entry) == 3:
if debug:
print(f"MPT: found a triple swap at {k}: {entry_dict[k]}")
(src, dst) = k
# find the index of the one needed to complete the group
# the value is the value in A[dst] that's not in B[src]
# it's already in the destination group and may or may not need to move
group_id = dst
Ag = make_grouped(np.copy(A))
Bg = make_grouped(np.copy(B))
value = -1
for c in range(4):
if Ag[dst][c] not in Bg[src]:
value = Ag[dst][c]
if debug:
print(f"\tMPT: found the missing value {value} in A group {dst} offset {c}")
break
assert(value != -1)
# now find that value in B
idx0 = np.where(B == value)[0][0]
# find the index of the one this group doesn't need
# it's a member of the group but not in the dict entry
group_id = src
for c in range(4):
if B[group_id*4+c] not in entry_dict[k]:
if debug:
print(f"\tMPT: swapping {idx0} and {group_id*4+c}")
return swap_and_correct(B, idx0, group_id*4+c)
# look for a group of two entries that are heading to the same place as another wrong entry
victim_loner_pair = None
for k in entry_dict.keys():
entry = entry_dict[k]
if len(entry) == 2:
if debug:
print(f"MPT: found a double swap at {k}: {entry_dict[k]}")
(src, dst) = k
# find a wrong entry whose dst is the same
for k2 in entry_dict.keys():
if k2 == k:
continue
# k2 is a key whose value also belongs in stripe k2[1] (dst2)
if dst == k2[1]:
if debug:
print(f"\tMPT: found a loner going in the same direction at {k2}: {entry_dict[k2][0]}")
# instead of moving these three to where they're headed, start merging them by moving the loner into the double
# look for a complement: something moving from src to src2
(src2, dst2) = k2
complement_key = (src, src2)
if complement_key in entry_dict:
complement = entry_dict[complement_key][0]
if debug:
print(f"\t\tMPT: found a complement to the loner:{complement}")
return swap_and_correct(B, np.where(B == entry_dict[k2][0])[0][0], np.where(B == complement)[0][0])
# didn't find a complement, choose one of the two in the src group that don't belong
elif victim_loner_pair is None:
for k3 in entry_dict.keys():
if k3 == k:
continue
if k3[0] == src: # found the victim
victim = entry_dict[k3][0]
if debug:
print(f"\t\tMPT: found a victim for the double swap:{k3} -> {victim}")
victim_loner_pair = (victim, entry_dict[k2][0])
#return swap_and_correct(B, np.where(B == entry_dict[k2][0])[0][0], np.where(B == victim)[0][0])
if victim_loner_pair is not None:
if debug:
print(f"\t\tMPT: couldn't find any complements for double swaps, so going with a loner to make a triple: {victim_loner_pair}")
return swap_and_correct(B, np.where(B == victim_loner_pair[0])[0][0], np.where(B == victim_loner_pair[1])[0][0])
# look for one swap that will correct two entries
candidate_second = None
for we in range(len(wrong_entries)):
cur_entry = wrong_entries[we]
#if debug:
# print(f"\tMPT: checking {cur_entry} for complement")
for we2 in range(0,len(wrong_entries)):
pos_swap = wrong_entries[we2]
#if debug:
# print(f"\t\tMPT: is {pos_swap}?")
if cur_entry[1] == pos_swap[2] and cur_entry[2] == pos_swap[1]:
if debug:
print(f"\t\tfound complements: swapping {cur_entry} and {pos_swap}")
return swap_and_correct(B, np.where(B == cur_entry[0])[0][0], np.where(B == pos_swap[0])[0][0])
elif wrong_entries[0][2] == pos_swap[1]: # if pos_swap is currently where we[0] wants to go, keep it in mind
candidate_second = pos_swap
# fall back on picking the first one we come across
assert(candidate_second is not None)
if debug:
print(f"No complement, swapping two entries: {wrong_entries[0]} {candidate_second}")
return swap_and_correct(B, np.where(B == wrong_entries[0][0])[0][0], np.where(B == candidate_second[0])[0][0])
## find a shortest path from permutation A to B
import sys
import math
def permutation_distance(A, B, matrix=None, magnitude_targets=None, debug=False, verbosity=0):
global num_diffs
swaps = 0
debug = False
swap_limit = int(math.pow(2,int(len(A)/4)-1))
num_diffs = swap_limit
common = []
target_results = None
if magnitude_targets is not None:
assert matrix is not None
cur_mag = sum_after_2_to_4(matrix[:,A])
target_results = [(cur_mag, A) for i in range(len(magnitude_targets))]
if verbosity > 0 and matrix is not None:
print(f"swap {'0':>4} {sum_after_2_to_4(matrix[:, B]):>15.3f}")
if verbosity > 5:
print(f"swap {0:>4}, {make_grouped(A)} {make_grouped(B)}")
while not np.all(np.array(A)-np.array(B) == np.zeros(np.array(A).shape)):
cGroups = common_groups(A, B)
for g in cGroups:
common.append(g)
A, B = remove_common_groups(A, B)
if len(A) == 0:
break
B = move_permutation_towards(np.array(B), np.array(A), debug=debug)
swaps += 1
if matrix is not None:
total_cur_permute = [c for c in B]
for c in [item for sublist in common for item in sublist]:
total_cur_permute.append(c)
if verbosity > 0 or magnitude_targets is not None:
cur_mag = sum_after_2_to_4(matrix[:,total_cur_permute])
for i in range(len(target_results)):
result = target_results[i]
if abs(magnitude_targets[i] - result[0]) > abs(magnitude_targets[i] - cur_mag):
target_results[i] = (cur_mag, total_cur_permute)
if verbosity > 0:
print(f"swap {swaps:>4} {cur_mag:>15.3f}")
if verbosity > 5 or swaps > swap_limit:
print(f"swap {swaps:>4}, {A} {B}, {num_diffs} diffs remain")
# safety net
if swaps > swap_limit+3:
sys.exit()
return swaps, target_results
|
atex-release
|
atex/structured_sparsity/tf_asp/permuting_search_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
from .tf_asp_optimizer import AspOptimizerWrapper
from .tf_asp_optimizer_v2 import AspOptimizerWrapperV2
from .tf_asp_optimizer_v2 import check_pruned_layers
from .tf_asp_logging import *
|
atex-release
|
atex/structured_sparsity/tf_asp/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import tensorflow as tf
SHOW_PRUNING_INFO = tf.compat.v1.logging.WARN # 30
SHOW_PERMUTATION_INFO = 29
SHOW_PERMUTATION_MORE_INFO = 28
SHOW_PERMUTATION_DEBUG_INFO = tf.compat.v1.logging.DEBUG
|
atex-release
|
atex/structured_sparsity/tf_asp/tf_asp_logging.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import json
import numpy as np
import os
import pprint
import shutil
import tempfile
import tensorflow as tf
import time
from google.protobuf import json_format
from itertools import count
from tensorflow.keras import layers, optimizers, models
from tensorflow.python.platform import tf_logging
from .tf_asp_logging import *
from .permuting_search import Exhaustive_Search
from .permuting_search_utils import sum_after_2_to_4
from .permuting_search_utils import try_swap
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
# To better preserve high magnitude weights, we permute weights before pruning.
# The permutation applied on the current op usually requires corresponding
# permutation on its upstream ops. Here we maintain four op lists to
# differentiate how the permutation is applied:
# * allowlist: GEMM-based ops that can benefit from sparse tensor cores.
# * inferlist: ops that require weight permutation when its downstream ops get
# permuted.
# * clearlist: ops that won't be affected by the permutation and the permutation
# sequences should be passed through to their upstream ops.
# * skiplist: this list is mainly used in the plot_to_file() to make the graph
# more concise.
# All other ops are viewed as unsupported ops and the permutation should be
# stopped when they are found. A typical supported pattern is:
# [ALLOWLIST] -> (INFERLIST|CLEARLIST)* -> [ALLOWLIST],
# where ()* means an arbitrary number of ops.
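# A minimal illustrative reading of this pattern is sketched right after the op
# lists below (illustration only; the real traversal lives in build_node_map()
# and find_allowlist_parents()).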
PERMUTABLE_2x4_ALLOWLIST = ('Conv2D', 'MatMul')
PERMUTABLE_2x4_INFERLIST = ('BiasAdd',
'FusedBatchNormV3')
PERMUTABLE_2x4_CLEARLIST = ('AddV2',
'Cast',
'Erf',
'Identity',
'Mul',
'RealDiv',
'Relu',
'Relu6',
'Rsqrt',
'Softmax',
'StopGradient',
'Sub')
PERMUTABLE_2x4_SKIPLIST = ('Const',
'Placeholder',
'ReadVariableOp')
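# Illustrative sketch only (assumed helper, not used elsewhere in this module):
# a literal reading of the supported pattern
# [ALLOWLIST] -> (INFERLIST|CLEARLIST)* -> [ALLOWLIST] over a linear chain of
# op types.
def _example_is_permutable_chain(op_sequence):
  """Returns True if a linear op-type chain matches the supported pattern."""
  if len(op_sequence) < 2:
    return False
  if (op_sequence[0] not in PERMUTABLE_2x4_ALLOWLIST or
      op_sequence[-1] not in PERMUTABLE_2x4_ALLOWLIST):
    return False
  supported_middle = PERMUTABLE_2x4_INFERLIST + PERMUTABLE_2x4_CLEARLIST
  return all(op in supported_middle for op in op_sequence[1:-1])
# For example, ('Conv2D', 'BiasAdd', 'Relu', 'MatMul') matches the pattern,
# while ('Conv2D', 'MaxPool', 'MatMul') does not, because 'MaxPool' is in none
# of the lists.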
def get_graph_def(model, input_shapes):
"""Gets the op graph def for the given model. """
if (getattr(model, '_build_input_shape', None) is None and
      input_shapes is None):
model_path = tempfile.mkdtemp()
tf_logging.vlog(SHOW_PRUNING_INFO,
"[TF-ASP] tmpdir is created: %s. This is used to store a temp "
"savedmodel to extract input signatures. Users may specify "
"`input_shapes` to skip this step.\n" % model_path)
try:
tf.saved_model.save(model, model_path)
loaded = tf.saved_model.load(model_path)
infer = loaded.signatures['serving_default']
assert len(infer.structured_input_signature) == 2
infer_inputs = infer.structured_input_signature[1]
input_specs = []
# Manually extract each input specs to make sure the order is correct.
for i in range(len(infer_inputs)):
input_name = 'input_' + str(i+1)
input_specs.append(infer_inputs[input_name])
if len(input_specs) != len(infer_inputs):
raise ValueError
tf_logging.vlog(SHOW_PRUNING_INFO,
"[TF-ASP] Successfully found the input signature: [{}]\n".format(
', '.join([str(x) for x in input_specs])))
except:
raise ValueError("Failed to extract the input_shapes from the model. "
"Users may need to try manually specify 'input_shapes'")
finally:
tf_logging.vlog(SHOW_PRUNING_INFO,
"[TF-ASP] tmpdir is cleaned up: %s\n" % model_path)
shutil.rmtree(model_path)
else:
if getattr(model, '_build_input_shape', None) is not None:
model_input_shapes = model._build_input_shape
else:
model_input_shapes = input_shapes
input_specs = []
if not isinstance(model_input_shapes, list):
if isinstance(model_input_shapes, tf.TensorSpec):
input_specs.append(model_input_shapes)
else:
input_specs.append(tf.TensorSpec(shape=model_input_shapes))
else:
for input_shape in model_input_shapes:
if isinstance(input_shape, tf.TensorSpec):
input_specs.append(input_shape)
else:
input_specs.append(tf.TensorSpec(shape=input_shape))
# For some custom models, the list is only expected to contain multiple
# inputs.
if len(input_specs) == 1:
input_specs = input_specs[0]
tf_fn = tf.function(lambda x: model(x))
graph_def = tf_fn.get_concrete_function(input_specs).graph.as_graph_def()
json_string = json_format.MessageToJson(graph_def)
obj = json.loads(json_string)
return obj
def build_layer_dict(model):
"""Builds a dict holds {layer_name: layer} from the flattened model. """
def _is_module(obj):
return isinstance(obj, tf.Module)
layer_dict = {}
if not hasattr(model, '_flatten'):
return layer_dict
layer_with_path = tuple(model._flatten(predicate=_is_module, with_path=True))
for path, layer in layer_with_path:
layer_name = model.name
current_obj = model
for subpath in path:
if isinstance(subpath, str):
if type(current_obj) is dict:
current_obj = current_obj[subpath]
else:
current_obj = getattr(current_obj, subpath)
else:
assert isinstance(subpath, int)
current_obj = current_obj[subpath]
if _is_module(current_obj):
layer_name += '/' + current_obj.name
if layer_name not in layer_dict:
layer_dict[layer_name] = [layer]
elif layer not in layer_dict[layer_name]:
layer_dict[layer_name].append(layer)
return layer_dict
def find_variable(node_name, node_op, var_name, layer_dict):
"""Finds the variable for node_name. """
if node_op not in PERMUTABLE_2x4_ALLOWLIST + PERMUTABLE_2x4_INFERLIST:
return None
# We assume the last subdir is the op name. So, we strip it to obtain the
# prefix. For the MatMul, it is possible that the op name is
# "Tensordot/MatMul", e.g., when the input tensor is not in 2D.
prefix = node_name.rsplit('/', 1)[0]
if node_op == 'MatMul' and prefix.endswith("Tensordot"):
prefix = prefix.rsplit('/', 1)[0]
# Try to find the variable from layer_dict.
if prefix in layer_dict:
layers = layer_dict[prefix]
if len(layers) == 1:
layer = layers[0]
if var_name == 'kernel' and hasattr(layer, 'kernel'):
return layer.kernel
if var_name == 'bias' and hasattr(layer, 'bias'):
return layer.bias
if var_name == 'gamma' and hasattr(layer, 'variables'):
return layer.variables[0]
if var_name == 'beta' and hasattr(layer, 'variables'):
return layer.variables[1]
if var_name == 'moving_mean' and hasattr(layer, 'variables'):
return layer.variables[2]
if var_name == 'moving_variance' and hasattr(layer, 'variables'):
return layer.variables[3]
if len(layers) > 1:
tf_logging.vlog(SHOW_PERMUTATION_INFO,
"Failed to distinguish variables for op_name=%s, "
"candidates=(%s). Usually this happens when layers have "
"the same name in nested models. Please consider rename "
"them." % (node_name,
", ".join([x.name for x in layers])))
return None
def build_kernel_map(graph_def, layer_dict):
"""Creates a dict of op names with their variables.
Returns:
A dict {op_name: {var_name: var}}. The 'var' might be None, meaning the
naming convention used in the model doesn't match our assumption.
Depending on different ops, the valid 'var_name's are:
* MatMul: kernel
* Conv2D: kernel
* BiasAdd: bias
* FusedBatchNormV3: gamma, beta, moving_mean, moving_variance
"""
kernel_dict = {}
for node in graph_def['node']:
node_name = node['name']
node_op = node['op']
if node_op in PERMUTABLE_2x4_ALLOWLIST:
kernel_dict[node_name] = {}
kernel = find_variable(node_name, node_op, 'kernel', layer_dict)
kernel_dict[node_name]['kernel'] = kernel
elif node_op in PERMUTABLE_2x4_INFERLIST:
kernel_dict[node_name] = {}
if node_op == 'BiasAdd':
bias = find_variable(node_name, node_op, 'bias', layer_dict)
kernel_dict[node_name]['bias'] = bias
if node_op == 'FusedBatchNormV3':
gamma = find_variable(node_name, node_op, 'gamma', layer_dict)
beta = find_variable(node_name, node_op, 'beta', layer_dict)
moving_mean = find_variable(node_name, node_op, 'moving_mean',
layer_dict)
moving_variance = find_variable(node_name, node_op, 'moving_variance',
layer_dict)
kernel_dict[node_name]['gamma'] = gamma
kernel_dict[node_name]['beta'] = beta
kernel_dict[node_name]['moving_mean'] = moving_mean
kernel_dict[node_name]['moving_variance'] = moving_variance
return kernel_dict
def build_node_map(graph_def, kernel_map, prunable_kernels):
"""Builds a dict of op nodes with their attributes.
Returns:
A dict of {op_name: {attr_name: attr}}. The valid 'attr_name' are:
* op: a string of op_type
* inputs: a list of parent node names
* category: a string of ['allow', 'infer', 'clear', 'skip', 'deny']
When the op_type is 'MatMul' or 'Conv2D', there is one more attr:
* kernel: a variable tensor
When the op_type is 'BiasAdd', there is one more attr:
* bias: a variable tensor
When the op_type is 'FusedBatchNormV3', there are four more attrs:
* gamma: a variable tensor
* beta: a variable tensor
* moving_mean: a variable tensor
* moving_variance: a variable tensor
Note, all node's 'category' is 'deny' at the beginning and only when its
variable tensor is successfully assigned, the 'category' will be switched
to 'allow' or 'infer'.
"""
node_map = {}
for node in graph_def['node']:
node_name = node['name']
node_map[node_name] = {}
node_map[node_name]['op'] = node['op']
node_map[node_name]['inputs'] = node['input'] if 'input' in node else []
node_map[node_name]['category'] = 'deny'
if node['op'] in PERMUTABLE_2x4_ALLOWLIST:
kernel = kernel_map[node_name]['kernel']
if kernel is not None and kernel.ref() in prunable_kernels:
node_map[node_name]['kernel'] = kernel
node_map[node_name]['category'] = 'allow'
if node['op'] in PERMUTABLE_2x4_INFERLIST:
if node['op'] == 'BiasAdd':
bias = kernel_map[node_name]['bias']
if bias is not None:
node_map[node_name]['bias'] = bias
node_map[node_name]['category'] = 'infer'
if node['op'] == 'FusedBatchNormV3':
gamma = kernel_map[node_name]['gamma']
beta = kernel_map[node_name]['beta']
moving_mean = kernel_map[node_name]['moving_mean']
moving_variance = kernel_map[node_name]['moving_variance']
if not [x for x in (gamma, beta, moving_mean, moving_variance)
if x is None]:
node_map[node_name]['gamma'] = gamma
node_map[node_name]['beta'] = beta
node_map[node_name]['moving_mean'] = moving_mean
node_map[node_name]['moving_variance'] = moving_variance
node_map[node_name]['category'] = 'infer'
if node['op'] in PERMUTABLE_2x4_CLEARLIST:
node_map[node_name]['category'] = 'clear'
if node['op'] in PERMUTABLE_2x4_SKIPLIST:
node_map[node_name]['category'] = 'skip'
return node_map
def check_skippable(node_name, node_map):
"""Checks if the bransh starting from node_name is skippable.
We define the branch that will read variables as a skippable branch. The
node_name must be an immediate parents of an allowlist or inferlist node.
For example, these three patterns will be marked as skippable branch.
* Conv2D<-(ReadVariableOp)
* Conv2D<-(Cast)<-ReadVariableOp
  * Conv2D<-(Const)
  These patterns will be non-skippable:
* Conv2D<-(Placeholder)
* Conv2D<-(Cast)<-ReLU.
Note, the node in the parenthesis is the node_name.
"""
node_op = node_map[node_name]['op']
if node_op in ('Const', 'ReadVariableOp'):
return True
if node_op == 'Cast':
if 'inputs' in node_map[node_name]:
parent_node_names = node_map[node_name]['inputs']
if (len(parent_node_names) == 1 and
node_map[parent_node_names[0]]['op'] == 'ReadVariableOp'):
return True
return False
def find_allowlist_parents_helper(node_name, node_map):
"""Helper function for find_allowlist_parents(). """
node_category = node_map[node_name]['category']
if node_category == 'allow':
return [node_name]
if node_category in ('infer', 'clear'):
parent_node_names = node_map[node_name]['inputs']
parents = []
for parent_node_name in parent_node_names:
if (node_category == 'infer' and check_skippable(parent_node_name,
node_map)):
continue
new_parents = find_allowlist_parents_helper(parent_node_name, node_map)
if new_parents is None:
return None
parents.extend(x for x in new_parents if x not in parents)
if len(parents) == 0:
return None
return parents
return None
def find_allowlist_parents(node_name, node_map):
"""Finds all valid allowlist parent nodes of node_name.
We define the valid allowlist parents as the allowlist nodes that are on the
upstream paths of node_name and for each path, there is no other allowlist
nodes in between. Note, we return an empty list if any upstream path of
node_name is not ended with an allowlist op.
Args:
node_name: A node name, which must be either an allowlist or inferlist node.
node_map: A node map. See build_node_map().
Returns:
A list of valid allowlist parent node names.
"""
assert node_map[node_name]['category'] in ('allow', 'infer')
parents = []
parent_node_names = node_map[node_name]['inputs']
for parent_node_name in parent_node_names:
node_category = node_map[parent_node_name]['category']
if check_skippable(parent_node_name, node_map):
continue
new_parents = find_allowlist_parents_helper(parent_node_name, node_map)
# If any node has no valid parents, we should early exit, since we've found
# a not-ended-with-allowlist-node branch.
if new_parents is None:
return []
parents.extend(x for x in new_parents if x not in parents)
return parents
def build_permute_map(node_map):
"""Builds a map to track the permutation on allowlist and inferlist ops.
Args:
node_map: A node map. See build_node_map().
Returns:
A dict in the form of:
{ 'node_name':
{ 'parents':[],
'children':[],
'c-permuted': False,
'k-permuted': False,
'sibling_group_index': -1
}
}. The 'node_name' represents ops from the allowlist or inferlist.
"""
processed = {}
for node_name in node_map:
if node_map[node_name]['category'] in ('allow', 'infer'):
if node_name not in processed:
processed[node_name] = {}
parent_node_names = find_allowlist_parents(node_name, node_map)
for parent_node_name in parent_node_names:
if parent_node_name not in processed:
processed[parent_node_name] = {}
if 'parents' not in processed[node_name]:
processed[node_name]['parents'] = []
processed[node_name]['parents'].append(parent_node_name)
processed[node_name]['k-permuted'] = False
processed[node_name]['c-permuted'] = False
processed[node_name]['sibling_group_index'] = -1
# We don't update inferlist nodes' parent nodes.
if node_map[node_name]['category'] == 'infer':
continue
if 'children' not in processed[parent_node_name]:
processed[parent_node_name]['children'] = []
processed[parent_node_name]['children'].append(node_name)
processed[parent_node_name]['k-permuted'] = False
processed[parent_node_name]['c-permuted'] = False
processed[parent_node_name]['sibling_group_index'] = -1
return processed
def find_siblings(node_name, permute_map, found_siblings):
"""Finds all siblings of node_name.
Returns:
An updated sibling list by considering node_name. The siblings include the
node_name itself.
"""
# We don't permute the top layer of allowlist ops.
if 'parents' not in permute_map[node_name]:
return found_siblings
siblings = [node_name]
# Finds siblings that have the same parent with node_name.
for parent_node_name in permute_map[node_name]['parents']:
for child_node_name in permute_map[parent_node_name]['children']:
if child_node_name != node_name:
siblings.append(child_node_name)
new_siblings = [x for x in siblings if x not in found_siblings]
found_siblings.extend(new_siblings)
# Finds siblings of the above new_siblings. They are also siblings of
# node_name.
for new_sibling in new_siblings:
found_siblings = find_siblings(new_sibling, permute_map, found_siblings)
return found_siblings
def get_weights(node_name, node_map):
"""Returns a transposed/reshaped 2D weights of node_name.
For Conv2D, the weight is in (H,W,I,O) shape and we transpose it
to (I, O*H*W). For MatMul, we directly return its weight in (I, O) shape.
Returns:
    A 2D tensor reshaped from the weight of node_name.
"""
kernel = node_map[node_name]['kernel']
# Since node_map[node_name]['category'] is expected to be 'allow', the
# 'kernel' must exist.
assert kernel is not None
if node_map[node_name]['op'] == 'Conv2D':
kernel = tf.transpose(kernel, perm=[2, 3, 0, 1])
transposed_kernel = tf.reshape(kernel, shape=(kernel.shape[0], -1))
return transposed_kernel
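# Shape sketch (illustration only, assumed toy values): a Conv2D kernel stored
# as (H, W, I, O) = (3, 3, 16, 32) becomes (I, O*H*W) = (16, 288), while a
# MatMul kernel (I, O) = (16, 32) is returned unchanged, so the permutation
# search always operates along the input dim I.
def _example_get_weights_shapes():
  conv_kernel = tf.zeros([3, 3, 16, 32])  # (H, W, I, O)
  conv_2d = tf.reshape(tf.transpose(conv_kernel, perm=[2, 3, 0, 1]),
                       shape=(conv_kernel.shape[2], -1))
  dense_kernel = tf.zeros([16, 32])  # (I, O)
  dense_2d = tf.reshape(dense_kernel, shape=(dense_kernel.shape[0], -1))
  return conv_2d.shape, dense_2d.shape  # (16, 288), (16, 32)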
def search_for_good_permutation(matrix, search_device, search_time_limit):
"""Finds best permutation seq over the input dim of matrix. """
if search_device not in ('GPU', 'CPU', 'DEBUG'):
raise ValueError(
"search_device=%s is not supported." % search_device)
if search_device == 'DEBUG':
perm = tf.range(0, matrix.shape[0])
perm = tf.reverse(perm, axis=[0])
return perm, ""
# The Exhaustive_Search() and sum_after_2_to_4() expect the matrix in the
# shape of (O, I).
transposed_matrix = tf.transpose(matrix)
input_dim = transposed_matrix.shape[1]
# TODO(kaixih): Move this logic to GPU if perf issue is hit.
original_magnitude = tf.math.reduce_sum(tf.math.abs(transposed_matrix))
pruned_magnitude = sum_after_2_to_4(transposed_matrix.numpy())
epsilon = 1e-3
# We want to skip the permutation step if the pruned_magnitude is already good
# enough.
if (original_magnitude - pruned_magnitude) > epsilon:
if input_dim <= 2048:
permuted_matrix, duration, perm = Exhaustive_Search(
transposed_matrix.numpy(), stripe_group_size=8, escape_attempts=100,
search_device=search_device)
else:
permuted_matrix = transposed_matrix.numpy()
real_swap_num = 0
start_time = time.perf_counter()
perm = list(range(input_dim))
while time.perf_counter() - start_time < search_time_limit:
src = np.random.randint(input_dim)
dst = np.random.randint(input_dim)
src_group = int(src / 4)
dst_group = int(dst / 4)
if src_group == dst_group: # channel swapping within a stripe does nothing
continue
new_sum, improvement = try_swap(permuted_matrix, dst, src)
if improvement > 1e-9:
permuted_matrix[...,[src,dst]] = permuted_matrix[...,[dst,src]]
real_swap_num += 1
perm[src], perm[dst] = perm[dst], perm[src]
duration = time.perf_counter() - start_time
tf_logging.vlog(SHOW_PERMUTATION_MORE_INFO,
"[TF-ASP] Finally swap {} channel pairs until the search "
"time limit expires.".format(real_swap_num))
permuted_magnitude = sum_after_2_to_4(permuted_matrix)
if (pruned_magnitude - permuted_magnitude) > epsilon:
return None, "pruned_magnitude (%f) >= permuted_magnitude (%f)" % (
pruned_magnitude, permuted_magnitude)
return perm, "permuted_magnitude (%f) >= pruned_magnitude (%f)" % (
permuted_magnitude, pruned_magnitude)
else:
return None, "pruned_magnitude (%f) >= original_magnitude (%f)" % (
pruned_magnitude, original_magnitude)
def find_permutation(node_name, permute_map, node_map, search_device,
search_time_limit, index_generator):
"""Finds the permutation sequence and update the permute_map. """
if 'permutation' in permute_map[node_name]:
return
if node_map[node_name]['category'] != 'allow':
return
siblings = find_siblings(node_name, permute_map, [])
sibling_weights = []
for sibling in siblings:
weights = get_weights(sibling, node_map)
if weights is not None:
sibling_weights.append(weights)
if len(sibling_weights) != 0:
# The weights from siblings are concatenated along the output dim. So,
# concat_weights is in the shape of (I, n*O).
concat_weights = tf.concat(sibling_weights, axis=1)
permutation_seq, magnitude_info = search_for_good_permutation(
concat_weights, search_device, search_time_limit)
sibling_group_index = next(index_generator)
# Broadcast the permutation sequence to all siblings.
for sibling in siblings:
permute_map[sibling]['permutation'] = permutation_seq
permute_map[sibling]['sibling_group_index'] = sibling_group_index
tf_logging.vlog(SHOW_PERMUTATION_MORE_INFO,
"[TF-ASP] Permute-Siblings: %s (%s: %s)" % (
",".join([x for x in siblings]),
"Skipped" if permutation_seq is None else "Enabled",
magnitude_info))
def permute_C(node_name, node_map, permutation_seq):
"""Permutes the input dim of the weights from node_name. """
node_op = node_map[node_name]['op']
  # Since permutation_seq exists, the node must be an allowlist op.
assert node_op in PERMUTABLE_2x4_ALLOWLIST
kernel = node_map[node_name]['kernel']
assert kernel is not None
if node_op == 'Conv2D':
transposed_kernel = tf.transpose(kernel, perm=[2, 3, 0, 1])
transposed_shape = transposed_kernel.shape
transposed_kernel = tf.reshape(transposed_kernel,
shape=(transposed_shape[0], -1))
shuffled_kernel = tf.gather(transposed_kernel, permutation_seq)
transposed_kernel = tf.reshape(shuffled_kernel, shape=transposed_shape)
recovered_kernel = tf.transpose(transposed_kernel, perm=[2, 3, 0, 1])
kernel.assign(recovered_kernel)
if node_op == 'MatMul':
shuffled_kernel = tf.gather(kernel, permutation_seq)
kernel.assign(shuffled_kernel)
tf_logging.vlog(SHOW_PERMUTATION_MORE_INFO,
"[TF-ASP] Permute-C: node_name=%s" % node_name)
def permute_K_impl(node_name, node_map, permutation_seq, trigger_node):
"""Permutes the output dim of the weights from node_name. """
node_op = node_map[node_name]['op']
if node_op in PERMUTABLE_2x4_ALLOWLIST:
kernel = node_map[node_name]['kernel']
assert kernel is not None
new_kernel = tf.gather(kernel, permutation_seq, axis=-1)
kernel.assign(new_kernel)
if node_op in PERMUTABLE_2x4_INFERLIST:
if node_op == "BiasAdd":
bias = node_map[node_name]['bias']
assert bias is not None
new_bias = tf.gather(bias, permutation_seq)
bias.assign(new_bias)
if node_op == 'FusedBatchNormV3':
gamma = node_map[node_name]['gamma']
beta = node_map[node_name]['beta']
moving_mean = node_map[node_name]['moving_mean']
moving_variance = node_map[node_name]['moving_variance']
assert not [x for x in (gamma, beta, moving_mean, moving_variance) if x is
None]
new_gamma = tf.gather(gamma, permutation_seq)
new_beta = tf.gather(beta, permutation_seq)
new_moving_mean = tf.gather(moving_mean, permutation_seq)
new_moving_variance = tf.gather(moving_variance, permutation_seq)
gamma.assign(new_gamma)
beta.assign(new_beta)
moving_mean.assign(new_moving_mean)
moving_variance.assign(new_moving_variance)
tf_logging.vlog(SHOW_PERMUTATION_MORE_INFO,
"[TF-ASP] Permute-K: node_name=%s, permute_seq from %s" % (
node_name, trigger_node))
def permute_K_helper(node_name, permute_map, node_map, permutation_seq,
trigger_node):
"""Permutes output dims of weights from node_name's upstream ops. """
parent_node_names = node_map[node_name]['inputs']
for parent_node_name in parent_node_names:
node_category = node_map[parent_node_name]['category']
# Finds an allowlist op.
if (node_category == 'allow' and
not permute_map[parent_node_name]['k-permuted']):
permute_K_impl(parent_node_name, node_map, permutation_seq, trigger_node)
permute_map[parent_node_name]['k-permuted'] = True
# Finds an inferlist op that hasn't been permuted yet.
elif (node_category == 'infer' and
not permute_map[parent_node_name]['k-permuted']):
permute_K_impl(parent_node_name, node_map, permutation_seq, trigger_node)
permute_map[parent_node_name]['k-permuted'] = True
permute_K_helper(parent_node_name, permute_map, node_map, permutation_seq,
trigger_node)
# Finds a clearlist op and passes the permutation seq through.
elif node_category == 'clear':
permute_K_helper(parent_node_name, permute_map, node_map, permutation_seq,
trigger_node)
def apply_permutation(node_name, permute_map, node_map):
"""Applies the permutation to node_name.
This function permutes the input dim of node_name (c-permute) and permutes the
output dim of its upstream nodes (k-permute).
"""
if 'permutation' in permute_map[node_name]:
permutation_seq = permute_map[node_name]['permutation']
if permutation_seq is None:
return
if not permute_map[node_name]['c-permuted']:
permute_C(node_name, node_map, permutation_seq)
permute_map[node_name]['c-permuted'] = True
# When "permutation" exists, the sequence must be propagated upstream,
# meaning the parent nodes must exist and be permutable.
permute_K_helper(node_name, permute_map, node_map, permutation_seq,
node_name)
def count_c_permuted(permute_map):
"""Counts how many layers are C-permuted. """
count = 0
for node_name in permute_map:
if permute_map[node_name] and permute_map[node_name]['c-permuted']:
count += 1
return count
def permute_model(model, input_shapes, prunable_kernels, search_device,
search_time_limit, plot_to_file):
"""Permute weights from all eligible ops in the model.
This function will first traverse the GraphDef obtained from the model. Note,
the GraphDef contains a graph of operations rather than layers. So, it will
locate weights from all eligible operations for pruning and then conduct
the permutation. The permutation includes C-permute which permutes the input
dim of the current weights and K-permute which permutes the output dim of the
previous weights so as to match the results of C-permute.
Args:
model: A built model.
input_shapes: A tuple or a list of tuples representing the input tensor
shapes.
prunable_kernels: A set of kernel refs that are pruning.
search_device: A string representing where the permutation searching occurs.
Valid strings are ['GPU'(default), 'CPU', 'DEBUG'].
plot_to_file: A string of file name to plot the colored op graph.
"""
graph_def = get_graph_def(model, input_shapes)
layer_dict = build_layer_dict(model)
kernel_map = build_kernel_map(graph_def, layer_dict)
tf_logging.vlog(SHOW_PERMUTATION_DEBUG_INFO,
"[TF-ASP] DEBUG kernel_map:\n" + pprint.pformat(kernel_map, indent=2))
node_map = build_node_map(graph_def, kernel_map, prunable_kernels)
tf_logging.vlog(SHOW_PERMUTATION_DEBUG_INFO,
"[TF-ASP] DEBUG node_map:\n" + pprint.pformat(node_map, indent=2))
permute_map = build_permute_map(node_map)
# After find_permutation(), the item in permute_map may not have "parents" and
# "permutation" if the node_name is the top layer, meaning it is only
# eligible for k-permute; the item may not have "children" if the node_name is
# the bottom layer.
index_generator = count()
for node_name in permute_map:
find_permutation(node_name, permute_map, node_map, search_device,
search_time_limit, index_generator)
tf_logging.vlog(SHOW_PERMUTATION_DEBUG_INFO,
"[TF-ASP] DEBUG permute_map (prolog):\n" + pprint.pformat(
permute_map, indent=2, compact=True))
for node_name in permute_map:
apply_permutation(node_name, permute_map, node_map)
tf_logging.vlog(SHOW_PERMUTATION_DEBUG_INFO,
"[TF-ASP] DEBUG permute_map (epilog):\n" + pprint.pformat(
permute_map, indent=2, compact=True))
if plot_to_file:
plot_ops_graph(node_map, permute_map, plot_to_file)
tf_logging.vlog(SHOW_PERMUTATION_INFO,
"[TF-ASP] %d/%d variables (Conv2D or MatMul) are permuted!\n" % (
count_c_permuted(permute_map), len(prunable_kernels)))
def check_pydot():
"""Returns True if PyDot and Graphviz are available."""
if pydot is None:
return False
try:
# Attempt to create an image of a blank graph
# to check the pydot/graphviz installation.
pydot.Dot.create(pydot.Dot())
return True
except (OSError, pydot.InvocationException):
return False
def add_edge(dot, src, dst):
"""Adds edge from src to dst. """
if not dot.get_edge(src, dst):
dot.add_edge(pydot.Edge(src, dst))
def plot_ops_graph(node_map, permute_map, to_file):
"""Converts ops to dot format and save to a file. """
if not check_pydot():
message = (
'You must install pydot (`pip install pydot`) '
'and install graphviz '
        '(see instructions at https://graphviz.gitlab.io/download/) '
'for plot_to_file option to work.')
raise ImportError(message)
dot = pydot.Dot()
dot.set('rankdir', 'TB')
dot.set('concentrate', True)
dot.set('dpi', 96)
dot.set_node_defaults(shape='record')
# A handy knob to indicate whether the plot contains the skiplist ops.
contains_skiplist_nodes = True
# Add all the nodes to the dot.
for node_name in node_map:
node_category = node_map[node_name]['category']
fillcolor = 'red'
if node_category == 'allow':
fillcolor = 'green'
elif node_category == 'infer':
fillcolor = 'orange'
elif node_category == 'clear':
fillcolor = 'yellow'
elif node_category == 'skip':
fillcolor = 'grey'
if not contains_skiplist_nodes and node_category == 'skip':
continue
def format_shape(shape):
return str(shape).replace(str(None), 'None')
label = node_map[node_name]['op']
if node_category == 'allow':
kernel = node_map[node_name]['kernel']
assert kernel is not None
kernel_shape = format_shape(kernel.shape)
prunable_flag = 'S'
sibling_group_flag = -1
permute_k_flag = ''
permute_c_flag = ''
if node_name in permute_map:
if permute_map[node_name]:
sibling_group_flag = permute_map[node_name]['sibling_group_index']
permute_k_flag = 'K' if permute_map[node_name]['k-permuted'] else ''
permute_c_flag = 'C' if permute_map[node_name]['c-permuted'] else ''
else:
sibling_group_flag, permute_k_flag, permute_c_flag = (-1, '', '')
label = '{%s (%d, %s%s, %s)| kernel=%s}' % (
label, sibling_group_flag, permute_k_flag, permute_c_flag,
prunable_flag, kernel_shape)
if node_category == 'infer':
if node_name in permute_map and permute_map[node_name]:
label += ' (K)' if permute_map[node_name]['k-permuted'] else ''
if node_category == 'skip':
node = pydot.Node(node_name, label=label[0:2], style='filled',
fillcolor=fillcolor, shape='circle', fontsize=10)
else:
node = pydot.Node(node_name, label=label, style='filled',
fillcolor=fillcolor)
dot.add_node(node)
max_edges = 9999
edge_count = 0
try:
# Create edges for these nodes.
for dst_node_name in node_map:
for src_node_name in node_map[dst_node_name]['inputs']:
      # We skip the src nodes if their names start with '^'. It seems they are
# some virtual nodes and we are not interested in them.
if (src_node_name.startswith('^') or (src_node_name not in node_map) or
(not contains_skiplist_nodes and
(node_map[dst_node_name]['category'] == 'skip' or
node_map[src_node_name]['category'] == 'skip'))):
continue
add_edge(dot, src_node_name, dst_node_name)
edge_count += 1
if edge_count >= max_edges:
raise StopIteration
except StopIteration:
print("[TF-ASP] The op graph is too large to plot. Only up to %d edges are"
" plotted." % max_edges)
file_name, extension = os.path.splitext(to_file)
if not extension:
extension = 'png'
else:
extension = extension[1:]
dot.write(file_name + '.' + extension, format=extension)
print("[TF-ASP] The op graph is plotted to %s." % to_file)
|
atex-release
|
atex/structured_sparsity/tf_asp/permuting_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import numpy as np
import tensorflow as tf
from itertools import permutations
from tensorflow.keras import layers, optimizers
from tensorflow.python.platform import tf_logging
from .permuting_utils import permute_model
from .pruning_utils import get_2to4_mask
from .tf_asp_logging import SHOW_PRUNING_INFO
PRUNABLE_2x4_ALLOWLIST = (layers.Conv2D, layers.Dense)
def is_prunable(layer, padding):
"""Returns `True` if the `kernel` is prunable."""
if type(layer) not in PRUNABLE_2x4_ALLOWLIST:
return False
kernel = layer.kernel
# Check the output dim.
if kernel.shape[-1] % 8 != 0:
return False
# Check the input dim.
if type(layer) == layers.Dense:
return kernel.shape[0] % 16 == 0 if not padding else True
if type(layer) == layers.Conv2D:
return kernel.shape[2] % 16 == 0
return False
def find_prunable_kernels(model, padding):
"""Returns a set of variable refs that are prunable. """
if model is None:
raise ValueError('`model` cannot be None')
if not isinstance(model, tf.keras.Model):
raise ValueError('`model` can only be a `tf.keras.Model` instance.'
'You passed an instance of type: {input}.'.format(
input=model.__class__.__name__))
if not model.built:
raise ValueError('`model` must be a built model. '
                     'It has not been built yet. Please call `model.build(input_shape)` '
'before pruning your model.')
prunable_kernels = set()
for layer in model.submodules:
if is_prunable(layer, padding):
prunable_kernels.add(layer.kernel.ref())
return prunable_kernels
class AspOptimizerWrapperV2(optimizers.legacy.Optimizer):
"""An optimizer that automatically applies sparsity to the weights.
`AspOptimizerWrapperV2` wraps another optimizer and applies the weight
permutation (if necessary) and weight pruning.
A typical usage:
>>> import tf_asp
>>> opt = tf.keras.optimizers.SGD(learning_rate=0.2, momentum=1.0)
>>> opt = tf_asp.AspOptimizerWrapperV2(opt, model, padding=True)
Args:
optimizer: The `tf.keras.optimizers.Optimizer` instance to wrap.
model: The built model corresponds to the optimizer.
input_shapes: A tuple or a list of tuples representing the input tensor
shapes or TensorSpecs. This is required only when the input signature
cannot be deduced from the model.
padding: A boolean indicating whether padding is applied.
permute: A boolean indicating whether the permutation is on. It is true by
default.
search_device: A string indicating which device the permutation searching
uses: 'GPU' (default), 'CPU'.
plot_to_file: (str or None) The path to save the op graph plot using pydot
(if any). It is None by default.
"""
def __init__(self, optimizer, model, input_shapes=None, padding=False,
permute=True, search_device='GPU', search_time_limit=60,
plot_to_file=None, name=None, **kwargs):
super(AspOptimizerWrapperV2, self).__init__(name, **kwargs)
self._optimizer = optimizer
self._padding = padding
self._prunable_kernels = find_prunable_kernels(model, padding)
if permute:
permute_model(model, input_shapes, self._prunable_kernels, search_device,
search_time_limit, plot_to_file)
# A 6x4 matrix to store all combinations of 2:4 patterns. Allocate this
# tensor inside the optimizer to avoid GPU allocation when importing.
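    # With the base pattern [0., 0., 1., 1.], the six unique permutations are
    # (runtime order may differ because a set is used): (0,0,1,1), (0,1,0,1),
    # (0,1,1,0), (1,0,0,1), (1,0,1,0), (1,1,0,0).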
self._patterns = tf.convert_to_tensor(
list(set(permutations([0., 0., 1., 1.]))))
# We store a copy of learning_rate in _hyper, since the _hyper may be
# directly accessed in some circumstances.
# TODO(kaixih): check if we can remove such copy.
self._hyper['learning_rate'] = optimizer.learning_rate
def _prepare(self, var_list):
return self._optimizer._prepare(var_list)
def _create_slots(self, var_list):
self._optimizer._create_slots(var_list)
for var in var_list:
if var.ref() in self._prunable_kernels:
self.add_slot(var, "mask")
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
apply_gradients_op = super(AspOptimizerWrapperV2, self).apply_gradients(
grads_and_vars, name, experimental_aggregate_gradients)
# Normally self._optimizer.iterations is incremented in
# self._optimizer.apply_gradients(). Since that is not called, we increment
# it here instead.
with tf.control_dependencies([apply_gradients_op]):
return self._optimizer.iterations.assign_add(1)
def _resource_apply_dense(self, grad, var, apply_state):
if not var.ref() in self._prunable_kernels:
return self._optimizer._resource_apply_dense(grad, var, apply_state)
mask = self.get_slot(var, "mask")
def update_mask():
new_mask = get_2to4_mask(var, self._padding, self._patterns)
update_mask_op = mask.assign(new_mask)
with tf.control_dependencies([update_mask_op]):
return tf.identity(mask)
# The masks are updated at the beginning of fine-tuning.
maybe_update_mask = tf.cond(self._iterations == 0, update_mask,
lambda: tf.identity(mask))
with tf.control_dependencies([maybe_update_mask]):
opt_op = self._optimizer._resource_apply_dense(grad, var, apply_state)
with tf.control_dependencies([opt_op]):
new_var = tf.math.multiply(var, mask)
return var.assign(new_var)
def _resource_apply_sparse(self, grad, var, indices, apply_state):
return self._optimizer._resource_apply_sparse(grad, var, indices,
apply_state)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,
**kwargs):
return self._optimizer._resource_apply_sparse_duplicate_indices(
grad, handle, indices, **kwargs)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError as e:
if name == '_optimizer' or name == '_hyper':
# Avoid infinite recursion
raise e
# Delegate hyperparameter accesses to inner optimizer.
if name == 'lr':
name = 'learning_rate'
if name in self._optimizer._hyper:
return self._optimizer._get_hyper(name)
raise e
def __dir__(self):
result = set(super(AspOptimizerWrapperV2, self).__dir__())
if '_optimizer' in result:
result |= self._optimizer._hyper.keys()
if 'learning_rate' in self._optimizer._hyper.keys():
result.add('lr')
return list(result)
def __setattr__(self, name, value):
if name == 'lr':
name = 'learning_rate'
# Delegate setting hyperparameter to inner optimizer if the attribute does
# not exist on the AspOptimizerWrapperV2
try:
# We cannot check for the 'iterations' attribute as it cannot be set after
# it is accessed.
if name != 'iterations':
object.__getattribute__(self, name)
has_attribute = True
except AttributeError:
has_attribute = False
if (name != '_optimizer' and hasattr(self, '_optimizer') and
name in self._optimizer._hyper and not has_attribute):
self._optimizer._set_hyper(name, value)
# We need to update the wrapper's _hyper, since we store a copy of
# learning_rate.
if name == "learning_rate":
self._set_hyper(name, value)
else:
super(AspOptimizerWrapperV2, self).__setattr__(name, value)
def get_config(self):
serialized_optimizer = tf.keras.optimizers.serialize(self._optimizer)
return {
'optimizer': serialized_optimizer,
}
@classmethod
def from_config(cls, config, custom_objects=None):
return tf.keras.optimizers.deserialize(
config['optimizer'], custom_objects=custom_objects)
def check_pruned_layers(model, show_all=False, check_structured=False):
"""Checks how many layers are pruned. """
model_layers = model.submodules
layers_count = 0
pruned_count = 0
for layer in model_layers:
if type(layer) in (layers.Conv2D, layers.Dense):
layers_count += 1
total_count = tf.size(layer.kernel)
nonzero_count = tf.math.count_nonzero(layer.kernel)
nonzero_count = tf.cast(nonzero_count, 'int32')
zero_ratio = (total_count - nonzero_count) / total_count
if abs(zero_ratio - 0.5) < 0.003:
pruned_count += 1
is_structured_str = ""
if check_structured:
is_structured = True
weights = layer.kernel.numpy()
if type(layer) is layers.Conv2D:
K = layer.kernel.shape[3]
C = layer.kernel.shape[2]
R = layer.kernel.shape[0]
S = layer.kernel.shape[1]
for k in range(K):
for r in range(R):
for s in range(S):
for c_packed in range(0, C // 4):
if np.count_nonzero(weights[r, s, c_packed*4:(c_packed+1)*4, k]) > 2:
is_structured = False
if type(layer) is layers.Dense:
K = layer.kernel.shape[1]
C = layer.kernel.shape[0]
for k in range(K):
for c_packed in range(0, C // 4):
if np.count_nonzero(weights[c_packed*4:min((c_packed+1)*4,C), k]) > 2:
is_structured = False
is_structured_str = "Structured=" + str(is_structured)
if show_all:
print("[TF-ASP] layer=%s, type=%s, shape=%s: zero_ratio=%f %s" % (
layer.name, type(layer).__name__, layer.kernel.shape, zero_ratio, is_structured_str))
print("[TF-ASP] %d/%d layers (Conv2D or Dense) are pruned!" % (pruned_count,
layers_count))
return pruned_count, layers_count
|
atex-release
|
atex/structured_sparsity/tf_asp/tf_asp_optimizer_v2.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers, optimizers
from tensorflow.python.platform import tf_logging
def _m4n2_1d(matrix, patterns):
m, n = 4, 2
mat = tf.math.abs(tf.reshape(matrix, shape=(-1, m)))
pmax = tf.math.argmax(tf.linalg.matmul(mat, tf.transpose(patterns)), axis=1)
mask = tf.gather(patterns, pmax)
mask = tf.reshape(mask, shape=matrix.shape)
return mask
def get_2to4_mask(var, allow_padding, patterns):
"""Get a new 2:4 mask based on var.
Conv2D stores a 4D filter weight and Dense stores a 2D kernel weight.
For Conv2D, the filter is in the shape of (H, W, I, O) and we need to
permute it to (H*W*O, I) and prune it along I. For Dense, the kernel is
in shape of (I, O) and we need to permute it to (O, I) and prune it
along I.
Args:
var: A weight tensor from Dense or Conv2D layers.
allow_padding: Whether padding is allowed. Padding will be only applied to
the input dim of Dense layers.
Returns:
A tensor with 2:4 mask pattern. Its shape is identical to var.
"""
if var.shape.rank == 2:
matrix = tf.transpose(var, perm=[1, 0])
orig_input_dim = matrix.shape[1]
m = 4
padding_size = m - orig_input_dim % m
if allow_padding and padding_size != 0:
matrix = tf.pad(matrix, [[0, 0], [0, padding_size]], "CONSTANT")
elif var.shape.rank == 4:
matrix = tf.transpose(var, perm=[0, 1, 3, 2])
permuted_shape = matrix.shape
matrix = tf.reshape(matrix, shape=(-1, matrix.shape[-1]))
new_mask = _m4n2_1d(matrix, patterns)
if var.shape.rank == 2:
if allow_padding and padding_size != 0:
new_mask = new_mask[:, :orig_input_dim]
new_mask = tf.transpose(new_mask, perm=[1, 0])
elif var.shape.rank == 4:
new_mask = tf.reshape(new_mask, shape=permuted_shape)
new_mask = tf.transpose(new_mask, perm=[0, 1, 3, 2])
return new_mask
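# Illustration only (assumed usage sketch): building a 2:4 mask for a small
# Dense-style kernel; the pattern table mirrors the one that
# AspOptimizerWrapperV2 allocates.
def _example_get_2to4_mask():
  from itertools import permutations
  patterns = tf.convert_to_tensor(list(set(permutations([0., 0., 1., 1.]))))
  kernel = tf.random.normal([16, 8])  # (I, O) for a Dense layer
  mask = get_2to4_mask(kernel, allow_padding=False, patterns=patterns)
  # Every aligned group of four entries along the input dim keeps exactly two.
  per_group = tf.reduce_sum(tf.reshape(tf.transpose(mask), (-1, 4)), axis=1)
  assert bool(tf.reduce_all(per_group == 2.0))
  return mask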
|
atex-release
|
atex/structured_sparsity/tf_asp/pruning_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
from .permuting_search_utils import *
################################################################################################################
# Exhaustive
# Try them all
# - order of columns within a group doesn't matter
# - order of groups doesn't matter
# - we can eliminate effective duplicates by defining a unique combination to be a sorted list of sorted groups
################################################################################################################
####################################################################
# generate unique permutations
####################################################################
# check if adding a column index to a current permutation would keep it in canonical form
# assumes that perm is in canonical form already!
def is_canonical(perm, col):
# if it's a new group
if len(perm) % 4 == 0:
# every column ID < col needs to be in the permutation already
for val in range(col):
if val not in perm:
return False
# this new group needs to be sorted w.r.t. the previous group
return col > perm[-4]
# not a new group, just check to see if it will still be sorted
return col > perm[-1]
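# Example (editorial note): with group width 4, perm=[0, 1, 2, 3] accepts col=4
# (it starts a new group and all smaller columns are already placed) but rejects
# col=5; perm=[0, 1, 2, 3, 4, 5] accepts any remaining column greater than 5.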
# recursive: build a unique permutation one column index at a time
def generate_unique_combinations(built_permutation, remaining_columns, full_permutation_list, group_width, verbosity):
# base case: nothing else to add
if len(remaining_columns) == 0:
full_permutation_list.append(np.copy(built_permutation))
if verbosity > 26:
print(built_permutation)
# still more choices to make, so add each remaining column in turn if it keeps everything sorted
else:
for c in range(len(remaining_columns)):
# to satisfy our invariants (values within groups are sorted, groups are globally sorted),
# only add this column if either:
# it's starting a new group and is larger than the previous group's first entry
# OR
# it's larger than the last value in the built_permutation
col_to_add = remaining_columns[c]
if is_canonical(built_permutation, col_to_add):
if verbosity > 30 and len(built_permutation) > 0:
print(f"Adding {col_to_add} at position {len(built_permutation)} with previous value {built_permutation[-1]}")
# add the column to the running permutation, remove it from remaining columns
built_permutation.append(col_to_add)
remaining_columns.pop(c)
# recurse
generate_unique_combinations(built_permutation, remaining_columns, full_permutation_list, group_width, verbosity)
# remove the most recent column and put it back on the remaining column list where we found it (sorted)
remaining_columns.insert(c, built_permutation.pop(-1))
elif verbosity > 30:
print(f"\tSkipping adding {col_to_add} at position {len(built_permutation)} with previous value {built_permutation[-1]}")
if verbosity > 15 and (len(built_permutation) + len(remaining_columns)) >= 16 and len(built_permutation) == 4:
print(f"\t{len(full_permutation_list)} unique permutations so far...")
import pickle
import os.path
from os import path
master_unique_permutation_list = {}
def generate_all_unique_combinations(C, M, verbosity, must_use_all_groups = False):
global master_unique_permutation_list
if len(master_unique_permutation_list) == 0 and path.exists("master_list.pkl"):
with open("master_list.pkl","rb") as cache:
master_unique_permutation_list = pickle.load(cache)
if (C,M) not in master_unique_permutation_list:
full_permutation_list = []
generate_unique_combinations([0], [c for c in range(1,C)], full_permutation_list, M, verbosity)
master_unique_permutation_list[(C,M)] = full_permutation_list
if verbosity > 24:
print(f"Generated {len(full_permutation_list)} unique permutations for {C} channels and group width {M}.")
with open("master_list.pkl", "wb") as cache:
pickle.dump(master_unique_permutation_list, cache)
elif verbosity > 24:
print(f"Using {len(master_unique_permutation_list[(C,M)])} cached unique permutations for {C} channels and group width {M}.")
unique_permutations = master_unique_permutation_list[(C,M)]
return unique_permutations
# analytical solution
import math
def predict_unique_combinations(C, M):
assert(C%M==0)
G = int(C/M)
return int(int(math.factorial(C)) / (int(math.pow(math.factorial(M),G)) * math.factorial(G)))
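# Worked example (editorial note): for C=8 columns and group width M=4 there are
# G=2 groups, so the formula gives 8! / ((4!)**2 * 2!) = 40320 / (576 * 2) = 35,
# the number of ways to split 8 columns into two unordered, sorted groups of four.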
#################################################################
# exhaustively try all unique permutations
#################################################################
# exhaustively search the entire matrix
def search_matrix(matrix, group_width, verbosity):
# give up quickly if we'd go on forever
prediction = predict_unique_combinations(matrix.shape[1], group_width)
best_permutation = [c for c in range(matrix.shape[1])]
if prediction > 1e10:
return matrix, prediction, best_permutation, 0.  # keep the four-value signature expected by callers (no improvement measured)
start_time = time.perf_counter()
full_permutation_list = generate_all_unique_combinations(matrix.shape[1], group_width, verbosity)
if verbosity > 24:
print(f"Found {len(full_permutation_list)} unique permutations for an input matrix of size {matrix.shape}, trying them now...")
# found them, now try them
best_improvement = 0.0
use_cuda, E = use_gpu()
if use_cuda and matrix.shape[1] >= 8 and group_width == 4: # CUDA path only works for a group width of 4
if verbosity > 13:
print(f"Using gpu to exhaustively search {matrix.shape}")
best_improvement, best_permutation = try_permutations_on_matrix(matrix, full_permutation_list)
else:
base_sum = sum_after_2_to_4(matrix)
for i in range(1,len(full_permutation_list)):
permutation = full_permutation_list[i]
permuted = matrix[:, permutation]
cur_improvement = sum_after_2_to_4(permuted) - base_sum
if (cur_improvement > best_improvement):
best_improvement = cur_improvement
best_permutation = permutation
if verbosity > 13:
print(best_permutation, best_improvement)
seconds = time.perf_counter() - start_time
return matrix[:, best_permutation], seconds, best_permutation, best_improvement
#############
# Stripe group handling
#############
# gather stripes from a larger matrix into a single matrix
def collect_stripes(matrix, stripes, group_width):
subset = np.zeros((matrix.shape[0], len(stripes)*group_width))
for s,stripe in enumerate(stripes):
subset[...,s*group_width:s*group_width+group_width] = matrix[...,stripe*group_width:stripe*group_width+group_width]
return subset
# apply the stripe group permutation to the entire permutation
def apply_stripe_group_permutation(sgp, stripes, group_width, permutation):
new_permutation = permutation.copy()
for subset_idx in range(len(sgp)):
dst_stripe_idx = stripes[int(subset_idx / group_width)]
dst_col_idx = subset_idx % group_width
subset_val = sgp[subset_idx]
src_stripe_idx = stripes[int(subset_val / group_width)]
src_col_idx = subset_val % group_width
new_permutation[dst_stripe_idx*group_width + dst_col_idx] = permutation[src_stripe_idx*group_width + src_col_idx]
return new_permutation
# generate all possible stripe groups
def generate_stripe_groups(num_stripes, window_size):
stripe_array = [[c] for c in range(num_stripes)]
next_stripe_array = []
for w in range(1, window_size):
for g in range(len(stripe_array)):
start_c = stripe_array[g][w-1]+1
group = stripe_array[g]
for c in range(start_c, num_stripes):
new_group = group.copy()
new_group.append(c)
next_stripe_array.append(new_group)
stripe_array = next_stripe_array
next_stripe_array = []
return set(tuple(stripe_array[g]) for g in range(len(stripe_array)))
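# Example (editorial note): generate_stripe_groups(4, 2) returns the six pairs
# {(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)}, i.e. every window of two
# distinct stripes chosen from four, each listed in ascending order.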
stripe_set_config = None
stripe_set = None
# build the stripe map
def build_stripe_map(matrix, group_width, window_size, stripe_map, stripe_ids, perm_map, used_stripes, verbosity):
global stripe_set, stripe_set_config
window_size = int(window_size / group_width)
if stripe_set is None or stripe_set_config is None or stripe_set_config != (group_width, window_size):
num_stripes = int(matrix.shape[1] / group_width)
assert(group_width * num_stripes == matrix.shape[1])
stripe_set = generate_stripe_groups(num_stripes, window_size)
stripe_set_config = (group_width, window_size)
# step through each, update the stripe_map/stripe_ids if necessary
updates = 0
use_cuda, E = use_gpu()
gpu_list = []
gpu_groups = []
for i,s in enumerate(stripe_set):
sg = [] # build the group of stripes, check if any members changed
need_update = i >= len(stripe_map)
for stripe in s:
sg.append(stripe)
if stripe in used_stripes:
need_update = True
# pre-populate if we're building fresh
if i >= len(stripe_map):
stripe_ids.append(sg)
stripe_map.append(0.)
perm_map.append([c for c in range(group_width * window_size)])
# update entries if needed (only stripe_map and perm_map)
if need_update:
updates += 1
if not use_cuda: # do the work here if using the CPU
subset = collect_stripes(matrix, sg, group_width)
sub_result, sub_duration, permutation, improvement = search_matrix(subset, group_width, verbosity)
stripe_map[i] = improvement
perm_map[i] = permutation
else: # otherwise, just track the work needed to farm off to the GPU
gpu_groups.append(sg)
gpu_list.append(i)
if use_cuda: # if using the GPU, perform the work
matrix_view = np.copy(matrix).astype(np.float32).flatten()
all_permutations = generate_all_unique_combinations(window_size*group_width, group_width, verbosity)
num_permutations = len(all_permutations)
permutation_view = np.copy(np.asarray(all_permutations)).astype(np.uint32).flatten()
stripe_groups_view = np.asarray(gpu_groups).astype(np.uint32).flatten()
num_gpu_groups = len(gpu_list)
gpu_improvement = np.zeros((num_gpu_groups), dtype=np.float32).flatten()
gpu_permutation = np.zeros((num_gpu_groups), dtype=np.uint32).flatten()
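# (Editorial note) `E` is assumed to be the shared-library handle returned by
# use_gpu(); the mangled symbol below appears to demangle to
# run_build_permute_map(float*, unsigned, unsigned, unsigned*, unsigned,
# unsigned, unsigned*, unsigned, unsigned, float*, unsigned*), a CUDA launcher
# that scores every candidate permutation for every stripe group on the GPU.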
result = E._Z21run_build_permute_mapPfjjPjjjS0_jjS_S0_(ctypes.c_void_p(matrix_view.ctypes.data),
ctypes.c_uint(matrix.shape[0]),
ctypes.c_uint(matrix.shape[1]),
ctypes.c_void_p(stripe_groups_view.ctypes.data),
ctypes.c_uint(num_gpu_groups),
ctypes.c_uint(window_size),
ctypes.c_void_p(permutation_view.ctypes.data),
ctypes.c_uint(num_permutations),
ctypes.c_uint(window_size*group_width),
ctypes.c_void_p(gpu_improvement.ctypes.data),
ctypes.c_void_p(gpu_permutation.ctypes.data))
# put the data where python expects it
for i in range(len(gpu_list)):
stripe_map[gpu_list[i]] = gpu_improvement[i]
perm_map[gpu_list[i]] = all_permutations[gpu_permutation[i]]
if verbosity > 15:
print(f"Updated {updates} of {len(stripe_map)} entries in the stripe map")
return stripe_map, stripe_ids, perm_map
# start performing stripe checks
sm_perturbations = 0
sm_perturbation_limit = 0
threshold = 0.0001
def use_stripe_map(matrix, group_width, stripe_map, stripe_ids, perm_map, permutation, verbosity):
global sm_perturbations, sm_perturbation_limit, threshold
used_stripes = []
stripe_groups_optimized = 0
improvement = 0.0
# set the traversal order
ix = np.flip(np.argsort(stripe_map)) # small to large --> large to small
for i in range(len(ix)):
stripe_group_id = ix[i]
perm = perm_map[stripe_group_id].copy()
if stripe_map[stripe_group_id] <= threshold:
# perturbations
if len(used_stripes) == 0 and sm_perturbations < sm_perturbation_limit:
sm_perturbations += 1
if verbosity > 13:
print(f"Perturbing {sm_perturbations} of {sm_perturbation_limit}")
# use this permutation, but swap two channels from left/right halves to include two stripes, no matter the group size
stripe_group_id = ix[np.random.randint(len(ix))]
perm = perm_map[stripe_group_id].copy()
# a little easier to escape from
src = np.random.randint(int(len(perm)/2))
dst = int(len(perm)/2) + np.random.randint(int(len(perm)/2))
perm[src],perm[dst] = perm[dst],perm[src]
else:
break
stripe_group = stripe_ids[stripe_group_id]
# don't work on stripes we've already touched
touched_stripe = False
for stripe in stripe_group:
if stripe in used_stripes:
touched_stripe = True
if touched_stripe:
continue
# apply the permutation we've already found to this stripe group
subset = collect_stripes(matrix, stripe_group, group_width)
sub_result = subset[...,perm]
permutation = apply_stripe_group_permutation(perm, stripe_group, group_width, permutation)
# scatter the results, track what changed
for s,stripe in enumerate(stripe_group):
# see if this group is in canonical form (entry 0 a multiple of 4, contiguous values)
group = perm[s*group_width:s*group_width+group_width] # columns in this group of the used permutation
changed = False
if group[0] % 4 != 0:
changed = True
for c in range(1,group_width):
if group[c] != group[c-1]+1:
changed = True
break
# if it's not, then it changed
if changed:
used_stripes.append(stripe_group[s])
matrix[...,stripe*group_width:stripe*group_width+group_width] = sub_result[...,s*group_width:s*group_width+group_width]
improvement += stripe_map[stripe_group_id]
stripe_groups_optimized += 1
return matrix, stripe_groups_optimized, stripe_map, stripe_ids, used_stripes, improvement, permutation
# entry point for exhaustive searches - both the entire matrix, as well as stripe groups
def Exhaustive_Search(matrix, stripe_group_size=-1, escape_attempts=0, verbosity=0, permutation=None,
search_device='GPU'):
if search_device == 'CPU':
set_cpu_device()
if search_device == 'GPU':
set_gpu_device()
global sm_perturbation_limit, sm_perturbations
sm_perturbations = 0
sm_perturbation_limit = escape_attempts
if permutation is None:
permutation = [c for c in range(matrix.shape[1])]
# only support N:4 for now
group_width = 4
result = np.copy(matrix)
# if the matrix is too large for a window size of 12, subdivide, then fix up with a global optimization with a window size of 8
if group_width==4 and stripe_group_size==12 and matrix.shape[1] > 512:
stripe_split = int(matrix.shape[1]/2/group_width)
col_split = stripe_split * group_width
if verbosity > 10:
print(f"Splitting large matrix with {matrix.shape[1]} at column {col_split}, launching two searches")
result[:,:col_split], durationL, permutation[:col_split] = Exhaustive_Search(result[:,:col_split], stripe_group_size=stripe_group_size, escape_attempts=escape_attempts, verbosity=verbosity, permutation=permutation[:col_split])
result[:,col_split:], durationR, permutation[col_split:] = Exhaustive_Search(result[:,col_split:], stripe_group_size=stripe_group_size, escape_attempts=escape_attempts, verbosity=verbosity, permutation=permutation[col_split:])
escape_attempts = max(escape_attempts, 100)*10
result,duration,permutation = Exhaustive_Search(result, stripe_group_size=8, escape_attempts=escape_attempts, verbosity=verbosity, permutation=permutation)
return result, durationL+durationR+duration, permutation
global stripe_set
stripe_set = None
# a window size was requested: use the sliding stripe-group search; otherwise fall through to a single whole-matrix search
if stripe_group_size != -1:
stripe_map = []
stripe_ids = []
perm_map = []
used_stripes = []
optimized_groups_count = 0
iterations = 0
agg_improvement = 0.
cur_total_sum = sum_after_2_to_4(result)
# in practice, this work will be cached ahead of time; doing it now.
# (Reading the cached list from disk can take several seconds, which shouldn't be counted against the search, but amortized over every layer in a network)
generate_all_unique_combinations(stripe_group_size, group_width, verbosity)
start_time = time.perf_counter()
while True:
stripe_map, stripe_ids, perm_map = build_stripe_map(result, group_width, stripe_group_size, stripe_map, stripe_ids, perm_map, used_stripes, verbosity)
if (verbosity > 25):
print(F"\tbuilt stripe map: {stripe_map}\n{perm_map}\n")
result, stripe_groups_optimized, stripe_map, stripe_ids, used_stripes, improvement, permutation = use_stripe_map(result, group_width, stripe_map, stripe_ids, perm_map, permutation, verbosity)
if (verbosity > 20):
print(F"\tused stripe map: {used_stripes}, {improvement}\n")
# keep track of statistics, print occasionally
optimized_groups_count += stripe_groups_optimized
agg_improvement += improvement
if verbosity > 10:
iterations += 1
new_total_sum = sum_after_2_to_4(result)
actual_improvement = new_total_sum - cur_total_sum
duration = time.perf_counter() - start_time
print(F"\t{iterations:8} {new_total_sum:7.2f} {agg_improvement:7.2f} {actual_improvement:7.2f} {optimized_groups_count:4} {len(used_stripes)} {actual_improvement/max(stripe_groups_optimized,1):5.2f} {duration:7.2f}")
agg_improvement = 0.
optimized_groups_count = 0
cur_total_sum = new_total_sum
# converged?
if len(used_stripes) == 0:
break
duration = time.perf_counter() - start_time
else: # no sliding window, single iteration
result, duration, permutation, improvement = search_matrix(matrix, group_width, verbosity)
return result, duration, permutation
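# (Editorial usage sketch, not part of the original file.) A minimal CPU-only
# run on a random matrix; sum_after_2_to_4 and set_cpu_device are assumed to
# come from the permuting_search_utils star import above.
if __name__ == "__main__":
    demo = np.abs(np.random.randn(64, 32)).astype(np.float32)
    permuted, seconds, perm = Exhaustive_Search(
        demo, stripe_group_size=8, escape_attempts=0,
        verbosity=0, search_device='CPU')
    print("2:4 magnitude kept: %.2f -> %.2f in %.2f s" %
          (sum_after_2_to_4(demo), sum_after_2_to_4(permuted), seconds))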
|
atex-release
|
atex/structured_sparsity/tf_asp/permuting_search.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
from __future__ import absolute_import
from atex.nv_norms.python.ops.nv_norm_ops import fused_layer_norm_op
from atex.nv_norms.python.ops.nv_norm_ops import fused_layer_norm_grad_op
from atex.nv_norms.python.ops.nv_norm_ops import fused_instance_norm_op
from atex.nv_norms.python.ops.nv_norm_ops import fused_instance_norm_grad_op
from atex.nv_norms.python.ops.nv_norm_ops import _layer_norm_grad
from atex.nv_norms.python.ops.nv_norm_ops import _instance_norm_grad
from atex.nv_norms.python.ops.nv_norm_ops import LayerNormalization
from atex.nv_norms.python.ops.nv_norm_ops import InstanceNormalization
|
atex-release
|
atex/nv_norms/__init__.py
|
atex-release
|
atex/nv_norms/python/__init__.py
|
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
"""Use fused layer and instance norm ops in python."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
norm_ops = load_library.load_op_library(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
"../../../../_fused_nv_norm_ops.so"))
fused_instance_norm_op = norm_ops.fused_instance_norm
fused_instance_norm_grad_op = norm_ops.fused_instance_norm_grad
fused_layer_norm_op = norm_ops.fused_layer_norm
fused_layer_norm_grad_op = norm_ops.fused_layer_norm_grad
@ops.RegisterGradient("FusedInstanceNorm")
def _instance_norm_grad(op, *grad):
"""The gradients for `fused_instance_norm`.
Args:
op: The `fused_instance_norm` `Operation` that we are differentiating, which
we can use to find the inputs and outputs of the original op.
grad: Gradient with respect to the output of the `fused_instance_norm` op.
Returns:
Gradients with respect to the input of `fused_instance_norm`.
"""
x = op.inputs[0]
gamma = op.inputs[1]
a = op.outputs[1]
b = op.outputs[2]
dx, dgamma, dbeta = fused_instance_norm_grad_op(
grad[0], x, gamma, a, b, data_format=op.get_attr("data_format"))
return [dx, dgamma, dbeta]
@ops.RegisterGradient("FusedLayerNorm")
def _layer_norm_grad(op, *grad):
"""The gradients for `fused_layer_norm`.
Args:
op: The `fused_layer_norm` `Operation` that we are differentiating, which we
can use to find the inputs and outputs of the original op.
grad: Gradient with respect to the output of the `fused_layer_norm` op.
Returns:
Gradients with respect to the input of `fused_layer_norm`.
"""
x = op.inputs[0]
gamma = op.inputs[1]
a = op.outputs[1]
b = op.outputs[2]
dx, dgamma, dbeta = fused_layer_norm_grad_op(
grad[0], x, gamma, a, b, axis=op.get_attr("axis"))
return [dx, dgamma, dbeta]
class LayerNormalization(tf.keras.layers.Layer):
"""LayerNormalization Layer.
Args: Same as tf.keras.layers.LayerNormalization, except that axis has to
be packed (contiguous) and include the last dimension.
Output shape:
y: Same shape as input.
"""
def __init__(self, **kwargs):
super(LayerNormalization, self).__init__()
self.layer_norm = tf.keras.layers.LayerNormalization(**kwargs)
def build(self, input_shape):
self.layer_norm.build(input_shape=input_shape)
self.built = True
def call(self, inputs):
axis = self.layer_norm.axis
# Nv norm ops require the axis to be a list.
if isinstance(axis, int):
axis = [axis]
if axis != sorted(set(axis)):
raise ValueError('We only support sorted and unique axes to make sure '
'the weights have the same data layout as the Keras '
'layer.')
y, _, _ = fused_layer_norm_op(inputs,
self.layer_norm.gamma,
self.layer_norm.beta,
axis=axis,
epsilon=self.layer_norm.epsilon)
return y
def get_weights(self):
return self.layer_norm.get_weights()
def set_weights(self, weights):
self.layer_norm.set_weights(weights)
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Returns:
A list of variables.
"""
return self.layer_norm.weights
def get_config(self):
config = {
'layer_norm': self.layer_norm,
}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class InstanceNormalization(tf.keras.layers.Layer):
"""InstanceNormalization Layer.
Args: Same as tfa.layers.InstanceNormalization, except that axis only takes
the values -1 and 1.
Output shape:
y: Same shape as input.
"""
def __init__(self, axis, **kwargs):
super(InstanceNormalization, self).__init__()
policy = tf.keras.mixed_precision.global_policy()
is_mixed_policy = (
policy is not None and policy.compute_dtype != policy.variable_dtype
)
# The FusedInstanceNorm requires the fp32 weights. So, we explicitly use the
# "float32" policy to avoid the weight autocasting in the "mixed_float16"
# scenario.
if is_mixed_policy:
tf.keras.mixed_precision.set_global_policy("float32")
self.instance_norm = tfa.layers.InstanceNormalization(axis=axis,**kwargs)
if is_mixed_policy:
tf.keras.mixed_precision.set_global_policy(policy)
def build(self, input_shape):
self.instance_norm.build(input_shape=input_shape)
self.built = True
def call(self, inputs):
axis = self.instance_norm.axis
# Nv norm ops require the data format instead of axis.
if axis == 1:
data_format = "NC..."
elif axis == -1:
data_format = "N...C"
else:
raise ValueError('We only support an integer axis of 1 or -1, corresponding '
'to channel-first or channel-last layout.')
y, _, _ = fused_instance_norm_op(inputs,
self.instance_norm.weights[0],
self.instance_norm.weights[1],
data_format=data_format,
epsilon=self.instance_norm.epsilon)
return y
def get_config(self):
config = {'instance_norm': self.instance_norm}
base_config = super(InstanceNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_weights(self):
return self.instance_norm.get_weights()
def set_weights(self, weights):
self.instance_norm.set_weights(weights)
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Returns:
A list of variables.
"""
return self.instance_norm.weights
|
atex-release
|
atex/nv_norms/python/ops/nv_norm_ops.py
|
atex-release
|
atex/nv_norms/python/ops/__init__.py
|
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
"""Tests for fused instance norm ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from atex.nv_norms import InstanceNormalization
from atex.nv_norms import fused_instance_norm_op, fused_instance_norm_grad_op
def instance_norm_grad_np(x, dy, gamma, cache, is_channel_first):
N_axis = (0, )
if is_channel_first:
D_axis = tuple([i for i in range(2, x.ndim)])
C_axis = (1, )
else:
D_axis = tuple([i for i in range(1, x.ndim-1)])
C_axis = (-1, )
ND_axis = N_axis + D_axis
D = 1
for dim in D_axis:
D *= x.shape[dim]
istd = cache["istd"]
mean = cache["mean"]
expand_d = -1 if is_channel_first else 1
expand_g = -1 if is_channel_first else 0
for i in range(len(D_axis)):
istd = np.expand_dims(istd, expand_d)
mean = np.expand_dims(mean, expand_d)
gamma = np.expand_dims(gamma, expand_g)
gamma = np.expand_dims(gamma, 0)
x_mean = x - mean
dgamma = np.sum(dy * x_mean * istd, axis=ND_axis, dtype=np.float32)
dbeta = np.sum(dy, axis=ND_axis, dtype=np.float32)
dl_di = dy * gamma * istd
di_dx = 1.
dl_dvar = np.sum(dy * gamma * x_mean * (-0.5) * (istd**3), axis=D_axis,
keepdims=True, dtype=np.float32)
dvar_dx = 2. * x_mean / D
dl_dmean = np.sum(-1. * dy * gamma * istd, axis=D_axis, keepdims=True,
dtype=np.float32)
dmean_dx = 1. / D
dx = dl_di * di_dx + dl_dvar * dvar_dx + dl_dmean * dmean_dx
return dgamma, dbeta, dx
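# (Editorial note) Written with x_hat = (x - mean) * istd, the expression above
# folds to the familiar per-(n, c) form
#   dx = gamma * istd * (dy - mean_D(dy) - x_hat * mean_D(dy * x_hat)),
#   dgamma = sum_{N,D}(dy * x_hat),  dbeta = sum_{N,D}(dy),
# since the extra d var / d mean term vanishes (x - mean sums to zero over D).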
def get_input_shape(N, C, D, x_rank, axis):
assert axis in (1, -1)
x_shape = [N]
if axis == 1:
x_shape += [C] + [D] * (x_rank - 2)
else:
x_shape += [D] * (x_rank - 2) + [C]
return x_shape
class NvNormsInstanceNormOpTest(test.TestCase):
def _runForward(self, x_shape, axis, data_dtype=tf.float32, epsilon=0.001):
assert axis in (1, -1)
x = tf.random.normal(shape=x_shape, stddev=10.0, dtype=data_dtype)
gamma = tf.constant(
np.random.normal(size=x_shape[axis]),
dtype=tf.float32)
beta = tf.constant(
np.random.normal(size=x_shape[axis]),
dtype=tf.float32)
instanceN_ref = tfa.layers.InstanceNormalization(
axis=axis, center=True, scale=True, epsilon=epsilon)
instanceN_ref.build(input_shape=x_shape)
instanceN_ref.set_weights([gamma, beta])
y_ref = instanceN_ref(x)
if axis == 1:
reduce_axis = tuple([i for i in range(2, x.ndim)])
else:
reduce_axis = tuple([i for i in range(1, x.ndim-1)])
mean_ref, var_ref = tf.nn.moments(x, axes=reduce_axis)
mean_ref = tf.cast(mean_ref, tf.float32)
var_ref = tf.cast(var_ref, tf.float32)
inv_var_ref = tf.constant(1. / (var_ref + epsilon))
# For ops fused_instance_norm_op, fused_instance_norm_grad_op, they take
# argument data_format in ("NC...", "N...C", "NCHW", "NHWC", "NCDHW",
# "NDHWC")
op_data_format = "NC..." if axis == 1 else "N...C"
y, mean, inv_std = fused_instance_norm_op(
x, gamma, beta, epsilon=epsilon, data_format=op_data_format)
self.assertAllClose(y, y_ref, atol=0.01)
self.assertAllClose(mean, mean_ref, atol=0.01)
self.assertAllClose(inv_std**2, inv_var_ref, atol=0.05)
def _runBackward(self, x_shape, axis, data_dtype=tf.float32, epsilon=0.001):
assert axis in (1, -1)
x_np = np.random.normal(0.0, 10.0, size=x_shape).astype(np.float32)
dy_np = np.random.normal(size=x_shape).astype(np.float32)
gamma_np = np.random.normal(size=x_shape[axis]).astype(np.float32)
x = tf.constant(x_np, dtype=data_dtype)
dy = tf.constant(dy_np, dtype=data_dtype)
gamma = tf.constant(gamma_np, dtype=tf.float32)
if axis == 1:
reduce_axis = tuple([i for i in range(2, x.ndim)])
else:
reduce_axis = tuple([i for i in range(1, x.ndim-1)])
mean, var = tf.nn.moments(x, axes=reduce_axis)
inv_std = tf.constant(1. / np.sqrt(var + epsilon), dtype=tf.float32)
mean = tf.cast(mean, tf.float32)
cache = {}
cache["istd"] = inv_std
cache["mean"] = mean
grad_op_data_format = "NC..." if axis == 1 else "N...C"
dx, dgamma, dbeta = fused_instance_norm_grad_op(
dy, x, gamma, mean, inv_std, data_format=grad_op_data_format)
dgamma_ref, dbeta_ref, dx_ref = instance_norm_grad_np(
x_np, dy_np, gamma_np, cache, axis == 1)
self.assertAllClose(dx_ref, dx, atol=0.02)
self.assertAllClose(dbeta_ref, dbeta, atol=0.05)
self.assertAllClose(dgamma_ref, dgamma, atol=0.05)
@test_util.run_gpu_only
def testFusedInstanceNormOp(self):
N, C = 2, 32
with self.cached_session(use_gpu=True) as sess:
x_ranks = [4, 5]
D_exps = [3, 4, 5, 6]
axes = [1, -1]
dtypes = [tf.float16, tf.float32]
for axis, x_rank, D_exp, dtype in itertools.product(axes, x_ranks, D_exps, dtypes):
x_shape = get_input_shape(N, C, 2**D_exp, x_rank, axis)
self._runForward(x_shape, axis, dtype)
# only test float32 for backward given the baseline is in float32
self._runBackward(x_shape, axis)
@test_util.run_gpu_only
def testFusedInstanceNormOpWithNonTypicalInputShapes(self):
with self.cached_session(use_gpu=True):
N, C = 1, 32
axes = [1, -1]
features = [
[1, 11],
[3, 4],
[1, 31],
[1, 4001],
[61, 82],
[113, 145],
[198, 331],
[179, 2929]]
for D, axis in itertools.product(features, axes):
if axis == 1:
x_shape = [N, C] + D
else:
x_shape = [N] + D + [C]
self._runForward(x_shape, axis)
self._runBackward(x_shape, axis)
#4 ,1, 1 forward on volta
@test_util.run_gpu_only
def testFusedInstanceNormEmptyInput(self):
with self.cached_session(use_gpu=True) as sess:
x = tf.constant([], dtype=tf.float32)
x = tf.reshape(x, shape=(0, 0, 0, 0, 0))
gamma = tf.constant([], dtype=tf.float32)
beta = tf.constant([], dtype=tf.float32)
data_format = "NC..."
y, mean, inv_var = fused_instance_norm_op(
x, gamma, beta, data_format=data_format)
self.assertAllEqual(y.shape, [0, 0, 0, 0, 0])
self.assertAllEqual(mean.shape, [0, 0])
self.assertAllEqual(inv_var.shape, [0, 0])
@test_util.run_gpu_only
def testFusedInstanceNormGradEmptyInput(self):
with self.cached_session(use_gpu=True) as sess:
dy = tf.constant([], dtype=tf.float32)
dy = tf.reshape(dy, shape=(0, 0, 0, 0, 0))
x = tf.constant([], dtype=tf.float32)
x = tf.reshape(x, shape=(0, 0, 0, 0, 0))
gamma = tf.constant([], dtype=tf.float32)
mean = tf.constant([], dtype=tf.float32)
inv_var = tf.constant([], dtype=tf.float32)
mean = tf.reshape(mean, shape=(0, 0))
inv_var = tf.reshape(inv_var, shape=(0, 0))
data_format = "N...C"
dx, dgamma, dbeta = fused_instance_norm_grad_op(
dy, x, gamma, mean, inv_var, data_format=data_format)
self.assertAllEqual(dx.shape, [0, 0, 0, 0, 0])
self.assertAllEqual(dgamma.shape, [0])
self.assertAllEqual(dbeta.shape, [0])
class FusedInstanceNormLayerTest(test.TestCase):
def _runForward(self, x_shape, axis, epsilon=0.001):
assert axis in (1, -1)
x = tf.constant(np.random.normal(size=x_shape), dtype=tf.float32)
gamma = tf.constant(
np.random.normal(size=x_shape[axis]),
dtype=tf.float32)
beta = tf.constant(
np.random.normal(size=x_shape[axis]),
dtype=tf.float32)
instanceN_ref = tfa.layers.InstanceNormalization(
axis=axis, center=True, scale=True, epsilon=epsilon)
instanceN_ref.build(input_shape=x_shape)
instanceN_ref.set_weights([gamma, beta])
y_ref = instanceN_ref(x)
instanceN = InstanceNormalization(axis=axis)
instanceN.build(input_shape=x_shape)
instanceN.set_weights([gamma, beta])
y = instanceN(x)
self.assertAllClose(y, y_ref, rtol=0.01, atol=0.01)
def _runBackward(self, x_shape, axis, epsilon=0.01):
assert axis in (1, -1)
x = tf.constant(np.random.normal(size=x_shape), dtype=tf.float16)
gamma = tf.constant(np.random.normal(size=x_shape[axis]),
dtype=tf.float32)
beta = tf.constant(np.random.normal(size=x_shape[axis]),
dtype=tf.float32)
instanceN = InstanceNormalization(axis=axis)
instanceN.build(input_shape=x_shape)
instanceN.set_weights([gamma, beta])
instanceN_ref = tfa.layers.InstanceNormalization(axis=axis)
instanceN_ref.build(input_shape=x_shape)
instanceN_ref.set_weights([gamma, beta])
y_true = tf.random.normal(shape=x_shape)
def get_grads(instanceN):
with tf.GradientTape() as tape:
tape.watch(x)
y = instanceN(x)
loss = tf.math.reduce_mean(
tf.keras.losses.binary_crossentropy(y, y_true))
dx, (dgamma, dbeta) = tape.gradient(loss, [x, instanceN.variables])
return dx, dgamma, dbeta
dx_ref, dgamma_ref, dbeta_ref = get_grads(instanceN_ref)
dx, dgamma, dbeta = get_grads(instanceN)
self.assertAllClose(dx_ref, dx, atol=0.05)
self.assertAllClose(dbeta_ref, dbeta, atol=0.05)
self.assertAllClose(dgamma_ref, dgamma, atol=0.05)
@test_util.run_gpu_only
def testFusedInstanceNorm(self):
N, C = 2, 32
with self.cached_session(use_gpu=True) as sess:
rank, features = 5, 4
axes = [-1, 1]
for axis in axes:
x_shape = get_input_shape(N, C, 2**features, rank, axis)
self._runForward(x_shape, axis)
self._runBackward(x_shape, axis)
if __name__ == '__main__':
test.main()
|
atex-release
|
atex/nv_norms/tests/fused_instance_norm_test.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
"""Tests for fused layer norm ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from atex.nv_norms import fused_layer_norm_op, fused_layer_norm_grad_op
from atex.nv_norms import LayerNormalization
def layer_norm_grad_np(x, dy, gamma, cache, axis):
assert x.ndim >= 2, "x and dy must be at least 2-D."
gamma_shape = gamma.shape
x_shape = x.shape
D = 1
for a in axis:
D *= x_shape[a]
N = x.size // D
x = x.reshape((N, D))
dy = dy.reshape((N, D))
gamma = gamma.reshape((D, ))
N_axis = (0, )
D_axis = (1, )
N = x.shape[0]
D = x.shape[1]
istd = cache["istd"].numpy()
mean = cache['mean'].numpy()
# We manually expand istd and mean from (N,) to (N,1) to facilitate the
# broadcasting in the following computation.
mean = np.expand_dims(mean, -1)
istd = np.expand_dims(istd, -1)
x_mean = x - mean
dgamma = np.sum(dy * x_mean * istd, axis=N_axis)
dbeta = np.sum(dy, axis=N_axis)
dl_di = dy * gamma * istd
di_dx = 1.
dl_dvar = np.sum(dy * gamma * x_mean * (-0.5) * (istd**3), axis=D_axis,
keepdims=True)
dvar_dx = 2. * x_mean / D
dl_dmean = np.sum(-1. * dy * gamma * istd, axis=D_axis, keepdims=True)
dmean_dx = 1. / D
dx = dl_di * di_dx + dl_dvar * dvar_dx + dl_dmean * dmean_dx
dgamma = dgamma.reshape(gamma_shape)
dbeta = dbeta.reshape(gamma_shape)
dx = dx.reshape(x_shape)
return dgamma, dbeta, dx
class NvNormsLayerNormOpTest(test.TestCase):
def _runForward(self, x_shape, data_dtype, axis, epsilon=0.001):
validated_axis = sorted(set([i % len(x_shape) for i in axis]))
weight_shape = [x_shape[i] for i in validated_axis]
x = tf.random.normal(shape=x_shape, stddev=10.0, dtype=tf.float32)
gamma = tf.constant(np.random.normal(size=weight_shape), dtype=tf.float32)
beta = tf.constant(np.random.normal(size=weight_shape), dtype=tf.float32)
ref_ln = tf.keras.layers.LayerNormalization(
axis=validated_axis, center=True, scale=True, epsilon=epsilon)
ref_ln.build(input_shape=x_shape)
ref_ln.set_weights([gamma, beta])
y_ref = ref_ln(x)
mean_ref, var_ref = tf.nn.moments(x, axes=validated_axis)
inv_var_ref = tf.constant(1. / (var_ref + epsilon), dtype=tf.float32)
mean_ref = tf.reshape(mean_ref, shape=-1)
var_ref = tf.reshape(var_ref, shape=-1)
y, mean, inv_std = fused_layer_norm_op(x, gamma, beta, axis=axis)
self.assertAllClose(y, y_ref, rtol=0.01, atol=0.01)
self.assertAllClose(mean, mean_ref, rtol=0.01, atol=0.01)
self.assertAllClose(inv_std**2, inv_var_ref, rtol=0.01, atol=0.01)
def _runBackward(self, x_shape, data_dtype, axis, epsilon=0.001):
validated_axis = sorted(set([i % len(x_shape) for i in axis]))
weight_shape = [x_shape[i] for i in validated_axis]
x_np = np.random.normal(0.0, 10.0, size=x_shape)
dy_np = np.random.normal(size=x_shape)
gamma_np = np.random.normal(size=weight_shape)
x = tf.constant(x_np, dtype=data_dtype)
dy = tf.constant(dy_np, dtype=data_dtype)
gamma = tf.constant(gamma_np, dtype=tf.float32)
mean, var = tf.nn.moments(x, axes=validated_axis)
mean = tf.reshape(mean, shape=-1)
var = tf.reshape(var, shape=-1)
inv_std = tf.constant(1. / np.sqrt(var + epsilon), dtype=tf.float32)
cache = {}
cache["istd"] = tf.cast(inv_std, tf.float32)
cache["mean"] = tf.cast(mean, tf.float32)
dx, dgamma, dbeta = fused_layer_norm_grad_op(
dy, x, gamma, cache["mean"], cache["istd"], axis=axis)
dgamma_ref, dbeta_ref, dx_ref = layer_norm_grad_np(
x_np, dy_np, gamma_np, cache, axis=validated_axis)
self.assertAllClose(dx_ref, dx, rtol=0.01, atol=0.08)
self.assertAllClose(dbeta_ref, dbeta, rtol=0.01, atol=0.01)
self.assertAllClose(dgamma_ref, dgamma, rtol=0.02, atol=0.02)
@test_util.run_gpu_only
def testFusedLayerNormOp(self):
with self.cached_session(use_gpu=True):
dtypes = [tf.float32, tf.float16]
ranks = [2, 3]
batches = [1, 2, 5, 8]
features = [4, 8, 10, 15, 18, 19]
for dtype, rank, N, D in itertools.product(dtypes, ranks, batches,
features):
axis = [-1] if rank == 2 else [-2, -1]
x_shape = [N] * (rank - 1)
x_shape.append(2**D)
self._runForward(x_shape, dtype, axis)
self._runBackward(x_shape, dtype, axis)
@test_util.run_gpu_only
def testFusedLayerNormOpWithNonTypicalInputShapes(self):
with self.cached_session(use_gpu=True):
dtypes = [tf.float16, tf.float32]
N = 2
features = [11, 12, 31, 2003, 4001, 5002, 2**14 + 1, 2**16 + 2, 2**18 + 3]
for dtype, D in itertools.product(dtypes, features):
x_shape = [N, D]
self._runForward(x_shape, dtype, [-1])
self._runBackward(x_shape, dtype, [-1])
@test_util.run_gpu_only
def testFusedLayerNormOpWithBrittleShapes(self):
with self.cached_session(use_gpu=True):
x_shapes = [
[12000, 128, 8, 8],
[1200, 128, 8, 6],
[2, 128, 8, 8],
[1, 64, 384, 276]]
for x_shape in x_shapes:
self._runForward(x_shape, tf.float32, [1, 2, 3])
self._runBackward(x_shape, tf.float32, [1, 2, 3])
@test_util.run_gpu_only
def testFusedLayerNormEmptyInput(self):
with self.cached_session(use_gpu=True):
x = tf.constant([], dtype=tf.float32)
x = tf.reshape(x, shape=(0, 0))
gamma = tf.constant([], dtype=tf.float32)
beta = tf.constant([], dtype=tf.float32)
y, mean, inv_var = fused_layer_norm_op(x, gamma, beta)
self.assertAllEqual(y.shape, [0, 0])
self.assertAllEqual(mean.shape, [0])
self.assertAllEqual(inv_var.shape, [0])
@test_util.run_gpu_only
def testFusedLayerNormGradEmptyInput(self):
with self.cached_session(use_gpu=True):
dy = tf.constant([], dtype=tf.float32)
dy = tf.reshape(dy, shape=(0, 0))
x = tf.constant([], dtype=tf.float32)
x = tf.reshape(x, shape=(0, 0))
gamma = tf.constant([], dtype=tf.float32)
mean = tf.constant([], dtype=tf.float32)
inv_var = tf.constant([], dtype=tf.float32)
dx, dgamma, dbeta = fused_layer_norm_grad_op(
dy, x, gamma, mean, inv_var)
self.assertAllEqual(dx.shape, [0, 0])
self.assertAllEqual(dgamma.shape, [0])
self.assertAllEqual(dbeta.shape, [0])
class NvNormsLayerNormLayerTest(test.TestCase):
def _runForward(self, x_shape, data_dtype, axis, epsilon=0.001):
if isinstance(axis, int):
weight_shape = x_shape[axis]
else:
weight_shape = [x_shape[i] for i in axis]
x = tf.random.uniform(shape=x_shape, minval=10.0,
maxval=1000.0, dtype=data_dtype)
gamma = tf.constant(np.random.normal(size=weight_shape), dtype=tf.float32)
beta = tf.constant(np.random.normal(size=weight_shape), dtype=tf.float32)
layerN_ref = tf.keras.layers.LayerNormalization(
axis=axis, center=True, scale=True, epsilon=epsilon)
layerN_ref.build(input_shape=x_shape)
layerN_ref.set_weights([gamma, beta])
y_ref = layerN_ref(x)
layerN = LayerNormalization(axis=axis)
layerN.build(input_shape=x_shape)
layerN.set_weights([gamma, beta])
y = layerN(x)
self.assertAllClose(y, y_ref, rtol=0.01, atol=0.01)
def _runBackward(self, x_shape, data_dtype, axis):
if isinstance(axis, int):
weight_shape = x_shape[axis]
else:
weight_shape = [x_shape[i] for i in axis]
x = tf.constant(np.random.normal(size=x_shape), dtype=data_dtype)
gamma = tf.constant(np.random.normal(size=weight_shape), dtype=tf.float32)
beta = tf.constant(np.random.normal(size=weight_shape), dtype=tf.float32)
layerN = LayerNormalization(axis=axis)
layerN.build(input_shape=x_shape)
layerN.set_weights([gamma, beta])
layerN_ref = tf.keras.layers.LayerNormalization(
axis=axis, center=True, scale=True)
layerN_ref.build(input_shape=x_shape)
layerN_ref.set_weights([gamma, beta])
def get_grads(layerN):
with tf.GradientTape() as tape:
tape.watch(x)
y = layerN(x)
dx, (dgamma, dbeta) = tape.gradient(y, [x, layerN.variables])
return dx, dgamma, dbeta
dx, dgamma, dbeta = get_grads(layerN)
dx_ref, dgamma_ref, dbeta_ref = get_grads(layerN_ref)
self.assertAllClose(dx_ref, dx, rtol=0.01, atol=0.01)
self.assertAllClose(dbeta_ref, dbeta, rtol=0.01, atol=0.01)
self.assertAllClose(dgamma_ref, dgamma, rtol=0.02, atol=0.02)
@test_util.run_gpu_only
def testFusedLayerNorm(self):
with self.cached_session(use_gpu=True):
dtypes = [tf.float32, tf.float16]
rank, N, D = 3, 8, 8
for dtype in dtypes:
axis = [-1] if rank == 2 else [-2, -1]
x_shape = [N] * (rank - 1)
x_shape.append(2 ** D)
self._runForward(x_shape, dtype, axis)
self._runBackward(x_shape, dtype, axis)
@test_util.run_gpu_only
def testFusedLayerNormWithDifferentAxis(self):
axes = [[1, 2], [-2, -1], [1, -1], [-2, 2], -1]
for axis in axes:
self._runForward([2, 3, 4], tf.float32, axis)
self._runBackward([2, 3, 4], tf.float32, axis)
@test_util.run_gpu_only
def testLayerWithIntegerAxis(self):
axes = [-1, 2]
for axis in axes:
self._runForward([2, 3, 4], tf.float32, axis)
self._runBackward([2, 3, 4], tf.float32, axis)
if __name__ == '__main__':
test.main()
|
atex-release
|
atex/nv_norms/tests/fused_layer_norm_test.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
import argparse
from atex import nv_norms
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers, models
parser = argparse.ArgumentParser(description="Use --nvops to replace InstanceN")
parser.add_argument('--nvops', action='store_true',
help="""Whether to use the fused InstanceNorm.""")
args, _ = parser.parse_known_args()
N, H, W, C = (2, 32, 32, 8)
k, c, r, s = (4, C, 2, 2)
use_nv_norms = args.nvops
axis = -1
conv2d = layers.Conv2D(k, (r, s), padding='same')
instanceN = tfa.layers.InstanceNormalization(axis=axis)
if use_nv_norms:
instanceN = nv_norms.InstanceNormalization(axis=axis)
def model():
x = layers.Input(shape=(H, W, C), batch_size=None)
y = conv2d(x)
z = instanceN(y)
return models.Model(x, z, name='toy_model')
toy_model = model()
@tf.function
def train_step(x):
with tf.GradientTape() as tape:
y = toy_model(x)
loss = tf.reduce_sum(y)
if use_nv_norms:
# The weights in instanceN are no longer tracked in the toy_model.
grads = tape.gradient(loss, [toy_model.variables, instanceN.variables])
else:
grads = tape.gradient(loss, [toy_model.variables])
return grads
data = tf.random.normal((N, H, W, C))
g = train_step(data)
_ = g[0][0].numpy() # sync GPU
print("Done with", "Fused instanceN" if use_nv_norms else "tfa instanceN")
|
atex-release
|
atex/nv_norms/examples/sample_instanceN.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
import argparse
from atex import nv_norms
import tensorflow as tf
from tensorflow.keras import layers, models
parser = argparse.ArgumentParser(description="Use --nvops to replace LayerN")
parser.add_argument('--nvops', action='store_true',
help="""Whether to use the fused LayerNorm.""")
args, _ = parser.parse_known_args()
N, H, W, C = (10, 3, 3, 4)
k, c, r, s = (4, C, 2, 2)
use_nv_norms = args.nvops
conv2d = layers.Conv2D(k, (r, s), padding='same')
layerN = layers.LayerNormalization(axis=(1, 2, 3))
if use_nv_norms:
layerN = nv_norms.LayerNormalization(axis=(1, 2, 3))
def model():
x = layers.Input(shape=(H, W, C), batch_size=None)
y = conv2d(x)
z = layerN(y)
return models.Model(x, z, name='toy_model')
toy_model = model()
@tf.function
def train_step(x):
with tf.GradientTape() as tape:
y = toy_model(x)
loss = tf.reduce_sum(y)
if use_nv_norms:
# The weights in layerN are no longer tracked in the toy_model.
grads = tape.gradient(loss, [toy_model.variables, layerN.variables])
else:
grads = tape.gradient(loss, [toy_model.variables])
return grads
data = tf.random.normal((N, H, W, C))
g = train_step(data)
_ = g[0][0].numpy() # sync GPU
print("Done with", "Fused LayerN" if use_nv_norms else "Keras LayerN")
|
atex-release
|
atex/nv_norms/examples/sample_layerN.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
import argparse
from atex import nv_norms
import tensorflow as tf
import time
from tensorflow.keras import mixed_precision
parser = argparse.ArgumentParser(description='Benchmark configs')
parser.add_argument('--xla', action='store_true', help='Use XLA for reference')
args = parser.parse_args()
use_xla = args.xla
def train_step_func(x, layerN):
with tf.GradientTape() as tape:
tape.watch(x)
y = layerN(x)
loss = tf.reduce_sum(y)
dx, (dgamma, dbeta) = tape.gradient(loss, [x, layerN.variables])
return dx, dgamma, dbeta
def benchmark_fn(input_shape, use_nv_ops):
mixed_precision.set_global_policy('mixed_float16')
warmup = 10
repeat = 20
train_step = train_step_func
if use_nv_ops:
layerN = nv_norms.LayerNormalization(axis=(1,))
else:
layerN = tf.keras.layers.LayerNormalization(axis=(1,))
if use_xla:
train_step = tf.function(train_step, jit_compile=True)
layerN.build(input_shape)
data = tf.random.normal(input_shape)
for i in range(warmup):
dx, dgamma, dbeta = train_step(data, layerN)
_ = tf.reduce_sum(dx).numpy()
start = time.time()
for i in range(repeat):
dx, dgamma, dbeta = train_step(data, layerN)
_ = tf.reduce_sum(dx).numpy()
result = time.time() - start
return 1000 * result / repeat
input_shapes = [
(10, 10000000),
(100, 1000000),
(1000, 100000),
(10000, 10000),
(100000, 1000),
(1000000, 100),
(10000000, 10),
(4, 400001), # Non-typical shapes
(4, 10000001),
]
for input_shape in input_shapes:
assert len(input_shape) == 2
time_tf = benchmark_fn(input_shape, False)
time_nv = benchmark_fn(input_shape, True)
print("Input: {} {} Time(ms): TF: {:0.2f} NV: {:0.2f}".format(
input_shape[0], input_shape[1], time_tf, time_nv))
|
atex-release
|
atex/nv_norms/benchmarks/benchmark_layer_norm.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# ==============================================================================
import argparse
from atex import nv_norms
import tensorflow as tf
import tensorflow_addons as tfa
import time
from tensorflow.keras import mixed_precision
parser = argparse.ArgumentParser(description='Benchmark configs')
parser.add_argument('--xla', action='store_true', help='Use XLA for reference')
args = parser.parse_args()
use_xla = args.xla
def train_step_func(x, instanceN):
with tf.GradientTape() as tape:
tape.watch(x)
y = instanceN(x)
loss = tf.reduce_sum(y)
dx, (dgamma, dbeta) = tape.gradient(loss, [x, instanceN.variables])
return dx, dgamma, dbeta
def benchmark_fn(input_shape, use_nvops, axis):
mixed_precision.set_global_policy('mixed_float16')
warmup = 10
repeat = 20
train_step = train_step_func
if use_nvops:
instanceN = nv_norms.InstanceNormalization(axis=axis)
else:
instanceN = tfa.layers.InstanceNormalization(axis=axis)
if use_xla:
train_step = tf.function(train_step, jit_compile=True)
instanceN.build(input_shape)
data = tf.random.normal(input_shape)
for i in range(warmup):
dx, dgamma, dbeta = train_step(data, instanceN)
_ = tf.reduce_sum(dx).numpy()
start = time.time()
for i in range(repeat):
dx, dgamma, dbeta = train_step(data, instanceN)
_ = tf.reduce_sum(dx).numpy()
result = time.time() - start
return 1000 * result / repeat
# denote N C D/H/W dim
input_shapes = [
(2, 32, 6),
(2, 32, 128),
(2, 64, 128),
(4, 32, 128),
(4, 64, 64),
(8, 32, 64),
(8, 64, 64),
(8, 128, 64),
(4, 256, 32),
(8, 256, 32),
]
def get_shape(x, channel_last):
if channel_last:
return (x[0], x[2], x[2], x[2], x[1])
else:
return (x[0], x[1], x[2], x[2], x[2])
for input_shape in input_shapes:
expanded_shape = get_shape(input_shape, True)
time_tf = benchmark_fn(expanded_shape, False, axis=-1)
time_nv = benchmark_fn(expanded_shape, True, axis=-1)
print("Input: {} Time(ms): TF: {:0.2f} NV: {:0.2f}".format(
expanded_shape, time_tf, time_nv))
print("End of channel last layout.")
for input_shape in input_shapes:
expanded_shape = get_shape(input_shape, False)
time_tf = benchmark_fn(expanded_shape, False, axis=1)
time_nv = benchmark_fn(expanded_shape, True, axis=1)
print("Input: {} Time(ms): TF: {:0.2f} NV: {:0.2f}".format(
expanded_shape, time_tf, time_nv))
print("End of channel first layout.")
|
atex-release
|
atex/nv_norms/benchmarks/benchmark_instance_norm.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.utils as utils
import torch.nn as nn
from smooth_filter import smooth_filter
from process_stylization import Timer, memory_limit_image_resize
from scipy.io import loadmat
colors = loadmat('segmentation/data/color150.mat')['colors']
def overlay(img, pred_color, blend_factor=0.4):
import cv2
edges = cv2.Canny(pred_color, 20, 40)
edges = cv2.dilate(edges, np.ones((5,5),np.uint8), iterations=1)
out = (1-blend_factor)*img + blend_factor * pred_color
edge_pixels = (edges==255)
new_color = [0,0,255]
for i in range(0,3):
timg = out[:,:,i]
timg[edge_pixels]=new_color[i]
out[:,:,i] = timg
return out
def visualize_result(label_map):
label_map = label_map.astype('int')
label_map_rgb = np.zeros((label_map.shape[0], label_map.shape[1], 3), dtype=np.uint8)
for label in np.unique(label_map):
label_map_rgb += (label_map == label)[:, :, np.newaxis] * \
np.tile(colors[label],(label_map.shape[0], label_map.shape[1], 1))
return label_map_rgb
class SegReMapping:
def __init__(self, mapping_name, min_ratio=0.02):
self.label_mapping = np.load(mapping_name)
self.min_ratio = min_ratio
def cross_remapping(self, cont_seg, styl_seg):
cont_label_info = []
new_cont_label_info = []
for label in np.unique(cont_seg):
cont_label_info.append(label)
new_cont_label_info.append(label)
style_label_info = []
new_style_label_info = []
for label in np.unique(styl_seg):
style_label_info.append(label)
new_style_label_info.append(label)
cont_set_diff = set(cont_label_info) - set(style_label_info)
# Find the labels that are not covered by the style
# Assign them to the best matched region in the style region
for s in cont_set_diff:
cont_label_index = cont_label_info.index(s)
for j in range(self.label_mapping.shape[0]):
new_label = self.label_mapping[j, s]
if new_label in style_label_info:
new_cont_label_info[cont_label_index] = new_label
break
new_cont_seg = cont_seg.copy()
for i,current_label in enumerate(cont_label_info):
new_cont_seg[(cont_seg == current_label)] = new_cont_label_info[i]
cont_label_info = []
for label in np.unique(new_cont_seg):
cont_label_info.append(label)
styl_set_diff = set(style_label_info) - set(cont_label_info)
valid_styl_set = set(style_label_info) - set(styl_set_diff)
for s in styl_set_diff:
style_label_index = style_label_info.index(s)
for j in range(self.label_mapping.shape[0]):
new_label = self.label_mapping[j, s]
if new_label in valid_styl_set:
new_style_label_info[style_label_index] = new_label
break
new_styl_seg = styl_seg.copy()
for i,current_label in enumerate(style_label_info):
# print("%d -> %d" %(current_label,new_style_label_info[i]))
new_styl_seg[(styl_seg == current_label)] = new_style_label_info[i]
return new_cont_seg, new_styl_seg
def self_remapping(self, seg):
init_ratio = self.min_ratio
# Assign label with small portions to label with large portion
new_seg = seg.copy()
[h,w] = new_seg.shape
n_pixels = h*w
# First scan through what are the available labels and their sizes
label_info = []
ratio_info = []
new_label_info = []
for label in np.unique(seg):
ratio = np.sum(np.float32((seg == label))[:])/n_pixels
label_info.append(label)
new_label_info.append(label)
ratio_info.append(ratio)
for i,current_label in enumerate(label_info):
if ratio_info[i] < init_ratio:
for j in range(self.label_mapping.shape[0]):
new_label = self.label_mapping[j,current_label]
if new_label in label_info:
index = label_info.index(new_label)
if index >= 0:
if ratio_info[index] >= init_ratio:
new_label_info[i] = new_label
break
for i,current_label in enumerate(label_info):
new_seg[(seg == current_label)] = new_label_info[i]
return new_seg
def stylization(stylization_module, smoothing_module, content_image_path, style_image_path, content_seg_path,
style_seg_path, output_image_path,
cuda, save_intermediate, no_post, label_remapping, output_visualization=False):
# Load image
with torch.no_grad():
cont_img = Image.open(content_image_path).convert('RGB')
styl_img = Image.open(style_image_path).convert('RGB')
new_cw, new_ch = memory_limit_image_resize(cont_img)
new_sw, new_sh = memory_limit_image_resize(styl_img)
cont_pilimg = cont_img.copy()
styl_pilimg = styl_img.copy()
cw = cont_pilimg.width
ch = cont_pilimg.height
try:
cont_seg = Image.open(content_seg_path)
styl_seg = Image.open(style_seg_path)
cont_seg = cont_seg.resize((new_cw, new_ch), Image.NEAREST)
styl_seg = styl_seg.resize((new_sw, new_sh), Image.NEAREST)
except Exception:
cont_seg = []
styl_seg = []
cont_img = transforms.ToTensor()(cont_img).unsqueeze(0)
styl_img = transforms.ToTensor()(styl_img).unsqueeze(0)
if cuda:
cont_img = cont_img.cuda(0)
styl_img = styl_img.cuda(0)
stylization_module.cuda(0)
# cont_img = Variable(cont_img, volatile=True)
# styl_img = Variable(styl_img, volatile=True)
cont_seg = np.asarray(cont_seg)
styl_seg = np.asarray(styl_seg)
cont_seg = label_remapping.self_remapping(cont_seg)
styl_seg = label_remapping.self_remapping(styl_seg)
cont_seg, styl_seg = label_remapping.cross_remapping(cont_seg, styl_seg)
if output_visualization:
import cv2
cont_seg_vis = visualize_result(cont_seg)
styl_seg_vis = visualize_result(styl_seg)
cont_seg_vis = overlay(cv2.imread(content_image_path), cont_seg_vis)
styl_seg_vis = overlay(cv2.imread(style_image_path), styl_seg_vis)
cv2.imwrite(content_seg_path + '.visualization.jpg', cont_seg_vis)
cv2.imwrite(style_seg_path + '.visualization.jpg', styl_seg_vis)
if save_intermediate:
with Timer("Elapsed time in stylization: %f"):
stylized_img = stylization_module.transform(cont_img, styl_img, cont_seg, styl_seg)
if ch != new_ch or cw != new_cw:
print("De-resize image: (%d,%d)->(%d,%d)" % (new_cw, new_ch, cw, ch))
stylized_img = nn.functional.upsample(stylized_img, size=(ch, cw), mode='bilinear')
utils.save_image(stylized_img.data.cpu().float(), output_image_path, nrow=1, padding=0)
with Timer("Elapsed time in propagation: %f"):
out_img = smoothing_module.process(output_image_path, content_image_path)
out_img.save(output_image_path)
if not cuda:
print("NotImplemented: The CPU version of smooth filter has not been implemented currently.")
return
if no_post is False:
with Timer("Elapsed time in post processing: %f"):
out_img = smooth_filter(output_image_path, content_image_path, f_radius=15, f_edge=1e-1)
out_img.save(output_image_path)
else:
with Timer("Elapsed time in stylization: %f"):
stylized_img = stylization_module.transform(cont_img, styl_img, cont_seg, styl_seg)
if ch != new_ch or cw != new_cw:
print("De-resize image: (%d,%d)->(%d,%d)" % (new_cw, new_ch, cw, ch))
stylized_img = nn.functional.upsample(stylized_img, size=(ch, cw), mode='bilinear')
grid = utils.make_grid(stylized_img.data, nrow=1, padding=0)
ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
out_img = Image.fromarray(ndarr)
with Timer("Elapsed time in propagation: %f"):
out_img = smoothing_module.process(out_img, cont_pilimg)
if no_post is False:
with Timer("Elapsed time in post processing: %f"):
out_img = smooth_filter(out_img, cont_pilimg, f_radius=15, f_edge=1e-1)
out_img.save(output_image_path)
return
|
FastPhotoStyle-master
|
process_stylization_ade20k_ssn.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import print_function
import time
import numpy as np
from PIL import Image
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.utils as utils
import torch.nn as nn
import torch
from smooth_filter import smooth_filter
class ReMapping:
def __init__(self):
self.remapping = []
def process(self, seg):
new_seg = seg.copy()
for k, v in self.remapping.items():
new_seg[seg == k] = v
return new_seg
class Timer:
def __init__(self, msg):
self.msg = msg
self.start_time = None
def __enter__(self):
self.start_time = time.time()
def __exit__(self, exc_type, exc_value, exc_tb):
print(self.msg % (time.time() - self.start_time))
def memory_limit_image_resize(cont_img):
# prevent too small or too big images
MINSIZE=256
MAXSIZE=960
orig_width = cont_img.width
orig_height = cont_img.height
if max(cont_img.width,cont_img.height) < MINSIZE:
if cont_img.width > cont_img.height:
cont_img.thumbnail((int(cont_img.width*1.0/cont_img.height*MINSIZE), MINSIZE), Image.BICUBIC)
else:
cont_img.thumbnail((MINSIZE, int(cont_img.height*1.0/cont_img.width*MINSIZE)), Image.BICUBIC)
if min(cont_img.width,cont_img.height) > MAXSIZE:
if cont_img.width > cont_img.height:
cont_img.thumbnail((MAXSIZE, int(cont_img.height*1.0/cont_img.width*MAXSIZE)), Image.BICUBIC)
else:
cont_img.thumbnail(((int(cont_img.width*1.0/cont_img.height*MAXSIZE), MAXSIZE)), Image.BICUBIC)
print("Resize image: (%d,%d)->(%d,%d)" % (orig_width, orig_height, cont_img.width, cont_img.height))
return cont_img.width, cont_img.height
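# (Editorial note) Example: a 4000x2000 content image has min(w, h) = 2000 > 960,
# so it is thumbnailed in place down to 960x480 before stylization. Note that
# PIL's Image.thumbnail only ever shrinks, so the MINSIZE branch above cannot
# actually enlarge images whose longer side is below 256 pixels.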
def stylization(stylization_module, smoothing_module, content_image_path, style_image_path, content_seg_path, style_seg_path, output_image_path,
cuda, save_intermediate, no_post, cont_seg_remapping=None, styl_seg_remapping=None):
# Load image
with torch.no_grad():
cont_img = Image.open(content_image_path).convert('RGB')
styl_img = Image.open(style_image_path).convert('RGB')
new_cw, new_ch = memory_limit_image_resize(cont_img)
new_sw, new_sh = memory_limit_image_resize(styl_img)
cont_pilimg = cont_img.copy()
cw = cont_pilimg.width
ch = cont_pilimg.height
try:
cont_seg = Image.open(content_seg_path)
styl_seg = Image.open(style_seg_path)
cont_seg = cont_seg.resize((new_cw,new_ch),Image.NEAREST)
styl_seg = styl_seg.resize((new_sw,new_sh),Image.NEAREST)
except Exception:
cont_seg = []
styl_seg = []
cont_img = transforms.ToTensor()(cont_img).unsqueeze(0)
styl_img = transforms.ToTensor()(styl_img).unsqueeze(0)
if cuda:
cont_img = cont_img.cuda(0)
styl_img = styl_img.cuda(0)
stylization_module.cuda(0)
# cont_img = Variable(cont_img, volatile=True)
# styl_img = Variable(styl_img, volatile=True)
cont_seg = np.asarray(cont_seg)
styl_seg = np.asarray(styl_seg)
if cont_seg_remapping is not None:
cont_seg = cont_seg_remapping.process(cont_seg)
if styl_seg_remapping is not None:
styl_seg = styl_seg_remapping.process(styl_seg)
if save_intermediate:
with Timer("Elapsed time in stylization: %f"):
stylized_img = stylization_module.transform(cont_img, styl_img, cont_seg, styl_seg)
if ch != new_ch or cw != new_cw:
print("De-resize image: (%d,%d)->(%d,%d)" %(new_cw,new_ch,cw,ch))
stylized_img = nn.functional.upsample(stylized_img, size=(ch,cw), mode='bilinear')
utils.save_image(stylized_img.data.cpu().float(), output_image_path, nrow=1, padding=0)
with Timer("Elapsed time in propagation: %f"):
out_img = smoothing_module.process(output_image_path, content_image_path)
out_img.save(output_image_path)
if not cuda:
print("NotImplemented: The CPU version of smooth filter has not been implemented currently.")
return
if no_post is False:
with Timer("Elapsed time in post processing: %f"):
out_img = smooth_filter(output_image_path, content_image_path, f_radius=15, f_edge=1e-1)
out_img.save(output_image_path)
else:
with Timer("Elapsed time in stylization: %f"):
stylized_img = stylization_module.transform(cont_img, styl_img, cont_seg, styl_seg)
if ch != new_ch or cw != new_cw:
print("De-resize image: (%d,%d)->(%d,%d)" %(new_cw,new_ch,cw,ch))
stylized_img = nn.functional.upsample(stylized_img, size=(ch,cw), mode='bilinear')
grid = utils.make_grid(stylized_img.data, nrow=1, padding=0)
ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
out_img = Image.fromarray(ndarr)
with Timer("Elapsed time in propagation: %f"):
out_img = smoothing_module.process(out_img, cont_pilimg)
if no_post is False:
with Timer("Elapsed time in post processing: %f"):
out_img = smooth_filter(out_img, cont_pilimg, f_radius=15, f_edge=1e-1)
out_img.save(output_image_path)
|
FastPhotoStyle-master
|
process_stylization.py
|
# Download code taken from https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive/39225039#39225039
import requests
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
file_id = '1ENgQm9TgabE1R99zhNf5q6meBvX6WFuq'
destination = './models.zip'
download_file_from_google_drive(file_id, destination)
|
FastPhotoStyle-master
|
download_models.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.nn as nn
class VGGEncoder(nn.Module):
def __init__(self, level):
super(VGGEncoder, self).__init__()
self.level = level
# 224 x 224
self.conv0 = nn.Conv2d(3, 3, 1, 1, 0)
self.pad1_1 = nn.ReflectionPad2d((1, 1, 1, 1))
# 226 x 226
self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 0)
self.relu1_1 = nn.ReLU(inplace=True)
# 224 x 224
if level < 2: return
self.pad1_2 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu1_2 = nn.ReLU(inplace=True)
# 224 x 224
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
# 112 x 112
self.pad2_1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 0)
self.relu2_1 = nn.ReLU(inplace=True)
# 112 x 112
if level < 3: return
self.pad2_2 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu2_2 = nn.ReLU(inplace=True)
# 112 x 112
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
# 56 x 56
self.pad3_1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 0)
self.relu3_1 = nn.ReLU(inplace=True)
# 56 x 56
if level < 4: return
self.pad3_2 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu3_2 = nn.ReLU(inplace=True)
# 56 x 56
self.pad3_3 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu3_3 = nn.ReLU(inplace=True)
# 56 x 56
self.pad3_4 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu3_4 = nn.ReLU(inplace=True)
# 56 x 56
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
# 28 x 28
self.pad4_1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 0)
self.relu4_1 = nn.ReLU(inplace=True)
# 28 x 28
def forward(self, x):
out = self.conv0(x)
out = self.pad1_1(out)
out = self.conv1_1(out)
out = self.relu1_1(out)
if self.level < 2:
return out
out = self.pad1_2(out)
out = self.conv1_2(out)
pool1 = self.relu1_2(out)
out, pool1_idx = self.maxpool1(pool1)
out = self.pad2_1(out)
out = self.conv2_1(out)
out = self.relu2_1(out)
if self.level < 3:
return out, pool1_idx, pool1.size()
out = self.pad2_2(out)
out = self.conv2_2(out)
pool2 = self.relu2_2(out)
out, pool2_idx = self.maxpool2(pool2)
out = self.pad3_1(out)
out = self.conv3_1(out)
out = self.relu3_1(out)
if self.level < 4:
return out, pool1_idx, pool1.size(), pool2_idx, pool2.size()
out = self.pad3_2(out)
out = self.conv3_2(out)
out = self.relu3_2(out)
out = self.pad3_3(out)
out = self.conv3_3(out)
out = self.relu3_3(out)
out = self.pad3_4(out)
out = self.conv3_4(out)
pool3 = self.relu3_4(out)
out, pool3_idx = self.maxpool3(pool3)
out = self.pad4_1(out)
out = self.conv4_1(out)
out = self.relu4_1(out)
return out, pool1_idx, pool1.size(), pool2_idx, pool2.size(), pool3_idx, pool3.size()
def forward_multiple(self, x):
out = self.conv0(x)
out = self.pad1_1(out)
out = self.conv1_1(out)
out = self.relu1_1(out)
if self.level < 2: return out
out1 = out
out = self.pad1_2(out)
out = self.conv1_2(out)
pool1 = self.relu1_2(out)
out, pool1_idx = self.maxpool1(pool1)
out = self.pad2_1(out)
out = self.conv2_1(out)
out = self.relu2_1(out)
if self.level < 3: return out, out1
out2 = out
out = self.pad2_2(out)
out = self.conv2_2(out)
pool2 = self.relu2_2(out)
out, pool2_idx = self.maxpool2(pool2)
out = self.pad3_1(out)
out = self.conv3_1(out)
out = self.relu3_1(out)
if self.level < 4: return out, out2, out1
out3 = out
out = self.pad3_2(out)
out = self.conv3_2(out)
out = self.relu3_2(out)
out = self.pad3_3(out)
out = self.conv3_3(out)
out = self.relu3_3(out)
out = self.pad3_4(out)
out = self.conv3_4(out)
pool3 = self.relu3_4(out)
out, pool3_idx = self.maxpool3(pool3)
out = self.pad4_1(out)
out = self.conv4_1(out)
out = self.relu4_1(out)
return out, out3, out2, out1
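    # forward_multiple returns the relu*_1 activation of every level up to self.level,
    # deepest first; PhotoWCT.transform uses it to grab all style features in one pass.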
class VGGDecoder(nn.Module):
def __init__(self, level):
super(VGGDecoder, self).__init__()
self.level = level
if level > 3:
self.pad4_1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv4_1 = nn.Conv2d(512, 256, 3, 1, 0)
self.relu4_1 = nn.ReLU(inplace=True)
# 28 x 28
self.unpool3 = nn.MaxUnpool2d(kernel_size=2, stride=2)
# 56 x 56
self.pad3_4 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu3_4 = nn.ReLU(inplace=True)
# 56 x 56
self.pad3_3 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu3_3 = nn.ReLU(inplace=True)
# 56 x 56
self.pad3_2 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu3_2 = nn.ReLU(inplace=True)
# 56 x 56
if level > 2:
self.pad3_1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3_1 = nn.Conv2d(256, 128, 3, 1, 0)
self.relu3_1 = nn.ReLU(inplace=True)
# 56 x 56
self.unpool2 = nn.MaxUnpool2d(kernel_size=2, stride=2)
# 112 x 112
self.pad2_2 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu2_2 = nn.ReLU(inplace=True)
# 112 x 112
if level > 1:
self.pad2_1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv2_1 = nn.Conv2d(128, 64, 3, 1, 0)
self.relu2_1 = nn.ReLU(inplace=True)
# 112 x 112
self.unpool1 = nn.MaxUnpool2d(kernel_size=2, stride=2)
# 224 x 224
self.pad1_2 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu1_2 = nn.ReLU(inplace=True)
# 224 x 224
if level > 0:
self.pad1_1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv1_1 = nn.Conv2d(64, 3, 3, 1, 0)
def forward(self, x, pool1_idx=None, pool1_size=None, pool2_idx=None, pool2_size=None, pool3_idx=None,
pool3_size=None):
out = x
if self.level > 3:
out = self.pad4_1(out)
out = self.conv4_1(out)
out = self.relu4_1(out)
out = self.unpool3(out, pool3_idx, output_size=pool3_size)
out = self.pad3_4(out)
out = self.conv3_4(out)
out = self.relu3_4(out)
out = self.pad3_3(out)
out = self.conv3_3(out)
out = self.relu3_3(out)
out = self.pad3_2(out)
out = self.conv3_2(out)
out = self.relu3_2(out)
if self.level > 2:
out = self.pad3_1(out)
out = self.conv3_1(out)
out = self.relu3_1(out)
out = self.unpool2(out, pool2_idx, output_size=pool2_size)
out = self.pad2_2(out)
out = self.conv2_2(out)
out = self.relu2_2(out)
if self.level > 1:
out = self.pad2_1(out)
out = self.conv2_1(out)
out = self.relu2_1(out)
out = self.unpool1(out, pool1_idx, output_size=pool1_size)
out = self.pad1_2(out)
out = self.conv1_2(out)
out = self.relu1_2(out)
if self.level > 0:
out = self.pad1_1(out)
out = self.conv1_1(out)
return out
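    # The decoder mirrors the encoder: max-unpooling consumes the pooling indices and
    # sizes recorded by the matching VGGEncoder, restoring the original spatial layout.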
|
FastPhotoStyle-master
|
models.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
src = '''
#include "/usr/local/cuda/include/math_functions.h"
#define TB 256
#define EPS 1e-7
__device__ bool InverseMat4x4(double m_in[4][4], double inv_out[4][4]) {
double m[16], inv[16];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
m[i * 4 + j] = m_in[i][j];
}
}
inv[0] = m[5] * m[10] * m[15] -
m[5] * m[11] * m[14] -
m[9] * m[6] * m[15] +
m[9] * m[7] * m[14] +
m[13] * m[6] * m[11] -
m[13] * m[7] * m[10];
inv[4] = -m[4] * m[10] * m[15] +
m[4] * m[11] * m[14] +
m[8] * m[6] * m[15] -
m[8] * m[7] * m[14] -
m[12] * m[6] * m[11] +
m[12] * m[7] * m[10];
inv[8] = m[4] * m[9] * m[15] -
m[4] * m[11] * m[13] -
m[8] * m[5] * m[15] +
m[8] * m[7] * m[13] +
m[12] * m[5] * m[11] -
m[12] * m[7] * m[9];
inv[12] = -m[4] * m[9] * m[14] +
m[4] * m[10] * m[13] +
m[8] * m[5] * m[14] -
m[8] * m[6] * m[13] -
m[12] * m[5] * m[10] +
m[12] * m[6] * m[9];
inv[1] = -m[1] * m[10] * m[15] +
m[1] * m[11] * m[14] +
m[9] * m[2] * m[15] -
m[9] * m[3] * m[14] -
m[13] * m[2] * m[11] +
m[13] * m[3] * m[10];
inv[5] = m[0] * m[10] * m[15] -
m[0] * m[11] * m[14] -
m[8] * m[2] * m[15] +
m[8] * m[3] * m[14] +
m[12] * m[2] * m[11] -
m[12] * m[3] * m[10];
inv[9] = -m[0] * m[9] * m[15] +
m[0] * m[11] * m[13] +
m[8] * m[1] * m[15] -
m[8] * m[3] * m[13] -
m[12] * m[1] * m[11] +
m[12] * m[3] * m[9];
inv[13] = m[0] * m[9] * m[14] -
m[0] * m[10] * m[13] -
m[8] * m[1] * m[14] +
m[8] * m[2] * m[13] +
m[12] * m[1] * m[10] -
m[12] * m[2] * m[9];
inv[2] = m[1] * m[6] * m[15] -
m[1] * m[7] * m[14] -
m[5] * m[2] * m[15] +
m[5] * m[3] * m[14] +
m[13] * m[2] * m[7] -
m[13] * m[3] * m[6];
inv[6] = -m[0] * m[6] * m[15] +
m[0] * m[7] * m[14] +
m[4] * m[2] * m[15] -
m[4] * m[3] * m[14] -
m[12] * m[2] * m[7] +
m[12] * m[3] * m[6];
inv[10] = m[0] * m[5] * m[15] -
m[0] * m[7] * m[13] -
m[4] * m[1] * m[15] +
m[4] * m[3] * m[13] +
m[12] * m[1] * m[7] -
m[12] * m[3] * m[5];
inv[14] = -m[0] * m[5] * m[14] +
m[0] * m[6] * m[13] +
m[4] * m[1] * m[14] -
m[4] * m[2] * m[13] -
m[12] * m[1] * m[6] +
m[12] * m[2] * m[5];
inv[3] = -m[1] * m[6] * m[11] +
m[1] * m[7] * m[10] +
m[5] * m[2] * m[11] -
m[5] * m[3] * m[10] -
m[9] * m[2] * m[7] +
m[9] * m[3] * m[6];
inv[7] = m[0] * m[6] * m[11] -
m[0] * m[7] * m[10] -
m[4] * m[2] * m[11] +
m[4] * m[3] * m[10] +
m[8] * m[2] * m[7] -
m[8] * m[3] * m[6];
inv[11] = -m[0] * m[5] * m[11] +
m[0] * m[7] * m[9] +
m[4] * m[1] * m[11] -
m[4] * m[3] * m[9] -
m[8] * m[1] * m[7] +
m[8] * m[3] * m[5];
inv[15] = m[0] * m[5] * m[10] -
m[0] * m[6] * m[9] -
m[4] * m[1] * m[10] +
m[4] * m[2] * m[9] +
m[8] * m[1] * m[6] -
m[8] * m[2] * m[5];
double det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12];
if (abs(det) < 1e-9) {
return false;
}
det = 1.0 / det;
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
inv_out[i][j] = inv[i * 4 + j] * det;
}
}
return true;
}
extern "C"
__global__ void best_local_affine_kernel(
float *output, float *input, float *affine_model,
int h, int w, float epsilon, int kernel_radius
)
{
int size = h * w;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int x = id % w, y = id / w;
double Mt_M[4][4] = {}; // 4x4
double invMt_M[4][4] = {};
double Mt_S[3][4] = {}; // RGB -> 1x4
double A[3][4] = {};
for (int i = 0; i < 4; i++)
for (int j = 0; j < 4; j++) {
Mt_M[i][j] = 0, invMt_M[i][j] = 0;
if (i != 3) {
Mt_S[i][j] = 0, A[i][j] = 0;
if (i == j)
Mt_M[i][j] = 1e-3;
}
}
for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
int xx = x + dx, yy = y + dy;
int id2 = yy * w + xx;
if (0 <= xx && xx < w && 0 <= yy && yy < h) {
Mt_M[0][0] += input[id2 + 2*size] * input[id2 + 2*size];
Mt_M[0][1] += input[id2 + 2*size] * input[id2 + size];
Mt_M[0][2] += input[id2 + 2*size] * input[id2];
Mt_M[0][3] += input[id2 + 2*size];
Mt_M[1][0] += input[id2 + size] * input[id2 + 2*size];
Mt_M[1][1] += input[id2 + size] * input[id2 + size];
Mt_M[1][2] += input[id2 + size] * input[id2];
Mt_M[1][3] += input[id2 + size];
Mt_M[2][0] += input[id2] * input[id2 + 2*size];
Mt_M[2][1] += input[id2] * input[id2 + size];
Mt_M[2][2] += input[id2] * input[id2];
Mt_M[2][3] += input[id2];
Mt_M[3][0] += input[id2 + 2*size];
Mt_M[3][1] += input[id2 + size];
Mt_M[3][2] += input[id2];
Mt_M[3][3] += 1;
Mt_S[0][0] += input[id2 + 2*size] * output[id2 + 2*size];
Mt_S[0][1] += input[id2 + size] * output[id2 + 2*size];
Mt_S[0][2] += input[id2] * output[id2 + 2*size];
Mt_S[0][3] += output[id2 + 2*size];
Mt_S[1][0] += input[id2 + 2*size] * output[id2 + size];
Mt_S[1][1] += input[id2 + size] * output[id2 + size];
Mt_S[1][2] += input[id2] * output[id2 + size];
Mt_S[1][3] += output[id2 + size];
Mt_S[2][0] += input[id2 + 2*size] * output[id2];
Mt_S[2][1] += input[id2 + size] * output[id2];
Mt_S[2][2] += input[id2] * output[id2];
Mt_S[2][3] += output[id2];
}
}
}
bool success = InverseMat4x4(Mt_M, invMt_M);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
for (int k = 0; k < 4; k++) {
A[i][j] += invMt_M[j][k] * Mt_S[i][k];
}
}
}
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
int affine_id = i * 4 + j;
affine_model[12 * id + affine_id] = A[i][j];
}
}
}
return ;
}
extern "C"
__global__ void bilateral_smooth_kernel(
float *affine_model, float *filtered_affine_model, float *guide,
int h, int w, int kernel_radius, float sigma1, float sigma2
)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
if (id < size) {
int x = id % w;
int y = id / w;
double sum_affine[12] = {};
double sum_weight = 0;
for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
int yy = y + dy, xx = x + dx;
int id2 = yy * w + xx;
if (0 <= xx && xx < w && 0 <= yy && yy < h) {
float color_diff1 = guide[yy*w + xx] - guide[y*w + x];
float color_diff2 = guide[yy*w + xx + size] - guide[y*w + x + size];
float color_diff3 = guide[yy*w + xx + 2*size] - guide[y*w + x + 2*size];
float color_diff_sqr =
(color_diff1*color_diff1 + color_diff2*color_diff2 + color_diff3*color_diff3) / 3;
float v1 = exp(-(dx * dx + dy * dy) / (2 * sigma1 * sigma1));
float v2 = exp(-(color_diff_sqr) / (2 * sigma2 * sigma2));
float weight = v1 * v2;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
int affine_id = i * 4 + j;
sum_affine[affine_id] += weight * affine_model[id2*12 + affine_id];
}
}
sum_weight += weight;
}
}
}
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
int affine_id = i * 4 + j;
filtered_affine_model[id*12 + affine_id] = sum_affine[affine_id] / sum_weight;
}
}
}
return ;
}
extern "C"
__global__ void reconstruction_best_kernel(
float *input, float *filtered_affine_model, float *filtered_best_output,
int h, int w
)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
if (id < size) {
double out1 =
input[id + 2*size] * filtered_affine_model[id*12 + 0] + // A[0][0] +
input[id + size] * filtered_affine_model[id*12 + 1] + // A[0][1] +
input[id] * filtered_affine_model[id*12 + 2] + // A[0][2] +
filtered_affine_model[id*12 + 3]; //A[0][3];
double out2 =
input[id + 2*size] * filtered_affine_model[id*12 + 4] + //A[1][0] +
input[id + size] * filtered_affine_model[id*12 + 5] + //A[1][1] +
input[id] * filtered_affine_model[id*12 + 6] + //A[1][2] +
filtered_affine_model[id*12 + 7]; //A[1][3];
double out3 =
input[id + 2*size] * filtered_affine_model[id*12 + 8] + //A[2][0] +
input[id + size] * filtered_affine_model[id*12 + 9] + //A[2][1] +
input[id] * filtered_affine_model[id*12 + 10] + //A[2][2] +
filtered_affine_model[id*12 + 11]; // A[2][3];
filtered_best_output[id] = out1;
filtered_best_output[id + size] = out2;
filtered_best_output[id + 2*size] = out3;
}
return ;
}
'''
import torch
import numpy as np
from PIL import Image
from cupy.cuda import function
from pynvrtc.compiler import Program
from collections import namedtuple
def smooth_local_affine(output_cpu, input_cpu, epsilon, patch, h, w, f_r, f_e):
# program = Program(src.encode('utf-8'), 'best_local_affine_kernel.cu'.encode('utf-8'))
# ptx = program.compile(['-I/usr/local/cuda/include'.encode('utf-8')])
program = Program(src, 'best_local_affine_kernel.cu')
ptx = program.compile(['-I/usr/local/cuda/include'])
m = function.Module()
m.load(bytes(ptx.encode()))
_reconstruction_best_kernel = m.get_function('reconstruction_best_kernel')
_bilateral_smooth_kernel = m.get_function('bilateral_smooth_kernel')
_best_local_affine_kernel = m.get_function('best_local_affine_kernel')
Stream = namedtuple('Stream', ['ptr'])
s = Stream(ptr=torch.cuda.current_stream().cuda_stream)
filter_radius = f_r
sigma1 = filter_radius / 3
sigma2 = f_e
radius = (patch - 1) / 2
filtered_best_output = torch.zeros(np.shape(input_cpu)).cuda()
affine_model = torch.zeros((h * w, 12)).cuda()
filtered_affine_model =torch.zeros((h * w, 12)).cuda()
input_ = torch.from_numpy(input_cpu).cuda()
output_ = torch.from_numpy(output_cpu).cuda()
_best_local_affine_kernel(
grid=(int((h * w) / 256 + 1), 1),
block=(256, 1, 1),
args=[output_.data_ptr(), input_.data_ptr(), affine_model.data_ptr(),
np.int32(h), np.int32(w), np.float32(epsilon), np.int32(radius)], stream=s
)
_bilateral_smooth_kernel(
grid=(int((h * w) / 256 + 1), 1),
block=(256, 1, 1),
args=[affine_model.data_ptr(), filtered_affine_model.data_ptr(), input_.data_ptr(), np.int32(h), np.int32(w), np.int32(f_r), np.float32(sigma1), np.float32(sigma2)], stream=s
)
_reconstruction_best_kernel(
grid=(int((h * w) / 256 + 1), 1),
block=(256, 1, 1),
args=[input_.data_ptr(), filtered_affine_model.data_ptr(), filtered_best_output.data_ptr(),
np.int32(h), np.int32(w)], stream=s
)
numpy_filtered_best_output = filtered_best_output.cpu().numpy()
return numpy_filtered_best_output
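# Data layout: the wrapper passes contiguous (3, h, w) float planes in [0, 1];
# best_local_affine_kernel fits a 3x4 affine colour transform per pixel (12 floats in
# affine_model), bilateral_smooth_kernel smooths those coefficients across neighbouring
# pixels with similar guide colours, and reconstruction_best_kernel re-applies them.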
def smooth_filter(initImg, contentImg, f_radius=15,f_edge=1e-1):
'''
:param initImg: intermediate output. Either image path or PIL Image
    :param contentImg: original content image. Either path or PIL Image
:return: stylized output image. PIL Image
'''
if type(initImg) == str:
initImg = Image.open(initImg).convert("RGB")
best_image_bgr = np.array(initImg, dtype=np.float32)
    bW, bH, bC = best_image_bgr.shape  # numpy shape is (rows, cols, channels), so bW holds the height and bH the width
best_image_bgr = best_image_bgr[:, :, ::-1]
best_image_bgr = best_image_bgr.transpose((2, 0, 1))
if type(contentImg) == str:
contentImg = Image.open(contentImg).convert("RGB")
content_input = contentImg.resize((bH,bW))
content_input = np.array(content_input, dtype=np.float32)
content_input = content_input[:, :, ::-1]
content_input = content_input.transpose((2, 0, 1))
input_ = np.ascontiguousarray(content_input, dtype=np.float32) / 255.
_, H, W = np.shape(input_)
output_ = np.ascontiguousarray(best_image_bgr, dtype=np.float32) / 255.
best_ = smooth_local_affine(output_, input_, 1e-7, 3, H, W, f_radius, f_edge)
best_ = best_.transpose(1, 2, 0)
result = Image.fromarray(np.uint8(np.clip(best_ * 255., 0, 255.)))
return result
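# Minimal usage sketch (paths are illustrative):
#   out = smooth_filter('results/stylized.png', 'images/content.png', f_radius=15, f_edge=1e-1)
#   out.save('results/stylized_post.png')
# Requires a CUDA device plus cupy and pynvrtc, since the affine smoothing runs as CUDA kernels.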
|
FastPhotoStyle-master
|
smooth_filter.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import division
import torch.nn as nn
import scipy.misc
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
from numpy.lib.stride_tricks import as_strided
from PIL import Image
class Propagator(nn.Module):
def __init__(self, beta=0.9999):
super(Propagator, self).__init__()
self.beta = beta
def process(self, initImg, contentImg):
if type(contentImg) == str:
content = scipy.misc.imread(contentImg, mode='RGB')
else:
content = contentImg.copy()
# content = scipy.misc.imread(contentImg, mode='RGB')
if type(initImg) == str:
B = scipy.misc.imread(initImg, mode='RGB').astype(np.float64) / 255
else:
            B = np.asarray(initImg).astype(np.float64) / 255
# B = self.
# B = scipy.misc.imread(initImg, mode='RGB').astype(np.float64)/255
h1,w1,k = B.shape
h = h1 - 4
w = w1 - 4
B = B[int((h1-h)/2):int((h1-h)/2+h),int((w1-w)/2):int((w1-w)/2+w),:]
content = scipy.misc.imresize(content,(h,w))
B = self.__replication_padding(B,2)
content = self.__replication_padding(content,2)
content = content.astype(np.float64)/255
B = np.reshape(B,(h1*w1,k))
W = self.__compute_laplacian(content)
W = W.tocsc()
dd = W.sum(0)
dd = np.sqrt(np.power(dd,-1))
dd = dd.A.squeeze()
D = scipy.sparse.csc_matrix((dd, (np.arange(0,w1*h1), np.arange(0,w1*h1)))) # 0.026
S = D.dot(W).dot(D)
A = scipy.sparse.identity(w1*h1) - self.beta*S
A = A.tocsc()
solver = scipy.sparse.linalg.factorized(A)
V = np.zeros((h1*w1,k))
V[:,0] = solver(B[:,0])
V[:,1] = solver(B[:,1])
V[:,2] = solver(B[:,2])
V = V*(1-self.beta)
V = V.reshape(h1,w1,k)
V = V[2:2+h,2:2+w,:]
img = Image.fromarray(np.uint8(np.clip(V * 255., 0, 255.)))
return img
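    # process() solves, per colour channel, V = (1 - beta) * (I - beta * S)^(-1) * B, where B is the
    # (padded) stylized image and S is the symmetrically normalized affinity derived from the
    # content image via __compute_laplacian; beta (default 0.9999) weights the smoothness term.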
# Returns sparse matting laplacian
# The implementation of the function is heavily borrowed from
# https://github.com/MarcoForte/closed-form-matting/blob/master/closed_form_matting.py
# We thank Marco Forte for sharing his code.
def __compute_laplacian(self, img, eps=10**(-7), win_rad=1):
win_size = (win_rad*2+1)**2
h, w, d = img.shape
c_h, c_w = h - 2*win_rad, w - 2*win_rad
win_diam = win_rad*2+1
indsM = np.arange(h*w).reshape((h, w))
ravelImg = img.reshape(h*w, d)
win_inds = self.__rolling_block(indsM, block=(win_diam, win_diam))
win_inds = win_inds.reshape(c_h, c_w, win_size)
winI = ravelImg[win_inds]
win_mu = np.mean(winI, axis=2, keepdims=True)
win_var = np.einsum('...ji,...jk ->...ik', winI, winI)/win_size - np.einsum('...ji,...jk ->...ik', win_mu, win_mu)
inv = np.linalg.inv(win_var + (eps/win_size)*np.eye(3))
X = np.einsum('...ij,...jk->...ik', winI - win_mu, inv)
vals = (1/win_size)*(1 + np.einsum('...ij,...kj->...ik', X, winI - win_mu))
nz_indsCol = np.tile(win_inds, win_size).ravel()
nz_indsRow = np.repeat(win_inds, win_size).ravel()
nz_indsVal = vals.ravel()
L = scipy.sparse.coo_matrix((nz_indsVal, (nz_indsRow, nz_indsCol)), shape=(h*w, h*w))
return L
def __replication_padding(self, arr,pad):
h,w,c = arr.shape
ans = np.zeros((h+pad*2,w+pad*2,c))
for i in range(c):
ans[:,:,i] = np.pad(arr[:,:,i],pad_width=(pad,pad),mode='edge')
return ans
def __rolling_block(self, A, block=(3, 3)):
shape = (A.shape[0] - block[0] + 1, A.shape[1] - block[1] + 1) + block
strides = (A.strides[0], A.strides[1]) + A.strides
return as_strided(A, shape=shape, strides=strides)
|
FastPhotoStyle-master
|
photo_smooth.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import print_function
import argparse
import os
import torch
import process_stylization_ade20k_ssn
from torch import nn
from photo_wct import PhotoWCT
from segmentation.dataset import round2nearest_multiple
from segmentation.models import ModelBuilder, SegmentationModule
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy, mark_volatile
from scipy.misc import imread, imresize
import cv2
from torchvision import transforms
import numpy as np
parser = argparse.ArgumentParser(description='Photorealistic Image Stylization')
parser.add_argument('--model_path', help='folder to model path', default='baseline-resnet50_dilated8-ppm_bilinear_deepsup')
parser.add_argument('--suffix', default='_epoch_20.pth', help="which snapshot to load")
parser.add_argument('--arch_encoder', default='resnet50_dilated8', help="architecture of net_encoder")
parser.add_argument('--arch_decoder', default='ppm_bilinear_deepsup', help="architecture of net_decoder")
parser.add_argument('--fc_dim', default=2048, type=int, help='number of features between encoder and decoder')
parser.add_argument('--num_val', default=-1, type=int, help='number of images to evaluate')
parser.add_argument('--num_class', default=150, type=int, help='number of classes')
parser.add_argument('--batch_size', default=1, type=int, help='batch size (currently only 1 is supported)')
parser.add_argument('--imgSize', default=[300, 400, 500, 600], nargs='+', type=int, help='list of input image sizes for multiscale testing, e.g. 300 400 500')
parser.add_argument('--imgMaxSize', default=1000, type=int, help='maximum input image size of long edge')
parser.add_argument('--padding_constant', default=8, type=int, help='maximum downsampling rate of the network')
parser.add_argument('--segm_downsampling_rate', default=8, type=int, help='downsampling rate of the segmentation label')
parser.add_argument('--gpu_id', default=0, type=int, help='gpu_id for evaluation')
parser.add_argument('--model', default='./PhotoWCTModels/photo_wct.pth', help='Path to the PhotoWCT model. These are provided by the PhotoWCT submodule, please use `git submodule update --init --recursive` to pull.')
parser.add_argument('--content_image_path', default="./images/content3.png")
parser.add_argument('--content_seg_path', default='./results/content3_seg.pgm')
parser.add_argument('--style_image_path', default='./images/style3.png')
parser.add_argument('--style_seg_path', default='./results/style3_seg.pgm')
parser.add_argument('--output_image_path', default='./results/example3.png')
parser.add_argument('--save_intermediate', action='store_true', default=False)
parser.add_argument('--fast', action='store_true', default=False)
parser.add_argument('--no_post', action='store_true', default=False)
parser.add_argument('--output_visualization', action='store_true', default=False)
parser.add_argument('--cuda', type=int, default=1, help='Enable CUDA.')
parser.add_argument('--label_mapping', type=str, default='ade20k_semantic_rel.npy')
args = parser.parse_args()
segReMapping = process_stylization_ade20k_ssn.SegReMapping(args.label_mapping)
# Absolute paths of segmentation model weights
SEG_NET_PATH = 'segmentation'
args.weights_encoder = os.path.join(SEG_NET_PATH,args.model_path, 'encoder' + args.suffix)
args.weights_decoder = os.path.join(SEG_NET_PATH,args.model_path, 'decoder' + args.suffix)
args.arch_encoder = 'resnet50_dilated8'
args.arch_decoder = 'ppm_bilinear_deepsup'
args.fc_dim = 2048
# Load semantic segmentation network module
builder = ModelBuilder()
net_encoder = builder.build_encoder(arch=args.arch_encoder, fc_dim=args.fc_dim, weights=args.weights_encoder)
net_decoder = builder.build_decoder(arch=args.arch_decoder, fc_dim=args.fc_dim, num_class=args.num_class, weights=args.weights_decoder, use_softmax=True)
crit = nn.NLLLoss(ignore_index=-1)
segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
segmentation_module.cuda()
segmentation_module.eval()
transform = transforms.Compose([transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])])
# Load FastPhotoStyle model
p_wct = PhotoWCT()
p_wct.load_state_dict(torch.load(args.model))
if args.fast:
from photo_gif import GIFSmoothing
p_pro = GIFSmoothing(r=35, eps=0.001)
else:
from photo_smooth import Propagator
p_pro = Propagator()
if args.cuda:
p_wct.cuda(0)
def segment_this_img(f):
img = imread(f, mode='RGB')
    img = img[:, :, ::-1]  # RGB to BGR (the normalization means below are BGR)
ori_height, ori_width, _ = img.shape
img_resized_list = []
for this_short_size in args.imgSize:
scale = this_short_size / float(min(ori_height, ori_width))
target_height, target_width = int(ori_height * scale), int(ori_width * scale)
target_height = round2nearest_multiple(target_height, args.padding_constant)
target_width = round2nearest_multiple(target_width, args.padding_constant)
img_resized = cv2.resize(img.copy(), (target_width, target_height))
img_resized = img_resized.astype(np.float32)
img_resized = img_resized.transpose((2, 0, 1))
img_resized = transform(torch.from_numpy(img_resized))
img_resized = torch.unsqueeze(img_resized, 0)
img_resized_list.append(img_resized)
input = dict()
input['img_ori'] = img.copy()
input['img_data'] = [x.contiguous() for x in img_resized_list]
segSize = (img.shape[0],img.shape[1])
with torch.no_grad():
pred = torch.zeros(1, args.num_class, segSize[0], segSize[1])
for timg in img_resized_list:
feed_dict = dict()
feed_dict['img_data'] = timg.cuda()
feed_dict = async_copy_to(feed_dict, args.gpu_id)
# forward pass
pred_tmp = segmentation_module(feed_dict, segSize=segSize)
pred = pred + pred_tmp.cpu() / len(args.imgSize)
_, preds = torch.max(pred, dim=1)
preds = as_numpy(preds.squeeze(0))
return preds
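    # Multi-scale inference: each size in args.imgSize is segmented separately, the class
    # probabilities are averaged, and the per-pixel argmax over args.num_class labels is returned.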
cont_seg = segment_this_img(args.content_image_path)
cv2.imwrite(args.content_seg_path, cont_seg)
style_seg = segment_this_img(args.style_image_path)
cv2.imwrite(args.style_seg_path, style_seg)
process_stylization_ade20k_ssn.stylization(
stylization_module=p_wct,
smoothing_module=p_pro,
content_image_path=args.content_image_path,
style_image_path=args.style_image_path,
content_seg_path=args.content_seg_path,
style_seg_path=args.style_seg_path,
output_image_path=args.output_image_path,
cuda=True,
save_intermediate=args.save_intermediate,
no_post=args.no_post,
label_remapping=segReMapping,
output_visualization=args.output_visualization
)
|
FastPhotoStyle-master
|
demo_with_ade20k_ssn.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import print_function
import argparse
import os
import torch
from photo_wct import PhotoWCT
import process_stylization
parser = argparse.ArgumentParser(description='Photorealistic Image Stylization')
parser.add_argument('--model', default='./PhotoWCTModels/photo_wct.pth')
parser.add_argument('--cuda', type=bool, default=True, help='Enable CUDA.')
parser.add_argument('--save_intermediate', action='store_true', default=False)
parser.add_argument('--fast', action='store_true', default=False)
parser.add_argument('--no_post', action='store_true', default=False)
parser.add_argument('--folder', type=str, default='examples')
parser.add_argument('--beta', type=float, default=0.9999)
parser.add_argument('--cont_img_ext', type=str, default='.png')
parser.add_argument('--cont_seg_ext', type=str, default='.pgm')
parser.add_argument('--styl_img_ext', type=str, default='.png')
parser.add_argument('--styl_seg_ext', type=str, default='.pgm')
args = parser.parse_args()
folder = args.folder
cont_img_folder = os.path.join(folder, 'content_img')
cont_seg_folder = os.path.join(folder, 'content_seg')
styl_img_folder = os.path.join(folder, 'style_img')
styl_seg_folder = os.path.join(folder, 'style_seg')
outp_img_folder = os.path.join(folder, 'results')
cont_img_list = [f for f in os.listdir(cont_img_folder) if os.path.isfile(os.path.join(cont_img_folder, f))]
cont_img_list.sort()
# Load model
p_wct = PhotoWCT()
p_wct.load_state_dict(torch.load(args.model))
# Load Propagator
if args.fast:
from photo_gif import GIFSmoothing
p_pro = GIFSmoothing(r=35, eps=0.01)
else:
from photo_smooth import Propagator
p_pro = Propagator(args.beta)
for f in cont_img_list:
content_image_path = os.path.join(cont_img_folder, f)
content_seg_path = os.path.join(cont_seg_folder, f).replace(args.cont_img_ext, args.cont_seg_ext)
style_image_path = os.path.join(styl_img_folder, f)
style_seg_path = os.path.join(styl_seg_folder, f).replace(args.styl_img_ext, args.styl_seg_ext)
output_image_path = os.path.join(outp_img_folder, f)
print("Content image: " + content_image_path )
if os.path.isfile(content_seg_path):
print("Content mask: " + content_seg_path )
print("Style image: " + style_image_path )
if os.path.isfile(style_seg_path):
print("Style mask: " + style_seg_path )
process_stylization.stylization(
stylization_module=p_wct,
smoothing_module=p_pro,
content_image_path=content_image_path,
style_image_path=style_image_path,
content_seg_path=content_seg_path,
style_seg_path=style_seg_path,
output_image_path=output_image_path,
cuda=args.cuda,
save_intermediate=args.save_intermediate,
no_post=args.no_post
)
|
FastPhotoStyle-master
|
process_stylization_folder.py
|
import os
import torch
import torch.nn as nn
from torch.utils.serialization import load_lua
from models import VGGEncoder, VGGDecoder
from photo_wct import PhotoWCT
def weight_assign(lua, pth, maps):
for k, v in maps.items():
getattr(pth, k).weight = nn.Parameter(lua.get(v).weight.float())
getattr(pth, k).bias = nn.Parameter(lua.get(v).bias.float())
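# weight_assign copies Conv2d weights and biases from a legacy Torch7 (.t7) sequential
# model into the named modules of the PyTorch network; `maps` pairs each module name
# with its index in the Lua container.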
def photo_wct_loader(p_wct):
p_wct.e1.load_state_dict(torch.load('pth_models/vgg_normalised_conv1.pth'))
p_wct.d1.load_state_dict(torch.load('pth_models/feature_invertor_conv1.pth'))
p_wct.e2.load_state_dict(torch.load('pth_models/vgg_normalised_conv2.pth'))
p_wct.d2.load_state_dict(torch.load('pth_models/feature_invertor_conv2.pth'))
p_wct.e3.load_state_dict(torch.load('pth_models/vgg_normalised_conv3.pth'))
p_wct.d3.load_state_dict(torch.load('pth_models/feature_invertor_conv3.pth'))
p_wct.e4.load_state_dict(torch.load('pth_models/vgg_normalised_conv4.pth'))
p_wct.d4.load_state_dict(torch.load('pth_models/feature_invertor_conv4.pth'))
if __name__ == '__main__':
if not os.path.exists('pth_models'):
os.mkdir('pth_models')
## VGGEncoder1
vgg1 = load_lua('models/vgg_normalised_conv1_1_mask.t7')
e1 = VGGEncoder(1)
weight_assign(vgg1, e1, {
'conv0': 0,
'conv1_1': 2,
})
torch.save(e1.state_dict(), 'pth_models/vgg_normalised_conv1.pth')
## VGGDecoder1
inv1 = load_lua('models/feature_invertor_conv1_1_mask.t7')
d1 = VGGDecoder(1)
weight_assign(inv1, d1, {
'conv1_1': 1,
})
torch.save(d1.state_dict(), 'pth_models/feature_invertor_conv1.pth')
## VGGEncoder2
vgg2 = load_lua('models/vgg_normalised_conv2_1_mask.t7')
e2 = VGGEncoder(2)
weight_assign(vgg2, e2, {
'conv0': 0,
'conv1_1': 2,
'conv1_2': 5,
'conv2_1': 9,
})
torch.save(e2.state_dict(), 'pth_models/vgg_normalised_conv2.pth')
## VGGDecoder2
inv2 = load_lua('models/feature_invertor_conv2_1_mask.t7')
d2 = VGGDecoder(2)
weight_assign(inv2, d2, {
'conv2_1': 1,
'conv1_2': 5,
'conv1_1': 8,
})
torch.save(d2.state_dict(), 'pth_models/feature_invertor_conv2.pth')
## VGGEncoder3
vgg3 = load_lua('models/vgg_normalised_conv3_1_mask.t7')
e3 = VGGEncoder(3)
weight_assign(vgg3, e3, {
'conv0': 0,
'conv1_1': 2,
'conv1_2': 5,
'conv2_1': 9,
'conv2_2': 12,
'conv3_1': 16,
})
torch.save(e3.state_dict(), 'pth_models/vgg_normalised_conv3.pth')
## VGGDecoder3
inv3 = load_lua('models/feature_invertor_conv3_1_mask.t7')
d3 = VGGDecoder(3)
weight_assign(inv3, d3, {
'conv3_1': 1,
'conv2_2': 5,
'conv2_1': 8,
'conv1_2': 12,
'conv1_1': 15,
})
torch.save(d3.state_dict(), 'pth_models/feature_invertor_conv3.pth')
## VGGEncoder4
vgg4 = load_lua('models/vgg_normalised_conv4_1_mask.t7')
e4 = VGGEncoder(4)
weight_assign(vgg4, e4, {
'conv0': 0,
'conv1_1': 2,
'conv1_2': 5,
'conv2_1': 9,
'conv2_2': 12,
'conv3_1': 16,
'conv3_2': 19,
'conv3_3': 22,
'conv3_4': 25,
'conv4_1': 29,
})
torch.save(e4.state_dict(), 'pth_models/vgg_normalised_conv4.pth')
## VGGDecoder4
inv4 = load_lua('models/feature_invertor_conv4_1_mask.t7')
d4 = VGGDecoder(4)
weight_assign(inv4, d4, {
'conv4_1': 1,
'conv3_4': 5,
'conv3_3': 8,
'conv3_2': 11,
'conv3_1': 14,
'conv2_2': 18,
'conv2_1': 21,
'conv1_2': 25,
'conv1_1': 28,
})
torch.save(d4.state_dict(), 'pth_models/feature_invertor_conv4.pth')
p_wct = PhotoWCT()
photo_wct_loader(p_wct)
torch.save(p_wct.state_dict(), 'PhotoWCTModels/photo_wct.pth')
|
FastPhotoStyle-master
|
converter.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from models import VGGEncoder, VGGDecoder
class PhotoWCT(nn.Module):
def __init__(self):
super(PhotoWCT, self).__init__()
self.e1 = VGGEncoder(1)
self.d1 = VGGDecoder(1)
self.e2 = VGGEncoder(2)
self.d2 = VGGDecoder(2)
self.e3 = VGGEncoder(3)
self.d3 = VGGDecoder(3)
self.e4 = VGGEncoder(4)
self.d4 = VGGDecoder(4)
def transform(self, cont_img, styl_img, cont_seg, styl_seg):
self.__compute_label_info(cont_seg, styl_seg)
sF4, sF3, sF2, sF1 = self.e4.forward_multiple(styl_img)
cF4, cpool_idx, cpool1, cpool_idx2, cpool2, cpool_idx3, cpool3 = self.e4(cont_img)
sF4 = sF4.data.squeeze(0)
cF4 = cF4.data.squeeze(0)
# print(cont_seg)
csF4 = self.__feature_wct(cF4, sF4, cont_seg, styl_seg)
Im4 = self.d4(csF4, cpool_idx, cpool1, cpool_idx2, cpool2, cpool_idx3, cpool3)
cF3, cpool_idx, cpool1, cpool_idx2, cpool2 = self.e3(Im4)
sF3 = sF3.data.squeeze(0)
cF3 = cF3.data.squeeze(0)
csF3 = self.__feature_wct(cF3, sF3, cont_seg, styl_seg)
Im3 = self.d3(csF3, cpool_idx, cpool1, cpool_idx2, cpool2)
cF2, cpool_idx, cpool = self.e2(Im3)
sF2 = sF2.data.squeeze(0)
cF2 = cF2.data.squeeze(0)
csF2 = self.__feature_wct(cF2, sF2, cont_seg, styl_seg)
Im2 = self.d2(csF2, cpool_idx, cpool)
cF1 = self.e1(Im2)
sF1 = sF1.data.squeeze(0)
cF1 = cF1.data.squeeze(0)
csF1 = self.__feature_wct(cF1, sF1, cont_seg, styl_seg)
Im1 = self.d1(csF1)
return Im1
def __compute_label_info(self, cont_seg, styl_seg):
        if cont_seg.size == 0 or styl_seg.size == 0:
return
max_label = np.max(cont_seg) + 1
self.label_set = np.unique(cont_seg)
self.label_indicator = np.zeros(max_label)
for l in self.label_set:
# if l==0:
# continue
is_valid = lambda a, b: a > 10 and b > 10 and a / b < 100 and b / a < 100
o_cont_mask = np.where(cont_seg.reshape(cont_seg.shape[0] * cont_seg.shape[1]) == l)
o_styl_mask = np.where(styl_seg.reshape(styl_seg.shape[0] * styl_seg.shape[1]) == l)
self.label_indicator[l] = is_valid(o_cont_mask[0].size, o_styl_mask[0].size)
def __feature_wct(self, cont_feat, styl_feat, cont_seg, styl_seg):
cont_c, cont_h, cont_w = cont_feat.size(0), cont_feat.size(1), cont_feat.size(2)
styl_c, styl_h, styl_w = styl_feat.size(0), styl_feat.size(1), styl_feat.size(2)
cont_feat_view = cont_feat.view(cont_c, -1).clone()
styl_feat_view = styl_feat.view(styl_c, -1).clone()
        if cont_seg.size == 0 or styl_seg.size == 0:
target_feature = self.__wct_core(cont_feat_view, styl_feat_view)
else:
target_feature = cont_feat.view(cont_c, -1).clone()
if len(cont_seg.shape) == 2:
t_cont_seg = np.asarray(Image.fromarray(cont_seg).resize((cont_w, cont_h), Image.NEAREST))
else:
t_cont_seg = np.asarray(Image.fromarray(cont_seg, mode='RGB').resize((cont_w, cont_h), Image.NEAREST))
if len(styl_seg.shape) == 2:
t_styl_seg = np.asarray(Image.fromarray(styl_seg).resize((styl_w, styl_h), Image.NEAREST))
else:
t_styl_seg = np.asarray(Image.fromarray(styl_seg, mode='RGB').resize((styl_w, styl_h), Image.NEAREST))
for l in self.label_set:
if self.label_indicator[l] == 0:
continue
cont_mask = np.where(t_cont_seg.reshape(t_cont_seg.shape[0] * t_cont_seg.shape[1]) == l)
styl_mask = np.where(t_styl_seg.reshape(t_styl_seg.shape[0] * t_styl_seg.shape[1]) == l)
if cont_mask[0].size <= 0 or styl_mask[0].size <= 0:
continue
cont_indi = torch.LongTensor(cont_mask[0])
styl_indi = torch.LongTensor(styl_mask[0])
if self.is_cuda:
cont_indi = cont_indi.cuda(0)
styl_indi = styl_indi.cuda(0)
cFFG = torch.index_select(cont_feat_view, 1, cont_indi)
sFFG = torch.index_select(styl_feat_view, 1, styl_indi)
# print(len(cont_indi))
# print(len(styl_indi))
tmp_target_feature = self.__wct_core(cFFG, sFFG)
# print(tmp_target_feature.size())
if torch.__version__ >= "0.4.0":
# This seems to be a bug in PyTorch 0.4.0 to me.
new_target_feature = torch.transpose(target_feature, 1, 0)
new_target_feature.index_copy_(0, cont_indi, \
torch.transpose(tmp_target_feature,1,0))
target_feature = torch.transpose(new_target_feature, 1, 0)
else:
target_feature.index_copy_(1, cont_indi, tmp_target_feature)
target_feature = target_feature.view_as(cont_feat)
ccsF = target_feature.float().unsqueeze(0)
return ccsF
def __wct_core(self, cont_feat, styl_feat):
cFSize = cont_feat.size()
c_mean = torch.mean(cont_feat, 1) # c x (h x w)
c_mean = c_mean.unsqueeze(1).expand_as(cont_feat)
cont_feat = cont_feat - c_mean
iden = torch.eye(cFSize[0]) # .double()
if self.is_cuda:
iden = iden.cuda()
contentConv = torch.mm(cont_feat, cont_feat.t()).div(cFSize[1] - 1) + iden
# del iden
c_u, c_e, c_v = torch.svd(contentConv, some=False)
# c_e2, c_v = torch.eig(contentConv, True)
# c_e = c_e2[:,0]
k_c = cFSize[0]
for i in range(cFSize[0] - 1, -1, -1):
if c_e[i] >= 0.00001:
k_c = i + 1
break
sFSize = styl_feat.size()
s_mean = torch.mean(styl_feat, 1)
styl_feat = styl_feat - s_mean.unsqueeze(1).expand_as(styl_feat)
styleConv = torch.mm(styl_feat, styl_feat.t()).div(sFSize[1] - 1)
s_u, s_e, s_v = torch.svd(styleConv, some=False)
k_s = sFSize[0]
for i in range(sFSize[0] - 1, -1, -1):
if s_e[i] >= 0.00001:
k_s = i + 1
break
c_d = (c_e[0:k_c]).pow(-0.5)
step1 = torch.mm(c_v[:, 0:k_c], torch.diag(c_d))
step2 = torch.mm(step1, (c_v[:, 0:k_c].t()))
whiten_cF = torch.mm(step2, cont_feat)
s_d = (s_e[0:k_s]).pow(0.5)
targetFeature = torch.mm(torch.mm(torch.mm(s_v[:, 0:k_s], torch.diag(s_d)), (s_v[:, 0:k_s].t())), whiten_cF)
targetFeature = targetFeature + s_mean.unsqueeze(1).expand_as(targetFeature)
return targetFeature
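    # Whitening-colouring transform: centre the content features, whiten them with
    # C_c^(-1/2) (via SVD of the covariance, keeping eigenvalues >= 1e-5), then colour
    # with the style covariance's C_s^(1/2) and add back the style mean.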
@property
def is_cuda(self):
return next(self.parameters()).is_cuda
def forward(self, *input):
pass
|
FastPhotoStyle-master
|
photo_wct.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import division
from PIL import Image
from torch import nn
import numpy as np
import cv2
from cv2.ximgproc import guidedFilter
class GIFSmoothing(nn.Module):
def forward(self, *input):
pass
def __init__(self, r, eps):
super(GIFSmoothing, self).__init__()
self.r = r
self.eps = eps
def process(self, initImg, contentImg):
return self.process_opencv(initImg, contentImg)
def process_opencv(self, initImg, contentImg):
'''
:param initImg: intermediate output. Either image path or PIL Image
        :param contentImg: original content image. Either path or PIL Image
:return: stylized output image. PIL Image
'''
if type(initImg) == str:
init_img = cv2.imread(initImg)
init_img = init_img[2:-2,2:-2,:]
else:
init_img = np.array(initImg)[:, :, ::-1].copy()
if type(contentImg) == str:
cont_img = cv2.imread(contentImg)
else:
cont_img = np.array(contentImg)[:, :, ::-1].copy()
output_img = guidedFilter(guide=cont_img, src=init_img, radius=self.r, eps=self.eps)
output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2RGB)
output_img = Image.fromarray(output_img)
return output_img
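    # Guided-filter smoothing is the fast alternative to the matting-Laplacian Propagator:
    # the stylized image is filtered with the content image as the guide, so colours are
    # smoothed within regions while content edges are preserved.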
|
FastPhotoStyle-master
|
photo_gif.py
|
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import print_function
import argparse
import torch
import process_stylization
from photo_wct import PhotoWCT
parser = argparse.ArgumentParser(description='Photorealistic Image Stylization')
parser.add_argument('--model', default='./PhotoWCTModels/photo_wct.pth')
parser.add_argument('--content_image_path', default='./images/content1.png')
parser.add_argument('--content_seg_path', default=[])
parser.add_argument('--style_image_path', default='./images/style1.png')
parser.add_argument('--style_seg_path', default=[])
parser.add_argument('--output_image_path', default='./results/example1.png')
parser.add_argument('--save_intermediate', action='store_true', default=False)
parser.add_argument('--fast', action='store_true', default=False)
parser.add_argument('--no_post', action='store_true', default=False)
parser.add_argument('--cuda', type=int, default=1, help='Enable CUDA.')
args = parser.parse_args()
# Load model
p_wct = PhotoWCT()
p_wct.load_state_dict(torch.load(args.model))
if args.fast:
from photo_gif import GIFSmoothing
p_pro = GIFSmoothing(r=35, eps=0.001)
else:
from photo_smooth import Propagator
p_pro = Propagator()
if args.cuda:
p_wct.cuda(0)
process_stylization.stylization(
stylization_module=p_wct,
smoothing_module=p_pro,
content_image_path=args.content_image_path,
style_image_path=args.style_image_path,
content_seg_path=args.content_seg_path,
style_seg_path=args.style_seg_path,
output_image_path=args.output_image_path,
cuda=args.cuda,
save_intermediate=args.save_intermediate,
no_post=args.no_post
)
|
FastPhotoStyle-master
|
demo.py
|
# Copyright (c) 2020–2021, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import re
default_app_name = "augment"
default_input_file = os.path.join("data", "WA_Fn-UseC_-Telco-Customer-Churn-.csv")
default_output_prefix = ""
default_output_mode = "overwrite"
default_output_kind = "parquet"
default_dup_times = 100
parser = argparse.ArgumentParser()
parser.add_argument('--input-file', help='supplied input data (default="%s")' % default_input_file, default=default_input_file)
parser.add_argument('--output-mode', help='Spark data source output mode for the result (default: overwrite)', default=default_output_mode)
parser.add_argument('--output-prefix', help='text to prepend to every output file (e.g., "hdfs:///churn-data/"; the default is empty)', default=default_output_prefix)
parser.add_argument('--output-kind', help='output Spark data source type for the result (default: parquet)', default=default_output_kind)
parser.add_argument('--dup-times', help='scale factor for augmented results (default: 100)', default=default_dup_times, type=int)
# Note: argparse's type=bool treats any non-empty string (even "False") as True; pass an empty string to disable.
parser.add_argument('--use-decimal', help='use DecimalType for currencies (default: True)', default=True, type=bool)
parser.add_argument('--decimal-precision', help='set currency precision (default: 8; minimum: 6)', default=8, type=int)
parser.add_argument('--log-level', help='set log level (default: OFF)', default="OFF")
if __name__ == '__main__':
import pyspark
args = parser.parse_args()
import churn.augment
churn.augment.register_options(
app_name = default_app_name,
input_file = args.input_file,
output_prefix = args.output_prefix,
output_mode = args.output_mode,
output_kind = args.output_kind,
dup_times = args.dup_times,
use_decimal = args.use_decimal,
decimal_precision = args.decimal_precision
)
session = pyspark.sql.SparkSession.builder.\
appName(churn.augment.options['app_name']).\
getOrCreate()
session.sparkContext.setLogLevel(args.log_level)
from churn.augment import load_supplied_data
df = load_supplied_data(session, args.input_file)
from churn.augment import billing_events
billingEvents = billing_events(df)
from churn.augment import customer_meta
customerMeta = customer_meta(df)
from churn.augment import phone_features
customerPhoneFeatures = phone_features(df)
from churn.augment import internet_features
customerInternetFeatures = internet_features(df)
from churn.augment import account_features
customerAccountFeatures = account_features(df)
from churn.augment import write_df
write_df(billingEvents, "billing_events", partition_by="month")
write_df(customerMeta, "customer_meta", skip_replication=True)
write_df(customerPhoneFeatures, "customer_phone_features")
write_df(customerInternetFeatures.orderBy("customerID"), "customer_internet_features")
write_df(customerAccountFeatures, "customer_account_features")
print("sanity-checking outputs")
import pyspark.sql.functions as F
from functools import reduce
output_dfs = []
for f in ["billing_events", "customer_meta", "customer_phone_features", "customer_internet_features", "customer_account_features"]:
output_dfs.append(
session.read.parquet(churn.augment.resolve_path(f)).select(
F.lit(f).alias("table"),
"customerID"
)
)
all_customers = reduce(lambda l, r: l.unionAll(r), output_dfs)
each_table = all_customers.groupBy("table").agg(F.approx_count_distinct("customerID").alias("approx_unique_customers"))
overall = all_customers.groupBy(F.lit("all").alias("table")).agg(F.approx_count_distinct("customerID").alias("approx_unique_customers"))
counts = dict([(row[0], row[1]) for row in each_table.union(overall).collect()])
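    # Sanity check: every output table should cover the same customers; approximate distinct
    # counts are compared first, and only on a mismatch are exact distinct counts computed.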
if counts['billing_events'] != counts['all']:
print("warning: approximate customer counts for billing events and union of all tables differ")
print("warning: counts were as follows: ")
for k,v in counts.items():
print(" - %s -> %d" % (k, v))
print("warning: doing precise counts now")
        # Use the union of per-table customer IDs (not the aggregated counts) for the precise check.
        billing_customers = all_customers.where(F.col("table") == "billing_events").select("customerID").distinct().count()
        all_customers = all_customers.select("customerID").distinct().count()
assert all_customers == billing_customers, "precise counts of customers differ from the billing_events table and the union of all tables; this indicates spurious customer IDs in some table. Please file an issue."
else:
print("info: approximate counts seem okay!")
|
data-science-blueprints-main
|
churn/generate.py
|
#!/usr/bin/env python
# coding: utf-8
import os
default_spark_master = "local[*]"
app_name = "data-summary"
default_input_file = "churn-etl"
default_output_prefix = ""
default_input_kind = "parquet"
import argparse
import pyspark
import pyspark.sql.types as T
import pyspark.sql.functions as F
parser = argparse.ArgumentParser()
parser.add_argument('--input-file', help='supplied input data (default="%s")' % default_input_file, default=default_input_file)
parser.add_argument('--input-kind', help='Spark data source type for the input (default="%s")' % default_input_kind, default=default_input_kind)
parser.add_argument('--output-prefix', help='text to prepend to every output file (the default is empty)', default=default_output_prefix)
def isnumeric(data_type):
numeric_types = [T.ByteType, T.ShortType, T.IntegerType, T.LongType, T.FloatType, T.DoubleType, T.DecimalType]
return any([isinstance(data_type, t) for t in numeric_types])
def percent_true(df, cols):
denominator = df.count()
return {col : df.where(F.col(col) == True).count() / denominator for col in cols}
def approx_cardinalities(df, cols):
from functools import reduce
counts = df.groupBy(
F.lit(True).alias("drop_me")
).agg(
F.count('*').alias("total"),
*[F.approx_count_distinct(F.col(c)).alias(c) for c in cols]
).drop("drop_me").cache()
result = reduce(lambda l, r: l.unionAll(r), [counts.select(F.lit(c).alias("field"), F.col(c).alias("approx_count")) for c in counts.columns]).collect()
counts.unpersist()
return dict([(r[0],r[1]) for r in result])
def likely_unique(counts):
total = counts["total"]
return [k for (k, v) in counts.items() if k != "total" and abs(total - v) < total * 0.15]
def likely_categoricals(counts):
total = counts["total"]
return [k for (k, v) in counts.items() if v < total * 0.15 or v < 128]
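# Heuristics over the approximate counts: a string column is treated as (likely) unique when its
# distinct count is within 15% of the row count, and as categorical when it has fewer distinct
# values than 15% of the rows or fewer than 128 overall.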
def unique_values(df, cols):
from functools import reduce
counts = df.groupBy(
F.lit(True).alias("drop_me")
).agg(
*[F.array_sort(F.collect_set(F.col(c))).alias(c) for c in cols]
).drop("drop_me").cache()
result = reduce(lambda l, r: l.unionAll(r), [counts.select(F.lit(c).alias("field"), F.col(c).alias("unique_vals")) for c in counts.columns]).collect()
counts.unpersist()
return dict([(r[0],r[1]) for r in result])
def approx_ecdf(df, cols):
from functools import reduce
quantiles = [0.0, 0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 1.0]
qs = df.approxQuantile(cols, quantiles, 0.01)
result = dict(zip(cols, qs))
return {c: dict(zip(quantiles, vs)) for (c, vs) in result.items()}
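# approxQuantile computes the listed quantiles for every column in one pass with 1% relative
# error; the result maps column -> {quantile: value}, i.e. a coarse empirical CDF per column.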
def gen_summary(df):
summary = {}
string_cols = []
boolean_cols = []
numeric_cols = []
other_cols = []
for field in df.schema.fields:
if isinstance(field.dataType, T.StringType):
string_cols.append(field.name)
elif isinstance(field.dataType, T.BooleanType):
boolean_cols.append(field.name)
elif isnumeric(field.dataType):
numeric_cols.append(field.name)
else:
other_cols.append(field.name)
cardinalities = approx_cardinalities(df, string_cols)
uniques = likely_unique(cardinalities)
categoricals = unique_values(df, likely_categoricals(cardinalities))
encoding_struct = {
"categorical" : categoricals,
"numeric" : numeric_cols + boolean_cols,
"unique": uniques
}
summary["schema"] = df.schema.jsonValue()
summary["ecdfs"] = approx_ecdf(df, numeric_cols)
summary["true_percentage"] = percent_true(df, boolean_cols)
summary["encoding"] = encoding_struct
return summary
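# The summary therefore holds: the Spark schema, approximate ECDFs for numeric columns, the
# fraction of True values for boolean columns, and an encoding block listing categorical levels,
# numeric/boolean columns, and likely-unique key columns.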
if __name__ == "__main__":
import json
args = parser.parse_args()
session = pyspark.sql.SparkSession.builder.appName(app_name).getOrCreate()
df = session.read.parquet("%s.%s" % (args.input_file, args.input_kind))
summary = gen_summary(df)
with open("%ssummary.json" % args.output_prefix, "w") as sf:
json.dump(summary, sf)
with open("%sencodings.json" % args.output_prefix, "w") as ef:
json.dump(summary["encoding"], ef)
|
data-science-blueprints-main
|
churn/summarize.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020–2021, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from churn.etl import cast_and_coalesce_wide_data
import os
import sys
import re
import json
app_name = "churn-etl"
default_input_files = dict(
billing="billing_events",
account_features="customer_account_features",
internet_features="customer_internet_features",
meta="customer_meta",
phone_features="customer_phone_features"
)
default_output_file = "churn-etl"
default_output_prefix = ""
default_input_prefix = ""
default_output_mode = "overwrite"
default_output_kind = "parquet"
default_input_kind = "parquet"
parser = argparse.ArgumentParser()
parser.add_argument('--output-file', help='location for denormalized output data (default="%s")' % default_output_file, default=default_output_file)
parser.add_argument('--output-mode', help='Spark data source output mode for the result (default: overwrite)', default=default_output_mode)
parser.add_argument('--input-prefix', help='text to prepend to every input file path (e.g., "hdfs:///churn-raw-data/"; the default is empty)', default=default_input_prefix)
parser.add_argument('--output-prefix', help='text to prepend to every output file (e.g., "hdfs:///churn-data-etl/"; the default is empty)', default=default_output_prefix)
parser.add_argument('--output-kind', help='output Spark data source type for the result (default: parquet)', default=default_output_kind)
parser.add_argument('--input-kind', help='Spark data source type for the input (default: parquet)', default=default_input_kind)
parser.add_argument('--summary-prefix', help='text to prepend to analytic reports (e.g., "reports/"; default is empty)', default='')
parser.add_argument('--report-file', help='location in which to store a performance report', default='report.txt')
parser.add_argument('--log-level', help='set log level (default: OFF)', default="OFF")
parser.add_argument('--coalesce-output', help='coalesce output to NUM partitions', default=0, type=int)
parser.add_argument('--use-calendar-arithmetic', help='use add_months() function (default: False)', action='store_const', const=True, default=False)
parser.add_argument('--skip-eda', help='skip analytic reporting; federate only (default: False)', action='store_const', const=True, default=False)
parser.add_argument('--debug-nulls', help='print out records containing NULLs as JSON objects (default: False)', action='store_const', const=True, default=False)
if __name__ == '__main__':
import pyspark
import os
failed = False
args = parser.parse_args()
session = pyspark.sql.SparkSession.builder \
.appName(app_name) \
.getOrCreate()
session.sparkContext.setLogLevel(args.log_level)
import churn.etl
import churn.eda
input_files = {k: "%s%s" % (args.input_prefix, v) for k, v in default_input_files.items()}
churn.etl.register_options(
app_name = app_name,
input_files = input_files,
output_prefix = args.output_prefix,
output_mode = args.output_mode,
output_kind = args.output_kind,
input_kind = args.input_kind,
output_file = args.output_file,
coalesce_output = args.coalesce_output,
use_calendar_arithmetic = args.use_calendar_arithmetic
)
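    # Federation pipeline: read each normalized source table, derive per-customer
    # features from it, and join everything into one wide table keyed by customerID
    # (the individual join helpers live in churn/churn/etl.py).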
from churn.etl import read_df
billing_events = read_df(session, input_files["billing"])
from churn.etl import join_billing_data
customer_billing = join_billing_data(billing_events)
from churn.etl import customers as get_customers
customers = get_customers()
phone_features = read_df(session, input_files["phone_features"])
from churn.etl import join_phone_features
customer_phone_features = join_phone_features(phone_features)
internet_features = read_df(session, input_files["internet_features"])
from churn.etl import join_internet_features
customer_internet_features = join_internet_features(internet_features)
account_features = read_df(session, input_files["account_features"])
from churn.etl import join_account_features
customer_account_features = join_account_features(account_features)
account_meta = read_df(session, input_files["meta"])
from churn.etl import process_account_meta
customer_account_meta = process_account_meta(account_meta)
from churn.etl import join_wide_table
wide_data = join_wide_table(customer_billing, customer_phone_features, customer_internet_features, customer_account_features, customer_account_meta)
from churn.etl import write_df
import timeit
temp_output_file = "intermediate-" + churn.etl.options['output_file']
output_file = churn.etl.options['output_file']
output_kind = churn.etl.options['output_kind']
output_prefix = churn.etl.options['output_prefix']
federation_time = timeit.timeit(lambda: write_df(wide_data, temp_output_file), number=1)
# prepare data for training by casting decimals to floats and coalescing
coalesce_time = timeit.timeit(lambda: write_df(cast_and_coalesce_wide_data(session.read.parquet(output_prefix + temp_output_file + "." + output_kind)), output_file), number=1)
records = session.read.parquet(output_prefix + output_file + "." + output_kind)
record_count = records.count()
record_nonnull_count = records.dropna().count()
if not args.skip_eda:
analysis_time = timeit.timeit(lambda: churn.eda.output_reports(records, billing_events, args.summary_prefix), number=1)
first_line = "Completed analytics pipeline (version %s)\n" % churn.etl.ETL_VERSION
if not args.skip_eda:
first_line += 'Total time was %.02f to generate and process %d records\n' % (analysis_time + federation_time + coalesce_time, record_count)
first_line += 'Analytics and reporting took %.02f seconds\n' % analysis_time
else:
first_line += 'We ran with --skip-eda; not reporting analytics time\n'
first_line += 'Coalescing and casting data for reporting and ML took %.02f seconds\n' % coalesce_time
first_line += 'Federation took %.02f seconds; configuration follows:\n\n' % federation_time
print(first_line)
if record_nonnull_count != record_count:
nulls = record_count - record_nonnull_count
null_percent = (float(nulls) / record_count) * 100
print('ERROR: analytics job generated %d records with nulls (%.02f%% of total)' % (nulls, null_percent))
failed = True
if args.debug_nulls:
for row in records.subtract(records.dropna()).collect():
print(json.dumps(row.asDict()))
with open(args.report_file, "w") as report:
report.write(first_line + "\n")
for conf in session.sparkContext.getConf().getAll():
report.write(str(conf) + "\n")
print(conf)
session.stop()
if failed:
print("Job failed (most likely due to nulls in output); check logs for lines beginning with ERROR")
sys.exit(1)
|
data-science-blueprints-main
|
churn/do-analytics.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020–2021, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyspark
import pyspark.sql
import pyspark.sql.functions as F
from collections import defaultdict
options = defaultdict(lambda: None)
session = None
ETL_VERSION = '0.7'
def register_options(**kwargs):
global options
for k, v in kwargs.items():
options[k] = v
def _register_session(s):
global session
session = s
def _register_views(lvars, *names):
for n in names:
if n in lvars:
lvars[n].createOrReplaceTempView(n)
def withsession(df_arg=0):
def decorate(fn):
        def wrapped(*args, **kwargs):
            _register_session(args[df_arg].sql_ctx.sparkSession)
            return fn(*args, **kwargs)
return wrapped
return decorate
def read_df(session, fn):
kwargs = {}
_register_session(session)
input_kind = options["input_kind"]
if input_kind == "csv":
kwargs["header"] = True
return getattr(session.read, input_kind)("%s.%s" % (fn, input_kind), **kwargs)
def find_customers(billing_events_df):
customers = billing_events_df.select("customerID").distinct()
if 'cache_customers' in options:
customers.cache()
customers.createOrReplaceTempView("customers")
return customers
def customers():
global session
return session.table("customers")
def join_billing_data(billing_events_df):
_register_session(billing_events_df.sql_ctx.sparkSession)
billing_events = billing_events_df.withColumn("value", billing_events_df.value)
customers = find_customers(billing_events)
counts_and_charges = billing_events.groupBy("customerID", "kind").agg(
F.count(billing_events.value).alias("event_counts"),
F.sum(billing_events.value).alias("total_charges"),
)
counts_and_charges.createOrReplaceTempView("counts_and_charges")
terminations = billing_events.where(F.col("kind") == "AccountTermination").select(
F.col("customerID").alias("Churn")
)
churned = customers.join(
terminations, customers.customerID == terminations.Churn, how="leftouter"
).select(
"customerID", F.when(F.col("Churn").isNull(), F.lit(False)).otherwise(F.lit(True)).alias("Churn")
)
customer_charges = customers.join(
counts_and_charges.where(F.col("kind") == "Charge"), "customerID", how="leftouter"
).select(
"customerID",
F.col("event_counts").alias("tenure"),
F.col("total_charges").alias("TotalCharges"),
).fillna({'tenure': 0, 'TotalCharges': 0.0})
_register_views(locals(), "counts_and_charges", "terminations", "churned", "customer_charges")
customer_billing = churned.join(customer_charges, "customerID")
_register_views(locals(), "counts_and_charges", "terminations", "churned", "customer_charges", "customer_billing")
return customer_billing
def join_phone_features(phone_features_df):
phone_features = phone_features_df
phone_service = phone_features.where(F.col("feature") == "PhoneService").select(
"customerID", F.lit("Yes").alias("PhoneService")
)
multiple_lines = phone_features.where(F.col("feature") == "MultipleLines").select(
"customerID", F.lit("Yes").alias("MultipleLines")
)
customer_phone_features = (
customers().join(phone_service, "customerID", how="leftouter")
.join(multiple_lines, "customerID", how="leftouter")
.select(
"customerID",
F.when(F.col("PhoneService").isNull(), "No")
.otherwise("Yes")
.alias("PhoneService"),
"MultipleLines",
)
.select(
"customerID",
"PhoneService",
F.when(F.col("PhoneService") == "No", "No phone service")
.otherwise(F.when(F.col("MultipleLines").isNull(), "No").otherwise("Yes"))
.alias("MultipleLines"),
)
)
_register_views(locals(), "phone_service", "multiple_lines", "customer_phone_features")
return customer_phone_features
def untidy_feature(df, feature):
""" 'untidies' a feature by turning it into a column """
return df.where(F.col("feature") == feature).select(
"customerID", F.col("value").alias(feature)
)
def chained_join(column, base_df, dfs, how="leftouter"):
""" repeatedly joins a sequence of data frames on the same column """
acc = base_df
for df in dfs:
acc = acc.join(df, column, how=how)
return acc
def resolve_nullable_column(df, col, null_val="No"):
return F.when(df[col].isNull(), null_val).otherwise(df[col]).alias(col)
def resolve_dependent_column(
df,
col,
parent_col="InternetService",
null_val="No",
null_parent_val="No internet service",
):
return (
F.when((df[parent_col] == "No") | (df[parent_col].isNull()), null_parent_val)
.otherwise(F.when(df[col].isNull(), null_val).otherwise(df[col]))
.alias(col)
)
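# A minimal sketch (defined here for illustration only; nothing in this module calls
# it) of how untidy_feature, chained_join, and resolve_nullable_column compose: it
# pivots a tidy (customerID, feature, value) frame into one column per feature and
# fills missing rows with "No". The tiny in-memory frame below is hypothetical.
def _untidy_join_example(spark):
    tidy = spark.createDataFrame(
        [("c1", "Contract", "Month-to-month"), ("c2", "PaperlessBilling", "Yes")],
        ["customerID", "feature", "value"],
    )
    base = tidy.select("customerID").distinct()
    wide = chained_join(
        "customerID",
        base,
        [untidy_feature(tidy, "Contract"), untidy_feature(tidy, "PaperlessBilling")],
    )
    return wide.select(
        "customerID",
        resolve_nullable_column(wide, "Contract"),
        resolve_nullable_column(wide, "PaperlessBilling"),
    )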
def join_internet_features(internet_features_df):
internet_features = internet_features_df
internet_service = untidy_feature(internet_features, "InternetService")
online_security = untidy_feature(internet_features, "OnlineSecurity")
online_backup = untidy_feature(internet_features, "OnlineBackup")
device_protection = untidy_feature(internet_features, "DeviceProtection")
tech_support = untidy_feature(internet_features, "TechSupport")
streaming_tv = untidy_feature(internet_features, "StreamingTV")
streaming_movies = untidy_feature(internet_features, "StreamingMovies")
customer_internet_features = chained_join(
"customerID",
customers(),
[
internet_service,
online_security,
online_backup,
device_protection,
tech_support,
streaming_tv,
streaming_movies,
],
)
customer_internet_features = customer_internet_features.select(
"customerID",
resolve_nullable_column(customer_internet_features, "InternetService"),
resolve_dependent_column(
customer_internet_features, "OnlineSecurity", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "OnlineBackup", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "DeviceProtection", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "TechSupport", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "StreamingTV", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "StreamingMovies", "InternetService"
),
)
_register_views(locals(),
"internet_service",
"online_security",
"online_backup",
"device_protection",
"tech_support",
"streaming_tv",
"streaming_movies",
"customer_internet_features"
)
return customer_internet_features
def join_account_features(account_features_df):
account_features = account_features_df
contracts = untidy_feature(account_features, "Contract")
paperless = untidy_feature(account_features, "PaperlessBilling")
payment = untidy_feature(account_features, "PaymentMethod")
customer_account_features = chained_join(
"customerID", customers(), [contracts, paperless, payment]
)
customer_account_features = customer_account_features.select(
"customerID",
"Contract",
resolve_nullable_column(customer_account_features, "PaperlessBilling"),
"PaymentMethod",
)
_register_views(locals(), "contracts", "paperless", "payment", "customer_account_features")
return customer_account_features
def process_account_meta(account_meta_df, usecal=None):
    def is_senior_citizen(nowcol, dobcol):
        if options['use_calendar_arithmetic']:
            return F.when(
                F.col(nowcol) >= F.add_months(
                    F.col(dobcol), 65 * 12
                ), F.lit(True)
            ).otherwise(F.lit(False))
        else:
            # fallback without add_months(): a customer is a senior citizen once at
            # least 65 full years have elapsed between the date of birth and "now"
            return (
                (F.year(F.col(nowcol)) > (F.year(F.col(dobcol)) + 65)) |
                (
                    (F.year(F.col(nowcol)) == (F.year(F.col(dobcol)) + 65)) &
                    (
                        (F.month(F.col(nowcol)) > F.month(F.col(dobcol))) |
                        (
                            (F.month(F.col(nowcol)) == F.month(F.col(dobcol))) &
                            (F.dayofmonth(F.col(nowcol)) >= F.dayofmonth(F.col(dobcol)))
                        )
                    )
                )
            )
customer_account_meta = account_meta_df.select(
"customerID",
is_senior_citizen("now", "dateOfBirth").alias("SeniorCitizen"),
"Partner",
"Dependents",
"gender",
"MonthlyCharges",
)
_register_views(locals(), "customer_account_meta")
return customer_account_meta
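# A plain-Python sanity check (illustrative only; not used by the pipeline) of the
# fallback senior-citizen rule above: a customer is a senior once at least 65 full
# years separate dateOfBirth from "now". The dates below are made up.
def _is_senior_example():
    import datetime
    def is_senior(now, dob):
        return (now.year, now.month, now.day) >= (dob.year + 65, dob.month, dob.day)
    assert is_senior(datetime.date(2021, 1, 15), datetime.date(1955, 12, 1))      # already 65
    assert not is_senior(datetime.date(2021, 1, 15), datetime.date(1956, 12, 1))  # turns 65 in Dec 2021
    return True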
def forcefloat(c):
return F.col(c).cast("float").alias(c)
def join_wide_table(customer_billing, customer_phone_features, customer_internet_features, customer_account_features, customer_account_meta):
wide_data = chained_join(
"customerID",
customers(),
[
customer_billing,
customer_phone_features,
customer_internet_features,
customer_account_features,
customer_account_meta,
],
).select(
"customerID",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"tenure",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
"MonthlyCharges",
"TotalCharges",
"Churn",
)
return wide_data
def cast_and_coalesce_wide_data(wd):
if options["coalesce_output"] > 0:
wd = wd.coalesce(options["coalesce_output"])
return wd.select(
"customerID",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"tenure",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
forcefloat("MonthlyCharges"),
forcefloat("TotalCharges"),
"Churn",
)
def write_df(df, name):
output_kind = options["output_kind"]
output_mode = options["output_mode"]
output_prefix = options["output_prefix"]
name = "%s.%s" % (name, output_kind)
if output_prefix != "":
name = "%s%s" % (output_prefix, name)
kwargs = {}
if output_kind == "csv":
kwargs["header"] = True
getattr(df.write.mode(output_mode), output_kind)(name, **kwargs)
|
data-science-blueprints-main
|
churn/churn/etl.py
|
# Copyright (c) 2020–2021, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pyspark
from pyspark.sql.types import StructType, StructField, StringType, DoubleType, DecimalType
import pyspark.sql.functions as F
from collections import defaultdict
options = defaultdict(lambda: None)
now = datetime.datetime.now(datetime.timezone.utc)
AUGMENT_VERSION = "0.7"
AUGMENT_CUSTOMER_TAG = "0007"
session = None
currencyType = None
def get_currency_type():
global options
global currencyType
if currencyType is not None:
return currencyType
if "use_decimal" in options and options["use_decimal"]:
if "decimal_precision" in options :
assert options["decimal_precision"] > 5, "Decimal precision is too small; was %d but should be at least 6" % options["decimal_precision"]
currencyType = DecimalType(options["decimal_precision"], 2)
else:
# "999,999.99 should be enough for anyone"
currencyType = DecimalType(8, 2)
else:
currencyType = DoubleType()
return currencyType
def _register_session(s):
global session
session = s
def _get_uniques(ct):
global session
table_names = set([table.name for table in session.catalog.listTables()])
if ("uniques_%d" % ct) in table_names:
return session.table("uniques_%d" % ct)
else:
def str_part(seed=0x5CA1AB1E):
"generate the string part of a unique ID"
import random
r = random.Random(seed)
from base64 import b64encode
while True:
yield "%s-%s" % (b64encode(r.getrandbits(72).to_bytes(9, "big"), b"@_").decode(
"utf-8"
), AUGMENT_CUSTOMER_TAG)
sp = str_part()
uniques = (
session.createDataFrame(
schema=StructType([StructField("u_value", StringType())]),
data=[dict(u_value=next(sp)) for _ in range(min(int(ct * 1.02), ct + 2))],
)
.distinct()
.orderBy("u_value")
.limit(ct)
).cache()
uc = uniques.count()
assert (uc == ct), "due to prng collision we had %d instead of %d replicas" % (uc, ct)
uniques.createOrReplaceTempView("uniques_%d" % ct)
return uniques
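# Illustrative sketch (not used by the pipeline): str_part above yields a
# deterministic stream of "<base64>-0007" suffixes for a fixed seed; _get_uniques
# materializes a pool of them that replicate_df cross-joins against every customer.
# The helper below just previews the first few suffixes with the default seed.
def _suffix_example(n=3, seed=0x5CA1AB1E):
    import random
    from base64 import b64encode
    r = random.Random(seed)
    return [
        "%s-%s" % (
            b64encode(r.getrandbits(72).to_bytes(9, "big"), b"@_").decode("utf-8"),
            AUGMENT_CUSTOMER_TAG,
        )
        for _ in range(n)
    ]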
def register_options(**kwargs):
global options
for k, v in kwargs.items():
options[k] = v
def load_supplied_data(session, input_file):
_register_session(session)
fields = [
"customerID",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"tenure",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
"MonthlyCharges",
"TotalCharges",
"Churn",
]
double_fields = set(["tenure", "MonthlyCharges", "TotalCharges"])
schema = pyspark.sql.types.StructType(
[
pyspark.sql.types.StructField(
f, DoubleType() if f in double_fields else StringType()
)
for f in fields
]
)
df = session.read.csv(input_file, header=True, schema=schema)
source_count = df.count()
df = df.dropna()
nn_count = df.count()
if source_count == nn_count:
print("read %d records from source dataset with no nulls -- is this what you expect?" % source_count)
else:
print("read %d records from source dataset (%d non-null records)" % (source_count, nn_count))
return df
def replicate_df(df, duplicates):
if duplicates > 1:
uniques = _get_uniques(duplicates)
df = (
df.crossJoin(uniques.distinct())
.withColumn("customerID", F.format_string("%s-%s", "customerID", "u_value"))
.drop("u_value")
)
return df
def examine_categoricals(df, columns=None):
""" Returns (to driver memory) a list of tuples consisting of every unique value
for each column in `columns` or for every categorical column in the source
data if no columns are specified """
default_columns = [
"SeniorCitizen",
"Partner",
"Dependents",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
]
columns = columns or default_columns
return [(c, [row[0] for row in df.select(c).distinct().rdd.collect()]) for c in columns]
def billing_events(df):
import datetime
MAX_MONTH = 72
def get_last_month(col):
h = F.abs(F.xxhash64(col))
h1 = (h.bitwiseAND(0xff)) % (MAX_MONTH // 2)
h2 = (F.shiftRight(h, 8).bitwiseAND(0xff)) % (MAX_MONTH // 3)
h3 = (F.shiftRight(h, 16).bitwiseAND(0xff)) % (MAX_MONTH // 5)
h4 = (F.shiftRight(h, 24).bitwiseAND(0xff)) % (MAX_MONTH // 7)
h5 = (F.shiftRight(h, 32).bitwiseAND(0xff)) % (MAX_MONTH // 11)
return -(h1 + h2 + h3 + h4 + h5)
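    # get_last_month derives a deterministic pseudo-random month offset from the
    # customer ID: each of the five hash slices is reduced modulo a different
    # fraction of MAX_MONTH, so the offset ranges from 0 to
    # -(35 + 23 + 13 + 9 + 5) = -85 months before "now".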
w = pyspark.sql.Window.orderBy(F.lit("")).partitionBy(df.customerID)
charges = (
df.select(
df.customerID,
F.lit("Charge").alias("kind"),
F.explode(
F.array_repeat((df.TotalCharges / df.tenure).cast(get_currency_type()), df.tenure.cast("int"))
).alias("value"),
F.when(df.Churn == "Yes", get_last_month(df.customerID)).otherwise(0).alias("last_month")
)
.withColumn("now", F.lit(now).cast("date"))
.withColumn("month_number", -(F.row_number().over(w) + F.col("last_month")))
.withColumn("date", F.expr("add_months(now, month_number)"))
.drop("now", "month_number", "last_month")
)
serviceStarts = (
df.withColumn("last_month", F.when(df.Churn == "Yes", get_last_month(df.customerID)).otherwise(0)).select(
df.customerID,
F.lit("AccountCreation").alias("kind"),
F.lit(0.0).cast(get_currency_type()).alias("value"),
F.lit(now).alias("now"),
(-df.tenure - 1 + F.col("last_month")).alias("month_number"),
)
.withColumn("date", F.expr("add_months(now, month_number)"))
.drop("now", "month_number")
)
serviceTerminations = df.withColumn("last_month", F.when(df.Churn == "Yes", get_last_month(df.customerID)).otherwise(0)).where(
df.Churn == "Yes"
).withColumn("now", F.lit(now)).select(
df.customerID,
F.lit("AccountTermination").alias("kind"),
F.lit(0.0).cast(get_currency_type()).alias("value"),
F.expr("add_months(now, last_month)").alias("date")
)
billingEvents = charges.union(serviceStarts).union(serviceTerminations).orderBy("date").withColumn("month", F.substring("date", 0, 7))
return billingEvents
def resolve_path(name):
output_prefix = options["output_prefix"] or ""
output_mode = options["output_mode"] or "overwrite"
output_kind = options["output_kind"] or "parquet"
name = "%s.%s" % (name, output_kind)
if output_prefix != "":
name = "%s%s" % (output_prefix, name)
return name
def write_df(df, name, skip_replication=False, partition_by=None):
dup_times = options["dup_times"] or 1
output_prefix = options["output_prefix"] or ""
output_mode = options["output_mode"] or "overwrite"
output_kind = options["output_kind"] or "parquet"
if not skip_replication:
df = replicate_df(df, dup_times)
write = df.write
if partition_by is not None:
if type(partition_by) == str:
partition_by = [partition_by]
write = write.partitionBy(*partition_by)
name = "%s.%s" % (name, output_kind)
if output_prefix != "":
name = "%s%s" % (output_prefix, name)
kwargs = {}
if output_kind == "csv":
kwargs["header"] = True
getattr(write.mode(output_mode), output_kind)(name, **kwargs)
def customer_meta(df):
SENIOR_CUTOFF = 65
ADULT_CUTOFF = 18
DAYS_IN_YEAR = 365.25
EXPONENTIAL_DIST_SCALE = 6.3
augmented_original = replicate_df(df, options["dup_times"] or 1)
customerMetaRaw = augmented_original.select(
"customerID",
F.lit(now).alias("now"),
(F.abs(F.hash(augmented_original.customerID)) % 4096 / 4096).alias("choice"),
"SeniorCitizen",
"gender",
"Partner",
"Dependents",
F.col("MonthlyCharges").cast(get_currency_type()).alias("MonthlyCharges"),
)
customerMetaRaw = customerMetaRaw.withColumn(
"ageInDays",
F.floor(
F.when(
customerMetaRaw.SeniorCitizen == 0,
(
customerMetaRaw.choice
* ((SENIOR_CUTOFF - ADULT_CUTOFF - 1) * DAYS_IN_YEAR)
)
+ (ADULT_CUTOFF * DAYS_IN_YEAR),
).otherwise(
(SENIOR_CUTOFF * DAYS_IN_YEAR)
+ (
DAYS_IN_YEAR
* (-F.log1p(-customerMetaRaw.choice) * EXPONENTIAL_DIST_SCALE)
)
)
).cast("int"),
)
customerMetaRaw = customerMetaRaw.withColumn(
"dateOfBirth", F.expr("date_sub(now, ageInDays)")
)
return customerMetaRaw.select(
"customerID",
"dateOfBirth",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"MonthlyCharges",
"now",
).orderBy("customerID")
def phone_features(df):
phoneService = df.select(
"customerID", F.lit("PhoneService").alias("feature"), F.lit("Yes").alias("value")
).where(df.PhoneService == "Yes")
multipleLines = df.select(
"customerID", F.lit("MultipleLines").alias("feature"), F.lit("Yes").alias("value")
).where(df.MultipleLines == "Yes")
return phoneService.union(multipleLines).orderBy("customerID")
def internet_features(df):
internet_service = df.select(
"customerID",
F.lit("InternetService").alias("feature"),
df.InternetService.alias("value"),
).where(df.InternetService != "No")
customerInternetFeatures = internet_service
for feature in [
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
]:
tmpdf = df.select(
"customerID",
F.lit(feature).alias("feature"),
df[feature].alias("value"),
).where(df[feature] == "Yes")
customerInternetFeatures = customerInternetFeatures.union(tmpdf)
return customerInternetFeatures
def account_features(df):
session = df.sql_ctx.sparkSession
accountSchema = pyspark.sql.types.StructType(
[
pyspark.sql.types.StructField(f, StringType())
for f in ["customerID", "feature", "value"]
]
)
customerAccountFeatures = session.createDataFrame(schema=accountSchema, data=[])
for feature in ["Contract", "PaperlessBilling", "PaymentMethod"]:
tmpdf = df.select(
"customerID",
F.lit(feature).alias("feature"),
df[feature].alias("value"),
).where(df[feature] != "No")
customerAccountFeatures = customerAccountFeatures.union(tmpdf)
return customerAccountFeatures
def debug_augmentation(df):
return (
df.select("customerID")
.distinct()
.select(
"customerID",
F.substring("customerID", 0, 10).alias("originalID"),
F.element_at(F.split("customerID", "-", -1), 3).alias("suffix"),
)
)
|
data-science-blueprints-main
|
churn/churn/augment.py
|
# Copyright (c) 2020–2021, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql import types as T
from pyspark.sql import functions as F
eda_options = { 'use_array_ops' : False }
def isnumeric(data_type):
numeric_types = [T.ByteType, T.ShortType, T.IntegerType, T.LongType, T.FloatType, T.DoubleType, T.DecimalType]
return any([isinstance(data_type, t) for t in numeric_types])
def percent_true(df, cols):
denominator = df.count()
return {col : df.where(F.col(col) == True).count() / denominator for col in cols}
def cardinalities(df, cols):
from functools import reduce
counts = df.agg(
F.struct(*[F.countDistinct(F.col(c)).alias(c) for c in cols] + [F.count(F.col(cols[0])).alias('total')]).alias("results")
).select("results").collect()[0][0].asDict()
counts.update({'total' : df.count()})
return counts
def likely_unique(counts):
total = counts["total"]
return [k for (k, v) in counts.items() if k != "total" and abs(total - v) < total * 0.15]
def likely_categoricals(counts):
total = counts["total"]
return [k for (k, v) in counts.items() if v < total * 0.15 or v < 128]
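# Worked example (illustrative only) of the two heuristics above, using made-up
# counts for a 1,000-row frame: a column whose distinct count is within 15% of the
# row count is "likely unique", while a column with few distinct values relative to
# the row count (or fewer than 128) is "likely categorical".
def _cardinality_heuristics_example():
    counts = {"total": 1000, "customerID": 995, "PaymentMethod": 4}
    assert likely_unique(counts) == ["customerID"]
    assert "PaymentMethod" in likely_categoricals(counts)
    return counts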
def unique_values(df, cols):
if eda_options['use_array_ops']:
return unique_values_array(df, cols)
else:
return unique_values_driver(df, cols)
def unique_values_array(df, cols):
from functools import reduce
counts = df.groupBy(
F.lit(True).alias("drop_me")
).agg(
*[F.array_sort(F.collect_set(F.col(c))).alias(c) for c in cols]
).drop("drop_me").cache()
result = reduce(lambda l, r: l.unionAll(r), [counts.select(F.lit(c).alias("field"), F.col(c).alias("unique_vals")) for c in counts.columns]).collect()
return dict([(r[0],r[1]) for r in result])
def unique_values_driver(df, cols):
return { col : [v[0] for v in df.select(F.col(col).alias('value')).distinct().orderBy(F.col('value')).collect()] for col in cols}
def approx_ecdf(df, cols):
from functools import reduce
quantiles = [0.0, 0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 1.0]
qs = df.approxQuantile(cols, quantiles, 0.01)
result = dict(zip(cols, qs))
return {c: dict(zip(quantiles, vs)) for (c, vs) in result.items()}
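# approx_ecdf above returns, for each requested column, a mapping from quantile
# (0.0 through 1.0) to an approximate value computed with 1% relative error, e.g.
# {"MonthlyCharges": {0.0: ..., 0.5: ..., 1.0: ...}}.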
def gen_summary(df, output_prefix=""):
summary = {}
string_cols = []
boolean_cols = []
numeric_cols = []
other_cols = []
for field in df.schema.fields:
if isinstance(field.dataType, T.StringType):
string_cols.append(field.name)
elif isinstance(field.dataType, T.BooleanType):
boolean_cols.append(field.name)
elif isnumeric(field.dataType):
numeric_cols.append(field.name)
else:
other_cols.append(field.name)
counts = cardinalities(df, string_cols)
uniques = likely_unique(counts)
categoricals = unique_values(df, likely_categoricals(counts))
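    # For several tenure bucket widths, persist OLAP-style aggregates: cube() counts
    # every combination of the listed dimensions (including subtotals), while
    # rollup() sums TotalCharges over successively coarser prefixes of them.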
for span in [2,3,4,6,12]:
thecube = df.cube("Churn", F.ceil(df.tenure / span).alias("%d_month_spans" % span), "gender", "Partner", "SeniorCitizen", "Contract", "PaperlessBilling", "PaymentMethod", F.ceil(F.log2(F.col("MonthlyCharges"))*10).alias("log_charges")).count()
therollup = df.rollup("Churn", F.ceil(df.tenure / span).alias("%d_month_spans" % span), "SeniorCitizen", "Contract", "PaperlessBilling", "PaymentMethod", F.ceil(F.log2(F.col("MonthlyCharges"))*10).alias("log_charges")).agg(F.sum(F.col("TotalCharges")).alias("sum_charges"))
thecube.write.mode("overwrite").parquet("%scube-%d.parquet" % (output_prefix, span))
therollup.write.mode("overwrite").parquet("%srollup-%d.parquet" % (output_prefix, span))
encoding_struct = {
"categorical" : categoricals,
"numeric" : numeric_cols + boolean_cols,
"unique": uniques
}
summary["schema"] = df.schema.jsonValue()
summary["ecdfs"] = approx_ecdf(df, numeric_cols)
summary["true_percentage"] = percent_true(df, boolean_cols)
summary["encoding"] = encoding_struct
summary["distinct_customers"] = df.select(df.customerID).distinct().count()
return summary
def losses_by_month(be):
    # a "loss" for a month is the total lifetime value of every customer whose
    # account terminated in that month
    customer_lifetime_values = be.groupBy("customerID").agg(F.sum("value").alias("lifetime_value"))
    return be.where(be.kind == "AccountTermination") \
        .join(customer_lifetime_values, "customerID") \
        .groupBy("month").agg(F.sum("lifetime_value").alias("value")) \
        .sort("month").toPandas().to_json()
def output_reports(df, be=None, report_prefix=""):
import json
summary = gen_summary(df, report_prefix)
if be is not None:
summary["losses_by_month"] = losses_by_month(be)
with open("%ssummary.json" % report_prefix, "w") as sf:
json.dump(summary, sf)
with open("%sencodings.json" % report_prefix, "w") as ef:
json.dump(summary["encoding"], ef)
|
data-science-blueprints-main
|
churn/churn/eda.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PixelView.topLevel import VERSION
from setuptools import setup, find_packages
setup(
name='PixelView',
version=VERSION,
packages=find_packages(),
package_data={'': ['*.json', '*.txt']},
entry_points={'console_scripts': ['PixelView = PixelView.cli:run']},
)
|
PixelView-master
|
setup.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pUtils
from PixelView.utils.image import loadImage
from PixelView.utils.cli import pprint, COLOR
from PixelView.gui.mainWindow import launch, MAIN_WINDOW_MODE
from PixelView.imageContainers.rgb888Image import Rgb888Image
from PixelView.imageContainers.rgba8888Image import Rgba8888Image
VERSION = '.01'
def version(**kwargs):
pprint('Version: ', color=COLOR.TEAL, endLine=False); pprint(VERSION)
def info(filePath, **kwargs):
try:
img = loadImage(filePath)
except IOError as e:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('[I/O] ({0}): {1}'.format(e.errno, e.strerror))
exit(1)
except Exception:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('Unsupported image format')
exit(1)
pprint('-----------------------------------')
pprint('srcFileName: ' + os.path.basename(img.srcFilePath))
pprint('mode: ' + img.mode)
pprint('size: ' + str(img.width) + 'x' + str(img.height))
pprint('srcFileFormat: ' + img.srcFileFormat)
pprint('-----------------------------------')
def printVal(filePath, x, y, **kwargs):
try:
img = loadImage(filePath)
except IOError as e:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('[I/O] ({0}): {1}'.format(e.errno, e.strerror))
exit(1)
except Exception:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('Unsupported image format')
exit(1)
start = (img.width * y + x) * img.bytesPerPixel
pprint(pUtils.formatHex(img.data[start:start + img.bytesPerPixel]))
def genCanvas(outFilePath, red, green, blue, width, height, alpha, **kwargs):
if os.path.exists(outFilePath):
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('File:')
pprint(' %s' % outFilePath, color=COLOR.TEAL)
pprint('Already exists')
exit(1)
if alpha is None:
img = Rgb888Image(bytearray([red, green, blue] * width * height), width, height)
else:
img = Rgba8888Image(bytearray([red, green, blue, alpha] * width * height), width, height)
img.save(outFilePath)
pprint('DONE', color=COLOR.TEAL)
def genConfig(dirPath, configManager, **kwargs):
pUtils.createDirectory(os.path.abspath(dirPath))
filePath = os.path.join(dirPath, 'config1.json')
if configManager.saveFullConfig(filePath) != 0:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('File: %s already exists' % filePath)
exit(1)
filePath = os.path.join(dirPath, 'configMenu.json')
if configManager.genMenuConfigFile(filePath, configName='config1', configPath='config1.json'):
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('File: %s already exists' % filePath)
exit(1)
pprint('DONE', color=COLOR.TEAL)
def setConfigStart(filePath, configManager, **kwargs):
configManager.setConfigStart(filePath)
def clearConfigStart(configManager, **kwargs):
configManager.clearConfigStart()
def view(filePathList, configManager, **kwargs):
launch(configManager, filePathList, mode=MAIN_WINDOW_MODE.VIEW, **kwargs)
def compare(filePathList1, filePathList2, fList, configManager, **kwargs):
launch(configManager, filePathList1, filePathList2, mode=MAIN_WINDOW_MODE.COMPARE, **kwargs)
|
PixelView-master
|
PixelView/topLevel.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
PixelView-master
|
PixelView/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from PixelView import topLevel
from PixelView.utils.cli import paramList, handleCli
from PixelView.imageContainers.common import Geometry
from PixelView.config.configManager import ConfigManager
def subCommands(subparsers):
command = 'version'
subparser = subparsers.add_parser(command)
subparser.set_defaults(command=command)
command = 'info'
subparser = subparsers.add_parser(command)
subparser.set_defaults(command=command)
subparser.add_argument('filePath', help='Image File Path')
command = 'printVal'
subparser = subparsers.add_parser(command)
subparser.set_defaults(command=command)
subparser.add_argument('filePath', help='Image File Path')
subparser.add_argument('x', help='X coordinate', type=int)
subparser.add_argument('y', help='Y coordinate', type=int)
command = 'genCanvas'
subparser = subparsers.add_parser(command)
subparser.set_defaults(command=command)
subparser.add_argument('outFilePath', help='Output File Path')
subparser.add_argument('red', help='Red component [0-255]', type=int)
subparser.add_argument('green', help='Green component [0-255]', type=int)
subparser.add_argument('blue', help='Blue component [0-255]', type=int)
subparser.add_argument('--alpha', help='Alpha component [0-255]', type=int)
subparser.add_argument('--width', help='Surface Area Width', type=int, default='320')
subparser.add_argument('--height', help='Surface Area Height', type=int, default='240')
command = 'genConfig'
subparser = subparsers.add_parser(command)
subparser.set_defaults(command=command)
    subparser.add_argument('dirPath', help='Path to directory that will contain the config files\n'
                                           'If it does not exist, it is created')
command = 'setConfigStart'
subparser = subparsers.add_parser(command)
subparser.set_defaults(command=command)
subparser.add_argument('filePath', help='Path to the configMenu File')
command = 'clearConfigStart'
subparser = subparsers.add_parser(command)
subparser.set_defaults(command=command)
command = 'view'
subparser = subparsers.add_parser(command, formatter_class=argparse.RawTextHelpFormatter)
subparser.set_defaults(command=command)
subparser.set_defaults(fListVarNameList=['filePathList'])
subparser.add_argument('filePathList',
help='This argument can be either:\n'
'- The file path for the image\n'
                                '- A comma-separated list of file paths for the images\n'
'- A path of a file that contains file paths for the images (with --fList flag)',
type=paramList)
subparser.add_argument('--fList', action='store_true',
help='If present, any path provided is treated as a file that contains file paths to images')
command = 'compare'
subparser = subparsers.add_parser(command, formatter_class=argparse.RawTextHelpFormatter)
subparser.set_defaults(command=command)
subparser.set_defaults(fListVarNameList=['filePathList1', 'filePathList2'])
subparser.add_argument('filePathList1',
help='This argument can be either:\n'
'- The file path for the image\n'
                                '- A comma-separated list of file paths for the images\n'
'- A path of a file that contains file paths for the images (with --fList flag)',
type=paramList)
subparser.add_argument('filePathList2',
help='Same as filePathList1, but for the second image (or second set of images)',
type=paramList)
subparser.add_argument('--fList', action='store_true',
help='If present, any path provided is treated as a file that contains file paths to images')
subparser.add_argument('--geometry1', help='The area within the image to compare, of the form: <width>x<height>+<x>+<y>', type=Geometry)
subparser.add_argument('--geometry2', help='The area within the image to compare, of the form: <width>x<height>+<x>+<y>', type=Geometry)
def run():
parser = argparse.ArgumentParser(prog='PixelView', formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
parser.add_argument('-v', '--verbose', help='Verbose', action='store_true')
parser.add_argument('--cf', help='Config File Path', dest='configFilePath')
parser.add_argument('--cn', help='Config Name', dest='configName')
    parser.add_argument('--useInternalDefaults', help='Use internal default configuration', dest='isUseInternalDefaults', action='store_true')
subCommands(subparsers)
args = parser.parse_args()
handleCli(args, topLevel, ConfigManager)
|
PixelView-master
|
PixelView/cli.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PixelView.cli import run
run()
|
PixelView-master
|
PixelView/__main__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
from .common import Geometry, COMPARE_TYPE
class AbstractImage(object):
def __init__(self, data, width, height):
self.data = bytearray(data)
self.width = width
self.height = height
self.mode = None
self.srcFilePath = None
self.srcFileFormat = None
def getImageInfo(self):
t = deepcopy(self.__dict__)
t.pop('data')
return t
def getImageName(self):
        if self.srcFilePath:
            return os.path.basename(self.srcFilePath)
        return None
def validateGeometry(self, geometry):
return (geometry.width + geometry.x <= self.width and
geometry.height + geometry.y <= self.height)
def getDiff(self, other, geometry1=None, geometry2=None, stopOnDiff=False, compareType=COMPARE_TYPE.FULL, returnFailPixelList=False, colorDict=None):
"""
Compares two images: self vs other
Args:
other: image object to compare (against self)
geometry1: The rectangular area within the 'self' image to compare
geometry2: The rectangular area within the 'other' image to compare
stopOnDiff: If True, return as soon as the first pixel difference is detected.
                        If False, continue comparing the images until the end
            compareType: What type of comparison to perform; for details, see the
                        COMPARE_TYPE enum in common.py
returnFailPixelList: If true, return a list with information for every pixel
that was found different in the comparison.
If false, don't return nor collect this data.
colorDict: A dictionary with the colors to use for the deltaImages.
Returns:
A dictionary that always has the item 'isDiff', and additional data depending
the case.
            If the areas of the images within their respective geometries are equal,
            then isDiff=False; otherwise isDiff=True.
            Also, if the geometries are invalid (e.g. the area specified by a geometry
            extends beyond its image) or the geometries do not cover the same area,
            then isDiff=True.
The following items are included in the return dictionary if the image
comparison reaches the end:
'deltaImageRgbData': Delta image for the RGB channels
'deltaImageAlphaData': Delta image for the alpha channel
'img1AlphaData': Image for the alpha channel of the 'self' image
'img2AlphaData': Image for the alpha channel of the 'other' image
            'pixelDiffCount': How many pixels were different
            'absDiffCount': The sum of the differences per channel of every pixel compared
'maxChannelDelta': The max difference found in a channel
'diffPixelRgbList': The list of pixels that were different for the RGB channels
'diffPixelAlphaList': The list of pixels that were different for the alpha channel
"""
if geometry1 is None:
geometry1 = Geometry(0, 0, self.width, self.height)
else:
geometry1 = Geometry(geometry1)
if geometry2 is None:
geometry2 = Geometry(0, 0, other.width, other.height)
else:
geometry2 = Geometry(geometry2)
if not geometry1.isAreaEqual(geometry2):
return {'isDiff': True, 'debugData': {'msg': 'Geometry mismatch', 'geometry1': str(geometry1), 'geometry2': str(geometry2)}}
if (not self.validateGeometry(geometry1) or not other.validateGeometry(geometry2)):
return {'isDiff': True, 'debugData': {'msg': 'Invalid geometry',
'geometry1': str(geometry1), 'geometry2': str(geometry2),
'width1': str(self.width), 'width2': str(other.width),
'height1': str(self.height), 'height2': str(other.height)}}
if (((compareType is COMPARE_TYPE.ALPHA_HI1 or compareType is COMPARE_TYPE.ALPHA_LO1) and self.bytesPerPixel != 4) or
((compareType is COMPARE_TYPE.ALPHA_HI2 or compareType is COMPARE_TYPE.ALPHA_LO2) and other.bytesPerPixel != 4)):
return {'isDiff': True, 'debugData': {'msg': 'Invalid image format and comparison type combo',
'compareType': str(compareType),
'bytesPerPixel1': self.bytesPerPixel, 'bytesPerPixel2': other.bytesPerPixel}}
flagCompareAlpha = False
if compareType is COMPARE_TYPE.FULL:
if (self.bytesPerPixel == 4) != (other.bytesPerPixel == 4): return {'isDiff': True}
if (self.bytesPerPixel == 4) and (other.bytesPerPixel == 4): flagCompareAlpha = True
deltaImageRgb = bytearray(b'\x00\x00\x00' * geometry1.width * geometry1.height)
deltaImageAlpha = bytearray(b'\x00\x00\x00' * geometry1.width * geometry1.height)
img1Alpha = bytearray(b'\x00\x00\x00' * geometry1.width * geometry1.height)
img2Alpha = bytearray(b'\x00\x00\x00' * geometry1.width * geometry1.height)
maxChannelDelta = 0
pixelDiffCount = 0 # Total pixels that differ
        absDiffCount = 0  # The sum of the absolute values of all byte differences for all pixels
absDiffCountPixel = 0 # The abs diff count of a single pixel
diffPixelRgbList = []
diffPixelAlphaList = []
for j in range(0, geometry1.height):
for i in range(0, geometry1.width):
outputPixelIndex = (j * geometry1.width + i) * 3
inputPixelIndex1 = ((j + geometry1.y) * self.width + i + geometry1.x) * self.bytesPerPixel
inputPixelIndex2 = ((j + geometry2.y) * other.width + i + geometry2.x) * other.bytesPerPixel
if compareType is COMPARE_TYPE.ALPHA_HI1 and self.data[inputPixelIndex1 + 3] != 0xFF: continue
if compareType is COMPARE_TYPE.ALPHA_LO1 and self.data[inputPixelIndex1 + 3] == 0x00: continue
if compareType is COMPARE_TYPE.ALPHA_HI2 and other.data[inputPixelIndex2 + 3] != 0xFF: continue
if compareType is COMPARE_TYPE.ALPHA_LO2 and other.data[inputPixelIndex2 + 3] == 0x00: continue
absDiffCountPixelRed = abs(self.data[inputPixelIndex1 + 0] - other.data[inputPixelIndex2 + 0])
absDiffCountPixelGreen = abs(self.data[inputPixelIndex1 + 1] - other.data[inputPixelIndex2 + 1])
absDiffCountPixelBlue = abs(self.data[inputPixelIndex1 + 2] - other.data[inputPixelIndex2 + 2])
absDiffCountPixel = absDiffCountPixelRed + absDiffCountPixelGreen + absDiffCountPixelBlue
absDiffCountPixelAlpha = 0
if flagCompareAlpha:
absDiffCountPixelAlpha = abs(self.data[inputPixelIndex1 + 3] - other.data[inputPixelIndex2 + 3])
img1Alpha[outputPixelIndex + 0] = self.data[inputPixelIndex1 + 3]
img1Alpha[outputPixelIndex + 1] = self.data[inputPixelIndex1 + 3]
img1Alpha[outputPixelIndex + 2] = self.data[inputPixelIndex1 + 3]
img2Alpha[outputPixelIndex + 0] = other.data[inputPixelIndex2 + 3]
img2Alpha[outputPixelIndex + 1] = other.data[inputPixelIndex2 + 3]
img2Alpha[outputPixelIndex + 2] = other.data[inputPixelIndex2 + 3]
maxChannelDelta = max(maxChannelDelta, absDiffCountPixelRed, absDiffCountPixelGreen, absDiffCountPixelBlue, absDiffCountPixelAlpha)
absDiffCountPixel = absDiffCountPixelRed + absDiffCountPixelGreen + absDiffCountPixelBlue + absDiffCountPixelAlpha
absDiffCount += absDiffCountPixel
pixelDiffCountTmp = 0
if (absDiffCountPixelRed > 0 or
absDiffCountPixelGreen > 0 or
absDiffCountPixelBlue > 0):
pixelDiffCountTmp = 1
t = max(absDiffCountPixelRed, absDiffCountPixelGreen, absDiffCountPixelBlue)
color = [0xFF, 0xFF, 0xFF]
if colorDict:
color = colorDict.get(str(t), colorDict.get('default', color))
deltaImageRgb[outputPixelIndex: outputPixelIndex + 3] = color
if returnFailPixelList:
diffPixelEntry = []
diffPixelEntry.append(inputPixelIndex1)
diffPixelEntry.append(inputPixelIndex2)
diffPixelRgbList.append(diffPixelEntry)
if absDiffCountPixelAlpha > 0:
pixelDiffCountTmp = 1
t = absDiffCountPixelAlpha
color = [0xFF, 0xFF, 0xFF]
if colorDict:
color = colorDict.get(str(t), colorDict.get('default', color))
deltaImageAlpha[outputPixelIndex: outputPixelIndex + 3] = color
if returnFailPixelList:
diffPixelEntry = []
diffPixelEntry.append(inputPixelIndex1)
diffPixelEntry.append(inputPixelIndex2)
diffPixelAlphaList.append(diffPixelEntry)
pixelDiffCount += pixelDiffCountTmp
if stopOnDiff and pixelDiffCount > 0: return {'isDiff': True}
returnDict = {'isDiff': pixelDiffCount != 0,
'deltaImageRgbData': (deltaImageRgb, geometry1.width, geometry1.height),
'maxChannelDelta': maxChannelDelta,
'pixelDiffCount': pixelDiffCount,
'absDiffCount': absDiffCount,
'diffPixelRgbList': diffPixelRgbList,
'geometry1': geometry1,
'geometry2': geometry2}
if flagCompareAlpha:
alphaDict = {'deltaImageAlphaData': (deltaImageAlpha, geometry1.width, geometry1.height),
'img1AlphaData': (img1Alpha, geometry1.width, geometry1.height),
'img2AlphaData': (img2Alpha, geometry1.width, geometry1.height),
'diffPixelAlphaList': diffPixelAlphaList}
returnDict.update(alphaDict)
return returnDict
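# Minimal usage sketch (illustrative only): AbstractImage needs a subclass that sets
# bytesPerPixel (the real containers live in rgb888Image.py and rgba8888Image.py);
# the tiny _DemoRgbImage below is hypothetical and exists only to exercise getDiff.
if __name__ == "__main__":
    class _DemoRgbImage(AbstractImage):
        bytesPerPixel = 3

    a = _DemoRgbImage(b'\x00\x00\x00' * 4, 2, 2)
    b = _DemoRgbImage(b'\x00\x00\x00' * 3 + b'\xff\x00\x00', 2, 2)
    result = a.getDiff(b)
    print('isDiff:', result['isDiff'], 'pixelDiffCount:', result['pixelDiffCount'])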
|
PixelView-master
|
PixelView/imageContainers/abstractImage.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
PixelView-master
|
PixelView/imageContainers/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import enum
@enum.unique
class COMPARE_TYPE(enum.Enum):
# Alpha component is ignored if present
ALPHALESS = 1
# The alpha component from the first image is used as a bitmask
# If the alpha is 255 the corresponding pixel is compared normally
# Otherwise the corresponding pixel is excluded from the comparison
ALPHA_HI1 = 2
# The alpha component from the second image is used as a bitmask
# If the alpha is 255 the corresponding pixel is compared normally
# Otherwise the corresponding pixel is excluded from the comparison
ALPHA_HI2 = 3
# The alpha component from the first image is used as a bitmask
# If the alpha is 0 the corresponding pixel is excluded from the comparison
# Otherwise the corresponding pixel is compared normally
ALPHA_LO1 = 4
# The alpha component from the second image is used as a bitmask
# If the alpha is 0 the corresponding pixel is excluded from the comparison
# Otherwise the corresponding pixel is compared normally
ALPHA_LO2 = 5
# Compares color channels as well as the alpha channel
# A difference of value for a pixel in the alpha channel is enough to deem
# the result of the comparison as 2 different images
    # Images without alpha can be compared like this as well
    # (in that case it is equivalent to ALPHALESS)
    # Alpha in one image and no alpha in the other is considered a difference
FULL = 6
class Geometry:
def __init__(self, data=0, y=0, width=0, height=0):
if isinstance(data, Geometry):
self.x = data.x
self.y = data.y
self.width = data.width
self.height = data.height
return
if isinstance(data, int):
self.x = data
self.y = y
self.width = width
self.height = height
return
if isinstance(data, str):
# Sample string to match: 320x240+0+0
t = re.match(r'([0-9]+)x([0-9]+)\+([0-9]+)\+([0-9]+)', data)
if t:
self.x = int(t.group(3))
self.y = int(t.group(4))
self.width = int(t.group(1))
self.height = int(t.group(2))
return
def __str__(self):
return ','.join([str(item) for item in [self.x, self.y, self.width, self.height]])
def __eq__(self, other):
if other is None: return False
return (self.__dict__ == other.__dict__)
def isAreaEqual(self, other):
return (self.width == other.width and
self.height == other.height)
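# Quick illustration (not used by the library): Geometry accepts either explicit
# numbers or a "<width>x<height>+<x>+<y>" string, the same form taken by the
# --geometry1/--geometry2 CLI flags.
if __name__ == "__main__":
    g1 = Geometry("320x240+10+20")
    g2 = Geometry(0, 0, 320, 240)
    print(g1)                  # 10,20,320,240
    print(g1.isAreaEqual(g2))  # True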
|
PixelView-master
|
PixelView/imageContainers/common.py
|