python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# python ./convertInput.py -t $data_name -b $batch_size -d $data_dir -l $seq_len -s $spiece_model_file -o $data_file -u 0
from os.path import join
from absl import flags
import os
import sys
import getopt
import csv
import collections
import numpy as np
import json
import random
from copy import copy
from collections import defaultdict as dd
import six
import unicodedata
import sentencepiece as spm
import tensorflow as tf  # needed for tf.logging and tf.gfile used by the processors below
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
special_symbols = {
"<unk>": 0,
"<s>": 1,
"</s>": 2,
"<cls>": 3,
"<sep>": 4,
"<pad>": 5,
"<mask>": 6,
"<eod>": 7,
"<eop>": 8,
}
SEP_ID = special_symbols["<sep>"]
CLS_ID = special_symbols["<cls>"]
SPIECE_UNDERLINE = '▁'
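# Packing note (informal): convert_single_example below lays out a (text_a, text_b)
# pair as [A tokens..., SEP, B tokens..., SEP, CLS], with segment ids SEG_ID_A for
# text_a and its SEP, SEG_ID_B for text_b and its SEP, and SEG_ID_CLS for the
# trailing CLS. Short sequences are left-padded with id 0 / SEG_ID_PAD, and the
# attention mask uses 0 for real tokens and 1 for padding.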
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
"""
DataProcessor Classes
"""
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if len(line) == 0:
continue
lines.append(line)
return lines
class GLUEProcessor(DataProcessor):
def __init__(self):
self.train_file = "train.tsv"
self.dev_file = "dev.tsv"
self.test_file = "test.tsv"
self.label_column = None
self.text_a_column = None
self.text_b_column = None
self.contains_header = True
self.test_text_a_column = None
self.test_text_b_column = None
self.test_contains_header = True
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, self.train_file)), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, self.dev_file)), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
if self.test_text_a_column is None:
self.test_text_a_column = self.text_a_column
if self.test_text_b_column is None:
self.test_text_b_column = self.text_b_column
return self._create_examples(
self._read_tsv(os.path.join(data_dir, self.test_file)), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0 and self.contains_header and set_type != "test":
continue
if i == 0 and self.test_contains_header and set_type == "test":
continue
guid = "%s-%s" % (set_type, i)
a_column = (self.text_a_column if set_type != "test" else
self.test_text_a_column)
b_column = (self.text_b_column if set_type != "test" else
self.test_text_b_column)
# there are some incomplete lines in QNLI
if len(line) <= a_column:
tf.logging.warning('Incomplete line, ignored.')
continue
text_a = line[a_column]
if b_column is not None:
if len(line) <= b_column:
tf.logging.warning('Incomplete line, ignored.')
continue
text_b = line[b_column]
else:
text_b = None
if set_type == "test":
label = self.get_labels()[0]
else:
if len(line) <= self.label_column:
tf.logging.warning('Incomplete line, ignored.')
continue
label = line[self.label_column]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class Yelp5Processor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "train.csv"))
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "test.csv"))
def get_labels(self):
"""See base class."""
return ["1", "2", "3", "4", "5"]
def _create_examples(self, input_file):
"""Creates examples for the training and dev sets."""
examples = []
with tf.gfile.Open(input_file) as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
label = line[0]
text_a = line[1].replace('""', '"').replace('\\"', '"')
examples.append(
InputExample(guid=str(i), text_a=text_a, text_b=None, label=label))
return examples
class ImdbProcessor(DataProcessor):
def get_labels(self):
return ["neg", "pos"]
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "train"))
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "test"))
def _create_examples(self, data_dir):
examples = []
for label in ["neg", "pos"]:
cur_dir = os.path.join(data_dir, label)
for filename in tf.gfile.ListDirectory(cur_dir):
if not filename.endswith("txt"):
continue
path = os.path.join(cur_dir, filename)
with tf.gfile.Open(path) as f:
text = f.read().strip().replace("<br />", " ")
examples.append(InputExample(
guid="unused_id", text_a=text, text_b=None, label=label))
return examples
class MnliMatchedProcessor(GLUEProcessor):
def __init__(self):
super(MnliMatchedProcessor, self).__init__()
self.dev_file = "dev_matched.tsv"
self.test_file = "test_matched.tsv"
self.label_column = -1
self.text_a_column = 8
self.text_b_column = 9
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
class MnliMismatchedProcessor(MnliMatchedProcessor):
def __init__(self):
super(MnliMismatchedProcessor, self).__init__()
self.dev_file = "dev_mismatched.tsv"
self.test_file = "test_mismatched.tsv"
class StsbProcessor(GLUEProcessor):
def __init__(self):
super(StsbProcessor, self).__init__()
self.label_column = 9
self.text_a_column = 7
self.text_b_column = 8
def get_labels(self):
return [0.0]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0 and self.contains_header and set_type != "test":
continue
if i == 0 and self.test_contains_header and set_type == "test":
continue
guid = "%s-%s" % (set_type, i)
a_column = (self.text_a_column if set_type != "test" else
self.test_text_a_column)
b_column = (self.text_b_column if set_type != "test" else
self.test_text_b_column)
# there are some incomplete lines in QNLI
if len(line) <= a_column:
tf.logging.warning('Incomplete line, ignored.')
continue
text_a = line[a_column]
if b_column is not None:
if len(line) <= b_column:
tf.logging.warning('Incomplete line, ignored.')
continue
text_b = line[b_column]
else:
text_b = None
if set_type == "test":
label = self.get_labels()[0]
else:
if len(line) <= self.label_column:
tf.logging.warning('Incomplete line, ignored.')
continue
label = float(line[self.label_column])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
# Tokenize
def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False):
if remove_space:
outputs = ' '.join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``", '"').replace("''", '"')
if six.PY2 and isinstance(outputs, str):
outputs = outputs.decode('utf-8')
if not keep_accents:
outputs = unicodedata.normalize('NFKD', outputs)
outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
if lower:
outputs = outputs.lower()
return outputs
def encode_pieces(sp_model, text, return_unicode=True, sample=False):
# return_unicode is used only for py2
# note(zhiliny): in some systems, sentencepiece only accepts str for py2
if six.PY2 and isinstance(text, unicode):
text = text.encode('utf-8')
if not sample:
pieces = sp_model.EncodeAsPieces(text)
else:
pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
cur_pieces = sp_model.EncodeAsPieces(
piece[:-1].replace(SPIECE_UNDERLINE, ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
# note(zhiliny): convert back to unicode for py2
if six.PY2 and return_unicode:
ret_pieces = []
for piece in new_pieces:
if isinstance(piece, str):
piece = piece.decode('utf-8')
ret_pieces.append(piece)
new_pieces = ret_pieces
return new_pieces
def encode_ids(sp_model, text, sample=False):
pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample)
ids = [sp_model.PieceToId(piece) for piece in pieces]
return ids
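# Minimal usage sketch (assumes a local SentencePiece model file, as loaded in the
# __main__ block below):
#   sp = spm.SentencePieceProcessor()
#   sp.Load("spiece.model")
#   ids = encode_ids(sp, preprocess_text("Hello, world!", lower=False))
# preprocess_text normalizes whitespace, quotes and (optionally) accents/case;
# encode_ids maps the resulting subword pieces to vocabulary ids.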
# Convert functions
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
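# Worked example (illustrative): with len(tokens_a) == 10, len(tokens_b) == 6 and
# max_length == 12, the loop pops 4 tokens off the end of tokens_a (always the
# currently longer sequence), leaving lengths 6 and 6.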
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenize_fn):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[1] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
if label_list is not None:
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenize_fn(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenize_fn(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for two [SEP] & one [CLS] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for one [SEP] & one [CLS] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:max_seq_length - 2]
tokens = []
segment_ids = []
for token in tokens_a:
tokens.append(token)
segment_ids.append(SEG_ID_A)
tokens.append(SEP_ID)
segment_ids.append(SEG_ID_A)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(SEG_ID_B)
tokens.append(SEP_ID)
segment_ids.append(SEG_ID_B)
tokens.append(CLS_ID)
segment_ids.append(SEG_ID_CLS)
input_ids = tokens
# The mask has 0 for real tokens and 1 for padding tokens. Only real
# tokens are attended to.
input_mask = [0] * len(input_ids)
# Zero-pad up to the sequence length.
if len(input_ids) < max_seq_length:
delta_len = max_seq_length - len(input_ids)
input_ids = [0] * delta_len + input_ids
input_mask = [1] * delta_len + input_mask
segment_ids = [SEG_ID_PAD] * delta_len + segment_ids
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_list is not None:
label_id = label_map[example.label]
else:
label_id = example.label
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
def file_based_convert_examples_to_npz(
examples, label_list, max_seq_length, tokenize_fn, output_file,
num_passes=1):
"""Convert a set of `InputExample`s to a NPZ file."""
if num_passes > 1:
examples *= num_passes
data = {}
arr_input_ids = []
arr_input_mask = []
arr_segment_ids = []
arr_label_ids = []
for (ex_index, example) in enumerate(examples):
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenize_fn)
arr_input_ids.append(feature.input_ids)
arr_input_mask.append(feature.input_mask)
arr_segment_ids.append(feature.segment_ids)
arr_label_ids.append(feature.label_id)
# if ex_index % 100 == 0:
# print("Writing example {} of {} with {} {} {} {}".format(ex_index,len(examples),feature.input_ids,
# feature.input_mask, feature.segment_ids, feature.label_id))
data["input_ids:0"] = np.array(arr_input_ids, dtype=np.int32)
data["input_mask:0"] = np.array(arr_input_mask, dtype=np.float32)
data["segment_ids:0"] = np.array(arr_segment_ids, dtype=np.int32)
data["label_ids:0"] = np.array(arr_label_ids, dtype=np.int32)
print("Save Input to file {}".format(output_file))
np.savez(output_file, **data)
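# The resulting .npz can be inspected with numpy, e.g. (illustrative):
#   data = np.load("data.npz")
#   data["input_ids:0"].shape    # (num_examples, max_seq_length), int32
#   data["input_mask:0"].shape   # (num_examples, max_seq_length), float32
#   data["segment_ids:0"].shape  # (num_examples, max_seq_length), int32
# The ":0" suffix mirrors TensorFlow tensor naming, presumably so the arrays can be
# fed to the matching graph inputs by name downstream.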
def usage():
print(" -t task_name")
print(" -b batch_size")
print(" -d data_dir")
print(" -r is_regression")
print(" -l max_seq_length")
print(" -s spiece_model_file")
print(" -o output_file")
print(" -u uncased")
print("Example: python convertInput.py -t sts-b -b 8 -d ../../../Data/glue_data/STS-B -r 1 -l 128 -s ../../../Data/xlnet_cased_L-12_H-768_A-12/spiece.model -o ./data.npz -u 0 ")
if __name__ == "__main__":
# Input parameters
task_name = "sts-b"
batch_size = 8
data_dir = "../../../Data/glue_data/STS-B/"
is_regression = True
max_seq_length = 128
spiece_model_file = "../../../Data/xlnet_cased_L-12_H-768_A-12/spiece.model"
output_file = "./data.npz"
uncased = False
# Set parameters from command-line options
opts, args = getopt.getopt(sys.argv[1:], "ht:b:d:r:l:s:o:u:")
for op, value in opts:
if op == "-t":
task_name = value
elif op == "-b":
batch_size = int(value)
elif op == "-d":
data_dir = value
elif op == "-r":
is_regression = bool(value)
elif op == "-l":
max_seq_length = int(value)
elif op == "-s":
spiece_model_file = value
elif op == "-o":
output_file = value
elif op == "-u":
uncased = bool(value)
elif op == "-h":
usage()
sys.exit()
# Set processor
processors = {
"mnli_matched": MnliMatchedProcessor,
"mnli_mismatched": MnliMismatchedProcessor,
'sts-b': StsbProcessor,
'imdb': ImdbProcessor,
"yelp5": Yelp5Processor
}
processor = processors[task_name]()
label_list = processor.get_labels() if not is_regression else None
# Acquire examples
eval_examples = processor.get_test_examples(data_dir)
while len(eval_examples) % batch_size != 0:
eval_examples.append(PaddingInputExample())
# Convert examples to numpy
sp = spm.SentencePieceProcessor()
sp.Load(spiece_model_file)
def tokenize_fn(text):
text = preprocess_text(text, lower=uncased)
return encode_ids(sp, text)
file_based_convert_examples_to_npz(eval_examples, label_list,
max_seq_length, tokenize_fn, output_file)
#np.save('extra.npy', extra.transpose((1, 0, 2)))
| FasterTransformer-main | examples/tensorflow/xlnet/convertInput.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def embedding_lookup(x, n_token, d_embed, initializer, use_tpu=True,
scope='embedding', reuse=None, dtype=tf.float32):
"""TPU and GPU embedding_lookup function."""
with tf.variable_scope(scope, reuse=reuse):
lookup_table = tf.get_variable('lookup_table', [n_token, d_embed], dtype=dtype, initializer=initializer)
if use_tpu:
one_hot_idx = tf.one_hot(x, n_token, dtype=dtype)
if one_hot_idx.shape.ndims == 2:
return tf.einsum('in,nd->id', one_hot_idx, lookup_table), lookup_table
else:
return tf.einsum('ibn,nd->ibd', one_hot_idx, lookup_table), lookup_table
else:
return tf.nn.embedding_lookup(lookup_table, x), lookup_table
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = tf.einsum('i,d->id', pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = tf.tile(pos_emb, [1, bsz, 1])
return pos_emb
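# Shape note: sinusoid_inp is [len, d_model / 2]; concatenating sin and cos gives
# [len, d_model], and the final embedding is [len, 1, d_model], tiled to
# [len, bsz, d_model] when bsz is provided.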
def positionwise_ffn(inp, d_model, d_inner, dropout, kernel_initializer,
activation_type='relu', scope='ff', is_training=True,
reuse=None):
"""Position-wise Feed-forward Network."""
if activation_type == 'relu':
activation = tf.nn.relu
elif activation_type == 'gelu':
activation = gelu
else:
raise ValueError('Unsupported activation type {}'.format(activation_type))
output = inp
inter_res_1 = []
with tf.variable_scope(scope, reuse=reuse):
output = tf.layers.dense(output, d_inner, activation=activation,
kernel_initializer=kernel_initializer,
name='layer_1')
inter_res_1.append(output)
output = tf.layers.dropout(output, dropout, training=is_training,
name='drop_1')
output = tf.layers.dense(output, d_model,
kernel_initializer=kernel_initializer,
name='layer_2')
output = tf.layers.dropout(output, dropout, training=is_training,
name='drop_2')
output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1,
scope='LayerNorm')
return output, inter_res_1
def head_projection(h, d_model, n_head, d_head, kernel_initializer, name):
"""Project hidden states to a specific head with a 4D-shape."""
proj_weight = tf.get_variable('{}/kernel'.format(name),
[d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', h, proj_weight)
return head
def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training,
kernel_initializer, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head],
dtype=h.dtype, initializer=kernel_initializer)
attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o)
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
if residual:
output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1, scope='LayerNorm')
else:
output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, scope='LayerNorm')
return output
def abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, is_training,
scale):
"""Core absolute positional attention operations."""
attn_score = tf.einsum('ibnd,jbnd->ijbn', q_head, k_head)
attn_score *= scale
if attn_mask is not None:
attn_score = attn_score - 1e30 * attn_mask
# attention probability
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
# attention output
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head)
return attn_vec
def rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,
r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt, is_training,
scale):
"""Core relative positional attention operations."""
# content based attention score
ac = tf.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h)
# position based attention score
bd = tf.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r)
bd = rel_shift(bd, klen=tf.shape(ac)[1])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = tf.einsum('ibnd,snd->ibns', q_head + r_s_bias, seg_embed)
ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * scale
if attn_mask is not None:
if attn_score.dtype==tf.float16:
attn_score = attn_score - 1e4 * attn_mask
else:
attn_score = attn_score - 1e30 * attn_mask
# attention probability
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
# attention output
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
return attn_vec
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = tf.shape(x)
x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])
x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])
x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1])
return x
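# Informal note on the reshape trick above: `x` arrives with shape
# [qlen, klen + qlen, bsz, n_head], one column per candidate relative position
# (the positional sequence runs from klen down to -qlen + 1). The
# reshape / slice / reshape sequence is equivalent to shifting row i left by
# (qlen - i) columns and keeping the first klen columns, so that entry [i, j]
# ends up holding the score for the relative distance between query i and key j,
# without an explicit per-row gather.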
def _create_mask(qlen, mlen, dtype, same_length=False):
"""create causal attention mask."""
attn_mask = tf.ones([qlen, qlen], dtype=dtype)
mask_u = tf.matrix_band_part(attn_mask, 0, -1)
mask_dia = tf.matrix_band_part(attn_mask, 0, 0)
attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype)
ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
if same_length:
mask_l = tf.matrix_band_part(attn_mask, -1, 0)
ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
return ret
def _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None):
"""cache hidden states into memory."""
if mem_len is None or mem_len == 0:
return None
else:
if reuse_len is not None and reuse_len > 0:
curr_out = curr_out[:reuse_len]
if prev_mem is None:
new_mem = curr_out[-mem_len:]
else:
new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:]
return tf.stop_gradient(new_mem)
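# Illustrative behaviour: with mem_len == 384 and a current segment of 128 hidden
# states, the new memory is the last 384 rows of concat(prev_mem, curr_out), i.e.
# the most recent positions; tf.stop_gradient makes the cached states constants
# for the next segment.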
def relative_positional_encoding(qlen, klen, d_model, clamp_len, attn_type,
bi_data, bsz=None, dtype=None):
"""create relative positional encoding."""
freq_seq = tf.range(0, d_model, 2.0)
if dtype is not None and dtype != tf.float32:
freq_seq = tf.cast(freq_seq, dtype=dtype)
inv_freq = 1 / (10000 ** (freq_seq / d_model))
if attn_type == 'bi':
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif attn_type == 'uni':
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError('Unknown `attn_type` {}.'.format(attn_type))
if bi_data:
fwd_pos_seq = tf.range(beg, end, -1.0)
bwd_pos_seq = tf.range(-beg, -end, 1.0)
if dtype is not None and dtype != tf.float32:
fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype)
if clamp_len > 0:
fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len)
bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -clamp_len, clamp_len)
if bsz is not None:
# With bi_data, the batch size should be divisible by 2.
assert bsz % 2 == 0
fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1)
else:
fwd_pos_seq = tf.range(beg, end, -1.0)
if dtype is not None and dtype != tf.float32:
fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
if clamp_len > 0:
fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len)
pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz)
return pos_emb
def multihead_attn(q, k, v, attn_mask, d_model, n_head, d_head, dropout,
dropatt, is_training, kernel_initializer, residual=True,
scope='abs_attn', reuse=None):
"""Standard multi-head attention with absolute positional embedding."""
scale = 1 / (d_head ** 0.5)
with tf.variable_scope(scope, reuse=reuse):
# attention heads
q_head = head_projection(q, d_model, n_head, d_head, kernel_initializer, 'q')
k_head = head_projection(k, d_model, n_head, d_head, kernel_initializer, 'k')
v_head = head_projection(v, d_model, n_head, d_head, kernel_initializer, 'v')
# attention vector
attn_vec = abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, is_training, scale)
# post processing
output = post_attention(v, attn_vec, d_model, n_head, d_head, dropout,
is_training, kernel_initializer, residual)
return output
def rel_multihead_attn(h, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed,
attn_mask, mems, d_model, n_head, d_head, dropout,
dropatt, is_training, kernel_initializer,
scope='rel_attn', reuse=None):
"""Multi-head attention with relative positional encoding."""
inter = []
scale = 1 / (d_head ** 0.5)
with tf.variable_scope(scope, reuse=reuse):
if mems is not None and mems.shape.ndims > 1:
cat = tf.concat([mems, h], 0)
else:
cat = h
# content heads
q_head_h = head_projection(h, d_model, n_head, d_head, kernel_initializer, 'q')
k_head_h = head_projection(cat, d_model, n_head, d_head, kernel_initializer, 'k')
v_head_h = head_projection(cat, d_model, n_head, d_head, kernel_initializer, 'v')
inter.append(q_head_h)
inter.append(k_head_h)
inter.append(v_head_h)
# positional heads
k_head_r = head_projection(r, d_model, n_head, d_head, kernel_initializer, 'r')
inter.append(k_head_r)
# core attention ops
attn_vec = rel_attn_core(q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,
r_r_bias, r_s_bias, attn_mask, dropatt, is_training, scale)
inter.append(attn_vec)
# post processing
output = post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training, kernel_initializer)
inter.append(output)
return output, inter
def two_stream_rel_attn(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias,
seg_embed, attn_mask_h, attn_mask_g, target_mapping,
d_model, n_head, d_head, dropout, dropatt, is_training,
kernel_initializer, scope='rel_attn'):
"""Two-stream attention with relative positional encoding."""
scale = 1 / (d_head ** 0.5)
with tf.variable_scope(scope, reuse=False):
# content based attention score
if mems is not None and mems.shape.ndims > 1:
cat = tf.concat([mems, h], 0)
else:
cat = h
# content-based key head
k_head_h = head_projection(cat, d_model, n_head, d_head, kernel_initializer, 'k')
# content-based value head
v_head_h = head_projection(cat, d_model, n_head, d_head, kernel_initializer, 'v')
# position-based key head
k_head_r = head_projection(r, d_model, n_head, d_head, kernel_initializer, 'r')
# h-stream
# content-stream query head
q_head_h = head_projection(h, d_model, n_head, d_head, kernel_initializer, 'q')
# core attention ops
attn_vec_h = rel_attn_core(q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,
r_r_bias, r_s_bias, attn_mask_h, dropatt, is_training, scale)
# post processing
output_h = post_attention(h, attn_vec_h, d_model, n_head, d_head, dropout, is_training, kernel_initializer)
with tf.variable_scope(scope, reuse=True):
# g-stream
# query-stream query head
q_head_g = head_projection(
g, d_model, n_head, d_head, kernel_initializer, 'q')
# core attention ops
if target_mapping is not None:
q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
attn_vec_g = rel_attn_core(q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,
r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale)
attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
else:
attn_vec_g = rel_attn_core(q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,
r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale)
# post processing
output_g = post_attention(g, attn_vec_g, d_model, n_head, d_head, dropout, is_training, kernel_initializer)
return output_h, output_g
def transformer_xl(inp_k, n_token, n_layer, d_model, n_head,
d_head, d_inner, dropout, dropatt, attn_type,
bi_data, initializer, is_training, mem_len=None,
inp_q=None, mems=None,
same_length=False, clamp_len=-1, untie_r=False,
use_tpu=True, input_mask=None,
perm_mask=None, seg_id=None, reuse_len=None,
ff_activation='relu', target_mapping=None,
data_type="fp32", scope='transformer', **kwargs):
"""
Defines a Transformer-XL computation graph with additional
support for XLNet.
Args:
inp_k: int32 Tensor in shape [len, bsz], the input token IDs.
seg_id: int32 Tensor in shape [len, bsz], the input segment IDs.
input_mask: float32 Tensor in shape [len, bsz], the input mask.
0 for real tokens and 1 for padding.
mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
from previous batches. The length of the list equals n_layer.
If None, no memory is used.
perm_mask: float32 Tensor in shape [len, len, bsz].
If perm_mask[i, j, k] = 0, i attend to j in batch k;
if perm_mask[i, j, k] = 1, i does not attend to j in batch k.
If None, each position attends to all the others.
target_mapping: float32 Tensor in shape [num_predict, len, bsz].
If target_mapping[i, j, k] = 1, the i-th predict in batch k is
on the j-th token.
Only used during pretraining for partial prediction.
Set to None during finetuning.
inp_q: float32 Tensor in shape [len, bsz].
1 for tokens with losses and 0 for tokens without losses.
Only used during pretraining for two-stream attention.
Set to None during finetuning.
n_layer: int, the number of layers.
d_model: int, the hidden size.
n_head: int, the number of attention heads.
d_head: int, the dimension size of each attention head.
d_inner: int, the hidden size in feed-forward layers.
ff_activation: str, "relu" or "gelu".
untie_r: bool, whether to untie the biases in attention.
n_token: int, the vocab size.
is_training: bool, whether in training mode.
use_tpu: bool, whether TPUs are used.
data_type: str, the compute type, one of "fp32", "fp16" or "bf16".
dropout: float, dropout rate.
dropatt: float, dropout rate on attention probabilities.
init: str, the initialization scheme, either "normal" or "uniform".
init_range: float, initialize the parameters with a uniform distribution
in [-init_range, init_range]. Only effective when init="uniform".
init_std: float, initialize the parameters with a normal distribution
with mean 0 and stddev init_std. Only effective when init="normal".
mem_len: int, the number of tokens to cache.
reuse_len: int, the number of tokens in the current batch to be cached
and reused in the future.
bi_data: bool, whether to use bidirectional input pipeline.
Usually set to True during pretraining and False during finetuning.
clamp_len: int, clamp all relative distances larger than clamp_len.
-1 means no clamping.
same_length: bool, whether to use the same attention length for each token.
summary_type: str, "last", "first", "mean", or "attn". The method
to pool the input to get a vector representation.
initializer: A tf initializer.
scope: scope name for the computation graph.
"""
tf.logging.info('memory input {}'.format(mems))
tf_float = tf.float32
if data_type == "fp16":
tf_float = tf.float16
elif data_type == "bf16":
tf_float = tf.bfloat16
tf.logging.info('Use float type {}'.format(tf_float))
new_mems = []
with tf.variable_scope(scope):
if untie_r:
r_w_bias = tf.get_variable('r_w_bias', [n_layer, n_head, d_head], dtype=tf_float, initializer=initializer)
r_r_bias = tf.get_variable('r_r_bias', [n_layer, n_head, d_head], dtype=tf_float, initializer=initializer)
else:
r_w_bias = tf.get_variable('r_w_bias', [n_head, d_head], dtype=tf_float, initializer=initializer)
r_r_bias = tf.get_variable('r_r_bias', [n_head, d_head], dtype=tf_float, initializer=initializer)
bsz = tf.shape(inp_k)[1]
qlen = tf.shape(inp_k)[0]
mlen = tf.shape(mems[0])[0] if mems is not None else 0
klen = mlen + qlen
# Attention mask
# causal attention mask
if attn_type == 'uni':
attn_mask = _create_mask(qlen, mlen, tf_float, same_length)
attn_mask = attn_mask[:, :, None, None]
elif attn_type == 'bi':
attn_mask = None
else:
raise ValueError('Unsupported attention type: {}'.format(attn_type))
# data mask: input mask & perm mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
data_mask = tf.cast(data_mask, dtype=tf_float)
# all mems can be attended to
mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz], dtype=tf_float)
data_mask = tf.concat([mems_mask, data_mask], 1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = tf.cast(attn_mask > 0, dtype=tf_float)
if attn_mask is not None:
non_tgt_mask = -tf.eye(qlen, dtype=tf_float)
non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=tf_float), non_tgt_mask], axis=-1)
non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=tf_float)
else:
non_tgt_mask = None
# Word embedding
word_emb_k, lookup_table = embedding_lookup(
x=inp_k,
n_token=n_token,
d_embed=d_model,
initializer=initializer,
use_tpu=use_tpu,
dtype=tf_float,
scope='word_embedding')
if inp_q is not None:
with tf.variable_scope('mask_emb'):
mask_emb = tf.get_variable('mask_emb', [1, 1, d_model], dtype=tf_float)
if target_mapping is not None:
word_emb_q = tf.tile(mask_emb, [tf.shape(target_mapping)[0], bsz, 1])
else:
inp_q_ext = inp_q[:, :, None]
word_emb_q = inp_q_ext * mask_emb + (1 - inp_q_ext) * word_emb_k
output_h = tf.layers.dropout(word_emb_k, dropout, training=is_training)
if inp_q is not None:
output_g = tf.layers.dropout(word_emb_q, dropout, training=is_training)
# Segment embedding
if seg_id is not None:
if untie_r:
r_s_bias = tf.get_variable('r_s_bias', [n_layer, n_head, d_head],
dtype=tf_float, initializer=initializer)
else:
# default case (tie)
r_s_bias = tf.get_variable('r_s_bias', [n_head, d_head], dtype=tf_float, initializer=initializer)
seg_embed = tf.get_variable('seg_embed', [n_layer, 2, n_head, d_head],
dtype=tf_float, initializer=initializer)
# Convert `seg_id` to one-hot `seg_mat`
mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32)
cat_ids = tf.concat([mem_pad, seg_id], 0)
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat0 = tf.cast(
tf.logical_not(tf.equal(seg_id[:, None], cat_ids[None, :])),
tf.int32)
seg_mat = tf.one_hot(seg_mat0, 2, dtype=tf_float)
else:
seg_mat = None
# Positional encoding
pos_emb = relative_positional_encoding(
qlen, klen, d_model, clamp_len, attn_type, bi_data,
bsz=bsz, dtype=tf_float)
pos_emb = tf.layers.dropout(pos_emb, dropout, training=is_training)
# Attention layers
if mems is None:
mems = [None] * n_layer
print(non_tgt_mask, output_h, seg_mat, pos_emb)
# arr_output=[non_tgt_mask,output_h,seg_mat,pos_emb]
record_output = [non_tgt_mask, output_h, seg_mat, pos_emb]
for i in range(n_layer):
# cache new mems
new_mems.append(_cache_mem(output_h, mems[i], mem_len, reuse_len))
# segment bias
if seg_id is None:
r_s_bias_i = None
seg_embed_i = None
else:
r_s_bias_i = r_s_bias if not untie_r else r_s_bias[i]
seg_embed_i = seg_embed[i]
with tf.variable_scope('layer_{}'.format(i)):
if inp_q is not None:
output_h, output_g = two_stream_rel_attn(
h=output_h,
g=output_g,
r=pos_emb,
r_w_bias=r_w_bias if not untie_r else r_w_bias[i],
r_r_bias=r_r_bias if not untie_r else r_r_bias[i],
seg_mat=seg_mat,
r_s_bias=r_s_bias_i,
seg_embed=seg_embed_i,
attn_mask_h=non_tgt_mask,
attn_mask_g=attn_mask,
mems=mems[i],
target_mapping=target_mapping,
d_model=d_model,
n_head=n_head,
d_head=d_head,
dropout=dropout,
dropatt=dropatt,
is_training=is_training,
kernel_initializer=initializer)
reuse = True
else:
reuse = False
output_h, inter_res_0 = rel_multihead_attn(
h=output_h,
r=pos_emb,
r_w_bias=r_w_bias if not untie_r else r_w_bias[i],
r_r_bias=r_r_bias if not untie_r else r_r_bias[i],
seg_mat=seg_mat,
r_s_bias=r_s_bias_i,
seg_embed=seg_embed_i,
attn_mask=non_tgt_mask,
mems=mems[i],
d_model=d_model,
n_head=n_head,
d_head=d_head,
dropout=dropout,
dropatt=dropatt,
is_training=is_training,
kernel_initializer=initializer,
reuse=reuse)
record_output = record_output + inter_res_0
if inp_q is not None:
output_g, _ = positionwise_ffn(
inp=output_g,
d_model=d_model,
d_inner=d_inner,
dropout=dropout,
kernel_initializer=initializer,
activation_type=ff_activation,
is_training=is_training)
output_h, inter_res_1 = positionwise_ffn(
inp=output_h,
d_model=d_model,
d_inner=d_inner,
dropout=dropout,
kernel_initializer=initializer,
activation_type=ff_activation,
is_training=is_training,
reuse=reuse)
record_output = record_output + inter_res_1
record_output.append(output_h)
if inp_q is not None:
output = tf.layers.dropout(output_g, dropout, training=is_training)
else:
output = tf.layers.dropout(output_h, dropout, training=is_training)
return output, record_output
def lm_loss(hidden, target, n_token, d_model, initializer, lookup_table=None,
tie_weight=False, bi_data=True, use_tpu=False):
"""doc."""
with tf.variable_scope('lm_loss'):
if tie_weight:
assert lookup_table is not None, \
'lookup_table cannot be None for tie_weight'
softmax_w = lookup_table
else:
softmax_w = tf.get_variable('weight', [n_token, d_model], dtype=hidden.dtype, initializer=initializer)
softmax_b = tf.get_variable('bias', [n_token], dtype=hidden.dtype, initializer=tf.zeros_initializer())
logits = tf.einsum('ibd,nd->ibn', hidden, softmax_w) + softmax_b
if use_tpu:
one_hot_target = tf.one_hot(target, n_token, dtype=logits.dtype)
loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=logits)
return loss
def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout,
dropatt, input_mask, is_training, initializer,
scope=None, reuse=None, use_proj=True):
"""
Different classification tasks may or may not share the same parameters
to summarize the sequence features.
If shared, one can keep the `scope` to the default value `None`.
Otherwise, one should specify a different `scope` for each task.
"""
with tf.variable_scope(scope, 'sequnece_summary', reuse=reuse):
if summary_type == 'last':
summary = hidden[-1]
elif summary_type == 'first':
summary = hidden[0]
elif summary_type == 'mean':
summary = tf.reduce_mean(hidden, axis=0)
elif summary_type == 'attn':
bsz = tf.shape(hidden)[1]
summary_bias = tf.get_variable('summary_bias', [d_model], dtype=hidden.dtype, initializer=initializer)
summary_bias = tf.tile(summary_bias[None, None], [1, bsz, 1])
if input_mask is not None:
input_mask = input_mask[None, :, :, None]
summary = multihead_attn(summary_bias, hidden, hidden, input_mask, d_model, n_head, d_head, dropout, dropatt,
is_training, initializer, residual=False)
summary = summary[0]
else:
raise ValueError('Unsupported summary type {}'.format(summary_type))
# use another projection as in BERT
if use_proj:
summary = tf.layers.dense(
summary,
d_model,
activation=tf.tanh,
kernel_initializer=initializer,
name='summary')
# dropout
summary = tf.layers.dropout(
summary, dropout, training=is_training,
name='dropout')
return summary
def classification_loss(hidden, labels, n_class, initializer, scope, reuse=None,
return_logits=False):
"""
Different classification tasks should use different scope names to ensure
different dense layers (parameters) are used to produce the logits.
An exception will be in transfer learning, where one hopes to transfer
the classification weights.
"""
with tf.variable_scope(scope, reuse=reuse):
logits = tf.layers.dense(
hidden,
n_class,
kernel_initializer=initializer,
name='logit')
one_hot_target = tf.one_hot(labels, n_class, dtype=hidden.dtype)
loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1)
if return_logits:
return loss, logits
return loss
def regression_loss(hidden, labels, initializer, scope, reuse=None, return_logits=False):
with tf.variable_scope(scope, reuse=reuse):
logits = tf.layers.dense(
hidden,
1,
kernel_initializer=initializer,
name='logit')
logits = tf.squeeze(logits, axis=-1)
loss = tf.square(logits - labels)
if return_logits:
return loss, logits
return loss
| FasterTransformer-main | examples/tensorflow/xlnet/modeling.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# python ./convertModel.py -i $ckpt_file -o $model_file
import getopt
import sys
import numpy as np
import tensorflow as tf
import absl.logging as _logging # pylint: disable=unused-import
from absl import flags
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def usage():
print(" -o output_file")
print(" -i ckpt_dir")
print("Example: python convertModel.py -i xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt -o ./model.npz ")
if __name__ == "__main__":
m = {}
ckpt_dir = "../../../Data/xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt"
output_file = "./model.npz"
# Set parameters from command-line options
opts, args = getopt.getopt(sys.argv[1:], "hi:o:")
for op, value in opts:
if op == "-i":
ckpt_dir = value
if op == "-o":
output_file = value
if op == "-h":
usage()
sys.exit()
saver = tf.train.import_meta_graph('{}.meta'.format(ckpt_dir))
with tf.Session() as sess:
saver.restore(sess, ckpt_dir)
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
idx = 0
for var in all_vars:
m[var.name] = sess.run(var)
print(str(idx) + " " + str(var.name) + " " + str(var.shape))
# print(m[var.name].flatten()[:10])
idx += 1
np.savez(output_file, **m)
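# The saved .npz maps checkpoint variable names (var.name, including the ":0"
# suffix) to their numpy values. A quick way to inspect it, for example:
#   m = np.load("./model.npz")
#   print(list(m.keys())[:5])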
| FasterTransformer-main | examples/tensorflow/xlnet/convertModel.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import pathlib
import tarfile
import typing
import torch
import yaml
from .utils import cpu_map_location, gpu_map_location
LOGGER = logging.getLogger(__name__)
def unpack_nemo_ckpt(
nemo_archive_path: typing.Union[str, pathlib.Path],
out_dir_path: typing.Union[str, pathlib.Path],
):
nemo_archive_path = pathlib.Path(nemo_archive_path)
if not nemo_archive_path.exists():
raise FileNotFoundError(f"{nemo_archive_path} does not exist")
for tar_mode in ["r:", "r:gz"]:
try:
with tarfile.open(nemo_archive_path, mode=tar_mode) as tar_file:
import os
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(tar_file, path=out_dir_path)
return out_dir_path
except tarfile.ReadError:
pass
raise RuntimeError(f"Could not unpack {nemo_archive_path}")
def extract_layers_with_prefix(model_, prefix):
length_to_trim = len(prefix)
model_state = model_.get("state_dict", model_)
return {key[length_to_trim:]: model_state[key] for key in model_state.keys() if prefix in key}
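# Illustrative example (the prefix is hypothetical): with
#   prefix = "model.language_model.encoder."
# a state-dict entry "model.language_model.encoder.layers.0.attention.query.weight"
# is returned under the shortened key "layers.0.attention.query.weight".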
class UnpackedNemoCheckpointDir:
def __init__(self, checkpoints_dir: typing.Union[str, pathlib.Path], load_checkpoints_to_cpu: bool = False):
self._checkpoints_dir = pathlib.Path(checkpoints_dir)
self._load_checkpoints_to_cpu = load_checkpoints_to_cpu
@property
@functools.lru_cache
def model_config(self):
model_config = None
model_config_filename = "model_config.yaml"
model_configs_paths = list(self._checkpoints_dir.rglob(model_config_filename))
if model_configs_paths:
if len(model_configs_paths) > 1:
raise RuntimeError(
f"There are more than single {model_config_filename} "
f"in {self._checkpoints_dir}: {', '.join(map(lambda p: p.as_posix(), model_configs_paths))}"
)
model_config_path = model_configs_paths[0]
LOGGER.debug("Loading model config from %s", model_config_path)
with model_config_path.open("r") as model_config_file:
model_config = yaml.load(model_config_file, Loader=yaml.SafeLoader)
else:
LOGGER.debug("Searching model config in checkpoints")
# try to obtain from checkpoint
checkpoint_name = self.checkpoint_name
checkpoints_paths = sorted(self._checkpoints_dir.rglob(checkpoint_name))
if checkpoints_paths:
# assume that parallel ranks 0 checkpoint should have model config embedded
checkpoint_path = checkpoints_paths[0]
map_location_fn = cpu_map_location if self._load_checkpoints_to_cpu else gpu_map_location
model_00 = torch.load(checkpoint_path, map_location=map_location_fn)
if "hyper_parameters" in model_00 and "cfg" in model_00["hyper_parameters"]:
model_config = model_00["hyper_parameters"]["cfg"]
LOGGER.debug("Loaded model config from checkpoint %s", checkpoint_path)
else:
LOGGER.debug("Could not find model config in checkpoint %s", checkpoint_path)
del model_00
if model_config is None:
LOGGER.warning("Could not find checkpoint with NeMo model config in %s", self._checkpoints_dir)
LOGGER.debug("Loaded model config %s", model_config)
return model_config
@property
def checkpoints_dir(self):
return self._checkpoints_dir
def get_checkpoints_paths(self, tensor_model_parallel_size=1, pipeline_model_parallel_size=1):
"""
Injects tensor/pipeline model parallel ranks into the filepath.
Does nothing if not using model parallelism.
"""
checkpoint_path_without_rank = self.checkpoints_dir / self.checkpoint_name
def _inject_parallel_ranks(tp_rank, pp_rank):
if tensor_model_parallel_size > 1 or pipeline_model_parallel_size > 1:
if pipeline_model_parallel_size is None or pipeline_model_parallel_size == 1:
checkpoint_path = (
checkpoint_path_without_rank.parent
/ f"mp_rank_{tp_rank:02d}"
/ checkpoint_path_without_rank.name
)
else:
checkpoint_path = (
checkpoint_path_without_rank.parent
/ f"tp_rank_{tp_rank:02d}_pp_rank_{pp_rank:03d}"
/ checkpoint_path_without_rank.name
)
return checkpoint_path
else:
return checkpoint_path_without_rank
return [
[
_inject_parallel_ranks(tp_rank=tp_rank, pp_rank=pp_rank)
for pp_rank in range(pipeline_model_parallel_size)
]
for tp_rank in range(tensor_model_parallel_size)
]
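# Example (illustrative): with checkpoint_name "model_weights.ckpt",
# tensor_model_parallel_size=2 and pipeline_model_parallel_size=1, this yields
#   <checkpoints_dir>/mp_rank_00/model_weights.ckpt
#   <checkpoints_dir>/mp_rank_01/model_weights.ckpt
# returned as a nested list indexed [tp_rank][pp_rank].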
@property
@functools.lru_cache
def checkpoint_name(self):
patterns = [
"model_weights.ckpt", # older megatron checkpoints
"*last.ckpt", # newer format of checkpoints
]
for pattern in patterns:
model_files = sorted(list(self._checkpoints_dir.rglob(pattern)))
if model_files:
return model_files[0].name
raise ValueError(f"Could not find checkpoint files in {self._checkpoints_dir}")
@functools.lru_cache
def get_tokenizer_file_path(self, tokenizer_key, file_key, default_filename_pattern):
model_config = self.model_config
file_property = None
if tokenizer_key in model_config and file_key in model_config[tokenizer_key]:
file_property = model_config[tokenizer_key][file_key]
elif file_key in model_config:
file_property = model_config[file_key]
LOGGER.debug("model_config[%s][%s]=%s", tokenizer_key, file_key, file_property)
if file_property and file_property.startswith("nemo:"):
filename = file_property.split("nemo:")[1]
filename_pattern = f"*{filename}"
elif file_property and file_property.startswith("/artifacts/"):
filename = pathlib.Path(file_property).name
filename_pattern = f"*{filename}"
elif file_property is None or file_property == "None":
filename_pattern = None
else:
filename_pattern = default_filename_pattern
LOGGER.warning(
f"Tokenizer file from config: {tokenizer_key}.{file_key}={file_property} "
f"looks like unsupported path. Pattern {filename_pattern} will be used."
)
file_path = None
if filename_pattern is not None:
files_paths = list(self._checkpoints_dir.glob(filename_pattern))
if files_paths:
assert len(files_paths) == 1
file_path = files_paths[0]
return file_path
| FasterTransformer-main | examples/pytorch/nemo.py |
| FasterTransformer-main | examples/pytorch/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def add_special_tokens_to_tokenizer(tokenizer):
# Add cls, sep and mask tokens to the tokenizer if they are not already present.
if not hasattr(tokenizer, 'cls_token'):
tokenizer.add_special_tokens({'cls_token': '<cls>'})
if not hasattr(tokenizer.tokenizer, 'sep_id'):
tokenizer.add_special_tokens({'sep_token': '<sep>'})
if not hasattr(tokenizer.tokenizer, 'mask_id'):
tokenizer.add_special_tokens({'mask_token': '<mask>'})
# bos, eos, pad and unk may be present in the provided spm .model file, if they are, use it.
if not hasattr(tokenizer, 'pad_token'):
if hasattr(tokenizer.tokenizer, 'pad_id') and tokenizer.tokenizer.pad_id() > 0:
tokenizer.pad_token = tokenizer.tokenizer.id_to_piece(tokenizer.tokenizer.pad_id())
else:
tokenizer.add_special_tokens({'pad_token': '<pad>'})
else:
tokenizer.add_special_tokens({'pad_token': '<pad>'})
if not hasattr(tokenizer, 'bos_token'):
if hasattr(tokenizer.tokenizer, 'bos_id') and tokenizer.tokenizer.bos_id() > 0:
tokenizer.bos_token = tokenizer.tokenizer.id_to_piece(tokenizer.tokenizer.bos_id())
else:
tokenizer.add_special_tokens({'bos_token': '<bos>'})
else:
tokenizer.add_special_tokens({'bos_token': '<s>'})
if not hasattr(tokenizer, 'eos_token'):
if hasattr(tokenizer.tokenizer, 'eos_id') and tokenizer.tokenizer.eos_id() > 0:
tokenizer.eos_token = tokenizer.tokenizer.id_to_piece(tokenizer.tokenizer.eos_id())
else:
tokenizer.add_special_tokens({'eos_token': '<eos>'})
else:
tokenizer.add_special_tokens({'eos_token': '</s>'})
| FasterTransformer-main | examples/pytorch/tokenizer.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import typing
def print_memory_usage(info=""):
t = torch.cuda.get_device_properties(0).total_memory / 1024 ** 2
r = torch.cuda.memory_reserved(0) / 1024 ** 2
a = torch.cuda.memory_allocated(0) / 1024 ** 2
f = r - a # free inside reserved
print(f"[INFO][{info}] total_memory: {t}, reversed: {r}, allocated: {a}")
def torch2np(tensor: torch.Tensor, np_data_type: typing.Optional[np.dtype] = None):
tensor = tensor.cpu()
if tensor.dtype == torch.bfloat16:
tensor = tensor.to(torch.float32)
data = tensor.numpy()
if np_data_type is not None:
data = data.astype(np_data_type)
return data
def safe_transpose(tensor):
if tensor.dim() <= 1:
return tensor
if tensor.dim() == 2:
return tensor.T
raise ValueError("Tensor has more than 2 dimensions, unable to safely transpose.")
WEIGHT2DTYPE = {
"fp32": np.float32,
"fp16": np.float16,
}
def cpu_map_location(storage, loc):
return storage.cpu()
def gpu_map_location(storage, loc):
if loc.startswith("cuda"):
training_gpu_idx = int(loc.split(":")[1])
inference_gpu_idx = training_gpu_idx % torch.cuda.device_count()
return storage.cuda(inference_gpu_idx)
elif loc.startswith("cpu"):
return storage.cpu()
else:
raise NotImplementedError(f"Not handled {loc}")
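# Usage sketch (mirrors how nemo.py above calls torch.load; the path is hypothetical):
#   state = torch.load("checkpoint.ckpt", map_location=gpu_map_location)  # keep on GPU
#   state = torch.load("checkpoint.ckpt", map_location=cpu_map_location)  # force to CPU
# gpu_map_location re-maps a tensor saved from training GPU k onto inference GPU
# k % torch.cuda.device_count().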
| FasterTransformer-main | examples/pytorch/utils.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import re
import numpy as np
import torch
ACTIVATION_AMAX_NUM = 72
INT8O_KERNEL_NUM = 5
INT8O_GEMM_NUM = 7
TRT_FUSED_MHA_AMAX_NUM = 3
SCALE_RESERVE_NUM = 8
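# Per-block amax buffer layout (as assembled below): ACTIVATION_AMAX_NUM (72)
# activation entries, INT8O_KERNEL_NUM (5) kernel amaxes, INT8O_GEMM_NUM (7)
# int8-output GEMM scales, 1 extra slot, TRT_FUSED_MHA_AMAX_NUM (3) fused-MHA
# amaxes and SCALE_RESERVE_NUM (8) reserved entries: 96 floats per block in total.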
def extract_amaxlist(init_dict, depths, version=1, ths_path='../../../build/lib/libth_transformer.so', verbose=True):
# print("Quantizing checkpoint ...")
torch.classes.load_library(ths_path)
weight_quantize = torch.ops.fastertransformer.swin_weight_quantize
layer_num = len(depths)
amaxTotalNum = ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM + INT8O_GEMM_NUM + 1 + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM
kernel_name_list = ["attn.qkv",
"attn.proj",
"mlp.fc1",
"mlp.fc2"]
amax_name_list = ["attn.qkv._input_quantizer",
"attn.qkv._aftergemm_quantizer",
"attn.proj._input_quantizer",
"attn.proj._aftergemm_quantizer",
"attn.matmul_q_input_quantizer",
"attn.matmul_k_input_quantizer",
"attn.matmul_v_input_quantizer",
"attn.matmul_a_input_quantizer",
"attn.softmax_input_quantizer",
"mlp.fc1._input_quantizer",
"mlp.fc1._aftergemm_quantizer",
"mlp.fc2._input_quantizer",
"mlp.fc2._aftergemm_quantizer",
"add1_residual_input_quantizer" if version == 1 else "attn.mha_q_input_quantizer",
"add2_residual_input_quantizer" if version == 1 else "attn.mha_k_input_quantizer"
]
int8O_gemm_weight_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_weight_list = ["attn.qkv",
"attn.proj",
"mlp.fc1",
"mlp.fc2",
"attn.matmul_k_input_quantizer" if version == 1 else "attn.mha_k_input_quantizer",
"attn.matmul_v_input_quantizer"]
int8O_gemm_input_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_input_list = ["attn.qkv._input_quantizer",
"attn.proj._input_quantizer",
"mlp.fc1._input_quantizer",
"mlp.fc2._input_quantizer",
"attn.matmul_q_input_quantizer" if version == 1 else "attn.mha_q_input_quantizer",
"attn.matmul_a_input_quantizer"]
int8O_gemm_output_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_output_list = ["attn.qkv._aftergemm_quantizer",
"attn.proj._aftergemm_quantizer",
"mlp.fc1._aftergemm_quantizer",
"mlp.fc2._aftergemm_quantizer",
"attn.softmax_input_quantizer",
"attn.proj._input_quantizer"]
downsample_input = "downsample.reduction._input_quantizer"
downsample_weight = "downsample.reduction._weight_quantizer"
downsample_out = "downsample.reduction._aftergemm_quantizer"
for i in range(layer_num):
for depth in range(depths[i]):
amaxList = np.zeros([amaxTotalNum]).astype(np.float32)
amax_id = 0
for amax_name in amax_name_list:
quant_max = init_dict["layers.{}.blocks.{}.{}._amax".format(i, depth, amax_name)].item()
amax = abs(quant_max)#round(abs(quant_max)*factor)/factor
if amax_name in int8O_gemm_input_list:
int8O_gemm_input_amax_list[int8O_gemm_input_list.index(amax_name)] = amax
if amax_name in int8O_gemm_output_list:
int8O_gemm_output_amax_list[int8O_gemm_output_list.index(amax_name)] = amax
if amax_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(amax_name)] = amax
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
if verbose:
print(i, amax_name)
print('quant_max:', quant_max)
print('amax:', amax)
if i != layer_num - 1:
amax = init_dict["layers.{}.{}._amax".format(i, downsample_input)].item()
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
amax = init_dict["layers.{}.{}._amax".format(i, downsample_out)].item()
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
else:
amax_id += 8
if verbose:
print("done process layer_{} block_{} activation amax".format(i, depth))
#kernel amax starts from ACTIVATION_AMAX_NUM
assert amax_id == 68
amax_id = ACTIVATION_AMAX_NUM
for kernel_id, kernel_name in enumerate(kernel_name_list):
kernel = init_dict["layers.{}.blocks.{}.{}.weight".format(i, depth, kernel_name)].transpose(-1, -2).contiguous()
quant_max2 = init_dict["layers.{}.blocks.{}.{}._weight_quantizer._amax".format(i, depth, kernel_name)]
amax2 = abs(quant_max2)
# if (amax2.dim() == 0):
# quant_max_processed = torch.full((kernel.size(1),), amax2.item(), dtype=amax2.dtype, device=amax2.device)
# else:
# quant_max_processed = amax2.view(-1)
kernel_processed = weight_quantize(kernel.half(), amax2.cuda())
init_dict["layers.{}.blocks.{}.{}.weight".format(i, depth, kernel_name)] = kernel_processed
if kernel_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(kernel_name)] = amax2.item()
amaxList[amax_id] = amax2
amax_id += 1
if verbose:
print(i, kernel_id, kernel_name)
print('kernel:', kernel)
print('quant_max2:', quant_max2)
# print('quant_max_processed_:', quant_max_processed)
            if i != layer_num - 1:
                amaxList[amax_id] = init_dict["layers.{}.downsample.reduction._weight_quantizer._amax".format(i)].item()
            # the last layer has no downsample reduction; its slot stays 0 but the index still advances
            amax_id += 1
            assert amax_id == ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM
#for int8O gemm deQuant
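            # dequant scale for an INT8-output GEMM: (input_amax/127)*(weight_amax/127) / (output_amax/127)
            #                                      = input_amax*weight_amax / (127*output_amax)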
for j in range(INT8O_GEMM_NUM - 1):
amaxList[amax_id] = (int8O_gemm_input_amax_list[j]*int8O_gemm_weight_amax_list[j])/(127.0*int8O_gemm_output_amax_list[j])
if verbose:
print('layernum:', i, 'j:', j, ' gemm_int8IO_scale:',amaxList[amax_id])
print(int8O_gemm_input_amax_list[j], int8O_gemm_weight_amax_list[j], int8O_gemm_output_amax_list[j])
amax_id += 1
            if i != layer_num - 1:
                patchMerge_i = init_dict["layers.{}.{}._amax".format(i, downsample_input)].item()
                patchMerge_w = init_dict["layers.{}.{}._amax".format(i, downsample_weight)].item()
                patchMerge_o = init_dict["layers.{}.{}._amax".format(i, downsample_out)].item()
                amaxList[amax_id] = (patchMerge_i * patchMerge_w) / (127 * patchMerge_o)
            # again, advance the index unconditionally so the fixed amaxList layout holds for the last layer
            amax_id += 1
            assert amax_id == ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM + INT8O_GEMM_NUM
amax_id += 1
#for trt fused MHA amax
#### QKV_addBias_amax
# amaxList[amax_id] = np.maximum(np.maximum(amaxList[16],amaxList[20]), amaxList[24])
# amax_id += 1
# #### softmax amax
# amaxList[amax_id] = amaxList[28]
# amax_id += 1
# #### bmm2 amax
# amaxList[amax_id] = amaxList[8]
# amax_id += 1
# qkvMax = np.maximum(np.maximum(amaxList[16],amaxList[20]), amaxList[24])
if version == 2:
amaxList[amax_id] = amaxList[52] * amaxList[56] / (127.0 * 127.0)
else:
amaxList[amax_id] = amaxList[16] * amaxList[20] / (127.0 * 127.0)
amax_id += 1
amaxList[amax_id] = 127.0 / amaxList[28]
amax_id += 1
amaxList[amax_id] = amaxList[24] * amaxList[28] / (127.0 * amaxList[8])
amax_id += 1
init_dict["layers.{}.blocks.{}.amaxList".format(i, depth)] = torch.tensor(amaxList, dtype=torch.float32)
if verbose:
print("done process layer_{} block_{} kernel weight".format(i, depth))
if i != layer_num - 1:
kernel = init_dict["layers.{}.downsample.reduction.weight".format(i)]
quant_max2 = init_dict["layers.{}.downsample.reduction._weight_quantizer._amax".format(i)]
amax2 = abs(quant_max2)
kernel = kernel.transpose(-1, -2).contiguous()
kernel_processed = weight_quantize(kernel.half(), amax2.cuda())
init_dict["layers.{}.downsample.reduction.weight".format(i)] = kernel_processed
# print("Quantizing checkpoint done.")
return init_dict
if __name__ == '__main__':
weights = torch.load('Swin-Transformer-Quantization/calib-checkpoint/swinv2_tiny_patch4_window8_256_calib.pth')
extract_amaxlist(weights, [2, 2, 6, 2], version=2, verbose=True)
|
FasterTransformer-main
|
examples/pytorch/swin/checkpoint_quantization.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from tqdm import tqdm
import sys
sys.path.insert(0, "./Swin-Transformer-Quantization")
from SwinTransformer.config import get_config
from models import build_model
from data import build_val_loader
from SwinTransformer.utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
from SwinTransformerINT8Weight import SwinTransformerINT8Weight
import quant_utils
test_time = 100
warmup_time = 10
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--version', type=int, default=1, help='version of swin', choices=[1, 2])
parser.add_argument('--disable_amp', type=bool, default=True, help='disable amp', )
parser.add_argument('--fused_window_process', type=bool, default=False, help='whether use fused window process', )
parser.add_argument('--th-path', type=str, help='path to pytorch library')
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', required=True, help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O0', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--profile', action='store_true', help='Perform profiling only, with some random data')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--calib', action='store_true', help='Perform calibration only')
parser.add_argument('--train', action='store_true', help='Perform training only')
parser.add_argument('--int8-mode', type=int, help='int8 mode', choices=[1, 2])
parser.add_argument('--num-calib-batch', type=int, default=4, help='Number of batches for calibration. 0 will disable calibration.')
# distributed training
parser.add_argument("--local_rank", type=int, default=0, help='local rank for DistributedDataParallel')
quant_utils.add_arguments(parser)
args, unparsed = parser.parse_known_args()
args = quant_utils.set_args(args)
quant_utils.set_default_quantizers(args)
config = get_config(args)
return args, config
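# Example invocation (the config, checkpoint and library paths below are illustrative
# assumptions, not taken from this script):
#   python infer_swintransformer_int8_op.py --eval --version 1 --int8-mode 1 \
#       --cfg <swin_config.yaml> --resume <calibrated_checkpoint.pth> \
#       --th-path <path/to/libth_transformer.so> --data-path <imagenet_val_dir> --batch-size 32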
def main(args, config):
model = build_model(config)
model.cuda()
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
msg = model.load_state_dict(checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint, strict=False)
print(msg)
del checkpoint
if args.profile:
quant_utils.configure_model(model, args, calib=False)
validate_with_random_data(config, args, model)
elif args.eval:
dataset_val, data_loader_val = build_val_loader(config)
quant_utils.configure_model(model, args, calib=False)
acc1, acc5, loss = validate(config, args, data_loader_val, model)
print(f"Accuracy of resumed network on the {len(dataset_val)} test images: {acc1:.1f}%")
@torch.no_grad()
def validate(config, args, data_loader, model):
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
if args.version == 1:
depths = config.MODEL.SWIN.DEPTHS
num_heads = config.MODEL.SWIN.NUM_HEADS
window_size = config.MODEL.SWIN.WINDOW_SIZE
patch_size = config.MODEL.SWIN.PATCH_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
ape = config.MODEL.SWIN.APE
patch_norm = config.MODEL.SWIN.PATCH_NORM
mlp_ratio = config.MODEL.SWIN.MLP_RATIO
qkv_bias = config.MODEL.SWIN.QKV_BIAS
if config.MODEL.SWIN.QK_SCALE is not None:
qk_scale = config.MODEL.SWIN.QK_SCALE
else:
qk_scale = 1.0
elif args.version == 2:
depths = config.MODEL.SWINV2.DEPTHS
num_heads = config.MODEL.SWINV2.NUM_HEADS
window_size = config.MODEL.SWINV2.WINDOW_SIZE
patch_size = config.MODEL.SWINV2.PATCH_SIZE
in_chans = config.MODEL.SWINV2.IN_CHANS
embed_dim = config.MODEL.SWINV2.EMBED_DIM
ape = config.MODEL.SWINV2.APE
patch_norm = config.MODEL.SWINV2.PATCH_NORM
mlp_ratio = config.MODEL.SWINV2.MLP_RATIO
qkv_bias = config.MODEL.SWINV2.QKV_BIAS
qk_scale = 1.0
int8_mode = args.int8_mode
version = args.version
th_path = args.th_path
depths_tensor = torch.tensor(depths, dtype=torch.int)
num_heads_tensor = torch.tensor(num_heads, dtype=torch.int)
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
sw_weights = SwinTransformerINT8Weight(layer_num, window_size, depths, num_heads, th_path, model.state_dict(), version=version)
torch.classes.load_library(th_path)
try:
swin_transformer = torch.classes.SwinTransformerINT8.Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
except:
# legacy ths for 20.03 image
swin_transformer = torch.classes.SwinTransformerINT8Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
end = time.time()
for idx, (images, target) in enumerate(data_loader):
images_half = images.half()
images_half = images_half.cuda(non_blocking=True)
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
        swin_transformer_output = swin_transformer.forward(images_half)
        output = model.head(swin_transformer_output.float())
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
print(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
print(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def run_swintransformernv_op(config, args, model, images, use_fp16):
if args.version == 1:
depths = config.MODEL.SWIN.DEPTHS
num_heads = config.MODEL.SWIN.NUM_HEADS
window_size = config.MODEL.SWIN.WINDOW_SIZE
patch_size = config.MODEL.SWIN.PATCH_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
ape = config.MODEL.SWIN.APE
patch_norm = config.MODEL.SWIN.PATCH_NORM
mlp_ratio = config.MODEL.SWIN.MLP_RATIO
qkv_bias = config.MODEL.SWIN.QKV_BIAS
if config.MODEL.SWIN.QK_SCALE is not None:
qk_scale = config.MODEL.SWIN.QK_SCALE
else:
qk_scale = 1.0
elif args.version == 2:
depths = config.MODEL.SWINV2.DEPTHS
num_heads = config.MODEL.SWINV2.NUM_HEADS
window_size = config.MODEL.SWINV2.WINDOW_SIZE
patch_size = config.MODEL.SWINV2.PATCH_SIZE
in_chans = config.MODEL.SWINV2.IN_CHANS
embed_dim = config.MODEL.SWINV2.EMBED_DIM
ape = config.MODEL.SWINV2.APE
patch_norm = config.MODEL.SWINV2.PATCH_NORM
mlp_ratio = config.MODEL.SWINV2.MLP_RATIO
qkv_bias = config.MODEL.SWINV2.QKV_BIAS
qk_scale = 1.0
int8_mode = args.int8_mode
version = args.version
th_path = args.th_path
depths_tensor = torch.tensor(depths, dtype=torch.int)
num_heads_tensor = torch.tensor(num_heads, dtype=torch.int)
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
    print('window_size:', window_size, 'img_size:', img_size)
torch.classes.load_library(th_path)
sw_weights = SwinTransformerINT8Weight(layer_num, window_size, depths, num_heads, th_path, model.state_dict(), version=version)
##run pytorch op
try:
swin_transformer = torch.classes.SwinTransformerINT8.Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
except:
# legacy ths for 20.03 image
swin_transformer = torch.classes.SwinTransformerINT8Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
# warm up
for i in range(warmup_time):
op_embedding = swin_transformer.forward(images)
op_output = model.head(op_embedding.float())
torch.cuda.synchronize()
op_begin = time.time()
for i in range(test_time):
op_embedding = swin_transformer.forward(images)
torch.cuda.synchronize()
op_end = time.time()
    op_output = op_output.cpu().numpy()
    # the custom op runs in INT8 regardless of the input precision, so a single timing line suffices
    print("INT8 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
return op_output
@torch.no_grad()
def validate_with_random_data(config, args, model):
model.eval()
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
print(images.shape)
images_half = torch.tensor(images, dtype=torch.half)
images_float = torch.tensor(images, dtype=torch.float)
images_half = images_half.cuda(non_blocking=True)
images_float = images_float.cuda(non_blocking=True)
test_time = 100
warmup_time = 10
INT8_op_output = run_swintransformernv_op(config, args, model, images_half, True)
INT8_torch_output = model(images_float)
INT8_torch_output = INT8_torch_output.cpu().numpy()
diff = abs(INT8_torch_output - INT8_op_output)
assert diff.mean() < 0.1, "[ERROR] SWIN INT8 Op TEST FAIL !"
print("INT8_torch_output vs INT8_op_output , avg diff : ", diff.mean((1)), "max diff : ", diff.max((1)))
if __name__ == '__main__':
args, config = parse_option()
seed = config.SEED
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
main(args, config)
|
FasterTransformer-main
|
examples/pytorch/swin/infer_swintransformer_int8_op.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import sys
sys.path.insert(0, "./Swin-Transformer-Quantization/SwinTransformer")
from config import get_config
from models import build_model
from SwinTransformerWeightTransposeQKVWeight import SwinTransformerWeightTransposeQKVWeight
#from torch._C import _nvtx
test_time = 100
warmup_time = 10
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer evaluation script', add_help=False)
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
parser.add_argument('--version', type=int, default=1, help='version of swin', )
parser.add_argument('--disable_amp', type=bool, default=True, help='disable amp', )
parser.add_argument('--fused_window_process', type=bool, default=False, help='whether use fused window process', )
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
# easy config modification
parser.add_argument('--th-path', type=str, help='path to pytorch library')
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
# distributed training
parser.add_argument("--local_rank", type=int, default=0, help='local rank for DistributedDataParallel')
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
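# Example invocation (the config, checkpoint and library paths below are illustrative
# assumptions, not taken from this script):
#   python infer_swintransformer_op.py --version 1 --cfg <swin_config.yaml> \
#       --resume <swin_checkpoint.pth> --th-path <path/to/libth_transformer.so> --batch-size 32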
def main(args, config):
model = build_model(config)
model.cuda()
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
model.load_state_dict(checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint, strict=False)
validate_with_random_data(args, config, model)
@torch.no_grad()
def run_swintransformernv_op(args, config, model, images, data_type):
if args.version == 1:
depths = config.MODEL.SWIN.DEPTHS
num_heads = config.MODEL.SWIN.NUM_HEADS
window_size = config.MODEL.SWIN.WINDOW_SIZE
patch_size = config.MODEL.SWIN.PATCH_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
ape = config.MODEL.SWIN.APE
patch_norm = config.MODEL.SWIN.PATCH_NORM
mlp_ratio = config.MODEL.SWIN.MLP_RATIO
qkv_bias = config.MODEL.SWIN.QKV_BIAS
if config.MODEL.SWIN.QK_SCALE is not None:
qk_scale = config.MODEL.SWIN.QK_SCALE
else:
qk_scale = 1.0
elif args.version == 2:
depths = config.MODEL.SWINV2.DEPTHS
num_heads = config.MODEL.SWINV2.NUM_HEADS
window_size = config.MODEL.SWINV2.WINDOW_SIZE
patch_size = config.MODEL.SWINV2.PATCH_SIZE
in_chans = config.MODEL.SWINV2.IN_CHANS
embed_dim = config.MODEL.SWINV2.EMBED_DIM
ape = config.MODEL.SWINV2.APE
patch_norm = config.MODEL.SWINV2.PATCH_NORM
mlp_ratio = config.MODEL.SWINV2.MLP_RATIO
qkv_bias = config.MODEL.SWINV2.QKV_BIAS
qk_scale = 1.0
version = args.version
th_path = args.th_path
depths_tensor = torch.tensor(depths, dtype=torch.int)
num_heads_tensor = torch.tensor(num_heads, dtype=torch.int)
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
torch.classes.load_library(th_path)
sw_weights = SwinTransformerWeightTransposeQKVWeight(layer_num, window_size, depths, num_heads, th_path, model.state_dict(), version)
if data_type == 'fp16':
sw_weights.to_half()
model.half()
elif data_type == 'bf16':
sw_weights.to_bfloat16()
model.bfloat16()
elif data_type == 'fp32':
sw_weights.to_float32()
model.float()
sw_weights.to_cuda()
##run pytorch op
try:
swin_transformer = torch.classes.SwinTransformer.Class(sw_weights.weights, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
except:
# legacy ths for 20.03 image
swin_transformer = torch.classes.SwinTransformerClass(sw_weights.weights, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
# warm up
for i in range(warmup_time):
op_embedding = swin_transformer.forward(images)
op_output = model.head(op_embedding)
torch.cuda.synchronize()
op_begin = time.time()
#_nvtx.rangePushA("op")
for i in range(test_time):
op_embedding = swin_transformer.forward(images)
op_output = model.head(op_embedding)
#_nvtx.rangePop()
torch.cuda.synchronize()
op_end = time.time()
op_output = op_output.float().cpu().numpy()
if data_type == 'fp16':
print("FP16 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
elif data_type == 'bf16':
print("BF16 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
else:
print("FP32 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
return op_output
@torch.no_grad()
def run_torch(model, images, mark):
# warm up
for i in range(warmup_time):
output = model(images)
torch.cuda.synchronize()
torch_start = time.time()
#_nvtx.rangePushA("torch")
for i in range(test_time):
torch_output = model(images)
#_nvtx.rangePop()
torch.cuda.synchronize()
torch_end = time.time()
torch_output = torch_output.float().cpu().numpy() # Numpy doesn't support BF16
print(mark + " time : ", (torch_end - torch_start)/test_time*1000.0, "ms")
return torch_output
@torch.no_grad()
def validate_with_random_data(args, config, model):
model.eval()
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
if args.version == 1:
in_chans = config.MODEL.SWIN.IN_CHANS
elif args.version == 2:
in_chans = config.MODEL.SWINV2.IN_CHANS
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
images_half = torch.tensor(images, dtype=torch.half)
images_bfloat16 = torch.tensor(images, dtype=torch.bfloat16)
images_float = torch.tensor(images, dtype=torch.float)
images_half = images_half.cuda(non_blocking=True)
images_bfloat16 = images_bfloat16.cuda(non_blocking=True)
images_float = images_float.cuda(non_blocking=True)
##run original swin-transformer
# run pytorch op
FP32_op_output = run_swintransformernv_op(args, config, model, images_float, 'fp32')
traced_module_float = torch.jit.trace(model, images_float)
FP32_torch_traced_output = run_torch(traced_module_float, images_float, "FP32 torch trace")
FP32_torch_output = run_torch(model, images_float, "FP32 torch")
FP16_op_output = run_swintransformernv_op(args, config, model, images_half, 'fp16')
traced_module_half = torch.jit.trace(model.half(), images_half)
FP16_torch_traced_output = run_torch(traced_module_half, images_half, "FP16 torch trace")
FP16_torch_output = run_torch(model, images_half, "FP16 torch")
diff = abs(FP32_torch_traced_output - FP32_op_output)
assert diff.mean() < 0.01, "[ERROR] SWIN FP32 Op TEST FAIL !"
print("FP32_torch_traced_output vs FP32_op_output , avg diff : ", diff.mean(), "max diff : ", diff.max())
diff = abs(FP16_torch_traced_output - FP16_op_output)
assert diff.mean() < 0.01, "[ERROR] SWIN FP16 Op TEST FAIL !"
print("FP16_torch_traced_output vs FP16_op_output , avg diff : ", diff.mean(), "max diff : ", diff.max())
if __name__ == '__main__':
args, config = parse_option()
# seed = config.SEED + int(time.time())
seed = config.SEED
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
main(args, config)
|
FasterTransformer-main
|
examples/pytorch/swin/infer_swintransformer_op.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
import numpy as np
class SwinTransformerWeightTransposeQKVWeight(object):
def __init__(self, layer_num, window_size, depths, num_heads, ths_path, weights=None, version=1):
"""weights need be a state_dict of swin transformer model"""
block_weight_suffixes = ['attn.proj.weight',
'attn.proj.bias',
'mlp.fc1.weight',
'mlp.fc1.bias',
'mlp.fc2.weight',
'mlp.fc2.bias',
'norm1.weight',
'norm1.bias',
'norm2.weight',
'norm2.bias']
layer_weight_suffixes = ['downsample.norm.weight',
'downsample.norm.bias',
'downsample.reduction.weight']
sw_weight_suffixes = ['patch_embed.proj.weight',
'patch_embed.proj.bias',
'patch_embed.norm.weight',
'patch_embed.norm.bias',
'norm.weight',
'norm.bias']
self.layer_num = layer_num
self.depths = depths
self.weights = []
torch.classes.load_library(ths_path)
gen_relative_pos_bias = torch.ops.fastertransformer.gen_relative_pos_bias
transform_trt_mask = torch.ops.fastertransformer.transform_trt_mask
if weights is None:
print("[ERROR][SwinTransformerWeights::__init__] weights should not be empty!")
exit(-1)
else:
self._generated_weights = False
#calculate size_per_head
qkv_weight_name = "layers.0.blocks.0.attn.qkv.weight"
shape = weights[qkv_weight_name].shape
#in case we flatten this weight
if len(shape) == 1:
dim = int(math.sqrt(shape[0]/3))
weights[qkv_weight_name] = weights[qkv_weight_name].reshape([3*dim, dim])
shape = weights[qkv_weight_name].shape
size_per_head = int(shape[0]/3/num_heads[0])
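            # self.weights is a flat list consumed positionally by the custom op; per block it holds
            # the transposed qkv weight, the qkv bias (v1) or packed q/zero/v bias (v2), the
            # block_weight_suffixes entries, the relative position bias (+ optional TRT-fused variant)
            # and, for version 2, logit_scale; per layer it then holds the layer_weight_suffixes
            # entries (empty tensors for the last layer) and the attn_mask (+ optional TRT-fused
            # variant); the model-level sw_weight_suffixes entries come last.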
#loop over layers
for layer_idx in range(layer_num):
##loop over blocks
for block_idx in range(depths[layer_idx]):
#transpose qkv weight [3*head*size, k] --> [k, head*3*size]
qkv_weight_name = "layers.{}.blocks.{}.attn.qkv.weight".format(layer_idx, block_idx)
if qkv_weight_name in weights:
shape = weights[qkv_weight_name].shape
#in case we flatten this weight
if len(shape) == 1:
dim = int(math.sqrt(shape[0]/3))
weights[qkv_weight_name] = weights[qkv_weight_name].reshape([3*dim, dim])
shape = weights[qkv_weight_name].shape
weights[qkv_weight_name] = weights[qkv_weight_name].reshape([3, num_heads[layer_idx], int(shape[0]/3/num_heads[layer_idx]), -1]).permute(3, 1, 0, 2).reshape(shape[1], -1)
self.weights.append(weights[qkv_weight_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(qkv_weight_name))
exit(-1)
#transpose qkv bias [3*head*size] --> [head*3*size]
if version == 1:
qkv_bias_name = "layers.{}.blocks.{}.attn.qkv.bias".format(layer_idx, block_idx)
if qkv_bias_name in weights:
shape = weights[qkv_bias_name].shape
weights[qkv_bias_name] = weights[qkv_bias_name].reshape([3, num_heads[layer_idx], int(shape[0]/3/num_heads[layer_idx])]).permute(1, 0, 2).reshape(-1)
self.weights.append(weights[qkv_bias_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(qkv_bias_name))
exit(-1)
elif version == 2:
q_bias_name = "layers.{}.blocks.{}.attn.q_bias".format(layer_idx, block_idx)
v_bias_name = "layers.{}.blocks.{}.attn.v_bias".format(layer_idx, block_idx)
if q_bias_name in weights and v_bias_name in weights:
qkv_weights = torch.cat((weights[q_bias_name], torch.zeros_like(weights[v_bias_name], requires_grad=False), weights[v_bias_name]))
qkv_weights = qkv_weights.reshape([3, num_heads[layer_idx], int(shape[0]/3/num_heads[layer_idx])]).permute(1, 0, 2).reshape(-1)
self.weights.append(qkv_weights)
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {} or {}.".format(q_bias_name, v_bias_name))
exit(-1)
###block_weight_suffixes
for block_weight_suffix in block_weight_suffixes:
weight_name = 'layers.{}.blocks.{}.{}'.format(layer_idx, block_idx, block_weight_suffix)
if weight_name in weights:
self.weights.append(weights[weight_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(weight_name))
exit(-1)
###get relative position bias
                    ###Notice: for some models, e.g. (img_size, window_size) = (224, 16),
                    ###the window_size_in_use of the last layer may change.
if version == 1:
index_name = 'layers.{}.blocks.{}.attn.relative_position_index'.format(layer_idx, block_idx)
table_name = 'layers.{}.blocks.{}.attn.relative_position_bias_table'.format(layer_idx, block_idx)
if index_name in weights and table_name in weights:
window_size_in_use = int(math.sqrt(weights[index_name].shape[0]))
relative_position_bias = gen_relative_pos_bias(weights[table_name], weights[index_name], window_size_in_use, num_heads[layer_idx], weights[table_name], weights[table_name], weights[table_name], version)
self.weights.append(relative_position_bias)
if relative_position_bias.shape[1] <= 256 and size_per_head == 32:
trt_relative_position_bias = transform_trt_mask(relative_position_bias.half(), relative_position_bias.shape[0], relative_position_bias.shape[1], False)
self.weights.append(trt_relative_position_bias.half())
else:
self.weights.append(torch.Tensor())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {} or {}.".format(index_name, table_name))
exit(-1)
elif version == 2:
index_name = 'layers.{}.blocks.{}.attn.relative_position_index'.format(layer_idx, block_idx)
table_name = 'layers.{}.blocks.{}.attn.relative_coords_table'.format(layer_idx, block_idx)
cpb_mlp_weight1_name = 'layers.{}.blocks.{}.attn.cpb_mlp.0.weight'.format(layer_idx, block_idx)
cpb_mlp_bias1_name = 'layers.{}.blocks.{}.attn.cpb_mlp.0.bias'.format(layer_idx, block_idx)
cpb_mlp_weight2_name = 'layers.{}.blocks.{}.attn.cpb_mlp.2.weight'.format(layer_idx, block_idx)
if index_name in weights and table_name in weights and cpb_mlp_weight1_name in weights and cpb_mlp_bias1_name in weights and cpb_mlp_weight2_name in weights:
window_size_in_use = int(math.sqrt(weights[index_name].shape[0]))
relative_position_bias = gen_relative_pos_bias(weights[table_name], weights[index_name], window_size_in_use, num_heads[layer_idx], weights[cpb_mlp_weight1_name], weights[cpb_mlp_bias1_name], weights[cpb_mlp_weight2_name], version)
self.weights.append(relative_position_bias)
if relative_position_bias.shape[1] <= 256 and size_per_head == 32:
trt_relative_position_bias = transform_trt_mask(relative_position_bias.half(), relative_position_bias.shape[0], relative_position_bias.shape[1], False)
self.weights.append(trt_relative_position_bias.half())
else:
self.weights.append(torch.Tensor())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {} or {} or {} or {} or {}.".format(index_name, table_name, cpb_mlp_weight1_name, cpb_mlp_bias1_name, cpb_mlp_weight2_name))
exit(-1)
print('relative_position_bias', self.weights[-2].shape,'=>', self.weights[-1].shape)
##process attn.logit_scale for version 2
if version == 2:
logit_scale_name = 'layers.{}.blocks.{}.attn.logit_scale'.format(layer_idx, block_idx)
if logit_scale_name in weights:
self.weights.append(torch.clamp(weights[logit_scale_name], max=torch.log(torch.tensor(1. / 0.01))).exp())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(logit_scale_name))
exit(-1)
##deal with layer weights
###loop over layer_weight_suffixes
for layer_weight_suffix in layer_weight_suffixes:
weight_name = 'layers.{}.{}'.format(layer_idx, layer_weight_suffix)
if weight_name in weights:
self.weights.append(weights[weight_name])
else:
                        ####the last layer does not have a downsample weight
if layer_idx == layer_num - 1:
self.weights.append(torch.Tensor())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(weight_name))
exit(-1)
                ###get attn_mask (shared by the blocks of a layer; some layers may not have one)
attn_mask_name = 'layers.{}.blocks.1.attn_mask'.format(layer_idx)
if attn_mask_name in weights:
self.weights.append(weights[attn_mask_name])
if weights[attn_mask_name].shape[1] <= 256 and size_per_head == 32:
trt_attn_mask = transform_trt_mask(weights[attn_mask_name].half(), weights[attn_mask_name].shape[0], weights[attn_mask_name].shape[1], False)
self.weights.append(trt_attn_mask.half())
else:
self.weights.append(torch.Tensor())
else:
self.weights.append(torch.Tensor())
self.weights.append(torch.Tensor())
print('attn_mask', self.weights[-2].shape, '=>', self.weights[-1].shape)
#deal with sw weights
for sw_weight_suffix in sw_weight_suffixes:
                weight_name = sw_weight_suffix
if weight_name in weights:
self.weights.append(weights[weight_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(weight_name))
exit(-1)
def to_cuda(self):
for idx, v in enumerate(self.weights):
self.weights[idx] = v.cuda()
def to_float32(self):
for idx, v in enumerate(self.weights):
self.weights[idx] = v.float()
def to_half(self):
for idx, v in enumerate(self.weights):
self.weights[idx] = v.half()
def to_bfloat16(self):
for idx, v in enumerate(self.weights):
self.weights[idx] = v.bfloat16()
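# Usage sketch mirroring infer_swintransformer_op.py (the library path below is an
# illustrative assumption):
#   sw_weights = SwinTransformerWeightTransposeQKVWeight(layer_num, window_size, depths,
#                                                        num_heads, "libth_transformer.so",
#                                                        model.state_dict(), version=1)
#   sw_weights.to_half()
#   sw_weights.to_cuda()
#   swin_transformer = torch.classes.SwinTransformer.Class(sw_weights.weights, ...)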
|
FasterTransformer-main
|
examples/pytorch/swin/SwinTransformerWeightTransposeQKVWeight.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
from checkpoint_quantization import extract_amaxlist
class SwinTransformerINT8Weight(object):
def __init__(self, layer_num, window_size, depths, num_heads, ths_path, weights=None, version=1):
"""weights need be a state_dict of swin transformer model"""
block_weight_suffixes = ['attn.proj.weight',
'attn.proj.bias',
'mlp.fc1.weight',
'mlp.fc1.bias',
'mlp.fc2.weight',
'mlp.fc2.bias',
'norm1.weight',
'norm1.bias',
'norm2.weight',
'norm2.bias',
'amaxList',
'h_amaxList']
layer_weight_suffixes = ['downsample.norm.weight',
'downsample.norm.bias',
'downsample.reduction.weight']
sw_weight_suffixes = ['patch_embed.proj.weight',
'patch_embed.proj.bias',
'patch_embed.norm.weight',
'patch_embed.norm.bias',
'norm.weight',
'norm.bias']
self.layer_num = layer_num
        self.depths = depths
        self.int8 = True  # checked by to_half(); weights built here are already INT8-quantized / FP16
        self.weights = []
torch.classes.load_library(ths_path)
gen_relative_pos_bias = torch.ops.fastertransformer.gen_relative_pos_bias
transform_trt_mask = torch.ops.fastertransformer.transform_trt_mask
if weights is None:
print("[ERROR][SwinTransformerWeights::__init__] weights should not be empty!")
exit(-1)
if 'layers.0.blocks.0.attn.qkv._input_quantizer._amax' not in weights.keys():
raise RuntimeError("There is no quantization node in the checkpoint, cannot be quantized to int8.")
for k, v in weights.items():
if k.endswith('_amax'):
weights[k] = v.cpu()
elif k.endswith('relative_position_index'):
weights[k] = v
else:
weights[k] = v.half()
# exit(0)
weights = extract_amaxlist(weights, depths, version=version, ths_path=ths_path, verbose=False)
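        # extract_amaxlist packs the per-block quantization ranges into "amaxList" tensors;
        # below, each amaxList is moved to the GPU for the custom op while a CPU copy is kept
        # under an "h_amaxList" key so the host-side scales stay accessible.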
h_scale_list = {}
for k, v in weights.items():
if "amaxList" in k:
k_h = k.replace("amaxList", "h_amaxList")
h_scale_list[k_h] = v
weights[k] = v.cuda()
for k, v in h_scale_list.items():
weights[k] = v
#calculate size_per_head
qkv_weight_name = "layers.0.blocks.0.attn.qkv.weight"
shape = weights[qkv_weight_name].shape
#in case we flatten this weight
if len(shape) == 1:
dim = int(math.sqrt(shape[0]/3))
weights[qkv_weight_name] = weights[qkv_weight_name].reshape([3*dim, dim])
shape = weights[qkv_weight_name].shape
size_per_head = int(shape[0]/3/num_heads[0])
#loop over layers
for layer_idx in range(layer_num):
##loop over blocks
for block_idx in range(depths[layer_idx]):
# deal with attn.qkv.weight
qkv_weight_name = "layers.{}.blocks.{}.attn.qkv.weight".format(layer_idx, block_idx)
if qkv_weight_name in weights:
self.weights.append(weights[qkv_weight_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(qkv_weight_name))
exit(-1)
# deal with attn.qkv.bias
if version == 1:
qkv_bias_name = "layers.{}.blocks.{}.attn.qkv.bias".format(layer_idx, block_idx)
if qkv_bias_name in weights:
self.weights.append(weights[qkv_bias_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(qkv_bias_name))
exit(-1)
elif version == 2:
q_bias_name = "layers.{}.blocks.{}.attn.q_bias".format(layer_idx, block_idx)
v_bias_name = "layers.{}.blocks.{}.attn.v_bias".format(layer_idx, block_idx)
if q_bias_name in weights and v_bias_name in weights:
qkv_weights = torch.cat((weights[q_bias_name], torch.zeros_like(weights[v_bias_name], requires_grad=False), weights[v_bias_name]))
self.weights.append(qkv_weights)
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {} or {}.".format(q_bias_name, v_bias_name))
exit(-1)
###block_weight_suffixes
for block_weight_suffix in block_weight_suffixes:
weight_name = 'layers.{}.blocks.{}.{}'.format(layer_idx, block_idx, block_weight_suffix)
if weight_name in weights:
self.weights.append(weights[weight_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(weight_name))
exit(-1)
###get relative position bias
                ###Notice: for some models, e.g. (img_size, window_size) = (224, 16),
                ###the window_size_in_use of the last layer may change.
if version == 1:
index_name = 'layers.{}.blocks.{}.attn.relative_position_index'.format(layer_idx, block_idx)
table_name = 'layers.{}.blocks.{}.attn.relative_position_bias_table'.format(layer_idx, block_idx)
if index_name in weights and table_name in weights:
window_size_in_use = int(math.sqrt(weights[index_name].shape[0]))
relative_position_bias = gen_relative_pos_bias(weights[table_name], weights[index_name], window_size_in_use, num_heads[layer_idx], weights[table_name], weights[table_name], weights[table_name], version)
self.weights.append(relative_position_bias.half())
if relative_position_bias.shape[1] <= 256 and size_per_head == 32:
trt_relative_position_bias = transform_trt_mask(relative_position_bias.half(), relative_position_bias.shape[0], relative_position_bias.shape[1], True)
self.weights.append(trt_relative_position_bias.half())
else:
self.weights.append(torch.HalfTensor())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {} or {}.".format(index_name, table_name))
exit(-1)
elif version == 2:
index_name = 'layers.{}.blocks.{}.attn.relative_position_index'.format(layer_idx, block_idx)
table_name = 'layers.{}.blocks.{}.attn.relative_coords_table'.format(layer_idx, block_idx)
cpb_mlp_weight1_name = 'layers.{}.blocks.{}.attn.cpb_mlp.0.weight'.format(layer_idx, block_idx)
cpb_mlp_bias1_name = 'layers.{}.blocks.{}.attn.cpb_mlp.0.bias'.format(layer_idx, block_idx)
cpb_mlp_weight2_name = 'layers.{}.blocks.{}.attn.cpb_mlp.2.weight'.format(layer_idx, block_idx)
if index_name in weights and table_name in weights and cpb_mlp_weight1_name in weights and cpb_mlp_bias1_name in weights and cpb_mlp_weight2_name in weights:
window_size_in_use = int(math.sqrt(weights[index_name].shape[0]))
relative_position_bias = gen_relative_pos_bias(weights[table_name], weights[index_name], window_size_in_use, num_heads[layer_idx], weights[cpb_mlp_weight1_name], weights[cpb_mlp_bias1_name], weights[cpb_mlp_weight2_name], version)
self.weights.append(relative_position_bias)
if relative_position_bias.shape[1] <= 256 and size_per_head == 32:
trt_relative_position_bias = transform_trt_mask(relative_position_bias.half(), relative_position_bias.shape[0], relative_position_bias.shape[1], True)
self.weights.append(trt_relative_position_bias.half())
else:
self.weights.append(torch.HalfTensor())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {} or {} or {} or {} or {}.".format(index_name, table_name, cpb_mlp_weight1_name, cpb_mlp_bias1_name, cpb_mlp_weight2_name))
exit(-1)
print('relative_position_bias', self.weights[-2].shape, '=>', self.weights[-1].shape)
##process attn.logit_scale for version 2
if version == 2:
logit_scale_name = 'layers.{}.blocks.{}.attn.logit_scale'.format(layer_idx, block_idx)
if logit_scale_name in weights:
self.weights.append(torch.clamp(weights[logit_scale_name], max=torch.log(torch.tensor(1. / 0.01))).exp())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(logit_scale_name))
exit(-1)
##deal with layer weights
###loop over layer_weight_suffixes
for layer_weight_suffix in layer_weight_suffixes:
weight_name = 'layers.{}.{}'.format(layer_idx, layer_weight_suffix)
if weight_name in weights:
self.weights.append(weights[weight_name])
else:
                    ####the last layer does not have a downsample weight
if layer_idx == layer_num - 1:
self.weights.append(torch.Tensor())
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(weight_name))
exit(-1)
        ###get attn_mask (shared by the blocks of a layer; some layers may not have one)
attn_mask_name = 'layers.{}.blocks.1.attn_mask'.format(layer_idx)
if attn_mask_name in weights:
self.weights.append(weights[attn_mask_name].half())
if weights[attn_mask_name].shape[1] <= 256 and size_per_head == 32:
trt_attn_mask = transform_trt_mask(weights[attn_mask_name].cuda().half(), weights[attn_mask_name].shape[0], weights[attn_mask_name].shape[1], True)
self.weights.append(trt_attn_mask.half())
else:
self.weights.append(torch.HalfTensor())
else:
self.weights.append(torch.HalfTensor())
self.weights.append(torch.HalfTensor())
print('attn_mask', self.weights[-2].shape, '=>', self.weights[-1].shape)
#deal with sw weights
for sw_weight_suffix in sw_weight_suffixes:
weight_name = sw_weight_suffix
if weight_name in weights:
self.weights.append(weights[weight_name])
else:
print("[ERROR][SwinTransformerWeights::__init__] missing weight {}.".format(weight_name))
exit(-1)
def to_half(self):
if self.int8:
return
for idx, v in enumerate(self.weights):
self.weights[idx] = v.half()
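# Usage sketch mirroring infer_swintransformer_int8_op.py (the library path below is an
# illustrative assumption):
#   sw_weights = SwinTransformerINT8Weight(layer_num, window_size, depths, num_heads,
#                                          "libth_transformer.so", model.state_dict(), version=1)
#   swin_transformer = torch.classes.SwinTransformerINT8.Class(sw_weights.weights, int8_mode, ...)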
|
FasterTransformer-main
|
examples/pytorch/swin/SwinTransformerINT8Weight.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from tqdm import tqdm
from torch._C import _nvtx
import sys
sys.path.insert(0, "./Swin-Transformer-Quantization")
# from config_modified_int8 import get_config
from SwinTransformer.config import get_config
from SwinTransformer.models import build_model
from data import build_val_loader
from SwinTransformer.optimizer import build_optimizer
from SwinTransformer.logger import create_logger
from SwinTransformer.utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
from SwinTransformerWeightTransposeQKVWeight import SwinTransformerWeightTransposeQKVWeight
def saveToTxt(x, name, clean=False):
if clean :
with open("tmp2/"+name, 'w+') as fout:
xx = x.reshape([-1])
for i in xx:
fout.write("{}\n".format(i))
else:
with open("tmp2/"+name, 'a+') as fout:
shape = x.shape
fout.write("{}\n".format(len(shape)))
fout.write(" ".join([str(s) for s in shape])+"\n")
xx = x.reshape([-1])
for i in xx:
fout.write("{}\n".format(i))
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--version', type=int, default=1, help='version of swin', )
parser.add_argument('--disable_amp', type=bool, default=True, help='disable amp', )
parser.add_argument('--fused_window_process', type=bool, default=False, help='whether use fused window process', )
parser.add_argument('--th-path', type=str, help='path to pytorch library')
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O0', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--profile', action='store_true', help='Perform profiling only, with some random data')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--calib', action='store_true', help='Perform calibration only')
parser.add_argument('--train', action='store_true', help='Perform training only')
parser.add_argument('--int8-mode', type=int, help='int8 mode', choices=[1, 2])
parser.add_argument('--fp16', action='store_true', help='Using FP16 precision instead of FP32')
parser.add_argument('--num-calib-batch', type=int, default=4, help='Number of batches for calibration. 0 will disable calibration.')
# distributed training
parser.add_argument("--local_rank", type=int, default=0, help='local rank for DistributedDataParallel')
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
def main(args, config):
logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}")
model = build_model(config)
model.cuda()
optimizer = build_optimizer(config, model)
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model, 'flops'):
flops = model.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler = None
if config.MODEL.RESUME:
# max_accuracy = load_checkpoint(config, model, optimizer, lr_scheduler, logger)
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
msg = model.load_state_dict(checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint, strict=False)
logger.info(msg)
del checkpoint
dataset_val, data_loader_val = build_val_loader(config)
acc1, acc5, loss = validate(config, args, data_loader_val, model)
logger.info(f"Accuracy of resumed network on the {len(dataset_val)} test images: {acc1:.1f}%")
return
@torch.no_grad()
def validate(config, args, data_loader, model):
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
if args.version == 1:
depths = config.MODEL.SWIN.DEPTHS
num_heads = config.MODEL.SWIN.NUM_HEADS
window_size = config.MODEL.SWIN.WINDOW_SIZE
patch_size = config.MODEL.SWIN.PATCH_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
ape = config.MODEL.SWIN.APE
patch_norm = config.MODEL.SWIN.PATCH_NORM
mlp_ratio = config.MODEL.SWIN.MLP_RATIO
qkv_bias = config.MODEL.SWIN.QKV_BIAS
if config.MODEL.SWIN.QK_SCALE is not None:
qk_scale = config.MODEL.SWIN.QK_SCALE
else:
qk_scale = 1.0
elif args.version == 2:
depths = config.MODEL.SWINV2.DEPTHS
num_heads = config.MODEL.SWINV2.NUM_HEADS
window_size = config.MODEL.SWINV2.WINDOW_SIZE
patch_size = config.MODEL.SWINV2.PATCH_SIZE
in_chans = config.MODEL.SWINV2.IN_CHANS
embed_dim = config.MODEL.SWINV2.EMBED_DIM
ape = config.MODEL.SWINV2.APE
patch_norm = config.MODEL.SWINV2.PATCH_NORM
mlp_ratio = config.MODEL.SWINV2.MLP_RATIO
qkv_bias = config.MODEL.SWINV2.QKV_BIAS
qk_scale = 1.0
version = args.version
th_path = args.th_path
depths_tensor = torch.tensor(depths, dtype=torch.int)
num_heads_tensor = torch.tensor(num_heads, dtype=torch.int)
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
torch.classes.load_library(th_path)
sw_weights = SwinTransformerWeightTransposeQKVWeight(layer_num, window_size, depths, num_heads, th_path, model.state_dict(), version)
if args.fp16:
model.half()
sw_weights.to_half()
else:
sw_weights.to_float32()
sw_weights.to_cuda()
##run pytorch op
try:
swin_transformer = torch.classes.SwinTransformer.Class(sw_weights.weights, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
except:
# legacy ths for 20.03 image
swin_transformer = torch.classes.SwinTransformerClass(sw_weights.weights, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale, version)
end = time.time()
for idx, (images, target) in enumerate(data_loader):
if args.fp16:
images = images.half()
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
# output_th = model(images)
        swin_transformer_output = swin_transformer.forward(images)
        output = model.head(swin_transformer_output)
# diff = output - output_th
# print(diff.mean(), diff.max(), diff.min())
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
if __name__ == '__main__':
args, config = parse_option()
if config.AMP_OPT_LEVEL != "O0":
assert amp is not None, "amp not installed!"
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
seed = config.SEED
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# linear scale the learning rate according to total batch size, may not be optimal
linear_scaled_lr = config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE / 512.0
linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * config.DATA.BATCH_SIZE / 512.0
linear_scaled_min_lr = config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE / 512.0
# gradient accumulation also need to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.defrost()
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.MIN_LR = linear_scaled_min_lr
config.freeze()
os.makedirs(config.OUTPUT, exist_ok=True)
logger = create_logger(output_dir=config.OUTPUT, dist_rank=0, name=f"{config.MODEL.NAME}")
# if dist.get_rank() == 0:
path = os.path.join(config.OUTPUT, "config.json")
with open(path, "w") as f:
f.write(config.dump())
logger.info(f"Full config saved to {path}")
main(args, config)
|
FasterTransformer-main
|
examples/pytorch/swin/infer_swintransformer_acc.py
|
from SwinTransformer.config import get_config
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/__init__.py
|
# coding=utf-8
# Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for training models with pytorch-quantization"""
import pickle
import re
import time
import numpy as np
import torch
import random
import pytorch_quantization as quantization
import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization import calib
class Logger:
def info(self, s):
print("INFO:", s)
def warn(self, s):
print("WARN:", s)
logger = Logger()
name_width = 50 # max width of layer names
qname_width = name_width + 20 # max width of quantizer names
def add_arguments(parser):
"""Add arguments to parser for functions defined in quant_trainer."""
group = parser.add_argument_group('quant_trainer arguments')
group.add_argument('--wprec', type=int, default=8,
help='weight precision')
group.add_argument('--aprec', type=int, default=8,
help='activation precision')
group.add_argument('--quant-per-tensor', action='store_true',
help='per tensor weight scaling')
group.add_argument('--quant-disable', action='store_true',
help='disable all quantizers')
group.add_argument('--quant-disable-keyword', type=str, nargs='+',
help='disable quantizers by keyword')
group.add_argument('--calibrator', default='percentile',
help='which quantization range calibrator to use')
group.add_argument('--percentile', default=99.99, type=float,
help='percentile for PercentileCalibrator')
group.add_argument('--fuse-qkv', action='store_true',
help='use the same scale factor for qkv')
group.add_argument('--narrow-range', action='store_true',
help='use [-127, 127] range for activations rather than [-128, 127]')
group.add_argument('--quant-mode', type=str, default="ft2",
help='predefined quantization mode, choices: ["ft1", "ft2", "ft3", "trt"] (deprecated)')
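# Note: set_args() below overrides the CLI values above with the fixed INT8 recipe used by
# this FasterTransformer Swin quantization example (8-bit weights/activations, per-tensor
# weight scaling, fused QKV scales, and a set of quantizers disabled by keyword).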
def set_args(args):
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = True
args.quant_disable = False
args.quant_disable_keyword = ['final_input', 'layernorm_input', 'softmax_input', 'local_input', 'residual_input']
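# int8_mode 2 additionally disables the after-GEMM quantizers matched by the pattern
# appended below, so those GEMM outputs are left unquantized.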
if args.int8_mode == 2:
args.quant_disable_keyword.append('[2n]._aftergemm')
args.fuse_qkv = True
args.narrow_range = False
return args
def set_default_quantizers(args):
"""Set default quantizers before creating the model."""
if args.calibrator == 'max':
calib_method = 'max'
elif args.calibrator == 'percentile':
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator')
calib_method = 'histogram'
elif args.calibrator == 'mse':
calib_method = 'histogram'
elif args.calibrator == 'entropy':
calib_method = 'histogram'
else:
raise ValueError(F'Invalid calibrator {args.calibrator}')
input_desc = QuantDescriptor(num_bits=args.aprec,
calib_method=calib_method,
narrow_range=args.narrow_range,
)
weight_desc = QuantDescriptor(num_bits=args.wprec,
axis=(None if args.quant_per_tensor else (0,)),
)
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
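# Typical call order (see parse_option() in main.py): add_arguments(parser) to register the
# flags, set_args(args) to apply the fixed recipe, then set_default_quantizers(args) *before*
# the model is built so that QuantLinear picks up these descriptors.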
def configure_model(model, args, calib=False):
"""Function called before the training loop."""
logger.info('Configuring Model for Quantization')
logger.info(F'using quantization package {quantization.__file__}')
if not calib:
if args.quant_disable:
set_quantizer_by_name(model, [''], _disabled=True)
if args.quant_disable_keyword:
set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
if args.fuse_qkv:
fuse_qkv(model, args)
print('Configure calib={}'.format(str(calib)))
def enable_calibration(model):
"""Enable calibration of all *_input_quantizer modules in model."""
logger.info("Enabling Calibration")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:{qname_width}}: {module}")
def finish_calibration(model, args):
"""Disable calibration and load amax for all "*_input_quantizer modules in model."""
logger.info("Loading calibrated amax")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
elif args.calibrator == "percentile":
module.load_calib_amax("percentile", percentile=args.percentile)
else:
module.load_calib_amax(args.calibrator)
module.enable_quant()
module.disable_calib()
else:
module.enable()
if args.fuse_qkv:
fuse_qkv(model, args)
model.cuda()
print_quant_summary(model)
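# Calibration workflow (see the --calib branch in main.py): configure_model(model, args, calib=True),
# enable_calibration(model), run a few forward passes over training batches, then
# finish_calibration(model, args) to load the collected amax values and re-enable quantization.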
def fuse_qkv(model, args):
"""Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM.
Force the weight and output scale factors to match by taking the max of (Q,K,V).
"""
def fuse3(qq, qk, qv):
if not hasattr(qq, '_amax') or not hasattr(qk, '_amax') or not hasattr(qv, '_amax'):
logger.warn('missing amax buffer, unable to fuse')
return
q = qq._amax.detach().item()
k = qk._amax.detach().item()
v = qv._amax.detach().item()
amax = max(q, k, v)
qq._amax.fill_(amax)
qk._amax.fill_(amax)
qv._amax.fill_(amax)
logger.info(f' q={q:7.4f} k={k:7.4f} v={v:7.4f} -> {amax:7.4f}')
for name, mod in model.named_modules():
if name.endswith('.attention.self'):
logger.info(f'FUSE_QKV: {name:{name_width}}')
fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
fuse3(mod.query._aftergemm_quantizer, mod.key._aftergemm_quantizer, mod.value._aftergemm_quantizer)
def print_quant_summary(model):
"""Print summary of all quantizer modules in the model."""
counters = {'quantizers': 0, 'enabled_quantizers': 0,
'weights': 0, 'quant_weights': 0, 'sparse_weights': 0,
'params': 0, 'sparse_params': 0}
for name, mod in model.named_modules():
if isinstance(mod, quantization.nn.TensorQuantizer):
print(f'{name:80} {mod}')
counters['quantizers'] += 1
if not mod._disabled:
counters['enabled_quantizers'] += 1
for pname, param in mod.named_parameters():
if '.' in pname:
continue
counters['params'] += param.numel()
weight_quantizer = getattr(mod, '_weight_quantizer', None)
if pname == 'weight':
counters['weights'] += param.numel()
if weight_quantizer is not None and not weight_quantizer._disabled:
counters['quant_weights'] += param.numel()
counters['sparse_weights'] += param.eq(0).sum().item()
counters['sparse_params'] += param.eq(0).sum().item()
def print_fraction(a, b, counters, desc):
va = counters[a]
vb = counters[b]
pct = va / vb * 100 if vb != 0 else float('NaN')
print(f'{va:12}/{vb:12} ({pct:6.2f}%) {desc}')
print_fraction('enabled_quantizers', 'quantizers', counters, 'TensorQuantizers enabled')
print_fraction('quant_weights', 'weights', counters, 'Quantized weights')
print_fraction('sparse_weights', 'weights', counters, 'Zero weights')
print_fraction('weights', 'params', counters, 'Weight parameters')
print('\n\n')
def set_quantizer(name, mod, quantizer, k, v):
"""Set attributes for mod.quantizer."""
quantizer_mod = getattr(mod, quantizer, None)
if quantizer_mod is not None:
assert hasattr(quantizer_mod, k)
setattr(quantizer_mod, k, v)
else:
logger.warn(f'{name} has no {quantizer}')
def set_quantizers(name, mod, which='both', **kwargs):
"""Set quantizer attributes for mod."""
s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += (f' {k}={v}')
if which in ['input', 'both']:
set_quantizer(name, mod, '_input_quantizer', k, v)
if which in ['weight', 'both']:
set_quantizer(name, mod, '_weight_quantizer', k, v)
logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
"""Set quantizer attributes for layers where name contains a substring in names."""
for name, mod in model.named_modules():
if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
for n in names:
if re.search(n, name):
set_quantizers(name, mod, **kwargs)
elif name.endswith('_quantizer'):
for n in names:
if re.search(n, name):
s = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += (f' {k}={v}')
setattr(mod, k, v)
logger.info(s)
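# Example usage (as in configure_model above): disable every quantizer whose module name
# matches one of the given keywords/regexes, e.g.
#   set_quantizer_by_name(model, ['softmax_input', 'residual_input'], _disabled=True)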
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/quant_utils.py
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import json
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from tqdm import tqdm
import quant_utils
from SwinTransformer.config import get_config
from models import build_model
from data import build_loader
from SwinTransformer.lr_scheduler import build_scheduler
from SwinTransformer.optimizer import build_optimizer
from SwinTransformer.logger import create_logger
from SwinTransformer.utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
class Knowledge_Distillation_Loss(torch.nn.Module):
def __init__(self, scale, T = 3):
super(Knowledge_Distillation_Loss, self).__init__()
self.KLdiv = torch.nn.KLDivLoss()
self.T = T
self.scale = scale
def get_knowledge_distillation_loss(self, output_student, output_teacher):
loss_kl = self.KLdiv(torch.nn.functional.log_softmax(output_student / self.T, dim=1), torch.nn.functional.softmax(output_teacher / self.T, dim=1))
loss = loss_kl
return self.scale * loss
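# Knowledge_Distillation_Loss computes the KL divergence between temperature-softened (T=3)
# student and teacher distributions, scaled by `scale`; it is added to the cross-entropy loss
# during QAT fine-tuning when --distill is given.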
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: shard the dataset into non-overlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--disable_amp', action='store_true', help='Disable pytorch amp')
parser.add_argument('--amp-opt-level', type=str, choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used (deprecated!)')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument("--engine", type=str, help="The directory of swin tensorrt engine.")
# Calibration
parser.add_argument('--calib', action='store_true', help='Perform calibration only')
parser.add_argument('--train', action='store_true', help='Perform training only')
parser.add_argument('--int8-mode', type=int, required=True, help='int8 mode', choices=[1, 2])
parser.add_argument('--num-calib-batch', type=int, default=4, help='Number of batches for calibration. 0 will disable calibration.')
parser.add_argument('--calib-batchsz', type=int, default=8, help='Batch size when doing calibration')
parser.add_argument('--calib-output-path', type=str, help='Output directory to save calibrated model')
# distributed training
parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')
parser.add_argument("--num-epochs", type=int, default=10, help="Number of epochs to run QAT fintuning.")
parser.add_argument("--qat-lr", type=float, default=5e-7, help="learning rate for QAT.")
parser.add_argument("--distill", action='store_true', help='Using distillation')
parser.add_argument("--teacher", type=str, help='teacher model path')
parser.add_argument('--distillation_loss_scale', type=float, default=10000., help="scale applied to distillation component of loss")
# for acceleration
parser.add_argument('--fused_window_process', action='store_true', help='Fused window shift & window partition, similar for reversed part.')
quant_utils.add_arguments(parser)
args, unparsed = parser.parse_known_args()
args = quant_utils.set_args(args)
quant_utils.set_default_quantizers(args)
config = get_config(args)
return args, config
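# Illustrative launch sketch (paths and config name are placeholders; adjust to your setup):
#   python -m torch.distributed.launch --nproc_per_node 1 main.py \
#       --cfg <swin_config.yaml> --data-path <imagenet_dir> --int8-mode 1 \
#       --calib --calib-output-path <calib_dir> --num-calib-batch 4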
def main(config, args):
dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader(config, args)
logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}")
model = build_model(config)
model.cuda()
# PRINT the details of model (quantized, with TensorQuantizer inserted)
# logger.info(str(model))
optimizer = build_optimizer(config, model)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model_without_ddp, 'flops'):
flops = model_without_ddp.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train))
if config.AUG.MIXUP > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif config.MODEL.LABEL_SMOOTHING > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING)
else:
criterion = torch.nn.CrossEntropyLoss()
max_accuracy = 0.0
if config.MODEL.RESUME:
# max_accuracy = load_checkpoint(config, model_without_ddp, optimizer, lr_scheduler, logger)
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
msg = model_without_ddp.load_state_dict(checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint, strict=False)
logger.info(msg)
del checkpoint
if config.THROUGHPUT_MODE:
throughput(data_loader_val, model, logger)
return
if args.calib:
quant_utils.configure_model(model, args, calib=True)
model.eval()
quant_utils.enable_calibration(model)
# Run forward passes on a sample of the training set
for step, (samples, targets) in enumerate(tqdm(data_loader_train, desc='Calibration', total=args.num_calib_batch)):
if step > args.num_calib_batch:
break
outputs = model(samples)
quant_utils.finish_calibration(model, args)
# Evaluate calibrated model
quant_utils.configure_model(model, args, calib=False)
# Save calibrated checkpoint
model_to_save = model.module if hasattr(model, 'module') else model
output_model_path = os.path.join(args.calib_output_path, '{}_calib.pth'.format(config.MODEL.NAME))
if not os.path.exists(args.calib_output_path):
os.mkdir(args.calib_output_path)
torch.save(model_to_save.state_dict(), output_model_path)
print(f'Model is saved to {output_model_path}')
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
if args.train:
teacher = None
distillation_loss = None
if args.distill:
teacher = build_model(config)
print("Loading teacher model...")
teacher_ckpt = torch.load(args.teacher, map_location="cpu")
if "model" in teacher_ckpt:
teacher.load_state_dict(teacher_ckpt["model"], strict=False)
else:
teacher.load_state_dict(teacher_ckpt, strict=False)
distillation_loss = Knowledge_Distillation_Loss(scale=args.distillation_loss_scale).cuda()
teacher.cuda()
teacher.eval()
quant_utils.set_quantizer_by_name(teacher, [''], _disabled=True)
logger.info("Start training")
quant_utils.configure_model(model, args, calib=False)
start_time = time.time()
for epoch in range(args.num_epochs):
data_loader_train.sampler.set_epoch(epoch)
train_one_epoch(config, model, criterion, data_loader_train, optimizer, epoch,
mixup_fn, teacher, distillation_loss)
if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)):
save_checkpoint(config, epoch, model_without_ddp, max_accuracy, optimizer, lr_scheduler, logger)
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
max_accuracy = max(max_accuracy, acc1)
logger.info(f'Max accuracy: {max_accuracy:.2f}%')
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info('Training time {}'.format(total_time_str))
if args.eval:
quant_utils.configure_model(model, args, calib=False)
if args.engine:
acc1, acc5, loss = validate_trt(config, data_loader_val, model, args.engine)
else:
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
def train_one_epoch(config, model, criterion, data_loader_train, optimizer, epoch, mixup_fn, teacher, dis_loss):
model.train()
optimizer.zero_grad()
max_accuracy = 0.0
num_steps = len(data_loader_train)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
start = time.time()
end = time.time()
for idx, (samples, targets) in enumerate(data_loader_train):
samples = samples.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
outputs = model(samples)
loss = criterion(outputs, targets)
if teacher:
with torch.no_grad():
teacher_outputs = teacher(samples)
loss_t = dis_loss.get_knowledge_distillation_loss(outputs, teacher_outputs)
loss = loss + loss_t
optimizer.zero_grad()
loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
else:
grad_norm = get_grad_norm(model.parameters())
optimizer.step()
# lr_scheduler.step_update(epoch * num_steps + idx)
torch.cuda.synchronize()
loss_meter.update(loss.item(), targets.size(0))
norm_meter.update(grad_norm)
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]['lr']
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
etas = batch_time.avg * (num_steps - idx)
logger.info(
f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
f'mem {memory_used:.0f}MB')
epoch_time = time.time() - start
logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
@torch.no_grad()
def validate(config, data_loader, model):
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
for idx, (images, target) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(images)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
loss = reduce_tensor(loss)
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def validate_trt(config, data_loader, model, engine):
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
import tensorrt as trt
with open(engine, 'rb') as f, trt.Runtime(trt.Logger(trt.Logger.INFO)) as runtime,\
runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
if engine is None:
print('Engine is none')
exit(-1)
context.active_optimization_profile = 0
stream = 0
context.set_binding_shape(0, (config.DATA.BATCH_SIZE, 3, config.DATA.IMG_SIZE, config.DATA.IMG_SIZE))
output_shape = tuple(context.get_binding_shape(1))
print(output_shape)
d_output = torch.empty(output_shape, dtype=torch.float32).cuda()
for idx, (images, target) in enumerate(data_loader):
images = images.half().cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
# output = model(images)
context.execute_async_v2([images.data_ptr()] + [d_output.data_ptr()], stream)
torch.cuda.synchronize()
with torch.no_grad():
output = model.module.head(d_output)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
loss = reduce_tensor(loss)
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for idx, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
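# Warm up with 50 forward passes, then time 30 passes to estimate images/sec for this batch.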
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
if __name__ == '__main__':
args, config = parse_option()
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
torch.cuda.set_device(config.LOCAL_RANK)
torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)
torch.distributed.barrier()
seed = config.SEED + dist.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# linearly scale the learning rate according to the total batch size; this may not be optimal
linear_scaled_lr = config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_warmup_lr = args.qat_lr * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_min_lr = config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
# gradient accumulation also needs to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.defrost()
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.EPOCHS = args.num_epochs
config.TRAIN.MIN_LR = linear_scaled_min_lr
config.freeze()
os.makedirs(config.OUTPUT, exist_ok=True)
logger = create_logger(output_dir=config.OUTPUT, dist_rank=dist.get_rank(), name=f"{config.MODEL.NAME}")
if dist.get_rank() == 0:
path = os.path.join(config.OUTPUT, "config.json")
with open(path, "w") as f:
f.write(config.dump())
logger.info(f"Full config saved to {path}")
main(config, args)
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/main.py
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import numpy as np
import torch.distributed as dist
from torchvision import datasets, transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import Mixup
from timm.data import create_transform
from SwinTransformer.data.samplers import SubsetRandomSampler
from SwinTransformer.data.build import build_dataset
def build_val_loader(config):
config.freeze()
dataset_val, _ = build_dataset(is_train=False, config=config)
# print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
indices = np.arange(0, len(dataset_val), 1)
sampler_val = SubsetRandomSampler(indices)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=config.DATA.BATCH_SIZE,
shuffle=False,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=False
)
return dataset_val, data_loader_val
def build_loader(config, args):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(is_train=True, config=config)
config.freeze()
print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
num_tasks = dist.get_world_size()
global_rank = dist.get_rank()
if config.DATA.ZIP_MODE and config.DATA.CACHE_MODE == 'part':
indices = np.arange(dist.get_rank(), len(dataset_train), dist.get_world_size())
sampler_train = SubsetRandomSampler(indices)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
indices = np.arange(dist.get_rank(), len(dataset_val), dist.get_world_size())
sampler_val = SubsetRandomSampler(indices)
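# For calibration (--calib) the train loader uses the smaller dedicated batch size given by
# --calib-batchsz; otherwise the configured training batch size is used.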
if args.calib:
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.calib_batchsz,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=True,
)
else:
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=config.DATA.BATCH_SIZE,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=config.DATA.BATCH_SIZE,
shuffle=False,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=False
)
# setup mixup / cutmix
mixup_fn = None
mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,
prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,
label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)
return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/data.py
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
from .swin_transformer import SwinTransformer
from .swin_transformer_v2 import SwinTransformerV2
def build_model(config):
model_type = config.MODEL.TYPE
if model_type == 'swin':
model = SwinTransformer(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
fused_window_process=config.FUSED_WINDOW_PROCESS)
elif model_type == 'swinv2':
model = SwinTransformerV2(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWINV2.PATCH_SIZE,
in_chans=config.MODEL.SWINV2.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWINV2.EMBED_DIM,
depths=config.MODEL.SWINV2.DEPTHS,
num_heads=config.MODEL.SWINV2.NUM_HEADS,
window_size=config.MODEL.SWINV2.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWINV2.MLP_RATIO,
qkv_bias=config.MODEL.SWINV2.QKV_BIAS,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWINV2.APE,
patch_norm=config.MODEL.SWINV2.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
pretrained_window_sizes=config.MODEL.SWINV2.PRETRAINED_WINDOW_SIZES)
else:
raise NotImplementedError(f"Unknown model: {model_type}")
return model
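# Typical usage (see main.py): model = build_model(get_config(args)); config.MODEL.TYPE selects
# between the SwinTransformer ('swin') and SwinTransformerV2 ('swinv2') variants defined in
# this package.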
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/models/build.py
|
from .build import build_model
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/models/__init__.py
|
# --------------------------------------------------------
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.init as init
import torch.nn.functional as F
import sys
import math
from pytorch_quantization.nn import QuantLinear, TensorQuantizer
def gelu(x):
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
# used only for Triton inference
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
# used specifically for training since torch.nn.functional.gelu breaks ONNX export
def bias_gelu_training(bias, y):
x = bias + y
return torch.nn.functional.gelu(x) # Breaks ONNX export
def bias_tanh(bias, y):
x = bias + y
return torch.tanh(x)
def swish(x):
return x * torch.sigmoid(x)
def bias_noact(bias, y):
return bias + y
#torch.nn.functional.gelu(x) # Breaks ONNX export
ACT2FN = {"gelu": gelu, "bias_gelu": bias_gelu, "bias_tanh": bias_tanh, "relu": torch.nn.functional.relu, "swish": swish,
"bias_noact": bias_noact}
class QuantizedConv2d(nn.Module):
r"""Conv2d with INT8 quantization
"""
__constants__ = ['bias']
def __init__(self, in_channel, out_channel, kernel_size, stride, bias=True):
super(QuantizedConv2d, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.kernel_size = kernel_size
self.stride = stride
self.weight = Parameter(torch.Tensor(out_channel, in_channel, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channel, 1, 1))
else:
self.register_parameter('bias', None)
self._input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self._weight_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_weight)
self._aftergemm_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_normal_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
input = self._input_quantizer(input)
weight = self._weight_quantizer(self.weight)
output = self._aftergemm_quantizer(F.conv2d(input, weight, bias=None, stride=self.stride))
if self.bias is None:
return output
else:
return self.bias + output
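# forward() fake-quantizes the input and weight, quantizes the convolution output
# (the "after-GEMM" tensor), and then adds the bias in floating point.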
class LinearActivation(nn.Module):
r"""Fused Linear and Activation Module.
"""
__constants__ = ['bias']
def __init__(self, in_features, out_features, act='gelu', bias=True, do_quant=True):
super(LinearActivation, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.act_fn = nn.Identity()
self.biased_act_fn = None
if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)):
if bias and not 'bias' in act:
act = 'bias_' + act
self.biased_act_fn = ACT2FN[act]
else:
self.act_fn = ACT2FN[act]
else:
self.act_fn = act
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.do_quant = do_quant
if do_quant:
self._input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self._weight_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_weight)
self._aftergemm_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_normal_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
if self.do_quant:
input = self._input_quantizer(input)
weight = self._weight_quantizer(self.weight)
else:
weight = self.weight
if not self.bias is None:
if self.do_quant:
return self.biased_act_fn(self.bias, self._aftergemm_quantizer(F.linear(input, weight, None)))
else:
return self.biased_act_fn(self.bias, F.linear(input, weight, None))
else:
if self.do_quant:
return self.act_fn(self._aftergemm_quantizer(F.linear(input, weight, None)))
else:
return self.act_fn(F.linear(input, weight, None))
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
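# Usage note: the Swin V2 modules build their projections from this class, e.g.
# LinearActivation(dim, dim * 3, act=nn.Identity(), bias=False) for the fused QKV projection
# and LinearActivation(dim, dim, act='noact') for the output projection, so those projections
# carry input/weight/after-GEMM TensorQuantizers when do_quant=True.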
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/models/linear_activation.py
|
# --------------------------------------------------------
# Swin Transformer V2
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .linear_activation import LinearActivation
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import numpy as np
QUANT = True
if QUANT:
from pytorch_quantization.nn import QuantLinear, TensorQuantizer
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = LinearActivation(in_features, hidden_features, act='noact')
self.act = act_layer()
self.fc2 = LinearActivation(hidden_features, out_features, act='noact')
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
pretrained_window_size=[0, 0]):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.pretrained_window_size = pretrained_window_size
self.num_heads = num_heads
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True)
# mlp to generate continuous relative position bias
self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True),
nn.ReLU(inplace=True),
nn.Linear(512, num_heads, bias=False))
# get relative_coords_table
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
relative_coords_table = torch.stack(
torch.meshgrid([relative_coords_h,
relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
if pretrained_window_size[0] > 0:
relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1)
relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1)
else:
relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
relative_coords_table *= 8 # normalize to -8, 8
relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
torch.abs(relative_coords_table) + 1.0) / np.log2(8)
self.register_buffer("relative_coords_table", relative_coords_table)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = LinearActivation(dim, dim * 3, act=nn.Identity(), bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(dim))
self.v_bias = nn.Parameter(torch.zeros(dim))
else:
self.q_bias = None
self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = LinearActivation(dim, dim, act='noact')
self.proj_drop = nn.Dropout(proj_drop)
self.softmax = nn.Softmax(dim=-1)
if QUANT:
self.matmul_q_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_k_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_v_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_a_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.softmax_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.mha_q_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.mha_k_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
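# These quantizers record calibration ranges for the attention computation: the raw q/k
# tensors, the normalized q/k fed to the QK^T matmul, the softmax input, and the
# attention-probability / value matmul inputs (see forward() below).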
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
qkv = self.qkv(x) + qkv_bias
qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
# cosine attention
if QUANT:
q = self.matmul_q_input_quantizer(q)
k = self.matmul_k_input_quantizer(k)
logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01))).exp()
q = F.normalize(q, dim=-1) * logit_scale
k = F.normalize(k, dim=-1).transpose(-2, -1)
# attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
if QUANT:
q = self.mha_q_input_quantizer(q)
k = self.mha_k_input_quantizer(k)
attn = q @ k
if QUANT:
attn = self.softmax_input_quantizer(attn)
# attn = attn * logit_scale
relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
if QUANT:
attn = self.matmul_a_input_quantizer(attn)
v = self.matmul_v_input_quantizer(v)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, ' \
f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
pretrained_window_size (int): Window size in pre-training.
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, pretrained_window_size=0):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
pretrained_window_size=to_2tuple(pretrained_window_size))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
if QUANT:
self.layernorm_input1_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.layernorm_input2_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
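# Quantizers covering the two residual connections of this block: the layernorm inputs and
# the local/residual branches of each residual add (used in forward() when QUANT is True).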
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
if QUANT:
add1_local = self.drop_path(self.add1_local_input_quantizer(self.norm1(self.layernorm_input1_quantizer(x))))
add1_residual = self.add1_residual_input_quantizer(shortcut)
x = add1_local + add1_residual
add2_local = self.drop_path(self.add2_local_input_quantizer(self.norm2(self.layernorm_input2_quantizer(self.mlp(x)))))
add2_residual = self.add2_residual_input_quantizer(x)
x = add2_local + add2_residual
return x
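# Unquantized reference path below; only reached when QUANT is False.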
x = shortcut + self.drop_path(self.norm1(x))
# FFN
x = x + self.drop_path(self.norm2(self.mlp(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = LinearActivation(4 * dim, 2 * dim, act=nn.Identity(), bias=False)
self.norm = norm_layer(2 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.reduction(x)
x = self.norm(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
flops += H * W * self.dim // 2
return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
pretrained_window_size (int): Local window size in pre-training.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
pretrained_window_size=0):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
def _init_respostnorm(self):
for blk in self.blocks:
nn.init.constant_(blk.norm1.bias, 0)
nn.init.constant_(blk.norm1.weight, 0)
nn.init.constant_(blk.norm2.bias, 0)
nn.init.constant_(blk.norm2.weight, 0)
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
class SwinTransformerV2(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
pretrained_window_sizes (tuple(int)): Pretrained window sizes of each layer.
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, pretrained_window_sizes=[0, 0, 0, 0], **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
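# drop_path rates grow linearly from 0 to drop_path_rate over all blocks and are
# sliced per stage below, so deeper blocks get stronger stochastic depth.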
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
pretrained_window_size=pretrained_window_sizes[i_layer])
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
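# Classification head: LayerNorm over the tokens, global average pooling across the
# token dimension, then a linear classifier (or identity when num_classes == 0).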
self.apply(self._init_weights)
for bly in self.layers:
bly._init_respostnorm()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {"cpb_mlp", "logit_scale", 'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/models/swin_transformer_v2.py
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import short
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from .linear_activation import LinearActivation, QuantizedConv2d
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
QUANT = True
if QUANT:
from pytorch_quantization.nn import QuantLinear, TensorQuantizer
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = LinearActivation(in_features, hidden_features, act='noact')
self.act = act_layer()
self.fc2 = LinearActivation(hidden_features, out_features, act='noact')
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
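# The shifted and scaled 2D offsets collapse into a single index in
# [0, (2*Wh-1)*(2*Ww-1)) used to look up the relative position bias table.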
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = LinearActivation(dim, dim * 3, act='noact')
self.attn_drop = nn.Dropout(attn_drop)
self.proj = LinearActivation(dim, dim, act='noact')
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
if QUANT:
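# TensorQuantizers fake-quantize (and, during calibration, observe) the inputs of the
# attention matmuls and the softmax so INT8 scales can be derived for these ops.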
self.matmul_q_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_k_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_v_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_a_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.softmax_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
# Take the dot product between "query" and "key" to get the raw attention score
if QUANT:
attn = torch.matmul(self.matmul_q_input_quantizer(q), self.matmul_k_input_quantizer(k.transpose(-2, -1)))
attn = self.scale * self.softmax_input_quantizer(attn)
else:
attn = self.scale * torch.matmul(q, k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
if QUANT:
context_layer = torch.matmul(self.matmul_a_input_quantizer(attn),
self.matmul_v_input_quantizer(v))
else:
context_layer = torch.matmul(attn, v)
x = context_layer.transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
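# After the cyclic shift, some windows mix tokens from wrapped-around regions;
# the mask adds -100 to those cross-region attention logits so softmax suppresses them.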
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
if QUANT:
self.layernorm_input1_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.layernorm_input2_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
if QUANT:
x = self.norm1(self.layernorm_input1_quantizer(x))
else:
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
if QUANT:
add1_local = self.add1_local_input_quantizer(self.drop_path(x))
add1_residual = self.add1_residual_input_quantizer(shortcut)
x = add1_local + add1_residual
# Save output of Part 1 as residual
shortcut2 = x
# Part 2
## LayerNorm2
x = self.norm2(self.layernorm_input2_quantizer(x))
## 2-layer MLP
x = self.drop_path(self.mlp(x))
## Add residual
add2_local = self.add2_local_input_quantizer(x)
add2_residual = self.add2_residual_input_quantizer(shortcut2)
x = add2_local + add2_residual
else:
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = LinearActivation(4 * dim, 2 * dim, act=nn.Identity(), bias=False, do_quant=True)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
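# Each 2x2 neighborhood is concatenated (C -> 4C), normalized, then linearly reduced to 2C,
# halving the spatial resolution while doubling the channel width.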
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, do_quant=False):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
if QUANT and do_quant:
self.proj = QuantizedConv2d(in_chans, embed_dim, patch_size, patch_size, bias=True)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
|
FasterTransformer-main
|
examples/pytorch/swin/Swin-Transformer-Quantization/models/swin_transformer.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import argparse
import configparser
import dataclasses
import json
import pathlib
import time
from typing import Dict, List
import torch
import tqdm
import transformers
from utils import bloom
class TensorEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, torch.Tensor):
return obj.tolist()
return super().default(obj)
class LambadaDataset(torch.utils.data.Dataset):
""" LAMBADA dataset class. """
def __init__(self,
path: str | pathlib.Path,
tokenizer: transformers.PreTrainedTokenizerBase):
self.tokenizer = tokenizer
with open(path, 'r') as f:
inputs, targets = zip(*[
json.loads(line)["text"] .strip('\n').rsplit(' ', 1)
for line in f.readlines()])
# This whitespace preprocessing (additional space to the target)
# is required.
targets = [' ' + tgt for tgt in targets]
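# The leading space matters for byte-level BPE tokenizers: ' word' and 'word' map to
# different token ids, and the target continues the context mid-sentence.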
self.encodings = self.tokenizer(list(inputs),
targets,
padding=True,
return_token_type_ids=True,
return_tensors='pt')
def __len__(self):
return len(self.encodings['input_ids'])
def __getitem__(self, idx):
return dict(
input_ids=self.encodings['input_ids'][idx],
attention_mask=self.encodings['attention_mask'][idx],
token_type_ids=self.encodings['token_type_ids'][idx]
)
@dataclasses.dataclass
class Metric:
acc: float
@dataclasses.dataclass
class RequestAndResult:
prompt: str
model_answer: str
target: str
input_ids: List[int]
input_len: int
output_len: int
model_params: bloom.BloomParam
infer_params: bloom.BloomInferParam
output_ids: List[int]
metrics: Metric
def asdict(self):
return dataclasses.asdict(self)
class Timer:
def __init__(self):
self._start_times = {}
self._total_elapsed_times = {}
def start(self, tag='__default'):
self._start_times[tag] = time.time()
def stop(self, tag='__default'):
elapsed_time = time.time() - self._start_times[tag]
if tag not in self._total_elapsed_times:
self._total_elapsed_times[tag] = 0
self._total_elapsed_times[tag] += elapsed_time
return elapsed_time
def elapsed_time_in_sec(self, tag='__default'):
if tag not in self._total_elapsed_times:
return None
return self._total_elapsed_times[tag]
def reset(self):
self._start_times.clear()
self._total_elapsed_times.clear()
def get_args():
parser = argparse.ArgumentParser(
'Evaluation: LAMBADA Task',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
bloom.BloomParam.add_args_group(parser)
bloom.BloomInferParam.add_args_group(parser)
group = parser.add_argument_group('LAMBADA Task Parameters')
group.add_argument(
'--checkpoint-path', type=str, metavar='DIR', default=None,
help='A directory of a converted pretrained checkpoint and model config. '
'If None, the model runs inference with random weights.')
group.add_argument(
'--dataset-path', type=str, metavar='PATH', required=True,
help="A file path to LAMBADA task dataset.")
group.add_argument(
'--output-path', type=str, metavar='PATH', default=None,
help="Path to sample output file.")
group.add_argument(
"--tokenizer-path", type=str, metavar='DIR_OR_PATH', default=None,
help='A file path of a pretrained tokenizer or a checkpoint directory '
'of HF pretrained model.')
group.add_argument(
'--lib-path', type=str, metavar='PATH', default='./lib/libth_transformer.so',
help='A FT library path to load `FasterTransformer.ParallelGptOp`')
group.add_argument(
'--test-hf', action='store_true',
help='Run a huggingface model instead of an FT model. The checkpoint '
'of the huggingface model is assumed to be at --tokenizer-path.')
group.add_argument(
'--acc-threshold', type=float, metavar='M', default=None,
help='The minimum value of the expected accuracy of the LAMBADA '
'evaluation for a test. If the achieved accuracy is less '
'than the given value, an error will be raised.')
group.add_argument(
'--show-progress', action='store_true',
help='Show evaluation progress')
group.add_argument(
'--inference-data-type', '--data-type', type=str, metavar='TYPE', default=None,
choices=[None, 'fp32', 'fp16', 'bf16'],
help='The data type used for inference. If None, the data type follows the '
'checkpoint data type.')
group.add_argument(
'--weights-data-type', type=str, metavar='TYPE', default=None,
choices=[None, 'fp32', 'fp16'],
help='The data type of FT checkpoint. If None, it will be retrieved '
'from the config file in the checkpoint directory.')
group.add_argument(
'--int8_mode', type=int, default=0, choices=[0, 1],
help='The level of quantization to perform.'
' 0: No quantization. All computation in data_type'
' 1: Quantize weights to int8, all compute occurs in fp16/bf16. Not supported when data_type is fp32')
args = parser.parse_args()
print('\n=================== Arguments ===================')
for k, v in vars(args).items():
print(f' - {k.ljust(25, ".")}: {v}')
print('=================================================')
return args
def get_model_and_tokenizer(args: argparse.Namespace):
tokenizer_path = pathlib.Path(args.tokenizer_path)
# HF requires left padding for a decoder-only model.
padding_side = 'left' if args.test_hf else 'right'
if tokenizer_path.is_dir():
# Load from the HF's pretrained model directory.
tokenizer = transformers.BloomTokenizerFast.from_pretrained(
args.tokenizer_path, padding_side=padding_side)
else:
# Directly load from a tokenizer json file.
tokenizer = transformers.BloomTokenizerFast(
tokenizer_file=tokenizer_path, padding_side=padding_side)
# For open-ended generation, the pad token is sometimes replaced by the eos token,
# but HF's Bloom model needs the pad token kept as-is to generate correctly.
if args.test_hf:
# Load HF's pretrained model for testing.
model = transformers.AutoModelForCausalLM.from_pretrained(
args.tokenizer_path).cuda()
return model, tokenizer
checkpoint_path = pathlib.Path(args.checkpoint_path)
config_path = checkpoint_path / 'config.ini'
if config_path.exists():
# Read model params from config.
cfg = configparser.ConfigParser()
cfg.read(config_path)
model_name = 'gpt'
inference_data_type = args.inference_data_type
if inference_data_type is None:
inference_data_type = cfg.get(model_name, "weight_data_type")
model_args = dict(
head_num=cfg.getint(model_name, 'head_num'),
size_per_head=cfg.getint(model_name, "size_per_head"),
layer_num=cfg.getint(model_name, "num_layer"),
tensor_para_size=cfg.getint(model_name, "tensor_para_size"),
vocab_size=cfg.getint(model_name, "vocab_size"),
start_id=cfg.getint(model_name, "start_id"),
end_id=cfg.getint(model_name, "end_id"),
weights_data_type=cfg.get(model_name, "weight_data_type"),
layernorm_eps=cfg.getfloat(model_name, 'layernorm_eps'),
inference_data_type=inference_data_type)
else:
inference_data_type = args.inference_data_type
if inference_data_type is None:
inference_data_type = args.weights_data_type
model_args = dict(head_num=args.num_heads,
size_per_head=args.size_per_head,
vocab_size=args.vocab_size,
start_id=args.start_id or tokenizer.bos_token_id,
end_id=args.end_id or tokenizer.eos_token_id,
layer_num=args.num_layers,
tensor_para_size=args.tensor_para_size,
weights_data_type=args.weights_data_type,
inference_data_type=inference_data_type)
# update common parameters
model_args.update(dict(
lib_path=args.lib_path,
pipeline_para_size=args.pipeline_para_size,
shared_contexts_ratio=args.shared_contexts_ratio,
int8_mode=args.int8_mode
))
print('[FT][INFO] Load BLOOM model')
for k, v in model_args.items():
print(f' - {k.ljust(25, ".")}: {v}')
# Check sanity and consistency between the model and tokenizer.
checklist = ['head_num', 'size_per_head', 'vocab_size', 'layer_num',
'tensor_para_size', 'weights_data_type']
if None in [model_args[k] for k in checklist]:
none_params = [p for p in checklist if model_args[p] is None]
print(f'[FT][WARNING] Found None parameters {none_params}. They must '
f'be provided either by config file or CLI arguments.')
if model_args['start_id'] != tokenizer.bos_token_id:
print('[FT][WARNING] Given start_id does not match the bos token '
'id of the pretrained tokenizer.')
if model_args['end_id'] not in (tokenizer.pad_token_id, tokenizer.eos_token_id):
print('[FT][WARNING] Given end_id matches neither the pad token '
'id nor the eos token id of the pretrained tokenizer.')
model = bloom.Bloom(**model_args)
if not model.load(ckpt_path=args.checkpoint_path):
print('[FT][WARNING] Skip model loading since no checkpoints are found')
return model, tokenizer
def split_inputs_and_targets(entries: Dict[str, torch.LongTensor],
pad_token_id: int,
pad_to_left=False):
input_ids = entries['input_ids']
attn_mask = entries['attention_mask']
token_type_ids = entries['token_type_ids']
# Split inputs and labels by token_type_ids.
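# The tokenizer was called with (context, target) pairs, so token_type_ids is 0 for
# context tokens and 1 for target tokens; attention_mask filters out padding.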
input_token_ids = [
ids[(mask == 1) & (type_ids == 0)]
for ids, mask, type_ids in zip(input_ids, attn_mask, token_type_ids)]
# FT allows int32 tensors.
input_lengths = torch.tensor(
[len(input_tokens) for input_tokens in input_token_ids]).int()
max_length = input_lengths.max()
input_token_ids = torch.stack([
torch.nn.functional.pad(
token_ids,
pad=[max_length - len(token_ids), 0]
if pad_to_left else [0, max_length - len(token_ids)],
mode='constant',
value=pad_token_id
) for token_ids in input_token_ids]).int()
target_token_ids = [
ids[(mask == 1) & (type_ids == 1)]
for ids, mask, type_ids in zip(input_ids, attn_mask, token_type_ids)]
return input_token_ids, input_lengths, target_token_ids
@torch.no_grad()
def main():
args = get_args()
model, tokenizer = get_model_and_tokenizer(args)
model.eval()
dataset = LambadaDataset(args.dataset_path, tokenizer=tokenizer)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size)
num_requests = 0
num_corrects = 0
results = {"output": {"lambada": []}, "results": {"lambada": {}}}
timer = Timer()
if args.show_progress:
data_loader = tqdm.tqdm(data_loader)
for entries in data_loader:
input_token_ids, input_lengths, target_token_ids = \
split_inputs_and_targets(entries, tokenizer.pad_token_id, args.test_hf)
batch_size = input_token_ids.shape[0]
output_length = max([len(target) for target in target_token_ids])
params = bloom.BloomInferParam.from_args(args, batch_size)
if args.test_hf:
# Outputs (batch_size, seq_length)
timer.start()
outputs = model.generate(inputs=input_token_ids.cuda(),
max_new_tokens=output_length,
num_beams=args.beam_width,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
repetition_penalty=args.repetition_penalty,
length_penalty=args.len_penalty)
timer.stop()
# output_token_ids: input/padding/output
output_token_ids = outputs[:, input_token_ids.shape[1]:]
output_token_ids = [
out[:len(tgt)].cpu()
for out, tgt in zip(output_token_ids, target_token_ids)]
else:
param_dict = params.asdict()
timer.start()
outputs = model(start_ids=input_token_ids,
start_lengths=input_lengths,
output_len=output_length,
**param_dict)
timer.stop()
if params.return_cum_log_probs > 0:
outputs = outputs[0] # output_token_ids.
# Slice the generated token ids of the 1st beam result.
# output = input tokens + generated tokens.
output_token_ids = [
out[0, length:length+len(tgt)].cpu()
for out, length, tgt
in zip(outputs, input_lengths, target_token_ids)]
output_texts = tokenizer.batch_decode(output_token_ids)
target_texts = tokenizer.batch_decode(target_token_ids)
input_texts = tokenizer.batch_decode(input_token_ids)
# Convert to output objects.
for i in range(batch_size):
out = output_token_ids[i]
tgt = target_token_ids[i].cpu()
is_correct = (tgt == out).all()
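# LAMBADA scoring: the prediction counts as correct only if every generated token
# exactly matches the corresponding target (last word) token.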
num_corrects += int(is_correct)
result = RequestAndResult(
prompt=input_texts[i],
model_answer=output_texts[i],
target=target_texts[i],
input_ids=input_token_ids[i].tolist(),
input_len=input_lengths[i].item(),
output_len=output_length,
model_params=bloom.BloomParam.from_args(args),
infer_params=params.slice_args(i),
output_ids=out,
metrics=Metric(acc=float(is_correct))
)
results['output']['lambada'].append(result.asdict())
num_requests += batch_size
accuracy = num_corrects * 100 / num_requests
# Reference: HF model's LAMBADA Accuracy for bloom-560m ~ 35.36%
print(f'Accuracy: {accuracy:0.4f}% ({num_corrects}/{num_requests}) '
f'(elapsed time: {timer.elapsed_time_in_sec():.4f} sec)')
# Dump prediction json
results['results']['lambada']['acc'] = accuracy
if args.output_path:
output_path = pathlib.Path(args.output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open(mode='w') as f:
json.dump(results, f, indent=2, cls=TensorEncoder)
if args.acc_threshold is not None:
assert accuracy >= args.acc_threshold, \
f'TEST FAIL the achieved accuracy ({accuracy:.2f}) is less ' \
f'than given threshold ({args.acc_threshold:.2f})'
print('TEST PASS')
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/bloom_lambada.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021, NAVER Corp. Authored by CLOVA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from torch.nn.utils.rnn import pad_sequence
import random
import os
import sys
import argparse
import configparser
import timeit
import torch
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.gpt.utils.gpt_fp8 import GPTFp8
from examples.pytorch.gpt.utils.gpt import GPT, GPTWeights
import examples.pytorch.gpt.utils.gpt_token_encoder as encoder
from utils import word_list
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--layer_num', type=int, default=24,
help='number of layers')
parser.add_argument('--output_len', type=int, default=32,
help='output sequence length to generate.')
parser.add_argument('--head_num', type=int, default=16,
help='head number')
parser.add_argument('--size_per_head', type=int, default=64,
help='size per head')
parser.add_argument('--vocab_size', type=int, default=50304,
help='vocab size')
parser.add_argument('--beam_width', type=int, default=1,
help='beam width for beam search. Sampling is used when beam width is 1.')
parser.add_argument('--top_k', type=int, default=1,
help='top k candidate num')
parser.add_argument('--top_p', type=float, default=0.,
help='top p probability threshold')
parser.add_argument('--temperature', type=float, default=1.,
help='temperature')
parser.add_argument('--len_penalty', type=float, default=0.,
help='len_penalty')
parser.add_argument('--beam_search_diversity_rate', type=float, default=0.,
help='beam_search_diversity_rate')
parser.add_argument('--tensor_para_size', type=int, default=1,
help='tensor parallel size')
parser.add_argument('--pipeline_para_size', type=int, default=1,
help='pipeline parallel size')
parser.add_argument('--ckpt_path', type=str, default='../models/megatron-models/c-model/345m/1-gpu',
help='path to the checkpoint file.')
parser.add_argument('--lib_path', type=str, default='./lib/libth_transformer.so',
help='path to the pyt_fastertransformer dynamic lib file.')
parser.add_argument('--vocab_file', type=str, default="../models/gpt2-vocab.json",
help='vocabulary file.')
parser.add_argument('--merges_file', type=str, default="../models/gpt2-merges.txt",
help='merges file.')
parser.add_argument('--start_id', type=int, default=50256,
help='start token id.')
parser.add_argument('--end_id', type=int, default=50256,
help='end token id.')
parser.add_argument('--max_batch_size', type=int, default=8,
help='max batch size.')
parser.add_argument('--repetition_penalty', type=float, default=1.,
help='repetition penalty')
parser.add_argument('--min_length', type=int, default=0,
help='A minimum number of tokens to generate')
parser.add_argument('--max_seq_len', type=int, default=1024,
help='max sequence length for position embedding table.')
parser.add_argument('--inference_data_type', '--data_type', type=str, choices=['fp32', 'fp16', 'bf16', 'fp8'], default='fp32')
parser.add_argument('--time', action='store_true',
help='whether or not to measure time elapsed.')
parser.add_argument('--sample_input_file', type=str, default=None,
help='path to sample input file. If not set, it runs with no context inputs.')
parser.add_argument('--sample_output_file', type=str, default=None,
help='path to sample output file.')
parser.add_argument('--enable_random_seed', action='store_true',
help='is enable the random seed.')
parser.add_argument('--skip_end_tokens', dest='skip_end_tokens', action='store_true',
help='Whether or not to remove end tokens from the outputs.')
parser.add_argument('--no_detokenize', dest='detokenize', action='store_false',
help='Skip detokenizing output token ids.')
parser.add_argument('--sparse', action='store_true', dest='sparse',
help='Enable sparse matrix multiplication. (Need SM 8.0 or 8.6 and SPARSITY_SUPPORT=ON)')
parser.add_argument('--use_jieba_tokenizer', action='store_true',
help='use JiebaBPETokenizer as tokenizer.')
parser.add_argument(
'--weights_data_type',
type=str,
default="fp32",
choices=["fp32", "fp16"],
help='Data type of FT checkpoint weights',
)
parser.add_argument('--return_cum_log_probs', type=int, default=0, choices=[0, 1, 2],
help='Whether to compute the cumulative log probability of sentences.'
' 0: do not return the cumulative log probs '
' 1: return the cumulative log probs of generated sequences'
' 2: return the cumulative log probs of sequences')
parser.add_argument('--banned_words',
type=str,
default="",
help='A comma separated list of tokens that should never be generated. Everything between the commas will'
' be tokenized and converted to token ids that will be banned.'
' Note that spaces before and after commas are included in tokenization.'
' An example highlighting this importance is that "the" and " the" are'
' two separate tokens in some vocabularies.'
' Therefore, to ban a certain phrase, we would need to specify all tokens'
' in the vocabulary that include the phrase.'
' Example use: --banned_words "the, the,a,boy". This will ban the tokens "the", " the", "a" and "boy".'
' We can also use a pipe "|" to ban different tokens for different sentences in a batch.'
' Example: --banned_words "the, the|a,boy" will ban the tokens "the" and " the" in output sentence 1 and'
' ban the tokens "a" and "boy" in output sentence 2. When using this mode, we must specify a set of tokens to ban'
' for each sentence in the batch.',
)
args = parser.parse_args()
ckpt_config = configparser.ConfigParser()
ckpt_config_path = os.path.join(args.ckpt_path, 'config.ini')
if os.path.isfile(ckpt_config_path):
ckpt_config.read(ckpt_config_path)
if 'gpt' in ckpt_config.keys():
for args_key, config_key, func in [
('layer_num', 'num_layer', ckpt_config.getint),
('max_seq_len', 'max_pos_seq_len', ckpt_config.getint),
('weights_data_type', 'weight_data_type', ckpt_config.get),
]:
if config_key in ckpt_config['gpt'].keys():
prev_val = args.__dict__[args_key]
args.__dict__[args_key] = func('gpt', config_key)
print('Loading {} from config.ini, previous: {}, current: {}'.format(
args_key, prev_val, args.__dict__[args_key]))
else:
print('Not loading {} from config.ini'.format(args_key))
for key in ['head_num', 'size_per_head', 'tensor_para_size']:
if key in args.__dict__:
prev_val = args.__dict__[key]
args.__dict__[key] = ckpt_config.getint('gpt', key)
print('Loading {} from config.ini, previous: {}, current: {}'.format(
key, prev_val, args.__dict__[key]))
else:
print('Not loading {} from config.ini'.format(key))
if 'structure' in ckpt_config.keys():
gpt_with_moe = ckpt_config.getboolean('structure', 'gpt_with_moe')
expert_num = ckpt_config.getint('structure', 'expert_num')
moe_layer_index_str = ckpt_config.get('structure', 'moe_layers')
if len(moe_layer_index_str) <= 2:
moe_layer_index = []
else:
moe_layer_index = [int(n) for n in moe_layer_index_str[1:-1].replace(' ', '').split(',')]
moe_k = 1
else:
gpt_with_moe = False
expert_num = 0
moe_layer_index = []
moe_k = 0
layer_num = args.layer_num
output_len = args.output_len
head_num = args.head_num
size_per_head = args.size_per_head
vocab_size = args.vocab_size
beam_width = args.beam_width
top_k = args.top_k
top_p = args.top_p
temperature = args.temperature
len_penalty = args.len_penalty
beam_search_diversity_rate = args.beam_search_diversity_rate
tensor_para_size = args.tensor_para_size
pipeline_para_size = args.pipeline_para_size
start_id = args.start_id
end_id = args.end_id
max_batch_size = args.max_batch_size
max_seq_len = args.max_seq_len
repetition_penalty = args.repetition_penalty
min_length = args.min_length
return_cum_log_probs = args.return_cum_log_probs
return_output_length = return_cum_log_probs > 0
print("\n=============== Arguments ===============")
for arg in vars(args):
print("{}: {}".format(arg, getattr(args, arg)))
print("=========================================\n")
if args.use_jieba_tokenizer:
from examples.pytorch.gpt.utils.tokenizer import JiebaBPETokenizer
enc = JiebaBPETokenizer(args.vocab_file)
else:
enc = encoder.get_encoder(args.vocab_file, args.merges_file)
bad_words_list=None
if args.banned_words:
batch_banned_words = args.banned_words.split("|")
banned_words = [[banned_words_for_batch] for banned_words_for_batch in batch_banned_words]
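# word_list.to_word_list_format tokenizes each comma-separated entry and packs the
# resulting ids (plus offsets) into the bad-words tensor layout expected by FT.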
bad_words_list = torch.tensor(word_list.to_word_list_format(banned_words, enc)).to("cuda")
# Inputs
contexts = []
if args.sample_input_file: # conditional case
with open(args.sample_input_file, "r") as f:
contexts = f.read().splitlines()
batch_size = min(len(contexts), max_batch_size)
contexts = contexts[:batch_size]
start_ids = [torch.IntTensor(enc.encode(c)) for c in contexts]
else: # unconditional case
batch_size = max_batch_size
contexts = ['<|endoftext|>'] * batch_size
start_ids = [torch.IntTensor([end_id])] * batch_size
print("[INFO] batch size: {}".format(batch_size))
start_lengths = [len(ids) for ids in start_ids]
start_ids = pad_sequence(start_ids, batch_first=True, padding_value=end_id)
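# Contexts are right-padded with end_id; the true lengths are passed separately via
# start_lengths so FT knows where each context ends.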
start_lengths = torch.IntTensor(start_lengths)
if args.enable_random_seed == True:
random_seed_tensor = torch.randint(0, 10000, size=[batch_size], dtype=torch.int64)
else:
random_seed_tensor = torch.zeros([batch_size], dtype=torch.int64)
# Prepare model.
if args.inference_data_type == 'fp8':
gpt = GPTFp8(head_num, size_per_head, vocab_size, start_id, end_id, layer_num,
max_seq_len, tensor_para_size, pipeline_para_size, lib_path=args.lib_path,
ckpt_path=args.ckpt_path, weights_data_type=args.weights_data_type)
else:
gpt = GPT(head_num, size_per_head, vocab_size, start_id, end_id, layer_num,
max_seq_len, tensor_para_size, pipeline_para_size, lib_path=args.lib_path,
inference_data_type=args.inference_data_type,
weights_data_type=args.weights_data_type,
gpt_with_moe=gpt_with_moe,
expert_num=expert_num,
moe_k=moe_k,
moe_layer_index=moe_layer_index)
if not gpt.load(ckpt_path=args.ckpt_path):
print("[WARNING] Checkpoint file not found. Model loading is skipped.")
if args.sparse:
gpt.sparse()
with torch.no_grad():
# Generate tokens.
tokens_batch = gpt(
start_ids=start_ids,
start_lengths=start_lengths,
output_len=output_len,
beam_width=beam_width,
top_k=top_k * torch.ones(size=[batch_size], dtype=torch.int32),
top_p=top_p * torch.ones(size=[batch_size], dtype=torch.float32),
beam_search_diversity_rate=beam_search_diversity_rate * torch.ones(size=[batch_size], dtype=torch.float32),
temperature=temperature * torch.ones(size=[batch_size], dtype=torch.float32),
len_penalty=len_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
min_length=min_length * torch.ones(size=[batch_size], dtype=torch.int32),
random_seed=random_seed_tensor,
bad_words_list=bad_words_list,
return_output_length=return_output_length,
return_cum_log_probs=return_cum_log_probs)
if return_cum_log_probs > 0:
tokens_batch, _, cum_log_probs = tokens_batch
print('[INFO] Log probs of sentences:', cum_log_probs)
# Only rank 0 gets the output; the other ranks return None.
if tokens_batch is not None:
outputs = []
tokens_batch = tokens_batch.cpu().numpy()
for i, (context, tokens) in enumerate(zip(contexts, tokens_batch)):
for beam_id in range(beam_width):
token = tokens[beam_id][start_lengths[i]:] # exclude context input from the output
if args.skip_end_tokens:
print('skip eos', len(tokens[beam_id]), start_lengths[i], len(token), len(token[token != end_id]))
token = token[token != end_id]
output = enc.decode(token) if args.detokenize else ' '.join(str(t) for t in token.tolist())
outputs.append(output)
print(f"[INFO] batch {i}, beam {beam_id}: \n[Context]\n{context}\n\n[Output]\n{output}\n")
if args.sample_output_file:
with open(args.sample_output_file, "w+") as f:
outputs = [o.replace("\n", "\\n") for o in outputs]
f.writelines("\n".join(outputs))
# Measure inference time.
if args.time:
iterations = 10
# warmup
for i in range(iterations):
tokens_batch = gpt(
start_ids=start_ids,
start_lengths=start_lengths,
output_len=output_len,
beam_width=beam_width,
top_k=top_k * torch.ones(size=[batch_size], dtype=torch.int32),
top_p=top_p * torch.ones(size=[batch_size], dtype=torch.float32),
beam_search_diversity_rate=beam_search_diversity_rate * torch.ones(size=[batch_size], dtype=torch.float32),
temperature=temperature * torch.ones(size=[batch_size], dtype=torch.float32),
len_penalty=len_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
min_length=min_length * torch.ones(size=[batch_size], dtype=torch.int32),
random_seed=random_seed_tensor,
bad_words_list=bad_words_list,
return_output_length=return_output_length,
return_cum_log_probs=return_cum_log_probs)
batch_num = 0
token_num = 0
time = timeit.default_timer()
for i in range(iterations):
tokens_batch = gpt(
start_ids=start_ids,
start_lengths=start_lengths,
output_len=output_len,
beam_width=beam_width,
top_k=top_k * torch.ones(size=[batch_size], dtype=torch.int32),
top_p=top_p * torch.ones(size=[batch_size], dtype=torch.float32),
beam_search_diversity_rate=beam_search_diversity_rate * torch.ones(size=[batch_size], dtype=torch.float32),
temperature=temperature * torch.ones(size=[batch_size], dtype=torch.float32),
len_penalty=len_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
min_length=min_length * torch.ones(size=[batch_size], dtype=torch.int32),
random_seed=random_seed_tensor,
bad_words_list=bad_words_list,
return_output_length=return_output_length,
return_cum_log_probs=return_cum_log_probs)
batch_num += 1
for j, tokens in enumerate(tokens_batch):
token_num += tokens.shape[-1] - start_lengths[j]
time_elapsed = timeit.default_timer() - time
throughput = token_num / time_elapsed
print(f"[INFO] FT-GPT generates {batch_num} batches, taking {time_elapsed:0.3f} secs "
f"to generate {token_num} tokens, {throughput:0.3f} tokens/sec.")
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/gpt_example.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import torch.distributed as dist
from datetime import datetime
from datasets import load_dataset, load_metric
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
from tqdm import tqdm
from utils import gpt_decoder
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.gpt.utils.parallel_gpt import ParallelGPT
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--ft_model_location', type=str,
default='/models/GPT/HF/gpt2-xl/c-models')
parser.add_argument('--hf_model_name', type=str,
default='facebook/opt-350m')
parser.add_argument('--summarize', action='store_true')
parser.add_argument('--test_hf', action='store_true')
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument("--cache_path", type=str, default="/workdir/datasets/ccdv/")
parser.add_argument("--max_ite", type=int, default=20)
parser.add_argument('--lib_path', type=str, default='./lib/libth_transformer.so',
help='path to the pyt_fastertransformer dynamic lib file.')
parser.add_argument('--tensor_para_size', type=int, default=1,
help='tensor parallel size')
parser.add_argument('--pipeline_para_size', type=int, default=1,
help='pipeline parallel size')
parser.add_argument(
'--weights_data_type',
type=str,
default="fp32",
choices=["fp32", "fp16"],
help='Data type of FT checkpoint weights',
)
parser.add_argument(
'--int8_mode', type=int, default=0, choices=[0, 1],
help='The level of quantization to perform.'
' 0: No quantization. All computation in data_type'
' 1: Quantize weights to int8, all compute occurs in fp16/bf16. Not supported when data_type is fp32')
parser.add_argument(
'--use_gpt_decoder_ops', action='store_true',
help='Use separate decoder FT operators instead of end-to-end model op.')
parser.add_argument(
'--use_fp32_to_compute_logit', action='store_true',
help='Use FP32 data type for computing logit values when using gpt decoder ops. '
'FT end-to-end GPT op always uses FP32 data type when computing logit.')
parser.add_argument(
'--rougeLsum_threshold', type=float, default=None,
help='Threshold of FT rougeLsum score')
parser.add_argument(
'--verbose', action='store_true', help='Print all summary result.')
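    # Example invocation (paths and model name are illustrative):
    #   mpirun -n 1 python opt_summarization.py --summarize --test_hf --data_type fp16 \
    #       --hf_model_name facebook/opt-350m --ft_model_location /models/opt-350m/c-models \
    #       --lib_path ./lib/libth_transformer.so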
args = parser.parse_args()
    np.random.seed(1)  # the rouge metric uses sampling to compute the score
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Have initialized the process group")
rank = dist.get_rank()
summarize = args.summarize
test_hf = args.test_hf
ft_model_location = args.ft_model_location
hf_model_name = args.hf_model_name
tokenizer = AutoTokenizer.from_pretrained(hf_model_name)
tokenizer.pad_token = tokenizer.eos_token
dataset_cnn = load_dataset("ccdv/cnn_dailymail", '3.0.0', cache_dir=args.cache_path)
hf_config = vars(AutoConfig.from_pretrained(hf_model_name))
head_num = hf_config['num_attention_heads']
layer_num = hf_config['num_hidden_layers']
start_id = hf_config['bos_token_id']
end_id = hf_config['eos_token_id']
size_per_head = hf_config['hidden_size'] // head_num
# opt specific params: some are fixed
layernorm_eps = 1e-5
layernorm_type = 'pre_layernorm' if hf_config['do_layer_norm_before'] else 'post_layernorm'
activation_type = 'Relu' if hf_config['activation_function'] == 'relu' else 'Gelu'
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/opt/modeling_opt.py#L498
# has post decoder layernorm when layernorm_type is pre layernorm
has_post_decoder_layernorm = layernorm_type == 'pre_layernorm'
if summarize:
top_k = 2
output_len = 100
else:
top_k = 1
output_len = 256
top_p = 0.0
temperature = 1
max_seq_len = hf_config['max_position_embeddings']
max_batch_size = 1
repetition_penalty = 1
random_seed = 0
vocab_size = hf_config['vocab_size']
tensor_para_size = args.tensor_para_size
pipeline_para_size = args.pipeline_para_size
lib_path = args.lib_path
ckpt_path = os.path.join(ft_model_location, f'{tensor_para_size}-gpu')
print(f"top_k: {top_k}")
print(f"top_p: {top_p}")
print(f"int8_mode: {args.int8_mode}")
print(f"temperature: {temperature}")
print(f"max_seq_len: {max_seq_len}")
print(f"max_batch_size: {max_batch_size}")
print(f"repetition_penalty: {repetition_penalty}")
print(f"vocab_size: {vocab_size}")
print(f"tensor_para_size: {tensor_para_size}")
print(f"pipeline_para_size: {pipeline_para_size}")
print(f"lib_path: {lib_path}")
print(f"ckpt_path: {ckpt_path}")
print(f"hf_config: {hf_config}")
infer_decode_args = dict(
beam_width=1,
top_k=top_k * torch.ones(max_batch_size, dtype=torch.int32),
top_p=top_p * torch.ones(max_batch_size, dtype=torch.float32),
temperature=temperature * torch.ones(max_batch_size, dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(max_batch_size, dtype=torch.float32),
random_seed=random_seed * torch.ones(max_batch_size, dtype=torch.int64)
)
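    # Sampling parameters are passed as per-sequence tensors of length max_batch_size,
    # so each sequence in a batch could in principle use different settings.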
if not args.use_gpt_decoder_ops:
gpt = ParallelGPT(head_num, size_per_head, vocab_size, start_id, end_id, layer_num,
max_seq_len, tensor_para_size, pipeline_para_size, lib_path,
inference_data_type=args.data_type,
layernorm_eps=layernorm_eps,
layernorm_type=layernorm_type,
activation_type=activation_type,
has_post_decoder_layernorm=has_post_decoder_layernorm,
int8_mode=args.int8_mode,
weights_data_type=args.weights_data_type)
if not gpt.load(ckpt_path=ckpt_path):
print("[WARNING] Checkpoint file not found. Model loading is skipped.")
else:
gpt = gpt_decoder.Gpt(
num_heads=head_num,
size_per_head=size_per_head,
num_layers=layer_num,
vocab_size=vocab_size,
start_id=start_id,
end_id=end_id,
tensor_para_size=tensor_para_size,
pipeline_para_size=pipeline_para_size,
lib_path=lib_path,
max_seq_len=max_seq_len,
layernorm_eps=layernorm_eps,
layernorm_type=layernorm_type,
activation_type=activation_type,
has_post_decoder_layernorm=has_post_decoder_layernorm,
int8_mode=args.int8_mode,
inference_data_type=args.data_type,
weights_data_type=args.weights_data_type,
use_fp32_to_compute_logit=args.use_fp32_to_compute_logit)
gpt.load(ckpt_path, args.data_type)
if (test_hf and summarize) or not summarize:
model = AutoModelForCausalLM.from_pretrained(hf_model_name)
model.cuda()
if args.data_type == 'fp16':
model.half()
elif args.data_type == 'bf16':
model.bfloat16()
def summarize_ft_e2e(datapoint):
if summarize:
line = datapoint['article'] + ' TL;DR: '
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_encoded = tokenizer.encode(line, return_tensors='pt')
if summarize:
line_encoded = line_encoded[:, -923:]
else:
line_encoded = line_encoded[:, -768:]
line_encoded = line_encoded.type(torch.int32)
with torch.no_grad():
output, ft_output_len = gpt(line_encoded, torch.IntTensor([len(line_encoded[0])]),
output_len,
return_output_length=True,
**infer_decode_args)
tokens = output[0][0][len(line_encoded[0]):ft_output_len[0]].cpu().numpy()
output_lines = tokenizer.decode(output[0][0][len(line_encoded[0]):ft_output_len[0]])
output_lines = ".".join(output_lines.split('.')[:4]) + "."
return output_lines, tokens
def summarize_ft_sep(datapoint):
if summarize:
line = datapoint['article'] + ' TL;DR: '
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_encoded = tokenizer.encode(line, return_tensors='pt')
if summarize:
line_encoded = line_encoded[:, -923:]
else:
line_encoded = line_encoded[:, -768:]
line_encoded = line_encoded.type(torch.int32).to(gpt.device)
input_lengths = torch.tensor([len(line_encoded[0])], dtype=torch.int32, device=gpt.device)
with torch.no_grad():
output_dict = gpt.generate(input_token_ids=line_encoded,
input_lengths=input_lengths,
gen_length=output_len,
eos_token_id=tokenizer.eos_token_id,
return_output_length=True,
**infer_decode_args)
output_token_ids = output_dict['output_token_ids']
output_lengths = output_dict['output_lengths']
tokens = output_token_ids[0, 0, input_lengths[0]:output_lengths[0]]
output_lines = tokenizer.decode(tokens)
output_lines = ".".join(output_lines.split('.')[:4]) + "."
return output_lines, tokens.cpu().numpy()
summarize_ft = summarize_ft_e2e if not args.use_gpt_decoder_ops else summarize_ft_sep
def summarize_hf(datapoint):
if summarize:
line = datapoint['article'] + ' TL;DR: '
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_encoded = tokenizer.encode(line, return_tensors='pt')
if summarize:
line_encoded = line_encoded[:, -923:]
else:
line_encoded = line_encoded[:, -768:]
# line_encoded = line_encoded.to(device_hf)
line_encoded = line_encoded.cuda()
with torch.no_grad():
output = model.generate(line_encoded,
max_length=len(line_encoded[0]) + output_len,
top_k=top_k,
temperature=temperature,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id)
tokens = output[0][len(line_encoded[0]):].cpu().numpy()
output_lines = tokenizer.decode(output[0][len(line_encoded[0]):])
output_lines = ".".join(output_lines.split('.')[:4]) + "."
return output_lines, tokens
if summarize:
datapoint = dataset_cnn['test'][0]
summary, _ = summarize_ft(datapoint)
print('---------------------------------------------------------')
print('FT Generated : ')
print(' Article : ', datapoint['article'])
print('\n Highlights : ', datapoint['highlights'])
print('\n Summary : ', summary)
print('---------------------------------------------------------')
if test_hf:
summary, _ = summarize_hf(datapoint)
print('---------------------------------------------------------')
print('HF Generated : ')
print(' Article : ', datapoint['article'])
print('\n Highlights : ', datapoint['highlights'])
print('\n Summary : ', summary)
print('---------------------------------------------------------')
if summarize:
metric_ft = load_metric("rouge")
metric_hf = load_metric("rouge")
else:
tokens = []
ft_time = 0.0
hf_time = 0.0
for data_point_idx in tqdm(range(1, 11490, int(11490 / args.max_ite))):
try:
datapoint = dataset_cnn['test'][data_point_idx]
start_time = datetime.now()
summary_ft, tokens_ft = summarize_ft(datapoint)
stop_time = datetime.now()
ft_time += (stop_time - start_time).total_seconds()
if (test_hf and summarize) or not summarize:
start_time = datetime.now()
summary_hf, tokens_hf = summarize_hf(datapoint)
stop_time = datetime.now()
hf_time += (stop_time - start_time).total_seconds()
if rank == 0:
if summarize:
metric_ft.add_batch(predictions=[summary_ft], references=[datapoint['highlights']])
if test_hf:
metric_hf.add_batch(predictions=[summary_hf], references=[datapoint['highlights']])
else:
tokens.append((tokens_ft, tokens_hf))
if args.verbose:
print('-' * 100)
print('FT Summary:', summary_ft)
if test_hf:
print('HF Summary:', summary_hf)
except:
print('Error with datapoint : ', data_point_idx)
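    # When not summarizing, FT and HF both use top_k=1 (greedy) decoding, so their first t
    # tokens should normally agree; compute_exact_match reports that agreement for several
    # prefix lengths.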
def compute_exact_match(tokens, n_tokens=[1, 10, 25, 50, 100, 150, 200, 250]):
em_metrics = []
for t in n_tokens:
errors = 0
total = 0
for ft_tokens, hf_tokens in tokens:
if len(ft_tokens) > t and len(hf_tokens) > t:
total = total + 1
if not np.array_equal(ft_tokens[:t], hf_tokens[:t]):
errors = errors + 1
if total > 0:
print(f"{t}-token exact match acc: {100*(1-errors/total):.2f}")
em_metrics.append(1 - errors / total)
else:
em_metrics.append(np.nan)
return em_metrics
if rank == 0:
if summarize:
computed_metrics_ft = metric_ft.compute()
if test_hf:
computed_metrics_hf = metric_hf.compute()
print(f'Hugging Face (total latency: {hf_time} sec)')
for key in computed_metrics_hf.keys():
print(f'{key} : {computed_metrics_hf[key].mid[2]*100}')
            print(f'FasterTransformer (total latency: {ft_time} sec)')
for key in computed_metrics_ft.keys():
print(f'{key} : {computed_metrics_ft[key].mid[2]*100}')
if args.rougeLsum_threshold is not None:
assert computed_metrics_ft["rougeLsum"].mid[2]*100 >= args.rougeLsum_threshold, "[INFO] TEST FAIL !"
print(f"[INFO] TEST PASS !")
else:
em_metrics = compute_exact_match(tokens)
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/opt_summarization.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import dataclasses
import json
import pathlib
import typing
import numpy as np
import torch
import transformers
from utils.gpt import GptInitModelParameters, GptRuntimeModelParameters
from utils.parallel_gpt import ParallelGPT
class TensorEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, torch.Tensor):
return obj.tolist()
return super().default(obj)
class LambadaDataset(torch.utils.data.Dataset):
def __init__(self, path, tokenizer, seq_len):
self.seq_len = seq_len
self.tokenizer = tokenizer
with open(path, "r") as f:
texts = [json.loads(line)["text"] for line in f.readlines()]
        # the added leading space and the stripping are required so that the label tokens
        # match how the tokenizer encodes the final word when it follows whitespace
labels = [" " + text.split()[-1] for text in texts]
inputs = [text[: text.rfind(label)].strip() for text, label in zip(texts, labels)]
self.encodings = self.tokenizer(
inputs,
labels,
padding="max_length",
max_length=self.seq_len,
return_token_type_ids=True,
return_tensors="pt",
)
def __len__(self):
return self.encodings["input_ids"].shape[0]
def __getitem__(self, idx):
return {
"input_ids": self.encodings["input_ids"][idx],
"attention_mask": self.encodings["attention_mask"][idx],
"token_type_ids": self.encodings["token_type_ids"][idx],
}
@dataclasses.dataclass
class Metric:
acc: float
@dataclasses.dataclass
class RequestAndResult:
prompt: str
model_answer: str
target: str
input_ids: typing.List[int]
input_len: int
output_len: int
init_model_parameters: GptInitModelParameters
runtime_model_parameters: GptRuntimeModelParameters
output_ids: typing.List[int]
metrics: Metric
def _read_config_ini(args, checkpoint_path):
config_reader = configparser.ConfigParser()
config_ini_files_in_checkpoint_dir = list(checkpoint_path.rglob("config.ini"))
if args.config_ini_path is None and not config_ini_files_in_checkpoint_dir:
raise RuntimeError(
f"Missing config.ini file in {checkpoint_path}. Use --config-ini-path to point config.ini to load"
)
config_ini_path = pathlib.Path(args.config_ini_path or config_ini_files_in_checkpoint_dir[0])
if not config_ini_path.is_file():
raise FileNotFoundError(f"Missing {config_ini_path}")
else:
config_reader.read(config_ini_path.as_posix())
return config_reader
def _get_model(args, config_reader):
init_parameters = GptInitModelParameters.from_args(args, config_reader)
print("\n=============== GPT params ===============")
for key, value in dataclasses.asdict(init_parameters).items():
print(f"{key}: {value}")
print(f"lib_path: {args.lib_path}")
print("========================================")
gpt_params = init_parameters.gpt_init_kwargs()
gpt = ParallelGPT(**gpt_params, lib_path=args.lib_path)
if not gpt.load(ckpt_path=args.checkpoint_path):
print("[WARNING] Checkpoint file not found. Model loading is skipped.")
if init_parameters.sparse:
gpt.sparse()
gpt.eval()
return gpt
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--checkpoint-path", type=str, required=True, help="Path to FasterTransformer checkpoint dir")
parser.add_argument("--lib-path", type=str, required=True, help="Path of FasterTransformer PyTorch GPT op library")
parser.add_argument(
"--config-ini-path",
type=str,
help="Path to config.ini file. If not provided <checkpoint_path>/config.ini will be used.",
)
parser.add_argument("--lambada-path", type=str, help="LAMBADA task data path")
parser.add_argument("--output-path", type=str, help="Path to sample output file.")
parser.add_argument("--batch-size", type=int, default=1, help="Batch size")
GptInitModelParameters.update_argparser(parser)
GptRuntimeModelParameters.update_argparser(parser)
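    # Example invocation (paths are illustrative):
    #   python lambada_task_example.py --checkpoint-path /models/gpt2/c-models/1-gpu \
    #       --lib-path ./lib/libth_transformer.so --lambada-path /data/lambada_test.jsonl \
    #       --batch-size 8 --output-path lambada_results.json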
args = parser.parse_args()
print("\n============== Arguments ===============")
for key, value in vars(args).items():
print(f"{key}: {value}")
print("========================================")
checkpoint_path = pathlib.Path(args.checkpoint_path)
config_reader = _read_config_ini(args, checkpoint_path)
gpt = _get_model(args, config_reader)
vocab_path = checkpoint_path / "vocab.json"
merges_path = checkpoint_path / "merges.txt"
max_seq_len = config_reader.getint("ft_instance_hyperparameter", "max_seq_len")
tokenizer = transformers.GPT2TokenizerFast(vocab_path.as_posix(), merges_path.as_posix())
tokenizer.add_special_tokens({"pad_token": tokenizer.eos_token})
if args.lambada_path:
dataset = LambadaDataset(args.lambada_path, tokenizer=tokenizer, seq_len=max_seq_len)
else:
from datasets import load_dataset
dataset = load_dataset("lambada", split="validation")
data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size)
runtime_parameters = GptRuntimeModelParameters.from_args(args, config_reader)
inference_parameters_dict = dataclasses.asdict(runtime_parameters)
print("\n=========== Inference params ===========")
for key, value in inference_parameters_dict.items():
print(f"{key}: {value}")
print("========================================")
beam_idx = 0 # use only 1st beam result
requested_num = 0
correct_num = 0
results = {"output": {"lambada": []}, "results": {"lambada": {}}}
with torch.no_grad():
for entries in data_loader:
inputs_tokens_batch = [
input_ids[(attention_mask == 1) & (token_type_ids == 0)]
for input_ids, attention_mask, token_type_ids in zip(
entries["input_ids"], entries["attention_mask"], entries["token_type_ids"]
)
]
labels_tokens_batch = [
input_ids[(attention_mask == 1) & (token_type_ids == 1)]
for input_ids, attention_mask, token_type_ids in zip(
entries["input_ids"], entries["attention_mask"], entries["token_type_ids"]
)
]
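            # token_type_ids is 0 for the context and 1 for the target last word, because the
            # tokenizer was called with (inputs, labels) pairs; the masks above split the two.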
inputs_tokens_batch_padded = [
torch.nn.functional.pad(
input_tokens,
pad=[0, (max_seq_len - input_tokens.shape[0])],
mode="constant",
value=tokenizer.pad_token_id,
)
for input_tokens in inputs_tokens_batch
]
input_tokens_lengths = [input_tokens.shape[0] for input_tokens in inputs_tokens_batch]
            # max is required because a scalar value is used for the output_seq_len input
expected_tokens_lengths = max([label_tokens.shape[0] for label_tokens in labels_tokens_batch])
start_ids = torch.stack(inputs_tokens_batch_padded) # shape=(batch_size, max_seq_len)
runtime_parameters = GptRuntimeModelParameters.from_args(args, config_reader, start_ids.shape[0])
inference_parameters_dict = dataclasses.asdict(runtime_parameters)
start_ids = start_ids.to(torch.int32)
result_all_tokens_batch = gpt(
start_ids,
torch.IntTensor(input_tokens_lengths),
expected_tokens_lengths,
**inference_parameters_dict,
)
results_idxes = [
torch.nonzero(token_type_ids, as_tuple=True)[0] for token_type_ids in entries["token_type_ids"]
]
results_tokens_batch = [
result_tokens_ids[beam_idx][result_idxes].cpu()
for result_tokens_ids, result_idxes in zip(result_all_tokens_batch, results_idxes)
]
labels_tokens_batch = [tokens.cpu() for tokens in labels_tokens_batch]
results_tokens_batch = [tokens.cpu() for tokens in results_tokens_batch]
result_text_batch = tokenizer.batch_decode(results_tokens_batch)
input_text_batch = tokenizer.batch_decode(inputs_tokens_batch)
label_text_batch = tokenizer.batch_decode(labels_tokens_batch)
for idx in range(len(inputs_tokens_batch)):
is_correct_answer = torch.all(labels_tokens_batch[idx] == results_tokens_batch[idx])
correct_num += int(is_correct_answer)
result = RequestAndResult(
prompt=input_text_batch[idx],
model_answer=result_text_batch[idx],
target=label_text_batch[idx],
input_ids=list(map(int, inputs_tokens_batch[idx])),
input_len=int(input_tokens_lengths[idx]),
output_len=expected_tokens_lengths,
init_model_parameters=GptInitModelParameters.from_args(args, config_reader),
runtime_model_parameters=runtime_parameters.slice_args(idx),
output_ids=list(map(int, result_all_tokens_batch[idx][beam_idx])),
metrics=Metric(acc=float(is_correct_answer)),
)
results["output"]["lambada"].append(dataclasses.asdict(result))
requested_num += len(inputs_tokens_batch)
accuracy = correct_num * 100 / requested_num
print(f"[INFO] accuracy: {accuracy:0.4f}% (total : {requested_num})")
# Dump prediction json
results["results"]["lambada"]["acc"] = accuracy
if args.output_path:
output_json_path = pathlib.Path(args.output_path)
output_json_path.parent.mkdir(parents=True, exist_ok=True)
with output_json_path.open(mode="w") as json_file:
json.dump(results, json_file, indent=2, cls=TensorEncoder)
print(f"[INFO] Detailed test results saved to {output_json_path}")
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/lambada_task_example.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021, NAVER Corp. Authored by CLOVA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import os
import sys
import timeit
import torch
from torch.nn.utils.rnn import pad_sequence
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(dir_path, "../../.."))
import examples.pytorch.gpt.utils.gpt_token_encoder as encoder
from examples.pytorch.gpt.utils import comm
from examples.pytorch.gpt.utils import gpt_decoder
from examples.pytorch.gpt.utils.parallel_gpt import ParallelGPT
from utils import word_list
@torch.no_grad()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--layer_num', type=int, default=24,
help='number of layers')
parser.add_argument('--input_len', type=int, default=1,
help='input sequence length to generate.')
parser.add_argument('--output_len', type=int, default=32,
help='output sequence length to generate.')
parser.add_argument('--head_num', type=int, default=16,
help='head number')
parser.add_argument('--size_per_head', type=int, default=64,
help='size per head')
parser.add_argument('--vocab_size', type=int, default=50304,
help='vocab size')
parser.add_argument('--beam_width', type=int, default=1,
help='beam width for beam search. Using sampling when beam width is 1.')
parser.add_argument('--top_k', type=int, default=1,
help='top k candidate num')
parser.add_argument('--top_p', type=float, default=0.,
help='top p probability threshold')
parser.add_argument('--temperature', type=float, default=1.,
help='temperature')
parser.add_argument('--len_penalty', type=float, default=0.,
help='len_penalty')
parser.add_argument('--beam_search_diversity_rate', type=float, default=0.,
help='beam_search_diversity_rate')
parser.add_argument('--tensor_para_size', type=int, default=1,
help='tensor parallel size')
parser.add_argument('--pipeline_para_size', type=int, default=1,
help='pipeline parallel size')
parser.add_argument('--ckpt_path', type=str, default='../models/megatron-models/c-model/345m/1-gpu',
help='path to the checkpoint file.')
parser.add_argument('--lib_path', type=str, default='./lib/libth_transformer.so',
help='path to the pyt_fastertransformer dynamic lib file.')
parser.add_argument('--vocab_file', type=str, default="../models/gpt2-vocab.json",
help='vocabulary file.')
parser.add_argument('--merges_file', type=str, default="../models/gpt2-merges.txt",
help='merges file.')
parser.add_argument('--start_id', type=int, default=50256,
help='start token id.')
parser.add_argument('--end_id', type=int, default=50256,
help='end token id.')
parser.add_argument('--max_batch_size', type=int, default=8,
help='max batch size.')
parser.add_argument('--repetition_penalty', type=float, default=1.,
help='repetition penalty')
parser.add_argument('--presence_penalty', type=float, default=0.,
                        help='presence penalty. Similar to repetition penalty, but additive rather than multiplicative.')
parser.add_argument('--min_length', type=int, default=0,
help='A minimum number of tokens to generate')
parser.add_argument('--max_seq_len', type=int, default=1024,
help='max sequence length for position embedding table.')
parser.add_argument('--inference_data_type', '--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument('--time', action='store_true',
help='whether or not to measure time elapsed.')
parser.add_argument('--sample_input_file', type=str, default=None,
help='path to sample input file. If not set, it runs with no context inputs.')
parser.add_argument('--sample_output_file', type=str, default=None,
help='path to sample output file.')
parser.add_argument('--enable_random_seed', action='store_true',
                        help='use a random seed for each sentence in a batch.')
parser.add_argument('--skip_end_tokens', dest='skip_end_tokens', action='store_true',
                        help='Whether to remove end tokens from the outputs.')
parser.add_argument('--no_detokenize', dest='detokenize', action='store_false',
help='Skip detokenizing output token ids.')
parser.add_argument('--use_jieba_tokenizer', action='store_true',
help='use JiebaBPETokenizer as tokenizer.')
parser.add_argument('--int8_mode', type=int, default=0, choices=[0, 1],
help='The level of quantization to perform.'
' 0: No quantization. All computation in data_type'
' 1: Quantize weights to int8, all compute occurs in fp16/bf16. Not supported when data_type is fp32')
parser.add_argument(
'--weights_data_type',
type=str,
default="fp32",
choices=["fp32", "fp16"],
help='Data type of FT checkpoint weights',
)
parser.add_argument('--return_cum_log_probs', type=int, default=0, choices=[0, 1, 2],
                        help='Whether to compute the cumulative log probability of sentences.'
' 0: do not return the cumulative log probs '
' 1: return the cumulative log probs of generated sequences'
' 2: return the cumulative log probs of sequences')
parser.add_argument('--shared_contexts_ratio', type=float, default=1.0,
                        help='Triggers the shared context optimization when '
                             'compact_size <= shared_contexts_ratio * batch_size. '
                             'A value of 0.0 deactivates the optimization.')
parser.add_argument('--banned_words',
type=str,
default="",
help='A comma separated list of tokens that should never be generated. Everything between the commas will'
' be tokenized and converted to token ids that will be banned.'
' Note that spaces before and after commas are included in tokenization.'
                        ' An example highlighting this importance is that "the" and " the" are'
                        ' two separate tokens in some vocabularies.'
                        ' Therefore, to ban a certain phrase, we would need to specify all tokens'
                        ' in the vocabulary that include the phrase.'
' Example use: --banned_words "the, the,a,boy". This will ban the tokens "the", " the", "a" and "boy".'
' We can also use a pipe "|" to ban different tokens for different sentences in a batch.'
' Example: --banned_words "the, the|a,boy" will ban the tokens "the" and " the" in output sentence 1 and'
' ban the tokens "a" and "boy" in output sentence 2. When using this mode, we must specify a set of tokens to ban'
' for each sentence in the batch.',
)
parser.add_argument('--use_gpt_decoder_ops', action='store_true',
help='Use separate decoder FT operators instead of end-to-end model op.')
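    # Example invocation (paths are illustrative):
    #   mpirun -n 1 python multi_gpu_gpt_example.py --ckpt_path ../models/megatron-models/c-model/345m/1-gpu \
    #       --vocab_file ../models/gpt2-vocab.json --merges_file ../models/gpt2-merges.txt \
    #       --sample_input_file input.txt --inference_data_type fp16 --time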
args = parser.parse_args()
ckpt_config = configparser.ConfigParser()
ckpt_config_path = os.path.join(args.ckpt_path, 'config.ini')
if os.path.isfile(ckpt_config_path):
ckpt_config.read(ckpt_config_path)
if 'gpt' in ckpt_config.keys():
for args_key, config_key, func in [
('layer_num', 'num_layer', ckpt_config.getint),
('max_seq_len', 'max_pos_seq_len', ckpt_config.getint),
('weights_data_type', 'weight_data_type', ckpt_config.get),
]:
if config_key in ckpt_config['gpt'].keys():
prev_val = args.__dict__[args_key]
args.__dict__[args_key] = func('gpt', config_key)
print('Loading {} from config.ini, previous: {}, current: {}'.format(
args_key, prev_val, args.__dict__[args_key]))
else:
print('Not loading {} from config.ini'.format(args_key))
for key in ['head_num', 'size_per_head', 'tensor_para_size']:
if key in args.__dict__:
prev_val = args.__dict__[key]
args.__dict__[key] = ckpt_config.getint('gpt', key)
print('Loading {} from config.ini, previous: {}, current: {}'.format(
key, prev_val, args.__dict__[key]))
else:
print('Not loading {} from config.ini'.format(key))
if 'structure' in ckpt_config.keys():
gpt_with_moe = ckpt_config.getboolean('structure', 'gpt_with_moe')
expert_num = ckpt_config.getint('structure', 'expert_num')
moe_layer_index_str = ckpt_config.get('structure', 'moe_layers')
if len(moe_layer_index_str) <= 2:
moe_layer_index = []
else:
moe_layer_index = [int(n) for n in moe_layer_index_str[1:-1].replace(' ', '').split(',')]
moe_k = 1
else:
gpt_with_moe = False
expert_num = 0
moe_layer_index = []
moe_k = 0
layer_num = args.layer_num
output_len = args.output_len
head_num = args.head_num
size_per_head = args.size_per_head
vocab_size = args.vocab_size
beam_width = args.beam_width
top_k = args.top_k
top_p = args.top_p
temperature = args.temperature
len_penalty = args.len_penalty
beam_search_diversity_rate = args.beam_search_diversity_rate
tensor_para_size = args.tensor_para_size
pipeline_para_size = args.pipeline_para_size
start_id = args.start_id
end_id = args.end_id
max_batch_size = args.max_batch_size
max_seq_len = args.max_seq_len
repetition_penalty = args.repetition_penalty
presence_penalty = args.presence_penalty
min_length = args.min_length
weights_data_type = args.weights_data_type
return_cum_log_probs = args.return_cum_log_probs
return_output_length = return_cum_log_probs > 0
shared_contexts_ratio = args.shared_contexts_ratio
print('\n=================== Arguments ===================')
for k, v in vars(args).items():
print(f'{k.ljust(30, ".")}: {v}')
print('=================================================\n')
if args.use_jieba_tokenizer:
from examples.pytorch.gpt.utils.tokenizer import JiebaBPETokenizer
enc = JiebaBPETokenizer(args.vocab_file)
else:
enc = encoder.get_encoder(args.vocab_file, args.merges_file)
torch.manual_seed(0)
comm.initialize_model_parallel(args.tensor_para_size, args.pipeline_para_size)
rank = comm.get_rank()
device = comm.get_device()
# Inputs
contexts = []
if args.sample_input_file: # conditional case
with open(args.sample_input_file, "r") as f:
contexts = f.read().splitlines()
batch_size = min(len(contexts), max_batch_size)
contexts = contexts[:batch_size]
start_ids = [torch.tensor(enc.encode(c), dtype=torch.int32, device=device) for c in contexts]
else: # unconditional case
batch_size = max_batch_size
contexts = ['<|endoftext|>'] * batch_size
start_ids = [torch.IntTensor([end_id for _ in range(args.input_len)])] * batch_size
start_lengths = [len(ids) for ids in start_ids]
start_ids = pad_sequence(start_ids, batch_first=True, padding_value=end_id)
start_lengths = torch.IntTensor(start_lengths)
# Prepare model.
if not args.use_gpt_decoder_ops:
gpt = ParallelGPT(head_num, size_per_head, vocab_size, start_id, end_id,
layer_num, max_seq_len, tensor_para_size, pipeline_para_size,
lib_path=args.lib_path, inference_data_type=args.inference_data_type,
int8_mode=args.int8_mode, weights_data_type=weights_data_type,
shared_contexts_ratio=shared_contexts_ratio,
gpt_with_moe=gpt_with_moe,
expert_num=expert_num,
moe_k=moe_k,
moe_layer_index=moe_layer_index)
if not gpt.load(ckpt_path=args.ckpt_path):
print("[WARNING] Checkpoint file not found. Model loading is skipped.")
else:
assert moe_layer_index == []
gpt = gpt_decoder.Gpt(
num_heads=head_num,
size_per_head=size_per_head,
num_layers=layer_num,
vocab_size=vocab_size,
start_id=start_id,
end_id=end_id,
tensor_para_size=tensor_para_size,
pipeline_para_size=pipeline_para_size,
            lib_path=args.lib_path,
max_seq_len=max_seq_len,
int8_mode=args.int8_mode,
weights_data_type=args.weights_data_type)
gpt.load(args.ckpt_path, args.inference_data_type)
if args.enable_random_seed:
random_seed_tensor = torch.randint(0, 10000, size=[batch_size], dtype=torch.int64)
else:
random_seed_tensor = torch.zeros([batch_size], dtype=torch.int64)
    bad_words_list = None
if args.banned_words:
batch_banned_words = args.banned_words.split("|")
banned_words = [[banned_words_for_batch] for banned_words_for_batch in batch_banned_words]
bad_words_list = torch.tensor(word_list.to_word_list_format(banned_words, enc)).to("cuda")
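        # to_word_list_format packs the banned phrases into the flat (token ids + offsets)
        # layout expected by the FT op, one entry per output sentence when "|" is used.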
repetition_penalty_vec = None if repetition_penalty == 1. else repetition_penalty * torch.ones(batch_size, dtype=torch.float32)
presence_penalty_vec = None if presence_penalty == 0. else presence_penalty * torch.ones(batch_size, dtype=torch.float32)
infer_decode_args = dict(
beam_width=beam_width,
top_k=top_k * torch.ones(batch_size, dtype=torch.int32),
top_p=top_p * torch.ones(batch_size, dtype=torch.float32),
temperature=temperature * torch.ones(batch_size, dtype=torch.float32),
repetition_penalty=repetition_penalty_vec,
presence_penalty=presence_penalty_vec,
beam_search_diversity_rate=beam_search_diversity_rate * torch.ones(batch_size, dtype=torch.float32),
len_penalty=len_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
bad_words_list=bad_words_list,
min_length=min_length * torch.ones(size=[batch_size], dtype=torch.int32),
random_seed=random_seed_tensor
)
if not args.use_gpt_decoder_ops:
def gpt_generate_fn():
tokens_batch = gpt(start_ids,
start_lengths,
output_len,
return_output_length=return_output_length,
return_cum_log_probs=return_cum_log_probs,
**infer_decode_args)
return tokens_batch
else:
def gpt_generate_fn():
output_dict = gpt.generate(input_token_ids=start_ids,
input_lengths=start_lengths,
gen_length=output_len,
eos_token_id=end_id,
return_output_length=return_output_length,
return_log_probs=return_cum_log_probs,
**infer_decode_args)
return output_dict
# Generate tokens.
gen_outputs = gpt_generate_fn()
if rank == 0:
if not args.use_gpt_decoder_ops:
if return_cum_log_probs > 0:
tokens_batch, _, cum_log_probs = gen_outputs
else:
tokens_batch, cum_log_probs = gen_outputs, None
else:
tokens_batch = gen_outputs['output_token_ids']
cum_log_probs = gen_outputs['cum_log_probs'] if return_cum_log_probs > 0 else None
if cum_log_probs is not None:
print('[INFO] Log probs of sentences:', cum_log_probs)
outputs = []
tokens_batch = tokens_batch.cpu().numpy()
for i, (context, tokens) in enumerate(zip(contexts, tokens_batch)):
for beam_id in range(beam_width):
token = tokens[beam_id][start_lengths[i]:] # exclude context input from the output
if args.skip_end_tokens:
token = token[token != end_id]
output = enc.decode(token) if args.detokenize else ' '.join(str(t) for t in token.tolist())
outputs.append(output)
print(f'[INFO] batch {i}, beam {beam_id}:\n[Context]\n{context}\n\n[Output]\n{output}\n')
if args.sample_output_file:
with open(args.sample_output_file, "w+") as f:
outputs = [o.replace("\n", "\\n") for o in outputs]
f.writelines("\n".join(outputs))
# Measure inference time.
if args.time:
iterations = 10
for _ in range(iterations):
gpt_generate_fn()
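        # The loop above is a warm-up; the timed loop below measures steady-state latency.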
time = timeit.default_timer()
for _ in range(iterations):
gpt_generate_fn()
time_elapsed = timeit.default_timer() - time
print(f'[INFO] GPT time costs: {time_elapsed * 1000 / iterations:.2f} ms')
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/multi_gpu_gpt_example.py
|
# Modify from https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/evaluate.py
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT zero-shot evaluation."""
import math
import torch
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
sys.path.append(dir_path + "/../../../3rdparty/Megatron-LM")
from megatron import get_args
from megatron.initialize import initialize_megatron
from megatron import print_rank_0, is_last_rank
from megatron import get_tokenizer
from megatron import mpu
from megatron.model import GPTModel
from megatron.utils import get_ltor_masks_and_position_ids, unwrap_model
from megatron.p2p_communication import recv_forward
from tasks.finetune_utils import build_data_loader
from tasks.zeroshot_gpt.datasets import build_dataset
# These are needed to unwrap the model; it would be nice to put them in megatron.utils if possible.
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from megatron.model import DistributedDataParallel as LocalDDP
from megatron.model import Float16Module
from examples.pytorch.gpt.utils.gpt import GPT
def get_tasks_args(parser):
"""Provide extra arguments required for tasks."""
group = parser.add_argument_group(title='tasks')
group.add_argument('--task', type=str, required=True,
help='Task name.')
group.add_argument('--epochs', type=int, default=None,
                       help='Number of finetuning epochs. Zero results in '
'evaluation only.')
group.add_argument('--pretrained-checkpoint', type=str, default=None,
                       help='Pretrained checkpoint used for finetuning.')
group.add_argument('--keep-last', action='store_true',
                       help='Keep the last batch (maybe incomplete) in '
                            'the data loader.')
group.add_argument('--train-data', nargs='+', default=None,
help='Whitespace separated paths or corpora names '
'for training.')
group.add_argument('--valid-data', nargs='*', default=None,
help='path(s) to the validation data.')
group.add_argument('--overlapping-eval', type=int, default=32,
help='Sliding window for overlapping evaluation.')
group.add_argument('--strict-lambada', action='store_true',
help='Use more difficult formulation of lambada.')
# Retriever args
group.add_argument('--qa-data-dev', type=str, default=None,
help='Path to the QA dataset dev file.')
group.add_argument('--qa-data-test', type=str, default=None,
help='Path to the QA dataset test file.')
# Faiss arguments for retriever
group.add_argument('--faiss-use-gpu', action='store_true',
                       help='Whether to create the FaissMIPSIndex on GPU')
    group.add_argument('--faiss-match', type=str, default='string',
                       choices=['regex', 'string'],
                       help='Answer matching logic type')
group.add_argument('--faiss-topk-retrievals', type=int, default=100,
help='Number of blocks to use as top-k during retrieval')
# finetune for retriever
group.add_argument('--eval-micro-batch-size', type=int, default=None,
help='Eval Batch size per model instance (local batch '
'size). Global batch size is local batch size '
'times data parallel size.')
group.add_argument('--train-with-neg', action='store_true',
help='Whether to use negative examples during model '
'training')
group.add_argument('--train-hard-neg', type=int, default=0,
help='Number of hard negative examples to use during '
'training')
# parameters for Av.rank validation method
# Following options/arguments have been taken directly from DPR codebase
group.add_argument('--val-av-rank-hard-neg', type=int, default=30,
help='Av.rank validation: how many hard negatives to'
' take from each question pool')
group.add_argument('--val-av-rank-other-neg', type=int, default=30,
help='Av.rank validation: how many other negatives to'
' take from each question pool')
group.add_argument('--ckpt-path', type=str, required=True,
help='c model checkpoint path for FasterTransformer.')
group.add_argument('--lib-path', type=str, required=True,
help='library path of FT op.')
group.add_argument('--beam_width', type=int, required=True,
help='beam width for beam search.')
group.add_argument('--top_k', type=int, required=True,
help='top k for sampling.')
group.add_argument('--top_p', type=float, required=True,
help='top p for sampling.')
group.add_argument(
'--weights_data_type',
type=str,
default="fp32",
choices=["fp32", "fp16"],
help='Data type of FT checkpoint weights',
)
return parser
def get_model_provider(eval_metric):
"""Based on evaluation metric set the parallel-output flag and
return the model provider."""
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
if eval_metric == 'loss':
parallel_output = True
elif eval_metric == 'accuracy':
parallel_output = False
else:
raise NotImplementedError('output type for {} evaluation metric '
'is not supported.'.format(eval_metric))
print_rank_0('building GPT model ...')
model = GPTModel(num_tokentypes=0, parallel_output=parallel_output,
pre_process=pre_process, post_process=post_process)
return model
return model_provider
def process_batch(batch):
"""Process batch and produce inputs for the model."""
args = get_args()
tokenizer = get_tokenizer()
loss_mask = batch['pad_mask'].long().cuda().contiguous().byte()
tokens_ = batch['text'].long().cuda().contiguous()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
# Get the masks and position ids.
attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
return tokens, labels, attention_mask, position_ids, loss_mask
def forward_step(batch, model, eval_metric, args):
"""Forward step."""
# Get the batch.
tokens, labels, attention_mask, position_ids, loss_mask = process_batch(
batch)
# Tell the model what our actual batch size will be
args = get_args()
args.micro_batch_size = len(labels)
input_tensor = recv_forward()
# Forward pass through the model.
unwrapped_model = unwrap_model(
model, (torchDDP, LocalDDP, Float16Module))
unwrapped_model.set_input_tensor(input_tensor)
start_lengths = torch.sum(tokens != model.end_id, axis=1).contiguous().int()
input_len = torch.max(start_lengths).contiguous().int()
output = []
    max_batch_size = args.micro_batch_size  # size the FT runtime tensors to the actual batch
    random_seed_tensor = 0 * torch.ones([max_batch_size], dtype=torch.int64)
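    # Teacher-forced scoring: feed the context one token longer on every step and keep the
    # model's prediction for the next position (one generated token per call).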
for i in range(input_len):
tmp_length = torch.ones(args.micro_batch_size) * (i + 1)
tmp_length = tmp_length.cuda().int()
tmp_start_lengths = torch.min(tmp_length, start_lengths).contiguous()
input_ids = tokens[:,:(i + 1)].contiguous().int()
output_id = model(input_ids,
tmp_start_lengths,
1,
args.top_k * torch.ones(size=[max_batch_size], dtype=torch.int32),
args.top_p * torch.ones(size=[max_batch_size], dtype=torch.float32),
0.0 * torch.ones(size=[max_batch_size], dtype=torch.float32),
1.0 * torch.ones(size=[max_batch_size], dtype=torch.float32),
1.0 * torch.ones(size=[max_batch_size], dtype=torch.float32),
1.0 * torch.ones(size=[max_batch_size], dtype=torch.float32),
random_seed_tensor)
output.append(output_id[:,0,-1].reshape([-1, 1]))
    output = torch.cat(output, 1)
padding = torch.ones(output.shape[0], labels.shape[1] - output.shape[1]).cuda().int()
outputs = torch.cat((output, padding), 1)
if mpu.is_pipeline_last_stage():
# For loss, return the unreduced loss.
if eval_metric == 'loss':
losses = mpu.vocab_parallel_cross_entropy(
output.contiguous().float(), labels.contiguous())
loss = torch.sum(
losses.view(-1) * loss_mask.contiguous().view(-1).float())
return loss
# For accuracy, return the number of correctly predicted samples.
if eval_metric == 'accuracy':
correct = (outputs == labels).float()
correct[(1 - loss_mask).bool()] = 1
correct = correct.prod(-1)
return correct.sum()
raise NotImplementedError('forward method for evaluation metric {} '
'is not implemented.'.format(eval_metric))
return None
def evaluate(data_loader, model, eval_metric, args):
"""Evaluation."""
args = get_args()
# Turn on evaluation mode which disables dropout.
model.eval()
total_output = 0.0
with torch.no_grad():
# For all the batches in the dataset.
for iteration, batch in enumerate(data_loader):
if iteration % args.log_interval == 0:
print_rank_0('> working on iteration: {}'.format(iteration))
# Forward evaluation.
output = forward_step(batch, model, eval_metric, args)
# Reduce across processes.
if mpu.is_pipeline_last_stage():
torch.distributed.all_reduce(output,
group=mpu.get_data_parallel_group())
total_output += output
return total_output
def evaluate_and_print_results(task, data_loader, model, eval_metric, args):
"""Evaluate and print results on screen."""
# Evaluate and get results.
output = evaluate(data_loader, model, eval_metric, args)
string = ' validation results on {} | '.format(task)
if is_last_rank():
if eval_metric == 'loss':
num_tokenized_tokens = data_loader.dataset.num_tokenized_tokens
num_original_tokens = data_loader.dataset.num_original_tokens
val_loss = output / (num_tokenized_tokens - 1)
ppl = math.exp(min(20, val_loss))
token_ratio = (num_tokenized_tokens - 1) / (num_original_tokens - 1)
adjusted_ppl = math.exp(min(20, val_loss * token_ratio))
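            # adjusted ppl rescales the per-token loss by the ratio of tokenized to original
            # (whitespace) tokens, making perplexity comparable across different tokenizers.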
string += 'avg loss: {:.4E} | '.format(val_loss)
string += 'ppl: {:.4E} | '.format(ppl)
string += 'adjusted ppl: {:.4E} | '.format(adjusted_ppl)
string += 'token ratio: {} |'.format(token_ratio)
elif eval_metric == 'accuracy':
num_examples = len(data_loader.dataset)
acc = output / num_examples
string += 'number correct: {:.4E} | '.format(output)
string += 'total examples: {:.4E} | '.format(num_examples)
string += 'avg accuracy: {:.4E}'.format(acc)
else:
raise NotImplementedError('evaluation method for {} metric is not '
'implemented yet.'.format(eval_metric))
length = len(string) + 1
print('-' * length)
print(string)
print('-' * length)
def main():
"""Main program."""
args = get_args()
if args.num_layers_per_virtual_pipeline_stage is not None:
print("Interleaved pipeline schedule is not yet supported for text generation.")
exit()
if args.task == 'LAMBADA':
eval_metric = 'accuracy'
elif args.task == 'WIKITEXT103':
eval_metric = 'loss'
else:
raise NotImplementedError('{} task is not implemented.'.format(
args.task))
    tokenizer = get_tokenizer()
    # Set up model and load checkpoint.
    model = GPT(args.num_attention_heads, args.hidden_size // args.num_attention_heads,
                args.padded_vocab_size, tokenizer.eod, tokenizer.eod,
args.num_layers, args.seq_length, 1, 1, "lib/libth_transformer.so", weights_data_type=args.weights_data_type)
if not model.load(ckpt_path=args.ckpt_path):
print("[ERROR] Checkpoint file not found at {}.".format(args.ckpt_path))
exit(-1)
if args.fp16:
assert not args.bf16
model.half()
if args.bf16:
assert not args.fp16
model.bfloat16()
# Data stuff.
dataset = build_dataset(args.task)
dataloader = build_data_loader(dataset, args.micro_batch_size,
args.num_workers, drop_last=False)
# Run evaluation.
evaluate_and_print_results(args.task, dataloader, model, eval_metric, args)
print_rank_0('done :-)')
if __name__ == '__main__':
initialize_megatron(extra_args_provider=get_tasks_args)
args = get_args()
if args.num_layers_per_virtual_pipeline_stage is not None:
print("Interleaved pipeline schedule is not yet supported for downstream tasks.")
exit()
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/evaluate_zeroshot_gpt.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import json
import os
import numpy as np
import sys
import torch
from datasets import load_dataset, load_metric
from tqdm import tqdm
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from utils import comm
from utils import gpt_decoder
from utils import profiler
from utils.parallel_gpt import ParallelGPT
from utils.gpt_fp8 import GPTFp8
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--ft_model_location', type=str,
default='/models/GPT/HF/gpt2-xl/c-models')
parser.add_argument('--hf_model_location', type=str,
default='/models/GPT/HF/gpt2-xl/')
parser.add_argument('--summarize', action='store_true')
parser.add_argument('--test_hf', action='store_true')
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16', 'fp8'], default='fp32')
parser.add_argument("--cache_path", type=str, default="/workdir/datasets/ccdv/")
parser.add_argument("--max_ite", type=int, default=20)
parser.add_argument("--ft_use_hf_config", action="store_true",
help="use the hyper-parameters from the hf model")
parser.add_argument('--lib_path', type=str, default='./lib/libth_transformer.so',
help='path to the pyt_fastertransformer dynamic lib file.')
parser.add_argument('--tensor_para_size', type=int, default=1,
help='tensor parallel size')
parser.add_argument('--pipeline_para_size', type=int, default=1,
help='pipeline parallel size')
parser.add_argument(
'--weights_data_type', type=str, default='fp32', choices=['fp32', 'fp16'],
help='Data type of FT checkpoint weights')
parser.add_argument(
'--int8_mode', type=int, default=0, choices=[0, 1],
help='The level of quantization to perform.'
' 0: No quantization. All computation in data_type'
' 1: Quantize weights to int8, all compute occurs in fp16/bf16. Not supported when data_type is fp32')
parser.add_argument(
'--use_gpt_decoder_ops', action='store_true',
help='Use separate decoder FT operators instead of end-to-end model op.')
parser.add_argument(
'--use_fp32_to_compute_logit', action='store_true',
help='Use FP32 data type for computing logit values when using gpt decoder ops. '
'FT end-to-end GPT op always uses FP32 data type when computing logit.')
parser.add_argument(
'--rougeLsum_threshold', type=float, default=None,
help='Threshold of FT rougeLsum score')
parser.add_argument(
'--verbose', action='store_true', help='Print all summary result.')
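    # Example invocation (paths are illustrative):
    #   python gpt_summarization.py --summarize --test_hf --max_ite 20 --data_type fp16 \
    #       --ft_model_location /models/GPT/HF/gpt2-xl/c-models \
    #       --hf_model_location /models/GPT/HF/gpt2-xl/ --lib_path ./lib/libth_transformer.so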
args = parser.parse_args()
    np.random.seed(0)  # the rouge metric uses sampling to compute the score
comm.initialize_model_parallel(args.tensor_para_size, args.pipeline_para_size)
rank = comm.get_rank()
summarize = args.summarize
test_hf = args.test_hf
ft_model_location = args.ft_model_location
hf_model_location = args.hf_model_location
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token
dataset_cnn = load_dataset("ccdv/cnn_dailymail", '3.0.0', cache_dir=args.cache_path)
hf_config = json.load(open(os.path.join(hf_model_location, 'config.json'), 'r'))
ft_config = None
head_num = hf_config['n_head']
layer_num = hf_config['n_layer']
start_id = hf_config['bos_token_id']
end_id = hf_config['eos_token_id']
size_per_head = hf_config['n_embd'] // head_num
if not args.ft_use_hf_config:
ft_config = configparser.ConfigParser()
        assert ft_config.read(os.path.join(ft_model_location, '1-gpu/config.ini')) != [], "[ERROR] failed to read the model's config.ini"
head_num = ft_config.getint('gpt', 'head_num')
layer_num = ft_config.getint('gpt', 'num_layer')
start_id = ft_config.getint('gpt', 'start_id') # TODO: get this from the tokenizer
end_id = ft_config.getint('gpt', 'end_id') # TODO: get this from the tokenizer
size_per_head = ft_config.getint('gpt', 'size_per_head')
if summarize:
top_k = 2
output_len = 100
else:
top_k = 1
output_len = 256
if args.data_type == "fp8":
top_k = 1
top_p = 0.0
random_seed = 5
temperature = 1
max_seq_len = hf_config['n_ctx'] if args.ft_use_hf_config else ft_config.getint('gpt', 'max_pos_seq_len')
max_batch_size = 1
repetition_penalty = 1
vocab_size = 50257
if not args.ft_use_hf_config and ft_config.has_option('gpt', 'vocab_size'):
vocab_size = ft_config.getint('gpt', 'vocab_size', fallback=vocab_size)
tensor_para_size = args.tensor_para_size
pipeline_para_size = args.pipeline_para_size
lib_path = args.lib_path
ckpt_path = os.path.join(ft_model_location, f'{tensor_para_size}-gpu')
print(f"top_k: {top_k}")
print(f"top_p: {top_p}")
print(f"int8_mode: {args.int8_mode}")
print(f"random_seed: {random_seed}")
print(f"temperature: {temperature}")
print(f"max_seq_len: {max_seq_len}")
print(f"max_batch_size: {max_batch_size}")
print(f"repetition_penalty: {repetition_penalty}")
print(f"vocab_size: {vocab_size}")
print(f"tensor_para_size: {tensor_para_size}")
print(f"pipeline_para_size: {pipeline_para_size}")
print(f"lib_path: {lib_path}")
print(f"ckpt_path: {ckpt_path}")
print(f"hf_config: {hf_config}")
infer_decode_args = dict(
beam_width=1,
top_k=top_k * torch.ones(max_batch_size, dtype=torch.int32),
top_p=top_p * torch.ones(max_batch_size, dtype=torch.float32),
temperature=temperature * torch.ones(max_batch_size, dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(max_batch_size, dtype=torch.float32),
random_seed=random_seed * torch.ones(max_batch_size, dtype=torch.int64)
)
random_seed_tensor = random_seed * torch.ones([max_batch_size], dtype=torch.int64)
if args.data_type == 'fp8':
gpt = GPTFp8(head_num, size_per_head, vocab_size, start_id, end_id, layer_num,
max_seq_len, tensor_para_size, pipeline_para_size, lib_path=lib_path,
ckpt_path=ckpt_path, int8_mode=0, weights_data_type=args.weights_data_type)
else:
if not args.use_gpt_decoder_ops:
gpt = ParallelGPT(head_num, size_per_head, vocab_size, start_id, end_id, layer_num,
max_seq_len, tensor_para_size, pipeline_para_size, lib_path=lib_path,
inference_data_type=args.data_type, int8_mode=args.int8_mode,
weights_data_type=args.weights_data_type)
if not gpt.load(ckpt_path=ckpt_path):
print("[WARNING] Checkpoint file not found. Model loading is skipped.")
else:
gpt = gpt_decoder.Gpt(
num_heads=head_num,
size_per_head=size_per_head,
num_layers=layer_num,
vocab_size=vocab_size,
start_id=start_id,
end_id=end_id,
tensor_para_size=tensor_para_size,
pipeline_para_size=pipeline_para_size,
lib_path=lib_path,
max_seq_len=max_seq_len,
                int8_mode=args.int8_mode,
inference_data_type=args.data_type,
weights_data_type=args.weights_data_type,
use_fp32_to_compute_logit=args.use_fp32_to_compute_logit)
gpt.load(ckpt_path, args.data_type)
if (test_hf and summarize) or not summarize:
model = GPT2LMHeadModel.from_pretrained(hf_model_location)
model.cuda()
if args.data_type == 'fp16':
model.half()
elif args.data_type == 'bf16':
model.bfloat16()
def summarize_ft_e2e(datapoint):
if summarize:
line = datapoint['article'] + ' TL;DR: '
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_encoded = tokenizer.encode(line, return_tensors='pt')
if summarize:
line_encoded = line_encoded[:, -923:]
else:
line_encoded = line_encoded[:, -768:]
line_encoded = line_encoded.type(torch.int32)
with torch.no_grad():
output, ft_output_len = gpt(line_encoded, torch.IntTensor([len(line_encoded[0])]),
output_len,
return_output_length=True,
**infer_decode_args)
tokens = output[0][0][len(line_encoded[0]):ft_output_len[0]].cpu().numpy()
output_lines = tokenizer.decode(output[0][0][len(line_encoded[0]):ft_output_len[0]])
output_lines = ".".join(output_lines.split('.')[:4]) + "."
return output_lines, tokens
def summarize_ft_sep(datapoint):
if summarize:
line = datapoint['article'] + ' TL;DR: '
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_encoded = tokenizer.encode(line, return_tensors='pt')
if summarize:
line_encoded = line_encoded[:, -923:]
else:
line_encoded = line_encoded[:, -768:]
line_encoded = line_encoded.type(torch.int32).to(gpt.device)
input_lengths = torch.tensor([len(line_encoded[0])], dtype=torch.int32, device=gpt.device)
with torch.no_grad():
output_dict = gpt.generate(input_token_ids=line_encoded,
input_lengths=input_lengths,
gen_length=output_len,
eos_token_id=tokenizer.eos_token_id,
return_output_length=True,
**infer_decode_args)
output_token_ids = output_dict['output_token_ids']
output_lengths = output_dict['output_lengths']
tokens = output_token_ids[0, 0, input_lengths[0]:output_lengths[0]]
output_lines = tokenizer.decode(tokens)
output_lines = ".".join(output_lines.split('.')[:4]) + "."
return output_lines, tokens.cpu().numpy()
summarize_ft = summarize_ft_e2e if not args.use_gpt_decoder_ops else summarize_ft_sep
def summarize_hf(datapoint):
if summarize:
line = datapoint['article'] + ' TL;DR: '
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_encoded = tokenizer.encode(line, return_tensors='pt')
if summarize:
line_encoded = line_encoded[:, -923:]
else:
line_encoded = line_encoded[:, -768:]
line_encoded = line_encoded.cuda()
with torch.no_grad():
output = model.generate(line_encoded,
max_length=len(line_encoded[0]) + output_len,
top_k=top_k,
temperature=temperature,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id)
tokens = output[0][len(line_encoded[0]):].cpu().numpy()
output_lines = tokenizer.decode(output[0][len(line_encoded[0]):])
output_lines = ".".join(output_lines.split('.')[:4]) + "."
return output_lines, tokens
if summarize:
datapoint = dataset_cnn['test'][0]
summary, _ = summarize_ft(datapoint)
print('---------------------------------------------------------')
print('FT Generated : ')
print(' Article : ', datapoint['article'])
print('\n Highlights : ', datapoint['highlights'])
print('\n Summary : ', summary)
print('---------------------------------------------------------')
if test_hf:
summary, _ = summarize_hf(datapoint)
print('---------------------------------------------------------')
print('HF Generated : ')
print(' Article : ', datapoint['article'])
print('\n Highlights : ', datapoint['highlights'])
print('\n Summary : ', summary)
print('---------------------------------------------------------')
if summarize:
metric_ft = load_metric("rouge")
metric_hf = load_metric("rouge")
else:
tokens = []
for data_point_idx in tqdm(range(1, 11490, int(11490 / args.max_ite))):
try:
datapoint = dataset_cnn['test'][data_point_idx]
profiler.start('ft')
summary_ft, tokens_ft = summarize_ft(datapoint)
profiler.stop('ft')
if (test_hf and summarize) or not summarize:
profiler.start('hf')
summary_hf, tokens_hf = summarize_hf(datapoint)
profiler.stop('hf')
if rank == 0:
if summarize:
metric_ft.add_batch(predictions=[summary_ft], references=[datapoint['highlights']])
if test_hf:
metric_hf.add_batch(predictions=[summary_hf], references=[datapoint['highlights']])
else:
assert test_hf
tokens.append((tokens_ft, tokens_hf))
if args.verbose:
print('-' * 100)
print('FT Summary:', summary_ft)
if test_hf:
print('HF Summary:', summary_hf)
except Exception:
print('Error with datapoint:', data_point_idx)
def compute_exact_match(tokens, n_tokens=[1, 10, 25, 50, 100, 150, 200, 250]):
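# For each prefix length t, count how often the FT and HF generations agree exactly on the first t
# tokens; only pairs where both outputs are longer than t tokens are counted.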
em_metrics = []
for t in n_tokens:
errors = 0
total = 0
for ft_tokens, hf_tokens in tokens:
if len(ft_tokens) > t and len(hf_tokens) > t:
total = total + 1
if not np.array_equal(ft_tokens[:t], hf_tokens[:t]):
errors = errors + 1
if total > 0:
print(f"{t}-token exact match acc: {100*(1-errors/total):.2f}")
em_metrics.append(1 - errors / total)
else:
em_metrics.append(np.nan)
return em_metrics
if rank == 0:
if summarize:
computed_metrics_ft = metric_ft.compute()
if test_hf:
computed_metrics_hf = metric_hf.compute()
print(f'Hugging Face (total latency: {profiler.elapsed_time_in_sec("hf")} sec)')
for key in computed_metrics_hf.keys():
print(f'{key} : {computed_metrics_hf[key].mid[2]*100}')
print(f'Faster Transformers (total latency: {profiler.elapsed_time_in_sec("ft")} sec)')
for key in computed_metrics_ft.keys():
print(f'{key} : {computed_metrics_ft[key].mid[2]*100}')
if args.rougeLsum_threshold is not None:
assert computed_metrics_ft["rougeLsum"].mid[2]*100 >= args.rougeLsum_threshold, "[INFO] TEST FAIL !"
print(f"[INFO] TEST PASS !")
else:
em_metrics = compute_exact_match(tokens)
if args.verbose:
profiler.summary()
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/gpt_summarization.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
from examples.tensorflow.gpt.utils import gpt_token_encoder as encoder
import fire
import numpy as np
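# Decode a file of FasterTransformer output token ids (one row per batch entry) back into text with
# the GPT-2 BPE tokenizer; when max_input_length is given, everything after the first end-of-text
# id (50256) found past that position is dropped.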
def convert_token(
vocab_file="../models/gpt2-vocab.json",
bpe_file="../models/gpt2-merges.txt",
out_file="out",
max_input_length=-1,
text_out_file=None,
):
enc = encoder.get_encoder(vocab_file, bpe_file)
tokens_batch = np.loadtxt(out_file, dtype=np.int32)
end_id = 50256
outputs = []
if tokens_batch.ndim == 1:
tokens_batch = tokens_batch.reshape([1, -1])
for batch_num, tokens in enumerate(tokens_batch):
if max_input_length > -1:
end_index = np.where(tokens[max_input_length:] == end_id)[0]
else:
end_index = []
end_pos = len(tokens)
if len(end_index) > 0:
end_pos = end_index[0]
print(f"[INFO] batch {batch_num}: \n[input]{enc.decode(tokens[:16])}\n[output]{enc.decode(tokens[16:end_pos])}")
outputs.append(enc.decode(tokens[:end_pos]))
if text_out_file is not None:
with open(text_out_file, "w+") as f:
f.writelines("\n".join(outputs))
# return tokens_batch
if __name__ == "__main__":
fire.Fire(convert_token)
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/gpt_token_converter.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
def get_world_size():
return dist.get_world_size() if dist.is_initialized() else 1
def get_rank():
return dist.get_rank() if dist.is_initialized() else 0
def get_device_count():
return torch.cuda.device_count()
def get_device():
return torch.cuda.current_device()
class ModelParallelGroup:
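# Ranks are laid out tensor-parallel-major: global rank = pp_rank * tp_size + tp_rank,
# hence tp_rank = rank % tp_size and pp_rank = rank // tp_size in __init__ below.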
def __init__(self, tensor_para_size: int, pipeline_para_size: int):
self.check_parallel_size_validity(tensor_para_size, pipeline_para_size)
rank = get_rank()
device = rank % get_device_count()
torch.cuda.set_device(device)
# tp: tensor parallel, pp: pipeline parallel.
self.tp_size = tensor_para_size
self.tp_rank = rank % self.tp_size
self.pp_size = pipeline_para_size
self.pp_rank = rank // self.tp_size
@staticmethod
def check_parallel_size_validity(tensor_para_size, pipeline_para_size):
world_size = get_world_size()
if world_size != tensor_para_size * pipeline_para_size:
raise ValueError(
f'[ERROR] Invalid tensor/pipeline parallel configuration. '
f'world_size({world_size}) != tensor_para_size({tensor_para_size})'
f' * pipeline_para_size({pipeline_para_size})')
@property
def is_pipeline_first(self):
return self.pp_rank == 0
@property
def is_pipeline_last(self):
return self.pp_rank == self.pp_size - 1
_model_para_group = None
def is_model_parallel_initailized():
return _model_para_group is not None
def initialize_model_parallel(tensor_para_size: int,
pipeline_para_size: int,
backend=dist.Backend.MPI):
if tensor_para_size == 1 and pipeline_para_size == 1:
return
assert torch.cuda.is_available()
assert not is_model_parallel_initailized(), \
'model parallel group has already been initialized.'
print('Initializing tensor and pipeline parallel...')
dist.init_process_group(backend=backend)
global _model_para_group
_model_para_group = ModelParallelGroup(tensor_para_size, pipeline_para_size)
def get_tensor_para_rank():
if _model_para_group is None:
return 0
return _model_para_group.tp_rank
def get_tensor_para_size():
if _model_para_group is None:
return 1
return _model_para_group.tp_size
def get_pipeline_para_rank():
if _model_para_group is None:
return 0
return _model_para_group.pp_rank
def get_pipeline_para_size():
if _model_para_group is None:
return 1
return _model_para_group.pp_size
def is_pipeline_group_first():
return _model_para_group is None or _model_para_group.is_pipeline_first
def is_pipeline_group_last():
return _model_para_group is None or _model_para_group.is_pipeline_last
def destroy():
dist.destroy_process_group()
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/comm.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import datetime
import json
import multiprocessing
import pathlib
import re
import shutil
import sys
import numpy as np
import torch # pytype: disable=import-error
# verify if root package is in PYTHONPATH
__root_package_path__ = pathlib.Path(__file__).parent.parent.parent.parent.parent.absolute().as_posix()
if __root_package_path__ not in sys.path:
print(
f"[ERROR] add project root directory to your PYTHONPATH with "
f"'export PYTHONPATH={__root_package_path__}:${{PYTHONPATH}}'"
)
from examples.pytorch.gpt.utils.gpt import DEFAULT_START_TAG, DEFAULT_END_TAG, OPENAI_GPT2_START_ID, OPENAI_GPT2_END_ID
from examples.pytorch.utils import torch2np, safe_transpose, cpu_map_location, gpu_map_location, WEIGHT2DTYPE
def _inject_model_parallel_rank(
filepath,
tensor_model_parallel_size=1,
pipeline_model_parallel_size=1,
tensor_model_parallel_rank=0,
pipeline_model_parallel_rank=0,
):
"""
Injects tensor/pipeline model parallel ranks into the filepath.
Does nothing if not using model parallelism.
"""
filepath = pathlib.Path(filepath)
if tensor_model_parallel_size > 1 or pipeline_model_parallel_size > 1:
# filepath needs to be updated to include mp_rank
if pipeline_model_parallel_size is None or pipeline_model_parallel_size == 1:
filepath = filepath.parent / f"mp_rank_{tensor_model_parallel_rank:02d}" / filepath.name
else:
filepath = (
filepath.parent /
f"mp_rank_{tensor_model_parallel_rank:02d}_{pipeline_model_parallel_rank:03d}" /
filepath.name
)
if not filepath.exists():
filepath = (
filepath.parent /
f"tp_rank_{tensor_model_parallel_rank:02d}_pp_rank_{pipeline_model_parallel_rank:03d}" /
filepath.name
)
return filepath
else:
if filepath.exists():
return filepath
else:
return filepath.parent / "mp_rank_00" / filepath.name
def _create_model_training_args_for_checkpoint_version_0(args, model_00):
model_training_args = argparse.Namespace()
if args.head_num is None or args.trained_tensor_parallel_size is None:
raise ValueError(
"Provided checkpoint have missing training args. "
"Thus it is required to provide -head_num and -trained_tensor_parallel_size CLI arguments"
)
model_training_args.num_attention_heads = args.head_num
model_training_args.tensor_model_parallel_size = args.trained_tensor_parallel_size
# megatron ckpt_ver=0 only supports pipeline_parallel_size = 1
model_training_args.pipeline_model_parallel_size = 1
model_training_args.max_position_embeddings = \
model_00["model"]["language_model"]["embedding"]["position_embeddings"]["weight"].shape[0]
model_training_args.hidden_size = \
model_00["model"]["language_model"]["embedding"]["position_embeddings"]["weight"].shape[1]
model_training_args.ffn_hidden_size = 4 * model_training_args.hidden_size
def get_layer_num_from_weights(model_keys):
layer_num = 1
for key in model_keys:
if re.search(r'\d+', key) is not None:
layer_num = max(int(re.search(r'\d+', key).group()), layer_num)
return layer_num + 1
model_training_args.num_layers = \
get_layer_num_from_weights(model_00["model"]["language_model"]['transformer'].keys())
model_training_args.layernorm_epsilon = 1e-6
return model_training_args
# This tool is used to support the new megatron model trained by pipeline parallel + tensor parallel
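# When the training tensor-parallel size exceeds the inference one, each output shard is produced by
# merging `factor` training shards (merge_and_convert_process); otherwise each training shard is split
# into `factor` output shards (split_and_convert_process). See convert_checkpoint below.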
def merge_and_convert_process(i, pipeline_para_rank, saved_dir, factor, key, model_training_args, transformer_model_list, ckpt_ver, np_weight_data_type):
saved_dir = pathlib.Path(saved_dir)
if key.find("layers.") != -1:
layer_index = (int)(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pipeline_para_rank * model_training_args.num_layers // model_training_args.pipeline_model_parallel_size))
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
if saved_key.find("adaptor1") != -1:
saved_key = saved_key.replace("adaptor1", "after_attention_adapter")
if saved_key.find("adaptor2") != -1:
saved_key = saved_key.replace("adaptor2", "after_ffn_adapter")
else:
saved_key = key
major_device = transformer_model_list[0][key].device
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("adaptor1.dense_4h_to_h.bias") != -1
or key.find("adaptor2.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
or key.find("self_attention.query_key_value.fp_linear.fi") != -1
or key.find("self_attention.query_key_value.fp_linear.fw") != -1
or key.find("self_attention.query_key_value.fp_linear.fo") != -1
or key.find("self_attention.dense.fp_linear.fi") != -1
or key.find("self_attention.dense.fp_linear.fw") != -1
or key.find("self_attention.dense.fp_linear.fo") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.fi") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.fw") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.fo") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.fi") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.fw") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.fo") != -1):
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir / f"model.{saved_key}.bin"
val = safe_transpose(transformer_model_list[0][key])
val = torch2np(val, np_weight_data_type)
val = np.squeeze(val)
val.tofile(saved_path)
elif (key.find("attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1
or key.find("adaptor1.dense_4h_to_h.weight") != -1
or key.find("adaptor2.dense_4h_to_h.weight") != -1):
vals = [
safe_transpose(transformer_model_list[k][key]).float().to(major_device)
for k in range(factor)
]
val = torch.cat(vals, dim=0)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
elif (key.find("mlp.dense_h_to_4h.weight") != -1
or key.find("adaptor1.dense_h_to_4h.weight") != -1
or key.find("adaptor2.dense_h_to_4h.weight") != -1
or key.find("mlp.dense_h_to_4h.bias") != -1
or key.find("adaptor1.dense_h_to_4h.bias") != -1
or key.find("adaptor2.dense_h_to_4h.bias") != -1):
vals = [
safe_transpose(transformer_model_list[k][key]).float().to(major_device)
for k in range(factor)
]
val = torch.cat(vals, dim=-1)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key]).float()
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads // model_training_args.tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.permute(1, 0, 2)
val = val.reshape(3, local_dim)
vals.append(val.to(major_device))
val = torch.cat(vals, dim=-1)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key]).float()
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads
size_per_head = hidden_dim // head_num
head_num = head_num // model_training_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.permute(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
vals.append(val.to(major_device))
val = torch.cat(vals, dim=-1)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
elif (key.find("self_attention.query_key_value.fp_linear.di") != -1
or key.find("self_attention.query_key_value.fp_linear.dw") != -1
or key.find("self_attention.query_key_value.fp_linear.do") != -1
or key.find("self_attention.dense.fp_linear.di") != -1
or key.find("self_attention.dense.fp_linear.dw") != -1
or key.find("self_attention.dense.fp_linear.do") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.di") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.dw") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.do") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.di") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.dw") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.do") != -1
or key.find("_amax") != -1):
pass
else:
print(f"[ERROR] cannot find key '{key}'")
def split_and_convert_process(i, pipeline_para_rank, saved_dir, factor, key, model_training_args, transformer_model_list, ckpt_ver, np_weight_data_type):
val = safe_transpose(transformer_model_list[0][key])
val = torch2np(val, np_weight_data_type)
if key.find("layers.") != -1:
layer_index = (int)(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pipeline_para_rank * model_training_args.num_layers // model_training_args.pipeline_model_parallel_size))
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
if saved_key.find("adaptor1") != -1:
saved_key = saved_key.replace("adaptor1", "after_attention_adapter")
if saved_key.find("adaptor2") != -1:
saved_key = saved_key.replace("adaptor2", "after_ffn_adapter")
else:
saved_key = key
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("adaptor1.dense_4h_to_h.bias") != -1
or key.find("adaptor2.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
or key.find("self_attention.query_key_value.fp_linear.fi") != -1
or key.find("self_attention.query_key_value.fp_linear.fw") != -1
or key.find("self_attention.query_key_value.fp_linear.fo") != -1
or key.find("self_attention.dense.fp_linear.fi") != -1
or key.find("self_attention.dense.fp_linear.fw") != -1
or key.find("self_attention.dense.fp_linear.fo") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.fi") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.fw") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.fo") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.fi") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.fw") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.fo") != -1):
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir / f"model.{saved_key}.bin"
val.tofile(saved_path.as_posix())
elif (key.find("attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1
or key.find("adaptor1.dense_4h_to_h.weight") != -1
or key.find("adaptor2.dense_4h_to_h.weight") != -1):
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (key.find("mlp.dense_h_to_4h.weight") != -1
or key.find("adaptor1.dense_h_to_4h.weight") != -1
or key.find("adaptor2.dense_h_to_4h.weight") != -1
or key.find("mlp.dense_h_to_4h.bias") != -1
or key.find("adaptor1.dense_h_to_4h.bias") != -1
or key.find("adaptor2.dense_h_to_4h.bias") != -1):
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.bias") != -1:
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads // model_training_args.tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads
size_per_head = hidden_dim // head_num
head_num = head_num // model_training_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (key.find("self_attention.query_key_value.fp_linear.di") != -1
or key.find("self_attention.query_key_value.fp_linear.dw") != -1
or key.find("self_attention.query_key_value.fp_linear.do") != -1
or key.find("self_attention.dense.fp_linear.di") != -1
or key.find("self_attention.dense.fp_linear.dw") != -1
or key.find("self_attention.dense.fp_linear.do") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.di") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.dw") != -1
or key.find("mlp.dense_h_to_4h.fp_linear.do") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.di") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.dw") != -1
or key.find("mlp.dense_4h_to_h.fp_linear.do") != -1
or key.find("_amax") != -1):
pass
else:
print(f"[ERROR] cannot find key '{key}'")
def _get_checkpoint_name(checkpoint_dir):
checkpoint_dir = pathlib.Path(checkpoint_dir)
patterns = [
"model_optim_rng.pt", # older megatron checkpoints
"*last.ckpt", # newer format of checkpoints
]
for pattern in patterns:
model_files = sorted(list(checkpoint_dir.rglob(pattern)))
if model_files:
return model_files[0].name
raise ValueError(f"Could not find checkpoint files in {checkpoint_dir}")
def convert_checkpoint(args):
saved_dir = pathlib.Path(args.saved_dir) / f"{args.infer_gpu_num:d}-gpu"
if saved_dir.exists():
print(f"[ERROR] Remove {saved_dir} target directory before running conversion")
sys.exit(1)
saved_dir.mkdir(parents=True)
if args.vocab_path:
shutil.copy(args.vocab_path, (saved_dir / "vocab.json").as_posix())
if args.merges_path:
shutil.copy(args.merges_path, (saved_dir / "merges.txt").as_posix())
load_checkpoints_to_cpu = bool(args.load_checkpoints_to_cpu)
map_location_fn = cpu_map_location if load_checkpoints_to_cpu else gpu_map_location
checkpoints_dir = pathlib.Path(args.in_file)
checkpoint_name = _get_checkpoint_name(checkpoints_dir)
# load position_embedding from rank 0
checkpoints_paths = sorted(checkpoints_dir.rglob(checkpoint_name))
if not checkpoints_paths:
print(f"[ERROR] Cannot find checkpoint in {checkpoints_dir}.")
exit(1)
model_00 = torch.load(checkpoints_paths[0].as_posix(), map_location=map_location_fn)
if "hyper_parameters" in list(model_00.keys()):
print("Use nemo_ckpt_converter.py script for conversion of this checkpoint")
exit(1)
elif "args" in list(model_00.keys()):
checkpoint_version = model_00["checkpoint_version"]
model_training_args = model_00["args"]
megatron_gpt_key = "encoder"
else:
checkpoint_version = 0
model_training_args = _create_model_training_args_for_checkpoint_version_0(args, model_00)
megatron_gpt_key = "transformer"
with (saved_dir / "args.txt").open("w") as training_args_file:
for k, v in vars(model_training_args).items():
training_args_file.write(f"{k}:{v}\n")
np_weight_data_type = WEIGHT2DTYPE[args.weight_data_type]
val = model_00["model"]["language_model"]["embedding"]["position_embeddings"]["weight"]
val = torch2np(val, np_weight_data_type)
val.tofile((saved_dir / "model.wpe.bin").as_posix())  # position-embedding table, not a GEMM weight, so no transpose is needed
del model_00
w_e_list = []
training_tensor_para_size = model_training_args.tensor_model_parallel_size
training_pipeline_para_size = model_training_args.pipeline_model_parallel_size
inference_tensor_para_size = args.infer_gpu_num
model_weights_paths = [
[
_inject_model_parallel_rank(
checkpoints_dir / checkpoint_name,
tensor_model_parallel_size=training_tensor_para_size,
pipeline_model_parallel_size=training_pipeline_para_size,
tensor_model_parallel_rank=tp_rank,
pipeline_model_parallel_rank=pp_rank,
)
for pp_rank in range(training_pipeline_para_size)
]
for tp_rank in range(training_tensor_para_size)
]
if training_tensor_para_size > inference_tensor_para_size:
assert training_tensor_para_size % inference_tensor_para_size == 0
is_merge_ckpt = True
factor = int(training_tensor_para_size / inference_tensor_para_size)
else:
assert inference_tensor_para_size % training_tensor_para_size == 0
is_merge_ckpt = False
factor = int(inference_tensor_para_size / training_tensor_para_size)
main_loop = min(training_tensor_para_size, inference_tensor_para_size)
vocab_size_list = [0 for i in range(main_loop)]
# quantization max ranges
quant_max_ranges = dict()
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
pool = multiprocessing.Pool(args.processes)
has_adapters = False
for i in range(main_loop):
for j in range(training_pipeline_para_size):
transformer_models = []
if is_merge_ckpt:
for k in range(factor):
m = torch.load(model_weights_paths[i * factor + k][j].as_posix(), map_location=map_location_fn)
if not has_adapters:
has_adapters = any("adaptor" in key for key in m['model']['language_model'][megatron_gpt_key].keys())
weights_dict = m["model"]["language_model"][megatron_gpt_key]
for name, weight in weights_dict.items():
if "_amax" in name:
# fuse qkv scale
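# the separate q/k/v amax entries are folded into one qkv entry; the running torch.max below
# keeps the largest value across q/k/v and across checkpoint shards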
if "matmul_q_input_quantizer" in name:
name = name.replace("_q_", "_qkv_")
if "matmul_k_input_quantizer" in name:
name = name.replace("_k_", "_qkv_")
if "matmul_v_input_quantizer" in name:
name = name.replace("_v_", "_qkv_")
if name in quant_max_ranges:
quant_max_ranges[name] = torch.max(torch.max(torch.abs(weight)),quant_max_ranges[name])
else:
quant_max_ranges[name] = torch.max(torch.abs(weight))
transformer_models.append(m["model"]["language_model"][megatron_gpt_key])
if j == 0:
vocab_size_list[i] = m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"].shape[0]
w_e_list.append(torch2np(m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"], np_weight_data_type))
else:
m = torch.load(model_weights_paths[i][j].as_posix(), map_location=map_location_fn)
if not has_adapters:
has_adapters = any("adaptor" in key for key in m['model']['language_model'][megatron_gpt_key].keys())
if j == 0:
vocab_size_list[i] = m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"].shape[0]
w_e_list.append(torch2np(
m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"],
np_weight_data_type
))
weights_dict = m["model"]["language_model"][megatron_gpt_key]
for name, weight in weights_dict.items():
if "_amax" in name:
# fuse qkv scale
if "matmul_q_input_quantizer" in name:
name = name.replace("_q_", "_qkv_")
if "matmul_k_input_quantizer" in name:
name = name.replace("_k_", "_qkv_")
if "matmul_v_input_quantizer" in name:
name = name.replace("_v_", "_qkv_")
if name in quant_max_ranges:
quant_max_ranges[name] = torch.max(torch.max(torch.abs(weight)),quant_max_ranges[name])
else:
quant_max_ranges[name] = torch.max(torch.abs(weight))
transformer_models.append(m["model"]["language_model"][megatron_gpt_key])
pool.starmap(
merge_and_convert_process if is_merge_ckpt else split_and_convert_process,
[
(
i,
j,
saved_dir,
factor,
k,
model_training_args,
transformer_models,
checkpoint_version,
np_weight_data_type,
)
for (k, v) in transformer_models[0].items()
],
)
pool.close()
pool.join()
torch.cuda.synchronize()
# calculate quant scales for fp8
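# Each recorded amax is converted into a scale of 480.0 / amax (stored as fp32) and written to the
# matching model.*.fp_linear.{fi,fw,fo}.bin / model.*.softmax.fo.bin file below.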
for key, val in quant_max_ranges.items():
key = key.replace("self_attention", "attention")
if "attention.query_key_value._input_quantizer._amax" in key:
saved_key = key.replace("_input_quantizer._amax", "fp_linear.fi")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "attention.query_key_value._weight_quantizer._amax" in key:
saved_key = key.replace("_weight_quantizer._amax", "fp_linear.fw")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "attention.matmul_qkv_input_quantizer._amax" in key:
saved_key = key.replace("matmul_qkv_input_quantizer._amax", "query_key_value.fp_linear.fo")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "attention.matmul_a_input_quantizer._amax" in key:
saved_key = key.replace("attention.matmul_a_input_quantizer._amax", "attention.softmax.fo")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "attention.dense._input_quantizer._amax" in key:
saved_key = key.replace("_input_quantizer._amax", "fp_linear.fi")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "attention.dense._weight_quantizer._amax" in key:
saved_key = key.replace("_weight_quantizer._amax", "fp_linear.fw")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "add_local_input_quantizers.0._amax" in key:
saved_key = key.replace("add_local_input_quantizers.0._amax", "attention.dense.fp_linear.fo")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "mlp.dense_h_to_4h._input_quantizer._amax" in key:
saved_key = key.replace("_input_quantizer._amax", "fp_linear.fi")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "mlp.dense_h_to_4h._weight_quantizer._amax" in key:
saved_key = key.replace("_weight_quantizer._amax", "fp_linear.fw")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "mlp.dense_4h_to_h._input_quantizer._amax" in key:
saved_key = key.replace("_input_quantizer._amax", "fp_linear.fi")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "mlp.dense_4h_to_h._weight_quantizer._amax" in key:
saved_key = key.replace("_weight_quantizer._amax", "fp_linear.fw")
saved_path = saved_dir / f"model.{saved_key}.bin"
if "add_local_input_quantizers.2._amax" in key:
saved_key = key.replace("add_local_input_quantizers.2._amax", "mlp.dense_4h_to_h.fp_linear.fo")
saved_path = saved_dir / f"model.{saved_key}.bin"
val = (np.array(np.array(480.0, dtype=np.float32) / torch2np(val, np_weight_data_type))).astype(np.float32)
if "attention.query_key_value.fp_linear.fw" in saved_key:
print(saved_key, val)
val.tofile(saved_path.as_posix())
np.concatenate(w_e_list, axis=0).tofile((saved_dir / "model.wte.bin").as_posix())
# save vocab_size
full_vocab_size = sum(vocab_size_list)
if not hasattr(model_training_args, "padded_vocab_size"):
model_training_args.padded_vocab_size = full_vocab_size
# Configuration for the model (loaded by Triton backends)
config = configparser.ConfigParser()
config["gpt"] = {}
if args.vocab_path:
vocab_path = pathlib.Path(args.vocab_path)
with vocab_path.open("r") as vocab_file:
vocab = json.load(vocab_file)
start_id, end_id = vocab[DEFAULT_START_TAG], vocab[DEFAULT_END_TAG]
else:
# hard coded values from english gpt_vocab.json file
start_id, end_id = str(OPENAI_GPT2_START_ID), str(OPENAI_GPT2_END_ID)
try:
config["gpt"]["model_name"] = "gpt"
config["gpt"]["head_num"] = str(model_training_args.num_attention_heads)
config["gpt"]["size_per_head"] = str(model_training_args.hidden_size // model_training_args.num_attention_heads)
config["gpt"]["inter_size"] = str(model_training_args.ffn_hidden_size)
config["gpt"]["num_layer"] = str(model_training_args.num_layers)
config["gpt"]["max_pos_seq_len"] = str(model_training_args.max_position_embeddings)
config["gpt"]["vocab_size"] = str(model_training_args.padded_vocab_size)
config["gpt"]["has_adapters"] = str(has_adapters)
config['gpt']['adapter_inter_size'] = str(model_training_args.project_size) if has_adapters else str(0)
config["gpt"]["layernorm_eps"] = str(model_training_args.layernorm_epsilon)
config["gpt"]["start_id"] = str(start_id)
config["gpt"]["end_id"] = str(end_id)
config["gpt"]["weight_data_type"] = args.weight_data_type
config["gpt"]["tensor_para_size"] = str(args.infer_gpu_num)
with open((saved_dir / f"config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
except Exception as e:
print(f"Fail to save the config in config.ini: {e}")
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--saved-dir", "-saved_dir", "-o", help="folder name of output files", required=True)
parser.add_argument(
"--in-file", "-in_file", "-i", help="file name of input checkpoint file", required=True
)
parser.add_argument(
"--infer-gpu-num", "-infer_gpu_num", "-i_g", type=int, help="How many gpus for inference", required=True
)
# -h_n and -t_g are needed when megatron_ckpt_version = 0, e.g. for the public Megatron 345M GPT model
parser.add_argument(
"--head-num",
"-head_num",
"-h_n",
type=int,
help="The number of heads, only needed when weight doesn't contain structure hyperparameters"
)
parser.add_argument(
"--trained-tensor-parallel-size",
"-trained_tensor_parallel_size",
"-t_g",
type=int,
help="the tensor parallel size for training"
)
parser.add_argument(
"--processes",
"-processes",
"-p",
type=int,
default=16,
help="How many processes to spawn for conversion",
)
parser.add_argument(
"--weight-data-type", "-weight_data_type", choices=["fp32", "fp16"], default="fp32", help=""
)
parser.add_argument(
"--load-checkpoints-to-cpu",
"-load_checkpoints_to_cpu",
"-cpu",
type=int,
choices=[0, 1],
default=1,
help="Whether to load model weights to CPU",
)
parser.add_argument(
"--vocab-path",
type=str,
help="Path to vocabulary file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--merges-path", type=str, help="Path to merges file to embed in FasterTransformer checkpoint", required=False
)
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
start_time = datetime.datetime.now()
convert_checkpoint(args)
run_time = datetime.datetime.now() - start_time
print(f"[INFO] Spent {run_time} (h:m:s) to convert the model")
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/megatron_fp8_ckpt_convert.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Important: try converting models with `huggingface_gpt_convert.py` before falling back to this script.
This script is intended for converting older Japanese GPT models from https://huggingface.co/rinna.
'''
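# Hypothetical usage sketch (the model id and output directory below are placeholders, not from the source):
# python huggingface_jp_gpt_convert.py -i rinna/japanese-gpt2-medium -o ./ft-jp-gpt -i_g 1 -weight_data_type fp32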
import argparse
import configparser
import multiprocessing
import numpy as np
from pathlib import Path
import torch
import os
import sys
from transformers import GPT2LMHeadModel # transformers-4.10.0-py3
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
sys.path.append(dir_path)
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def split_and_convert_process(i, saved_dir, factor, key, args, val):
if key.find("input_layernorm.weight") != -1 or key.find("input_layernorm.bias") != -1 or \
key.find("attention.dense.bias") != -1 or key.find("post_attention_layernorm.weight") != -1 or \
key.find("post_attention_layernorm.bias") != -1 or key.find("mlp.dense_4h_to_h.bias") != -1 or \
key.find("final_layernorm.weight") != -1 or key.find("final_layernorm.bias") != -1:
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir + "/model." + key + ".bin"
val.tofile(saved_path)
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(hidden_dim, 3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
else:
print("[ERROR] cannot find key '{}'".format(key))
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
ckpt_name = args.in_file
t_gpu_num = args.trained_gpu_num
i_gpu_num = args.infer_gpu_num
assert(i_gpu_num % t_gpu_num == 0)
factor = (int)(i_gpu_num / t_gpu_num)
# load position_embedding from rank 0
# model = torch.load(ckpt_name)
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = GPT2LMHeadModel.from_pretrained(args.in_file).to(torch_device)
hf_config = vars(model.config)
config = configparser.ConfigParser()
config["gpt"] = {}
config["gpt"]["model_name"] = "gpt" if hf_config["_name_or_path"] == '' else hf_config["_name_or_path"]
config["gpt"]["head_num"] = str(hf_config["n_head"])
n_embd = hf_config["n_embd"]
config["gpt"]["size_per_head"] = str(n_embd // hf_config["n_head"])
config["gpt"]["inter_size"] = str(n_embd * 4)
config['gpt']['max_pos_seq_len'] = str(hf_config['n_positions'])
config["gpt"]["num_layer"] = str(hf_config["n_layer"])
config["gpt"]["vocab_size"] = str(hf_config["vocab_size"])
config["gpt"]["start_id"] = str(hf_config["bos_token_id"])
config["gpt"]["end_id"] = str(hf_config["eos_token_id"])
config['gpt']['weight_data_type'] = args.weight_data_type
config["gpt"]["tensor_para_size"] = str(args.infer_gpu_num)
with open(saved_dir + "/config.ini", 'w') as configfile:
config.write(configfile)
np_weight_data_type = get_weight_data_type(args.weight_data_type)
huggingface_model_name_pattern = [
"ln_1.bias",
"ln_1.weight",
"attn.c_attn.bias",
"attn.c_attn.weight",
"attn.c_proj.bias",
"attn.c_proj.weight",
"ln_2.bias",
"ln_2.weight",
"mlp.c_fc.bias",
"mlp.c_fc.weight",
"mlp.c_proj.bias",
"mlp.c_proj.weight",
]
ft_model_name_pattern = [
"input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.bias",
"attention.query_key_value.weight",
"attention.dense.bias",
"attention.dense.weight",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h.weight",
"mlp.dense_4h_to_h.bias",
"mlp.dense_4h_to_h.weight",
]
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
pool = multiprocessing.Pool(args.processes)
for name, param in model.state_dict().items():
if name.find("weight") == -1 and name.find("bias") == -1:
continue
print(name)
if name == 'transformer.wpe.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wpe.bin")
elif name == 'transformer.wte.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wte.bin")
elif name == 'transformer.ln_f.bias':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.bias.bin")
elif name == 'transformer.ln_f.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.weight.bin")
elif name == 'lm_head.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.weight.bin")
else:
for i in range(len(huggingface_model_name_pattern)):
if name.find(huggingface_model_name_pattern[i]) != -1:
new_name = name.replace("transformer.h.", "layers.").replace(huggingface_model_name_pattern[i], ft_model_name_pattern[i])
pool.starmap(split_and_convert_process,
[(0, saved_dir, factor, new_name, args,
param.detach().cpu().numpy().astype(np_weight_data_type))], )
pool.close()
pool.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-saved_dir', '-o', type=str, help='folder name of output files', required=True)
parser.add_argument('-in_file', '-i', type=str, help='file name of input checkpoint file', required=True)
parser.add_argument('-trained_gpu_num', '-t_g', type=int, help='How many gpus were used for training', default=1)
parser.add_argument('-infer_gpu_num', '-i_g', type=int, help='How many gpus for inference', required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)", default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
split_and_convert(args)
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/huggingface_jp_gpt_convert.py
|
#!/usr/bin/env python3
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import pathlib
def main():
parser = argparse.ArgumentParser(
description="Script updating GPT config.ini hyper-parameters and requests parameters"
)
# config.ini path
parser.add_argument("--config-ini-path", required=True, help="Path to config.ini file to be updated")
# FT hyperparameters
parser.add_argument("--model-dir", type=str, required=True, help="Model path prefix")
parser.add_argument("--tensor-para-size", type=int, required=True, help="tensor parallelism size")
parser.add_argument("--pipeline-para-size", type=int, required=True, help="layer parallelism size")
parser.add_argument("--max-batch-size", type=int, default=8, help="batch size")
parser.add_argument("--max-seq-len", type=int, default=256, help="max sequence length")
parser.add_argument("--beam-width", type=int, default=1, help="beam width for beam search")
parser.add_argument("--data-type", type=str, default="fp32", help="data type", choices=["fp32", "fp16", "bf16"])
parser.add_argument(
"--sampling-top-k",
type=int,
default=1,
help="Candidate (k) value of top k sampling in decoding",
)
parser.add_argument(
"--sampling-top-p",
type=float,
default=0.0,
help="Probability (p) value of top p sampling in decoding",
)
parser.add_argument("--temperature", type=float, default=1.0, help="temperature of penalty")
parser.add_argument("--repetition-penalty", type=float, default=1.0, help="repetition_penalty")
parser.add_argument("--len-penalty", type=float, default=0.0, help="len_penalty")
parser.add_argument("--beam-search-diversity-rate", type=float, default=0.0, help="beam_search_diversity_rate")
# request
parser.add_argument("--request-batch-size", type=int, default=8, help="batch size")
parser.add_argument("--request-output-len", type=int, default=32, help="output length")
parser.add_argument("--model-name", type=str, default="gpt", help="model-name for testing")
args = parser.parse_args()
config_path = pathlib.Path(args.config_ini_path)
config = configparser.ConfigParser()
config.read(config_path)
config["ft_instance_hyperparameter"] = {
"max_batch_size": args.max_batch_size,
"max_seq_len": args.max_seq_len,
"beam_width": args.beam_width,
"top_k": args.sampling_top_k,
"top_p": args.sampling_top_p,
"temperature": args.temperature,
"tensor_para_size": args.tensor_para_size,
"pipeline_para_size": args.pipeline_para_size,
"data_type": args.data_type,
"sparse": 0,
"int8_mode": 0,
"enable_custom_all_reduce": 0,
"model_name": args.model_name,
"model_dir": args.model_dir,
"repetition_penalty": args.repetition_penalty,
"len_penalty": args.len_penalty,
"beam_search_diversity_rate": args.beam_search_diversity_rate,
}
config["request"] = {
"request_batch_size": args.request_batch_size,
"request_output_len": args.request_output_len,
"return_log_probs": "false",
"context_log_probs": "false",
"beam_width": args.beam_width,
"top_k": args.sampling_top_k,
"top_p": args.sampling_top_p,
"temperature": args.temperature,
"repetition_penalty": args.repetition_penalty,
"len_penalty": args.len_penalty,
"beam_search_diversity_rate": args.beam_search_diversity_rate,
}
with config_path.open("w") as config_file:
config.write(config_file)
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/update_gpt_config.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Convert huggingface GPT model. Use https://huggingface.co/gpt2 as demo.
'''
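# Hypothetical usage sketch (the output directory below is a placeholder, not from the source):
# python huggingface_gpt_convert.py -i gpt2 -o ../models/gpt2-ft -i_g 1 -weight_data_type fp32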
import argparse
import configparser
import multiprocessing
import numpy as np
from pathlib import Path
import torch
import os
import sys
from transformers import GPT2Model # transformers-4.10.0-py3
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
sys.path.append(dir_path)
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def split_and_convert_process(i, saved_dir,factor,key,args, val):
if key.find("input_layernorm.weight") != -1 or key.find("input_layernorm.bias") != -1 or \
key.find("attention.dense.bias") != -1 or key.find("post_attention_layernorm.weight") != -1 or \
key.find("post_attention_layernorm.bias") != -1 or key.find("mlp.dense_4h_to_h.bias") != -1 or \
key.find("final_layernorm.weight") != -1 or key.find("final_layernorm.bias") != -1:
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir + "/model." + key + ".bin"
val.tofile(saved_path)
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(hidden_dim, 3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
else:
print("[ERROR] cannot find key '{}'".format(key))
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
ckpt_name = args.in_file
t_gpu_num = args.trained_gpu_num
i_gpu_num = args.infer_gpu_num
assert(i_gpu_num % t_gpu_num == 0)
factor = (int)(i_gpu_num / t_gpu_num)
# load position_embedding from rank 0
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = GPT2Model.from_pretrained(args.in_file).to(torch_device)
hf_config = vars(model.config)
# NOTE: save parameters to config files (loaded by triton backends)
config = configparser.ConfigParser()
config["gpt"] = {}
try:
config["gpt"]["model_name"] = "gpt" if hf_config["_name_or_path"] == '' else hf_config["_name_or_path"]
config["gpt"]["head_num"] = str(hf_config["n_head"])
n_embd = hf_config["n_embd"]
config["gpt"]["size_per_head"] = str(n_embd // hf_config["n_head"])
config["gpt"]["inter_size"] = str(n_embd * 4)
config['gpt']['max_pos_seq_len'] = str(hf_config['n_positions'])
config["gpt"]["num_layer"] = str(hf_config["n_layer"])
config["gpt"]["vocab_size"] = str(hf_config["vocab_size"])
config["gpt"]["start_id"] = str(hf_config["bos_token_id"])
config["gpt"]["end_id"] = str(hf_config["eos_token_id"])
config['gpt']['weight_data_type'] = args.weight_data_type
config["gpt"]["tensor_para_size"] = str(args.infer_gpu_num)
with open(saved_dir + "/config.ini", 'w') as configfile:
config.write(configfile)
except Exception as e:
print(f"Fail to save the config in config.ini: {e}")
np_weight_data_type = get_weight_data_type(args.weight_data_type)
huggingface_model_name_pattern = [
"ln_1.bias",
"ln_1.weight",
"attn.c_attn.bias",
"attn.c_attn.weight",
"attn.c_proj.bias",
"attn.c_proj.weight",
"ln_2.bias",
"ln_2.weight",
"mlp.c_fc.bias",
"mlp.c_fc.weight",
"mlp.c_proj.bias",
"mlp.c_proj.weight",
]
ft_model_name_pattern = [
"input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.bias",
"attention.query_key_value.weight",
"attention.dense.bias",
"attention.dense.weight",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h.weight",
"mlp.dense_4h_to_h.bias",
"mlp.dense_4h_to_h.weight",
]
pool = multiprocessing.Pool(args.processes)
for name, param in model.named_parameters():
if name.find("weight") == -1 and name.find("bias") == -1:
continue
if name == 'wpe.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wpe.bin")
elif name == 'wte.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wte.bin")
elif name == 'ln_f.bias':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.bias.bin")
elif name == 'ln_f.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.weight.bin")
elif name == 'lm_head.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.weight.bin")
else:
for i in range(len(huggingface_model_name_pattern)):
if name.find(huggingface_model_name_pattern[i]) != -1:
new_name = name.replace("h.", "layers.").replace(huggingface_model_name_pattern[i], ft_model_name_pattern[i])
pool.starmap(split_and_convert_process,
[(0, saved_dir, factor, new_name, args,
param.detach().cpu().numpy().astype(np_weight_data_type))], )
pool.close()
pool.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-saved_dir', '-o', type=str, help='folder name of output files', required=True)
parser.add_argument('-in_file', '-i', type=str, help='file name of input checkpoint file', required=True)
parser.add_argument('-trained_gpu_num', '-t_g', type=int, help='How many gpus were used for training', default=1)
parser.add_argument('-infer_gpu_num', '-i_g', type=int, help='How many gpus for inference', required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)", default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
split_and_convert(args)
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/huggingface_gpt_convert.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import random
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-max_batch_size', '--max_batch_size', type=int, required=True, metavar='NUMBER',
help='batch size')
parser.add_argument('-max_input_length', '--max_input_length', type=int, required=True, metavar='NUMBER',
help='max input length')
parser.add_argument('--destination', type=str, default="../examples/cpp/multi_gpu_gpt/start_ids.csv", metavar='STRING',
help='Path of the generated start_ids.csv file. Default is "../examples/cpp/multi_gpu_gpt/start_ids.csv".')
args = parser.parse_args()
args_dict = vars(args)
batch_size = args_dict["max_batch_size"]
max_input_length = args_dict["max_input_length"]
path = f"../examples/cpp/multi_gpu_gpt/start_ids.csv"
with open(args_dict["destination"], 'w') as f:
ids = ""
for i in range(batch_size):
for j in range(max_input_length):
if j == 0:
ids = f"{ids}{random.randint(1, 100)}"
else:
ids = f"{ids}, {random.randint(1, 100)}"
ids = f"{ids}\n"
f.write(ids)
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/generate_start_ids.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import numpy as np
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
base_path = dir_path + "/../../../.."
sys.path.append(base_path)
from examples.pytorch.gpt.utils import gpt_token_encoder as encoder
def get_tokenizer(vocab_file=None, bpe_file=None):
vocab_file = vocab_file if vocab_file is not None else base_path + "/models/gpt2-vocab.json"
bpe_file = bpe_file if bpe_file is not None else base_path + "/models/gpt2-merges.txt"
tokenizer = encoder.get_encoder(vocab_file, bpe_file)
return tokenizer
def to_word_list_format(word_dict, tokenizer=None):
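# Builds the word-list tensor (e.g. bad words / stop words) used by the GPT examples: for each batch
# item, row 0 holds the flattened token ids of all words and row 1 their cumulative end offsets,
# padded with 0 / -1 to a common length.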
tokenizer = get_tokenizer() if tokenizer is None else tokenizer
flat_ids = []
offsets = []
for word_dict_item in word_dict:
item_flat_ids = []
item_offsets = []
words = list(csv.reader(word_dict_item))[0]
for word in words:
ids = tokenizer.encode(word)
if len(ids) == 0:
continue
item_flat_ids += ids
item_offsets.append(len(ids))
flat_ids.append(np.array(item_flat_ids))
offsets.append(np.cumsum(np.array(item_offsets)))
pad_to = max(1, max(len(ids) for ids in flat_ids))
for i, (ids, offs) in enumerate(zip(flat_ids, offsets)):
flat_ids[i] = np.pad(ids, (0, pad_to - len(ids)), constant_values=0)
offsets[i] = np.pad(offs, (0, pad_to - len(offs)), constant_values=-1)
result = np.array([flat_ids, offsets], dtype="int32").transpose((1, 0, 2))
if result.shape[0] == 1:
result = result.squeeze(0)
return np.ascontiguousarray(result)
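# Illustrative note (added comment, hedged): for a batch of word-list strings,
# to_word_list_format returns an int32 array of shape [batch, 2, pad_to], where
# row 0 of each item holds the flattened token ids (zero padded) and row 1 holds
# the cumulative offsets (padded with -1); a batch of one is squeezed to [2, pad_to].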
def save_word_list(filename, word_list):
with open(filename, "w") as f:
writer = csv.writer(f)
for word_list_item in word_list:
writer.writerow(word_list_item[0].tolist())
writer.writerow(word_list_item[1].tolist())
def load_word_list(filename):
with open(filename, "r") as f:
reader = csv.reader(f)
data = list(reader)
data = np.array(data, dtype=np.int32)
batch_size_x2, list_len = data.shape
return data.reshape((batch_size_x2 // 2, 2, list_len))
def test_csv_read_write():
filename = sys.argv[1]
test_words = [["one,two,three, one, two, three, one two three"], ["four"]]
word_list = to_word_list_format(test_words)
save_word_list(filename, word_list)
read_word_list = load_word_list(filename)
assert np.all(word_list == read_word_list)
if __name__ == "__main__":
test_csv_read_write()
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/word_list.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import dataclasses
import json
import os
import pathlib
import typing
import torch
import torch.nn as nn
import numpy as np
import torch.distributed as dist
class GPTFp8Weights(object):
def __init__(self, head_num, size_per_head, layer_num, vocab_size, max_seq_len, tensor_para_size, pipeline_para_size,
                 has_post_decoder_layernorm=True, int8_mode=0, fp8_mode=0, weights_data_type: typing.Union[str, np.dtype] = np.float32):
assert(head_num % tensor_para_size == 0)
if int8_mode != 0:
self.weight_transpose_calibrate_quantize = torch.ops.fastertransformer.weight_transpose_calibrate_quantize
self.head_num = head_num
self.size_per_head = size_per_head
self.layer_num = layer_num
self.vocab_size = vocab_size
self.max_seq_len = max_seq_len
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.layers_per_device = layer_num // pipeline_para_size
self.has_post_decoder_layernorm = has_post_decoder_layernorm
local_head_num = head_num // tensor_para_size
global_head_num = head_num
local_hidden_units = local_head_num * size_per_head
global_hidden_units = global_head_num * size_per_head
local_inter_size = local_hidden_units * 4
self.local_head_num = local_head_num
self.global_head_num = global_head_num
self.local_hidden_units = local_hidden_units
self.global_hidden_units = global_hidden_units
self.local_inter_size = local_inter_size
self.int8_mode = int8_mode
if isinstance(weights_data_type, str):
try:
weights_data_type = {
"fp16": np.float16,
"fp32": np.float32,
"float16": np.float16,
"float32": np.float32,
}[weights_data_type]
except KeyError:
raise ValueError(f"Don't know how to interpret weights_data_type: {weights_data_type}")
assert weights_data_type in [np.float32, np.float16]
self.weights_data_type = weights_data_type
self.w = []
self.int8_w = []
self.scale = []
# Transformer blocks
self.w.extend([torch.zeros(global_hidden_units)] * layer_num) # self_layernorm_gamma
self.w.extend([torch.zeros(global_hidden_units)] * layer_num) # self_layernorm_beta
self.w.extend([torch.zeros(global_hidden_units, local_hidden_units * 3)] * layer_num) # self_kernel
self.w.extend([torch.zeros(local_hidden_units * 3)] * layer_num) # self_bias
self.w.extend([torch.zeros(local_hidden_units, global_hidden_units)] * layer_num) # self_output_kernel
self.w.extend([torch.zeros(global_hidden_units)] * layer_num) # self_output_bias
self.w.extend([torch.zeros(global_hidden_units)] * layer_num) # ffn_layernorm_gamma
self.w.extend([torch.zeros(global_hidden_units)] * layer_num) # ffn_layernorm_beta
self.w.extend([torch.zeros(global_hidden_units, local_inter_size)] * layer_num) # ffn_kernel1
self.w.extend([torch.zeros(local_inter_size)] * layer_num) # ffn_bias1
self.w.extend([torch.zeros(local_inter_size, global_hidden_units)] * layer_num) # ffn_kernel2
self.w.extend([torch.zeros(global_hidden_units)] * layer_num) # ffn_bias2
# After Transformer blocks
if self.has_post_decoder_layernorm:
self.w.append(torch.zeros(global_hidden_units)) # layernorm_gamma
self.w.append(torch.zeros(global_hidden_units)) # layernorm_beta
self.w.append(torch.zeros(max_seq_len, global_hidden_units)) # position_encoding_table
self.w.append(torch.zeros(vocab_size, global_hidden_units)) # embedding_table
self.w.append(torch.zeros(vocab_size, global_hidden_units)) # embedding_kernel
# Initialization
self._map(lambda w: torch.nn.init.normal_(w, mean=0., std=1.))
if (self.int8_mode != 0):
self.int8_w.extend([torch.zeros(global_hidden_units, local_hidden_units * 3, dtype=torch.int8)] * layer_num) # self_int8_kernel
self.scale.extend([torch.zeros(local_hidden_units * 3, dtype=torch.float)] * layer_num) # self_scale
self.int8_w.extend([torch.zeros(local_hidden_units, global_hidden_units, dtype=torch.int8)] * layer_num) # self_output_int8_kernel
self.scale.extend([torch.zeros(global_hidden_units, dtype=torch.float)] * layer_num) # self_output_scale
self.int8_w.extend([torch.zeros(global_hidden_units, local_inter_size, dtype=torch.int8)] * layer_num) # ffn_int8_kernel1
self.scale.extend([torch.zeros(local_inter_size, dtype=torch.float)] * layer_num) # ffn_scale1
self.int8_w.extend([torch.zeros(local_inter_size, global_hidden_units, dtype=torch.int8)] * layer_num) # ffn_int8_kernel2
self.scale.extend([torch.zeros(global_hidden_units, dtype=torch.float)] * layer_num) # ffn_scale2
def __getitem__(self, idx):
return self.w[idx]
def __setitem__(self, idx, val):
self.w[idx] = val
def __len__(self):
return len(self.w)
def _map(self, func):
for i in range(len(self.w)):
if isinstance(self.w[i], list):
for j in range(len(self.w[i])):
self.w[i][j] = func(self.w[i][j])
else:
self.w[i] = func(self.w[i])
def _map_int8(self, func):
for i in range(len(self.int8_w)):
if isinstance(self.int8_w[i], list):
for j in range(len(self.int8_w[i])):
self.int8_w[i][j] = func(self.int8_w[i][j])
else:
self.int8_w[i] = func(self.int8_w[i])
for i in range(len(self.scale)):
if isinstance(self.scale[i], list):
for j in range(len(self.scale[i])):
self.scale[i][j] = func(self.scale[i][j])
else:
self.scale[i] = func(self.scale[i])
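    # Hedged usage note (added comment): _map applies a function to every weight
    # tensor in self.w, so the whole set can be cast or moved in one call, e.g.
    #   weights._map(lambda w: w.to(torch.float16))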
def load(self, ckpt_path, tensor_para_rank, pipeline_para_rank):
if not os.path.exists(ckpt_path):
return False
w = []
# Load
def is_load(i): return i >= self.layers_per_device * \
pipeline_para_rank and i < self.layers_per_device * (pipeline_para_rank + 1)
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.input_layernorm.weight.bin".format(i),
dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.input_layernorm.bias.bin".format(i),
dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.query_key_value.weight.{}.bin".format(i,
tensor_para_rank), dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.query_key_value.bias.{}.bin".format(i,
tensor_para_rank), dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.dense.weight.{}.bin".format(i,
tensor_para_rank), dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.dense.bias.bin".format(i),
dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.post_attention_layernorm.weight.bin".format(i),
dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.post_attention_layernorm.bias.bin".format(i),
dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_h_to_4h.weight.{}.bin".format(i,
tensor_para_rank), dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_h_to_4h.bias.{}.bin".format(i,
tensor_para_rank), dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_4h_to_h.weight.{}.bin".format(i,
tensor_para_rank), dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
w.extend([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_4h_to_h.bias.bin".format(i),
dtype=self.weights_data_type)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
if self.has_post_decoder_layernorm:
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.final_layernorm.weight.bin", dtype=self.weights_data_type)))
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.final_layernorm.bias.bin", dtype=self.weights_data_type)))
wpe = torch.from_numpy(np.fromfile(ckpt_path + "/model.wpe.bin", dtype=self.weights_data_type)
).reshape(-1, self.global_hidden_units)
assert self.max_seq_len <= wpe.size(0), (
f"max_seq_len ({self.max_seq_len} must not exceed "
f"the value of maximum sequence length during training ({wpe.size(0)})."
)
w.append(wpe)
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.wte.bin", dtype=self.weights_data_type)))
if os.path.isfile(ckpt_path + "/model.lm_head.weight.bin"):
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.lm_head.weight.bin", dtype=self.weights_data_type)))
else:
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.wte.bin", dtype=self.weights_data_type)))
# Reshape
try:
for i in range(len(w)):
if w[i].nelement() > 0:
self.w[i] = w[i].reshape(self.w[i].shape)
except RuntimeError:
raise RuntimeError(
f"head_num, size_per_head, vocab_size, and max_seq_len must be the same as the ones during training "
f"(idx: {i} expected shape: {self.w[i].shape} got shape: {w[i].shape})."
)
#transpose calibrate quantize the kernel
layer_num = self.layer_num
if self.int8_mode != 0:
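            # Note on indices (added comment): self.w is a flat list built by the extend()
            # calls in __init__, so 2*layer_num is the start of self_kernel, 4*layer_num of
            # self_output_kernel, 8*layer_num of ffn_kernel1, and 10*layer_num of ffn_kernel2.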
for i in range(layer_num):
self.int8_w[i + 0*layer_num], self.scale[i + 0*layer_num] = self.weight_transpose_calibrate_quantize(self.w[2*layer_num + i])
self.int8_w[i + 1*layer_num], self.scale[i + 1*layer_num] = self.weight_transpose_calibrate_quantize(self.w[4*layer_num + i])
self.int8_w[i + 2*layer_num], self.scale[i + 2*layer_num] = self.weight_transpose_calibrate_quantize(self.w[8*layer_num + i])
self.int8_w[i + 3*layer_num], self.scale[i + 3*layer_num] = self.weight_transpose_calibrate_quantize(self.w[10*layer_num + i])
return True
class GPTFp8(nn.Module):
def __init__(self,
head_num, size_per_head,
vocab_size, start_id, end_id, layer_num,
max_seq_len,
tensor_para_size, pipeline_para_size,
lib_path,
ckpt_path,
layernorm_eps = 1e-6, layernorm_type = "pre_layernorm", # gpt_variant_params
activation_type = "Gelu", has_post_decoder_layernorm = True, # gpt variant params
int8_mode = 0,
fp8_mode = 1,
weights_data_type: np.dtype = np.float32):
super().__init__()
self.head_num = head_num
self.size_per_head = size_per_head
self.vocab_size = vocab_size
self.max_seq_len = max_seq_len
self.start_id = start_id
self.end_id = end_id
self.layer_num = layer_num
# gpt_variant_params
self.layernorm_eps = layernorm_eps
self.layernorm_type = layernorm_type
self.activation_type = activation_type
self.has_post_decoder_layernorm = has_post_decoder_layernorm
# multi-gpu params
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.use_sparse_gemm = False
self.int8_mode = int8_mode
self.fp8_mode = fp8_mode
self.weights_data_type = weights_data_type
self.ckpt_path = ckpt_path
assert torch.cuda.is_available(), "CUDA is required for this model."
assert head_num % tensor_para_size == 0, "head_num must be a multiple of tensor_para_size."
assert layer_num % pipeline_para_size == 0, "layer_num must be a multiple of pipeline_para_size."
# Load the C++ model into Pytorch model.
torch.classes.load_library(os.path.abspath(lib_path))
# Prepare for tensor/pipeline parallel
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Have initialize the process group")
self.rank = dist.get_rank()
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size()
assert world_size == tensor_para_size * pipeline_para_size, "tensor_para_size * pipeline_para_size must be equal to world_size."
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
self.model = torch.classes.FasterTransformer.GptFp8Op(self.head_num, self.size_per_head, 4 * self.head_num * self.size_per_head,
self.layer_num, self.vocab_size, self.max_seq_len, self.start_id, self.end_id,
self.tensor_para_size, self.pipeline_para_size,
self.layernorm_eps, self.layernorm_type, self.activation_type, self.ckpt_path,
self.has_post_decoder_layernorm, [])
def forward(self,
start_ids,
start_lengths,
output_len,
beam_width=1,
top_k=1,
top_p=0.0,
beam_search_diversity_rate=0.0,
temperature=1.0,
len_penalty=1.0,
repetition_penalty=1.0,
random_seed=0,
return_output_length=False,
return_cum_log_probs=0):
input_len = start_ids.size(1)
assert input_len > 0, "input len must be larger than zero. For an unconditional case, use start_id as the first token."
# Inputs to device
start_ids = start_ids.cuda(self.device)
start_lengths = start_lengths.cuda(self.device)
# outputs: output_ids, output_lengths, output_cum_log_probs (optional)
outputs = self.model.forward(start_ids,
start_lengths,
output_len,
beam_width, # optional, can be None
top_k, # optional, can be None
top_p, # optional, can be None
beam_search_diversity_rate, # optional, can be None
temperature, # optional, can be None
len_penalty, # optional, can be None
repetition_penalty, # optional, can be None
random_seed, # optional, can be None
return_cum_log_probs) # optional, can be None
if return_cum_log_probs == 0:
output_ids, output_lengths = outputs
else:
output_ids, output_lengths, output_cum_log_probs = outputs
if return_output_length:
if return_cum_log_probs > 0:
return output_ids, output_lengths, output_cum_log_probs
else:
return output_ids, output_lengths
else:
return output_ids
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
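# Minimal usage sketch (added example, hedged): the library path and checkpoint
# directory below are placeholders, and the script is expected to run under MPI
# with one process per GPU; this is an illustration, not part of the original module.
def _example_gpt_fp8_usage():
    example_gpt = GPTFp8(head_num=12, size_per_head=64,
                         vocab_size=50257, start_id=50256, end_id=50256, layer_num=12,
                         max_seq_len=1024,
                         tensor_para_size=1, pipeline_para_size=1,
                         lib_path="lib/libth_transformer.so",  # assumed build artifact
                         ckpt_path="models/gpt2/1-gpu")        # assumed converted checkpoint
    start_ids = torch.full((1, 8), 50256, dtype=torch.int32)
    start_lengths = torch.IntTensor([8])
    # Greedy decoding of 16 new tokens; returns the generated output ids.
    return example_gpt(start_ids, start_lengths, output_len=16, beam_width=1, top_k=1)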
@dataclasses.dataclass
class GptInitModelParameters:
head_num: int
size_per_head: int
layer_num: int
max_seq_len: int
tensor_para_size: int
vocab_size: int
start_id: int
end_id: int
pipeline_para_size: int
weights_data_type: str
data_type: str
int8_mode: int
sparse: int
def gpt_init_kwargs(self):
do_not_include = ["data_type", "sparse"]
return {k: v for k, v in dataclasses.asdict(self).items() if k not in do_not_include}
@classmethod
def from_args(cls, args, config_reader):
model_name = args.model_name
return cls(
head_num=config_reader.getint(model_name, "head_num"),
size_per_head=config_reader.getint(model_name, "size_per_head"),
layer_num=config_reader.getint(model_name, "num_layer"),
max_seq_len=config_reader.getint(model_name, "max_pos_seq_len"),
tensor_para_size=config_reader.getint(model_name, "tensor_para_size"),
vocab_size=config_reader.getint(model_name, "vocab_size"),
start_id=config_reader.getint(model_name, "start_id"),
end_id=config_reader.getint(model_name, "end_id"),
weights_data_type=config_reader.get(model_name, "weight_data_type"),
pipeline_para_size=(
args.pipeline_para_size or config_reader.getint("ft_instance_hyperparameter", "pipeline_para_size")
),
int8_mode=(
args.int8_mode
if args.int8_mode is not None
else config_reader.getint("ft_instance_hyperparameter", "int8_mode")
),
data_type=(args.data_type or config_reader.get("ft_instance_hyperparameter", "data_type")),
sparse=int(args.sparse or False),
)
@classmethod
def update_argparser(cls, parser):
parser.add_argument("--model-name", type=str, default="gpt", help="Model name from config.ini file")
parser.add_argument("--pipeline-para-size", type=int, help="size of pipeline parallelism")
parser.add_argument("--data-type", type=str, help="data type", choices=["fp32", "bf16", "fp16"])
parser.add_argument(
"--sparse",
type=int,
choices=[0, 1],
help="Set sparse matrix multiplication. (Need SM 8.0 or 8.6 and SPARSITY_SUPPORT=ON)",
)
parser.add_argument("--int8-mode", type=int, choices=[0, 1], help="Set int8 mode")
@dataclasses.dataclass
class GptRuntimeModelParameters:
beam_width: int
top_k: int
top_p: float
beam_search_diversity_rate: float
temperature: float
len_penalty: float
repetition_penalty: float
def gpt_forward_kwargs(self):
return dataclasses.asdict(self)
@classmethod
def from_args(cls, args, config_reader):
return cls(
beam_width=args.beam_width or config_reader.getint("ft_instance_hyperparameter", "beam_width"),
top_k=args.sampling_top_k or config_reader.getint("ft_instance_hyperparameter", "top_k"),
top_p=args.sampling_top_p or config_reader.getfloat("ft_instance_hyperparameter", "top_p"),
beam_search_diversity_rate=(
args.beam_search_diversity_rate
or config_reader.getfloat("ft_instance_hyperparameter", "beam_search_diversity_rate")
),
temperature=args.temperature or config_reader.getfloat("ft_instance_hyperparameter", "temperature"),
len_penalty=args.len_penalty or config_reader.getfloat("ft_instance_hyperparameter", "len_penalty"),
repetition_penalty=(
args.repetition_penalty or config_reader.getfloat("ft_instance_hyperparameter", "repetition_penalty")
),
)
@classmethod
def update_argparser(cls, parser):
parser.add_argument("--beam-width", type=int, help="beam width")
parser.add_argument("--sampling-top-k", type=int, help="Candidate (k) value of top k sampling in decoding")
parser.add_argument("--sampling-top-p", type=float, help="Probability (p) value of top p sampling in decoding.")
parser.add_argument("--temperature", type=float, help="temperature")
parser.add_argument("--len-penalty", type=float, help="len_penalty")
parser.add_argument("--repetition-penalty", type=float, help="repetition penalty")
parser.add_argument("--beam-search-diversity-rate", type=float, help="beam_search_diversity_rate")
DEFAULT_START_TAG = "<|endoftext|>"
DEFAULT_END_TAG = "<|endoftext|>"
OPENAI_GPT2_START_ID = 50256
OPENAI_GPT2_END_ID = 50256
@dataclasses.dataclass
class GptModelConfig:
model_name: str
tensor_para_size: int
head_num: int
size_per_head: int
inter_size: int
num_layer: int
max_pos_seq_len: int
weight_data_type: str
vocab_size: int
start_id: int
end_id: int
@classmethod
def from_nemo_package(
cls,
*,
args: argparse.Namespace,
nemo_model_config: typing.Dict[str, typing.Any],
vocab_path: typing.Optional[pathlib.Path] = None,
vocab_size: int,
):
if vocab_path:
vocab_path = pathlib.Path(vocab_path)
with vocab_path.open("r") as vocab_file:
vocab = json.load(vocab_file)
start_id, end_id = vocab[DEFAULT_START_TAG], vocab[DEFAULT_END_TAG]
else:
start_id, end_id = OPENAI_GPT2_START_ID, OPENAI_GPT2_END_ID
return cls(
model_name="gpt",
tensor_para_size=args.infer_gpu_num,
head_num=nemo_model_config["num_attention_heads"],
size_per_head=nemo_model_config["hidden_size"] // nemo_model_config["num_attention_heads"],
inter_size=nemo_model_config["ffn_hidden_size"],
num_layer=nemo_model_config["num_layers"],
max_pos_seq_len=nemo_model_config["max_position_embeddings"],
weight_data_type=args.weight_data_type,
vocab_size=vocab_size,
start_id=start_id,
end_id=end_id,
)
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/gpt_fp8.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
def generate_gpt_config(args):
config = configparser.ConfigParser()
config["ft_instance_hyperparameter"] = {
"max_batch_size": "{}".format(args['max_batch_size']),
"max_seq_len": "{}".format(args['max_seq_len']),
"beam_width": "{}".format(args['beam_width']),
"top_k": "{}".format(args['sampling_topk']),
"top_p": "{}".format(args['sampling_topp']),
"temperature": "{}".format(args['temperature']),
"tensor_para_size": "{}".format(args['tensor_para_size']),
"pipeline_para_size": "{}".format(args['pipeline_para_size']),
"data_type": "{}".format(args['data_type']),
"sparse": "0",
"int8_mode": "{}".format(args["int8_mode"]),
"enable_custom_all_reduce": "0",
"model_name": "tmp_model",
"model_dir": "{}".format(args['model_dir']),
"repetition_penalty": "{}".format(args['repetition_penalty']),
"len_penalty": "{}".format(args['len_penalty']),
"beam_search_diversity_rate": "{}".format(args['beam_search_diversity_rate']),
}
config["request"] = {
"request_batch_size": "{}".format(args['request_batch_size']),
"request_output_len": "{}".format(args['request_output_len']),
"return_log_probs": "false",
"context_log_probs": "false",
"remove_padding": "true",
"context_embeddings": "true"
}
config["tmp_model"] = {
"head_num": "{}".format(args['head_number']),
"size_per_head": "{}".format(args['size_per_head']),
"inter_size": "{}".format(args['inter_size']),
"vocab_size": "{}".format(args['vocab_size']),
"decoder_layers": "{}".format(args['num_layer']),
"start_id": "{}".format(args['start_id']),
"end_id": "{}".format(args['end_id']),
}
if args['model_variant'] is not None:
config["tmp_model"]["model_variant"] = args["model_variant"]
with open(args['destination'], 'w') as configfile:
config.write(configfile)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-max_batch_size', '--max_batch_size', type=int, default=8, metavar='NUMBER',
help='batch size (default: 8)')
parser.add_argument('-max_seq_len', '--max_seq_len', type=int, default=256, metavar='NUMBER',
help='max sequence length (default: 256)')
parser.add_argument('-beam_width', '--beam_width', type=int, default=1, metavar='NUMBER',
help='beam width for beam search (default: 1)')
parser.add_argument('-n', '--head_number', type=int, default=32, metavar='NUMBER',
help='head number (default: 32)')
parser.add_argument('-size', '--size_per_head', type=int, default=128, metavar='NUMBER',
help='size per head (default: 128)')
parser.add_argument('-inter_size', '--inter_size', type=int, default=16384, metavar='NUMBER',
help='inter size for ffn (default: 16384)')
parser.add_argument('-l', '--num_layer', type=int, default=12, metavar='NUMBER',
help='number of layers (default: 12)')
    parser.add_argument('-v', '--vocab_size', type=int, default=50257, metavar='NUMBER',
                        help='vocabulary size (default: 50257)')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0.')
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='tensor parallelism size. Default is 1.')
    parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
                        help='pipeline parallelism size. Default is 1.')
parser.add_argument('--model_dir', type=str, default="./models/", metavar='STRING',
                        help='Model path prefix. Default is "./models/".')
    parser.add_argument('-temperature', '--temperature', type=float, default=1.0, metavar='NUMBER',
                        help='sampling temperature. Default is 1.0.')
parser.add_argument('-request_batch_size', '--request_batch_size', type=int, default=8, metavar='NUMBER',
help='batch size (default: 8)')
parser.add_argument('-request_output_len', '--request_output_len', type=int, default=32, metavar='NUMBER',
help='output length (default: 32)')
parser.add_argument('-start_id', '--start_id', type=int, default=50256, metavar='NUMBER',
help='start id (default: 50256)')
parser.add_argument('-end_id', '--end_id', type=int, default=50256, metavar='NUMBER',
help='end id (default: 50256)')
parser.add_argument('-repetition_penalty', '--repetition_penalty', type=float, default=1.0, metavar='NUMBER',
help='repetition_penalty (default: 1.0)')
parser.add_argument('-len_penalty', '--len_penalty', type=float, default=0.0, metavar='NUMBER',
help='len_penalty (default: 0.0)')
parser.add_argument('-beam_search_diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
help='beam_search_diversity_rate (default: 0.0)')
parser.add_argument('-memory_len', '--memory_len', type=int, default=None, metavar='NUMBER',
help='Memory length (how many time steps to keep in memory) (default: None)')
parser.add_argument('-model_variant', '--model_variant', type=str, default=None, metavar='STRING',
help='Model variant (needed for OPT models) (default: None)')
parser.add_argument('--destination', type=str, default=".tmp.config.ini", metavar='STRING',
help='Configuration save file. Default is ".tmp.config.ini".')
parser.add_argument('--int8_mode', type=int, default=0, choices=[0, 1, 2],
help='The level of quantization to perform.'
' 0: No quantization. All computation in data_type'
                             ' 1: Quantize weights to int8, all compute occurs in fp16/bf16. Not supported when data_type is fp32'
' 2: Data path is mostly w8a8')
args = parser.parse_args()
generate_gpt_config(vars(args))
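# Hedged usage note (not part of the original script): an illustrative invocation
# with assumed model sizes; the generated .tmp.config.ini is intended for the
# FasterTransformer C++ GPT example that reads these sections.
#   python generate_gpt_config.py -n 16 -size 64 -l 24 -d fp16 \
#       -tensor_para_size 2 -pipeline_para_size 1 --model_dir ./models/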
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/generate_gpt_config.py
|
import os
import sys
import numpy as np
import torch
# Example
# python tools/checkpoint_util.py --model-type GPT --loader megatron --saver fastertransformer \
# --input /home/scratch.bhsueh_sw/megatron_new_ckpt/357m-pipeline-2-tensor-2/ --output ./tmp --target-tensor-parallel-size 2
def add_arguments(parser):
group = parser.add_argument_group(title='FasterTransformer saver')
group.add_argument('--megatron-path', type=str, default=".",
help='Base directory of Megatron repository')
group.add_argument('--target-tensor-parallel-size', type=int,
help='Target tensor model parallel size')
def save_checkpoint(queue, args):
sys.path.insert(0, args.megatron_path)
from megatron.global_vars import set_global_variables, get_args
md = queue.get()
os.environ["WORLD_SIZE"] = f'{args.target_tensor_parallel_size}'
# We want all arguments to come from us
sys.argv = ['script.py',
'--num-layers', str(md.num_layers),
'--hidden-size', str(md.hidden_size),
'--seq-length', str(md.seq_length),
'--num-attention-heads', str(md.num_attention_heads),
'--max-position-embeddings', str(md.max_position_embeddings),
'--tokenizer-type', str(md.tokenizer_type),
'--tensor-model-parallel-size', str(args.target_tensor_parallel_size),
'--no-masked-softmax-fusion',
'--no-bias-gelu-fusion',
'--no-bias-dropout-fusion',
'--use-cpu-initialization',
'--micro-batch-size', '1',
'--no-load-optim',
'--no-load-rng',
'--no-save-optim',
'--no-save-rng',
'--no-initialization',
'--save-interval', '1',
'--save', args.output
]
set_global_variables()
# margs = megatron args
margs = get_args()
# Embeddings
#-----------
pos_embed = queue.get()
full_word_embed = queue.get()
# Tell Megatron what our full size is
margs.padded_vocab_size = full_word_embed.shape[0]
if margs.padded_vocab_size % args.target_tensor_parallel_size != 0:
print("source vocab size is not evenly divisble by target tensor parallel size")
exit(1)
    if not os.path.exists(args.output):
os.makedirs(args.output)
with open(args.output + "/args.txt", "w") as outfile:
outfile.write("{}\n".format(md))
pos_embed.cpu().numpy().astype(np.float32).tofile(args.output + "/model.wpe.bin")
full_word_embed.cpu().numpy().astype(np.float32).tofile(args.output + "/model.wte.bin")
# Transformer layers
#-------------------
for layer in range(md.num_layers):
# get full tensors
input_layernorm_weight = queue.get().T
input_layernorm_bias = queue.get().T
full_qkv_weight = queue.get().T
full_qkv_bias = queue.get().T
full_dense_weight = queue.get().T
dense_bias = queue.get().T
post_layernorm_weight = queue.get().T
post_layernorm_bias = queue.get().T
full_mlp_l0_weight = queue.get().T
full_mlp_l0_bias = queue.get().T
full_mlp_l1_weight = queue.get().T
mlp_l1_bias = queue.get().T
# Assume the version of checkpoint is 3
ckpt_ver = 3
if ckpt_ver == 3:
num_splits = 3
            size_per_head = md.hidden_size // md.num_attention_heads
            full_qkv_weight = full_qkv_weight.reshape(md.hidden_size, md.num_attention_heads, num_splits, size_per_head)
            full_qkv_weight = full_qkv_weight.permute(0, 2, 1, 3)
            full_qkv_weight = full_qkv_weight.reshape(md.hidden_size, num_splits, md.hidden_size)
            full_qkv_bias = full_qkv_bias.reshape(md.num_attention_heads, num_splits, size_per_head)
            full_qkv_bias = full_qkv_bias.permute(1, 0, 2)
            full_qkv_bias = full_qkv_bias.reshape(num_splits, md.hidden_size)
# Split up the parallel tensors
out_qkv_weight = torch.chunk(full_qkv_weight, args.target_tensor_parallel_size, dim=-1)
out_qkv_bias = torch.chunk(full_qkv_bias, args.target_tensor_parallel_size, dim=-1)
out_dense_weight = torch.chunk(full_dense_weight, args.target_tensor_parallel_size, dim=0)
out_mlp_l0_weight = torch.chunk(full_mlp_l0_weight, args.target_tensor_parallel_size, dim=-1)
out_mlp_l0_bias = torch.chunk(full_mlp_l0_bias, args.target_tensor_parallel_size, dim=-1)
out_mlp_l1_weight = torch.chunk(full_mlp_l1_weight, args.target_tensor_parallel_size, dim=0)
# Save model
input_layernorm_weight.cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.input_layernorm.weight.bin".format(layer))
input_layernorm_bias.cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.input_layernorm.bias.bin".format(layer))
post_layernorm_weight.cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.post_attention_layernorm.weight.bin".format(layer))
post_layernorm_bias.cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.post_attention_layernorm.bias.bin".format(layer))
dense_bias.cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.attention.dense.bias.bin".format(layer))
mlp_l1_bias.cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.mlp.dense_4h_to_h.bias.bin".format(layer))
for tp_rank in range(args.target_tensor_parallel_size):
out_qkv_weight[tp_rank].cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.attention.query_key_value.weight.{}.bin".format(layer, tp_rank))
out_qkv_bias[tp_rank].cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.attention.query_key_value.bias.{}.bin".format(layer, tp_rank))
out_dense_weight[tp_rank].cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.attention.dense.weight.{}.bin".format(layer, tp_rank))
out_mlp_l0_weight[tp_rank].cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.mlp.dense_h_to_4h.weight.{}.bin".format(layer, tp_rank))
out_mlp_l0_bias[tp_rank].cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.mlp.dense_h_to_4h.bias.{}.bin".format(layer, tp_rank))
out_mlp_l1_weight[tp_rank].cpu().numpy().astype(np.float32).tofile(args.output + "/model.layers.{}.mlp.dense_4h_to_h.weight.{}.bin".format(layer, tp_rank))
final_layernorm_weight = queue.get().T
final_layernorm_bias = queue.get().T
final_layernorm_weight.cpu().numpy().astype(np.float32).tofile(args.output + "/model.final_layernorm.weight.bin")
final_layernorm_bias.cpu().numpy().astype(np.float32).tofile(args.output + "/model.final_layernorm.bias.bin")
del final_layernorm_weight
del final_layernorm_bias
print("Done!")
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/checkpoint_saver_fastertransformer.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import dataclasses
import datetime
import logging
import multiprocessing
import os
import pathlib
import shutil
import sys
import tempfile
import typing
import numpy as np
import torch # pytype: disable=import-error
import yaml
# verify if root package is in PYTHONPATH
__root_package_path__ = pathlib.Path(__file__).parent.parent.parent.parent.parent.absolute().as_posix()
if __root_package_path__ not in sys.path:
print(
f"[ERROR] add project root directory to your PYTHONPATH with "
f"'export PYTHONPATH={__root_package_path__}:${{PYTHONPATH}}'"
)
from examples.pytorch.gpt.utils.gpt import GptModelConfig
from examples.pytorch.nemo import (
UnpackedNemoCheckpointDir,
unpack_nemo_ckpt,
extract_layers_with_prefix,
)
from examples.pytorch.utils import (
torch2np,
safe_transpose,
cpu_map_location,
gpu_map_location,
WEIGHT2DTYPE,
)
LOGGER = logging.getLogger(__name__)
# This tool is used to support the new NeMo megatron model trained by pipeline parallel + tensor parallel
def merge_and_convert_process(
tp_rank: int,
pp_rank: int,
saved_dir: typing.Union[str, pathlib.Path],
factor: int,
key: str,
nemo_model_config: typing.Dict[str, typing.Any],
transformer_model_list: typing.List,
np_weight_data_type,
args: argparse.Namespace,
):
# Config params
num_layers = nemo_model_config["num_layers"]
num_attention_heads = nemo_model_config["num_attention_heads"]
tensor_model_parallel_size = nemo_model_config.get("tensor_model_parallel_size", 1)
pipeline_model_parallel_size = nemo_model_config.get("pipeline_model_parallel_size", 1)
if key.find("layers.") != -1:
layer_index = int(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pp_rank * num_layers // pipeline_model_parallel_size),
)
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
else:
saved_key = key
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
):
# shared weights, only need to convert the weights of rank 0
if tp_rank == 0:
val = safe_transpose(transformer_model_list[0][key])
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.bin"
np.squeeze(val).tofile(saved_path)
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key])
val = torch2np(val, np_weight_data_type)
vals.append(val)
saved_path = saved_dir / f"model.{saved_key}.{tp_rank:d}.bin"
np.concatenate(vals, axis=0).tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key])
val = torch2np(val, np_weight_data_type)
vals.append(val)
saved_path = saved_dir / f"model.{saved_key}.{tp_rank:d}.bin"
np.concatenate(vals, axis=-1).tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key])
val = torch2np(val, np_weight_data_type)
local_dim = int(val.shape[-1] / 3)
num_splits = 3
head_num = num_attention_heads // tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(3, local_dim)
vals.append(val)
saved_path = saved_dir / f"model.{saved_key}.{tp_rank:d}.bin"
np.concatenate(vals, axis=-1).tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key])
val = torch2np(val, np_weight_data_type)
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / 3)
num_splits = 3
head_num = num_attention_heads // tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
vals.append(val)
saved_path = saved_dir / f"model.{saved_key}.{tp_rank:d}.bin"
if args.fused_qkv == 1:
np.concatenate(vals, axis=-1).tofile(saved_path)
elif args.fused_qkv == 0:
np.concatenate(vals, axis=-1).transpose(1, 0, 2).tofile(saved_path)
else:
LOGGER.error("cannot find key '%s'", key)
def split_and_convert_process(
tp_rank: int,
pp_rank: int,
saved_dir: typing.Union[str, pathlib.Path],
factor: int,
key: str,
nemo_model_config: typing.Dict[str, typing.Any],
transformer_model_list: typing.List,
np_weight_data_type,
args: argparse.Namespace,
):
# Config params
num_layers = nemo_model_config["num_layers"]
num_attention_heads = nemo_model_config["num_attention_heads"]
tensor_model_parallel_size = nemo_model_config.get("tensor_model_parallel_size", 1)
pipeline_model_parallel_size = nemo_model_config.get("pipeline_model_parallel_size", 1)
# Handle model[key] weights
transformer_model = transformer_model_list[0]
val = safe_transpose(transformer_model[key])
val = torch2np(val, np_weight_data_type)
if key.find("layers.") != -1:
        layer_index = int(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pp_rank * num_layers // pipeline_model_parallel_size),
)
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
else:
saved_key = key
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
):
# shared weights, only need to convert the weights of rank 0
if tp_rank == 0:
saved_path = saved_dir / f"model.{saved_key}.bin"
val.tofile(saved_path)
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{tp_rank * factor + j:d}.bin"
split_vals[j].tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{tp_rank * factor + j:d}.bin"
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
local_dim = int(val.shape[-1] / 3)
num_splits = 3
head_num = num_attention_heads // tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{tp_rank * factor + j:d}.bin"
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / 3)
num_splits = 3
head_num = num_attention_heads
size_per_head = hidden_dim // head_num
head_num = head_num // tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{tp_rank * factor + j:d}.bin"
split_vals[j].tofile(saved_path)
else:
LOGGER.error("cannot find key '%s'", key)
def convert_checkpoint(unpacked_checkpoints_dir: UnpackedNemoCheckpointDir, args):
nemo_model_config = unpacked_checkpoints_dir.model_config
checkpoints_paths = unpacked_checkpoints_dir.get_checkpoints_paths(
nemo_model_config.get("tensor_model_parallel_size", 1),
nemo_model_config.get("pipeline_model_parallel_size", 1),
)
# if checkpoints files could be found - start preparing output dir
saved_dir = _prepare_saved_dir(args)
map_location_fn = cpu_map_location if bool(args.load_checkpoints_to_cpu) else gpu_map_location
np_weight_data_type = WEIGHT2DTYPE[args.weight_data_type]
# load position_embedding from rank 0
model_00 = torch.load(checkpoints_paths[0][0], map_location=map_location_fn)
val = model_00.get("state_dict", model_00)["model.language_model.embedding.position_embeddings.weight"]
# not weight, do not need to transpose
val = torch2np(val, np_weight_data_type)
val.tofile(saved_dir / "model.wpe.bin")
del model_00
w_e_list = []
training_tensor_para_size = nemo_model_config.get("tensor_model_parallel_size", 1)
training_pipeline_para_size = nemo_model_config.get("pipeline_model_parallel_size", 1)
inference_tensor_para_size = args.infer_gpu_num
if training_tensor_para_size > inference_tensor_para_size:
assert training_tensor_para_size % inference_tensor_para_size == 0
is_merge_ckpt = True
factor = int(training_tensor_para_size / inference_tensor_para_size)
else:
assert inference_tensor_para_size % training_tensor_para_size == 0
is_merge_ckpt = False
factor = int(inference_tensor_para_size / training_tensor_para_size)
main_loop = min(training_tensor_para_size, inference_tensor_para_size)
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
pool = multiprocessing.Pool(args.processes)
for i in range(main_loop):
for j in range(training_pipeline_para_size):
transformer_models = []
if is_merge_ckpt:
for k in range(factor):
rank_weights = checkpoints_paths[i * factor + k][j]
model = torch.load(rank_weights, map_location=map_location_fn)
if j == 0:
val = model.get("state_dict", model)["model.language_model.embedding.word_embeddings.weight"]
val = torch2np(val, np_weight_data_type)
w_e_list.append(val)
layers = extract_layers_with_prefix(model, "model.language_model.encoder.")
transformer_models.append(layers)
else:
rank_weights = checkpoints_paths[i][j]
model = torch.load(rank_weights, map_location=map_location_fn)
if j == 0:
val = model.get("state_dict", model)["model.language_model.embedding.word_embeddings.weight"]
val = torch2np(val, np_weight_data_type)
w_e_list.append(val)
layers = extract_layers_with_prefix(model, "model.language_model.encoder.")
transformer_models.append(layers)
pool.starmap(
merge_and_convert_process if is_merge_ckpt else split_and_convert_process,
[
(
i, # tp_rank
j, # pp_rank
saved_dir,
factor,
key,
nemo_model_config,
transformer_models,
np_weight_data_type,
args,
)
for key in transformer_models[0]
],
)
pool.close()
pool.join()
val = np.concatenate(w_e_list, axis=0)
val.tofile(saved_dir / "model.wte.bin")
vocab_size = val.shape[0]
tokenizer_config = nemo_model_config["tokenizer"]
tokenizer_config = _update_tokenizer_config(tokenizer_config, unpacked_checkpoints_dir)
if args.tokenizer_model_path:
LOGGER.debug("Use tokenizer model passed from CLI: %s", args.tokenizer_model_path)
tokenizer_config["model"] = args.tokenizer_model_path
if args.vocab_path:
LOGGER.debug("Use tokenizer vocab passed from CLI: %s", args.vocab_path)
tokenizer_config["vocab_file"] = args.vocab_path
if args.merges_path:
LOGGER.debug("Use tokenizer merge passed from CLI: %s", args.merges_path)
tokenizer_config["merge_file"] = args.merges_path
_copy_tokenizer_file_if_defined("model", tokenizer_config["model"], saved_dir)
_copy_tokenizer_file_if_defined("vocab_file", tokenizer_config["vocab_file"], saved_dir)
_copy_tokenizer_file_if_defined("merge_file", tokenizer_config["merge_file"], saved_dir)
bos_id, eos_id = _get_special_tokens_ids(tokenizer_config)
gpt_model_config = GptModelConfig.from_nemo_package(
args=args,
nemo_model_config=nemo_model_config,
vocab_size=vocab_size,
bos_id=bos_id,
eos_id=eos_id,
)
# Configuration for the model (load by triton backends)
config = configparser.ConfigParser()
config["gpt"] = {k: str(v) for k, v in dataclasses.asdict(gpt_model_config).items()}
try:
config_path = saved_dir / "config.ini"
with config_path.open("w") as config_file:
config.write(config_file)
except Exception as e:
LOGGER.error("Fail to save the config; %s", e)
def _prepare_saved_dir(args):
saved_dir = pathlib.Path(args.saved_dir)
if args.fused_qkv == 1:
saved_dir = saved_dir / f"{args.infer_gpu_num:d}-gpu/"
else:
saved_dir = saved_dir / f"unfusedQKV-{args.infer_gpu_num:d}-gpu"
if saved_dir.exists():
LOGGER.error(f"Remove %s target directory before running conversion", saved_dir)
sys.exit(1)
saved_dir.mkdir(parents=True)
return saved_dir
def prompt_convert(args, prompt_config, prompt_weights):
prompt_templates = prompt_config["task_templates"]
# model config save dir
config_saved_dir = _prepare_saved_dir(args)
# Configuration for the model (load by triton backends)
config_path = config_saved_dir / "config.ini"
config = configparser.ConfigParser()
with config_path.open("r") as config_file:
config.read_file(config_file)
num_tasks = len(prompt_templates)
prompt_learning_type = 3 # p_prompt_tuning
prompt_learning_start_id = 50257 # hard code here
config["gpt"]["num_tasks"] = str(num_tasks)
config["gpt"]["prompt_learning_start_id"] = str(prompt_learning_start_id)
config["gpt"]["prompt_learning_type"] = str(prompt_learning_type)
for task_name_id, prompt_task in enumerate(prompt_templates):
prompt_task_name = prompt_task["taskname"]
prompt_length = int(prompt_task["total_virtual_tokens"])
config[f"task_{task_name_id:d}"] = {}
config[f"task_{task_name_id:d}"]["task_name"] = prompt_task_name
config[f"task_{task_name_id:d}"]["prompt_length"] = str(prompt_length)
prompt_task_weights = prompt_weights["prompt_table"][
f"prompt_table.{prompt_task_name}.prompt_embeddings.weight"
]
# put converted prompts weights to the model weights saved dir
prompt_task_weights_output_path = config_saved_dir / f"model.prompt_table.{prompt_task_name}.weight.bin"
val = torch2np(prompt_task_weights)
val.tofile(prompt_task_weights_output_path)
with config_path.open("w") as config_file:
config.write(config_file)
LOGGER.info(">>>>>>>>>>>>>>>> model saved config")
LOGGER.info(config_path.read_text())
def _update_tokenizer_config(tokenizer_config: typing.Dict, unpacked_checkpoints_dir):
def _update_config_entry(key, file_pattern):
old_file_path = tokenizer_config[key]
if old_file_path:
LOGGER.debug("tokenizer %s %s type %s", key, old_file_path, type(old_file_path))
old_file_path = pathlib.Path(old_file_path)
new_file_path = unpacked_checkpoints_dir.get_tokenizer_file_path("tokenizer", key, file_pattern)
if new_file_path:
LOGGER.debug("Update tokenizer %s %s -> %s", key, old_file_path, new_file_path)
tokenizer_config[key] = new_file_path.as_posix()
elif not old_file_path.exists():
LOGGER.warning("Because tokenizer %s %s does not exists - set it as None", key, old_file_path)
tokenizer_config[key] = None
_update_config_entry("model", "*.model")
_update_config_entry("vocab_file", "*vocab*")
_update_config_entry("merge_file", "*merge*.txt")
return tokenizer_config
def _copy_tokenizer_file_if_defined(key_name, tokenizer_file_path, saved_dir):
if tokenizer_file_path:
tokenizer_file_path = pathlib.Path(tokenizer_file_path)
if tokenizer_file_path.exists():
tokenizer_basename = {
"model": "tokenizer",
"vocab_file": "vocab",
"merge_file": "merges",
}[key_name]
dst_path = saved_dir / f"{tokenizer_basename}{tokenizer_file_path.suffix}"
LOGGER.debug("Copy of %s %s file as %s", tokenizer_file_path, key_name, dst_path)
shutil.copy(tokenizer_file_path.as_posix(), dst_path.as_posix())
else:
LOGGER.debug("%s %s file does not exists", tokenizer_file_path, key_name)
def _get_special_tokens_ids(tokenizer_config: typing.Dict):
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from examples.pytorch.tokenizer import add_special_tokens_to_tokenizer
logging.getLogger("git.cmd").setLevel(logging.INFO)
logging.getLogger("h5py._conv").setLevel(logging.INFO)
logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("matplotlib.font_manager").setLevel(logging.INFO)
logging.getLogger("matplotlib.pyplot").setLevel(logging.INFO)
tokenizer = get_nmt_tokenizer(
library=tokenizer_config["library"],
model_name=tokenizer_config["type"],
tokenizer_model=tokenizer_config["model"],
vocab_file=tokenizer_config["vocab_file"],
merges_file=tokenizer_config["merge_file"],
legacy=True,
)
if tokenizer_config["library"] == "sentencepiece":
add_special_tokens_to_tokenizer(tokenizer)
bos_id = tokenizer.bos_id
eos_id = tokenizer.eos_id
LOGGER.debug("for %s obtained tokenizer tokens ids bos_id=%d eos_id=%d", tokenizer_config, bos_id, eos_id)
return bos_id, eos_id
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--saved-dir",
"-saved_dir",
"-o",
help="folder name of output files",
required=True,
)
parser.add_argument(
"--in-file",
"-in_file",
"-i",
help="file name of .nemo checkpoint file",
required=True,
)
parser.add_argument(
"--prompt-in-file",
"-prompt_in_file",
"-p_i",
help="file name of .nemo prompt checkpoint file",
)
parser.add_argument(
"--prompt-saved-dir",
"-prompt_saved_dir",
"-p_o",
help="folder name of prompt checkpoint output files",
)
parser.add_argument(
"--infer-gpu-num",
"-infer_gpu_num",
"-i_g",
type=int,
help="How many gpus for inference",
required=True,
)
parser.add_argument(
"--fused-qkv",
"-fused_qkv",
type=int,
choices=[0, 1],
default=1,
help="Fuse the qkv weights or not",
)
parser.add_argument(
"--processes",
"-processes",
"-p",
type=int,
default=16,
help="How many processes to spawn for conversion",
)
parser.add_argument(
"--weight-data-type",
"-weight_data_type",
choices=["fp32", "fp16"],
default="fp32",
help="Data type of results weights",
)
parser.add_argument(
"--load-checkpoints-to-cpu",
"-load_checkpoints_to_cpu",
"-cpu",
type=int,
choices=[0, 1],
default=1,
help="Whether to load model weights to CPU",
)
parser.add_argument(
"--vocab-path",
help="Path to vocabulary file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--merges-path",
help="Path to merges file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--tokenizer-model-path",
help="Path to tokenizer model file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument("--verbose", action="store_true", help="Provide verbose messages")
args = parser.parse_args()
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO, format=log_format)
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
input_path = pathlib.Path(args.in_file)
if not input_path.exists():
LOGGER.error("%s does not exists", input_path)
sys.exit(1)
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir = pathlib.Path(temp_dir)
# unpack if needed
if input_path.is_file():
checkpoint_dir_path = temp_dir / "unpacked"
start_time = datetime.datetime.now()
unpacked_checkpoint_dir = UnpackedNemoCheckpointDir(
unpack_nemo_ckpt(args.in_file, checkpoint_dir_path),
load_checkpoints_to_cpu=bool(args.load_checkpoints_to_cpu),
)
LOGGER.info("Spent %s (h:m:s) to unpack NeMo archive", datetime.datetime.now() - start_time)
else:
unpacked_checkpoint_dir = UnpackedNemoCheckpointDir(
input_path, load_checkpoints_to_cpu=bool(args.load_checkpoints_to_cpu)
)
start_time = datetime.datetime.now()
convert_checkpoint(unpacked_checkpoint_dir, args)
LOGGER.info("Spent %s (h:m:s) to convert the model", datetime.datetime.now() - start_time)
map_location_fn = cpu_map_location if bool(args.load_checkpoints_to_cpu) else gpu_map_location
# prompt checkpoint converting
if args.prompt_in_file is not None:
start_time = datetime.datetime.now()
assert args.prompt_saved_dir is not None
unpack_nemo_ckpt(args.prompt_in_file, args.prompt_saved_dir)
LOGGER.info("Spent %s (h:m:s) to unpack NeMo prompt archive", datetime.datetime.now() - start_time)
model_config_yaml = "model_config.yaml"
model_weights_ckpt = "model_weights.ckpt"
            with open(os.path.join(args.prompt_saved_dir, model_config_yaml), "r") as prompt_config_file:
                prompt_config = yaml.full_load(prompt_config_file)
LOGGER.info(prompt_config)
start_time = datetime.datetime.now()
prompt_weights = torch.load(
os.path.join(args.prompt_saved_dir, model_weights_ckpt),
map_location=map_location_fn,
)
prompt_convert(args, prompt_config, prompt_weights)
LOGGER.info(f"Spent %s (h:m:s) to unpack convert prompt model", datetime.datetime.now() - start_time)
if __name__ == "__main__":
main()
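# Hedged usage note (not part of the original script); the paths below are placeholders:
#   python nemo_ckpt_convert.py -i megatron_gpt.nemo -o ./ft-checkpoint \
#       -i_g 1 -weight_data_type fp16 -p 8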
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/nemo_ckpt_convert.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from tokenizers import Tokenizer
class JiebaBPETokenizer:
"""SentencePiece BPE tokenizer with Jieba integration"""
def __init__(self, tokenizer_json_file):
self.name = 'Jieba BPE Tokenizer'
self.tokenizer = Tokenizer.from_file(tokenizer_json_file)
self.eod_id = self.tokenizer.token_to_id('<|endoftext|>')
try:
import jieba
import logging
jieba.setLogLevel(logging.INFO)
except ImportError:
raise ImportError(
'You need to install jieba to use JiebaTokenizer. '
'See https://pypi.org/project/rjieba/ for installation.')
self.jieba = jieba
self.new_line = self.vocab['\n']
self.sep_token = self.vocab['<sep>']
@property
def vocab_size(self):
return self.tokenizer.get_vocab_size(with_added_tokens=True)
@property
def vocab(self):
return self.tokenizer.get_vocab(with_added_tokens=True)
@property
def inv_vocab(self):
vocab = self.vocab
inv_vocab = dict()
for key, val in vocab.items():
inv_vocab[val] = key
return inv_vocab
def encode(self, text: str, is_code: bool = False) -> List[int]:
"""
"""
if not is_code:
seg_list = [x for x in self.jieba.cut(text)]
return self.tokenizer.encode(
seg_list, is_pretokenized=True, add_special_tokens=True).ids
else:
return self.tokenizer.encode(
text, is_pretokenized=False, add_special_tokens=True).ids
def decode(self, token_ids):
text = self.tokenizer.decode(token_ids, skip_special_tokens=True)
return text
@property
def eod(self):
return self.eod_id
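# Minimal usage sketch (added example, hedged): the tokenizer JSON path is a
# placeholder and must point to a HuggingFace-tokenizers file whose vocabulary
# defines '<|endoftext|>', '<sep>', and '\n', as the class above expects.
def _example_jieba_bpe_usage(tokenizer_json_file="zh-gpt-tokenizer.json"):
    tokenizer = JiebaBPETokenizer(tokenizer_json_file)
    ids = tokenizer.encode("今天天气不错")  # Jieba segments the sentence before BPE
    text = tokenizer.decode(ids)
    return ids, text, tokenizer.eod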
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/tokenizer.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import datetime
import json
import multiprocessing
import pathlib
import re
import shutil
import sys
import numpy as np
import torch # pytype: disable=import-error
# verify if root package is in PYTHONPATH
__root_package_path__ = pathlib.Path(__file__).parent.parent.parent.parent.parent.absolute().as_posix()
if __root_package_path__ not in sys.path:
print(
f"[ERROR] add project root directory to your PYTHONPATH with "
f"'export PYTHONPATH={__root_package_path__}:${{PYTHONPATH}}'"
)
from examples.pytorch.gpt.utils.gpt import DEFAULT_START_TAG, DEFAULT_END_TAG, OPENAI_GPT2_START_ID, OPENAI_GPT2_END_ID
from examples.pytorch.utils import torch2np, safe_transpose, cpu_map_location, gpu_map_location, WEIGHT2DTYPE
def _inject_model_parallel_rank(
filepath,
tensor_model_parallel_size=1,
pipeline_model_parallel_size=1,
tensor_model_parallel_rank=0,
pipeline_model_parallel_rank=0,
):
"""
Injects tensor/pipeline model parallel ranks into the filepath.
Does nothing if not using model parallelism.
"""
filepath = pathlib.Path(filepath)
if tensor_model_parallel_size > 1 or pipeline_model_parallel_size > 1:
# filepath needs to be updated to include mp_rank
if pipeline_model_parallel_size is None or pipeline_model_parallel_size == 1:
filepath = filepath.parent / f"mp_rank_{tensor_model_parallel_rank:02d}" / filepath.name
else:
filepath = (
filepath.parent /
f"mp_rank_{tensor_model_parallel_rank:02d}_{pipeline_model_parallel_rank:03d}" /
filepath.name
)
if not filepath.exists():
filepath = (
filepath.parent /
f"tp_rank_{tensor_model_parallel_rank:02d}_pp_rank_{pipeline_model_parallel_rank:03d}" /
filepath.name
)
return filepath
else:
if filepath.exists():
return filepath
else:
return filepath.parent / "mp_rank_00" / filepath.name
def _create_model_training_args_for_checkpoint_version_0(args, model_00):
model_training_args = argparse.Namespace()
if args.head_num is None or args.trained_tensor_parallel_size is None:
raise ValueError(
"Provided checkpoint have missing training args. "
"Thus it is required to provide -head_num and -trained_tensor_parallel_size CLI arguments"
)
model_training_args.num_attention_heads = args.head_num
model_training_args.tensor_model_parallel_size = args.trained_tensor_parallel_size
# megatron ckpt_ver=0 only supports pipeline_parallel_size = 1
model_training_args.pipeline_model_parallel_size = 1
model_training_args.max_position_embeddings = \
model_00["model"]["language_model"]["embedding"]["position_embeddings"]["weight"].shape[0]
model_training_args.hidden_size = \
model_00["model"]["language_model"]["embedding"]["position_embeddings"]["weight"].shape[1]
model_training_args.ffn_hidden_size = 4 * model_training_args.hidden_size
def get_layer_num_from_weights(model_keys):
layer_num = 1
for key in model_keys:
if re.search(r'\d+', key) is not None:
layer_num = max(int(re.search(r'\d+', key).group()), layer_num)
return layer_num + 1
model_training_args.num_layers = \
get_layer_num_from_weights(model_00["model"]["language_model"]['transformer'].keys())
model_training_args.layernorm_epsilon = 1e-6
return model_training_args
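# A short worked example of the fallback above (hypothetical Megatron-345M-like
# shapes): a position-embedding weight of shape (1024, 1024) gives
# max_position_embeddings=1024 and hidden_size=1024, ffn_hidden_size is assumed
# to be 4 * 1024 = 4096, and num_layers is recovered as the largest
# "layers.<i>." index found in the transformer keys plus one.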
# This tool is used to support the new megatron model trained by pipeline parallel + tensor parallel
def merge_and_convert_process(i, pipeline_para_rank, saved_dir, factor, key, model_training_args, transformer_model_list, ckpt_ver, np_weight_data_type):
saved_dir = pathlib.Path(saved_dir)
if key.find("layers.") != -1:
layer_index = (int)(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pipeline_para_rank * model_training_args.num_layers // model_training_args.pipeline_model_parallel_size))
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
if saved_key.find("adaptor1") != -1:
saved_key = saved_key.replace("adaptor1", "after_attention_adapter")
if saved_key.find("adaptor2") != -1:
saved_key = saved_key.replace("adaptor2", "after_ffn_adapter")
else:
saved_key = key
major_device = transformer_model_list[0][key].device
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("adaptor1.dense_4h_to_h.bias") != -1
or key.find("adaptor2.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1):
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir / f"model.{saved_key}.bin"
val = safe_transpose(transformer_model_list[0][key])
val = torch2np(val, np_weight_data_type)
val = np.squeeze(val)
val.tofile(saved_path)
elif (key.find("attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1
or key.find("adaptor1.dense_4h_to_h.weight") != -1
or key.find("adaptor2.dense_4h_to_h.weight") != -1):
vals = [
safe_transpose(transformer_model_list[k][key]).float().to(major_device)
for k in range(factor)
]
val = torch.cat(vals, dim=0)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
elif (key.find("mlp.dense_h_to_4h.weight") != -1
or key.find("adaptor1.dense_h_to_4h.weight") != -1
or key.find("adaptor2.dense_h_to_4h.weight") != -1
or key.find("mlp.dense_h_to_4h.bias") != -1
or key.find("adaptor1.dense_h_to_4h.bias") != -1
or key.find("adaptor2.dense_h_to_4h.bias") != -1):
vals = [
safe_transpose(transformer_model_list[k][key]).float().to(major_device)
for k in range(factor)
]
val = torch.cat(vals, dim=-1)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key]).float()
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads // model_training_args.tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.permute(1, 0, 2)
val = val.reshape(3, local_dim)
vals.append(val.to(major_device))
val = torch.cat(vals, dim=-1)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
vals = []
for k in range(factor):
val = safe_transpose(transformer_model_list[k][key]).float()
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads
size_per_head = hidden_dim // head_num
head_num = head_num // model_training_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.permute(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
vals.append(val.to(major_device))
val = torch.cat(vals, dim=-1)
val = torch2np(val, np_weight_data_type)
saved_path = saved_dir / f"model.{saved_key}.{i:d}.bin"
val.tofile(saved_path)
else:
print(f"[ERROR] cannot find key '{key}'")
def split_and_convert_process(i, pipeline_para_rank, saved_dir, factor, key, model_training_args, transformer_model_list, ckpt_ver, np_weight_data_type):
val = safe_transpose(transformer_model_list[0][key])
val = torch2np(val, np_weight_data_type)
if key.find("layers.") != -1:
layer_index = (int)(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pipeline_para_rank * model_training_args.num_layers // model_training_args.pipeline_model_parallel_size))
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
if saved_key.find("adaptor1") != -1:
saved_key = saved_key.replace("adaptor1", "after_attention_adapter")
if saved_key.find("adaptor2") != -1:
saved_key = saved_key.replace("adaptor2", "after_ffn_adapter")
else:
saved_key = key
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("adaptor1.dense_4h_to_h.bias") != -1
or key.find("adaptor2.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
):
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir / f"model.{saved_key}.bin"
val.tofile(saved_path.as_posix())
elif (key.find("attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1
or key.find("adaptor1.dense_4h_to_h.weight") != -1
or key.find("adaptor2.dense_4h_to_h.weight") != -1):
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (key.find("mlp.dense_h_to_4h.weight") != -1
or key.find("adaptor1.dense_h_to_4h.weight") != -1
or key.find("adaptor2.dense_h_to_4h.weight") != -1
or key.find("mlp.dense_h_to_4h.bias") != -1
or key.find("adaptor1.dense_h_to_4h.bias") != -1
or key.find("adaptor2.dense_h_to_4h.bias") != -1):
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.bias") != -1:
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads // model_training_args.tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads
size_per_head = hidden_dim // head_num
head_num = head_num // model_training_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"model.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
else:
print(f"[ERROR] cannot find key '{key}'")
def _get_checkpoint_name(checkpoint_dir):
checkpoint_dir = pathlib.Path(checkpoint_dir)
patterns = [
"model_optim_rng.pt", # older megatron checkpoints
"*last.ckpt", # newer format of checkpoints
]
for pattern in patterns:
model_files = sorted(list(checkpoint_dir.rglob(pattern)))
if model_files:
return model_files[0].name
raise ValueError(f"Could not find checkpoint files in {checkpoint_dir}")
def convert_checkpoint(args):
saved_dir = pathlib.Path(args.saved_dir) / f"{args.infer_gpu_num:d}-gpu"
if saved_dir.exists():
print(f"[ERROR] Remove {saved_dir} target directory before running conversion")
sys.exit(1)
saved_dir.mkdir(parents=True)
if args.vocab_path:
shutil.copy(args.vocab_path, (saved_dir / "vocab.json").as_posix())
if args.merges_path:
shutil.copy(args.merges_path, (saved_dir / "merges.txt").as_posix())
load_checkpoints_to_cpu = bool(args.load_checkpoints_to_cpu)
map_location_fn = cpu_map_location if load_checkpoints_to_cpu else gpu_map_location
checkpoints_dir = pathlib.Path(args.in_file)
checkpoint_name = _get_checkpoint_name(checkpoints_dir)
# load position_embedding from rank 0
checkpoints_paths = sorted(checkpoints_dir.rglob(checkpoint_name))
if not checkpoints_paths:
print(f"[ERROR] Cannot find checkpoint in {checkpoints_dir}.")
exit(1)
model_00 = torch.load(checkpoints_paths[0].as_posix(), map_location=map_location_fn)
if "hyper_parameters" in list(model_00.keys()):
print("Use nemo_ckpt_converter.py script for conversion of this checkpoint")
exit(1)
elif "args" in list(model_00.keys()):
checkpoint_version = model_00["checkpoint_version"]
model_training_args = model_00["args"]
megatron_gpt_key = "encoder"
else:
checkpoint_version = 0
model_training_args = _create_model_training_args_for_checkpoint_version_0(args, model_00)
megatron_gpt_key = "transformer"
with (saved_dir / "args.txt").open("w") as training_args_file:
for k, v in vars(model_training_args).items():
training_args_file.write(f"{k}:{v}\n")
np_weight_data_type = WEIGHT2DTYPE[args.weight_data_type]
val = model_00["model"]["language_model"]["embedding"]["position_embeddings"]["weight"]
val = torch2np(val, np_weight_data_type)
val.tofile((saved_dir / "model.wpe.bin").as_posix()) # not weight, do not need to transpose
del model_00
w_e_list = []
training_tensor_para_size = model_training_args.tensor_model_parallel_size
training_pipeline_para_size = model_training_args.pipeline_model_parallel_size
inference_tensor_para_size = args.infer_gpu_num
model_weights_paths = [
[
_inject_model_parallel_rank(
checkpoints_dir / checkpoint_name,
tensor_model_parallel_size=training_tensor_para_size,
pipeline_model_parallel_size=training_pipeline_para_size,
tensor_model_parallel_rank=tp_rank,
pipeline_model_parallel_rank=pp_rank,
)
for pp_rank in range(training_pipeline_para_size)
]
for tp_rank in range(training_tensor_para_size)
]
if training_tensor_para_size > inference_tensor_para_size:
assert training_tensor_para_size % inference_tensor_para_size == 0
is_merge_ckpt = True
factor = int(training_tensor_para_size / inference_tensor_para_size)
else:
assert inference_tensor_para_size % training_tensor_para_size == 0
is_merge_ckpt = False
factor = int(inference_tensor_para_size / training_tensor_para_size)
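    # Example of the factor computation above (hypothetical sizes): a model trained
    # with tensor parallel size 8 and converted for 2 inference GPUs is merged with
    # factor = 8 // 2 = 4; a model trained with tensor parallel size 2 and converted
    # for 8 inference GPUs is split with factor = 8 // 2 = 4.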
main_loop = min(training_tensor_para_size, inference_tensor_para_size)
vocab_size_list = [0 for i in range(main_loop)]
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
pool = multiprocessing.Pool(args.processes)
has_adapters = False
for i in range(main_loop):
for j in range(training_pipeline_para_size):
transformer_models = []
if is_merge_ckpt:
for k in range(factor):
m = torch.load(model_weights_paths[i * factor + k][j].as_posix(), map_location=map_location_fn)
if not has_adapters:
has_adapters = any("adaptor" in key for key in m['model']['language_model'][megatron_gpt_key].keys())
transformer_models.append(m["model"]["language_model"][megatron_gpt_key])
if j == 0:
vocab_size_list[i] = m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"].shape[0]
w_e_list.append(torch2np(m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"], np_weight_data_type))
else:
m = torch.load(model_weights_paths[i][j].as_posix(), map_location=map_location_fn)
if not has_adapters:
has_adapters = any("adaptor" in key for key in m['model']['language_model'][megatron_gpt_key].keys())
if j == 0:
vocab_size_list[i] = m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"].shape[0]
w_e_list.append(torch2np(
m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"],
np_weight_data_type
))
transformer_models.append(m["model"]["language_model"][megatron_gpt_key])
pool.starmap(
merge_and_convert_process if is_merge_ckpt else split_and_convert_process,
[
(
i,
j,
saved_dir,
factor,
k,
model_training_args,
transformer_models,
checkpoint_version,
np_weight_data_type,
)
for (k, v) in transformer_models[0].items()
],
)
pool.close()
pool.join()
torch.cuda.synchronize()
np.concatenate(w_e_list, axis=0).tofile((saved_dir / "model.wte.bin").as_posix())
# save vocab_size
full_vocab_size = sum(vocab_size_list)
if not hasattr(model_training_args, "padded_vocab_size"):
model_training_args.padded_vocab_size = full_vocab_size
# Configuration for the model (load by triton backends)
config = configparser.ConfigParser()
config["gpt"] = {}
if args.vocab_path:
vocab_path = pathlib.Path(args.vocab_path)
with vocab_path.open("r") as vocab_file:
vocab = json.load(vocab_file)
start_id, end_id = vocab[DEFAULT_START_TAG], vocab[DEFAULT_END_TAG]
else:
# hard coded values from english gpt_vocab.json file
start_id, end_id = str(OPENAI_GPT2_START_ID), str(OPENAI_GPT2_END_ID)
try:
config["gpt"]["model_name"] = "gpt"
config["gpt"]["head_num"] = str(model_training_args.num_attention_heads)
config["gpt"]["size_per_head"] = str(model_training_args.hidden_size // model_training_args.num_attention_heads)
config["gpt"]["inter_size"] = str(model_training_args.ffn_hidden_size)
config["gpt"]["num_layer"] = str(model_training_args.num_layers)
config["gpt"]["max_pos_seq_len"] = str(model_training_args.max_position_embeddings)
config["gpt"]["vocab_size"] = str(model_training_args.padded_vocab_size)
config["gpt"]["has_adapters"] = str(has_adapters)
config['gpt']['adapter_inter_size'] = str(model_training_args.project_size) if has_adapters else str(0)
config["gpt"]["layernorm_eps"] = str(model_training_args.layernorm_epsilon)
config["gpt"]["start_id"] = str(start_id)
config["gpt"]["end_id"] = str(end_id)
config["gpt"]["weight_data_type"] = args.weight_data_type
config["gpt"]["tensor_para_size"] = str(args.infer_gpu_num)
        with open((saved_dir / "config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
except Exception as e:
print(f"Fail to save the config in config.ini: {e}")
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--saved-dir", "-saved_dir", "-o", help="folder name of output files", required=True)
parser.add_argument(
"--in-file", "-in_file", "-i", help="file name of input checkpoint file", required=True
)
parser.add_argument(
"--infer-gpu-num", "-infer_gpu_num", "-i_g", type=int, help="How many gpus for inference", required=True
)
# -h_n and -t_g are needed when megatron_ckpt_version = 0, for example the public megatron 345M gpt model
parser.add_argument(
"--head-num",
"-head_num",
"-h_n",
type=int,
help="The number of heads, only needed when weight doesn't contain structure hyperparameters"
)
parser.add_argument(
"--trained-tensor-parallel-size",
"-trained_tensor_parallel_size",
"-t_g",
type=int,
help="the tensor parallel size for training"
)
parser.add_argument(
"--processes",
"-processes",
"-p",
type=int,
default=16,
help="How many processes to spawn for conversion",
)
parser.add_argument(
"--weight-data-type", "-weight_data_type", choices=["fp32", "fp16"], default="fp32", help=""
)
parser.add_argument(
"--load-checkpoints-to-cpu",
"-load_checkpoints_to_cpu",
"-cpu",
type=int,
choices=[0, 1],
default=1,
help="Whether to load model weights to CPU",
)
parser.add_argument(
"--vocab-path",
type=str,
help="Path to vocabulary file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--merges-path", type=str, help="Path to merges file to embed in FasterTransformer checkpoint", required=False
)
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
start_time = datetime.datetime.now()
convert_checkpoint(args)
run_time = datetime.datetime.now() - start_time
print(f"[INFO] Spent {run_time} (h:m:s) to convert the model")
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/megatron_ckpt_convert.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Convert a Hugging Face Meta OPT model. Uses https://huggingface.co/facebook/opt-125m as a demo.
'''
import argparse
import configparser
import multiprocessing
import numpy as np
import os
import sys
import torch
from datetime import datetime
from pathlib import Path
from tqdm import tqdm
from transformers import OPTForCausalLM, AutoModelForCausalLM, AutoTokenizer # transformers-4.20.0.dev0
from transformers.models.opt.modeling_opt import OPTAttention, OPTDecoderLayer
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
sys.path.append(dir_path)
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def quantize(mat, act_range):
# qkv proj weight quantization
if mat.ndim == 3 and mat.shape[1] == 3:
# get max_q, max_k, max_v
mat_max = np.abs(mat).clip(1e-8, None).max(axis=(0,2))[None, :, None]
else:
mat_max = np.abs(mat).clip(1e-8, None).max()
act_scale_in = 127. / np.array(act_range[0])
weight_scales = 127. / mat_max
act_scale_post = 127. / np.array(act_range[1])
mat_quant = (mat * weight_scales).round().astype(np.int8)
return mat_quant, weight_scales, act_scale_in, act_scale_post
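# A tiny worked example of the symmetric scheme above (illustrative numbers,
# not taken from a real checkpoint): for a weight matrix whose max(|w|) is 0.5
# and an activation range of (2.0, 4.0),
#   weight_scales  = 127 / 0.5 = 254.0
#   act_scale_in   = 127 / 2.0 = 63.5
#   act_scale_post = 127 / 4.0 = 31.75
# so a weight entry of 0.25 is stored as round(0.25 * 254) = 64 in int8.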
def split_and_convert_process(i, saved_dir, factor, key, args, val, capture_dict, old_name, dtype):
def save_val(val, key, tp_num=None):
path = saved_dir + "/model." + key
if tp_num is not None:
path += "." + str(tp_num)
path += ".bin"
val.tofile(path)
quantized_out = args.act_scale is not None
if "input_layernorm.weight" in key or "input_layernorm.bias" in key or \
"attention.dense.bias" in key or "post_attention_layernorm.weight" in key or \
"post_attention_layernorm.bias" in key or "mlp.dense_4h_to_h.bias" in key or \
"final_layernorm.weight" in key or "final_layernorm.bias" in key:
# shared weights, only need to convert the weights of rank 0
if i == 0:
save_val(val, key)
elif "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key:
if quantized_out:
val_q, weight_scales, act_scale_pre, act_scale_post = quantize(val, capture_dict[old_name])
save_val(act_scale_pre.astype(dtype), key.replace("weight", "scale"))
scale_inter = (act_scale_post / (act_scale_pre * weight_scales)).astype(dtype)
save_val(scale_inter, key.replace("weight", "scale_inter"))
save_val((1. / act_scale_post).astype(dtype), key.replace("weight", "scale_out"))
split_vals_q = np.split(val_q, factor, axis=0)
for j in range(factor):
save_val(split_vals_q[j], key + ".int8", i * factor + j)
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
save_val(split_vals[j], key, i * factor + j)
elif "mlp.dense_h_to_4h.weight" in key or "mlp.dense_h_to_4h.bias" in key:
if quantized_out and "weight" in key:
val_q, weight_scales, act_scale_pre, act_scale_post = quantize(val, capture_dict[old_name])
save_val(act_scale_pre.astype(dtype), key.replace("weight", "scale"))
scale_inter = (act_scale_post / (act_scale_pre * weight_scales)).astype(dtype)
save_val(scale_inter, key.replace("weight", "scale_inter"))
save_val((1. / act_scale_post).astype(dtype), key.replace("weight", "scale_out"))
split_vals_q = np.split(val_q, factor, axis=-1)
for j in range(factor):
save_val(split_vals_q[j], key + ".int8", i * factor + j)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
save_val(split_vals[j], key, i * factor + j)
elif "attention.query_key_value.bias" in key:
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
save_val(split_vals[j], key, i * factor + j)
elif "attention.query_key_value.weight" in key:
hidden_dim = val.shape[0]
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(hidden_dim, 3, local_dim)
if quantized_out:
val_q, weight_scales, act_scale_pre, act_scale_post = quantize(val, capture_dict[old_name])
weight_scales = weight_scales[0] * np.ones((3, local_dim // factor))
save_val(act_scale_pre.astype(dtype), key.replace("weight", "scale"))
scale_inter = (act_scale_post[:, None] / (act_scale_pre[:, None] * weight_scales)).astype(dtype)
save_val(scale_inter, key.replace("weight", "scale_inter"))
save_val((1. / act_scale_post).astype(dtype), key.replace("weight", "scale_out"))
split_vals_q = np.split(val_q, factor, axis=-1)
for j in range(factor):
save_val(split_vals_q[j], key + ".int8", i * factor + j)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
save_val(split_vals[j], key, i * factor + j)
else:
print("[ERROR] cannot find key '{}'".format(key))
def fuse_qkv_weight(q, k, v):
if isinstance(q, float):
qkv = torch.tensor((q, k, v))
else:
qkv = torch.cat([q, k, v], dim=-1)
return qkv
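# For 2-D projection weights (already permuted to (in_dim, out_dim) further below),
# the fusion above produces a (hidden, 3 * hidden) tensor; when called with scalar
# activation ranges it simply stacks the three values into a length-3 tensor.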
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
    if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
ckpt_name = args.in_file
t_gpu_num = args.trained_gpu_num
i_gpu_num = args.infer_gpu_num
assert(i_gpu_num % t_gpu_num == 0)
save_int8 = args.act_scale is not None
    factor = i_gpu_num // t_gpu_num
# load position_embedding from rank 0
model = AutoModelForCausalLM.from_pretrained(args.in_file, device_map="auto")
capture_dict = None
if args.act_scale is not None:
capture_dict = {}
for key, values in torch.load(args.act_scale).items():
capture_dict[key + ".weight"] = (values["input"], values["output"])
hf_config = vars(model.config)
num_layers = hf_config["num_hidden_layers"]
layer_names = [name for name, param in model.named_parameters()]
# NOTE: save parameters to config files (loaded by triton backends)
config = configparser.ConfigParser()
config["gpt"] = {}
has_post_decoder_layernorm = "model.decoder.final_layer_norm.bias" in layer_names
try:
config["gpt"]["model_name"] = "opt" if hf_config["_name_or_path"] == '' else hf_config["_name_or_path"]
config["gpt"]["head_num"] = str(hf_config["num_attention_heads"])
n_embd = hf_config["hidden_size"]
config["gpt"]["size_per_head"] = str(n_embd // hf_config["num_attention_heads"])
config["gpt"]["inter_size"] = str(hf_config["ffn_dim"])
config['gpt']['max_pos_seq_len'] = str(hf_config['max_position_embeddings'])
config["gpt"]["num_layer"] = str(hf_config["num_hidden_layers"])
config["gpt"]["layernorm_eps"] = "1e-5";
config["gpt"]["layernorm_type"] = "pre_layernorm" if hf_config["do_layer_norm_before"] else "post_layernorm"
config["gpt"]["activation_type"] = "Relu"
config["gpt"]["has_post_decoder_layernorm"] = "1" if has_post_decoder_layernorm else "0"
config["gpt"]["vocab_size"] = str(hf_config["vocab_size"])
config["gpt"]["start_id"] = str(hf_config["bos_token_id"])
config["gpt"]["end_id"] = str(hf_config["eos_token_id"])
config['gpt']['weight_data_type'] = args.weight_data_type
config['gpt']['int8'] = str(save_int8) # really useful?
with open(saved_dir + "/config.ini", 'w') as configfile:
config.write(configfile)
    except Exception as e:
        print(f"Failed to save the config in config.ini: {e}")
np_weight_data_type = get_weight_data_type(args.weight_data_type)
huggingface_model_name_pattern = [
"self_attn_layer_norm.bias",
"self_attn_layer_norm.weight",
"self_attn.qkv_proj.bias",
"self_attn.qkv_proj.weight",
"self_attn.out_proj.bias",
"self_attn.out_proj.weight",
"final_layer_norm.bias",
"final_layer_norm.weight",
"fc1.bias",
"fc1.weight",
"fc2.bias",
"fc2.weight",
]
ft_model_name_pattern = [
"input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.bias",
"attention.query_key_value.weight",
"attention.dense.bias",
"attention.dense.weight",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h.weight",
"mlp.dense_4h_to_h.bias",
"mlp.dense_4h_to_h.weight",
]
model_named_parameters_iter = model.named_parameters()
model_named_parameters = dict()
for name, param in model_named_parameters_iter:
if "embed" in name:
model_named_parameters[name] = param
elif "project_in" in name:
model_named_parameters[name] = param.permute(1, 0)
elif "project_out" in name:
model_named_parameters[name] = param
else:
model_named_parameters[name] = param.permute(1, 0) if len(param.shape) == 2 else param
for l in range(num_layers):
prefix = f'model.decoder.layers.{l}.self_attn.'
q_weight = model_named_parameters[prefix + 'q_proj.weight']
k_weight = model_named_parameters[prefix + 'k_proj.weight']
v_weight = model_named_parameters[prefix + 'v_proj.weight']
q_bias = model_named_parameters[prefix + 'q_proj.bias']
k_bias = model_named_parameters[prefix + 'k_proj.bias']
v_bias = model_named_parameters[prefix + 'v_proj.bias']
qkv_weight = fuse_qkv_weight(q_weight, k_weight, v_weight)
qkv_bias = fuse_qkv_weight(q_bias, k_bias, v_bias)
if save_int8:
qkv_scales = (capture_dict[prefix + 'q_proj.weight'],
capture_dict[prefix + 'k_proj.weight'],
capture_dict[prefix + 'v_proj.weight'])
capture_dict[prefix + 'qkv_proj.weight'] = (fuse_qkv_weight(qkv_scales[0][0], qkv_scales[1][0], qkv_scales[2][0]),
fuse_qkv_weight(qkv_scales[0][1], qkv_scales[1][1], qkv_scales[2][1]))
model_named_parameters[prefix + 'qkv_proj.weight'] = qkv_weight
model_named_parameters[prefix + 'qkv_proj.bias'] = qkv_bias
pool = multiprocessing.Pool(args.processes)
padding_offset = 2
for name, param in model_named_parameters.items():
if name == 'model.decoder.embed_positions.weight':
param[padding_offset:,...].detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wpe.bin")
elif name == 'model.decoder.embed_tokens.weight':
if 'model.decoder.project_in.weight' in model_named_parameters.keys():
project_in = model_named_parameters['model.decoder.project_in.weight']
project_out = model_named_parameters['model.decoder.project_out.weight']
torch.matmul(param, project_in).detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wte.bin")
torch.matmul(param, project_out).detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.weight.bin")
else:
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wte.bin")
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.weight.bin")
elif name == 'model.decoder.final_layer_norm.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.weight.bin")
elif name == 'model.decoder.final_layer_norm.bias':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.bias.bin")
elif "project_in" in name or "project_out" in name:
continue
else:
starmap_args = []
for i in range(len(huggingface_model_name_pattern)):
if huggingface_model_name_pattern[i] in name:
new_name = name.replace("model.decoder.layers.", "layers.").replace(huggingface_model_name_pattern[i], ft_model_name_pattern[i])
starmap_args.append((0, saved_dir, factor, new_name, args,
param.detach().cpu().numpy().astype(np_weight_data_type),
capture_dict, name, np_weight_data_type))
pool.starmap_async(split_and_convert_process, starmap_args)
pool.close()
pool.join()
if __name__ == "__main__":
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-saved_dir', '-o', type=str, help='file name of output file', required=True)
parser.add_argument('-in_file', '-i', type=str, help='file name of input checkpoint file', required=True)
    parser.add_argument('-trained_gpu_num', '-t_g', type=int, help='How many gpus were used for training', default=1)
parser.add_argument('-infer_gpu_num', '-i_g', type=int, help='How many gpus for inference', required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)", default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
parser.add_argument("-act_scale", default=None, help="path to activation scalings for int8 conversion")
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
start_time = datetime.now()
split_and_convert(args)
stop_time = datetime.now()
run_time = (stop_time - start_time)
print(f"[INFO] Spend {run_time} (h:m:s) to convert the model")
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/huggingface_opt_convert.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Convert HuggingFace pretrained checkpoint into FT format.
"""
import argparse
import configparser
import logging
import multiprocessing
import os
import re
import time
from pathlib import Path
from typing import Dict, List, Optional, Union
import numpy as np
import torch
import torch.nn
from transformers import AutoModel, BloomConfig, PretrainedConfig
PathLike = Union[str, Path]
DATATYPE_MAP = dict(
fp32=torch.float32,
fp16=torch.float16
)
_args = None
logger = logging.getLogger() # get the root logger.
def set_logger(verbose=False):
logging.basicConfig(
# do not print logging level to make it print-like.
format='%(message)s',
level=logging.DEBUG if verbose else logging.INFO)
def get_args():
global _args
if _args is not None:
return _args
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-i', '--input-dir', type=str, metavar='DIR', required=True,
help='A checkpoint directory of a huggingface pretrained model.')
parser.add_argument(
'-o', '--output-dir', type=str, metavar='DIR', required=True,
help='A directory where converted binary files for FT will be saved.')
parser.add_argument(
'-tp', '--tensor-para-size', type=int, metavar='N', default=1,
help='The tensor parallel size for inference.')
parser.add_argument(
'-dt', '--data-type', type=str, metavar='STR', default='fp32',
choices=list(DATATYPE_MAP),
help='A data type of converted weights.')
parser.add_argument(
'-p', '--processes', type=int, metavar='N', default=1,
help='The number of parallel processes to use for conversion.')
parser.add_argument(
'-v', '--verbose', action='store_true',
help='Enable verbose logging')
parser.add_argument(
'-s', '--by-shard', action='store_true',
help='Process shard by shard, enable when converting big model like bloom 175B')
_args = parser.parse_args()
set_logger(_args.verbose)
logger.info('\n======================= Arguments =======================')
for k, v in vars(_args).items():
logger.info(f' - {k.ljust(20, ".")}: {v}')
logger.info('=========================================================')
return _args
parameter_prefix_map = {
r'^h.': 'layers.',
}
# pattern and replacement map.
parameter_rename_map = {
    # CausalLM weights
'word_embeddings.weight': 'wte',
'word_embeddings_layernorm.weight': 'pre_decoder_layernorm.weight',
'word_embeddings_layernorm.bias': 'pre_decoder_layernorm.bias',
'ln_f.weight': 'final_layernorm.weight',
'ln_f.bias': 'final_layernorm.bias',
# Layer weights
'self_attention.dense.weight': 'attention.dense.weight',
'self_attention.dense.bias': 'attention.dense.bias',
'self_attention.query_key_value.weight': 'attention.query_key_value.weight',
'self_attention.query_key_value.bias': 'attention.query_key_value.bias',
}
parameter_to_split = [
# tuple of (name, index to split)
('attention.query_key_value.weight', -1),
('attention.query_key_value.bias', -1),
('attention.dense.weight', 0),
('mlp.dense_h_to_4h.weight', -1),
('mlp.dense_h_to_4h.bias', -1),
('mlp.dense_4h_to_h.weight', 0)
]
def safe_transpose(param: torch.nn.Parameter):
return param.T if len(param.shape) == 2 else param
def convert_parameter_name(name: str):
# A parameter in BloomForCausalLM has an additional prefix 'transformer.'.
if name.startswith('transformer.'):
name = name[len('transformer.'):]
# Regularize the weight prefix.
for pattern, rename in parameter_prefix_map.items():
if re.match(pattern, name):
name = re.sub(pattern, rename, name)
break
# Rename weight names.
name_suffix = re.sub(r'layers.\d+.', '', name)
if name_suffix in parameter_rename_map:
name = name.replace(name_suffix, parameter_rename_map[name_suffix])
    # A GPT weight of FT has the prefix "model.".
return 'model.' + name
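# Illustrative renames produced by the helpers above (parameter names follow
# the BLOOM checkpoints on the HF hub; exact keys may vary between revisions):
#   'transformer.word_embeddings.weight'          -> 'model.wte'
#   'transformer.h.0.self_attention.dense.weight' -> 'model.layers.0.attention.dense.weight'
#   'transformer.ln_f.bias'                       -> 'model.final_layernorm.bias'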
def is_split_param(name: str):
for phrase, _ in parameter_to_split:
if phrase in name:
return True
return False
def axis_to_split(name: str):
for phrase, axis in parameter_to_split:
if phrase in name:
return axis
    raise ValueError(f'Got an unexpected parameter name to split: {name}')
# Exception handling.
def reorder_qkv_weight_or_bias(model_config: PretrainedConfig,
name: str,
param: torch.nn.Parameter):
""" Reorder the qkv weight to use at FT.
Note that the shape of the fused QKV weights in HF is different from the
shape that FT requires.
HF: (hidden_size, num_heads x 3 x head_dim)
FT: (hidden_size, 3 x num_heads x head_dim)
    This is unlike other models in HF, e.g. GPT, whose fused QKV weights
    already have the shape FT expects, i.e., (hidden_size, 3 x num_heads x head_dim).
    Also, to split across attention heads in tensor parallel, we reshape the qkv
    weight to (hidden, 3, num_heads x head_dim) and the bias to
    (3, num_heads x head_dim).
# Args.
model_config: PretrainedConfig, a model configuration.
name: str, a parameter name.
param: torch.nn.Parameter, a fused QKV weight or bias. of shape
(..., num_heads * 3 * head_dim).
# Returns.
torch.nn.Parameter, a reordered fused QKV weight of size
(..., 3, num_heads * head_dim).
"""
if 'query_key_value' not in name:
# Nothing to do for the non-eligible parameters.
return param
num_heads = model_config.n_head
head_dim = model_config.hidden_size // model_config.n_head
# (..., 3 x hidden) view as (..., num_heads, 3, head_dim)
param = param.view(-1, num_heads, 3, head_dim)
# permute to (..., 3, num_heads, head_dim)
param = param.permute(0, 2, 1, 3)
# final shape: weight=(hidden, 3, hidden) or bias=(3, hidden)
if 'query_key_value.bias' in name:
return param.reshape(3, num_heads * head_dim)
return param.reshape(-1, 3, num_heads * head_dim)
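# A small illustrative check of the reordering above (hypothetical sizes):
# with num_heads=2 and head_dim=4, one row of the HF fused QKV weight is laid
# out head-by-head as [q_h0, k_h0, v_h0, q_h1, k_h1, v_h1]; after the
# view/permute/reshape it becomes [q_h0, q_h1, k_h0, k_h1, v_h0, v_h1], i.e.
# grouped by q/k/v as the FT GPT kernels expect.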
def handle_exceptions(model_config: PretrainedConfig,
param_name: str,
param: torch.nn.Parameter):
if 'query_key_value' in param_name:
param = reorder_qkv_weight_or_bias(model_config, param_name, param)
elif 'wte' in param_name:
# The input word embedding shouldn't be transposed.
param = param.T
return param
def convert_and_save_parameter(param_name: str,
param,
tensor_para_size: Optional[int],
save_dir: PathLike):
""" Convert and save to FT parameter
Split a param into tensor_para_size if needed, and save each split param at
{save_dir}/{param_name}.bin or {save_dir}/{param_name}.{tp_idx}.bin in case
of a split param.
# Args.
model_config: PretrainedConfig, a model configuration.
name: str, parameter name.
param: torch.nn.Parameter, a model parameter to convert.
tensor_para_size: int, tensor parallel size.
save_dir: str or Path, a base directory of binary files.
"""
save_dir = Path(save_dir)
if not is_split_param(param_name):
save_path = save_dir / f'{param_name}.bin'
param.tofile(save_path)
logger.debug(
f' - {param_name.ljust(48, ".")}: shape {str(param.shape):16s} '
f'| saved at {str(save_path)}')
return
axis = axis_to_split(param_name)
split_params = np.split(param, tensor_para_size, axis=axis)
for tp_idx, split_param in zip(range(tensor_para_size), split_params):
save_path = save_dir / f'{param_name}.{tp_idx}.bin'
split_param.tofile(save_path)
logger.debug(
f' - {param_name.ljust(48, ".")}: shape {str(split_param.shape):16s} s '
f'| saved at {str(save_path)} ({tp_idx}/{tensor_para_size})')
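# Example of the files produced above (hypothetical, for tensor_para_size=2):
#   model.final_layernorm.weight.bin                        (shared, not split)
#   model.layers.0.attention.query_key_value.weight.0.bin   (tensor-parallel shard 0)
#   model.layers.0.attention.query_key_value.weight.1.bin   (tensor-parallel shard 1)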
def save_bloom_config(model_config: BloomConfig, save_dir: PathLike):
""" Save Bloom model configuration.
Args:
model_config: HF pretrained model configuration.
save_dir: a directory to save the config file.
"""
args = get_args()
save_dir = Path(save_dir)
save_dir.parent.mkdir(exist_ok=True, parents=True)
config = configparser.ConfigParser()
# FT's layernorm type string.
if model_config.apply_residual_connection_post_layernorm:
model_variant = 'bloom-post'
layernorm_type = 'post_layernorm'
else:
model_variant = 'bloom-pre'
layernorm_type = 'pre_layernorm'
# We use the section name `gpt` since FT runs BLOOM model through the GPT
# module, which requires the section name `gpt` to retrieve the weight
# data type.
config['gpt'] = dict(
model_name=model_config.name_or_path,
num_layer=model_config.n_layer,
head_num=model_config.n_head,
# inter_size is fixed in bloom model by 4 * hidden_size and a model
# config does not include the intermediate dimension of FFN.
inter_size=4 * model_config.hidden_size,
size_per_head=model_config.hidden_size // model_config.n_head,
vocab_size=model_config.vocab_size,
tensor_para_size=args.tensor_para_size,
weight_data_type=args.data_type,
# GPT variant params
model_variant=model_variant,
layernorm_eps=model_config.layer_norm_epsilon,
layernorm_type=layernorm_type,
activation_type='Gelu',
has_positional_encoding=False,
has_pre_decoder_layernorm=True,
has_post_decoder_layernorm=True,
use_attention_linear_bias=True,
# Special token ids.
start_id=model_config.bos_token_id,
end_id=model_config.eos_token_id,
)
with (save_dir / 'config.ini').open('w') as f:
config.write(f, space_around_delimiters=False)
def load_state_dict(file_path: Path, dtype: torch.dtype) -> Dict[str, torch.Tensor]:
""" Load weights from model file
`safetensors` or `pytorch binary` is supported
# Args.
file_path: model file path, ends with .bin or .safetensors.
dtype: torch.dtype, data type.
# Returns.
Dict[str, torch.Tensor]
"""
state_dict = {}
if file_path.suffix == ".safetensors":
# load from safetensors file
from safetensors import safe_open
with safe_open(file_path, framework="pt", device="cpu") as f:
for k in f.keys():
state_dict[k] = f.get_tensor(k).type(dtype)
else:
# load from pytorch bin file
state_dict = torch.load(file_path, map_location="cpu")
for k in state_dict:
state_dict[k] = state_dict[k].type(dtype)
return state_dict
def get_model_files(model_name: str) -> List[Path]:
""" List all model files that you want to load and convert
# Args.
model_name: name(like `bigscience/bloom`) or local directory of the model
# Returns.
List[Path] model file paths
"""
import glob
from huggingface_hub import try_to_load_from_cache
model_dir = model_name
# get the local model directory
try:
config_file = "config.json"
# will fall back to HUGGINGFACE_HUB_CACHE
config_path = try_to_load_from_cache(
model_name, config_file, cache_dir=os.getenv("TRANSFORMERS_CACHE")
)
if config_path is not None:
            # treat the model name as a Hugging Face hub model
            model_dir = os.path.dirname(config_path)
    except Exception:
# treat the model name as an explicit model path
pass
model_files = glob.glob(model_dir + "/*.bin")
try:
from safetensors import safe_open as _
st_files = glob.glob(model_dir + "/*.safetensors")
if st_files:
model_files = st_files
logger.info("loading from safetensors format")
except ImportError:
logger.info("loading from pytorch bin format")
if not model_files:
raise FileNotFoundError('model files not found')
logger.info(f"model file num: {len(model_files)}")
return [Path(i) for i in model_files]
def process_by_model_param(model_id: str, dtype: torch.dtype, tp_size: int, save_dir: Path, nproc: int):
""" Process conversion parameter by parameter.
"""
# init bloom config
model_config = BloomConfig.from_pretrained(model_id)
# list all model files
model_files = get_model_files(model_id)
# save bloom config to output dir
save_bloom_config(model_config, save_dir)
if nproc > 1:
pool = multiprocessing.Pool(nproc)
star_args = []
for model_file in model_files:
state_dict = load_state_dict(model_file, dtype)
for name in state_dict:
param = state_dict[name]
# Preprocess
param_name = convert_parameter_name(name)
param = safe_transpose(param)
param = handle_exceptions(model_config, param_name, param)
star_args.append((param_name, param.detach().cpu().numpy(), tp_size, save_dir))
pool.starmap_async(convert_and_save_parameter, star_args)
pool.close()
pool.join()
else:
for model_file in model_files:
state_dict = load_state_dict(model_file, dtype)
for name in state_dict:
param = state_dict[name]
# Preprocess
param_name = convert_parameter_name(name)
param = safe_transpose(param)
param = handle_exceptions(model_config, param_name, param)
convert_and_save_parameter(param_name, param.detach().cpu().numpy(), tp_size, save_dir)
def _process_by_model_shard(model_config, model_file, dtype: torch.dtype, tp_size: int, save_dir: Path):
state_dict = load_state_dict(model_file, dtype)
for name in state_dict:
param = state_dict[name]
# Preprocess
param_name = convert_parameter_name(name)
param = safe_transpose(param)
param = handle_exceptions(model_config, param_name, param)
convert_and_save_parameter(param_name, param.detach().cpu().numpy(), tp_size, save_dir)
def process_by_model_shard(model_id: str, dtype: torch.dtype, tp_size: int, save_dir: Path, nproc: int):
""" Process conversion shard by shard.
Benchmarks @ 64C(Intel Xeon 6326 2.90GH) x 756G:
| model | format | by-shard | nproc | elapsed(s) | mem |
|------------|------------------|----------|-------|------------|------|
| bloom-175b | safetensors x 72 | NO | 8 | 1516.66 | 350G |
| bloom-175b | safetensors x 72 | YES | 8 | 1165.03 | 50G |
| bloom-175b | safetensors x 72 | YES | 24 | 494.81 | 150G |
"""
# init bloom config
model_config = BloomConfig.from_pretrained(model_id)
# list all model files
model_files = get_model_files(model_id)
# save bloom config to output dir
save_bloom_config(model_config, save_dir)
if nproc > 1:
pool = multiprocessing.Pool(nproc)
star_args = []
for model_file in model_files:
star_args.append((model_config, model_file, dtype, tp_size, save_dir))
pool.starmap_async(_process_by_model_shard, star_args)
pool.close()
pool.join()
else:
for model_file in model_files:
_process_by_model_shard(model_config, model_file, dtype, tp_size, save_dir)
def main():
start_time = time.time()
args = get_args()
tp_size = args.tensor_para_size
dtype = DATATYPE_MAP[args.data_type]
save_dir = Path(args.output_dir) / f'{tp_size}-gpu'
save_dir.mkdir(exist_ok=True, parents=True)
if args.by_shard:
process_by_model_shard(args.input_dir, dtype, tp_size, save_dir, args.processes)
else:
process_by_model_param(args.input_dir, dtype, tp_size, save_dir, args.processes)
elapsed_time = time.time() - start_time
    logger.info(f'Checkpoint conversion (HF >> FT) is done '
f'(elapsed time: {elapsed_time:.2f} sec)')
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/huggingface_bloom_convert.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from abc import abstractmethod
from pathlib import Path
from typing import List, Literal, Optional, Union
import os
import numpy as np
import torch
from . import comm
from . import profiler
from .gpt import GptInitModelParameters
PathLike = Union[str, Path]
def to_numpy_dtype(maybe_str_dtype: Union[str, np.dtype]):
assert isinstance(maybe_str_dtype, (str, np.dtype))
if isinstance(maybe_str_dtype, str):
try:
dtype = {
'fp16': np.float16,
'float16': np.float16,
'fp32': np.float32,
'float32': np.float32,
}[maybe_str_dtype]
except KeyError:
raise ValueError(f'Cannot convert to numpy data type, got {maybe_str_dtype}')
else:
dtype = maybe_str_dtype
return dtype
def to_torch_dtype(maybe_str_dtype: Union[str, torch.dtype]):
if isinstance(maybe_str_dtype, torch.dtype):
dtype = maybe_str_dtype
else:
try:
dtype = {
"bf16": torch.bfloat16,
"fp16": torch.float16,
"fp32": torch.float32,
"bfloat16": torch.bfloat16,
"float16": torch.float16,
"float32": torch.float32,
}[maybe_str_dtype]
except KeyError:
raise ValueError(f"Cannot convert to torch data type, got {maybe_str_dtype}")
return dtype
def load_weight_from_bin(checkpoint_path: PathLike,
shape: List[int],
weight_dtype: Union[str, np.dtype]):
""" Load a weight from a bin file.
# Args.
checkpoint_path: str or Path, a checkpoint file path of an FT's layer weight.
shape: list of int, the shape of weight tensor.
weight_dtype: str or np.dtype, the data type of the stored weight.
"""
weight_dtype = to_numpy_dtype(weight_dtype)
    return torch.from_numpy(np.fromfile(checkpoint_path, dtype=weight_dtype)).reshape(shape)
LayernormType = Literal['pre_layernorm', 'post_layernorm']
class GptLayerWeights:
def __init__(self,
num_heads: int,
size_per_head: int,
inter_size: int,
num_layers: int,
tensor_para_size: int = 1,
pipeline_para_size: int = 1,
has_adapters: bool = False,
adapter_inter_size: int = 0,
int8_mode: int = 0):
assert num_heads % tensor_para_size == 0, \
f'num_heads ({num_heads}) is not multiple of tensor para size ({tensor_para_size})'
self.num_heads = num_heads
self.size_per_head = size_per_head
self.hidden_units = num_heads * size_per_head
self.num_layers = num_layers
self.tensor_para_size = tensor_para_size
self.tensor_para_rank = comm.get_tensor_para_rank()
self.pipeline_para_size = pipeline_para_size
self.pipeline_para_rank = comm.get_pipeline_para_rank()
self.has_adapters = has_adapters
self.adapter_inter_size = adapter_inter_size
self.local_num_layers = num_layers // pipeline_para_size
self.local_num_heads = num_heads // tensor_para_size
self.local_hidden_units = self.local_num_heads * size_per_head
self.local_inter_size = inter_size // tensor_para_size
self.local_adapter_inter_size = self.adapter_inter_size // tensor_para_size
self.weight_transpose_calibrate_quantize = None
assert int8_mode in [0, 1], "Invalid int8 mode for GPT. Must be 0 or 1"
self.int8_mode = int8_mode
if self.int8_mode == 1:
quant = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix
self.weight_transpose_calibrate_quantize = lambda x : quant(x, torch.int8)
self.weights = None
self.int8_weights = None
self.int8_scales = None
self.expected_weight_shapes = list()
# pylint:disable=line-too-long
# Transformer blocks
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # input layernorm weight
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # input layernorm bias
self.expected_weight_shapes.extend([(self.hidden_units, self.local_hidden_units * 3)] * self.local_num_layers) # attention qkv weight
self.expected_weight_shapes.extend([(self.local_hidden_units * 3,)] * self.local_num_layers) # attention qkv bias
self.expected_weight_shapes.extend([(self.local_hidden_units, self.hidden_units)] * self.local_num_layers) # attention dense weight
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # attention dense bias
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # post attention layernorm weight
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # post attention layernorm bias
self.expected_weight_shapes.extend([(self.hidden_units, self.local_inter_size)] * self.local_num_layers) # ffn_kernel1
self.expected_weight_shapes.extend([(self.local_inter_size,)] * self.local_num_layers) # ffn_bias1
self.expected_weight_shapes.extend([(self.local_inter_size, self.hidden_units)] * self.local_num_layers) # ffn_kernel2
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # ffn_bias2
# Adapters
if self.has_adapters:
self.expected_weight_shapes.extend([(self.hidden_units, self.local_adapter_inter_size)] * self.local_num_layers) # adaptor1_kernel1
self.expected_weight_shapes.extend([(self.local_adapter_inter_size,)] * self.local_num_layers) # adaptor1_bias1
self.expected_weight_shapes.extend([(self.local_adapter_inter_size, self.hidden_units)] * self.local_num_layers) # adaptor1_kernel2
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # adaptor1_bias2
self.expected_weight_shapes.extend([(self.hidden_units, self.local_adapter_inter_size)] * self.local_num_layers) # adaptor2_kernel1
self.expected_weight_shapes.extend([(self.local_adapter_inter_size,)] * self.local_num_layers) # adaptor2_bias1
self.expected_weight_shapes.extend([(self.local_adapter_inter_size, self.hidden_units)] * self.local_num_layers) # adaptor2_kernel2
self.expected_weight_shapes.extend([(self.hidden_units,)] * self.local_num_layers) # adaptor2_bias2
# pylint:enable=line-too-long
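        # Illustrative expected shapes (hypothetical config: num_heads=16,
        # size_per_head=64, inter_size=4096, tensor_para_size=2): per tensor-parallel
        # rank, each attention qkv weight is (1024, 1536), each ffn kernel1 is
        # (1024, 2048), and every layernorm weight/bias is (1024,).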
@classmethod
def from_config(cls, config: GptInitModelParameters):
return cls(num_heads=config.head_num,
size_per_head=config.size_per_head,
inter_size=4 * config.head_num * config.size_per_head,
num_layers=config.layer_num,
tensor_para_size=config.tensor_para_size,
pipeline_para_size=config.pipeline_para_size,
has_adapters=config.has_adapters,
adapter_inter_size=config.adapter_inter_size,
int8_mode=config.int8_mode)
@property
def dtype(self):
return self.weights[0].dtype
@property
def device(self):
return self.weights[0].device
def _map(self, func):
for i in range(len(self.weights)):
if isinstance(self.weights[i], list):
for j in range(len(self.weights[i])):
self.weights[i][j] = func(self.weights[i][j])
else:
self.weights[i] = func(self.weights[i])
def _map_int8(self, func):
for i in range(len(self.int8_weights)):
if isinstance(self.int8_weights[i], list):
for j in range(len(self.int8_weights[i])):
self.int8_weights[i][j] = func(self.int8_weights[i][j])
else:
self.int8_weights[i] = func(self.int8_weights[i])
for i in range(len(self.int8_scales)):
if isinstance(self.int8_scales[i], list):
for j in range(len(self.int8_scales[i])):
self.int8_scales[i][j] = func(self.int8_scales[i][j])
else:
self.int8_scales[i] = func(self.int8_scales[i])
def float(self):
if self.dtype == torch.float32:
return
self._map(lambda x: x.float())
def half(self):
if self.dtype == torch.float16:
return
self._map(lambda x: x.half())
if self.int8_mode == 1:
self._map_int8(lambda w: w.half())
def bfloat16(self):
if self.dtype == torch.bfloat16:
return
self._map(lambda x: x.bfloat16())
if self.int8_mode == 1:
self._map_int8(lambda w: w.bfloat16())
def cuda(self, device=None):
self._map(lambda x: x.cuda(device))
if self.int8_mode == 1:
self._map_int8(lambda x: x.cuda(device))
def to(self, device=None):
self._map(lambda x: x.to(device))
if self.int8_mode == 1:
self._map_int8(lambda x: x.to(device))
def is_valid_pp_group(self, layer, pp_rank):
        return layer // self.local_num_layers == pp_rank
def load(self,
checkpoint_path: PathLike,
compute_dtype: torch.dtype,
weight_dtype: Optional[Union[str, np.dtype]] = None,
device: Optional[Union[int, str, torch.device]] = None):
""" Load checkpoint weights.
# Args.
checkpoint_path: str or Path,
a checkpoint directory where FT checkpoint files locate.
weight_dtype: str or np.dtype, the data type of stored weights.
"""
checkpoint_path = Path(checkpoint_path)
if not checkpoint_path.exists():
raise FileNotFoundError(f"Could not find checkpoint {str(checkpoint_path)}")
weight_dtype = to_numpy_dtype(weight_dtype)
        print(f'Loading weights from {str(checkpoint_path)} (data type: {weight_dtype})')
self.weights = list()
self.int8_weights = list()
self.int8_scales = list()
torch.cuda.empty_cache()
def _load_from_file(fname):
quant_sub_names = ["attention.query_key_value.weight", "attention.dense.weight", \
"dense_h_to_4h.weight", "dense_4h_to_h.weight"]
_weight = torch.from_numpy(np.fromfile(checkpoint_path / fname, dtype=weight_dtype))
_weight = _weight.to(compute_dtype)
weight_index = len(self.weights)
expected_shape = self.expected_weight_shapes[weight_index]
try:
if _weight.nelement() > 0:
_weight = _weight.reshape(expected_shape)
except RuntimeError:
raise ValueError(
f"num_heads, size_per_head, vocab_size, and max_seq_len must be the same "
f"as the ones during training (weight: {fname} expected shape: {expected_shape}, "
f"got shape: {_weight.shape}).")
should_quantize = any(sub_name in fname for sub_name in quant_sub_names)
if self.int8_mode != 0 and should_quantize:
calibrate = self.weight_transpose_calibrate_quantize
int8_weight, int8_scales = calibrate(_weight)
# int8 weights should appear in the same order as the FP weights.
# Move to device and add to the int8 list.
dummy_weight = torch.empty(0, dtype=compute_dtype)
if device is not None:
int8_weight = int8_weight.to(device)
int8_scales = int8_scales.to(device)
dummy_weight = dummy_weight.to(device)
self.int8_weights.append(int8_weight)
self.int8_scales.append(int8_scales)
self.weights.append(dummy_weight)
else:
if device is not None:
_weight = _weight.to(device)
self.weights.append(_weight)
# Load
# pylint:disable=line-too-long
layer_offset = self.local_num_layers * self.pipeline_para_rank
[_load_from_file(f'model.layers.{layer_offset + i}.input_layernorm.weight.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.input_layernorm.bias.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.attention.query_key_value.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.attention.query_key_value.bias.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.attention.dense.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.attention.dense.bias.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.post_attention_layernorm.weight.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.post_attention_layernorm.bias.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.mlp.dense_h_to_4h.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.mlp.dense_h_to_4h.bias.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.mlp.dense_4h_to_h.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.mlp.dense_4h_to_h.bias.bin') for i in range(self.local_num_layers)]
if self.has_adapters:
[_load_from_file(f'model.layers.{layer_offset + i}.after_attention_adapter.dense_h_to_4h.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.after_attention_adapter.dense_h_to_4h.bias.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.after_attention_adapter.dense_4h_to_h.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.after_attention_adapter.dense_4h_to_h.bias.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.after_ffn_adapter.dense_h_to_4h.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.after_ffn_adapter.dense_h_to_4h.bias.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.after_ffn_adapter.dense_4h_to_h.weight.{self.tensor_para_rank}.bin') for i in range(self.local_num_layers)]
[_load_from_file(f'model.layers.{layer_offset + i}.after_ffn_adapter.dense_4h_to_h.bias.bin') for i in range(self.local_num_layers)]
assert len(self.weights) == len(self.expected_weight_shapes), "Incorrect number of weights loaded"
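# A minimal usage sketch for GptLayerWeights above. It assumes `config` is a
# GptInitModelParameters built elsewhere and that `ckpt_dir` points to an
# FT-format checkpoint directory (the default path below is a placeholder).
def _example_load_layer_weights(config, ckpt_dir='/path/to/ft-checkpoint/1-gpu'):
    layer_weights = GptLayerWeights.from_config(config)
    # compute_dtype is the dtype used at inference time; the stored weight
    # dtype comes from the conversion script (e.g. 'fp16' or 'fp32').
    layer_weights.load(ckpt_dir,
                       compute_dtype=torch.float16,
                       weight_dtype=config.weights_data_type,
                       device='cuda:0')
    return layer_weights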
class FtModuleBase:
def __init__(self):
self.weight = None
@classmethod
@abstractmethod
def from_config(cls, config: GptInitModelParameters, **kwargs):
raise NotImplementedError
@abstractmethod
def _initialize_model(self, force_init=False):
raise NotImplementedError
@abstractmethod
def forward(self, *args, **kwargs):
raise NotImplementedError
def set_weight(self, weight: GptLayerWeights):
old_weight_dtype = self.weight.dtype if self.weight is not None else None
self.weight = weight
if old_weight_dtype is None or old_weight_dtype != self.weight.dtype:
self._initialize_model(force_init=True)
@property
def dtype(self):
assert self.weight is not None
return self.weight.dtype
@property
def device(self):
assert self.weight is not None
return self.weight.device
def cuda(self, device=None):
assert torch.cuda.is_available()
self.weight.cuda(device)
return self
def to(self, device=None):
self.weight.to(device)
return self
def float(self):
self.weight.float()
self._initialize_model(force_init=True)
return self
def half(self):
self.weight.half()
self._initialize_model(force_init=True)
return self
def bfloat16(self):
self.weight.bfloat16()
self._initialize_model(force_init=True)
return self
class GptContextDecoder(FtModuleBase):
def __init__(self,
num_heads: int,
size_per_head: int,
inter_size: int,
num_layers: int,
tensor_para_size: int = 1,
pipeline_para_size: int = 1,
remove_padding: bool = True,
shared_contexts_ratio: float = 1.0,
layernorm_eps: float = 1e-6,
layernorm_type: LayernormType = 'pre_layernorm',
activation_type: str = 'gelu',
has_adapters: bool = False,
adapter_inter_size: int = 0,
int8_mode: int = 0):
super().__init__()
self.num_heads = num_heads
self.size_per_head = size_per_head
self.hidden_size = self.num_heads * self.size_per_head
self.inter_size = inter_size
self.num_layers = num_layers
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.remove_padding = remove_padding
self.shared_contexts_ratio = shared_contexts_ratio
self.layernorm_eps = layernorm_eps
self.layernorm_type = layernorm_type
self.activation_type = activation_type
self.has_adapters = has_adapters
self.adapter_inter_size = adapter_inter_size
assert int8_mode in [0, 1]
self.int8_mode = int8_mode
self.ft_op = None
self.weight = None
def __repr__(self):
args_dict = dict(
num_heads=self.num_heads,
size_per_head=self.size_per_head,
hidden_size=self.hidden_size,
inter_size=self.inter_size,
num_layers=self.num_layers,
tensor_para_size=self.tensor_para_size,
pipeline_para_size=self.pipeline_para_size,
remove_padding=self.remove_padding,
shared_contexts_ratio=self.shared_contexts_ratio,
layernorm_eps=self.layernorm_eps,
layernorm_type=self.layernorm_type,
activation_type=self.activation_type,
has_adapters=self.has_adapters,
adapter_inter_size=self.adapter_inter_size,
int8_mode=self.int8_mode,
)
args_str = ',\n '.join([f'{k}: {v}' for k, v in args_dict.items()])
return f'{self.__class__.__name__}[\n {args_str}\n]'
@classmethod
def from_config(cls, config: GptInitModelParameters, **kwargs):
return cls(num_heads=config.head_num,
size_per_head=config.size_per_head,
inter_size=4 * config.head_num * config.size_per_head,
num_layers=config.layer_num,
tensor_para_size=config.tensor_para_size,
pipeline_para_size=config.pipeline_para_size,
remove_padding=kwargs.get('remove_padding', True),
shared_contexts_ratio=kwargs.get('shared_contexts_ratio', 1.0),
layernorm_eps=config.layernorm_eps,
layernorm_type=config.layernorm_type,
activation_type=config.activation_type,
has_adapters=config.has_adapters,
adapter_inter_size=config.adapter_inter_size,
int8_mode=config.int8_mode)
def _initialize_model(self, force_init=False):
if self.weight is None:
self.weight = GptLayerWeights(
num_heads=self.num_heads,
size_per_head=self.size_per_head,
inter_size=self.inter_size,
num_layers=self.num_layers,
tensor_para_size=self.tensor_para_size,
pipeline_para_size=self.pipeline_para_size,
has_adapters=self.has_adapters,
adapter_inter_size=self.adapter_inter_size,
int8_mode=self.int8_mode)
if not force_init and self.ft_op is not None:
return
if self.ft_op is not None:
del self.ft_op
self.ft_op = torch.classes.FasterTransformer.ParallelGptContextDecoderOp(
self.num_heads,
self.size_per_head,
self.inter_size,
self.num_layers,
self.tensor_para_size,
self.pipeline_para_size,
self.layernorm_eps,
self.layernorm_type,
self.activation_type,
self.has_adapters,
self.adapter_inter_size,
self.int8_mode,
self.weight.weights,
self.weight.int8_weights,
self.weight.int8_scales,
self.remove_padding)
def forward(self,
input_embeds: torch.Tensor,
attention_mask: torch.Tensor,
input_lengths: torch.IntTensor,
memory_length: Optional[int] = None,
compact_index: Optional[torch.IntTensor] = None,
batch_to_compact_index: Optional[torch.IntTensor] = None,
linear_bias_slopes: Optional[torch.Tensor] = None):
"""
# Args.
input_embeds: Tensor, (batch * beam, max_input_length, hidden_dim),
input hidden states.
attention_mask: Tensor, (batch * beam, max_input_length, max_input_length),
input attention mask.
input_lengths: (batch * beam,), input sequence lengths.
memory_length: int, the length of memory to keep key/cache values.
compact_index: IntTensor, (compact_batch_size,)
The index of input sequences of a compact batch. If None, the FT op
doesn't apply the shared context feature and, as a result, the inference
time may increase.
batch_to_compact_index: IntTensor, (batch * beam,)
The index map from the original input batch to the compact batch.
This must be provided if compact_index is not None.
linear_bias_slopes: (num_heads,)
The slope per head of the linear attention bias (ALiBi). If None,
standard self-attention is performed.
# Returns
hidden_states: Tensor, (batch * beam, max_input_length, hidden_dim),
decoder outputs.
key_cache: Tensor, (num_layers, batch * beam, local_num_heads, size_per_head / x, memory_length, x),
key cache of attention of inputs.
x = 16 / sizeof(T), memory_length = max_input_length or max_input_length + gen_length
value_cache: Tensor, (num_layers, batch * beam, local_num_heads, memory_length, hidden_dim)
value cache of attention
last_token_hidden_states: Tensor, (batch * beam, hidden_dim)
hidden states of the last input token.
"""
self._initialize_model()
# outputs: output hidden states
decoder_output, key_cache, value_cache, last_token_hidden_states = self.ft_op.forward(
input_embeds,
attention_mask,
input_lengths,
memory_length,
compact_index,
batch_to_compact_index,
linear_bias_slopes)
return decoder_output, key_cache, value_cache, last_token_hidden_states
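# Illustrative helper for the cache shapes documented in forward() above:
# x = 16 / sizeof(T), so x = 8 for fp16/bf16 and x = 4 for fp32. The default
# dimensions are example values only, not requirements of the op.
def _example_key_cache_shape(num_layers=24, batch_beam=8, local_num_heads=16,
                             size_per_head=64, memory_length=1024,
                             elem_size_bytes=2):
    x = 16 // elem_size_bytes
    return (num_layers, batch_beam, local_num_heads,
            size_per_head // x, memory_length, x)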
class GptDecoder(FtModuleBase):
def __init__(self,
num_heads: int,
size_per_head: int,
inter_size: int,
num_layers: int,
tensor_para_size: int = 1,
pipeline_para_size: int = 1,
layernorm_eps: float = 1e-6,
layernorm_type: LayernormType = 'pre_layernorm',
activation_type: str = 'gelu',
has_adapters: bool = False,
adapter_inter_size: int = 0,
int8_mode: int = 0):
super().__init__()
self.num_heads = num_heads
self.size_per_head = size_per_head
self.hidden_size = self.num_heads * self.size_per_head
self.inter_size = inter_size
self.num_layers = num_layers
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.layernorm_eps = layernorm_eps
self.layernorm_type = layernorm_type
self.activation_type = activation_type
self.has_adapters = has_adapters
self.adapter_inter_size = adapter_inter_size
self.int8_mode = int8_mode
self.ft_op = None
self.weight = None
def __repr__(self):
args_dict = dict(
num_heads=self.num_heads,
size_per_head=self.size_per_head,
hidden_size=self.hidden_size,
inter_size=self.inter_size,
num_layers=self.num_layers,
tensor_para_size=self.tensor_para_size,
pipeline_para_size=self.pipeline_para_size,
layernorm_eps=self.layernorm_eps,
layernorm_type=self.layernorm_type,
activation_type=self.activation_type,
has_adapters=self.has_adapters,
adapter_inter_size=self.adapter_inter_size,
int8_mode=self.int8_mode,
)
args_str = ',\n '.join([f'{k}: {v}' for k, v in args_dict.items()])
return f'{self.__class__.__name__}[\n {args_str}\n]'
@classmethod
def from_config(cls, config: GptInitModelParameters, **kwargs):
hidden_dim = config.head_num * config.size_per_head
return cls(num_heads=config.head_num,
size_per_head=config.size_per_head,
inter_size=4 * hidden_dim,
num_layers=config.layer_num,
tensor_para_size=config.tensor_para_size,
pipeline_para_size=config.pipeline_para_size,
layernorm_eps=config.layernorm_eps,
layernorm_type=config.layernorm_type,
activation_type=config.activation_type,
has_adapters=config.has_adapters,
adapter_inter_size=config.adapter_inter_size,
int8_mode=config.int8_mode)
def _initialize_model(self, force_init=False):
if self.weight is None:
self.weight = GptLayerWeights(
num_heads=self.num_heads,
size_per_head=self.size_per_head,
inter_size=self.inter_size,
num_layers=self.num_layers,
tensor_para_size=self.tensor_para_size,
pipeline_para_size=self.pipeline_para_size,
has_adapters=self.has_adapters,
adapter_inter_size=self.adapter_inter_size,
int8_mode=self.int8_mode)
if not force_init and self.ft_op is not None:
return
if self.ft_op is not None:
del self.ft_op
self.ft_op = torch.classes.FasterTransformer.ParallelGptDecoderOp(
self.num_heads,
self.size_per_head,
self.inter_size,
self.num_layers,
self.tensor_para_size,
self.pipeline_para_size,
self.layernorm_eps,
self.layernorm_type,
self.activation_type,
self.has_adapters,
self.adapter_inter_size,
self.weight.int8_mode,
self.weight.weights,
self.weight.int8_weights,
self.weight.int8_scales)
def forward(self,
max_input_length: int,
step: int,
ite: int,
input_embeds: torch.Tensor,
sequence_lengths: torch.IntTensor,
key_cache: torch.Tensor,
value_cache: torch.Tensor,
finished: torch.BoolTensor,
total_padding_tokens: torch.IntTensor,
masked_tokens: torch.BoolTensor,
cache_indirection: Optional[torch.IntTensor] = None,
linear_bias_slopes: Optional[torch.Tensor] = None):
"""
# Args.
max_input_length: int, maximum input context length.
step: int, the current step index.
ite: int, local batch iteration.
input_embeds: Tensor, (local_batch * beam, hidden_dim),
input hidden state to decoder.
sequence_lengths: IntTensor, (local_batch * beam,),
the current sequence lengths.
key_cache: Tensor, key cache buffer.
value_cache: Tensor, value cache buffer.
finished: BoolTensor, (local_batch * beam,),
whether to finish sentence generation.
total_padding_tokens: IntTensor, (local_batch * beam,),
the number of padded tokens.
masked_tokens: BoolTensor, (local_batch * beam, memory_length),
a mask tensor that indicates padded tokens.
cache_indirection: IntTensor, (local_batch * beam,),
the cache of beam positions; required when beam > 1.
linear_bias_slopes: Tensor, (num_heads,),
the slope per head of the linear position bias (ALiBi), optional.
# Returns
Tensor, (local_batch * beam, hidden_dim), the decoder output hidden states.
"""
self._initialize_model()
outputs = self.ft_op.forward(max_input_length,
step,
ite,
input_embeds,
sequence_lengths,
finished,
total_padding_tokens,
masked_tokens,
key_cache,
value_cache,
cache_indirection,
linear_bias_slopes)
return outputs[0]
class Gpt:
def __init__(self,
num_heads: int,
size_per_head: int,
num_layers: int,
vocab_size: int,
start_id: int,
end_id: int,
lib_path: PathLike,
tensor_para_size: int = 1,
pipeline_para_size: int = 1,
remove_padding: bool = True,
shared_contexts_ratio: float = 1.0,
layernorm_eps: float = 1e-6,
layernorm_type: LayernormType = 'pre_layernorm',
activation_type: str = 'gelu',
has_positional_encoding: bool = True,
max_seq_len: int = 0,
has_pre_decoder_layernorm: bool = False,
has_post_decoder_layernorm: bool = True,
has_adapters: bool = False,
adapter_inter_size: int = 0,
int8_mode: int = 0,
inference_data_type: Optional[str] = None,
weights_data_type: str = 'fp32',
use_fp32_to_compute_logit: bool = False,
**kwargs):
super().__init__()
inference_data_type = inference_data_type or weights_data_type
self.config = GptInitModelParameters(
head_num=num_heads,
size_per_head=size_per_head,
layer_num=num_layers,
max_seq_len=max_seq_len,
tensor_para_size=tensor_para_size,
vocab_size=vocab_size,
start_id=start_id,
end_id=end_id,
pipeline_para_size=pipeline_para_size,
data_type=inference_data_type,
weights_data_type=weights_data_type,
layernorm_eps=layernorm_eps,
layernorm_type=layernorm_type,
activation_type=activation_type,
has_positional_encoding=has_positional_encoding,
has_pre_decoder_layernorm=has_pre_decoder_layernorm,
has_post_decoder_layernorm=has_post_decoder_layernorm,
has_adapters=has_adapters,
adapter_inter_size=adapter_inter_size,
int8_mode=int8_mode,
sparse=kwargs.get('sparse', False)
)
self.use_fp32_to_compute_logit = use_fp32_to_compute_logit
self.weight = None
self.shared_contexts_ratio = shared_contexts_ratio
torch.classes.load_library(os.path.abspath(lib_path))
# Embeddings to encode or decode tokens.
hidden_dim = num_heads * size_per_head
# Pad vocab size for FT.
local_vocab_size = math.ceil(self.config.vocab_size / self.config.tensor_para_size)
if self.config.data_type == 'fp16':
local_vocab_size = math.ceil(local_vocab_size / 8) * 8
self.vocab_size_padded = local_vocab_size * self.config.tensor_para_size
self.vocab_size = self.config.vocab_size
self.decode_op = torch.classes.FasterTransformer.DynamicDecodeOp(
self.vocab_size,
self.vocab_size_padded,
self.config.tensor_para_size,
self.config.pipeline_para_size,
torch.float)
self._parameters = {}
def register_param(name, p):
self._parameters[name] = p
setattr(self, name, p)
register_param(
'context_decoder',
GptContextDecoder.from_config(
self.config,
remove_padding=remove_padding,
shared_contexts_ratio=shared_contexts_ratio,
**kwargs))
register_param('decoder', GptDecoder.from_config(self.config, **kwargs))
compute_dtype = to_torch_dtype(inference_data_type)
if comm.is_pipeline_group_first():
register_param(
'word_embedding',
torch.nn.Embedding(self.vocab_size_padded, hidden_dim, dtype=compute_dtype))
self._mask_padded_vocab_weights(self.word_embedding.weight)
if self.config.has_positional_encoding:
register_param(
'position_encoding',
torch.nn.Embedding(self.config.max_seq_len, hidden_dim, dtype=compute_dtype))
else:
self.position_encoding = None
if self.config.has_pre_decoder_layernorm:
register_param(
'pre_decoder_layernorm',
torch.nn.LayerNorm(hidden_dim, eps=layernorm_eps, dtype=compute_dtype))
else:
self.pre_decoder_layernorm = None
if comm.is_pipeline_group_last():
if has_post_decoder_layernorm:
register_param(
'post_decoder_layernorm',
torch.nn.LayerNorm(hidden_dim, eps=layernorm_eps, dtype=compute_dtype))
else:
self.post_decoder_layernorm = None
self.lm_head_ctype = compute_dtype if not self.use_fp32_to_compute_logit else torch.float32
register_param(
'lm_head',
torch.nn.Linear(hidden_dim, self.vocab_size_padded, bias=False, dtype=self.lm_head_ctype))
self._mask_padded_vocab_weights(self.lm_head.weight)
@classmethod
def from_config(cls, config: GptInitModelParameters, **kwargs):
return cls(
num_heads=config.head_num,
size_per_head=config.size_per_head,
num_layers=config.layer_num,
max_seq_len=config.max_seq_len,
tensor_para_size=config.tensor_para_size,
vocab_size=config.vocab_size,
start_id=config.start_id,
end_id=config.end_id,
pipeline_para_size=config.pipeline_para_size,
inference_data_type=config.data_type,
weights_data_type=config.weights_data_type,
layernorm_eps=config.layernorm_eps,
layernorm_type=config.layernorm_type,
activation_type=config.activation_type,
has_positional_encoding=config.has_positional_encoding,
has_pre_decoder_layernorm=config.has_pre_decoder_layernorm,
has_post_decoder_layernorm=config.has_post_decoder_layernorm,
has_adapters=config.has_adapters,
adapter_inter_size=config.adapter_inter_size,
int8_mode=config.int8_mode,
**kwargs
)
def load(self,
checkpoint_path: PathLike,
inference_data_type: Optional[Union[str, torch.dtype]] = None,
config: Optional[GptInitModelParameters] = None,
device: Optional[Union[str, int, torch.device]] = None):
checkpoint_path = Path(checkpoint_path)
device = device or comm.get_device()
config = config or self.config
compute_dtype = to_torch_dtype(inference_data_type or self.dtype)
self.weight = GptLayerWeights.from_config(config)
self.weight.load(checkpoint_path, compute_dtype, config.weights_data_type, device)
self.context_decoder.set_weight(self.weight)
self.decoder.set_weight(self.weight)
weight_dtype = to_numpy_dtype(config.weights_data_type)
def _safe_load_from_bin(param: torch.nn.Parameter, fname):
if (checkpoint_path / fname).exists():
# w_ is a 1-D array since a bin file doesn't carry shape info.
w_ = np.fromfile(checkpoint_path / fname, dtype=weight_dtype)
param.data = torch.from_numpy(w_).reshape(param.data.shape).to(compute_dtype)
else:
raise FileNotFoundError(f'Failed to load {fname}')
def _safe_load_lm_head_from_bin(param, fname, ctype):
if (checkpoint_path / fname).exists():
shape = (self.vocab_size, self.config.head_num * self.config.size_per_head)
# w_ is a 1-D array since a bin file doesn't carry shape info.
w_ = np.fromfile(checkpoint_path / fname, dtype=weight_dtype)
param.data = param.data.to(ctype)
param.data[:self.vocab_size, :] = torch.from_numpy(w_).reshape(shape).to(ctype)
else:
print(f'Failed to load {fname}')
torch.nn.init.normal_(param).to(compute_dtype)
self._mask_padded_vocab_weights(param)
# pylint:disable=line-too-long
if comm.is_pipeline_group_first():
_safe_load_lm_head_from_bin(self.word_embedding.weight, 'model.wte.bin', compute_dtype)
self._mask_padded_vocab_weights(self.word_embedding.weight)
if self.position_encoding is not None:
_safe_load_from_bin(self.position_encoding.weight, 'model.wpe.bin')
if self.pre_decoder_layernorm is not None:
_safe_load_from_bin(self.pre_decoder_layernorm.weight,
'model.pre_decoder_layernorm.weight.bin')
_safe_load_from_bin(self.pre_decoder_layernorm.bias,
'model.pre_decoder_layernorm.bias.bin')
if comm.is_pipeline_group_last():
if self.post_decoder_layernorm is not None:
_safe_load_from_bin(self.post_decoder_layernorm.weight,
'model.final_layernorm.weight.bin')
_safe_load_from_bin(self.post_decoder_layernorm.bias,
'model.final_layernorm.bias.bin')
if (checkpoint_path / 'model.lm_head.weight.bin').exists():
_safe_load_lm_head_from_bin(self.lm_head.weight, 'model.lm_head.weight.bin', self.lm_head_ctype)
else:
if self.use_fp32_to_compute_logit:
_safe_load_lm_head_from_bin(self.lm_head.weight, 'model.wte.bin', torch.float32)
else:
# In this branch we can share the pre and post decoder embeddings, but ONLY if the pipeline size is 1.
# When pipeline size > 1, these two weights will end up on different GPUs, so we must load the
# post decoder weight again (else case).
if comm.get_pipeline_para_size() == 1:
self.lm_head.weight = self.word_embedding.weight
else:
_safe_load_lm_head_from_bin(self.lm_head.weight, 'model.wte.bin', compute_dtype)
self.to(device)
@property
def dtype(self):
assert self.weight is not None
return self.weight.dtype
@property
def device(self):
assert self.weight is not None
return self.weight.device
def cuda(self, device=None):
assert torch.cuda.is_available()
for name, param in self._parameters.items():
setattr(self, name, param.cuda(device))
return self
def to(self, device=None):
for name, param in self._parameters.items():
setattr(self, name, param.to(device))
return self
def float(self):
for name, param in self._parameters.items():
setattr(self, name, param.float())
return self
def half(self):
for name, param in self._parameters.items():
setattr(self, name, param.half())
return self
def bfloat16(self):
for name, param in self._parameters.items():
setattr(self, name, param.bfloat16())
return self
def _mask_padded_vocab_weights(self, weight: torch.Tensor):
assert self.vocab_size_padded >= self.vocab_size
if self.vocab_size_padded > self.vocab_size:
weight.data[self.vocab_size:, ...] = 0
def generate_pad_mask(self, input_lengths, memory_length, init_step=0):
""" Generate a pad mask tensor.
# Args.
input_lengths: (batch_size * beam_width,), input lengths
memory_length: the length of key/value cache memory.
init_step: int, initial step.
# Return
masked_tokens: BoolTensor, (batch_size * beam_width, memory_length),
True if init_step + input_length[i] <= j < init_step + max_input_length,
where i is a batch-beam index and j is a time step taken modulo memory_length.
"""
max_input_length = input_lengths.max()
input_lengths = input_lengths.unsqueeze(1)
shift = init_step % memory_length
step_indices = torch.arange(
init_step, init_step + memory_length, device=input_lengths.device)
step_indices = step_indices.roll(shift).unsqueeze(0).tile(input_lengths.shape[0], 1)
masked_tokens = torch.logical_and(
step_indices >= input_lengths, step_indices < init_step + max_input_length)
return masked_tokens
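# Worked example (illustrative values only): for
#   masked = self.generate_pad_mask(torch.tensor([2, 4]), memory_length=6)
# max_input_length is 4 and step_indices is [0, 1, 2, 3, 4, 5], so the first
# row masks steps 2 and 3 (the padding between its length 2 and
# max_input_length 4) while the second row masks nothing:
#   [[False, False, True,  True,  False, False],
#    [False, False, False, False, False, False]]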
def get_local_batch_size(self, batch_size):
""" Get a local batch size by the same way that FT Gpt does. """
local_batch_size = batch_size
pp_size = self.decoder.pipeline_para_size
if pp_size > 1:
if local_batch_size % pp_size == 0:
local_batch_size //= pp_size
while local_batch_size > 1024 and local_batch_size % 2 == 0:
local_batch_size //= 2
return local_batch_size
@torch.no_grad()
def generate(self,
input_token_ids: torch.IntTensor,
input_lengths: torch.IntTensor,
gen_length: int,
eos_token_id: Optional[int] = None,
local_batch_size: Optional[int] = None,
beam_width: int = 1,
top_k: Optional[torch.IntTensor] = None,
top_p: Optional[torch.FloatTensor] = None,
top_p_decay: Optional[torch.FloatTensor] = None,
top_p_min: Optional[torch.FloatTensor] = None,
top_p_reset_ids: Optional[torch.IntTensor] = None,
temperature: Optional[torch.FloatTensor] = None,
repetition_penalty: Optional[torch.FloatTensor] = None,
presence_penalty: Optional[torch.FloatTensor] = None,
min_length: Optional[torch.IntTensor] = None,
len_penalty: Optional[torch.FloatTensor] = None,
beam_search_diversity_rate: Optional[torch.FloatTensor] = None,
stop_words_list: Optional[torch.IntTensor] = None,
bad_words_list: Optional[torch.IntTensor] = None,
sequence_limit_lengths: Optional[torch.IntTensor] = None,
random_seed: Optional[torch.LongTensor] = None,
memory_length: Optional[int] = None,
return_output_length: bool = False,
return_log_probs: bool = False):
"""
# Args.
input_token_ids: IntTensor, (batch_size, max_input_length),
the input context token ids.
input_lengths: IntTensor, (batch_size),
the lengths of input context sequences.
gen_length: int, the number of tokens to generate.
local_batch_size: int, optional, the batch size of a local iteration (currently disabled).
eos_token_id: int, eos token id.
beam_width: int, number of beams for beam search.
If 1, sampling decode will be used.
top_k: IntTensor, (batch_size,) top-k sampling. The number of most probable
tokens to keep for sampling per sentence in a batch.
top_p: FloatTensor, (batch_size,), top-p sampling. The cumulative probability
used to filter the set of most probable tokens.
top_p_decay: FloatTensor, (batch_size,)
The decay of top-p value for top_p sampling.
top_p_min: FloatTensor, (batch_size,)
The minimum top p values in top-p decaying.
top_p_reset_ids: IntTensor, (batch_size,)
The reset ids used to reset top_p values in top-p sampling.
temperature: FloatTensor, (batch_size,),
The temperature value for smoothing the logit distribution.
repetition_penalty: FloatTensor, (batch_size,),
The repetition penalty.
presence_penalty: FloatTensor, (batch_size,),
The presence penalty, which is exclusive with repetition_penalty.
Only one of repetition and presence penalties is allowed.
min_length: IntTensor, (batch_size,),
The minimum length for each sentence; EOS is masked while the generated length is below this minimum.
len_penalty: FloatTensor, (batch_size,)
The exponent of the length penalty of beam scores.
beam_search_diversity_rate: FloatTensor, (batch_size,),
The diversity rate of beam search.
stop_words_list: IntTensor, (batch_size, 2, stop_words_length)
When FT generates words in this list, it will stop the generation. An extension of stop id.
bad_words_list: IntTensor, (batch_size, 2, bad_words_length)
The words in the list will never be sampled.
sequence_limit_lengths: IntTensor, (batch_size,), The maximum length of a generated sequence.
memory_length: int, the length of cache memory. If None, it will be
max_input_length + gen_length.
# Returns
A dict containing 'output_token_ids': IntTensor, (batch_size, beam_width, max_seq_length),
plus 'output_lengths', 'cum_log_probs' and 'output_log_probs' when requested.
"""
assert self.weight is not None, 'Please call load() first to initialize weights.'
input_token_ids = input_token_ids.type(torch.int32).to(self.device)
input_lengths = input_lengths.type(torch.int32).to(self.device)
batch_size = len(input_token_ids)
max_input_length = input_token_ids.shape[-1]
max_seq_length = max_input_length + gen_length
memory_length = memory_length or max_seq_length
# TODO: Enable local batch later. We currently disable local batching due to
# an input mismatch issue of FT's decode_op: FT's decode_op requires logits
# of shape (batch_size, ...) but we have logits of shape (local_batch_size, ...)
# After fixing FT's side, we will enable local batch.
# local_batch_size = local_batch_size or self.get_local_batch_size(batch_size)
# num_local_batches, last_chunk = divmod(batch_size, local_batch_size)
# if last_chunk > 0:
# num_local_batches += 1
assert local_batch_size is None or local_batch_size == batch_size
local_batch_size = batch_size
num_local_batches = 1
device = self.device
eos_token_id = eos_token_id if eos_token_id is not None else self.config.end_id
assert eos_token_id is not None, 'eos_token_id must be specified for generation.'
eos_token_ids = eos_token_id * torch.ones(batch_size, dtype=torch.int32, device=device)
assert repetition_penalty is None or presence_penalty is None,\
'Found ambiguous parameters repetition_penalty and presence_penalty '\
'which are mutually exclusive. Please provide one of repetition_penalty '\
'and presence_penalty.'
# Setup decoder_op prior to calling the forward function.
self.decode_op.setup(batch_size,
beam_width,
top_k,
top_p,
temperature,
repetition_penalty,
presence_penalty,
min_length,
len_penalty,
beam_search_diversity_rate,
random_seed,
top_p_decay,
top_p_min,
top_p_reset_ids)
# Prepare input and output arguments.
if beam_width > 1:
# Tiling for beam search.
input_token_ids = input_token_ids.repeat(1, beam_width).view(batch_size * beam_width, -1)
input_lengths = input_lengths.view(-1, 1).repeat(1, beam_width).view(-1)
if sequence_limit_lengths is not None:
sequence_limit_lengths = sequence_limit_lengths.view(-1, 1).repeat(1, beam_width).view(-1)
# src/tgt cache indirections.
cache_indirection = torch.zeros(
(2, batch_size, beam_width, memory_length), dtype=torch.int32, device=device)
parent_ids = torch.zeros(max_seq_length, batch_size * beam_width, dtype=torch.int32, device=device)
else:
cache_indirection = None
src_cache_indirection = None
tgt_cache_indirection = None
parent_ids = None
pad_lengths = max_input_length - input_lengths
# Since tril() doesn't support the bf16 dtype, we create the mask as bool and then cast it to dtype.
attention_mask = torch.ones(
(max_input_length, max_input_length), dtype=torch.bool, device=device)\
.tril().unsqueeze(0).tile(input_token_ids.shape[0], 1, 1).to(self.dtype)
for b, input_length in enumerate(input_lengths):
attention_mask[b, input_length:, ...] = 0
masked_tokens = self.generate_pad_mask(input_lengths, memory_length)
finished = torch.zeros_like(input_lengths).bool()
sequence_lengths = (max_input_length - 1) * torch.ones_like(input_lengths)
if return_log_probs or beam_width > 1:
cum_log_probs = torch.zeros(batch_size * beam_width, device=device)
output_log_probs = torch.zeros((gen_length, batch_size * beam_width), device=device)
else:
cum_log_probs = None
output_log_probs = None
# Contiguous buffer for each decode_op step, it will be transposed tensor for the final output.
output_token_ids = torch.zeros(
(max_seq_length, batch_size * beam_width), dtype=torch.int32, device=device)
output_token_ids[:max_input_length, ...] = input_token_ids.T
if comm.is_pipeline_group_first():
# Prepare input tensors of decoder.
input_embeds = self.word_embedding(input_token_ids)
if self.position_encoding is not None:
position_ids = torch.arange(0, max_input_length, dtype=torch.int, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, max_input_length)
input_embeds += self.position_encoding(position_ids)
if self.pre_decoder_layernorm is not None:
input_embeds = self.pre_decoder_layernorm(input_embeds)
else:
# Dummy input_embeds
input_embeds = torch.empty(
size=(batch_size * beam_width, max_input_length, self.context_decoder.hidden_size),
dtype=self.context_decoder.dtype,
device=device)
use_shared_contexts = (self.shared_contexts_ratio > 0.) and (max_input_length >= 1) and (batch_size > 1)
batch_to_compact, compact_to_batch = None, None
if use_shared_contexts:
find_context_duplications = torch.ops.fastertransformer.find_context_duplications
batch_to_compact, compact_to_batch = find_context_duplications(input_token_ids)
use_shared_contexts = compact_to_batch.shape[0] <= self.shared_contexts_ratio * batch_size
if not use_shared_contexts:
batch_to_compact, compact_to_batch = None, None
profiler.start('ft-context-decoder')
_, k_cache, v_cache, last_token_hidden_states = self.context_decoder.forward(
input_embeds=input_embeds,
attention_mask=attention_mask,
input_lengths=input_lengths,
memory_length=memory_length,
batch_to_compact_index=batch_to_compact,
compact_index=compact_to_batch)
profiler.stop('ft-context-decoder')
for step in range(max_input_length, max_seq_length):
src_indir_idx = (step - max_input_length) % 2
tgt_indir_idx = 1 - src_indir_idx
is_generation_done = torch.tensor([True], dtype=torch.bool, device=device)
for ite in range(num_local_batches):
# The indices of the current local batch-beam.
bbidx = range(
ite * local_batch_size * beam_width,
min((ite + 1) * local_batch_size * beam_width, batch_size * beam_width))
if cache_indirection is not None:
bidx = range(ite * local_batch_size,
min((ite + 1) * local_batch_size, batch_size))
src_cache_indirection = cache_indirection[src_indir_idx, bidx, ...]
tgt_cache_indirection = cache_indirection[tgt_indir_idx, bidx, ...]
if step == max_input_length:
hidden_states = last_token_hidden_states[bbidx, ...]
else:
if comm.is_pipeline_group_first():
input_embeds = self.word_embedding(output_token_ids[step - 1, bbidx])
if self.position_encoding is not None:
position_ids = (step - 1) * torch.ones_like(pad_lengths[bbidx])
input_embeds += self.position_encoding(position_ids)
if self.pre_decoder_layernorm is not None:
input_embeds = self.pre_decoder_layernorm(input_embeds)
else:
# Dummy input_embeds
input_embeds = torch.empty(
size=(len(bbidx), self.decoder.hidden_size),
dtype=self.decoder.dtype,
device=device)
profiler.start('ft-decoder')
hidden_states = self.decoder.forward(
max_input_length=max_input_length,
step=step,
ite=ite,
input_embeds=input_embeds,
sequence_lengths=sequence_lengths[bbidx],
key_cache=k_cache,
value_cache=v_cache,
finished=finished[bbidx],
total_padding_tokens=pad_lengths[bbidx],
cache_indirection=src_cache_indirection,
masked_tokens=masked_tokens[bbidx, ...])
profiler.stop('ft-decoder')
if comm.is_pipeline_group_last():
if self.post_decoder_layernorm is not None:
hidden_states = self.post_decoder_layernorm(hidden_states)
# We use fp32 logits to avoid overflow issues.
if self.use_fp32_to_compute_logit:
# The FT GPT op internally uses FP32 compute type for matrix multiplication.
# This will produce the same result with the end-to-end FT's GPT op.
logits = torch.nn.functional.linear(hidden_states.float(), self.lm_head.weight)
else:
logits = self.lm_head(hidden_states).float()
profiler.start('ft-decode')
should_stop = self.decode_op.forward(
logits.view(batch_size, beam_width, -1),
step,
max_input_length,
ite,
local_batch_size,
eos_token_ids,
top_k,
top_p,
temperature,
repetition_penalty,
presence_penalty,
min_length,
len_penalty,
beam_search_diversity_rate,
top_p_decay,
top_p_min,
top_p_reset_ids,
None,
input_lengths,
sequence_limit_lengths,
stop_words_list,
bad_words_list,
src_cache_indirection,
output_token_ids.view(-1, batch_size, beam_width),
finished,
sequence_lengths,
cum_log_probs,
output_log_probs,
parent_ids,
tgt_cache_indirection)
profiler.stop('ft-decode')
is_generation_done &= should_stop
# Broadcast from the last pipeline node if needed.
profiler.start('ft-bcast')
tensors_to_bcast = [output_token_ids[step, ...],
finished,
sequence_lengths,
is_generation_done]
if beam_width > 1:
tensors_to_bcast.append(tgt_cache_indirection)
self.decode_op.broadcast_from_last_pipeline(tensors_to_bcast)
profiler.stop('ft-bcast')
if is_generation_done or finished.all():
break
# Transpose (L, batch, beam) -> (batch, beam, L)
output_token_ids = output_token_ids.view(-1, batch_size, beam_width).permute(1, 2, 0)
# Increase sequence_length by 1 because the sequence length of time step t is t - 1.
sequence_lengths += 1
# Outputs
output_dict = dict(output_token_ids=output_token_ids)
if return_output_length:
output_dict['output_lengths'] = sequence_lengths
if return_log_probs:
output_dict['cum_log_probs'] = cum_log_probs
output_dict['output_log_probs'] = output_log_probs
return output_dict
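# A minimal usage sketch for Gpt.generate above. It assumes `gpt` is a Gpt
# instance whose constructor arguments match the converted checkpoint and on
# which load() has already been called; `token_ids` and `lengths` are the
# right-padded input batch (how they are tokenized is outside this file).
def _example_generate(gpt, token_ids, lengths, gen_length=32):
    batch_size = token_ids.shape[0]
    out = gpt.generate(input_token_ids=token_ids,
                       input_lengths=lengths,
                       gen_length=gen_length,
                       beam_width=1,
                       top_k=torch.ones(batch_size, dtype=torch.int32),
                       top_p=torch.zeros(batch_size),
                       temperature=torch.ones(batch_size),
                       return_output_length=True)
    # output_token_ids: (batch_size, beam_width, max_input_length + gen_length)
    return out['output_token_ids'], out['output_lengths']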
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/gpt_decoder.py
|
import time
class Timer:
def __init__(self):
self._start_times = {}
self._total_elapsed_times = {}
def start(self, tag):
self._start_times[tag] = time.time()
def stop(self, tag):
elapsed_time = time.time() - self._start_times[tag]
if tag not in self._total_elapsed_times:
self._total_elapsed_times[tag] = 0
self._total_elapsed_times[tag] += elapsed_time
return elapsed_time
def elapsed_time_in_sec(self, tag):
if tag not in self._total_elapsed_times:
return None
return self._total_elapsed_times[tag]
def reset(self):
self._start_times.clear()
self._total_elapsed_times.clear()
def summary(self):
print('Profile Results')
for tag, elapsed_time in self._total_elapsed_times.items():
print(f' - {tag.ljust(30, ".")}: {elapsed_time:.6f} (sec)')
_default_timer = Timer()
def start(tag):
_default_timer.start(tag)
def stop(tag):
_default_timer.stop(tag)
def elapsed_time_in_sec(tag):
return _default_timer.elapsed_time_in_sec(tag)
def reset():
_default_timer.reset()
def summary():
_default_timer.summary()
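# Example usage of the module-level helpers above; elapsed times are
# wall-clock and accumulate per tag across repeated start/stop pairs.
if __name__ == '__main__':
    start('demo-sleep')
    time.sleep(0.10)
    stop('demo-sleep')
    start('demo-sleep')
    time.sleep(0.05)
    stop('demo-sleep')
    summary()  # prints roughly 0.15 sec for 'demo-sleep'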
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/profiler.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .gpt import GPT
class ParallelGPT(GPT):
def cuda(self):
self.weights._map(lambda w: w.cuda(self.device))
if self.int8_mode != 0:
self.weights._map_int8(lambda w: w.cuda(self.device))
if self.build_model:
del self.model
self.build_model = False
self.model = torch.classes.FasterTransformer.ParallelGptOp(
self.head_num, self.size_per_head, self.inter_size,
self.layer_num,
self.expert_num,
self.moe_k,
self.moe_layer_index,
self.vocab_size, self.start_id, self.end_id,
self.tensor_para_size, self.pipeline_para_size, self.int8_mode,
# GPT variant parameters
self.layernorm_eps,
self.layernorm_type,
self.activation_type,
self.has_positional_encoding,
self.has_pre_decoder_layernorm,
self.has_post_decoder_layernorm,
self.has_adapters,
self.adapter_inter_size,
self.use_attention_linear_bias,
self.weights.w,
self.weights.int8_w,
self.weights.scale,
self.shared_contexts_ratio)
self.build_model = True
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/parallel_gpt.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import datetime
import json
import pathlib
import shutil
import sys
import os
import numpy as np
import torch # pytype: disable=import-error
# verify if root package is in PYTHONPATH
__root_package_path__ = pathlib.Path(__file__).parent.parent.parent.parent.parent.absolute().as_posix()
if __root_package_path__ not in sys.path:
print(
f"[ERROR] add project root directory to your PYTHONPATH with "
f"'export PYTHONPATH={__root_package_path__}:${{PYTHONPATH}}'"
)
from examples.pytorch.gpt.utils.gpt import DEFAULT_START_TAG, DEFAULT_END_TAG, OPENAI_GPT2_START_ID, OPENAI_GPT2_END_ID
from examples.pytorch.utils import torch2np, safe_transpose, cpu_map_location, gpu_map_location, WEIGHT2DTYPE
def save_dense_split(model_states_list, factor_dense, model_key, megatron_gpt_key, np_weight_data_type, saved_dir,
ckpt_ver, model_training_args):
training_pipeline_para_dense_size = len(model_states_list[0])
step_layer_pp = model_training_args.num_layers // training_pipeline_para_dense_size
model_list = [
[
model_states[model_key]["language_model"][megatron_gpt_key]
for model_states in sub_model_states_list
]
for sub_model_states_list in model_states_list
]
has_adapters = any("adaptor" in key for key in model_list[0][0].keys())
moe_layers = []
for idx_tp, sub_model_list in enumerate(model_list):
save_offset = idx_tp * factor_dense
for idx_pp, model in enumerate(sub_model_list):
for key, val in model.items():
val = safe_transpose(val)
val = torch2np(val, np_weight_data_type)
saved_key = key
if key.find("layers.") != -1:
key_split = key.split('.')
layer_index = (int)(key_split[1]) + idx_pp * step_layer_pp
saved_key = '.'.join(key_split[:1] + [str(layer_index)] + key_split[2:])
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
if saved_key.find("adaptor1") != -1:
saved_key = saved_key.replace("adaptor1", "after_attention_adapter")
if saved_key.find("adaptor2") != -1:
saved_key = saved_key.replace("adaptor2", "after_ffn_adapter")
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("adaptor1.dense_4h_to_h.bias") != -1
or key.find("adaptor2.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
):
# shared weights, only need to convert the weights of rank 0
if idx_tp == 0:
saved_path = saved_dir / f"model.{saved_key}.bin"
val.tofile(saved_path.as_posix())
elif (key.find("attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1
or key.find("adaptor1.dense_4h_to_h.weight") != -1
or key.find("adaptor2.dense_4h_to_h.weight") != -1):
split_vals = np.split(val, factor_dense, axis=0)
for j in range(factor_dense):
saved_path = saved_dir / f"model.{saved_key}.{save_offset + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (key.find("mlp.dense_h_to_4h.weight") != -1
or key.find("adaptor1.dense_h_to_4h.weight") != -1
or key.find("adaptor2.dense_h_to_4h.weight") != -1
or key.find("mlp.dense_h_to_4h.bias") != -1
or key.find("adaptor1.dense_h_to_4h.bias") != -1
or key.find("adaptor2.dense_h_to_4h.bias") != -1):
split_vals = np.split(val, factor_dense, axis=-1)
for j in range(factor_dense):
saved_path = saved_dir / f"model.{saved_key}.{save_offset + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.bias") != -1:
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads // model_training_args.tensor_model_parallel_size
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor_dense, axis=-1)
for j in range(factor_dense):
saved_path = saved_dir / f"model.{saved_key}.{save_offset + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
assert val.shape[-1] % 3 == 0
local_dim = val.shape[-1] // 3
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads
assert hidden_dim % head_num == 0
size_per_head = hidden_dim // head_num
assert head_num % model_training_args.tensor_model_parallel_size == 0
head_num = head_num // model_training_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
split_vals = np.split(val, factor_dense, axis=-1)
for j in range(factor_dense):
saved_path = saved_dir / f"model.{saved_key}.{save_offset + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find('experts') == -1:
if key.find('deepspeed_moe.gate') != -1 or key.find('megatron_moe.gate') != -1:
layer_index = (int)(key.split('.')[1]) + idx_pp * step_layer_pp
if idx_tp == 0:
moe_layers.append(layer_index)
prefix = key.replace('deepspeed_moe', 'moe').replace('megatron_moe', 'moe')
if key.find('layernorm') != -1 or key.find('gate') != -1 or key.find("attention.dense.bias") != -1 \
or key.find("dense_4h_to_h.bias") != -1:
if idx_tp == 0:
file_name = os.path.join(saved_dir, "model." + prefix + ".bin")
saved_path = file_name
# print(f"Saving '{prefix}' to '{file_name}'")
# print(f"Shape: '{val.shape}'")
val.tofile(file_name)
else:
val_tensor_para = []
print(key, val.shape)
if key.find("attention.dense.weight") != -1 or key.find("dense_4h_to_h.weight") != -1 \
or key.find("dense_h_to_4h.bias") != -1:
val_tensor_para = np.split(val, factor_dense, axis=0)
elif key.find("dense_h_to_4h.weight") != -1:
val_tensor_para = np.split(val, factor_dense, axis=1)
else:
print(f"[ERROR] cannot find experts key '{key}'")
sys.exit(1)
for j in range(factor_dense):
file_name = os.path.join(saved_dir, "model." + prefix + "." + str(save_offset + j) + ".bin")
saved_path = file_name
# print(f"Saving '{j}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[j]
# print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
else:
print(f"[ERROR] cannot find key '{key}'")
sys.exit(1)
print('{} {} {} {}'.format(idx_tp, idx_pp, key, saved_path))
print(val.shape)
return moe_layers, has_adapters
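# A standalone numpy sketch of the checkpoint-version-3 QKV reordering applied
# to attention.query_key_value.bias above: Megatron interleaves the q/k/v
# slices per head, and the reshape/transpose/reshape groups all Q values
# first, then K, then V, before the tensor-parallel split. Sizes are
# illustrative only.
def _example_qkv_bias_reorder():
    head_num, size_per_head = 2, 3
    local_dim = head_num * size_per_head
    val = np.arange(3 * local_dim, dtype=np.float32)
    val = val.reshape(head_num, 3, size_per_head)  # (head, q/k/v, size_per_head)
    val = val.transpose(1, 0, 2)                   # (q/k/v, head, size_per_head)
    val = val.reshape(3, local_dim)                # row 0: all Q, row 1: K, row 2: V
    return val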
def save_dense_concat(model_states_list, inference_tensor_para_size, factor_dense, model_key, megatron_gpt_key,
np_weight_data_type, saved_dir, ckpt_ver, model_training_args):
def convert_val(x):
x = safe_transpose(x)
x = torch2np(x, np_weight_data_type)
return x
training_pipeline_para_dense_size = len(model_states_list[0])
step_layer_pp = model_training_args.num_layers // training_pipeline_para_dense_size
model_list = [
[
model_states[model_key]["language_model"][megatron_gpt_key]
for model_states in sub_model_states_list
]
for sub_model_states_list in model_states_list
]
has_adapters = any("adaptor" in key for key in model_list[0][0].keys())
moe_layers = []
for idx_tp in range(inference_tensor_para_size):
load_offset = idx_tp * factor_dense
for idx_pp in range(training_pipeline_para_dense_size):
for key in model_list[0][0]:
saved_key = key
if key.find("layers.") != -1:
key_split = key.split('.')
layer_index = (int)(key_split[1]) + idx_pp * step_layer_pp
saved_key = '.'.join(key_split[:1] + [str(layer_index)] + key_split[2:])
if saved_key.find("self_attention") != -1:
saved_key = saved_key.replace("self_attention", "attention")
if saved_key.find("adaptor1") != -1:
saved_key = saved_key.replace("adaptor1", "after_attention_adapter")
if saved_key.find("adaptor2") != -1:
saved_key = saved_key.replace("adaptor2", "after_ffn_adapter")
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("adaptor1.dense_4h_to_h.bias") != -1
or key.find("adaptor2.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
):
# shared weights, only need to convert the weights of rank 0
if idx_tp == 0:
concat_val = convert_val(model_list[0][idx_pp][key])
saved_path = saved_dir / f"model.{saved_key}.bin"
concat_val.tofile(saved_path.as_posix())
elif (key.find("attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1
or key.find("adaptor1.dense_4h_to_h.weight") != -1
or key.find("adaptor2.dense_4h_to_h.weight") != -1):
val_list = [convert_val(model_list[load_offset + j][idx_pp][key]) for j in range(factor_dense)]
concat_val = np.concatenate(val_list, axis=0)
saved_path = saved_dir / f"model.{saved_key}.{idx_tp:d}.bin"
concat_val.tofile(saved_path.as_posix())
elif (key.find("mlp.dense_h_to_4h.weight") != -1
or key.find("adaptor1.dense_h_to_4h.weight") != -1
or key.find("adaptor2.dense_h_to_4h.weight") != -1
or key.find("mlp.dense_h_to_4h.bias") != -1
or key.find("adaptor1.dense_h_to_4h.bias") != -1
or key.find("adaptor2.dense_h_to_4h.bias") != -1):
val_list = [convert_val(model_list[load_offset + j][idx_pp][key]) for j in range(factor_dense)]
concat_val = np.concatenate(val_list, axis=-1)
saved_path = saved_dir / f"model.{saved_key}.{idx_tp:d}.bin"
concat_val.tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.bias") != -1:
val_list = []
for j in range(factor_dense):
val = convert_val(model_list[load_offset + j][idx_pp][key])
assert val.shape[-1] % 3 == 0
local_dim = val.shape[-1] // 3
if ckpt_ver == 3:
num_splits = 3
num_attention_heads = model_training_args.num_attention_heads
tensor_model_parallel_size = model_training_args.tensor_model_parallel_size
assert num_attention_heads % tensor_model_parallel_size == 0
head_num = num_attention_heads // tensor_model_parallel_size
assert local_dim % head_num == 0
size_per_head = local_dim // head_num
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(3, local_dim)
val_list.append(val)
concat_val = np.concatenate(val_list, axis=-1)
saved_path = saved_dir / f"model.{saved_key}.{idx_tp:d}.bin"
concat_val.tofile(saved_path.as_posix())
elif key.find("attention.query_key_value.weight") != -1:
val_list = []
for j in range(factor_dense):
val = convert_val(model_list[load_offset + j][idx_pp][key])
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / 3)
if ckpt_ver == 3:
num_splits = 3
head_num = model_training_args.num_attention_heads
size_per_head = hidden_dim // head_num
head_num = head_num // model_training_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, 3, local_dim)
val_list.append(val)
concat_val = np.concatenate(val_list, axis=-1)
saved_path = saved_dir / f"model.{saved_key}.{idx_tp:d}.bin"
concat_val.tofile(saved_path.as_posix())
elif key.find('experts') == -1:
if key.find('deepspeed_moe.gate') != -1 or key.find('megatron_moe.gate') != -1:
layer_index = (int)(key.split('.')[1]) + idx_pp * step_layer_pp
if idx_tp == 0:
moe_layers.append(layer_index)
prefix = key.replace('deepspeed_moe', 'moe').replace('megatron_moe', 'moe')
if key.find('layernorm') != -1 or key.find('gate') != -1 or key.find("attention.dense.bias") != -1 or key.find("dense_4h_to_h.bias") != -1:
if idx_tp == 0:
concat_val = convert_val(model_list[0][idx_pp][key])
file_name = os.path.join(saved_dir, "model." + prefix + ".bin")
saved_path = file_name
# print(f"Saving '{prefix}' to '{file_name}'")
# print(f"Shape: '{val.shape}'")
concat_val.tofile(file_name)
else:
if key.find("attention.dense.weight") != -1 or key.find("dense_4h_to_h.weight") != -1 or key.find("dense_h_to_4h.bias") != -1:
concat_axis = 0
elif key.find("dense_h_to_4h.weight") != -1:
concat_axis = 1
else:
print(f"[ERROR] cannot find experts key '{key}'")
sys.exit(1)
val_list = []
for j in range(factor_dense):
val = convert_val(model_list[load_offset + j][idx_pp][key])
val_list.append(val)
concat_val = np.concatenate(val_list, axis=concat_axis)
file_name = os.path.join(saved_dir, "model." + prefix + "." + str(idx_tp) + ".bin")
saved_path = file_name
concat_val.tofile(file_name)
else:
print(f"[ERROR] cannot find key '{key}'")
sys.exit(1)
print('{} {} {} {}'.format(idx_tp, idx_pp, key, saved_path))
print(concat_val.shape)
return moe_layers, has_adapters
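# A small numpy sketch of why the two MLP matrices above are split on
# different axes, assuming safe_transpose leaves the weights as
# (in_features, out_features): dense_h_to_4h is cut along its output (last)
# axis and dense_4h_to_h along its input (first) axis, and save_dense_concat
# simply concatenates the per-rank shards back along the same axes. Shapes
# are illustrative only.
def _example_tp_split_axes(hidden=4, inter=8, tp=2):
    h_to_4h = np.zeros((hidden, inter), dtype=np.float32)
    four_h_to_h = np.zeros((inter, hidden), dtype=np.float32)
    h_to_4h_shards = np.split(h_to_4h, tp, axis=-1)         # (hidden, inter / tp) each
    four_h_to_h_shards = np.split(four_h_to_h, tp, axis=0)  # (inter / tp, hidden) each
    assert np.concatenate(h_to_4h_shards, axis=-1).shape == h_to_4h.shape
    assert np.concatenate(four_h_to_h_shards, axis=0).shape == four_h_to_h.shape
    return h_to_4h_shards[0].shape, four_h_to_h_shards[0].shape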
def save_experts_split(moe_layers, num_experts, training_tensor_para_dense_size, training_tensor_para_expert_size,
factor_expert, args, map_location_fn, is_deepspeed, np_weight_data_type, saved_dir,
training_pipeline_para_expert_size):
def get_file_name(idx_moe, idx_expert, idx_rank):
if training_tensor_para_expert_size > 1:
assert len(moe_layers) % training_pipeline_para_expert_size == 0
step_layer_pp = len(moe_layers) // training_pipeline_para_expert_size
file_name = 'layer_{}_expert_{}_mp_rank_{:02}_{:03}_model_states.pt'.format(
idx_moe % step_layer_pp, idx_expert, idx_rank, idx_moe // step_layer_pp)
else:
file_name = 'layer_{}_expert_{}_mp_rank_{:02}_model_states.pt'.format(idx_moe, idx_expert, idx_rank)
return file_name
assert training_tensor_para_expert_size == 1 or training_tensor_para_expert_size == training_tensor_para_dense_size
# Saving experts weight
print(f"The number of moe layers is '{len(moe_layers)}'")
for idx_tp in range(training_tensor_para_expert_size):
save_offset = idx_tp * factor_expert
for n, idx_layer in enumerate(moe_layers):
fc1_weight = []
fc1_bias = []
fc2_weight = []
fc2_bias = []
prefix = None
for e in range(num_experts):
if training_tensor_para_expert_size == training_tensor_para_dense_size:
file_name = get_file_name(n, e, idx_tp)
file_path = os.path.join(args.input_dir, file_name)
else:
for idx_rank in range(training_tensor_para_dense_size):
file_name = get_file_name(n, e, idx_rank)
file_path = os.path.join(args.input_dir, file_name)
if os.path.exists(file_path):
break
else:
raise FileNotFoundError(f"cannot find expert checkpoint for moe layer {n}, expert {e}")
expert_dict = torch.load(file_path, map_location=map_location_fn)
for k, v in expert_dict.items():
if k.find('dense_h_to_4h.weight') != -1:
if prefix is None:
prefix = k
fc1_weight.append(v)
elif k.find('dense_h_to_4h.bias') != -1:
fc1_bias.append(v)
elif k.find('dense_4h_to_h.weight') != -1:
fc2_weight.append(v)
elif k.find('dense_4h_to_h.bias') != -1:
fc2_bias.append(v)
else:
print(f"[ERROR] cannot find expert_dict key '{k}'")
sys.exit(1)
if is_deepspeed:
prefix_list = ['model'] + prefix.split('.')[2:5] + ['moe', 'experts']
else:
prefix_list = ['model'] + prefix.split('.')[4:7] + ['moe', 'experts']
prefix = '.'.join(prefix_list) + '.'
prefix_split = prefix.split('.')
prefix_split[1] = 'layers'
prefix_split[3] = 'mlp'
prefix = '.'.join(prefix_split[:2] + [str(idx_layer)] + prefix_split[3:])
stacked_fc1_weight = torch.stack(fc1_weight, 0).transpose(-1, -2).contiguous()
# val = stacked_fc1_weight.float().cpu().numpy() # (num_experts, d_model, d_ff)
val = torch2np(stacked_fc1_weight, np_weight_data_type)
val_tensor_para = np.split(val, factor_expert, axis=2)
for i in range(factor_expert):
file_name = os.path.join(saved_dir, prefix + "dense_h_to_4h.weight." + str(save_offset + i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
stacked_fc1_bias = torch.stack(fc1_bias, 0).contiguous()
# val = stacked_fc1_bias.float().cpu().numpy() # (num_experts, d_ff)
val = torch2np(stacked_fc1_bias, np_weight_data_type)
val_tensor_para = np.split(val, factor_expert, axis=1)
for i in range(factor_expert):
file_name = os.path.join(saved_dir, prefix + "dense_h_to_4h.bias." + str(save_offset + i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
stacked_fc2_weight = torch.stack(fc2_weight, 0).transpose(-1, -2).contiguous()
# val = stacked_fc2_weight.float().cpu().numpy() # (num_experts, d_ff, d_model)
val = torch2np(stacked_fc2_weight, np_weight_data_type)
val_tensor_para = np.split(val, factor_expert, axis=1)
for i in range(factor_expert):
file_name = os.path.join(saved_dir, prefix + "dense_4h_to_h.weight." + str(save_offset + i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
if idx_tp == 0:
stacked_fc2_bias = torch.stack(fc2_bias, 0)
# val = stacked_fc2_bias.float().cpu().numpy()
val = torch2np(stacked_fc2_bias, np_weight_data_type)
file_name = os.path.join(saved_dir, prefix + "dense_4h_to_h.bias.bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
print(f"Shape: '{val.shape}'")
val.tofile(file_name)
return
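# Illustrative sketch (not part of the original converter; the function name and
# arguments are placeholders): each expert FC1 weight file written above holds a
# row-major array of shape (num_experts, d_model, d_ff // inference_tensor_para_size)
# in the selected weight_data_type, so a shard can be read back with numpy like this.
def _example_load_expert_fc1_shard(path, num_experts, d_model, d_ff_per_rank, dtype=np.float16):
    # Files are written with ndarray.tofile, i.e. a flat C-order dump, so a
    # simple fromfile + reshape recovers the stacked expert weights.
    flat = np.fromfile(path, dtype=dtype)
    return flat.reshape(num_experts, d_model, d_ff_per_rank)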
def convert_checkpoint(args):
saved_dir = pathlib.Path(args.saved_dir) / f"{args.infer_gpu_num:d}-gpu"
if saved_dir.exists():
shutil.rmtree(saved_dir)
saved_dir.mkdir(parents=True)
if args.vocab_path:
shutil.copy(args.vocab_path, (saved_dir / "vocab.json").as_posix())
if args.merges_path:
shutil.copy(args.merges_path, (saved_dir / "merges.txt").as_posix())
load_checkpoints_to_cpu = bool(args.load_checkpoints_to_cpu)
map_location_fn = cpu_map_location if load_checkpoints_to_cpu else gpu_map_location
config_model_states_list = [
# Pattern 1
{
'pattern': 'mp_rank_{:02}/model_states.pt',
'use_dense_pp': False,
'is_deepspeed': True,
},
{
'pattern': 'mp_rank_{:02}_{:03}/model_states.pt',
'use_dense_pp': True,
'is_deepspeed': True,
},
{
'pattern': 'tp_rank_{:02}_pp_rank_{:03}/model_states.pt',
'use_dense_pp': True,
'is_deepspeed': True,
},
# Pattern 2
{
'pattern': 'mp_rank_{:02}_model_states.pt',
'use_dense_pp': False,
'is_deepspeed': True,
},
{
'pattern': 'mp_rank_{:02}_{:03}_model_states.pt',
'use_dense_pp': True,
'is_deepspeed': True,
},
{
'pattern': 'tp_rank_{:02}_pp_rank_{:03}_model_states.pt',
'use_dense_pp': True,
'is_deepspeed': True,
},
# Pattern 3
{
'pattern': 'mp_rank_{:02}/model_rng.pt',
'use_dense_pp': False,
'is_deepspeed': False,
},
{
'pattern': 'mp_rank_{:02}_{:03}/model_rng.pt',
'use_dense_pp': True,
'is_deepspeed': False,
},
{
'pattern': 'tp_rank_{:02}_pp_rank_{:03}/model_rng.pt',
'use_dense_pp': True,
'is_deepspeed': False,
},
]
for config_model_states in config_model_states_list:
pattern = config_model_states['pattern']
use_dense_pp = config_model_states['use_dense_pp']
is_deepspeed = config_model_states['is_deepspeed']
if use_dense_pp:
path_model_states = os.path.join(args.input_dir, pattern.format(0, 0))
else:
path_model_states = os.path.join(args.input_dir, pattern.format(0))
if os.path.exists(path_model_states):
break
else:
raise FileNotFoundError("'path_model_states' not found")
model_states_00 = torch.load(path_model_states, map_location=map_location_fn)
for model_key in ['model', 'module']:
if model_key in model_states_00:
break
else:
raise KeyError("'model_key' not found")
ckpt_ver = model_states_00["checkpoint_version"]
assert ckpt_ver == 3
megatron_gpt_key = "encoder"
model_training_args = model_states_00["args"]
training_tensor_para_dense_size = model_training_args.tensor_model_parallel_size
training_tensor_para_expert_size = 1
training_pipeline_para_dense_size = model_training_args.pipeline_model_parallel_size
training_pipeline_para_expert_size = 1
inference_tensor_para_size = args.infer_gpu_num
assert use_dense_pp == (training_pipeline_para_dense_size > 1)
assert model_training_args.num_layers % training_pipeline_para_dense_size == 0
assert model_training_args.num_layers % training_pipeline_para_expert_size == 0
if use_dense_pp:
model_states_list = [
[
torch.load(os.path.join(args.input_dir, pattern.format(idx_tp, idx_pp)), map_location=map_location_fn)
for idx_pp in range(training_pipeline_para_dense_size)
]
for idx_tp in range(training_tensor_para_dense_size)
]
else:
model_states_list = [
[torch.load(os.path.join(args.input_dir, pattern.format(idx_tp)), map_location=map_location_fn)]
for idx_tp in range(training_tensor_para_dense_size)
]
with (saved_dir / "args.txt").open("w") as training_args_file:
for k, v in vars(model_training_args).items():
training_args_file.write(f"{k}:{v}\n")
np_weight_data_type = WEIGHT2DTYPE[args.weight_data_type]
val = model_states_00[model_key]["language_model"]["embedding"]["position_embeddings"]["weight"]
val = torch2np(val, np_weight_data_type)
val.tofile((saved_dir / "model.wpe.bin").as_posix()) # not weight, do not need to transpose
val_list = [
torch2np(sub_model_states_list[0][model_key]["language_model"]["embedding"]["word_embeddings"]["weight"],
np_weight_data_type)
for sub_model_states_list in model_states_list
]
val = np.concatenate(val_list, axis=0)
vocab_size = val.shape[0]
val.tofile((saved_dir / "model.wte.bin").as_posix())
# save vocab_size
if not hasattr(model_training_args, "padded_vocab_size"):
model_training_args.padded_vocab_size = vocab_size
structure_config = {
"gpt_with_moe": 0,
"expert_num": 0,
"moe_layers": [],
}
model_training_args_vars = vars(model_training_args)
num_experts = 0
if 'num_experts' in model_training_args_vars.keys():
num_experts = model_training_args_vars['num_experts'][0]
if num_experts != 0:
structure_config["gpt_with_moe"] = 1
structure_config['expert_num'] = num_experts
if inference_tensor_para_size >= training_tensor_para_dense_size:
assert inference_tensor_para_size % training_tensor_para_dense_size == 0
factor_dense = inference_tensor_para_size // training_tensor_para_dense_size
moe_layers, has_adapters = save_dense_split(model_states_list, factor_dense, model_key, megatron_gpt_key,
np_weight_data_type, saved_dir, ckpt_ver, model_training_args)
else:
assert training_tensor_para_dense_size % inference_tensor_para_size == 0
factor_dense = training_tensor_para_dense_size // inference_tensor_para_size
moe_layers, has_adapters = save_dense_concat(model_states_list, inference_tensor_para_size, factor_dense,
model_key, megatron_gpt_key, np_weight_data_type, saved_dir, ckpt_ver, model_training_args)
if inference_tensor_para_size >= training_tensor_para_expert_size:
assert inference_tensor_para_size % training_tensor_para_expert_size == 0
factor_expert = inference_tensor_para_size // training_tensor_para_expert_size
save_experts_split(moe_layers, num_experts, training_tensor_para_dense_size, training_tensor_para_expert_size,
factor_expert, args, map_location_fn, is_deepspeed, np_weight_data_type, saved_dir,
training_pipeline_para_expert_size)
else:
raise NotImplementedError
torch.cuda.synchronize()
structure_config['moe_layers'] = moe_layers
# Configuration for the model (load by triton backends)
config = configparser.ConfigParser()
config["gpt"] = {}
if args.vocab_path:
vocab_path = pathlib.Path(args.vocab_path)
with vocab_path.open("r") as vocab_file:
vocab = json.load(vocab_file)
start_id, end_id = vocab[DEFAULT_START_TAG], vocab[DEFAULT_END_TAG]
else:
# hard coded values from english gpt_vocab.json file
start_id, end_id = str(OPENAI_GPT2_START_ID), str(OPENAI_GPT2_END_ID)
try:
config["gpt"]["model_name"] = "gpt"
config["gpt"]["head_num"] = str(model_training_args.num_attention_heads)
config["gpt"]["size_per_head"] = str(model_training_args.hidden_size // model_training_args.num_attention_heads)
config["gpt"]["inter_size"] = str(model_training_args.ffn_hidden_size)
config["gpt"]["num_layer"] = str(model_training_args.num_layers)
config["gpt"]["max_pos_seq_len"] = str(model_training_args.max_position_embeddings)
config["gpt"]["vocab_size"] = str(model_training_args.padded_vocab_size)
config["gpt"]["has_adapters"] = str(has_adapters)
config['gpt']['adapter_inter_size'] = str(model_training_args.project_size) if has_adapters else str(0)
config["gpt"]["layernorm_eps"] = str(model_training_args.layernorm_epsilon)
config["gpt"]["start_id"] = str(start_id)
config["gpt"]["end_id"] = str(end_id)
config["gpt"]["weight_data_type"] = args.weight_data_type
config["gpt"]["tensor_para_size"] = str(args.infer_gpu_num)
config["structure"] = structure_config
with open((saved_dir / f"config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
except Exception as e:
print(f"Fail to save the config in config.ini: {e}")
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--input-dir", "-input_dir", "-i", help="folder name of checkpoint files", required=True)
parser.add_argument("--saved-dir", "-saved_dir", "-o", help="folder name of output files", required=True)
parser.add_argument(
"--infer-gpu-num", "-infer_gpu_num", "-i_g", type=int, help="How many gpus for inference", required=True
)
parser.add_argument(
"--weight-data-type", "-weight_data_type", choices=["fp32", "fp16"], default="fp16", help=""
)
parser.add_argument(
"--load-checkpoints-to-cpu",
"-load_checkpoints_to_cpu",
"-cpu",
type=int,
choices=[0, 1],
default=1,
help="Whether to load model weights to CPU",
)
parser.add_argument(
"--vocab-path",
type=str,
help="Path to vocabulary file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--merges-path", type=str, help="Path to merges file to embed in FasterTransformer checkpoint", required=False
)
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
start_time = datetime.datetime.now()
convert_checkpoint(args)
run_time = datetime.datetime.now() - start_time
print(f"[INFO] Spent {run_time} (h:m:s) to convert the model")
if __name__ == "__main__":
main()
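# Example invocation (illustrative; all paths are placeholders), using the flags
# defined by the argument parser above:
#   python megatron_gpt_moe_ckpt_convert.py \
#       --input-dir /path/to/megatron/moe/checkpoint \
#       --saved-dir /path/to/fastertransformer/c-model \
#       --infer-gpu-num 8 \
#       --weight-data-type fp16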
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/megatron_gpt_moe_ckpt_convert.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import dataclasses
import os
import pathlib
import typing
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
str_type_map = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}
class GPTWeights:
def __init__(self, head_num, size_per_head, layer_num, vocab_size, max_seq_len, tensor_para_size, pipeline_para_size,
weights_data_type: typing.Union[str, np.dtype],
inference_data_type: str,
has_adapters: bool = False,
adapter_inter_size: int = 0,
gpt_with_moe: bool = False,
has_positional_encoding: bool = True,
has_pre_decoder_layernorm: bool = False,
has_post_decoder_layernorm: bool = True,
int8_mode: int = 0,
inter_size: int = 0):
assert(head_num % tensor_para_size == 0)
if int8_mode == 1:
torch_infer_dtype = str_type_map[inference_data_type]
assert torch_infer_dtype == torch.float16 or torch_infer_dtype == torch.bfloat16, "Weight-only quantization is only supported for inference data type fp16 or bf16."
quant = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix
self.weight_transpose_calibrate_quantize = lambda x: quant(x, torch.int8)
else:
assert int8_mode == 0, "Invalid int8 mode for GPT. Must be 0 or 1"
self.head_num = head_num
self.size_per_head = size_per_head
self.layer_num = layer_num
self.vocab_size = vocab_size
self.max_seq_len = max_seq_len
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.layers_per_device = layer_num // pipeline_para_size
self.has_adapters = has_adapters
self.adapter_inter_size = adapter_inter_size
self.gpt_with_moe = gpt_with_moe
self.has_positional_encoding = has_positional_encoding
self.has_pre_decoder_layernorm = has_pre_decoder_layernorm
self.has_post_decoder_layernorm = has_post_decoder_layernorm
local_head_num = head_num // tensor_para_size
global_head_num = head_num
local_hidden_units = local_head_num * size_per_head
global_hidden_units = global_head_num * size_per_head
local_inter_size = local_hidden_units * 4
if inter_size != 0:
assert inter_size % tensor_para_size == 0, f"inter_size({inter_size}) % tensor_para_size({tensor_para_size}) must be 0"
local_inter_size = inter_size // tensor_para_size
local_adapter_inter_size = self.adapter_inter_size // tensor_para_size
self.local_head_num = local_head_num
self.global_head_num = global_head_num
self.local_hidden_units = local_hidden_units
self.global_hidden_units = global_hidden_units
self.local_inter_size = local_inter_size
self.int8_mode = int8_mode
self.share_embed = False
if isinstance(weights_data_type, str):
try:
weights_data_type = {
"fp16": np.float16,
"fp32": np.float32,
"float16": np.float16,
"float32": np.float32,
}[weights_data_type]
except KeyError:
raise ValueError(f"Don't know how to interpret weights_data_type: {weights_data_type}")
assert weights_data_type in [np.float32, np.float16]
self.weights_data_type = weights_data_type
self.inference_data_type = inference_data_type
self.w = []
self.int8_w = []
self.scale = []
# Transformer blocks
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # self_layernorm_gamma
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # self_layernorm_beta
self.w.extend([torch.zeros(global_hidden_units, local_hidden_units * 3,
dtype=str_type_map[self.inference_data_type])] * layer_num) # self_kernel
self.w.extend([torch.zeros(local_hidden_units * 3, dtype=str_type_map[self.inference_data_type])]
* layer_num) # self_bias
self.w.extend([torch.zeros(local_hidden_units, global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # self_output_kernel
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # self_output_bias
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # ffn_layernorm_gamma
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # ffn_layernorm_beta
self.w.extend([torch.zeros(global_hidden_units, local_inter_size,
dtype=str_type_map[self.inference_data_type])] * layer_num) # ffn_kernel1
self.w.extend([torch.zeros(local_inter_size, dtype=str_type_map[
self.inference_data_type])] * layer_num) # ffn_bias1
self.w.extend([torch.zeros(local_inter_size, global_hidden_units,
dtype=str_type_map[self.inference_data_type])] * layer_num) # ffn_kernel2
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # ffn_bias2
optional_adapter_offset = 0
# After Transformer blocks
if self.has_pre_decoder_layernorm:
self.w.append(torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])) # embedding layernorm gamma
self.w.append(torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])) # embedding layernorm beta
optional_adapter_offset += 2
if self.has_post_decoder_layernorm:
self.w.append(torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])) # final layernorm gamma
self.w.append(torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])) # final layernorm beta
optional_adapter_offset += 2
if self.has_positional_encoding:
self.w.append(torch.zeros(max_seq_len, global_hidden_units, dtype=str_type_map[
self.inference_data_type])) # position_encoding_table
optional_adapter_offset += 1
self.pre_embed_idx = len(self.w)
self.w.append(torch.zeros(vocab_size, global_hidden_units,
dtype=str_type_map[self.inference_data_type])) # embedding_table
self.post_embed_idx = len(self.w)
self.w.append(torch.zeros(vocab_size, global_hidden_units, dtype=str_type_map[
self.inference_data_type])) # post embedding_kernel
self.adapter_offset = 2 + optional_adapter_offset
self.w.extend([torch.empty(0, dtype=str_type_map[self.inference_data_type])] * layer_num) # gating_weight
self.adapter_offset += layer_num
# adapters
if self.has_adapters:
self.w.extend([torch.zeros(global_hidden_units, local_adapter_inter_size,
dtype=str_type_map[self.inference_data_type])] * layer_num) # adaptor1_kernel1
self.w.extend([torch.zeros(local_adapter_inter_size, dtype=str_type_map[
self.inference_data_type])] * layer_num) # adaptor1_bias1
self.w.extend([torch.zeros(local_adapter_inter_size, global_hidden_units,
dtype=str_type_map[self.inference_data_type])] * layer_num) # adaptor1_kernel2
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # adaptor1_bias2
self.w.extend([torch.zeros(global_hidden_units, local_adapter_inter_size,
dtype=str_type_map[self.inference_data_type])] * layer_num) # adaptor2_kernel1
self.w.extend([torch.zeros(local_adapter_inter_size, dtype=str_type_map[
self.inference_data_type])] * layer_num) # adaptor2_bias1
self.w.extend([torch.zeros(local_adapter_inter_size, global_hidden_units,
dtype=str_type_map[self.inference_data_type])] * layer_num) # adaptor2_kernel2
self.w.extend([torch.zeros(global_hidden_units, dtype=str_type_map[
self.inference_data_type])] * layer_num) # adaptor2_bias2
# Initialization
self._map(lambda w: torch.nn.init.normal_(w, mean=0., std=1.))
if (self.int8_mode != 0):
self.int8_w.extend([torch.zeros(global_hidden_units, local_hidden_units *
3, dtype=torch.int8)] * layer_num) # self_int8_kernel
self.scale.extend([torch.zeros(local_hidden_units * 3, dtype=torch.float)] * layer_num) # self_scale
self.int8_w.extend([torch.zeros(local_hidden_units, global_hidden_units, dtype=torch.int8)]
* layer_num) # self_output_int8_kernel
self.scale.extend([torch.zeros(global_hidden_units, dtype=torch.float)] * layer_num) # self_output_scale
self.int8_w.extend([torch.zeros(global_hidden_units, local_inter_size,
dtype=torch.int8)] * layer_num) # ffn_int8_kernel1
self.scale.extend([torch.zeros(local_inter_size, dtype=torch.float)] * layer_num) # ffn_scale1
self.int8_w.extend([torch.zeros(local_inter_size, global_hidden_units,
dtype=torch.int8)] * layer_num) # ffn_int8_kernel2
self.scale.extend([torch.zeros(global_hidden_units, dtype=torch.float)] * layer_num) # ffn_scale2
if self.has_adapters:
self.int8_w.extend([torch.zeros(global_hidden_units, local_adapter_inter_size,
dtype=torch.int8)] * layer_num) # adaptor1_int8_kernel1
self.scale.extend([torch.zeros(local_adapter_inter_size, dtype=torch.float)]
* layer_num) # adaptor1_scale1
self.int8_w.extend([torch.zeros(local_adapter_inter_size, global_hidden_units,
dtype=torch.int8)] * layer_num) # adaptor1_int8_kernel2
self.scale.extend([torch.zeros(global_hidden_units, dtype=torch.float)] * layer_num) # adaptor1_scale2
self.int8_w.extend([torch.zeros(global_hidden_units, local_adapter_inter_size,
dtype=torch.int8)] * layer_num) # adaptor2_int8_kernel1
self.scale.extend([torch.zeros(local_adapter_inter_size, dtype=torch.float)]
* layer_num) # adaptor2_scale1
self.int8_w.extend([torch.zeros(local_adapter_inter_size, global_hidden_units,
dtype=torch.int8)] * layer_num) # adaptor2_int8_kernel2
self.scale.extend([torch.zeros(global_hidden_units, dtype=torch.float)] * layer_num) # adaptor2_scale2
def __getitem__(self, idx):
return self.w[idx]
def __setitem__(self, idx, val):
self.w[idx] = val
def __len__(self):
return len(self.w)
def _map(self, func):
assert self.pre_embed_idx < self.post_embed_idx, "Pre decoder embedding index should be lower than post decoder embedding index."
for i in range(len(self.w)):
if isinstance(self.w[i], list):
for j in range(len(self.w[i])):
self.w[i][j] = func(self.w[i][j])
else:
if self.share_embed and i == self.post_embed_idx:
# If sharing the pre and post embedding, any mapping to
# the pre decoder weight will give the same output to the
# post decoder weight, so we just copy here.
self.w[self.post_embed_idx] = self.w[self.pre_embed_idx]
else:
self.w[i] = func(self.w[i])
def _map_int8(self, func):
for i in range(len(self.int8_w)):
if isinstance(self.int8_w[i], list):
for j in range(len(self.int8_w[i])):
self.int8_w[i][j] = func(self.int8_w[i][j])
else:
self.int8_w[i] = func(self.int8_w[i])
for i in range(len(self.scale)):
if isinstance(self.scale[i], list):
for j in range(len(self.scale[i])):
self.scale[i][j] = func(self.scale[i][j])
else:
self.scale[i] = func(self.scale[i])
def _map_int8_scales(self, func):
for i in range(len(self.scale)):
if isinstance(self.scale[i], list):
for j in range(len(self.scale[i])):
self.scale[i][j] = func(self.scale[i][j])
else:
self.scale[i] = func(self.scale[i])
def load(self, ckpt_path, tp_rank, pipeline_para_rank):
if not os.path.exists(ckpt_path):
raise FileNotFoundError(f"Failed to find {ckpt_path}")
w = []
type_map = {np.float32: torch.float32, np.float16: torch.float16}
# Load
def is_load(i): return i >= self.layers_per_device * \
pipeline_para_rank and i < self.layers_per_device * (pipeline_para_rank + 1)
def load_to_torch(file_path: str, is_load: bool):
if is_load:
return torch.from_numpy(np.fromfile(file_path, dtype=self.weights_data_type)).to(str_type_map[self.inference_data_type])
else:
return torch.empty(0).to(str_type_map[self.inference_data_type])
w.extend([load_to_torch(f"{ckpt_path}/model.layers.{i}.input_layernorm.weight.bin", is_load(i))
for i in range(self.layer_num)])
w.extend([load_to_torch(f"{ckpt_path}/model.layers.{i}.input_layernorm.bias.bin", is_load(i))
for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.attention.query_key_value.weight.{tp_rank}.bin", is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.attention.query_key_value.bias.{tp_rank}.bin", is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(f"{ckpt_path}/model.layers.{i}.attention.dense.weight.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(f"{ckpt_path}/model.layers.{i}.attention.dense.bias.bin", is_load(i))
for i in range(self.layer_num)])
w.extend([load_to_torch(f"{ckpt_path}/model.layers.{i}.post_attention_layernorm.weight.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(f"{ckpt_path}/model.layers.{i}.post_attention_layernorm.bias.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.mlp.dense_h_to_4h.weight.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.mlp.dense_h_to_4h.weight.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.mlp.moe.experts.dense_h_to_4h.weight.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.mlp.dense_h_to_4h.bias.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.mlp.dense_h_to_4h.bias.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.mlp.moe.experts.dense_h_to_4h.bias.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.mlp.dense_4h_to_h.weight.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.mlp.dense_4h_to_h.weight.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.mlp.moe.experts.dense_4h_to_h.weight.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.mlp.dense_4h_to_h.bias.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.mlp.dense_4h_to_h.bias.bin") \
else f"{ckpt_path}/model.layers.{i}.mlp.moe.experts.dense_4h_to_h.bias.bin",
is_load(i)) for i in range(self.layer_num)])
if self.has_pre_decoder_layernorm:
w.append(load_to_torch(f"{ckpt_path}/model.pre_decoder_layernorm.weight.bin", True))
w.append(load_to_torch(f"{ckpt_path}/model.pre_decoder_layernorm.bias.bin", True))
if self.has_post_decoder_layernorm:
w.append(load_to_torch(f"{ckpt_path}/model.final_layernorm.weight.bin", True))
w.append(load_to_torch(f"{ckpt_path}/model.final_layernorm.bias.bin", True))
if self.has_positional_encoding:
wpe = load_to_torch(f"{ckpt_path}/model.wpe.bin", True).reshape(-1, self.global_hidden_units)
assert self.max_seq_len <= wpe.size(0), (
f"max_seq_len ({self.max_seq_len} must not exceed "
f"the value of maximum sequence length during training ({wpe.size(0)})."
)
w.append(wpe)
w.append(load_to_torch(f"{ckpt_path}/model.wte.bin", True))
if os.path.isfile(f"{ckpt_path}/model.lm_head.weight.bin"):
self.share_embed = False
w.append(load_to_torch(f"{ckpt_path}/model.lm_head.weight.bin", True))
else:
self.share_embed = True
w.append(torch.empty(0).to(str_type_map[self.inference_data_type]))
gate_list = []
for i in range(self.layer_num):
if (os.path.isfile(f"{ckpt_path}/model.layers.{i}.mlp.moe.gate.wg.weight.bin")):
gate_list.append(load_to_torch(f"{ckpt_path}/model.layers.{i}.mlp.moe.gate.wg.weight.bin", True))
else:
gate_list.append(load_to_torch(f"{ckpt_path}/model.layers.{i}.mlp.moe.gate.wg.weight.bin", False))
w.extend(gate_list)
if self.has_adapters:
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_h_to_4h.weight.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_h_to_4h.weight.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.after_attention_adapter.moe.experts.dense_h_to_4h.weight.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_h_to_4h.bias.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_h_to_4h.bias.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.after_attention_adapter.moe.experts.dense_h_to_4h.bias.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_4h_to_h.weight.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_4h_to_h.weight.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.after_attention_adapter.moe.experts.dense_4h_to_h.weight.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_4h_to_h.bias.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_attention_adapter.dense_4h_to_h.bias.bin") \
else f"{ckpt_path}/model.layers.{i}.after_attention_adapter.moe.experts.dense_4h_to_h.bias.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_h_to_4h.weight.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_h_to_4h.weight.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.moe.experts.dense_h_to_4h.weight.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_h_to_4h.bias.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_h_to_4h.bias.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.moe.experts.dense_h_to_4h.bias.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_4h_to_h.weight.{tp_rank}.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_4h_to_h.weight.{tp_rank}.bin") \
else f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.moe.experts.dense_4h_to_h.weight.{tp_rank}.bin",
is_load(i)) for i in range(self.layer_num)])
w.extend([load_to_torch(
f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_4h_to_h.bias.bin" \
if os.path.isfile(f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.dense_4h_to_h.bias.bin") \
else f"{ckpt_path}/model.layers.{i}.after_ffn_adapter.moe.experts.dense_4h_to_h.bias.bin",
is_load(i)) for i in range(self.layer_num)])
assert len(self.w) == len(w)
# Reshape
try:
for i in range(len(w)):
if w[i].nelement() == self.w[i].nelement():
self.w[i] = w[i].reshape(self.w[i].shape)
else:
self.w[i] = w[i]
except RuntimeError:
raise RuntimeError(
f"head_num, size_per_head, vocab_size, and max_seq_len must be the same as the ones during training "
f"(idx: {i} expected shape: {self.w[i].shape} got shape: {w[i].shape})."
)
# transpose calibrate quantize the kernel
layer_num = self.layer_num
if self.int8_mode != 0:
for i in range(layer_num):
self.int8_w[i + 0 * layer_num], self.scale[i + 0 *
layer_num] = self.weight_transpose_calibrate_quantize(self.w[2 * layer_num + i])
self.int8_w[i + 1 * layer_num], self.scale[i + 1 *
layer_num] = self.weight_transpose_calibrate_quantize(self.w[4 * layer_num + i])
self.int8_w[i + 2 * layer_num], self.scale[i + 2 *
layer_num] = self.weight_transpose_calibrate_quantize(self.w[8 * layer_num + i])
self.int8_w[i + 3 * layer_num], self.scale[i + 3 *
layer_num] = self.weight_transpose_calibrate_quantize(self.w[10 * layer_num + i])
# We clear the original weights since they are no longer needed
if self.int8_mode == 1:
self.w[2 * layer_num + i] = torch.empty(0).to(str_type_map[self.inference_data_type])
self.w[4 * layer_num + i] = torch.empty(0).to(str_type_map[self.inference_data_type])
self.w[8 * layer_num + i] = torch.empty(0).to(str_type_map[self.inference_data_type])
self.w[10 * layer_num + i] = torch.empty(0).to(str_type_map[self.inference_data_type])
if self.has_adapters:
self.int8_w[i + 4 * layer_num], self.scale[i + 4 * layer_num] = self.weight_transpose_calibrate_quantize(
self.w[12 * layer_num + i + self.adapter_offset])
self.int8_w[i + 5 * layer_num], self.scale[i + 5 * layer_num] = self.weight_transpose_calibrate_quantize(
self.w[14 * layer_num + i + self.adapter_offset])
self.int8_w[i + 6 * layer_num], self.scale[i + 6 * layer_num] = self.weight_transpose_calibrate_quantize(
self.w[16 * layer_num + i + self.adapter_offset])
self.int8_w[i + 7 * layer_num], self.scale[i + 7 * layer_num] = self.weight_transpose_calibrate_quantize(
self.w[18 * layer_num + i + self.adapter_offset])
# Similar to above:
if self.int8_mode == 1:
self.w[12 * layer_num + i + self.adapter_offset] = torch.empty(0).to(str_type_map[self.inference_data_type])
self.w[14 * layer_num + i + self.adapter_offset] = torch.empty(0).to(str_type_map[self.inference_data_type])
self.w[16 * layer_num + i + self.adapter_offset] = torch.empty(0).to(str_type_map[self.inference_data_type])
self.w[18 * layer_num + i + self.adapter_offset] = torch.empty(0).to(str_type_map[self.inference_data_type])
return True
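# Note on the weight layout used above (added for clarity): self.w stores each
# per-layer tensor type in a contiguous block of layer_num entries, so
# w[k * layer_num + i] addresses tensor type k of layer i. The int8 path
# therefore quantizes the four GEMM kernels found at blocks 2 (QKV),
# 4 (attention output), 8 (FFN h-to-4h) and 10 (FFN 4h-to-h), matching the
# indices used in the int8_mode branch of load().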
class GPT(nn.Module):
def __init__(self,
head_num, size_per_head,
vocab_size, start_id, end_id, layer_num,
max_seq_len: int,
tensor_para_size: int,
pipeline_para_size: int,
lib_path: typing.Union[str, pathlib.Path],
inference_data_type: str,
inter_size: int = 0,
# gpt_variant_params
layernorm_eps: float = 1e-6,
layernorm_type: typing.Literal['pre_layernorm', 'post_layernorm'] = "pre_layernorm",
activation_type: str = "Gelu",
gpt_with_moe: bool = False,
expert_num: int = 0,
moe_k: int = 0,
moe_layer_index: typing.List = [],
has_positional_encoding: bool = True,
has_pre_decoder_layernorm: bool = False,
has_post_decoder_layernorm: bool = True,
has_adapters: bool = False,
adapter_inter_size: int = 0,
use_attention_linear_bias: bool = False,
int8_mode: int = 0,
weights_data_type: typing.Union[str, np.dtype] = np.float32,
shared_contexts_ratio: float = 1.0):
super().__init__()
self.head_num = head_num
self.size_per_head = size_per_head
self.vocab_size = vocab_size
self.start_id = start_id
self.end_id = end_id
self.layer_num = layer_num
self.inter_size = inter_size if inter_size != 0 else 4 * self.head_num * self.size_per_head
# gpt_variant_params
self.layernorm_eps = layernorm_eps
self.layernorm_type = layernorm_type
self.activation_type = activation_type
self.gpt_with_moe = gpt_with_moe
self.expert_num = expert_num
self.moe_k = moe_k
self.moe_layer_index = moe_layer_index
self.has_positional_encoding = has_positional_encoding
self.has_pre_decoder_layernorm = has_pre_decoder_layernorm
self.has_post_decoder_layernorm = has_post_decoder_layernorm
self.has_adapters = has_adapters
self.adapter_inter_size = adapter_inter_size
self.use_attention_linear_bias = use_attention_linear_bias
# multi-gpu params
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.use_sparse_gemm = False
self.build_model = False
self.int8_mode = int8_mode
self.weights_data_type = weights_data_type
self.shared_contexts_ratio = shared_contexts_ratio
assert torch.cuda.is_available(), "CUDA is required for this model."
assert head_num % tensor_para_size == 0, "head_num must be a multiple of tensor_para_size."
assert layer_num % pipeline_para_size == 0, "layer_num must be a multiple of pipeline_para_size."
# Load the C++ model into Pytorch model.
torch.classes.load_library(os.path.abspath(lib_path))
# Prepare weights
self.weights = GPTWeights(head_num, size_per_head, layer_num, vocab_size,
max_seq_len, tensor_para_size, pipeline_para_size,
weights_data_type=weights_data_type,
inference_data_type=inference_data_type,
gpt_with_moe=self.gpt_with_moe,
has_positional_encoding=self.has_positional_encoding,
has_pre_decoder_layernorm=self.has_pre_decoder_layernorm,
has_post_decoder_layernorm=self.has_post_decoder_layernorm,
has_adapters=self.has_adapters,
adapter_inter_size=self.adapter_inter_size,
int8_mode=int8_mode,
inter_size=inter_size)
# Prepare for tensor/pipeline parallel
try:
dist.init_process_group(backend='mpi')
except Exception:
print("[INFO] WARNING: The torch.distributed process group has already been initialized.")
self.rank = dist.get_rank()
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size()
assert world_size == tensor_para_size * pipeline_para_size, "tensor_para_size * pipeline_para_size must be equal to world_size."
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
def load(self, ckpt_path):
is_load = self.weights.load(ckpt_path, tp_rank=self.tensor_para_rank,
pipeline_para_rank=self.pipeline_para_rank)
self.cuda()
torch.cuda.empty_cache() # clean cache for model weight preprocessing
return is_load
def sparse(self):
if not self.use_sparse_gemm:
self.use_sparse_gemm = True
def cuda(self):
self.weights._map(lambda w: w.cuda(self.device))
if self.int8_mode != 0:
self.weights._map_int8(lambda w: w.cuda(self.device))
if self.build_model:
del self.model
self.build_model = False
self.model = torch.classes.FasterTransformer.GptOp(
self.head_num, self.size_per_head, self.inter_size,
self.layer_num,
self.expert_num,
self.moe_k,
self.moe_layer_index,
self.vocab_size, self.start_id, self.end_id,
self.use_sparse_gemm,
# gpt_variant_params
self.layernorm_eps,
self.layernorm_type,
self.activation_type,
self.has_positional_encoding,
self.has_pre_decoder_layernorm,
self.has_post_decoder_layernorm,
self.has_adapters,
self.adapter_inter_size,
self.use_attention_linear_bias,
self.weights.w)
self.build_model = True
def forward(self,
start_ids: torch.IntTensor,
start_lengths: torch.IntTensor,
output_len: int,
beam_width: int = 1,
top_k: typing.Optional[torch.IntTensor] = None,
top_p: typing.Optional[torch.FloatTensor] = None,
beam_search_diversity_rate: typing.Optional[torch.FloatTensor] = None,
temperature: typing.Optional[torch.FloatTensor] = None,
len_penalty: typing.Optional[torch.FloatTensor] = None,
repetition_penalty: typing.Optional[torch.FloatTensor] = None,
presence_penalty: typing.Optional[torch.FloatTensor] = None,
min_length: typing.Optional[torch.IntTensor] = None,
random_seed: typing.Optional[torch.LongTensor] = None,
bad_words_list: typing.Optional[torch.IntTensor] = None,
return_output_length: bool = False,
return_cum_log_probs: int = 0):
if not self.build_model:
# for the cases we don't load model
self.cuda()
torch.cuda.empty_cache() # clean cache for model weight preprocessing
input_len = start_ids.size(1)
assert input_len > 0, "input len must be larger than zero. For an unconditional case, use start_id as the first token."
# Inputs to device
start_ids = start_ids.cuda(self.device)
start_lengths = start_lengths.cuda(self.device)
# outputs: output_ids, output_lengths, output_cum_log_probs (optional)
outputs = self.model.forward(start_ids,
start_lengths,
output_len,
beam_width, # optional, can be None
top_k, # optional, can be None
top_p, # optional, can be None
beam_search_diversity_rate, # optional, can be None
temperature, # optional, can be None
len_penalty, # optional, can be None
repetition_penalty, # optional, can be None
presence_penalty, # optional, can be None
min_length, # optional, can be None
random_seed, # optional, can be None
bad_words_list, # optional, can be None
return_cum_log_probs) # optional, can be None
if return_cum_log_probs == 0:
output_ids, output_lengths = outputs
else:
output_ids, output_lengths, output_cum_log_probs = outputs
if return_output_length:
if return_cum_log_probs > 0:
return output_ids, output_lengths, output_cum_log_probs
else:
return output_ids, output_lengths
else:
return output_ids
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
@dataclasses.dataclass
class GptInitModelParameters:
head_num: int
size_per_head: int
layer_num: int
max_seq_len: int
tensor_para_size: int
vocab_size: int
start_id: int
end_id: int
pipeline_para_size: int
weights_data_type: str
has_adapters: bool
adapter_inter_size: int
data_type: str
int8_mode: int
sparse: int
# GPT variant params.
layernorm_eps: float = 1e-6
layernorm_type: typing.Literal['pre_layernorm', 'post_layernorm'] = 'pre_layernorm'
activation_type: str = 'gelu'
has_positional_encoding: bool = True
has_pre_decoder_layernorm: bool = False
has_post_decoder_layernorm: bool = True
use_attention_linear_bias: bool = False
inter_size: int = 0
PREDEFINED_MODELS: typing.ClassVar[dict] = {
'default': dict(),
'opt-pre': dict(layernorm_eps=1e-5,
layernorm_type='pre_layernorm',
activation_type='relu',
has_post_decoder_layernorm=True),
'opt-post': dict(layernorm_eps=1e-5,
layernorm_type='post_layernorm',
activation_type='relu',
has_post_decoder_layernorm=False),
'bloom': dict(layernorm_eps=1e-5,
layernorm_type='pre_layernorm',
activation_type='gelu',
has_positional_encoding=False,
has_pre_decoder_layernorm=True,
has_post_decoder_layernorm=True,
use_attention_linear_bias=True)
}
def gpt_init_kwargs(self):
do_not_include = ["sparse", "data_type"]
args = {k: v for k, v in dataclasses.asdict(self).items() if k not in do_not_include}
args["inference_data_type"] = dataclasses.asdict(self)["data_type"]
return args
@classmethod
def from_args(cls, args, config_reader):
model_name = args.model_name
head_num = config_reader.getint(model_name, "head_num")
size_per_head = config_reader.getint(model_name, "size_per_head")
param = cls(
head_num=head_num,
size_per_head=size_per_head,
layer_num=config_reader.getint(model_name, "num_layer"),
# When there is no positional encoding, there is no limit on the sequence
# length, so fall back to a large enough integer.
max_seq_len=config_reader.getint(model_name, "max_pos_seq_len", fallback=int(1e7)),
tensor_para_size=config_reader.getint(model_name, "tensor_para_size"),
vocab_size=config_reader.getint(model_name, "vocab_size"),
start_id=config_reader.getint(model_name, "start_id"),
end_id=config_reader.getint(model_name, "end_id"),
weights_data_type=config_reader.get(model_name, "weight_data_type"),
has_adapters=config_reader.getboolean(model_name, "has_adapters", fallback=False),
adapter_inter_size=config_reader.getint(model_name, "adapter_inter_size", fallback=0),
pipeline_para_size=(
args.pipeline_para_size
or config_reader.getint("ft_instance_hyperparameter", "pipeline_para_size", fallback=1)
),
int8_mode=(
args.int8_mode
if args.int8_mode is not None
else config_reader.getint("ft_instance_hyperparameter", "int8_mode", fallback=0)
),
data_type=(
args.data_type or
config_reader.get("ft_instance_hyperparameter", "data_type",
fallback=config_reader.get(model_name, "weight_data_type"))
),
sparse=int(getattr(args, 'sparse', False)),
inter_size=config_reader.getint(model_name, "inter_size", fallback=4*head_num*size_per_head)
)
if config_reader.has_option(model_name, 'model_variant'):
model_type = config_reader.get(model_name, 'model_variant')
model_params = cls.PREDEFINED_MODELS[model_type]
param.update(model_params)
return param
def update(self, update_params: dict):
for k, v in update_params.items():
setattr(self, k, v)
return self
def asdict(self):
return dataclasses.asdict(self)
@classmethod
def update_argparser(cls, parser):
parser.add_argument("--model-name", type=str, default="gpt", help="Model name from config.ini file")
parser.add_argument("--pipeline-para-size", type=int, help="size of pipeline parallelism")
parser.add_argument("--data-type", type=str, help="data type", choices=["fp32", "bf16", "fp16"])
parser.add_argument(
"--sparse", action='store_true',
help="Enable sparse matrix multiplication. (Need SM 8.0 or 8.6 and SPARSITY_SUPPORT=ON)")
parser.add_argument("--int8-mode", type=int, choices=[0, 1], help="Set int8 mode")
@dataclasses.dataclass
class GptRuntimeModelParameters:
beam_width: int
top_k: torch.Tensor
top_p: torch.Tensor
beam_search_diversity_rate: torch.Tensor
temperature: torch.Tensor
len_penalty: torch.Tensor
repetition_penalty: torch.Tensor
def gpt_forward_kwargs(self):
return dataclasses.asdict(self)
@classmethod
def from_args(cls, args, config_reader, batch_size=None):
bs = args.batch_size
if batch_size is not None:
bs = batch_size
return cls(
beam_width=args.beam_width or config_reader.getint("ft_instance_hyperparameter", "beam_width", fallback=1),
top_k=(args.sampling_top_k or config_reader.getint("ft_instance_hyperparameter", "top_k", fallback=1)) *
torch.ones(size=[bs], dtype=torch.int32),
top_p=(args.sampling_top_p or config_reader.getfloat("ft_instance_hyperparameter", "top_p", fallback=0.0)) *
torch.ones(size=[bs], dtype=torch.float32),
beam_search_diversity_rate=(
args.beam_search_diversity_rate
or config_reader.getfloat("ft_instance_hyperparameter", "beam_search_diversity_rate", fallback=0.0)
) * torch.ones(size=[bs], dtype=torch.float32),
temperature=(args.temperature or config_reader.getfloat("ft_instance_hyperparameter",
"temperature", fallback=1.0)) * torch.ones(size=[bs], dtype=torch.float32),
len_penalty=(args.len_penalty or config_reader.getfloat("ft_instance_hyperparameter",
"len_penalty", fallback=0.0)) * torch.ones(size=[bs], dtype=torch.float32),
repetition_penalty=(
args.repetition_penalty or config_reader.getfloat("ft_instance_hyperparameter", "repetition_penalty", fallback=1.0)
) * torch.ones(size=[bs], dtype=torch.float32),
)
def slice_args(self, idx):
return GptRuntimeModelParameters(
beam_width=self.beam_width,
top_k=self.top_k[idx],
top_p=self.top_p[idx],
beam_search_diversity_rate=self.beam_search_diversity_rate[idx],
temperature=self.temperature[idx],
len_penalty=self.len_penalty[idx],
repetition_penalty=self.repetition_penalty[idx],
)
@classmethod
def update_argparser(cls, parser):
parser.add_argument("--beam-width", type=int, help="beam width")
parser.add_argument("--sampling-top-k", type=int, help="Candidate (k) value of top k sampling in decoding")
parser.add_argument("--sampling-top-p", type=float, help="Probability (p) value of top p sampling in decoding.")
parser.add_argument("--temperature", type=float, help="temperature")
parser.add_argument("--len-penalty", type=float, help="len_penalty")
parser.add_argument("--repetition-penalty", type=float, help="repetition penalty")
parser.add_argument("--beam-search-diversity-rate", type=float, help="beam_search_diversity_rate")
DEFAULT_START_TAG = "<|endoftext|>"
DEFAULT_END_TAG = "<|endoftext|>"
OPENAI_GPT2_START_ID = 50256
OPENAI_GPT2_END_ID = 50256
@dataclasses.dataclass
class GptModelConfig:
model_name: str
tensor_para_size: int
head_num: int
size_per_head: int
inter_size: int
num_layer: int
max_pos_seq_len: int
weight_data_type: str
vocab_size: int
start_id: int
end_id: int
@classmethod
def from_nemo_package(
cls,
*,
args: argparse.Namespace,
nemo_model_config: typing.Dict[str, typing.Any],
bos_id: int,
eos_id: int,
vocab_size: int,
):
return cls(
model_name="gpt",
tensor_para_size=args.infer_gpu_num,
head_num=nemo_model_config["num_attention_heads"],
size_per_head=nemo_model_config["hidden_size"] // nemo_model_config["num_attention_heads"],
inter_size=nemo_model_config["ffn_hidden_size"],
num_layer=nemo_model_config["num_layers"],
max_pos_seq_len=nemo_model_config["max_position_embeddings"],
weight_data_type=args.weight_data_type,
vocab_size=vocab_size,
start_id=bos_id,
end_id=eos_id,
)
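# Illustrative usage sketch (not part of the original file; sizes, token ids and
# paths are placeholders roughly matching a Megatron 345M GPT checkpoint):
#
#   model = GPT(head_num=16, size_per_head=64, vocab_size=50304,
#               start_id=50256, end_id=50256, layer_num=24,
#               max_seq_len=1024, tensor_para_size=1, pipeline_para_size=1,
#               lib_path="/path/to/libth_transformer.so",
#               inference_data_type="fp16")
#   model.load(ckpt_path="/path/to/c-model/1-gpu")
#   output_ids = model(start_ids, start_lengths, output_len=32)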
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/gpt.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import argparse
import dataclasses
from pathlib import Path
from typing import Optional
import numpy as np
import torch
from . import gpt
from . import parallel_gpt
class BloomWeight(gpt.GPTWeights):
def __init__(self, head_num, size_per_head, layer_num, vocab_size,
tensor_para_size, pipeline_para_size, weights_data_type, inference_data_type,
int8_mode=0):
super().__init__(
head_num, size_per_head, layer_num, vocab_size, 0,
tensor_para_size, pipeline_para_size, weights_data_type,
inference_data_type,
has_adapters=False,
adapter_inter_size=0,
has_positional_encoding=False,
has_pre_decoder_layernorm=True,
has_post_decoder_layernorm=True,
int8_mode=int8_mode)
@dataclasses.dataclass
class BloomParam:
num_heads: int
size_per_head: int
inter_size: int
num_layers: int
vocab_size: int
start_id: Optional[int] = None
end_id: Optional[int] = None
tensor_para_size: int = 1
pipeline_para_size: int = 1
remove_padding: bool = True
shared_contexts_ratio: float = 1.0
def __post_init__(self):
if not 0.0 <= self.shared_contexts_ratio <= 1.0:
raise ValueError(
f'Got an invalid value of shared_context_ratio '
f'{self.shared_contexts_ratio} - range: [0.0, 1.0]')
@classmethod
def from_args(cls, args: argparse.Namespace):
return cls(num_heads=args.num_heads,
size_per_head=args.size_per_head,
inter_size=args.inter_size,
num_layers=args.num_layers,
vocab_size=args.vocab_size,
start_id=args.start_id,
end_id=args.end_id,
tensor_para_size=args.tensor_para_size,
pipeline_para_size=args.pipeline_para_size,
shared_contexts_ratio=args.shared_contexts_ratio)
@staticmethod
def add_args_group(parser: argparse.ArgumentParser):
group = parser.add_argument_group('Bloom Model Configuration')
group.add_argument(
'--num-heads', type=int, metavar='N', default=None,
help='The number of attention heads.')
group.add_argument(
'--size-per-head', type=int, metavar='N', default=None,
help='The dimension of an attention head.')
group.add_argument(
'--inter-size', type=int, metavar='N', default=None,
help='The intermediate dimension of the MLP block. If None, '
'it defaults to 4 * num_heads * size_per_head.')
group.add_argument(
'--num-layers', type=int, metavar='N', default=None,
help='The number of bloom layers.')
group.add_argument(
'--vocab-size', type=int, metavar='N', default=None,
help='The vocabulary size.')
group.add_argument(
'-tp', '--tensor-para-size', type=int, metavar='N', default=1,
help='The size of tensor parallelism.')
group.add_argument(
'-pp', '--pipeline-para-size', type=int, metavar='N', default=1,
help='The size of pipeline parallelism.')
group.add_argument(
'--no-remove-padding', action='store_false', dest='remove_padding',
help='Disable the optimization feature that skips padded tokens'
' during context computation.')
group.add_argument(
'--shared-contexts-ratio', type=float, metavar='M', default=1.0,
help='The threshold of the duplication ratio to apply the context'
' sharing. If less than shared_context_ratio * batch_size '
'sentences are duplicated among inputs of size batch_size, '
'the model shares those inputs during context computation.')
def asdict(self):
return dataclasses.asdict(self)
@dataclasses.dataclass
class BloomInferParam:
beam_width: int = 1
top_k: torch.IntTensor = 1
top_p: torch.FloatTensor = 1.0
beam_search_diversity_rate: torch.FloatTensor = 0.0
temperature: torch.FloatTensor = 1.0
len_penalty: torch.FloatTensor = 0.0
repetition_penalty: torch.FloatTensor = 1.0
random_seed: torch.LongTensor = None
return_output_length: bool = True
return_cum_log_probs: bool = False
@classmethod
def from_args(cls,
args: argparse.Namespace,
batch_size: Optional[int] = None):
batch_size = batch_size or args.batch_size
random_seed = args.random_seed
if random_seed is None:
random_seed = torch.randint(int(1e5), size=(batch_size,)).long()
else:
# Pad by wrapping so that the tensor has size (batch_size,)
pad_length = batch_size - len(random_seed)
random_seed = np.pad(args.random_seed, (0, pad_length), mode='wrap')
random_seed = torch.tensor(random_seed).long()
ones = torch.ones(batch_size, dtype=torch.float32)
return cls(
beam_width=args.beam_width,
top_k=args.top_k * ones.int(),
top_p=args.top_p * ones,
beam_search_diversity_rate=args.beam_search_diversity_rate * ones,
temperature=args.temperature * ones,
len_penalty=args.len_penalty * ones,
repetition_penalty=args.repetition_penalty * ones,
random_seed=random_seed,
return_output_length=args.return_cum_log_probs > 0,
return_cum_log_probs=args.return_cum_log_probs)
def slice_args(self, idx):
safe_slice = lambda x: x[idx] if x.numel() > 1 else x
return __class__(
beam_width=self.beam_width,
top_k=safe_slice(self.top_k),
top_p=safe_slice(self.top_p),
beam_search_diversity_rate=safe_slice(
self.beam_search_diversity_rate),
temperature=safe_slice(self.temperature),
len_penalty=safe_slice(self.len_penalty),
repetition_penalty=safe_slice(self.repetition_penalty))
@staticmethod
def add_args_group(parser: argparse.ArgumentParser):
group = parser.add_argument_group('Bloom Inference Parameters')
group.add_argument(
'--batch-size', type=int, metavar='N', default=8,
help='Inference batch size.')
group.add_argument(
'--output-length', type=int, metavar='N', default=32,
help='The number of output tokens to generate.')
group.add_argument(
'--beam-width', type=int, metavar='N', default=1,
help='The beam width for beam search. When beam_width=1, '
'a sampling method will be used to generate a token.')
group.add_argument(
'--top-k', type=int, metavar='N', default=1,
help='Top-k sampling. The number of most probable tokens to keep '
'for sampling.')
group.add_argument(
'--top-p', type=float, metavar='M', default=1.,
help='Top-p sampling. The cumulative probability threshold used to filter '
'the set of most probable tokens. If 1, it is equivalent to '
'ancestral sampling.')
group.add_argument(
'--temperature', type=float, metavar='M', default=1.,
help='The temperature value for smoothing the logit distribution.')
group.add_argument(
'--len-penalty', type=float, metavar='M', default=0.,
help='The exponent of the length penalty of beam scores.')
group.add_argument(
'--beam-search-diversity-rate', type=float, metavar='M', default=0.,
help='The diversity rate of beam search.')
group.add_argument(
'--start-id', type=int, metavar='N', default=0,
help='The index of the start token.')
group.add_argument(
'--end-id', type=int, metavar='N', default=2,
help='The index of the end token. FT will use the eos token to '
'pad a sequence while Bloom requires the pad token to get '
'correct results. According to the pretrained model, we set '
'the eos token to the pad token id 3 instead of the eos '
'token id 2.')
group.add_argument(
'--repetition-penalty', type=float, default=1.,
help='The repetition penalty.')
group.add_argument(
'--random-seed', type=int, metavar='N', nargs='+', default=None,
help='Random seed values. If multiple values are provided, they '
'will be assigned to each sentence in a batch. Otherwise, '
'if a single value is provided, all sentences share the same '
'random seed.')
group.add_argument(
'--return-cum-log-probs', type=int, default=0, choices=[0, 1, 2],
help='Whether to compute the cumulative log probability of the '
'context sentences.\n'
' 0: do not return the cumulative log probs\n'
' 1: return the cumulative log probs of generated sequences\n'
' 2: return the cumulative log probs of sequences')
def asdict(self):
return dataclasses.asdict(self)
class Bloom(parallel_gpt.ParallelGPT):
def __init__(self,
head_num, size_per_head,
vocab_size, start_id, end_id, layer_num,
tensor_para_size: int,
pipeline_para_size: int,
lib_path: str | Path,
inference_data_type: str,
weights_data_type: str | np.dtype = np.float32,
layernorm_eps: float = 1e-5,
shared_contexts_ratio: float = 1.0,
int8_mode: int = 0):
super().__init__(
head_num, size_per_head, vocab_size, start_id, end_id, layer_num,
0, tensor_para_size, pipeline_para_size,
lib_path=lib_path,
inference_data_type=inference_data_type,
layernorm_eps=layernorm_eps,
# gpt_variant_params
layernorm_type="pre_layernorm",
activation_type="Gelu",
has_positional_encoding=False,
has_pre_decoder_layernorm=True,
has_post_decoder_layernorm=True,
has_adapters=False,
adapter_inter_size=0,
use_attention_linear_bias=True,
int8_mode=int8_mode,
weights_data_type=weights_data_type,
shared_contexts_ratio=shared_contexts_ratio)
def set_input_tensor(self, input_tensor: Optional[torch.Tensor]):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func
"""
self.input_tensor = input_tensor
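# Illustrative usage sketch (not part of the original file; sizes and token ids
# are placeholders roughly matching bloom-560m). Bloom only fixes the GPT variant
# flags (ALiBi attention bias, pre/post decoder layernorm, no positional
# encoding), so construction mirrors the plain GPT example:
#
#   bloom = Bloom(head_num=16, size_per_head=64, vocab_size=250880,
#                 start_id=1, end_id=2, layer_num=24,
#                 tensor_para_size=1, pipeline_para_size=1,
#                 lib_path="/path/to/libth_transformer.so",
#                 inference_data_type="fp16")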
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/bloom.py
|
"""Byte pair encoding utilities"""
# Modified MIT License
# Software Copyright (c) 2019 OpenAI
# We don’t claim ownership of the content you create with GPT-2, so it is yours to do with as you please.
# We only ask that you use GPT-2 responsibly and clearly indicate your content was created using GPT-2.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# The above copyright notice and this permission notice need not be included
# with content created by the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
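# Illustrative sketch (hypothetical helper, not used by the encoder below): the
# byte<->unicode mapping is a bijection, so encoding arbitrary UTF-8 bytes into
# printable unicode characters and back is lossless.
def _bytes_to_unicode_roundtrip(text="héllo world!"):
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    # map each UTF-8 byte to its printable unicode stand-in, then invert the mapping
    encoded = ''.join(byte_encoder[b] for b in text.encode('utf-8'))
    decoded = bytearray(byte_decoder[c] for c in encoded).decode('utf-8')
    assert decoded == text
    return encoded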
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def get_encoder(vocab_file, bpe_file):
with open(vocab_file, 'r') as f:
encoder = json.load(f)
with open(bpe_file, 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
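# Example usage (hypothetical paths: a GPT-2 style vocab.json / merges.txt pair is
# assumed to be present in the working directory; any compatible BPE vocabulary works).
if __name__ == '__main__':
    enc = get_encoder('vocab.json', 'merges.txt')
    ids = enc.encode("FasterTransformer makes GPT inference fast.")
    print(ids)
    print(enc.decode(ids))  # round-trips back to the original text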
|
FasterTransformer-main
|
examples/pytorch/gpt/utils/gpt_token_encoder.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import argparse
import timeit
import torch
import torch.cuda.nvtx as nvtx
import time
import sys
import numpy as np
import random
from onmt.utils.misc import sequence_mask
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.encoder.utils.ft_encoder import EncoderWeights
from examples.pytorch.encoder.utils.ft_encoder import CustomEncoder
from examples.pytorch.encoder.utils.ft_encoder import ONMTEncoder
def main():
parser = argparse.ArgumentParser()
parser.add_argument('batch_size', type=int,
help='batch size')
parser.add_argument('layer_num', type=int,
help='number of layers')
parser.add_argument('seq_len', type=int,
help='sequence length')
parser.add_argument('head_num', type=int,
help='head number')
parser.add_argument('head_size', type=int,
help='size per head')
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument('--time', action='store_true',
help='test the time or not.')
parser.add_argument('--avg_seq_len', type=int, default=-1, metavar='NUMBER',
help='average sequence length (default: -1)')
parser.add_argument('--remove_padding', action='store_true',
help='Remove the padding of sentences of encoder.')
parser.add_argument('--allow_gemm_test', action='store_true',
help='Whether allow gemm test inside FT.')
parser.add_argument('--ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
args = parser.parse_args()
encoder_example(vars(args))
def encoder_example(args):
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
batch_size = args['batch_size']
seq_len = args['seq_len']
layer_num = args['layer_num']
head_num = args['head_num']
head_size = args['head_size']
hidden_dim = head_num * head_size
print("\n=============== Argument ===============")
for key in args:
print("{}: {}".format(key, args[key]))
print("========================================\n")
inp = torch.empty(batch_size, seq_len, hidden_dim).cuda()
torch.nn.init.normal_(inp, -0.02, 0.02)
mem_seq_lens = torch.randint(1, seq_len+1, (batch_size,), dtype=torch.int32).cuda()
if args['remove_padding']:
if args['avg_seq_len'] > 0:
mem_seq_lens = torch.ones((batch_size,)) * args['avg_seq_len']
mem_seq_lens = mem_seq_lens.to(torch.int32).cuda()
elif args['avg_seq_len'] == -1:
mem_seq_lens = torch.ones((batch_size,)) * seq_len / 2
mem_seq_lens = mem_seq_lens.to(torch.int32).cuda()
else:
raise ValueError("wrong avg_seq_len")
mask = ~sequence_mask(mem_seq_lens, seq_len).unsqueeze(1)
if args['data_type'] == 'fp16':
inp = inp.half()
weights = EncoderWeights(layer_num, hidden_dim)
onmt_encoder = ONMTEncoder(layer_num, hidden_dim, head_num, 4 * hidden_dim, weights)
onmt_encoder.cuda()
if args['data_type'] == 'fp16':
onmt_encoder.half()
onmt_encoder.eval()
onmt_encoder = torch.jit.trace(onmt_encoder, (inp, mask))
if args['data_type'] == 'fp16':
weights.to_half()
weights.to_cuda()
custom_encoder = CustomEncoder(layer_num, head_num, head_size, weights,
remove_padding=False, allow_gemm_test=args['allow_gemm_test'],
path=args['ths_path'])
custom_encoder = torch.jit.script(custom_encoder)
eff_custom_encoder = CustomEncoder(layer_num, head_num, head_size, weights,
remove_padding=True, allow_gemm_test=args['allow_gemm_test'],
path=args['ths_path'])
eff_custom_encoder = torch.jit.script(eff_custom_encoder)
with torch.no_grad():
output_mask = sequence_mask(mem_seq_lens, args['seq_len']).to(mask.dtype).unsqueeze(-1)
onmt_output = onmt_encoder(inp, mask) * output_mask
print(onmt_output)
print(onmt_output.size())
ft_output = custom_encoder(inp, mem_seq_lens) * output_mask
print(ft_output)
print(ft_output.size())
eff_ft_output = eff_custom_encoder(inp, mem_seq_lens) * output_mask
print(eff_ft_output)
print(eff_ft_output.size())
FT_diff = torch.abs(onmt_output - ft_output)
print('FT Mean diff: {}'.format(torch.mean(FT_diff)))
print('FT Max diff: {}'.format(torch.max(FT_diff)))
print('FT Min diff: {}'.format(torch.min(FT_diff)))
EFF_diff = torch.abs(onmt_output - eff_ft_output)
print('EFF-FT Mean diff: {}'.format(torch.mean(EFF_diff)))
print('EFF-FT Max diff: {}'.format(torch.max(EFF_diff)))
print('EFF-FT Min diff: {}'.format(torch.min(EFF_diff)))
if args['time']:
iterations = 100
for i in range(iterations):
output = onmt_encoder(inp, mask)
t10 = timeit.default_timer()
# nvtx.range_push("hf")
for i in range(iterations):
# nvtx.range_push("hf"+str(i))
output = onmt_encoder(inp, mask)
# nvtx.range_pop()
# nvtx.range_pop()
t1 = timeit.default_timer() - t10
# time.sleep(60)
for i in range(iterations):
output = custom_encoder(inp, mem_seq_lens)
t20 = timeit.default_timer()
# nvtx.range_push("ext")
for i in range(iterations):
# nvtx.range_push("ext"+str(i))
output = custom_encoder(inp, mem_seq_lens)
# nvtx.range_pop()
# nvtx.range_pop()
t2 = timeit.default_timer() - t20
# time.sleep(60)
for i in range(iterations):
output = eff_custom_encoder(inp, mem_seq_lens)
t30 = timeit.default_timer()
# nvtx.range_push("eff_ext")
for i in range(iterations):
# nvtx.range_push("eff_ext"+str(i))
output = eff_custom_encoder(inp, mem_seq_lens)
# nvtx.range_pop()
# nvtx.range_pop()
t3 = timeit.default_timer() - t30
# time.sleep(60)
print("[INFO] ONMTEnocder time costs: {:.2f} ms".format(t1*1000/iterations))
print("[INFO] FasterTransformer time costs: {:.2f} ms".format(t2*1000/iterations))
print("[INFO] EFF-FasterTransformer time costs: {:.2f} ms".format(t3*1000/iterations))
return max(torch.mean(FT_diff), torch.mean(EFF_diff))
if __name__ == '__main__':
main()
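# Example invocation (hypothetical sizes; the positional arguments are
# batch_size layer_num seq_len head_num head_size, as defined in the parser above):
#   python encoder_example.py 8 12 32 12 64 --data_type fp16 --time --remove_padding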
|
FasterTransformer-main
|
examples/pytorch/encoder/encoder_example.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import torch
import torch.nn as nn
from onmt.encoders.transformer import TransformerEncoderLayer
class EncoderWeights(object):
def __init__(self, layer_num, hidden_dim, weights=None):
"""weights need be a state_dict of bert model"""
self.layer_num = layer_num
self.hidden_dim = hidden_dim
self.weights = {}
if weights is None:
self.weights['encoder.layer_norm.weight'] = torch.zeros(hidden_dim)
self.weights['encoder.layer_norm.bias'] = torch.zeros(hidden_dim)
# self.weights['encoder.embeddings.make_embedding.emb_luts.0.weight']
# self.weights['encoder.embeddings.make_embedding.pe.pe']
for i in range(layer_num):
pre = 'encoder.transformer.' + str(i) + '.'
self.weights[pre + 'layer_norm.weight'] = torch.zeros(hidden_dim)
self.weights[pre + 'layer_norm.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'self_attn.linear_query.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'self_attn.linear_query.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'self_attn.linear_keys.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'self_attn.linear_keys.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'self_attn.linear_values.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'self_attn.linear_values.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'self_attn.final_linear.weight'] = torch.zeros(hidden_dim, hidden_dim)
self.weights[pre + 'self_attn.final_linear.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'feed_forward.layer_norm.weight'] = torch.zeros(hidden_dim)
self.weights[pre + 'feed_forward.layer_norm.bias'] = torch.zeros(hidden_dim)
self.weights[pre + 'feed_forward.w_1.weight'] = torch.zeros(4 * hidden_dim, hidden_dim)
self.weights[pre + 'feed_forward.w_1.bias'] = torch.zeros(4 * hidden_dim)
self.weights[pre + 'feed_forward.w_2.weight'] = torch.zeros(hidden_dim, 4 * hidden_dim)
self.weights[pre + 'feed_forward.w_2.bias'] = torch.zeros(hidden_dim)
for k, v in self.weights.items():
self.weights[k] = torch.nn.init.uniform_(v, -1, 1)
else:
self.weights = weights
def listed_weights(self):
ret = []
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'layer_norm.weight'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'layer_norm.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.linear_query.weight'].transpose(-1, -2) for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.linear_query.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.linear_keys.weight'].transpose(-1, -2) for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.linear_keys.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.linear_values.weight'].transpose(-1, -2) for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.linear_values.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.final_linear.weight'].transpose(-1, -2) for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'self_attn.final_linear.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'feed_forward.layer_norm.weight'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'feed_forward.layer_norm.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'feed_forward.w_1.weight'].transpose(-1, -2) for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'feed_forward.w_1.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'feed_forward.w_2.weight'].transpose(-1, -2) for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(torch.stack([self.weights['encoder.transformer.' + str(layer_idx) + '.' + 'feed_forward.w_2.bias'] for layer_idx in range(self.layer_num)], 0).contiguous())
ret.append(self.weights['encoder.layer_norm.weight'].contiguous())
ret.append(self.weights['encoder.layer_norm.bias'].contiguous())
return ret
def to_cuda(self):
for k, v in self.weights.items():
self.weights[k] = v.cuda()
def to_half(self):
for k, v in self.weights.items():
self.weights[k] = v.half()
def to_bfloat16(self):
for k, v in self.weights.items():
self.weights[k] = v.bfloat16()
class CustomEncoder(torch.nn.Module):
def __init__(self, layer_num, head_num, head_size, weights,
remove_padding=False, allow_gemm_test=False, path='./lib/libth_transformer.so', embedding=None):
super().__init__()
self.layer_num = layer_num
self.remove_padding = remove_padding
self.embedding = embedding
torch.classes.load_library(path)
weights_ = weights.listed_weights()
assert len(weights_) == 18
try:
self.encoders = torch.classes.FasterTransformer.Encoder(
*weights_,
head_num, head_size, 4 * head_num * head_size, remove_padding, layer_num, allow_gemm_test, False, 1.0)
except Exception:
# legacy ths for 20.03 image
self.encoders = torch.classes.FasterTransformerEncoder(
*weights_,
head_num, head_size, 4 * head_num * head_size, remove_padding, layer_num, allow_gemm_test, False, 1.0)
def forward(self, inputs, lengths):
if self.embedding is not None:
emb = self.embedding(inputs)
inputs = emb.transpose(0, 1).contiguous()
hidden_states = self.encoders.forward(inputs, lengths)
if self.embedding is not None:
return emb, hidden_states.transpose(0, 1).contiguous(), lengths
else:
return hidden_states
class ONMTEncoder(torch.nn.Module):
def __init__(self, num_layers, d_model, heads, d_ff, weights):
super(ONMTEncoder, self).__init__()
self.transformer = nn.ModuleList(
[TransformerEncoderLayer(
d_model, heads, d_ff, 0.0, 0.0,
max_relative_positions=0)
for i in range(num_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.layer_norm.weight.data = weights.weights['encoder.layer_norm.weight']
self.layer_norm.bias.data = weights.weights['encoder.layer_norm.bias']
w = {}
for k, v in weights.weights.items():
if k.startswith('encoder.transformer'):
w[k[20:]] = v
self.transformer.load_state_dict(w)
def forward(self, src, mask):
out = src
for layer in self.transformer:
out = layer(out, mask)
out = self.layer_norm(out)
return out
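# Minimal usage sketch (assumptions: a built ./lib/libth_transformer.so and a CUDA
# device are available; the sizes below are arbitrary examples, not defaults).
if __name__ == '__main__':
    layer_num, head_num, head_size = 2, 8, 64
    hidden_dim = head_num * head_size
    weights = EncoderWeights(layer_num, hidden_dim)   # randomly initialized weights
    weights.to_cuda()
    ft_encoder = CustomEncoder(layer_num, head_num, head_size, weights,
                               path='./lib/libth_transformer.so')
    ft_encoder = torch.jit.script(ft_encoder)
    inp = torch.randn(4, 16, hidden_dim, device='cuda')
    lengths = torch.full((4,), 16, dtype=torch.int32, device='cuda')
    with torch.no_grad():
        print(ft_encoder(inp, lengths).size())        # -> [4, 16, hidden_dim]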
|
FasterTransformer-main
|
examples/pytorch/encoder/utils/ft_encoder.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021, NAVER Corp. Authored by CLOVA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from torch.nn.utils.rnn import pad_sequence
import random
import os
import sys
import argparse
import configparser
import timeit
import torch
import torch.distributed as dist
import numpy as np
from transformers import AutoTokenizer
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.gptneox.utils.gptneox import GptNeoX
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output_len', type=int, default=32,
help='output sequence length to generate.')
parser.add_argument('--beam_width', type=int, default=1,
help='beam width for beam search. Using sampling when beam width is 1.')
parser.add_argument('--top_k', type=int, default=1,
help='top k candidate num')
parser.add_argument('--top_p', type=float, default=0.,
help='top p probability threshold')
parser.add_argument('--temperature', type=float, default=1.,
help='temperature')
parser.add_argument('--len_penalty', type=float, default=0.,
help='len_penalty')
parser.add_argument('--beam_search_diversity_rate', type=float, default=0.,
help='beam_search_diversity_rate')
parser.add_argument('--tensor_para_size', type=int, default=1,
help='tensor parallel size')
parser.add_argument('--pipeline_para_size', type=int, default=1,
help='pipeline parallel size')
parser.add_argument('--ckpt_path', type=str, default='../models/gptneox/c-model/NeoX-1.3B/1-gpu',
help='path to the checkpoint file.')
parser.add_argument('--tokenizer_path', type=str, default='../models/gptneox/model/NeoX-1.3B',
help='directory where the tokenizer file is located.')
parser.add_argument('--lib_path', type=str, default='./lib/libth_transformer.so',
help='path to the pyt_fastertransformer dynamic lib file.')
parser.add_argument('--sample_input_file', type=str,
help='path to the sample input file.')
parser.add_argument('--max_batch_size', type=int, default=8,
help='max batch size.')
parser.add_argument('--repetition_penalty', type=float, default=1.,
help='repetition penalty')
parser.add_argument('--max_seq_len', type=int, default=1024,
help='max sequence length for position embedding table.')
parser.add_argument('--inference_data_type', '--data_type', type=str, choices=['fp32', 'fp16'], default='fp16')
parser.add_argument('--time', action='store_true',
help='whether or not to measure time elapsed.')
parser.add_argument('--enable_random_seed', action='store_true',
help='whether to enable the random seed.')
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(os.path.join(args.ckpt_path, "config.ini"))
head_num = int(config.get('gptneox', 'head_num'))
size_per_head = int(config.get('gptneox', 'size_per_head'))
vocab_size = int(config.get('gptneox', 'vocab_size'))
layer_num = int(config.get('gptneox', 'num_layer'))
rotary_embedding = int(config.get('gptneox', 'rotary_embedding'))
start_id = int(config.get('gptneox', 'start_id'))
end_id = int(config.get('gptneox', 'end_id'))
use_gptj_residual = (config.get('gptneox', 'use_gptj_residual') == "1")
weight_data_type = config.get('gptneox', 'weight_data_type')
ckpt_path = args.ckpt_path
tokenizer_path = args.tokenizer_path
lib_path = args.lib_path
output_len = args.output_len
beam_width = args.beam_width
top_k = args.top_k
top_p = args.top_p
temperature = args.temperature
len_penalty = args.len_penalty
beam_search_diversity_rate = args.beam_search_diversity_rate
tensor_para_size = args.tensor_para_size
pipeline_para_size = args.pipeline_para_size
max_batch_size = args.max_batch_size
max_seq_len = args.max_seq_len
repetition_penalty = args.repetition_penalty
inference_data_type = args.inference_data_type
print("\n=============== Arguments ===============")
for arg in vars(args):
print("{}: {}".format(arg, getattr(args, arg)))
print("=========================================\n")
if tensor_para_size * pipeline_para_size > 1:
dist.init_process_group(backend=dist.Backend.MPI)
rank = dist.get_rank() if dist.is_initialized() else 0
device_count = dist.get_world_size() if dist.is_initialized() else 1
device = rank % device_count
torch.cuda.set_device(device)
device = torch.cuda.current_device()
# sentencepiece needed
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
# Inputs
contexts = []
if args.sample_input_file: # conditional case
with open(args.sample_input_file, "r") as f:
contexts = f.read().splitlines()
batch_size = min(len(contexts), max_batch_size)
contexts = contexts[:batch_size]
start_ids = [torch.tensor(tokenizer.encode(c), dtype=torch.int32, device=device) for c in contexts]
else: # unconditional case
batch_size = max_batch_size
contexts = ['<|endoftext|>'] * batch_size
start_ids = [torch.IntTensor([end_id])] * batch_size
print("[INFO] batch size: {}".format(batch_size))
start_lengths = [len(ids) for ids in start_ids]
start_ids = pad_sequence(start_ids, batch_first=True, padding_value=end_id)
start_lengths = torch.IntTensor(start_lengths)
if args.enable_random_seed:
random_seed_tensor = torch.randint(0, 10000, size=[batch_size], dtype=torch.int64)
else:
random_seed_tensor = torch.zeros([batch_size], dtype=torch.int64)
# Prepare model.
gpt = GptNeoX(head_num, size_per_head, vocab_size, rotary_embedding,
start_id, end_id, layer_num, max_seq_len,
tensor_para_size, pipeline_para_size,
use_gptj_residual, lib_path,
inference_data_type=inference_data_type,
weights_data_type=weight_data_type)
if not gpt.load(ckpt_path=ckpt_path):
print("[WARNING] Checkpoint file not found. Model loading is skipped.")
with torch.no_grad():
tokens_batch = gpt(
start_ids=start_ids,
start_lengths=start_lengths,
output_len=output_len,
beam_width=beam_width,
top_k=top_k * torch.ones(size=[batch_size], dtype=torch.int32),
top_p=top_p * torch.ones(size=[batch_size], dtype=torch.float32),
beam_search_diversity_rate=beam_search_diversity_rate * torch.ones(size=[batch_size], dtype=torch.float32),
temperature=temperature * torch.ones(size=[batch_size], dtype=torch.float32),
len_penalty=len_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
random_seed=random_seed_tensor,
return_output_length=False,
return_cum_log_probs=0)
if tokens_batch is not None and rank == 0:
tokens_batch = tokens_batch.cpu().numpy()
for i, (context, tokens) in enumerate(zip(contexts, tokens_batch)):
for beam_id in range(beam_width):
token = tokens[beam_id][start_lengths[i]:] # exclude context input from the output
output = tokenizer.decode(token)
print(f'[INFO] batch {i}, beam {beam_id}:\n[Context]\n{context}\n\n[Output]\n{output}\n')
# Measure inference time.
if args.time:
iterations = 10
# warmup
for i in range(iterations):
tokens_batch = gpt(
start_ids=start_ids,
start_lengths=start_lengths,
output_len=output_len,
beam_width=beam_width,
top_k=top_k * torch.ones(size=[batch_size], dtype=torch.int32),
top_p=top_p * torch.ones(size=[batch_size], dtype=torch.float32),
beam_search_diversity_rate=beam_search_diversity_rate * torch.ones(size=[batch_size], dtype=torch.float32),
temperature=temperature * torch.ones(size=[batch_size], dtype=torch.float32),
len_penalty=len_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
random_seed=random_seed_tensor,
return_output_length=False,
return_cum_log_probs=0)
batch_num = 0
token_num = 0
time = timeit.default_timer()
for i in range(iterations):
tokens_batch = gpt(
start_ids=start_ids,
start_lengths=start_lengths,
output_len=output_len,
beam_width=beam_width,
top_k=top_k * torch.ones(size=[batch_size], dtype=torch.int32),
top_p=top_p * torch.ones(size=[batch_size], dtype=torch.float32),
beam_search_diversity_rate=beam_search_diversity_rate * torch.ones(size=[batch_size], dtype=torch.float32),
temperature=temperature * torch.ones(size=[batch_size], dtype=torch.float32),
len_penalty=len_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
repetition_penalty=repetition_penalty * torch.ones(size=[batch_size], dtype=torch.float32),
random_seed=random_seed_tensor,
return_output_length=False,
return_cum_log_probs=0)
batch_num += 1
for j, tokens in enumerate(tokens_batch):
token_num += tokens.shape[-1] - start_lengths[j]
time_elapsed = timeit.default_timer() - time
throughput = token_num / time_elapsed
print(f"[INFO] FT-GPT generates {batch_num} batches, taking {time_elapsed:0.3f} secs "
f"to generate {token_num} tokens, {throughput:0.3f} tokens/sec.")
if __name__ == '__main__':
main()
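# Example invocation (hypothetical paths; the values below mirror the parser
# defaults for a converted NeoX-1.3B checkpoint and its HF tokenizer directory):
#   python gptneox_example.py \
#       --ckpt_path ../models/gptneox/c-model/NeoX-1.3B/1-gpu \
#       --tokenizer_path ../models/gptneox/model/NeoX-1.3B \
#       --lib_path ./lib/libth_transformer.so \
#       --max_batch_size 4 --output_len 32 --top_k 1 --time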
|
FasterTransformer-main
|
examples/pytorch/gptneox/gptneox_example.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import multiprocessing
import numpy as np
from pathlib import Path
import torch
import os
import sys
from transformers import GPTNeoXForCausalLM # 4.21.1
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def prefix_prompt_convert(args, config, weight_data_type):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
prompt_in_file_list = args.prompt_in_file_list.split(',')
task_list = []
for idx, prompt_in_file in enumerate(prompt_in_file_list):
weights=torch.load(prompt_in_file)
task_name = prompt_in_file.split("/")[-1].split(".")[-3]
total_size = weights.nelement()
n_layers = config['n_layer']
n_head = config['n_head']
size_per_head = config['n_embd'] // n_head
prefix_prompt_len = total_size // (2 * n_layers * n_head * size_per_head)
task_list.append((task_name, prefix_prompt_len))
# GPT NeoX
weights=weights.view(prefix_prompt_len,n_layers,2,n_head,size_per_head) ## prefix_seq_len, num_layers, 2, num_heads, size_per_head
# weights=weights.view(prefix_prompt_len,28,2,16,256) ## prefix_seq_len, num_layers, 2, num_heads, size_per_head
weights=weights.permute(1,2,3,0,4) ## num_layers, 2, num_heads, prefix_seq_len, size_per_head
local_head_num = n_head // args.infer_gpu_num
weights_split = torch.split(weights, local_head_num, dim=2)
for i in range(args.infer_gpu_num):
output_file_path = saved_dir + "/model.prefix_prompt." + task_name + ".weight." + str(i) + ".bin"
weights_split[i].detach().cpu().numpy().astype(weight_data_type).tofile(output_file_path)
return task_list
def split_and_convert_process(i, saved_dir,factor,key,args,config,val):
if key.find("input_layernorm.weight") != -1 or key.find("input_layernorm.bias") != -1 or \
key.find("attention.dense.bias") != -1 or key.find("post_attention_layernorm.weight") != -1 or \
key.find("post_attention_layernorm.bias") != -1 or key.find("mlp.dense_4h_to_h.bias") != -1 or \
key.find("final_layernorm.weight") != -1 or key.find("final_layernorm.bias") != -1:
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir + "/model." + key + ".bin"
val.tofile(saved_path)
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
local_dim = (int)(val.shape[-1] / 3)
n_head = config['n_head']
val = val.reshape(n_head, 3, local_dim // n_head)
val = np.transpose(val, [1, 0, 2]).reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = (int)(val.shape[-1] / 3)
n_head = config['n_head']
# Note that the HF qkv weights are stored as [hidden_size, num_heads, 3, head_hidden]
# FT needs the shape of [hidden_size, 3, num_heads, head_hidden]
val = val.reshape(hidden_dim, n_head, 3, local_dim // n_head)
val = np.transpose(val, [0, 2, 1, 3]).reshape(hidden_dim, 3, local_dim)
# print(np.mean(np.abs(val[:, 0, :])))
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
else:
print("[ERROR] cannot find key '{}'".format(key))
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
ckpt_name = args.in_file
t_gpu_num = args.trained_gpu_num
i_gpu_num = args.infer_gpu_num
assert(i_gpu_num % t_gpu_num == 0)
factor = (int)(i_gpu_num / t_gpu_num)
# load position_embedding from rank 0
# model = torch.load(ckpt_name)
model = GPTNeoXForCausalLM.from_pretrained(args.in_file)
hf_config = vars(model.config)
if "gpt_j_residual" not in hf_config:
hf_config["gpt_j_residual"] = 0
np_weight_data_type = get_weight_data_type(args.weight_data_type)
task_list = []
if args.prompt_in_file_list is not None:
task_list = prefix_prompt_convert(args, hf_config, np_weight_data_type)
try:
model_name = args.model_name
config = configparser.ConfigParser()
config['gptneox'] = {}
config['gptneox']['model_name'] = model_name
config['gptneox']["head_num"] = str(hf_config["n_head"])
n_embd = hf_config["n_embd"]
config['gptneox']["size_per_head"] = str(n_embd // hf_config["n_head"])
config['gptneox']["inter_size"] = str(n_embd * 4)
config['gptneox']["num_layer"] = str(hf_config["n_layer"])
rotary_dim = n_embd // hf_config["n_head"] if hf_config["rotary_dim"] is None else hf_config["rotary_dim"]
config['gptneox']["rotary_embedding"] = str(rotary_dim)
config['gptneox']["vocab_size"] = str(hf_config["vocab_size"])
config['gptneox']["start_id"] = str(hf_config["bos_token_id"])
config['gptneox']["end_id"] = str(hf_config["eos_token_id"])
config['gptneox']['use_gptj_residual'] = str(int(hf_config['gpt_j_residual']))
config['gptneox']["weight_data_type"] = args.weight_data_type
if len(task_list) > 0:
config['gptneox']['num_tasks'] = str(len(task_list))
config['gptneox']['prompt_learning_type'] = str(2)
for idx, (task_name, prompt_length) in enumerate(task_list):
config[f'task_{idx}'] = {}
config[f'task_{idx}']['task_name'] = task_name
config[f'task_{idx}']['prompt_length'] = str(prompt_length)
with open((Path(saved_dir) / f"config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
except Exception:
print("Failed to save the config to config.ini.")
huggingface_model_name_pattern = [
"ln_1.bias",
"ln_1.weight",
"attn.qkv_proj.bias",
"attn.qkv_proj.weight",
"attn.out_proj.bias",
"attn.out_proj.weight",
"ln_2.bias",
"ln_2.weight",
"mlp.fc_in.bias",
"mlp.fc_in.weight",
"mlp.fc_out.bias",
"mlp.fc_out.weight",
]
ft_model_name_pattern = [
"input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.bias",
"attention.query_key_value.weight",
"attention.dense.bias",
"attention.dense.weight",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h.weight",
"mlp.dense_4h_to_h.bias",
"mlp.dense_4h_to_h.weight",
]
torch.multiprocessing.set_start_method("spawn")
pool = multiprocessing.Pool(args.processes)
for name, param in model.named_parameters():
if name.find("weight") == -1 and name.find("bias") == -1:
continue
print(name)
if name == 'transformer.wpe.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wpe.bin")
elif name == 'transformer.wte.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wte.bin")
elif name == 'transformer.ln_f.bias':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.bias.bin")
elif name == 'transformer.ln_f.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.final_layernorm.weight.bin")
elif name == 'lm_head.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.weight.bin")
elif name == 'lm_head.bias':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.bias.bin")
else:
for i in range(len(huggingface_model_name_pattern)):
if name.find(huggingface_model_name_pattern[i]) != -1:
new_name = name.replace("transformer.h.", "layers.").replace(huggingface_model_name_pattern[i], ft_model_name_pattern[i])
pool.starmap(split_and_convert_process,
[(0, saved_dir, factor, new_name, args, vars(model.config),
param.detach().cpu().numpy().astype(np_weight_data_type).T)], )
pool.close()
pool.join()
# Post-process biases if use_gptj_residual is True
if hf_config['gpt_j_residual']:
for layer_idx in range(hf_config["n_layer"]):
attn_bias = np.fromfile(saved_dir + f"/model.layers.{layer_idx}.attention.dense.bias.bin", dtype=np.float32)
mlp_bias = np.fromfile(saved_dir + f"/model.layers.{layer_idx}.mlp.dense_4h_to_h.bias.bin", dtype=np.float32)
(attn_bias + mlp_bias).tofile(saved_dir + f"/model.layers.{layer_idx}.mlp.attention.bias.sum.bin")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-saved_dir', '-o', type=str, help='directory to save the converted model', required=True)
parser.add_argument('-in_file', '-i', type=str, help='path of the input HF checkpoint', required=True)
parser.add_argument('-prompt_in_file_list','-p_i_list', type=str, help='list of prompt weight file paths, '
'separated by commas, e.g. -prompt_in_file_list prefix_prompt.task0.weight,prefix_prompt.task1.weight')
parser.add_argument('-trained_gpu_num', '-t_g', type=int, help='How many gpus were used for training', default=1)
parser.add_argument('-infer_gpu_num', '-i_g', type=int, help='How many gpus for inference', required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)", default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
parser.add_argument('-model_name', '-m_n', type=str, help='model name', required=True)
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
split_and_convert(args)
|
FasterTransformer-main
|
examples/pytorch/gptneox/utils/huggingface_jp_gptneox_convert.py
|
#! /usr/bin/env python3
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from tokenizers import Tokenizer
from typing import List, Union
class HFTokenizer:
def __init__(self, vocab_file):
self.tokenizer = Tokenizer.from_file(vocab_file)
def tokenize(self, text: str):
return self.tokenizer.encode(text).ids
def tokenize_batch(self, text_batch: Union[List[str], str]):
return self.tokenizer.encode_batch(text_batch)
def detokenize(self, token_ids):
return self.tokenizer.decode(token_ids)
def handle_args():
parser = ArgumentParser()
parser.add_argument("in_file")
parser.add_argument("--out-file")
parser.add_argument("--tokenizer", default="../models/20B_tokenizer.json")
parser.add_argument("--action", choices=["tokenize", "detokenize", "auto"], default="auto")
return parser.parse_args()
def main(in_file, tokenizer, out_file, action):
tokenizer = HFTokenizer(tokenizer)
with open(in_file) as f:
lines = f.read().split('\n')
in_lines = None
do = None
if action != "tokenize":
if in_lines is None:
try:
in_lines = [[int(tok) for tok in line.split(' ') if tok] for line in lines if line]
do = "detokenize"
except ValueError:
pass
if in_lines is None:
try:
in_lines = [[int(tok) for tok in line.split(', ') if tok] for line in lines if line]
do = "detokenize"
except ValueError:
pass
if action != "detokenize":
if in_lines is None:
try:
in_lines = [line for line in lines if line]
do = "tokenize"
except ValueError:
pass
if do is not None:
if do == "detokenize":
output = [tokenizer.detokenize(token_list) for token_list in in_lines]
else:
output = [tokenizer.tokenize(line) for line in in_lines]
output = [",".join(str(tok) for tok in tok_seq) for tok_seq in output]
if out_file:
with open(out_file, "w") as f:
f.write("\n".join(output))
else:
print("\n---\n".join(output))
if __name__ == "__main__":
args = handle_args()
main(args.in_file, args.tokenizer, args.out_file, args.action)
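# Example invocations (hypothetical input files; with --action auto the script
# detokenizes when the input parses as token ids, otherwise it tokenizes):
#   ./hftokenizer.py prompts.txt --tokenizer ../models/20B_tokenizer.json --out-file ids.csv
#   ./hftokenizer.py ids.csv --action detokenize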
|
FasterTransformer-main
|
examples/pytorch/gptneox/utils/hftokenizer.py
|
#! /usr/bin/env python3
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import numpy as np
import torch # pytype: disable=import-error
import yaml
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from tqdm import tqdm
from typing import List
'''
GPT-NeoX 20B model
Download by wget --cut-dirs=5 -nH -r --no-parent --reject "index.html*" https://mystic.the-eye.eu/public/AI/models/GPT-NeoX-20B/slim_weights/ -P 20B_checkpoints
layer_00-model_00-model_states.pt
word_embeddings.weight: embedding table, split by tensor parallelism
layer_02-model_00-model_states.pt ~ layer_45-model_01-model_states.pt:
input_layernorm.weight
input_layernorm.bias
attention.query_key_value.weight
attention.query_key_value.bias
attention.rotary_emb.inv_freq
attention.dense.weight
attention.dense.bias
post_attention_layernorm.weight
post_attention_layernorm.bias
mlp.dense_h_to_4h.weight
mlp.dense_h_to_4h.bias
mlp.dense_4h_to_h.weight
mlp.dense_4h_to_h.bias
layer_47-model_00-model_states.pt:
final layernorm. model_00 and model_01 have the same weights, so using either one is enough.
layer_48-model_00-model_states.pt
final_linear.weight. It should be the logit gemm weight.
mp_rank_xx_model_states.pt:
some training states, useless in inference
'''
weights_skip_tensor_split = ["input_layernorm.bias",
"input_layernorm.weight",
"attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight"]
def write_config_file(save_dir):
file_template = """
[gptneox]
model_name=gptneox_20B
head_num=64
size_per_head=96
vocab_size=50432
num_layer=44
rotary_embedding=24
start_id=0
end_id=2
inter_size=24576
use_gptj_residual=1
weight_data_type=fp32
"""
with open(Path(save_dir) / "config.ini", "w") as f:
f.write(file_template)
@dataclass
class KeyHandler:
outname: str
gather: str = ""
scatter: str = "copy"
reshape: List = field(default_factory=lambda: [])
transpose: List = field(default_factory=lambda: [])
def on_cpu(storage, loc):
return storage.cpu()
def handle_layer(chkpt_dir, in_filename, key_mapping, save_dir,
in_range, out_range, whole_range=None):
def read_layers(filename, range):
if range is not None:
filename = [filename.format(i) for i in range]
else:
filename = [filename]
return [torch.load(chkpt_dir / fn, map_location=on_cpu) for fn in filename]
layers = read_layers(in_filename, in_range)
layer_keys = set(layers[0].keys())
for key, value in key_mapping.items():
key_templ, gather, scatter = value.outname, value.gather, value.scatter
reshape, transpose = value.reshape, value.transpose
layer_keys.remove(key)
if key_templ == "":
continue
# Preprocess tensors
tensors = [np.array(layer[key], dtype=np.float32) for layer in layers]
if reshape:
tensors = [ten.reshape(reshape) for ten in tensors]
if transpose:
tensors = [ten.transpose(transpose) for ten in tensors]
# Gather tensors
if len(tensors) == 1:
gather_tensor = tensors[0]
else:
if "join" in gather:
axis = int(gather.partition("_")[2])
gather_tensor = np.concatenate(tensors, axis=axis)
elif gather == "mean":
gather_tensor = np.sum(tensors, axis=0) / len(tensors)
elif gather == "sum":
gather_tensor = np.sum(tensors, axis=0)
else:
raise NotImplementedError(f"Gather strategy {gather} is not supported")
# Scatter tensors
if len(out_range) == 1:
scatter_tensors = [gather_tensor]
else:
if scatter == "copy":
scatter_tensors = [gather_tensor for i in out_range]
elif "split" in scatter:
axis = int(scatter.partition("_")[2])
if gather_tensor.shape[axis] % len(out_range) != 0:
raise ValueError(f"{key} cannot be divided in {len(out_range)} along axis {axis}")
scatter_tensors = np.split(gather_tensor, len(out_range), axis=axis)
elif scatter == "divide":
scatter_tensors = [gather_tensor / len(out_range) for i in out_range]
else:
raise NotImplementedError(f"Scatter strategy {scatter} is not supported")
for tensor, idx in zip(scatter_tensors, out_range):
output_name = key_templ.format(idx)
for weight_name in weights_skip_tensor_split:
if weight_name in output_name:
output_name = output_name.split('.')
del output_name[-1]
output_name = '.'.join(output_name)
tensor.tofile(save_dir / ("model." + output_name + ".bin"))
if len(layer_keys) > 0:
print("[Warning] Remaining keys:", layer_keys)
def convert_checkpoint(args):
base_dir = Path(args.checkpoint_dir)
with open(base_dir / "latest") as f:
chkpt_dir = f.readline().rstrip()
chkpt_dir = base_dir / chkpt_dir
with open(base_dir / "configs/20B.yml") as f:
model_args = yaml.safe_load(f)
hidden_dim = model_args["hidden-size"]
n_layers = model_args["num-layers"]
n_heads = model_args["num-attention-heads"]
hidden_per_head = hidden_dim // n_heads
tp_source = model_args["model-parallel-size"]
tp_target = args.tensor_parallelism
print(f"Converting from {tp_source} to {tp_target} GPUs")
save_dir = Path(args.save_dir) / f"{tp_target:d}-gpu"
save_dir.mkdir(parents=True, exist_ok=True)
handle_layer_args = []
handle_layer_args.append((
chkpt_dir,
"layer_00-model_{:02d}-model_states.pt",
{"word_embeddings.weight": KeyHandler("wte", "join_0")},
save_dir,
range(tp_source),
[0],
))
handle_layer_args.append((
chkpt_dir,
"layer_47-model_{:02d}-model_states.pt",
{
"norm.weight": KeyHandler("final_layernorm.weight", "mean"),
"norm.bias": KeyHandler("final_layernorm.bias", "mean"),
},
save_dir,
range(tp_source),
[0],
))
handle_layer_args.append((
chkpt_dir,
"layer_48-model_{:02d}-model_states.pt",
{
"final_linear.weight": KeyHandler("lm_head.weight", "join_0"),
},
save_dir,
range(tp_source),
[0],
))
gcd = np.gcd(tp_source, tp_target)
print(f"Strategy: group {tp_source//gcd} source gpu(s) into {tp_target//gcd} out gpu(s).\n")
in_indices = np.split(np.arange(tp_source), gcd)
out_indices = np.split(np.arange(tp_target), gcd)
for layer_id in range(model_args["num-layers"]):
for in_idx, out_idx in zip(in_indices, out_indices):
def make_fn_out(fn):
return f"layers.{layer_id}." + fn + ".{:d}"
handle_layer_args.append((
chkpt_dir,
f"layer_{layer_id+2:02d}" + "-model_{:02d}-model_states.pt",
{
"attention.rotary_emb.inv_freq": KeyHandler(""),
"attention.dense.weight": KeyHandler(
make_fn_out("attention.dense.weight"),
"join_0", "split_0",
transpose=[1, 0]),
"attention.dense.bias": KeyHandler(
make_fn_out("attention.dense.bias"), "sum", "divide"),
"attention.query_key_value.weight": KeyHandler(
make_fn_out("attention.query_key_value.weight"),
"join_2", "split_2",
reshape=[n_heads // tp_source, 3, hidden_per_head, hidden_dim],
transpose=[3, 1, 0, 2]),
"attention.query_key_value.bias": KeyHandler(
make_fn_out("attention.query_key_value.bias"),
"join_1", "split_1",
reshape=[n_heads // tp_source, 3, hidden_per_head],
transpose=[1, 0, 2]),
"input_layernorm.weight": KeyHandler(
make_fn_out("input_layernorm.weight"), "mean"),
"input_layernorm.bias": KeyHandler(
make_fn_out("input_layernorm.bias"), "mean"),
"mlp.dense_4h_to_h.weight": KeyHandler(
make_fn_out("mlp.dense_4h_to_h.weight"),
"join_0", "split_0",
transpose=[1, 0]),
"mlp.dense_4h_to_h.bias": KeyHandler(
make_fn_out("mlp.dense_4h_to_h.bias"), "sum", "divide"),
"mlp.dense_h_to_4h.weight": KeyHandler(
make_fn_out("mlp.dense_h_to_4h.weight"),
"join_1", "split_1",
transpose=[1, 0]),
"mlp.dense_h_to_4h.bias": KeyHandler(
make_fn_out("mlp.dense_h_to_4h.bias"), "join_0", "split_0"),
"post_attention_layernorm.weight": KeyHandler(
make_fn_out("post_attention_layernorm.weight"), "mean"),
"post_attention_layernorm.bias": KeyHandler(
make_fn_out("post_attention_layernorm.bias"), "mean"),
},
save_dir,
in_idx,
out_idx,
))
torch.multiprocessing.set_start_method("spawn")
with multiprocessing.Pool(args.jobs) as pool:
pool.starmap(handle_layer, handle_layer_args)
# Post-process biases and lm_head (TODO: remove this)
for layer_idx in range(model_args["num-layers"]):
attn_bias = np.fromfile(save_dir / f"model.layers.{layer_idx}.attention.dense.bias.bin", dtype=np.float32)
mlp_bias = np.fromfile(save_dir / f"model.layers.{layer_idx}.mlp.dense_4h_to_h.bias.bin", dtype=np.float32)
(attn_bias + mlp_bias).tofile(save_dir / f"model.layers.{layer_idx}.mlp.attention.bias.sum.bin")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_dir", metavar="checkpoint-dir",
help="directory where resides the source model. Must contain a \"latest\" file.")
parser.add_argument("save_dir", metavar="save-dir",
help="where to store the FT model")
parser.add_argument("--tensor-parallelism", "-t", type=int, default=1,
help="level of tensor parallelism used for inference")
parser.add_argument("--jobs", "-j", type=int, default=None,
help="how many processes to spawn for conversion (default: cpu_count)")
args = parser.parse_args()
start_time = datetime.now()
convert_checkpoint(args)
write_config_file(args.save_dir + f"/{args.tensor_parallelism}-gpu")
stop_time = datetime.now()
run_time = (stop_time - start_time)
print("[INFO] Spend {} (h:m:s) to convert the model".format(run_time))
|
FasterTransformer-main
|
examples/pytorch/gptneox/utils/eleutherai_gpt_neox_convert.py
|
import argparse
import configparser
import multiprocessing
import os
import shutil
from pathlib import Path
import numpy as np
import torch
from transformers import GPTNeoXForCausalLM
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def split_and_convert_process(saved_dir, factor, key, args, config, val):
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
):
saved_path = saved_dir + f"/model.{key}.bin"
val.tofile(saved_path)
elif (
key.find("attention.dense.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
):
saved_path = saved_dir + f"/model.{key}.bin"
val = (val / factor) if factor > 1 else val
val.tofile(saved_path)
else:
if (
key.find("attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1
):
split_vals = np.split(val, factor, axis=0)
elif (
key.find("mlp.dense_h_to_4h.weight") != -1
or key.find("mlp.dense_h_to_4h.bias") != -1
):
split_vals = np.split(val, factor, axis=-1)
elif key.find("attention.query_key_value.bias") != -1:
local_dim = (int)(val.shape[-1] / 3)
n_head = config["num_attention_heads"]
val = val.reshape(n_head, 3, local_dim // n_head)
val = np.transpose(val, [1, 0, 2]).reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = (int)(val.shape[-1] / 3)
n_head = config["num_attention_heads"]
# Note that the HF qkv weights are stored as [hidden_size, num_heads, 3, head_hidden]
# FT needs the shape of [hidden_size, 3, num_heads, head_hidden]
val = val.reshape(hidden_dim, n_head, 3, local_dim // n_head)
val = np.transpose(val, [0, 2, 1, 3]).reshape(hidden_dim, 3, local_dim)
# print(np.mean(np.abs(val[:, 0, :])))
split_vals = np.split(val, factor, axis=-1)
else:
print("[ERROR] cannot find key '{}'".format(key))
return
for j in range(factor):
saved_path = saved_dir + f"/model.{key}.{j}.bin"
split_vals[j].tofile(saved_path)
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
factor = args.infer_gpu_num
# load position_embedding from rank 0
# model = torch.load(ckpt_name)
model = GPTNeoXForCausalLM.from_pretrained(args.in_file)
hf_config = vars(model.config)
np_weight_data_type = get_weight_data_type(args.weight_data_type)
try:
model_name = args.model_name
n_heads = hf_config["num_attention_heads"]
head_size = hf_config["hidden_size"] // n_heads
rotary_dim = int(head_size * hf_config["rotary_pct"])
use_gptj_residual = int(hf_config["use_parallel_residual"])
config = configparser.ConfigParser()
config["gptneox"] = {}
config["gptneox"]["model_name"] = model_name
config["gptneox"]["head_num"] = str(n_heads)
config["gptneox"]["size_per_head"] = str(head_size)
config["gptneox"]["inter_size"] = str(hf_config["intermediate_size"])
config["gptneox"]["num_layer"] = str(hf_config["num_hidden_layers"])
config["gptneox"]["rotary_embedding"] = str(rotary_dim)
config["gptneox"]["vocab_size"] = str(hf_config["vocab_size"])
config["gptneox"]["start_id"] = str(hf_config["bos_token_id"])
config["gptneox"]["end_id"] = str(hf_config["eos_token_id"])
config["gptneox"]["use_gptj_residual"] = str(use_gptj_residual)
config["gptneox"]["weight_data_type"] = args.weight_data_type
with open((Path(saved_dir) / f"config.ini").as_posix(), "w") as configfile:
config.write(configfile)
except Exception as e:
print(f"Fail to save the config in config.ini.", e)
ft_model_name_pattern = [
"input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.bias",
"attention.query_key_value.weight",
"attention.dense.bias",
"attention.dense.weight",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h.weight",
"mlp.dense_4h_to_h.bias",
"mlp.dense_4h_to_h.weight",
]
huggingface_model_file_list = [__fn for __fn in os.listdir(args.in_file) if __fn.endswith(".bin")]
if len(huggingface_model_file_list) > 1:
multiprocessing_context = multiprocessing.get_context()
pool_fn = multiprocessing_context.Pool
else:
torch.multiprocessing.set_start_method("spawn")
pool_fn = multiprocessing.Pool
pool = pool_fn(args.processes)
for name, param in model.named_parameters():
array = param.detach().cpu().numpy().astype(np_weight_data_type)
# print("input shape", name, array.shape)
if name.find("weight") == -1 and name.find("bias") == -1:
print("skipped", name)
continue
elif name == "gpt_neox.embed_in.weight":
array.tofile(saved_dir + "model.wte.bin")
elif name == "gpt_neox.final_layer_norm.bias":
array.tofile(saved_dir + "model.final_layernorm.bias.bin")
elif name == "gpt_neox.final_layer_norm.weight":
array.tofile(saved_dir + "model.final_layernorm.weight.bin")
elif name == "embed_out.weight":
array.tofile(saved_dir + "model.lm_head.weight.bin")
else:
processed = False
for i in range(len(ft_model_name_pattern)):
if name.find(ft_model_name_pattern[i]) != -1:
new_name = name.replace("gpt_neox.", "")
pool.starmap(
split_and_convert_process,
[
(
saved_dir,
factor,
new_name,
args,
vars(model.config),
array.T,
)
],
)
processed = True
break
if not processed:
print("Unused layer", name)
pool.close()
pool.join()
# Post-process biases if use_gptj_residual is True
if use_gptj_residual:
for layer_idx in range(hf_config["num_hidden_layers"]):
attn_bias = np.fromfile(
saved_dir + f"/model.layers.{layer_idx}.attention.dense.bias.bin",
dtype=np_weight_data_type,
)
mlp_bias = np.fromfile(
saved_dir + f"/model.layers.{layer_idx}.mlp.dense_4h_to_h.bias.bin",
dtype=np_weight_data_type,
)
(attn_bias + mlp_bias).astype(np_weight_data_type).tofile(
saved_dir + f"/model.layers.{layer_idx}.mlp.attention.bias.sum.bin"
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"-saved_dir", "-o", type=str, help="file name of output file", required=True
)
parser.add_argument(
"-in_file",
"-i",
type=str,
help="file name of input checkpoint file",
required=True,
)
parser.add_argument(
"-infer_gpu_num",
"-i_g",
type=int,
help="How many gpus for inference",
required=True,
)
parser.add_argument(
"-processes",
"-p",
type=int,
help="How many processes to spawn for conversion (default: 4)",
default=4,
)
parser.add_argument(
"-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"]
)
parser.add_argument(
"-model_name", "-m_n", type=str, help="model name", required=True
)
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
__dir = os.path.join(args.saved_dir, "%d-gpu" % args.infer_gpu_num)
assert not os.path.exists(__dir), "target path already exists, please remove %s first." % __dir
split_and_convert(args)
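# Example invocation (hypothetical paths; -i must point at a local HF GPT-NeoX
# checkpoint directory containing *.bin shards, since the script lists that directory):
#   python huggingface_gptneox_convert.py -i ./gptneox-hf-checkpoint -o ../models/gptneox/c-model -i_g 1 -m_n gptneox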
|
FasterTransformer-main
|
examples/pytorch/gptneox/utils/huggingface_gptneox_convert.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import inspect
import argparse
import dataclasses
import json
import os
import pathlib
import typing
import torch
import torch.nn as nn
import numpy as np
import torch.distributed as dist
str_type_map = {"fp32": torch.float32, "fp16": torch.float16}
class GptNeoXWeights(object):
def __init__(self,
head_num, size_per_head, layer_num, vocab_size,
max_seq_len, tensor_para_size, pipeline_para_size, use_gptj_residual,
inference_data_type: str = "fp16",
weights_data_type: np.dtype = np.float32):
assert(head_num % tensor_para_size == 0)
self.head_num = head_num
self.size_per_head = size_per_head
self.layer_num = layer_num
self.vocab_size = vocab_size
self.max_seq_len = max_seq_len
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.layers_per_device = layer_num // pipeline_para_size
self.use_gptj_residual = use_gptj_residual
local_head_num = head_num // tensor_para_size
global_head_num = head_num
local_hidden_units = local_head_num * size_per_head
global_hidden_units = global_head_num * size_per_head
local_inter_size = local_hidden_units * 4
self.local_head_num = local_head_num
self.global_head_num = global_head_num
self.local_hidden_units = local_hidden_units
self.global_hidden_units = global_hidden_units
self.local_inter_size = local_inter_size
if isinstance(weights_data_type, str):
try:
weights_data_type = {
"fp16": np.float16,
"fp32": np.float32,
"float16": np.float16,
"float32": np.float32,
}[weights_data_type]
except KeyError:
raise ValueError(f"Don't know how to interpret weights_data_type: {weights_data_type}")
assert weights_data_type in [np.float32, np.float16]
self.weights_data_type = weights_data_type
self.inference_data_type = str_type_map[inference_data_type]
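# self.w is a flat list of weights: 12 per-layer tensor types, each replicated layer_num
# times (grouped by type, in the order of the comments below), followed by four
# post-transformer tensors (embedding table, final layernorm, lm head).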
self.w = []
# Transformer blocks
self.w.extend([torch.zeros(global_hidden_units, dtype=self.inference_data_type)] * layer_num) # pre_layernorm_weights.beta
self.w.extend([torch.zeros(global_hidden_units, dtype=self.inference_data_type)] * layer_num) # pre_layernorm_weights.gamma
self.w.extend([torch.zeros(global_hidden_units, local_hidden_units * 3, dtype=self.inference_data_type)] * layer_num) # self_attention_weights.query_weight.kernel
self.w.extend([torch.zeros(local_hidden_units * 3, dtype=self.inference_data_type)] * layer_num) # self_attention_weights.query_weight.bias
self.w.extend([torch.zeros(local_hidden_units, global_hidden_units, dtype=self.inference_data_type)] * layer_num) # self_attention_weights.attention_output_weight.kernel
self.w.extend([torch.zeros(global_hidden_units, dtype=self.inference_data_type) if not use_gptj_residual else torch.empty(0)] * layer_num)
# self_attention_weights.attention_output_weight.bias
self.w.extend([torch.zeros(global_hidden_units, local_inter_size, dtype=self.inference_data_type)] * layer_num) # ffn_weights.intermediate_weight.kernel
self.w.extend([torch.zeros(local_inter_size, dtype=self.inference_data_type)] * layer_num) # ffn_weights.intermediate_weight.bias
self.w.extend([torch.zeros(local_inter_size, global_hidden_units, dtype=self.inference_data_type)] * layer_num) # ffn_weights.output_weight.kernel
self.w.extend([torch.zeros(global_hidden_units, dtype=self.inference_data_type)] * layer_num) # ffn_weights.output_weight.bias
self.w.extend([torch.zeros(global_hidden_units, dtype=self.inference_data_type)] * layer_num) # post_attention_layernorm_weights.beta
self.w.extend([torch.zeros(global_hidden_units, dtype=self.inference_data_type)] * layer_num) # post_attention_layernorm_weights.gamma
# After Transformer blocks
self.w.append(torch.zeros(vocab_size, global_hidden_units, dtype=self.inference_data_type)) # pre_decoder_embedding_table
self.w.append(torch.zeros(global_hidden_units, dtype=self.inference_data_type)) # post_decoder_layernorm.beta
self.w.append(torch.zeros(global_hidden_units, dtype=self.inference_data_type)) # post_decoder_layernorm.gamma
self.w.append(torch.zeros(vocab_size, global_hidden_units, dtype=self.inference_data_type)) # post_decoder_embedding.kernel
# Initialization
self._map(lambda w: torch.nn.init.normal_(w, mean=0., std=0.01))
def __getitem__(self, idx):
return self.w[idx]
def __setitem__(self, idx, val):
self.w[idx] = val
def __len__(self):
return len(self.w)
def _map(self, func):
for i in range(len(self.w)):
if isinstance(self.w[i], list):
for j in range(len(self.w[i])):
self.w[i][j] = func(self.w[i][j])
else:
self.w[i] = func(self.w[i])
def load(self, ckpt_path, tensor_para_rank, pipeline_para_rank):
if not os.path.exists(ckpt_path):
return False
w = []
type_map = {np.float32: torch.float32, np.float16: torch.float16}
# Load
def is_load(i):
return i >= self.layers_per_device * pipeline_para_rank and i < self.layers_per_device * (pipeline_para_rank + 1)
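# Binary file names under ckpt_path, listed in the same order as the per-layer tensor
# types in self.w; tensor-parallel shards carry a ".<rank>" suffix before ".bin".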
file_names = ["input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.weight.%d" % tensor_para_rank,
"attention.query_key_value.bias.%d" % tensor_para_rank,
"attention.dense.weight.%d" % tensor_para_rank,
"attention.dense.bias" if not self.use_gptj_residual else None,
"mlp.dense_h_to_4h.weight.%d" % tensor_para_rank,
"mlp.dense_h_to_4h.bias.%d" % tensor_para_rank,
"mlp.dense_4h_to_h.weight.%d" % tensor_para_rank,
"mlp.attention.bias.sum" if self.use_gptj_residual else "mlp.dense_4h_to_h.bias",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight"]
for file_name in file_names:
for i in range(self.layer_num):
if file_name is not None and is_load(i):
w.append(torch.from_numpy(np.fromfile(
"%s/model.layers.%d.%s.bin" % (ckpt_path, i, file_name),
dtype=self.weights_data_type)).to(self.inference_data_type))
else:
w.append(torch.empty(0).to(self.inference_data_type))
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.wte.bin", dtype=self.weights_data_type)).to(self.inference_data_type))
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.final_layernorm.weight.bin", dtype=self.weights_data_type)).to(self.inference_data_type))
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.final_layernorm.bias.bin", dtype=self.weights_data_type)).to(self.inference_data_type))
w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.lm_head.weight.bin", dtype=self.weights_data_type)).to(self.inference_data_type))
try:
for i in range(len(w)):
if w[i].nelement() > 0:
self.w[i] = w[i].reshape(self.w[i].shape)
else:
self.w[i] = w[i]
except RuntimeError:
raise RuntimeError(
f"head_num, size_per_head, vocab_size, and max_seq_len must be the same as the ones during training "
f"(idx: {i} expected shape: {self.w[i].shape} got shape: {w[i].shape})."
)
return True
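# Minimal usage sketch (illustrative values roughly matching GPT-NeoX-20B; checkpoint,
# library path, and the start/end token ids are placeholders taken from the tokenizer):
#   gpt = GptNeoX(head_num=64, size_per_head=96, vocab_size=50432, rotary_embedding_dim=24,
#                 start_id=0, end_id=0, layer_num=44, max_seq_len=2048,
#                 tensor_para_size=1, pipeline_para_size=1, use_gptj_residual=True,
#                 lib_path="lib/libth_transformer.so")
#   gpt.load("./ft_gptneox_ckpt/1-gpu")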
class GptNeoX(nn.Module):
def __init__(self,
head_num, size_per_head,
vocab_size, rotary_embedding_dim,
start_id, end_id, layer_num,
max_seq_len,
tensor_para_size, pipeline_para_size,
use_gptj_residual,
lib_path,
inference_data_type: str = "fp16",
weights_data_type: np.dtype = np.float32):
super().__init__()
self.head_num = head_num
self.size_per_head = size_per_head
self.inter_size = 4 * self.head_num * self.size_per_head
self.vocab_size = vocab_size
self.rotary_embedding_dim = rotary_embedding_dim
self.start_id = start_id
self.end_id = end_id
self.max_seq_len = max_seq_len
self.layer_num = layer_num
self.use_gptj_residual = use_gptj_residual
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.build_model = False
self.weights_data_type = weights_data_type
self.inference_data_type = inference_data_type
assert torch.cuda.is_available(), "CUDA is required for this model."
assert head_num % tensor_para_size == 0, "head_num must be a multiple of tensor_para_size."
assert layer_num % pipeline_para_size == 0, "layer_num must be a multiple of pipeline_para_size."
# Load the C++ model into Pytorch model.
torch.classes.load_library(os.path.abspath(lib_path))
# Prepare weights
self.weights = GptNeoXWeights(head_num, size_per_head, layer_num, vocab_size,
max_seq_len, tensor_para_size, pipeline_para_size, use_gptj_residual,
weights_data_type=weights_data_type, inference_data_type=inference_data_type)
# Prepare for tensor/pipeline parallel
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Have initialized the process group")
self.rank = dist.get_rank()
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size()
# print(tensor_para_size * pipeline_para_size)
assert world_size == tensor_para_size * pipeline_para_size, "tensor_para_size * pipeline_para_size must be equal to world_size."
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
# Create and copy model to the device.
# self.cuda()
def load(self, ckpt_path):
is_load = self.weights.load(ckpt_path, tensor_para_rank=self.tensor_para_rank,
pipeline_para_rank=self.pipeline_para_rank)
self.cuda()
return is_load
def half(self):
self.weights._map(lambda w: w.half())
self.cuda()
def cuda(self):
self.weights._map(lambda w: w.cuda(self.device))
if self.build_model:
del self.model
self.build_model = False
self.model = torch.classes.FasterTransformer.GptNeoXOp(self.head_num, self.size_per_head, self.inter_size,
self.layer_num, self.vocab_size, self.rotary_embedding_dim,
self.start_id, self.end_id, self.tensor_para_size, self.pipeline_para_size,
self.max_seq_len, self.use_gptj_residual, self.weights.w)
self.build_model = True
def forward(self,
start_ids: torch.Tensor,
start_lengths: torch.Tensor,
output_len,
beam_width = 1,
top_k: torch.Tensor = None,
top_p: torch.Tensor = None,
beam_search_diversity_rate: torch.Tensor = None,
temperature: torch.Tensor = None,
len_penalty: torch.Tensor = None,
repetition_penalty: torch.Tensor = None,
random_seed: torch.Tensor = None,
return_output_length = False,
return_cum_log_probs=0):
if not self.build_model:
self.cuda()
input_len = start_ids.size(1)
assert input_len > 0, "input len must be larger than zero. For an unconditional case, use start_id as the first token."
# Inputs to device
input_ids = start_ids.cuda(self.device)
input_lengths = start_lengths.cuda(self.device)
# outputs: output_ids, output_lengths, output_cum_log_probs (optional)
outputs = self.model.forward(input_ids,
input_lengths,
output_len,
beam_width, # optional, can be None
top_k, # optional, can be None
top_p, # optional, can be None
beam_search_diversity_rate, # optional, can be None
temperature, # optional, can be None
len_penalty, # optional, can be None
repetition_penalty, # optional, can be None
random_seed, # optional, can be None
return_cum_log_probs) # optional, can be None
if return_cum_log_probs == 0:
output_ids, output_lengths = outputs
else:
output_ids, output_lengths, output_cum_log_probs = outputs
if return_output_length:
if return_cum_log_probs > 0:
return output_ids, output_lengths, output_cum_log_probs
else:
return output_ids, output_lengths
else:
return output_ids
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
|
FasterTransformer-main
|
examples/pytorch/gptneox/utils/gptneox.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import numpy as np
import os
import random
import sys
import timeit
import torch
from onmt.utils.misc import sequence_mask
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.decoder.utils.decoder import ONMTDecoder, init_op_cache, init_onmt_cache
from examples.pytorch.decoder.utils.ft_decoder import FTDecoder, FtDecoderWeights
from examples.pytorch.decoding.utils.decoding import DecodingWeights
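# usage example (illustrative values: batch_size layer_num seq_len head_num head_size;
# run from the FasterTransformer build directory so ./lib/libth_transformer.so resolves)
# python ../examples/pytorch/decoder/decoder_example.py 8 6 32 8 64 --time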
def main():
parser = argparse.ArgumentParser()
parser.add_argument('batch_size', type=int,
help='batch size')
parser.add_argument('layer_num', type=int,
help='number of layers')
parser.add_argument('seq_len', type=int,
help='sequence length')
parser.add_argument('head_num', type=int,
help='head number')
parser.add_argument('head_size', type=int,
help='size per head')
parser.add_argument('--step', type=int, default=0,
help='decoding step number')
parser.add_argument('--decoder_ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('--time', action='store_true',
help='test the time or not.')
parser.add_argument('--ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
args = parser.parse_args()
hidden_dim = args.head_num * args.head_size
if args.step <= 0:
step = args.seq_len
else:
step = args.step
print("\n=============== Argument ===============")
print('batch_size: ' + str(args.batch_size))
print('layer_num: ' + str(args.layer_num))
print('seq_len: ' + str(args.seq_len))
print('head_num: ' + str(args.head_num))
print('head_size: ' + str(args.head_size))
print('hidden_dim: ' + str(hidden_dim))
print('step: ' + str(step))
print('data_type: ' + str(args.data_type))
print('test_time: ' + str(args.time))
print("========================================\n")
np.random.seed(1)
torch.manual_seed(0)
random.seed(0)
inp = torch.empty(args.batch_size, 1, hidden_dim).cuda()
mem = torch.empty(args.batch_size, args.seq_len, hidden_dim).cuda() # We assume mem_hidden_dim = hidden_dim
torch.nn.init.uniform_(inp, -0.5, 0.5)
torch.nn.init.uniform_(mem, -0.5, 0.5)
if args.data_type == 'fp16':
inp = inp.half()
mem = mem.half()
mem_seq_lens = torch.randint(1, args.seq_len+1, (args.batch_size,), dtype=torch.int32).cuda()
src_pad_mask = ~sequence_mask(mem_seq_lens, args.seq_len).unsqueeze(1)
weights = DecodingWeights(args.layer_num, hidden_dim, 30000)
ft_weights = FtDecoderWeights(args.layer_num, hidden_dim, weights.w)
onmt_decoder = ONMTDecoder(args.layer_num, args.head_num, args.head_size, weights)
onmt_decoder.cuda()
if args.data_type == 'fp16':
onmt_decoder.half()
onmt_decoder.eval()
ft_weights.to_cuda()
weights.to_cuda()
if args.data_type == 'fp16':
weights.to_half()
ft_weights.to_half()
elif args.data_type == 'bf16':
weights.to_bfloat16()
ft_weights.to_bfloat16()
custom_decoder = FTDecoder(args.head_num, args.head_size, hidden_dim, args.layer_num, ft_weights, args)
with torch.no_grad():
self_cache, mem_cache = init_op_cache(args.layer_num, args.batch_size, 1, args.seq_len, \
args.seq_len, args.head_num, args.head_size, hidden_dim, args.data_type == 'fp16')
cache = init_onmt_cache(args.layer_num, mem)
output1 = inp
output2 = inp
for i in range(step):
output1 = onmt_decoder(output1, mem, src_pad_mask, cache, i)
output2, self_cache, mem_cache = custom_decoder(output2, mem, mem_seq_lens, self_cache, mem_cache, torch.ones(args.batch_size, dtype=torch.int32).cuda() * i, i)
epsilon = 1e-6
if args.data_type == 'fp16':
epsilon = 1e-3
diff = torch.abs((output1 - output2) / (output1 + epsilon))
print('step: {} Mean relative diff: {} Max relative diff: {} Min relative diff: {}'.format(
i, torch.mean(diff), torch.max(diff), torch.min(diff)))
output2 = output1
if args.time:
iterations = 10
for i in range(iterations):
cache = init_onmt_cache(args.layer_num, mem)
output1 = inp
for i in range(step):
output1 = onmt_decoder(output1, mem, src_pad_mask, cache, 0)
t10 = timeit.default_timer()
for i in range(iterations):
cache = init_onmt_cache(args.layer_num, mem)
output1 = inp
for i in range(step):
output1 = onmt_decoder(output1, mem, src_pad_mask, cache, 0)
t1 = timeit.default_timer() - t10
for i in range(iterations):
self_cache, mem_cache = init_op_cache(args.layer_num, args.batch_size, 1, args.seq_len, \
args.seq_len, args.head_num, args.head_size, hidden_dim, args.data_type == 'fp16')
output2 = inp
for i in range(step):
output2, self_cache, mem_cache = custom_decoder(output2, mem, mem_seq_lens, self_cache, mem_cache, torch.ones(args.batch_size, dtype=torch.int32).cuda() * i, i)
t20 = timeit.default_timer()
for i in range(iterations):
self_cache, mem_cache = init_op_cache(args.layer_num, args.batch_size, 1, args.seq_len, \
args.seq_len, args.head_num, args.head_size, hidden_dim, args.data_type == 'fp16')
output2 = inp
for i in range(step):
output2, self_cache, mem_cache = custom_decoder(output2, mem, mem_seq_lens, self_cache, mem_cache, torch.ones(args.batch_size, dtype=torch.int32).cuda() * i, i)
t2 = timeit.default_timer() - t20
print("[INFO] ONMTDecoder time costs: {:.2f} ms".format(t1*1000/iterations))
print("[INFO] FTDecoder time costs: {:.2f} ms".format(t2*1000/iterations))
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/decoder/decoder_example.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import torch
import torch.nn as nn
USE_CACHE_BATCH_MAJOR_ATTENTION = True
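# The fused decoder kernels can store the self-attention key cache in a batch-major,
# interleaved layout; x is the packing factor of the innermost dimension (8 for fp16,
# 4 for fp32), and the layout is only used when size_per_head is divisible by x.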
def get_op_cache_config(size_per_head, is_fp16):
x = 8 if is_fp16 else 4
use_batch_major_op_cache = USE_CACHE_BATCH_MAJOR_ATTENTION and size_per_head % x == 0
x = x if use_batch_major_op_cache else 1
return use_batch_major_op_cache, x
class FtDecoderWeights(object):
def __init__(self, layer_num, hidden_dim, onmtcheckpoint, max_step_for_pe=2048):
self.max_step_for_pe = max_step_for_pe
self.hidden_dim = hidden_dim
self.w = []
prefix = 'decoder.transformer_layers.'
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_1.weight'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_1.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[torch.stack([onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_query.weight'].transpose(-1, -2),
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_keys.weight'].transpose(-1, -2),
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_values.weight'].transpose(-1, -2)], -2)
for i in range(layer_num)], 0).contiguous())
self.w.append(torch.stack(
[torch.stack([onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_query.bias'],
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_keys.bias'],
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_values.bias']], -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.self_attn.final_linear.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.self_attn.final_linear.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_2.weight'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_2.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_query.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_keys.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_values.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_query.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_keys.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_values.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.final_linear.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.final_linear.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.layer_norm.weight'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.layer_norm.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_1.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_1.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_2.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_2.bias'] for i in range(layer_num)],
0).contiguous())
def to_cuda(self):
for i in range(len(self.w)):
self.w[i] = self.w[i].cuda()
def to_half(self):
for i in range(len(self.w)):
self.w[i] = self.w[i].half()
def to_bfloat16(self):
for i in range(len(self.w)):
self.w[i] = self.w[i].bfloat16()
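# Thin PyTorch wrapper around the FasterTransformer decoder op; the tensors collected
# by FtDecoderWeights are passed positionally to the op constructor, so their order matters.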
class FTDecoder(nn.Module):
def __init__(self, head_num, head_size, mem_hidden_dim, layer_num, weights, args):
super().__init__()
self.args = args
self.is_fp16 = self.args.data_type == 'fp16'
self.layer_num = layer_num
self.use_batch_major_op_cache, self.op_cache_dim_x = get_op_cache_config(head_size, self.is_fp16)
torch.classes.load_library(args.decoder_ths_path)
try:
self.dec_layer = torch.classes.FasterTransformer.Decoder(*weights.w, head_num, head_size, head_num * head_size * 4, layer_num, mem_hidden_dim)
except:
# legacy ths for 20.03 image
self.dec_layer = torch.classes.FasterTransformerDecoder(*weights.w, head_num, head_size, head_num * head_size * 4, layer_num, mem_hidden_dim)
def forward(self, inputs, memory, memory_seq_lens, self_cache, mem_cache, sequence_lengths, step):
dtype = torch.half if self.is_fp16 else torch.float32
inputs_shape = inputs.shape
inputs = inputs.reshape([-1, inputs.shape[-1]])
output, self_key_cache, self_val_cache, mem_key_cache, mem_val_cache = \
self.dec_layer.forward(step, inputs, memory, memory_seq_lens, sequence_lengths,
self_cache[0], self_cache[1], mem_cache[0], mem_cache[1])
output = output.reshape(inputs_shape)
return output, [self_key_cache, self_val_cache], [mem_key_cache, mem_val_cache]
class ArgHelper(object):
def __init__(self, model_type=None, data_type=None, ths_path=None):
self.model_type = model_type
self.data_type = data_type
self.ths_path = ths_path
|
FasterTransformer-main
|
examples/pytorch/decoder/utils/ft_decoder.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import torch
from onmt.decoders.transformer import TransformerDecoderLayer
USE_CACHE_BATCH_MAJOR_ATTENTION = True
def get_op_cache_config(size_per_head, is_fp16):
x = 8 if is_fp16 else 4
use_batch_major_op_cache = USE_CACHE_BATCH_MAJOR_ATTENTION and size_per_head % x == 0
x = x if use_batch_major_op_cache else 1
return use_batch_major_op_cache, x
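# Allocates the self-attention and cross-attention caches expected by the FT decoder op.
# In the batch-major layout the key cache packs x elements of each head dimension into
# the innermost axis; the cross-attention cache always uses the legacy layout.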
def init_op_cache(layer_num, batch_size, beam_width, max_seq_len, \
decoding_max_seq_len, head_num, size_per_head, hidden_dim, is_fp16):
use_batch_major_op_cache, x = get_op_cache_config(size_per_head, is_fp16)
dtype = torch.half if is_fp16 else torch.float32
if use_batch_major_op_cache:
self_cache = [ torch.zeros(layer_num, batch_size * beam_width, head_num, size_per_head // x,
decoding_max_seq_len, x, dtype=dtype, device='cuda'),
torch.zeros(layer_num, batch_size * beam_width, head_num,
decoding_max_seq_len, size_per_head, dtype=dtype, device='cuda') ]
else:
self_cache = [ torch.zeros(layer_num, 0, batch_size * beam_width, hidden_dim, dtype=dtype, device='cuda'),
torch.zeros(layer_num, 0, batch_size * beam_width, hidden_dim, dtype=dtype, device='cuda') ]
# always use old format for cross attention for now
mem_cache = torch.zeros(2, layer_num, batch_size * beam_width, max_seq_len, hidden_dim, dtype=dtype, device='cuda')
return self_cache, mem_cache
def init_onmt_cache(layer_num, memory_bank):
cache = {}
for i in range(layer_num):
layer_cache = {"memory_keys": None, "memory_values": None}
layer_cache["self_keys"] = None
layer_cache["self_values"] = None
cache[i] = layer_cache
return cache
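# Reference decoder built from OpenNMT-py's TransformerDecoderLayer; it is initialized
# with the same checkpoint weights so its outputs can be compared against FTDecoder.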
class ONMTDecoder(torch.nn.Module):
def __init__(self, layer_num, head_num, head_size, weights):
super().__init__()
self.layer_num = layer_num
self.hidden_dim = head_num * head_size
self.decoders = torch.nn.ModuleList()
for i in range(layer_num):
self.decoders.append(TransformerDecoderLayer(self.hidden_dim, head_num, 4 * self.hidden_dim, 0, 0))
for i in range(layer_num):
prefix = 'decoder.transformer_layers.' + str(i)
self.decoders[i].layer_norm_1.weight.data = weights.w['model'][prefix + '.layer_norm_1.weight']
self.decoders[i].layer_norm_1.bias.data = weights.w['model'][prefix + '.layer_norm_1.bias']
self.decoders[i].self_attn.linear_query.weight.data = weights.w['model'][prefix + '.self_attn.linear_query.weight']
self.decoders[i].self_attn.linear_keys.weight.data = weights.w['model'][prefix + '.self_attn.linear_keys.weight']
self.decoders[i].self_attn.linear_values.weight.data = weights.w['model'][prefix + '.self_attn.linear_values.weight']
self.decoders[i].self_attn.linear_query.bias.data = weights.w['model'][prefix + '.self_attn.linear_query.bias']
self.decoders[i].self_attn.linear_keys.bias.data = weights.w['model'][prefix + '.self_attn.linear_keys.bias']
self.decoders[i].self_attn.linear_values.bias.data = weights.w['model'][prefix + '.self_attn.linear_values.bias']
self.decoders[i].self_attn.final_linear.weight.data = weights.w['model'][prefix + '.self_attn.final_linear.weight']
self.decoders[i].self_attn.final_linear.bias.data = weights.w['model'][prefix + '.self_attn.final_linear.bias']
self.decoders[i].layer_norm_2.weight.data = weights.w['model'][prefix + '.layer_norm_2.weight']
self.decoders[i].layer_norm_2.bias.data = weights.w['model'][prefix + '.layer_norm_2.bias']
self.decoders[i].context_attn.linear_query.weight.data = weights.w['model'][prefix + '.context_attn.linear_query.weight']
self.decoders[i].context_attn.linear_keys.weight.data = weights.w['model'][prefix + '.context_attn.linear_keys.weight']
self.decoders[i].context_attn.linear_values.weight.data = weights.w['model'][prefix + '.context_attn.linear_values.weight']
self.decoders[i].context_attn.linear_query.bias.data = weights.w['model'][prefix + '.context_attn.linear_query.bias']
self.decoders[i].context_attn.linear_keys.bias.data = weights.w['model'][prefix + '.context_attn.linear_keys.bias']
self.decoders[i].context_attn.linear_values.bias.data = weights.w['model'][prefix + '.context_attn.linear_values.bias']
self.decoders[i].context_attn.final_linear.weight.data = weights.w['model'][prefix + '.context_attn.final_linear.weight']
self.decoders[i].context_attn.final_linear.bias.data = weights.w['model'][prefix + '.context_attn.final_linear.bias']
self.decoders[i].feed_forward.layer_norm.weight.data = weights.w['model'][prefix + '.feed_forward.layer_norm.weight']
self.decoders[i].feed_forward.layer_norm.bias.data = weights.w['model'][prefix + '.feed_forward.layer_norm.bias']
self.decoders[i].feed_forward.w_1.weight.data = weights.w['model'][prefix + '.feed_forward.w_1.weight']
self.decoders[i].feed_forward.w_1.bias.data = weights.w['model'][prefix + '.feed_forward.w_1.bias']
self.decoders[i].feed_forward.w_2.weight.data = weights.w['model'][prefix + '.feed_forward.w_2.weight']
self.decoders[i].feed_forward.w_2.bias.data = weights.w['model'][prefix + '.feed_forward.w_2.bias']
def forward(self, inputs, memory, src_pad_msk, cache, step):
output = inputs
for i in range(self.layer_num):
output, _, _ = self.decoders[i](output, memory, src_pad_msk, None, cache[i], step)
return output
|
FasterTransformer-main
|
examples/pytorch/decoder/utils/decoder.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import os
import sys
import math
import logging
from datetime import datetime
import numpy as np
import torch
import torch.distributed as dist
import csv
# dir_path = os.path.dirname(os.path.realpath(__file__))
# sys.path.append(dir_path + "/../../../3rdparty/transformers/src/")
from transformers import PreTrainedTokenizerFast
from transformers import T5ForConditionalGeneration, T5Tokenizer # transformers-4.10.0-py3
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.t5.utils.ft_encoder import FTT5EncoderWeight, FTT5Encoder
from examples.pytorch.t5.utils.ft_decoding import FTT5DecodingWeight, FTT5Decoding, FTT5
from examples.pytorch.decoding.utils.recover_bpe import recover_bpe
LOGGER = logging.getLogger(__name__)
gemm_data_type_mapping = {"fp32":0, "fp16":1, "bf16":2}
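# Converts per-sample comma-separated word lists into the [batch, 2, max_len] int32
# layout (flat token ids plus cumulative offsets, padded with 0 / -1) expected by the
# bad_words_list / stop_words_list inputs.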
def to_word_list_format(word_dict, tokenizer):
flat_ids = []
offsets = []
for word_dict_item in word_dict:
item_flat_ids = []
item_offsets = []
words = list(csv.reader(word_dict_item))[0]
for word in words:
ids = tokenizer.encode(word, add_special_tokens=False)
if len(ids) == 0:
continue
item_flat_ids += ids
item_offsets.append(len(ids))
flat_ids.append(np.array(item_flat_ids))
offsets.append(np.cumsum(np.array(item_offsets)))
pad_to = max(1, max(len(ids) for ids in flat_ids))
for i, (ids, offs) in enumerate(zip(flat_ids, offsets)):
flat_ids[i] = np.pad(ids, (0, pad_to - len(ids)), constant_values=0)
offsets[i] = np.pad(offs, (0, pad_to - len(offs)), constant_values=-1)
return np.array([flat_ids, offsets], dtype="int32").transpose((1, 0, 2))
def bleu_score(pred, ref):
from sacrebleu import corpus_bleu
bleu = corpus_bleu(pred, [ref], force=True)
LOGGER.info(" bleu score: {:6.2f}".format(bleu.score))
LOGGER.info(" bleu counts: {}".format(bleu.counts))
LOGGER.info(" bleu totals: {}".format(bleu.totals))
LOGGER.info(" bleu precisions: {}".format(bleu.precisions))
LOGGER.info(" bleu sys_len: {}; ref_len: {}".format(bleu.sys_len, bleu.ref_len))
return bleu
class TranslationResult(object):
def __init__(self, name, frame_work):
self.name = name
self.frame_work = frame_work # FT or HF
self.file_name = name + ".txt"
self.token_list = []
self.batch_ids_list = []
self.batch_seq_len_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.token_num = 0
self.bleu_score = None
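# usage example (illustrative; assumes a HuggingFace t5-small checkpoint and that the
# script is run from the build directory so the default lib and data paths resolve)
# python ../examples/pytorch/t5/translate_example.py -batch 32 -time 13 -model t5-small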
def translate(args_dict):
torch.set_printoptions(precision=6)
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
topk = args_dict['sampling_topk']
topp = args_dict['sampling_topp']
tensor_para_size = args_dict['tensor_para_size']
pipeline_para_size = args_dict['pipeline_para_size']
max_ite = args_dict['max_iteration']
repetition_penalty = args_dict["repetition_penalty"]
temperature = args_dict["temperature"]
len_penalty = args_dict["len_penalty"]
## huggingface without bias and use relative position embedding
## relative position embedding -> 0, absolute position embedding -> 1
t5_with_bias = False
use_gated_activation = False
t5_with_moe = False
position_embedding_type = 0
weight_data_type = np.float32
## only huggingface model path supported
model_path = args_dict['model_path'] if args_dict['model_path'] != None else args_dict['model']
ckpt_path = args_dict['ckpt_path']
model_type = args_dict['model_type']
load_data_type = args_dict['load_data_type']
## read checkpoint config if exists
ckpt_config = configparser.ConfigParser()
activation_type = "relu"
if (model_type in ["Megatron", "Megatron-DeepSpeed"]):
ckpt_config_path = os.path.join(ckpt_path, 'config.ini')
if os.path.isfile(ckpt_config_path):
ckpt_config.read(ckpt_config_path)
## update structure config
t5_with_bias = ckpt_config.getboolean('structure', 't5_with_bias')
position_embedding_type = 0 if ckpt_config.get('structure', 'position_embedding_type') == 'relative' else 1
use_gated_activation = ckpt_config.getboolean('structure', 'use_gated_activation')
weight_data_type = {"fp16": np.float16, "fp32": np.float32}[ckpt_config.get("encoder", "weight_data_type")]
activation_type = "gated-gelu" if use_gated_activation else "gelu" # change to gelu, which is default setting of Megatron T5
t5_with_moe= ckpt_config.getint('structure', 't5_with_moe') == 1
moe_layers_in_encoder = []
moe_layers_in_decoder = []
if (ckpt_config.get('structure', 'moe_layers_in_encoder') != '[]'):
moe_layers_in_encoder = [int(n) for n in ckpt_config.get('structure', 'moe_layers_in_encoder')[1:-1].replace(" ", "").split(',')]
if (ckpt_config.get('structure', 'moe_layers_in_decoder') != '[]'):
moe_layers_in_decoder = [int(n) for n in ckpt_config.get('structure', 'moe_layers_in_decoder')[1:-1].replace(" ", "").split(',')]
else:
raise Exception("config file does exist with the ckpt !")
if model_type in ["Megatron", "Megatron-DeepSpeed"] and args_dict['ckpt_path'] == None:
raise Exception("Megatron T5 model needs to specify checkpoint path !")
LOGGER.info("\n=============== Argument ===============")
for key in args_dict:
LOGGER.info("{}: {}".format(key, args_dict[key]))
LOGGER.info("========================================")
lib_path = args_dict['lib_path']
t5_model = T5ForConditionalGeneration.from_pretrained(model_path)
if dist.is_mpi_available():
try:
dist.init_process_group(backend='mpi')
rank = dist.get_rank()
except:
rank = dist.get_rank()
else:
rank = 0
if time_args.find("0") != -1 or time_args.find("2") != -1:
t5_model = t5_model.to(rank)
if args_dict['data_type'] == 'fp16':
t5_model = t5_model.half()
elif args_dict['data_type'] == 'bf16':
t5_model = t5_model ## bfloat inference not supported yet
## TODO: modify Megatron T5 Converter
## TODO: add megatron t5 tokenizer
tokenizer = T5Tokenizer.from_pretrained(model_path)
try:
fast_tokenizer = PreTrainedTokenizerFast.from_pretrained(model_path)
except:
fast_tokenizer = T5Tokenizer.from_pretrained(model_path)
encoder_config = t5_model.encoder.config
decoder_config = t5_model.decoder.config
encoder_config.update({"num_experts": 0})
decoder_config.update({"num_experts": 0})
encoder_config.update({"moe_layer_index": []})
decoder_config.update({"moe_layer_index": []})
if model_type != "Megatron":
activation_type = encoder_config.feed_forward_proj
if activation_type == "gated-gelu" or activation_type == "gated-relu":
use_gated_activation = True
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L1660
# if tie_word_embeddings == True, scale the decoder output by sequence_output = sequence_output * (self.model_dim**-0.5)
tie_word_embeddings = decoder_config.tie_word_embeddings
q_scaling = 1.0 / (math.sqrt(encoder_config.d_kv))
if model_type in ["Megatron", "Megatron-DeepSpeed"]:
## update configs when using Megatron model structure
q_scaling = 1.0
encoder_ckpt_config = ckpt_config['encoder']
decoder_ckpt_config = ckpt_config['decoder']
encoder_config.d_model = ckpt_config.getint('encoder', 'd_model')
encoder_config.vocab_size = ckpt_config.getint('encoder', 'vocab_size')
encoder_config.num_heads = ckpt_config.getint('encoder', 'num_heads')
encoder_config.d_kv = ckpt_config.getint('encoder', 'd_kv')
encoder_config.d_ff = ckpt_config.getint('encoder', 'd_ff')
encoder_config.num_layers = ckpt_config.getint('encoder', 'num_layers')
encoder_config.relative_attention_num_buckets = ckpt_config.getint('encoder', 'relative_attention_num_buckets_or_max_pos_seq_len')
if model_type == "Megatron-DeepSpeed":
encoder_config.num_experts = ckpt_config.getint('encoder', 'num_experts')
encoder_config.moe_layer_index = moe_layers_in_encoder
decoder_config.d_model = ckpt_config.getint('decoder', 'd_model')
decoder_config.vocab_size = ckpt_config.getint('decoder', 'vocab_size')
decoder_config.num_heads = ckpt_config.getint('decoder', 'num_heads')
decoder_config.d_kv = ckpt_config.getint('decoder', 'd_kv')
decoder_config.d_ff = ckpt_config.getint('decoder', 'd_ff')
decoder_config.num_layers = ckpt_config.getint('decoder', 'num_layers')
decoder_config.relative_attention_num_buckets = ckpt_config.getint('decoder', 'relative_attention_num_buckets_or_max_pos_seq_len')
if model_type == "Megatron-DeepSpeed":
decoder_config.num_experts = ckpt_config.getint('decoder', 'num_experts')
decoder_config.moe_layer_index = moe_layers_in_decoder
decoder_config.decoder_start_token_id = ckpt_config.getint('decoder', 'decoder_start_token_id')
decoder_config.eos_token_id = ckpt_config.getint('decoder', 'eos_token_id')
LOGGER.debug(f"{model_type} encoder_config: {encoder_config}")
LOGGER.debug(f"{model_type} decoder_config: {decoder_config}")
if os.path.isfile("gemm_config.in") and rank == 0:
cmd = f"rm gemm_config.in"
LOGGER.info(f"Run {cmd}")
os.system(cmd)
translation_result_list = []
if (t5_with_moe == 1) and (time_args.find('0') != -1 or time_args.find('2') != -1):
raise Exception("HF models doesn't support MoE inference")
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult("hf-beamsearch-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-beamsearch", "HF"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult("ft-beamsearch-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-beamsearch", "FT"))
if rank == 0:
data_type = gemm_data_type_mapping[args_dict['data_type']]
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {beam_size} {max_seq_len} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {data_type} {tensor_para_size} 0 > .tmp_gemm.log"
LOGGER.info(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult("hf-sampling-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-sampling", "HF"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult("ft-sampling-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-sampling", "FT"))
if rank == 0:
data_type = gemm_data_type_mapping[args_dict['data_type']]
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {1} {max_seq_len} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {data_type} {tensor_para_size} 1 > .tmp_gemm.log"
LOGGER.info(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("1") != -1 or time_args.find("3") != -1:
ft_encoder_weight = FTT5EncoderWeight(
encoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
t5_with_moe=t5_with_moe,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
ft_decoding_weight = FTT5DecodingWeight(
decoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
t5_with_moe=t5_with_moe,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
if args_dict["ckpt_path"] is not None:
ft_encoder_weight.load_from_bin(args_dict["ckpt_path"], model_type, load_data_type)
ft_decoding_weight.load_from_bin(args_dict["ckpt_path"], model_type, load_data_type)
else:
ft_encoder_weight.load_from_model(t5_model)
ft_decoding_weight.load_from_model(t5_model)
if args_dict['data_type'] == 'fp16':
if load_data_type != 'fp16':
t5_model = t5_model.half()
ft_encoder_weight.to_half()
ft_decoding_weight.to_half()
elif args_dict['data_type'] == 'bf16':
t5_model = t5_model ## bfloat inference not supported yet
ft_encoder_weight.to_bfloat16()
ft_decoding_weight.to_bfloat16()
remove_padding = batch_size > 32
ft_encoder = FTT5Encoder(ft_encoder_weight.w, lib_path, encoder_config.num_heads,
encoder_config.d_kv, encoder_config.d_ff,
encoder_config.d_model, remove_padding, encoder_config.num_layers,
encoder_config.relative_attention_num_buckets, encoder_config.num_experts, encoder_config.moe_layer_index,
128, False, q_scaling, tensor_para_size, pipeline_para_size, t5_with_bias,
position_embedding_type, moe_k=0,
activation_type=activation_type,)
ft_decoding = FTT5Decoding(ft_decoding_weight.w, lib_path,
decoder_config.num_heads, decoder_config.d_kv,
decoder_config.d_ff, encoder_config.d_model,
decoder_config.d_model, decoder_config.num_layers,
decoder_config.decoder_start_token_id, decoder_config.eos_token_id,
decoder_config.vocab_size,
q_scaling,
decoder_config.relative_attention_num_buckets, decoder_config.num_experts, decoder_config.moe_layer_index, max_distance=128,
tensor_para_size=tensor_para_size, pipeline_para_size=pipeline_para_size,
t5_with_bias=t5_with_bias,
position_embedding_type=position_embedding_type, moe_k=0,
activation_type=activation_type, tie_word_embeddings=tie_word_embeddings,)
ft_t5 = FTT5(ft_encoder, ft_decoding)
with open(source_file, 'r') as f:
src_text = recover_bpe(f.readlines())
src_text = ["translate English to German: " + line.strip() for line in src_text]
with open(tgt_file, 'r') as f:
tgt_text = recover_bpe(f.readlines())
for i in range(len(translation_result_list)):
sys.stdout.flush()
prev = 0
start_time = datetime.now()
while prev < len(src_text):
input_texts = src_text[prev:prev+batch_size]
prev += batch_size
input_token = tokenizer(input_texts, return_tensors='pt', padding=True)
# An example to prevent generating "Chef"
# bad_words_text = np.array([["Chef"]]* len(input_texts), dtype=object)
# bad_words_list = to_word_list_format(bad_words_text, tokenizer)
# bad_words_list = torch.Tensor(bad_words_list).to(torch.int32).to("cuda").contiguous()
bad_words_list = None
# An example to stop generation when the model generate "Chef"
# stop_words_text = np.array([["Chef"]] * len(input_texts), dtype=object)
# stop_words_list = to_word_list_format(stop_words_text, tokenizer)
# stop_words_list = torch.Tensor(stop_words_list).to(torch.int32).to("cuda").contiguous()
stop_words_list = None
if translation_result_list[i].frame_work == "HF":
if translation_result_list[i].name.find("beamsearch") != -1:
hf_outputs = t5_model.generate(input_token.input_ids.to("cuda"),
max_length=max_seq_len,
early_stopping=True,
num_beams=beam_size)
elif translation_result_list[i].name.find("sampling") != -1:
hf_outputs = t5_model.generate(input_token.input_ids.to("cuda"),
max_length=max_seq_len,
early_stopping=True,
do_sample=True,
top_k=topk if topk > 0 else None,
top_p=topp if topp > 0.0 else None)
translation_result_list[i].batch_ids_list.append(hf_outputs)
translation_result_list[i].batch_seq_len_list.append(np.ones(len(input_texts)) * max_seq_len)
elif translation_result_list[i].frame_work == "FT":
tmp_beam_size = beam_size
if translation_result_list[i].name.find("sampling") != -1:
tmp_beam_size = 1
ft_decoding_outputs, ft_decoding_seq_lens = ft_t5(input_token,
None,
tmp_beam_size,
max_seq_len,
topk,
topp,
beam_search_diversity_rate=beam_search_diversity_rate,
is_return_output_log_probs=args_dict["return_output_log_probs"],
is_return_cum_log_probs=args_dict["return_cum_log_probs"],
repetition_penalty=repetition_penalty,
temperature=temperature,
len_penalty=len_penalty,
bad_words_list=bad_words_list,
stop_words_list=stop_words_list,)
translation_result_list[i].batch_ids_list.append(ft_decoding_outputs)
translation_result_list[i].batch_seq_len_list.append(ft_decoding_seq_lens)
translation_result_list[i].sentence_num += len(input_token)
translation_result_list[i].batch_num += 1
if translation_result_list[i].name.find("warmup") != -1 and \
(translation_result_list[i].batch_num > 10 or translation_result_list[i].sentence_num > 300):
break
if translation_result_list[i].batch_num >= max_ite:
break
stop_time = datetime.now()
translation_result_list[i].execution_time = (stop_time - start_time).total_seconds()
if translation_result_list[i].name.find("warmup") != -1:
continue
for batch_token, batch_seq_len in zip(translation_result_list[i].batch_ids_list, translation_result_list[i].batch_seq_len_list):
for j in range(len(batch_token)):
if translation_result_list[i].frame_work == "HF":
translation_result_list[i].token_list.append(fast_tokenizer.decode(batch_token[j][1:], skip_special_tokens=True))
translation_result_list[i].token_num += sum(batch_token[j][:] != 0)
elif translation_result_list[i].frame_work == "FT":
translation_result_list[i].token_list.append(fast_tokenizer.decode(batch_token[j][0][:batch_seq_len[j][0]], skip_special_tokens=True))
translation_result_list[i].token_num += batch_seq_len[j][0]
if rank == 0:
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].token_list, tgt_text[:len(translation_result_list[i].token_list)])
with open(translation_result_list[i].name + ".txt", 'w') as f:
for line in translation_result_list[i].token_list:
f.write(line + "\n")
if rank == 0:
for t in translation_result_list:
if t.name.find("warmup") != -1:
continue
LOGGER.info(f"{t.name} translates {t.batch_num} batches taking {t.execution_time:.2f} sec to translate "
f"{t.token_num} tokens, BLEU score: {t.bleu_score.score:.2f}, {(t.token_num / t.execution_time):.0f} tokens/sec."
f" ({t.bleu_score.sys_len} words, {(t.bleu_score.sys_len / t.execution_time):.0f} words/sec)")
if t.name == "ft-beamsearch" and args_dict["ft_beamsearch_BLEU_threshold"] != None:
assert t.bleu_score.score >= args_dict["ft_beamsearch_BLEU_threshold"], f"[ERROR] {t.name} test fail !"
LOGGER.info(f"{t.name} PASS !")
if t.name == "ft-sampling" and args_dict["ft_sampling_BLEU_threshold"] != None:
assert t.bleu_score.score >= args_dict["ft_sampling_BLEU_threshold"], f"[ERROR] {t.name} test fail !"
LOGGER.info(f"{t.name} PASS !")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
help='max sequence length (default: 200)')
parser.add_argument("--source", default="../examples/pytorch/decoding/utils/translation/test.en",
help="Path to the source file.")
parser.add_argument("--target", default="../examples/pytorch/decoding/utils/translation/test.de",
help="Path to the target file.")
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
help='''
Test the time of which one (default: '' (not test anyone) );
'': not test anyone
'0': test hf_beamsearch
'1': test ft_beamsearch
'2': test hf_sampling
'3': test ft_sampling
e.g., if you want to test hf_beamsearch and ft_sampling,
then you need to use -time '03' ''')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
help='diversity rate of beam search. Default is 0. When diversity rate = 0, it is equivalent to the naive beam search.')
parser.add_argument('-repeat_penalty', '--repetition_penalty', type=float, default=1.0, metavar='NUMBER',
help='Repetition penalty for generating tokens. Default is 1.0.')
parser.add_argument('-temperature', '--temperature', type=float, default=1.0, metavar='NUMBER',
help='Temperature penalty for generating tokens. Default is 1.0.')
parser.add_argument('-len_penalty', '--len_penalty', type=float, default=0.0, metavar='NUMBER',
help='Length penalty for generating tokens. Default is 0.0.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type for inference (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-ld', '--load_data_type', type=str, default="fp32", metavar='STRING',
help='data type for loading weights (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-lib_path', '--lib_path', type=str, default="lib/libth_transformer.so", metavar='STRING',
help='the path of FasterTransformer pytorch t5 op library.')
parser.add_argument('-model_path', '--model_path', type=str, default=None, metavar='STRING',
help='T5 model path.')
parser.add_argument('-model', '--model', type=str, default="t5-small", metavar='STRING',
help='T5 model size. Only used when --model_path=None')
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='size of tensor parallelism (default: 1)')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='size of pipeline parallelism (default: 1)')
# assume checkpoint config is also in the same path
parser.add_argument('--ckpt_path', type=str, help='path to the checkpoint file.')
parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
help='Maximum iteration for translation, default is 100000 (as large as possible to run all test set).')
parser.add_argument('--model_type', type=str, default="Huggingface", choices=["Huggingface", "Megatron", "Megatron-DeepSpeed"],
help='Megatron T5 uses bias and supports both absolute and relative positional embedding;'
'Huggingface T5 adopts the paper\'s implementation and has no bias')
parser.add_argument('--return_output_log_probs', action='store_true',
help='Return the log probability of generated tokens.')
parser.add_argument('--return_cum_log_probs', action='store_true',
help='Return the cumulative log probability of generated tokens.')
parser.add_argument('--ft_beamsearch_BLEU_threshold', type=float,
help='Threshold of FT beam search BLEU score')
parser.add_argument('--ft_sampling_BLEU_threshold', type=float,
help='Threshold of FT sampling BLEU score')
parser.add_argument("--verbose", action="store_true", help="Provide verbose messages")
args = parser.parse_args()
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO, format=log_format)
translate(vars(args))
|
FasterTransformer-main
|
examples/pytorch/t5/translate_example.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import dataclasses
import json
import os
import pathlib
import time
import numpy as np
import torch
import torch.distributed as dist
from tqdm import tqdm
from omegaconf.omegaconf import OmegaConf
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import (
TextToTextGLUEDataset,
TextToTextXNLIDataset,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.collections.common.metrics.classification_accuracy import ExactStringPerCategoryMatchMetric
from examples.pytorch.t5.utils.ft_encoder import FTT5EncoderWeight, FTT5Encoder
from examples.pytorch.t5.utils.ft_decoding import FTT5DecodingWeight, FTT5Decoding, FTT5
from examples.pytorch.tokenizer import add_special_tokens_to_tokenizer
def _build_dataset(data_cfg, tokenizer):
if data_cfg.task_name == 'xnli':
dataset = TextToTextXNLIDataset(
data_cfg.file_path,
task_name=data_cfg.task_name,
tokenizer=tokenizer,
max_seq_length=data_cfg.max_seq_length,
lang_list=data_cfg.eval_languages,
)
else:
dataset = TextToTextGLUEDataset(
data_cfg.file_path,
task_name=data_cfg.task_name,
tokenizer=tokenizer,
max_seq_length=data_cfg.max_seq_length,
)
return dataset
@dataclasses.dataclass
class Metric:
acc: float
@dataclasses.dataclass
class RequestAndResult:
model_answer: str
target: str
lang: str
metrics: Metric
def preds_and_labels_to_text(tokenizer, preds, labels):
preds = preds.cpu().numpy().tolist()
labels = labels.cpu().numpy().tolist()
# preds = [pred[0] for pred in preds]
preds_text, labels_text = [], []
for _, (pred, label) in enumerate(zip(preds, labels)):
if tokenizer.eos_id in pred:
idx = pred.index(tokenizer.eos_id)
pred = pred[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(tokenizer, 'special_token_to_id'):
pred = [id for id in pred if id not in tokenizer.special_token_to_id.values()]
label = [id for id in label if id not in tokenizer.special_token_to_id.values()]
pred = tokenizer.ids_to_text(pred)
label = tokenizer.ids_to_text(label)
preds_text.append(pred)
labels_text.append(label)
return preds_text, labels_text
def accuracy_score(pred, ref):
assert len(pred) == len(ref)
total = len(pred)
correct = 0
for p, r in zip(pred, ref):
if p in r:
correct += 1
# else:
# print(f"[pred]: {p} [label]: {r}")
print(f"[total_acc] {correct / total}")
return correct / total
class InputToken:
def __init__(self, input_ids, attention_mask):
self.input_ids = input_ids
self.attention_mask = attention_mask
class EncoderDecoderConfig:
def __init__(self, d_model, vocab_size, num_heads, d_kv, d_ff, num_layers,
relative_attention_num_buckets_or_max_pos_seq_len, decoder_start_token_id=0, decoder_end_token_id=1):
self.d_model = d_model
self.vocab_size = vocab_size
self.num_heads = num_heads
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.relative_attention_num_buckets = relative_attention_num_buckets_or_max_pos_seq_len
self.decoder_start_token_id = decoder_start_token_id
self.decoder_end_token_id = decoder_end_token_id
data_type_mapping = {"fp32": 0, "fp16": 1, "bf16": 2}
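# XNLI evaluation driver: loads a Megatron/NeMo mT5 checkpoint converted for
# FasterTransformer and runs the FT T5 encoder/decoding ops over the XNLI dataset.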
def xnli_task(args_dict):
torch.set_printoptions(precision=6)
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_output_len = args_dict['max_output_len']
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
topk = args_dict['sampling_topk']
topp = args_dict['sampling_topp']
tensor_para_size = args_dict['tensor_para_size']
pipeline_para_size = args_dict['pipeline_para_size']
if args_dict['ckpt_path'] is None:
raise Exception("Megatron T5 model needs to specify checkpoint path !")
if dist.is_mpi_available():
try:
dist.init_process_group(backend='mpi')
rank = dist.get_rank()
except:
rank = dist.get_rank()
else:
rank = 0
assert dist.get_world_size() == tensor_para_size * pipeline_para_size
ckpt_path = args_dict['ckpt_path']
## read checkpoint config if exists
ckpt_config = configparser.ConfigParser()
tokenizer_model_path = os.path.join(ckpt_path, "tokenizer.model")
ckpt_config_path = os.path.join(ckpt_path, 'config.ini')
if os.path.isfile(ckpt_config_path):
ckpt_config.read(ckpt_config_path)
## update structure config
t5_with_bias = ckpt_config.getboolean('structure', 't5_with_bias')
        ## Megatron T5 uses bias and absolute position embeddings
## relative position embedding -> 0, absolute position embedding -> 1
position_embedding_type = 0 if ckpt_config.get('structure', 'position_embedding_type') == 'relative' else 1
use_gated_activation = ckpt_config.getboolean('structure', 'use_gated_activation')
weight_data_type = {"fp16": np.float16, "fp32": np.float32}[ckpt_config.get("encoder", "weight_data_type")]
activation_type = ckpt_config.get('encoder', 'feed_forward_proj')
assert ckpt_config.getint("encoder", "tensor_para_size") == tensor_para_size
else:
raise Exception("config file does exist with the ckpt !")
if rank == 0:
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
lib_path = args_dict['lib_path']
    # build tokenizer, dataset and dataloader for XNLI
tokenizer_mt5 = get_nmt_tokenizer(
library='sentencepiece',
model_name=None,
tokenizer_model=tokenizer_model_path,
vocab_file=None,
merges_file=None,
legacy=True,
)
add_special_tokens_to_tokenizer(tokenizer_mt5)
assert tokenizer_mt5.bos_id == ckpt_config.getint("decoder", "decoder_start_token_id")
assert tokenizer_mt5.eos_id == ckpt_config.getint("decoder", "eos_token_id")
token_params = {
tokenizer_mt5.bos_token: tokenizer_mt5.bos_id,
tokenizer_mt5.eos_token: tokenizer_mt5.eos_id,
tokenizer_mt5.pad_token: tokenizer_mt5.pad_id,
}
print(f"tokenizer special tokens: {token_params}")
xnli_cfg = OmegaConf.create({
"file_path": args_dict['data_path'],
"task_name": "xnli",
"max_seq_length": 512,
"eval_languages": ['en', 'es', 'de', 'fr']
})
xnli_dataset = _build_dataset(xnli_cfg, tokenizer_mt5)
data_loader = torch.utils.data.DataLoader(
xnli_dataset,
collate_fn=xnli_dataset.collate_fn,
batch_size=batch_size,
num_workers=1,
pin_memory=False,
drop_last=True)
q_scaling = 1.0
encoder_config = EncoderDecoderConfig(ckpt_config.getint('encoder', 'd_model'),
ckpt_config.getint('encoder', 'vocab_size'),
ckpt_config.getint('encoder', 'num_heads'),
ckpt_config.getint('encoder', 'd_kv'),
ckpt_config.getint('encoder', 'd_ff'),
ckpt_config.getint('encoder', 'num_layers'),
ckpt_config.getint('encoder', 'relative_attention_num_buckets_or_max_pos_seq_len')
)
decoder_config = EncoderDecoderConfig(ckpt_config.getint('decoder', 'd_model'),
ckpt_config.getint('decoder', 'vocab_size'),
ckpt_config.getint('decoder', 'num_heads'),
ckpt_config.getint('decoder', 'd_kv'),
ckpt_config.getint('decoder', 'd_ff'),
ckpt_config.getint('decoder', 'num_layers'),
ckpt_config.getint('decoder', 'relative_attention_num_buckets_or_max_pos_seq_len'),
tokenizer_mt5.bos_id,
tokenizer_mt5.eos_id
)
## run gemm test
if os.path.isfile("gemm_config.in") and rank == 0:
cmd = f"rm gemm_config.in"
print(f"Run {cmd}")
os.system(cmd)
if rank == 0:
data_type = data_type_mapping[args_dict['data_type']]
cmd = f"./bin/t5_gemm {batch_size // pipeline_para_size} {beam_size} {128} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {data_type} {tensor_para_size} 1 > .tmp_gemm.log"
print(f"Run gemm test: {cmd}")
os.system(cmd)
dist.barrier()
ft_encoder_weight = FTT5EncoderWeight(
encoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
ft_decoding_weight = FTT5DecodingWeight(
decoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
ft_encoder_weight.load_from_bin(args_dict["ckpt_path"], "Megatron")
ft_decoding_weight.load_from_bin(args_dict["ckpt_path"], "Megatron")
if args_dict['data_type'] == 'fp16':
ft_encoder_weight.to_half()
ft_decoding_weight.to_half()
elif args_dict['data_type'] == 'fp32':
ft_encoder_weight.to_single()
ft_decoding_weight.to_single()
elif args_dict['data_type'] == 'bf16':
ft_encoder_weight.to_bfloat16()
ft_decoding_weight.to_bfloat16()
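    # Enable padding removal only for larger batches, where skipping padded tokens is expected to pay off.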
remove_padding = True if batch_size > 32 else False
ft_encoder = FTT5Encoder(ft_encoder_weight.w, lib_path, encoder_config.num_heads,
encoder_config.d_kv, encoder_config.d_ff,
encoder_config.d_model, remove_padding, encoder_config.num_layers,
encoder_config.relative_attention_num_buckets,
0, # num_experts
[], # moe_layer_index
128, False, q_scaling, tensor_para_size, pipeline_para_size, t5_with_bias, position_embedding_type, 0, activation_type)
ft_decoding = FTT5Decoding(ft_decoding_weight.w, lib_path,
decoder_config.num_heads, decoder_config.d_kv,
decoder_config.d_ff, encoder_config.d_model,
decoder_config.d_model, decoder_config.num_layers,
decoder_config.decoder_start_token_id, decoder_config.decoder_end_token_id,
decoder_config.vocab_size,
q_scaling,
decoder_config.relative_attention_num_buckets,
0, # num_experts
[], # moe_layer_index
max_distance=128,
tensor_para_size=tensor_para_size, pipeline_para_size=pipeline_para_size,
t5_with_bias=t5_with_bias, moe_k=0, activation_type=activation_type, position_embedding_type=position_embedding_type)
ft_t5 = FTT5(ft_encoder, ft_decoding)
    # per-language exact string match metric
languages = ['de','en','es','fr']
acc_metric = ExactStringPerCategoryMatchMetric(languages)
preds_list = []
labels_list = []
results_list = []
start = time.time()
for idx, batch in tqdm(enumerate(data_loader)):
input_token = InputToken(batch['text_enc'], batch['enc_mask'])
ft_decoding_outputs, ft_decoding_seq_lens = ft_t5(input_token,
None,
beam_size,
max_output_len,
topk,
topp,
beam_search_diversity_rate=beam_search_diversity_rate,
is_return_output_log_probs=args_dict["return_output_log_probs"],
is_return_cum_log_probs=args_dict["return_cum_log_probs"])
ft_decoding_outputs = ft_decoding_outputs.squeeze()
preds, labels = preds_and_labels_to_text(tokenizer_mt5, torch.IntTensor(ft_decoding_outputs), batch['labels'])
langs = batch['lang']
for _, (pred, label, lang) in enumerate(zip(preds, labels, langs)):
_ = acc_metric(pred, label, lang)
labels_list += labels
preds_list += preds
results_list.extend([
RequestAndResult(
model_answer=pred,
target=label,
lang=lang,
metrics=Metric(acc=(pred == label))
)
for lang, pred, label in zip(langs, preds, labels)
])
end = time.time()
lang_accuracy = acc_metric.compute()
if rank == 0:
print(f"\n[Elapsed Time]: {end - start} seconds")
# each language
for lang in languages:
print(f'[{lang}_acc]', lang_accuracy[lang].item())
# total accuracy
accuracy = accuracy_score(preds_list, labels_list)
output_path = args_dict.get("output_path")
if output_path is not None and rank == 0:
output_path = pathlib.Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as output_file:
results = {
"results": {
"xnli": {
"acc": accuracy
}
},
"output": {
"xnli": [
dataclasses.asdict(r) for r in results_list
]
}
}
json.dump(results, output_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_output_len', type=int, default=10, metavar='NUMBER',
help='max output length (default: 10)')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search. Default is 0. When diversity rate = 0, it is equivalent to the naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-lib_path', '--lib_path', type=str, default="/workspace/FasterTransformer/build/lib/libth_transformer.so", metavar='STRING',
help='the path of FasterTransformer pytorch t5 op library.')
parser.add_argument('-data_path', '--data_path', type=str, required=True, help="the xnli task data path")
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='size of tensor parallelism (default: 1)')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='size of pipeline parallelism (default: 1)')
# assume checkpoint config is also in the same path
parser.add_argument('--ckpt_path', type=str, help='path to the checkpoint file.')
parser.add_argument('--output_path', help='path to results file with calculated metrics.')
parser.add_argument('--return_output_log_probs', action='store_true',
help='Return the log probability of generated tokens.')
parser.add_argument('--return_cum_log_probs', action='store_true',
help='Return the cumulative log probability of generated tokens.')
args = parser.parse_args()
xnli_task(vars(args))
|
FasterTransformer-main
|
examples/pytorch/t5/xnli_task_example.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
perf_benchmark.py
Unlike translate_example.py, this script focuses on benchmarking the performance of the FasterTransformer T5
implementation such that it can be compared with other frameworks apples-to-apples. The changes include:
- Use random input data and disable accuracy checking.
- Use fixed input/output sequence lengths and disable early_stopping.
- Add better controls on the number of warm-ups and the number of iterations to run the inference for.
"""
import argparse
import configparser
import os
import sys
import math
from datetime import datetime
import numpy as np
import torch
import torch.distributed as dist
# dir_path = os.path.dirname(os.path.realpath(__file__))
# sys.path.append(dir_path + "/../../../3rdparty/transformers/src/")
from transformers import T5ForConditionalGeneration # transformers-4.10.0-py3
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.t5.utils.ft_encoder import FTT5EncoderWeight, FTT5Encoder
from examples.pytorch.t5.utils.ft_decoding import FTT5DecodingWeight, FTT5Decoding, FTT5
class TranslationResult(object):
def __init__(self, name, frame_work):
self.name = name
self.frame_work = frame_work # FT or HF
self.file_name = name + ".txt"
self.token_list = []
self.batch_ids_list = []
self.batch_seq_len_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.token_num = 0
class InputTokens(object):
def __init__(self, batch_size, input_seq_len, bos_token, eos_token, vocab_size):
# Set the last token of each sequence to eos and replace the bos/eos tokens in the middle of the sequences to
# some other tokens.
normal_token_list = list(range(vocab_size))
if bos_token in normal_token_list:
normal_token_list.remove(bos_token)
if eos_token in normal_token_list:
normal_token_list.remove(eos_token)
self.input_ids = torch.randint(0, len(normal_token_list), (batch_size, input_seq_len))
for batch_idx in range(batch_size):
for token_idx in range(input_seq_len):
if token_idx == input_seq_len - 1:
self.input_ids[batch_idx][token_idx] = eos_token
else:
self.input_ids[batch_idx][token_idx] = normal_token_list[self.input_ids[batch_idx][token_idx]]
# Set attention masks to all ones.
self.attention_mask = torch.ones((batch_size, input_seq_len), dtype=torch.int64)
def translate(args_dict):
torch.set_printoptions(precision=6)
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
output_seq_len = args_dict['seq_len']
input_seq_len = args_dict['input_seq_len'] if args_dict['input_seq_len'] > 0 else output_seq_len
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
topk = args_dict['sampling_topk']
topp = args_dict['sampling_topp']
tensor_para_size = args_dict['tensor_para_size']
pipeline_para_size = args_dict['pipeline_para_size']
warmup_iterations = args_dict['warmup_iterations']
infer_iterations = args_dict['iterations']
infer_duration = args_dict['duration']
seed = args_dict['seed']
skip_gemm = args_dict['skip_gemm']
torch.manual_seed(seed)
    ## Hugging Face T5 has no bias and uses relative position embeddings
## relative position embedding -> 0, absolute position embedding -> 1
t5_with_bias = False
use_gated_activation = False
t5_with_moe = False
position_embedding_type = 0
weight_data_type = np.float32
## only huggingface model path supported
model_path = args_dict['model_path'] if args_dict['model_path'] != None else args_dict['model']
ckpt_path = args_dict['ckpt_path']
model_type = args_dict['model_type']
## read checkpoint config if exists
ckpt_config = configparser.ConfigParser()
activation_type = "relu"
if (model_type in ["Megatron", "Megatron-DeepSpeed"]):
ckpt_config_path = os.path.join(ckpt_path, 'config.ini')
if os.path.isfile(ckpt_config_path):
ckpt_config.read(ckpt_config_path)
## update structure config
t5_with_bias = ckpt_config.getboolean('structure', 't5_with_bias')
position_embedding_type = 0 if ckpt_config.get('structure', 'position_embedding_type') == 'relative' else 1
use_gated_activation = ckpt_config.getboolean('structure', 'use_gated_activation')
t5_with_moe= ckpt_config.getint('structure', 't5_with_moe') == 1
weight_data_type = {"fp16": np.float16, "fp32": np.float32}[ckpt_config.get("encoder", "weight_data_type")]
activation_type = "gated-gelu" if use_gated_activation else "gelu" # change to gelu, which is default setting of Megatron T5
moe_layers_in_encoder = []
moe_layers_in_decoder = []
if (ckpt_config.get('structure', 'moe_layers_in_encoder') != '[]'):
moe_layers_in_encoder = [int(n) for n in ckpt_config.get('structure', 'moe_layers_in_encoder')[1:-1].replace(" ", "").split(',')]
if (ckpt_config.get('structure', 'moe_layers_in_decoder') != '[]'):
                moe_layers_in_decoder = [int(n) for n in ckpt_config.get('structure', 'moe_layers_in_decoder')[1:-1].replace(" ", "").split(',')]
else:
raise Exception("config file does exist with the ckpt !")
if model_type == "Megatron" and args_dict['ckpt_path'] == None:
raise Exception("Megatron T5 model needs to specify checkpoint path !")
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
lib_path = args_dict['lib_path']
t5_model = T5ForConditionalGeneration.from_pretrained(model_path)
if dist.is_mpi_available():
try:
dist.init_process_group(backend='mpi')
rank = dist.get_rank()
except:
rank = dist.get_rank()
else:
rank = 0
if time_args.find("0") != -1 or time_args.find("2") != -1:
t5_model = t5_model.to(rank)
if args_dict['data_type'] == 'fp16':
t5_model = t5_model.half()
encoder_config = t5_model.encoder.config
decoder_config = t5_model.decoder.config
encoder_config.update({"num_experts": 0})
decoder_config.update({"num_experts": 0})
encoder_config.update({"moe_layer_index": []})
decoder_config.update({"moe_layer_index": []})
q_scaling = 1.0 / (math.sqrt(encoder_config.d_kv))
if (model_type in ["Megatron", "Megatron-DeepSpeed"]):
## update configs when using Megatron model structure
q_scaling = 1.0
encoder_config.d_model = ckpt_config.getint('encoder', 'd_model')
encoder_config.vocab_size = ckpt_config.getint('encoder', 'vocab_size')
encoder_config.num_heads = ckpt_config.getint('encoder', 'num_heads')
encoder_config.d_kv = ckpt_config.getint('encoder', 'd_kv')
encoder_config.d_ff = ckpt_config.getint('encoder', 'd_ff')
encoder_config.num_layers = ckpt_config.getint('encoder', 'num_layers')
encoder_config.relative_attention_num_buckets = ckpt_config.getint('encoder', 'relative_attention_num_buckets_or_max_pos_seq_len')
if model_type == "Megatron-DeepSpeed":
encoder_config.num_experts = ckpt_config.getint('encoder', 'num_experts')
encoder_config.moe_layer_index = moe_layers_in_encoder
decoder_config.d_model = ckpt_config.getint('decoder', 'd_model')
decoder_config.vocab_size = ckpt_config.getint('decoder', 'vocab_size')
decoder_config.num_heads = ckpt_config.getint('decoder', 'num_heads')
decoder_config.d_kv = ckpt_config.getint('decoder', 'd_kv')
decoder_config.d_ff = ckpt_config.getint('decoder', 'd_ff')
decoder_config.num_layers = ckpt_config.getint('decoder', 'num_layers')
decoder_config.relative_attention_num_buckets = ckpt_config.getint('decoder', 'relative_attention_num_buckets_or_max_pos_seq_len')
if model_type == "Megatron-DeepSpeed":
decoder_config.num_experts = ckpt_config.getint('decoder', 'num_experts')
decoder_config.moe_layer_index = moe_layers_in_decoder
decoder_config.decoder_start_token_id = ckpt_config.getint('decoder', 'decoder_start_token_id')
decoder_config.eos_token_id = ckpt_config.getint('decoder', 'eos_token_id')
print(f"{model_type} encoder_config: {encoder_config}")
print(f"{model_type} decoder_config: {decoder_config}")
if os.path.isfile("gemm_config.in") and rank == 0:
cmd = f"rm gemm_config.in"
print(f"Run {cmd}")
os.system(cmd)
translation_result_list = []
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult("hf-beamsearch-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-beamsearch", "HF"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult("ft-beamsearch-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-beamsearch", "FT"))
if rank == 0 and not skip_gemm:
is_fp16 = 1 if args_dict['data_type'] == 'fp16' else 0
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {beam_size} {128} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {is_fp16} {tensor_para_size} 1 > .tmp_gemm.log"
print(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult("hf-sampling-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-sampling", "HF"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult("ft-sampling-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-sampling", "FT"))
if rank == 0 and not skip_gemm:
is_fp16 = 1 if args_dict['data_type'] == 'fp16' else 0
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {1} {128} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {is_fp16} {tensor_para_size} 1 1 > .tmp_gemm.log"
print(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("1") != -1 or time_args.find("3") != -1:
ft_encoder_weight = FTT5EncoderWeight(
encoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
t5_with_moe=t5_with_moe,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type
)
ft_decoding_weight = FTT5DecodingWeight(
decoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
t5_with_moe=t5_with_moe,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
if args_dict["ckpt_path"] is not None:
ft_encoder_weight.load_from_bin(args_dict["ckpt_path"], model_type=model_type)
ft_decoding_weight.load_from_bin(args_dict["ckpt_path"], model_type=model_type)
else:
ft_encoder_weight.load_from_model(t5_model)
ft_decoding_weight.load_from_model(t5_model)
if args_dict['data_type'] == 'fp16':
t5_model = t5_model.half()
ft_encoder_weight.to_half()
ft_decoding_weight.to_half()
# This script assumes fixed sequence length, so using remove_padding will not benefit.
remove_padding = False
ft_encoder = FTT5Encoder(ft_encoder_weight.w, lib_path, encoder_config.num_heads,
encoder_config.d_kv, encoder_config.d_ff,
encoder_config.d_model, remove_padding, encoder_config.num_layers,
encoder_config.relative_attention_num_buckets, encoder_config.num_experts, encoder_config.moe_layer_index,
128, False, q_scaling, tensor_para_size, pipeline_para_size, t5_with_bias,
position_embedding_type, 0, activation_type)
ft_decoding = FTT5Decoding(ft_decoding_weight.w, lib_path,
decoder_config.num_heads, decoder_config.d_kv,
decoder_config.d_ff, encoder_config.d_model,
decoder_config.d_model, decoder_config.num_layers,
decoder_config.decoder_start_token_id,
# Set eos token id to -1 to effectively disable early stopping.
# decoder_config.eos_token_id,
-1,
decoder_config.vocab_size,
q_scaling,
decoder_config.relative_attention_num_buckets, decoder_config.num_experts, decoder_config.moe_layer_index, max_distance=128,
tensor_para_size=tensor_para_size, pipeline_para_size=pipeline_para_size,
t5_with_bias=t5_with_bias, moe_k=0, activation_type=activation_type,
position_embedding_type = position_embedding_type)
ft_t5 = FTT5(ft_encoder, ft_decoding)
input_token = InputTokens(batch_size, input_seq_len, decoder_config.decoder_start_token_id, decoder_config.eos_token_id, decoder_config.vocab_size)
for i in range(len(translation_result_list)):
sys.stdout.flush()
is_warmup = (translation_result_list[i].name.find("warmup") != -1)
min_duration = infer_duration if not is_warmup else 0
min_iterations = infer_iterations if not is_warmup else warmup_iterations
iter_idx = 0
start_time = datetime.now()
while iter_idx < min_iterations or (datetime.now() - start_time).total_seconds() < min_duration:
iter_idx += 1
if translation_result_list[i].frame_work == "HF":
if translation_result_list[i].name.find("beamsearch") != -1:
hf_outputs = t5_model.generate(input_token.input_ids.to("cuda"),
min_length=output_seq_len + 1,
max_length=output_seq_len + 1, # "+1" because HF counts <bos> as well.
early_stopping=False,
num_beams=beam_size)
elif translation_result_list[i].name.find("sampling") != -1:
hf_outputs = t5_model.generate(input_token.input_ids.to("cuda"),
min_length=output_seq_len + 1,
max_length=output_seq_len + 1, # "+1" because HF counts <bos> as well.
early_stopping=False,
do_sample=True,
top_k=topk if topk > 0 else None,
top_p=topp if topp > 0.0 else None)
translation_result_list[i].batch_ids_list.append(hf_outputs)
translation_result_list[i].batch_seq_len_list.append(np.ones(input_seq_len) * output_seq_len)
elif translation_result_list[i].frame_work == "FT":
tmp_beam_size = beam_size
if translation_result_list[i].name.find("sampling") != -1:
tmp_beam_size = 1
ft_decoding_outputs, ft_decoding_seq_lens = ft_t5(input_token,
None,
tmp_beam_size,
output_seq_len,
topk,
topp,
beam_search_diversity_rate=beam_search_diversity_rate)
translation_result_list[i].batch_ids_list.append(ft_decoding_outputs)
translation_result_list[i].batch_seq_len_list.append(ft_decoding_seq_lens)
translation_result_list[i].batch_num += 1
stop_time = datetime.now()
translation_result_list[i].execution_time = (stop_time - start_time).total_seconds()
if translation_result_list[i].name.find("warmup") != -1:
continue
for batch_token, batch_seq_len in zip(translation_result_list[i].batch_ids_list, translation_result_list[i].batch_seq_len_list):
for j in range(len(batch_token)):
if translation_result_list[i].frame_work == "HF":
translation_result_list[i].token_list.append(batch_token[j][1:])
translation_result_list[i].token_num += sum(batch_token[j][1:] != 0)
elif translation_result_list[i].frame_work == "FT":
translation_result_list[i].token_list.append(batch_token[j][0][:batch_seq_len[j][0]])
translation_result_list[i].token_num += batch_seq_len[j][0]
if rank == 0:
for t in translation_result_list:
if t.name.find("warmup") != -1:
continue
print(f"[INFO] {t.name} translates {t.batch_num} batches taking {t.execution_time:.2f} sec to translate "
f"{t.token_num} tokens ({(t.execution_time / t.batch_num * 1000):.4f} ms per batch), "
f"{(t.token_num / t.execution_time):.0f} tokens/sec.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--seq_len', type=int, default=256, metavar='NUMBER',
help='fixed output sequence length, excluding bos but including eos (default: 256)')
parser.add_argument('-inseq', '--input_seq_len', type=int, default=0, metavar='NUMBER',
help='fixed input sequence length, including eos (default: same as fixed output sequence length)')
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
help='''
                            Test the running time of the selected implementations (default: '', test none);
                            '': test none
'0': test hf_beamsearch
'1': test ft_beamsearch
'2': test hf_sampling
'3': test ft_sampling
                            e.g., if you want to test hf_beamsearch and ft_sampling,
                            then you need to use -time '03' ''')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search. Default is 0. When diversity rate = 0, it is equivalent to the naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type for inference (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-ld', '--load_data_type', type=str, default="fp32", metavar='STRING',
help='data type for loading weights (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-lib_path', '--lib_path', type=str, default="lib/libth_transformer.so", metavar='STRING',
help='the path of FasterTransformer pytorch t5 op library.')
parser.add_argument('-model_path', '--model_path', type=str, default=None, metavar='STRING',
help='T5 model path.')
parser.add_argument('-model', '--model', type=str, default="t5-small", metavar='STRING',
help='T5 model size. Only used when --model_path=None', choices=["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"])
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='size of tensor parallelism (default: 1)')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='size of pipeline parallelism (default: 1)')
# assume checkpoint config is also in the same path
parser.add_argument('--ckpt_path', type=str, help='path to the checkpoint file.')
parser.add_argument('--model_type', type=str, default="Huggingface", choices=["Huggingface", "Megatron", "Megatron-DeepSpeed"],
                        help='Megatron T5 uses bias and supports both absolute and relative positional embedding;'
                             'Huggingface T5 adopts the paper\'s implementation and has no bias')
# flags for performance benchmarking
parser.add_argument('-warmup_iter', '--warmup_iterations', type=int, default=1, metavar='NUMBER',
help='Number of warm-up iterations for each implementation.')
parser.add_argument('-iter', '--iterations', type=int, default=10, metavar='NUMBER',
help='Minimal number of inference iterations for each implementation.')
parser.add_argument('-duration', '--duration', type=int, default=3, metavar='NUMBER',
help='Minimal duration in seconds for inference iterations for each implementation.')
parser.add_argument('-seed', '--seed', type=int, default=0, metavar='NUMBER',
help='Random seed used to generate random input values.')
parser.add_argument('-skip_gemm', '--skip_gemm', action="store_true",
help='Skip the gemm autotuning by not calling the ./bin/t5_gemm binary.')
args = parser.parse_args()
translate(vars(args))
|
FasterTransformer-main
|
examples/pytorch/t5/perf_benchmark.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
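# usage example (checkpoint and data paths below are assumptions; adjust them to your own setup)
# mpirun -n 1 python3 examples/pytorch/t5/mnli_task_example.py \
#        --ckpt_path /path/to/megatron-t5/1-gpu --data_path /path/to/mnli/dev_matched.tsv \
#        --batch_size 32 --data_type fp16 --tensor_para_size 1 --pipeline_para_size 1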
import argparse
import configparser
import dataclasses
import json
import os
import pathlib
import time
import numpy as np
import torch
import torch.distributed as dist
import typing
from tqdm import tqdm
from omegaconf.omegaconf import OmegaConf, open_dict
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import (
TextToTextGLUEDataset,
TextToTextXNLIDataset,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from examples.pytorch.t5.utils.ft_encoder import FTT5EncoderWeight, FTT5Encoder
from examples.pytorch.t5.utils.ft_decoding import FTT5DecodingWeight, FTT5Decoding, FTT5
def _build_dataset(data_cfg, tokenizer):
if data_cfg.task_name == 'xnli':
dataset = TextToTextXNLIDataset(
data_cfg.file_path,
task_name=data_cfg.task_name,
tokenizer=tokenizer,
max_seq_length=data_cfg.max_seq_length,
lang_list=data_cfg.eval_languages,
)
else:
dataset = TextToTextGLUEDataset(
data_cfg.file_path,
task_name=data_cfg.task_name,
tokenizer=tokenizer,
max_seq_length=data_cfg.max_seq_length,
)
return dataset
def preds_and_labels_to_text(tokenizer, preds, labels):
preds = preds.cpu().numpy().tolist()
labels = labels.cpu().numpy().tolist()
preds = [pred[0] for pred in preds]
preds_text, labels_text = [], []
for _, (pred, label) in enumerate(zip(preds, labels)):
if tokenizer.eos_id in pred:
idx = pred.index(tokenizer.eos_id)
pred = pred[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(tokenizer, 'special_token_to_id'):
pred = [id for id in pred if id not in tokenizer.special_token_to_id.values()]
label = [id for id in label if id not in tokenizer.special_token_to_id.values()]
pred = tokenizer.ids_to_text(pred)
label = tokenizer.ids_to_text(label)
preds_text.append(pred)
labels_text.append(label)
return preds_text, labels_text
def accuracy_score(pred, ref):
assert len(pred) == len(ref)
total = len(pred)
correct = 0
for p, r in zip(pred, ref):
if p == r:
correct += 1
# else:
# print(f"[pred]: {p} [label]: {r}")
accuracy = correct / total
print(f"[accuracy]: {accuracy}")
return accuracy
@dataclasses.dataclass
class Metric:
acc: float
@dataclasses.dataclass
class RequestAndResult:
model_answer: str
target: str
metrics: Metric
class InputToken:
def __init__(self, input_ids, attention_mask):
self.input_ids = input_ids
self.attention_mask = attention_mask
class EncoderDecoderConfig:
def __init__(self, d_model, vocab_size, num_heads, d_kv, d_ff, num_layers,
relative_attention_num_buckets_or_max_pos_seq_len,
adapter_inter_size, adapter_norm_position, decoder_start_token_id=0, decoder_end_token_id=1):
self.d_model = d_model
self.vocab_size = vocab_size
self.num_heads = num_heads
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.relative_attention_num_buckets = relative_attention_num_buckets_or_max_pos_seq_len
self.adapter_inter_size = adapter_inter_size
self.adapter_norm_position = adapter_norm_position
self.decoder_start_token_id = decoder_start_token_id
self.decoder_end_token_id = decoder_end_token_id
data_type_mapping = {"fp32": 0, "fp16": 1, "bf16": 2}
def mnli_task(args_dict):
torch.set_printoptions(precision=6)
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_output_len = args_dict['max_output_len']
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
topk = args_dict['sampling_topk']
topp = args_dict['sampling_topp']
tensor_para_size = args_dict['tensor_para_size']
pipeline_para_size = args_dict['pipeline_para_size']
if args_dict['ckpt_path'] is None:
raise Exception("Megatron T5 model needs to specify checkpoint path !")
if dist.is_mpi_available():
try:
dist.init_process_group(backend='mpi')
rank = dist.get_rank()
except:
rank = dist.get_rank()
else:
rank = 0
assert dist.get_world_size() == tensor_para_size * pipeline_para_size
ckpt_path = pathlib.Path(args_dict['ckpt_path'])
## read checkpoint config if exists
ckpt_config = configparser.ConfigParser()
vocab_path = ckpt_path / "vocab.txt"
ckpt_config_path = ckpt_path / "config.ini"
if ckpt_config_path.is_file():
ckpt_config.read(ckpt_config_path)
## update structure config
t5_with_bias = ckpt_config.getboolean('structure', 't5_with_bias')
        ## Megatron T5 uses bias and absolute position embeddings
## relative position embedding -> 0, absolute position embedding -> 1
position_embedding_type = 0 if ckpt_config.get('structure', 'position_embedding_type') == 'relative' else 1
use_gated_activation = ckpt_config.getboolean('structure', 'use_gated_activation')
weight_data_type = {"fp16": np.float16, "fp32": np.float32}[ckpt_config.get("encoder", "weight_data_type")]
activation_type = ckpt_config.get('encoder', 'feed_forward_proj')
assert ckpt_config.getint("encoder", "tensor_para_size") == tensor_para_size
else:
raise Exception("config file does exist with the ckpt !")
if rank == 0:
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
lib_path = args_dict['lib_path']
## build tokenizer, dataset, dataloader
tokenizer_t5 = get_nmt_tokenizer(
library='megatron',
model_name='BertWordPieceCase',
tokenizer_model=None,
vocab_file=vocab_path.as_posix(),
merges_file=None,
legacy=False,
)
assert tokenizer_t5.bos_id == ckpt_config.getint("decoder", "decoder_start_token_id")
assert tokenizer_t5.eos_id == ckpt_config.getint("decoder", "eos_token_id")
token_params = {
tokenizer_t5.bos_token: tokenizer_t5.bos_id,
tokenizer_t5.eos_token: tokenizer_t5.eos_id,
tokenizer_t5.pad_token: tokenizer_t5.pad_id,
}
print(f"tokenizer special tokens: {token_params}")
mnli_cfg = OmegaConf.create({
"file_path": args_dict['data_path'],
"task_name": "mnli",
"max_seq_length": 512
})
mnli_dataset = _build_dataset(mnli_cfg, tokenizer_t5)
data_loader = torch.utils.data.DataLoader(
mnli_dataset,
collate_fn=mnli_dataset.collate_fn,
batch_size=batch_size,
num_workers=8,
pin_memory=True,
drop_last=True)
q_scaling = 1.0
encoder_config = EncoderDecoderConfig(ckpt_config.getint('encoder', 'd_model'),
ckpt_config.getint('encoder', 'vocab_size'),
ckpt_config.getint('encoder', 'num_heads'),
ckpt_config.getint('encoder', 'd_kv'),
ckpt_config.getint('encoder', 'd_ff'),
ckpt_config.getint('encoder', 'num_layers'),
ckpt_config.getint('encoder',
'relative_attention_num_buckets_or_max_pos_seq_len'),
ckpt_config.getint('encoder', 'adapter_inter_size', fallback=0),
ckpt_config.get('encoder', 'adapter_norm_position', fallback="pre")
)
decoder_config = EncoderDecoderConfig(ckpt_config.getint('decoder', 'd_model'),
ckpt_config.getint('decoder', 'vocab_size'),
ckpt_config.getint('decoder', 'num_heads'),
ckpt_config.getint('decoder', 'd_kv'),
ckpt_config.getint('decoder', 'd_ff'),
ckpt_config.getint('decoder', 'num_layers'),
ckpt_config.getint('decoder',
'relative_attention_num_buckets_or_max_pos_seq_len'),
ckpt_config.getint('decoder', 'adapter_inter_size', fallback=0),
ckpt_config.get('decoder', 'adapter_norm_position', fallback="pre"),
tokenizer_t5.bos_id,
tokenizer_t5.eos_id
)
## run gemm test
if os.path.isfile("gemm_config.in") and rank == 0:
cmd = f"rm gemm_config.in"
print(f"Run {cmd}")
os.system(cmd)
if rank == 0:
data_type = data_type_mapping[args_dict['data_type']]
cmd = f"./bin/t5_gemm {batch_size // pipeline_para_size} {beam_size} {128} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {data_type} {tensor_para_size} 1 > .tmp_gemm.log"
print(f"Run gemm test: {cmd}")
os.system(cmd)
dist.barrier()
ft_encoder_weight = FTT5EncoderWeight(
encoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
ft_decoding_weight = FTT5DecodingWeight(
decoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
ft_encoder_weight.load_from_bin(ckpt_path.as_posix(), "Megatron")
ft_decoding_weight.load_from_bin(ckpt_path.as_posix(), "Megatron")
if args_dict['data_type'] == 'fp16':
ft_encoder_weight.to_half()
ft_decoding_weight.to_half()
elif args_dict['data_type'] == 'fp32':
ft_encoder_weight.to_single()
ft_decoding_weight.to_single()
elif args_dict['data_type'] == 'bf16':
ft_encoder_weight.to_bfloat16()
ft_decoding_weight.to_bfloat16()
remove_padding = True if batch_size > 32 else False
ft_encoder = FTT5Encoder(ft_encoder_weight.w, lib_path, encoder_config.num_heads,
encoder_config.d_kv, encoder_config.d_ff,
encoder_config.d_model, remove_padding, encoder_config.num_layers,
encoder_config.relative_attention_num_buckets,
0, # num_experts
[], # moe_layer_index
128, False,
q_scaling, tensor_para_size, pipeline_para_size, t5_with_bias,
position_embedding_type, 0, activation_type,
adapter_inter_size=encoder_config.adapter_inter_size,
adapter_norm_position=encoder_config.adapter_norm_position)
ft_decoding = FTT5Decoding(ft_decoding_weight.w, lib_path,
decoder_config.num_heads, decoder_config.d_kv,
decoder_config.d_ff, encoder_config.d_model,
decoder_config.d_model, decoder_config.num_layers,
decoder_config.decoder_start_token_id, decoder_config.decoder_end_token_id,
decoder_config.vocab_size,
q_scaling,
decoder_config.relative_attention_num_buckets,
0, # num_experts
[], # moe_layer_index
max_distance=128,
tensor_para_size=tensor_para_size, pipeline_para_size=pipeline_para_size,
moe_k=0, activation_type=activation_type,
t5_with_bias=t5_with_bias, position_embedding_type=position_embedding_type,
adapter_inter_size=decoder_config.adapter_inter_size,
adapter_norm_position=decoder_config.adapter_norm_position)
ft_t5 = FTT5(ft_encoder, ft_decoding)
preds_list = []
labels_list = []
results_list = []
start = time.time()
for idx, batch in tqdm(enumerate(data_loader)):
input_token = InputToken(batch['text_enc'], batch['enc_mask'])
ft_decoding_outputs, ft_decoding_seq_lens = ft_t5(input_token,
None,
beam_size,
max_output_len,
topk,
topp,
beam_search_diversity_rate=beam_search_diversity_rate,
is_return_output_log_probs=args_dict["return_output_log_probs"],
is_return_cum_log_probs=args_dict["return_cum_log_probs"])
preds, labels = preds_and_labels_to_text(tokenizer_t5, torch.IntTensor(ft_decoding_outputs), batch['labels'])
labels_list += labels
preds_list += preds
results_list.extend([
RequestAndResult(
model_answer=pred,
target=label,
metrics=Metric(acc=pred == label)
)
for pred, label in zip(preds, labels)
])
end = time.time()
if rank == 0:
print(f"\n[Elapsed Time]: {end - start} seconds")
accuracy = accuracy_score(preds_list, labels_list)
output_path = args_dict.get("output_path")
if output_path is not None and rank == 0:
output_path = pathlib.Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as output_file:
results = {
"results": {
"mnli": {
"acc": accuracy
}
},
"output": {
"mnli": [
dataclasses.asdict(r) for r in results_list
]
}
}
json.dump(results, output_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_output_len', type=int, default=10, metavar='NUMBER',
help='max output length (default: 10)')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search. Default is 0. When diversity rate = 0, it is equivalent to the naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-lib_path', '--lib_path', type=str, default="/workspace/FasterTransformer/build/lib/libth_transformer.so", metavar='STRING',
help='the path of FasterTransformer pytorch t5 op library.')
parser.add_argument('-data_path', '--data_path', type=str, required=True, help="the MNLI task data path")
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='size of tensor parallelism (default: 1)')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='size of pipeline parallelism (default: 1)')
# assume checkpoint config is also in the same path
parser.add_argument('--ckpt_path', type=str, help='path to the checkpoint file.')
parser.add_argument('--output_path', help='path to results file with calculated metrics.')
parser.add_argument('--return_output_log_probs', action='store_true',
help='Return the log probability of generated tokens.')
parser.add_argument('--return_cum_log_probs', action='store_true',
help='Return the cumulative log probability of generated tokens.')
args = parser.parse_args()
mnli_task(vars(args))
|
FasterTransformer-main
|
examples/pytorch/t5/mnli_task_example.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This example is used to verify the correctness on the summarization task, so we don't
include benchmark testing in this example.
'''
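# usage sketch (model locations below match the argparse defaults; adjust them to your own setup)
# python3 examples/pytorch/t5/summarization.py --test_ft --test_hf --data_type fp16 \
#         --ft_model_location /models/T5/HF/t5-base/c-models/ \
#         --hf_model_location /models/T5/HF/t5-base/ \
#         --lib_path ./lib/libth_transformer.so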
from __future__ import print_function
import argparse
import json
import numpy as np
import os
import sys
import torch
import torch.distributed as dist
from datasets import load_dataset, load_metric
# dir_path = os.path.dirname(os.path.realpath(__file__))
# sys.path.append(dir_path + "/../../../3rdparty/transformers/src/")
from transformers import T5ForConditionalGeneration, AutoTokenizer, T5Config
from tqdm import tqdm
import configparser
import math
import datetime
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.t5.utils.ft_decoding import FTT5DecodingWeight, FTT5Decoding, FTT5
from examples.pytorch.t5.utils.ft_encoder import FTT5EncoderWeight, FTT5Encoder
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--ft_model_location', type=str,
default='/models/T5/HF/t5-base/c-models/')
parser.add_argument('--hf_model_location', type=str,
default='/models/T5/HF/t5-base/')
parser.add_argument('--disable_summarize', action='store_true')
parser.add_argument('--test_hf', action='store_true')
parser.add_argument('--test_ft', action='store_true')
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument("--cache_path", type=str, default="/workdir/datasets/ccdv/")
parser.add_argument("--max_ite", type=int, default=20)
parser.add_argument("--max_seq_len", type=int, default=200)
parser.add_argument("--ft_use_hf_config", action="store_true",
help="use the hyper-parameters from the hf model")
parser.add_argument('--lib_path', type=str, default='./lib/libth_transformer.so',
help='path to the pyt_fastertransformer dynamic lib file.')
parser.add_argument('--tensor_para_size', type=int, default=1,
help='tensor parallel size')
parser.add_argument('--pipeline_para_size', type=int, default=1,
help='pipeline parallel size')
parser.add_argument('--rougeLsum_threshold', type=float,
help='Threshold of FT rougeLsum score')
parser.add_argument("--top_k", type=int, default=1, help="top k for sampling")
parser.add_argument("--top_p", type=float, default=0.0, help="top p for sampling")
parser.add_argument("--beam_width", type=int, default=1, help="beam width for beam search")
args = parser.parse_args()
np.random.seed(0) # rouge score use sampling to compute the score
if dist.is_mpi_available():
try:
dist.init_process_group(backend='mpi')
rank = dist.get_rank()
except:
rank = dist.get_rank()
else:
rank = 0
disable_summarize = args.disable_summarize
test_hf = args.test_hf
test_ft = args.test_ft
tensor_para_size = args.tensor_para_size
pipeline_para_size = args.pipeline_para_size
ft_model_location = args.ft_model_location + f"/{tensor_para_size}-gpu/"
hf_model_location = args.hf_model_location
tokenizer = AutoTokenizer.from_pretrained(hf_model_location)
tokenizer.pad_token = tokenizer.eos_token
dataset_cnn = load_dataset("ccdv/cnn_dailymail", '3.0.0', cache_dir=args.cache_path)
if rank == 0 and test_hf:
start_time = datetime.datetime.now()
if args.data_type == "fp32":
model = T5ForConditionalGeneration.from_pretrained(hf_model_location, torch_dtype=torch.float32).cuda()
elif args.data_type == "fp16":
model = T5ForConditionalGeneration.from_pretrained(hf_model_location, torch_dtype=torch.float16).cuda()
elif args.data_type == "bf16":
model = T5ForConditionalGeneration.from_pretrained(hf_model_location, torch_dtype=torch.bfloat16).cuda()
stop_time = datetime.datetime.now()
print(f"[INFO] load HF model spend {(stop_time - start_time).total_seconds()} sec")
if test_ft:
ckpt_config = configparser.ConfigParser()
ckpt_config_path = os.path.join(ft_model_location, 'config.ini')
if os.path.isfile(ckpt_config_path):
ckpt_config.read(ckpt_config_path)
else:
assert False, "[ERROR] This example only support loading model with FT format directly."
weight_data_type = np.float32
weight_data_type = {"fp16": np.float16, "fp32": np.float32}[ckpt_config.get("encoder", "weight_data_type")]
relative_attention_max_distance = 128
encoder_config = T5Config(vocab_size=ckpt_config.getint("encoder", "vocab_size"),
d_model=ckpt_config.getint("encoder", "d_model"),
d_kv=ckpt_config.getint("encoder", "d_kv"),
d_ff=ckpt_config.getint("encoder", "d_ff"),
num_layers=ckpt_config.getint("encoder", "num_layers"),
num_decoder_layers=ckpt_config.getint("encoder", "num_decoder_layers"),
num_heads=ckpt_config.getint("encoder", "num_heads"),
relative_attention_num_buckets=ckpt_config.getint(
"encoder", "relative_attention_num_buckets_or_max_pos_seq_len"),
feed_forward_proj=ckpt_config.get("encoder", "feed_forward_proj"),
pad_token_id=ckpt_config.getint("encoder", "pad_token_id"),
eos_token_id=ckpt_config.getint("encoder", "eos_token_id"),
is_gated_act=ckpt_config.getboolean("encoder", "is_gated_act", fallback=0),
)
decoder_config = T5Config(vocab_size=ckpt_config.getint("decoder", "vocab_size"),
d_model=ckpt_config.getint("decoder", "d_model"),
d_kv=ckpt_config.getint("decoder", "d_kv"),
d_ff=ckpt_config.getint("decoder", "d_ff"),
num_layers=ckpt_config.getint("decoder", "num_layers"),
num_decoder_layers=ckpt_config.getint("decoder", "num_decoder_layers"),
num_heads=ckpt_config.getint("decoder", "num_heads"),
relative_attention_num_buckets=ckpt_config.getint(
"decoder", "relative_attention_num_buckets_or_max_pos_seq_len"),
feed_forward_proj=ckpt_config.get("decoder", "feed_forward_proj"),
pad_token_id=ckpt_config.getint("decoder", "pad_token_id"),
eos_token_id=ckpt_config.getint("decoder", "eos_token_id"),
decoder_start_token_id=ckpt_config.getint("decoder", "decoder_start_token_id"),
is_gated_act=ckpt_config.getboolean("decoder", "is_gated_act", fallback=0),
)
assert decoder_config.feed_forward_proj == encoder_config.feed_forward_proj
t5_with_bias = ckpt_config.getboolean("structure", "t5_with_bias")
use_gated_activation = encoder_config.is_gated_act
position_embedding_type = 0 if ckpt_config.get('structure', 'position_embedding_type') == 'relative' else 1
activation_type = encoder_config.feed_forward_proj
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L1660
# if tie_word_embeddings == True, scale the decoder output by sequence_output = sequence_output * (self.model_dim**-0.5)
tie_word_embeddings = ckpt_config.getboolean("decoder", "tie_word_embeddings")
ft_encoder_weight = FTT5EncoderWeight(
encoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type
)
ft_decoding_weight = FTT5DecodingWeight(
decoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
start_time = datetime.datetime.now()
ft_encoder_weight.load_from_bin(ft_model_location, "Megatron")
stop_time = datetime.datetime.now()
print(f"[INFO] load FT encoder model spend {(stop_time - start_time).total_seconds()} sec")
start_time = datetime.datetime.now()
ft_decoding_weight.load_from_bin(ft_model_location, "Megatron")
stop_time = datetime.datetime.now()
print(f"[INFO] load FT decoding model spend {(stop_time - start_time).total_seconds()} sec")
if args.data_type == "fp32":
ft_encoder_weight.to_float()
ft_decoding_weight.to_float()
elif args.data_type == "fp16":
ft_encoder_weight.to_half()
ft_decoding_weight.to_half()
elif args.data_type == "bf16":
ft_encoder_weight.to_bfloat16()
ft_decoding_weight.to_bfloat16()
ft_encoder_weight.to_cuda()
ft_decoding_weight.to_cuda()
q_scaling = 1.0 / (math.sqrt(encoder_config.d_kv))
remove_padding = True
ft_encoder = FTT5Encoder(ft_encoder_weight.w, args.lib_path, encoder_config.num_heads,
encoder_config.d_kv, encoder_config.d_ff,
encoder_config.d_model, remove_padding, encoder_config.num_layers,
encoder_config.relative_attention_num_buckets,
0, # num_experts
[], # moe_layer_index
relative_attention_max_distance, False, q_scaling, tensor_para_size,
pipeline_para_size, t5_with_bias,
position_embedding_type, moe_k=0, activation_type=activation_type)
ft_decoding = FTT5Decoding(ft_decoding_weight.w, args.lib_path,
decoder_config.num_heads, decoder_config.d_kv,
decoder_config.d_ff, encoder_config.d_model,
decoder_config.d_model, decoder_config.num_layers,
decoder_config.decoder_start_token_id, decoder_config.eos_token_id,
decoder_config.vocab_size, q_scaling,
decoder_config.relative_attention_num_buckets,
0, # num_experts
[], # moe_layer_index
max_distance=relative_attention_max_distance,
tensor_para_size=tensor_para_size, pipeline_para_size=pipeline_para_size,
t5_with_bias=t5_with_bias, position_embedding_type=position_embedding_type,
moe_k=0, activation_type=activation_type, tie_word_embeddings=tie_word_embeddings)
ft_t5 = FTT5(ft_encoder, ft_decoding)
beam_width = args.beam_width
top_k = args.top_k
top_p = args.top_p
def summarize_ft(datapoint):
if not disable_summarize:
line = "summarize: " + datapoint['article']
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_tokens = tokenizer(line, return_tensors='pt')
with torch.no_grad():
output, ft_output_len = ft_t5(line_tokens,
None,
beam_width,
args.max_seq_len,
top_k,
top_p,
beam_search_diversity_rate=0.0,
is_return_output_log_probs=False,
len_penalty=1.0,
is_return_cum_log_probs=False)
tokens = [output[0][beam_idx][:ft_output_len[0][beam_idx]] for beam_idx in range(beam_width)]
output_lines = [tokenizer.decode(output[0][beam_idx][:ft_output_len[0][beam_idx]], skip_special_tokens=True) for beam_idx in range(beam_width)]
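        # Keep only the first four '.'-separated segments (roughly four sentences) of each decoded summary.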
output_lines = [".".join(output_line.split('.')[:4]) + "." for output_line in output_lines]
return output_lines, tokens
def summarize_hf(datapoint):
if not disable_summarize:
line = "summarize: " + datapoint['article']
else:
line = datapoint['article']
line = line.strip()
line = line.replace(" n't", "n't")
line_encoded = tokenizer.encode(line, return_tensors='pt')
line_encoded = line_encoded.cuda()
with torch.no_grad():
if beam_width > 1:
output = model.generate(line_encoded,
max_length=args.max_seq_len + 1,
num_beams=beam_width,
num_return_sequences=beam_width,
early_stopping=True,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id)
else:
output = model.generate(line_encoded,
max_length=args.max_seq_len + 1,
do_sample=True,
top_k=top_k if top_k > 0 else None,
top_p=top_p if top_p > 0.0 else None,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id)
tokens = [output[beam_idx].cpu().numpy() for beam_idx in range(beam_width)]
output_lines = [tokenizer.decode(output[beam_idx], skip_special_tokens=True) for beam_idx in range(beam_width)]
output_lines = [".".join(output_line.split('.')[:4]) + "." for output_line in output_lines]
return output_lines, tokens
if disable_summarize:
tokens = []
else:
metric_fts = [load_metric("rouge") for beam_idx in range(beam_width)]
metric_hfs = [load_metric("rouge") for beam_idx in range(beam_width)]
if not disable_summarize:
datapoint = dataset_cnn['test'][0]
if test_ft:
summary_ft, _ = summarize_ft(datapoint)
if rank == 0:
print('---------------------------------------------------------')
print('FT Generated : ')
print(' Article : ', datapoint['article'])
print('\n Highlights : ', datapoint['highlights'])
print('\n Summary : ', summary_ft)
print('---------------------------------------------------------')
for i in range(beam_width):
metric_fts[i].add_batch(predictions=[summary_ft[i]], references=[[datapoint['highlights']]])
if test_hf and rank == 0:
summary_hf, _ = summarize_hf(datapoint)
print('---------------------------------------------------------')
print('HF Generated : ')
print(' Article : ', datapoint['article'])
print('\n Highlights : ', datapoint['highlights'])
print('\n Summary : ', summary_hf)
print('---------------------------------------------------------')
for i in range(beam_width):
metric_hfs[i].add_batch(predictions=[summary_hf[i]], references=[[datapoint['highlights']]])
ft_time = 0.0
hf_time = 0.0
for data_point_idx in tqdm(range(1, 11490, int(11490 / args.max_ite))):
try:
datapoint = dataset_cnn['test'][data_point_idx]
start_time = datetime.datetime.now()
if test_ft:
summary_ft, tokens_ft = summarize_ft(datapoint)
stop_time = datetime.datetime.now()
ft_time += (stop_time - start_time).total_seconds()
if rank == 0 and ((test_hf and not disable_summarize) or disable_summarize):
start_time = datetime.datetime.now()
summary_hf, tokens_hf = summarize_hf(datapoint)
stop_time = datetime.datetime.now()
hf_time += (stop_time - start_time).total_seconds()
if rank == 0:
if not disable_summarize:
if test_ft:
for i in range(beam_width):
metric_fts[i].add_batch(predictions=[summary_ft[i]], references=[[datapoint['highlights']]])
if test_hf:
for i in range(beam_width):
metric_hfs[i].add_batch(predictions=[summary_hf[i]], references=[[datapoint['highlights']]])
else:
tokens.append((tokens_ft, tokens_hf))
except Exception as e:
print(f'Error with datapoint {data_point_idx}: {e}')
def compute_exact_match(tokens, n_tokens=[1, 10, 25, 50, 100, 150, 200, 250]):
em_metrics = []
for t in n_tokens:
errors = 0
total = 0
for ft_tokens, hf_tokens in tokens:
if len(ft_tokens) > t and len(hf_tokens) > t:
total = total + 1
if not np.array_equal(ft_tokens[:t], hf_tokens[:t]):
errors = errors + 1
if total > 0:
print(f"{t}-token exact match acc: {100*(1-errors/total):.2f}")
em_metrics.append(1 - errors / total)
else:
em_metrics.append(np.nan)
return em_metrics
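# A tiny self-check sketch (hypothetical data, not part of the original script): the two
# sequences below agree on their first 10 tokens only, so the 1- and 10-token exact-match
# accuracies are 100% while longer prefixes score 0%.
def _example_exact_match():
    ft = np.arange(300)
    hf = ft.copy()
    hf[10:] = -1  # diverge after the first 10 tokens
    return compute_exact_match([(ft, hf)], n_tokens=[1, 10, 25, 50])  # -> [1.0, 1.0, 0.0, 0.0]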
if rank == 0:
if not disable_summarize:
if test_ft:
computed_metrics_fts = [metric_ft.compute() for metric_ft in metric_fts]
if test_hf:
computed_metrics_hfs = [metric_hf.compute() for metric_hf in metric_hfs]
print(f'Hugging Face (total latency: {hf_time} sec)')
for i in range(beam_width):
computed_metrics_hf = computed_metrics_hfs[i]
print(f"beam_id: {i}")
for key in computed_metrics_hf.keys():
print(f'{key} : {computed_metrics_hf[key].mid[2]*100}')
print()
if test_ft:
print(f'Faster Transformers (total latency: {ft_time} sec)')
for i in range(beam_width):
computed_metrics_ft = computed_metrics_fts[i]
print(f"beam_id: {i}")
for key in computed_metrics_ft.keys():
print(f'{key} : {computed_metrics_ft[key].mid[2]*100}')
print()
if args.rougeLsum_threshold is not None:
assert computed_metrics_fts[0]["rougeLsum"].mid[2] * \
100 >= args.rougeLsum_threshold, "[INFO] TEST FAIL !"
print(f"[INFO] TEST PASS !")
else:
em_metrics = compute_exact_match(tokens)
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/t5/summarization.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
from datetime import datetime
import multiprocessing
import shutil
from pathlib import Path
import numpy as np
import torch # pytype: disable=import-error
import sys
sys.path.append("/workdir/megatron-lm")
shared_mapping = {
"wte":"shared.weight",
"wte_T":"shared.weight_T",
"ape":"shared.ape",
"encoder_rpe":"block.0.layer.0.SelfAttention.relative_attention_bias",
"decoder_rpe":"block.0.layer.0.SelfAttention.relative_attention_bias"
}
encoder_mapping = {
"input_layernorm":"layer.0.layer_norm",
"self_attention.query_key_value":["layer.0.SelfAttention.q", "layer.0.SelfAttention.k", "layer.0.SelfAttention.v"],
"self_attention.dense":"layer.0.SelfAttention.o",
"post_attention_layernorm":"layer.1.layer_norm",
"mlp.dense_h_to_4h":"layer.1.DenseReluDense.wi",
"mlp.dense_4h_to_h":"layer.1.DenseReluDense.wo",
"final_layernorm":"final_layer_norm"
}
decoder_mapping = {
"input_layernorm":"layer.0.layer_norm",
"self_attention.query_key_value":["layer.0.SelfAttention.qkv"],
"self_attention.dense":"layer.0.SelfAttention.o",
"post_attention_layernorm":"layer.1.layer_norm",
"inter_attention.query":["layer.1.EncDecAttention.q"],
"inter_attention.key_value":["layer.1.EncDecAttention.k","layer.1.EncDecAttention.v"],
"inter_attention.dense":"layer.1.EncDecAttention.o",
"post_inter_attention_layernorm":"layer.2.layer_norm",
"mlp.dense_h_to_4h":"layer.2.DenseReluDense.wi",
"mlp.dense_4h_to_h":"layer.2.DenseReluDense.wo",
"final_layernorm":"final_layer_norm"
}
megatron_HF_name_mapping = {
"shared":shared_mapping,
"encoder":encoder_mapping,
"decoder":decoder_mapping
}
encoder_config_mapping = {
"num_attention_heads":"num_heads",
"kv_channels":"d_kv",
"hidden_size":"d_model",
"ffn_hidden_size":"d_ff",
"num_layers":"num_layers",
"padded_vocab_size":"vocab_size",
"max_position_embeddings":"relative_attention_num_buckets_or_max_pos_seq_len",
"relative_position_num_buckets":"relative_attention_num_buckets_or_max_pos_seq_len"
}
decoder_config_mapping = {
"num_attention_heads":"num_heads",
"kv_channels":"d_kv",
"hidden_size":"d_model",
"ffn_hidden_size":"d_ff",
"num_layers":"num_layers",
"padded_vocab_size":"vocab_size",
"max_position_embeddings":"relative_attention_num_buckets_or_max_pos_seq_len",
"relative_position_num_buckets":"relative_attention_num_buckets_or_max_pos_seq_len"
}
decoder_new_config = {
"decoder_start_token_id":30522, ## need to adjust
"eos_token_id":30523 ## need to adjust
}
model_new_config = {"structure":{"t5_with_bias": "true", "use_gated_activation": "false", "position_embedding_type": "absolute"}}
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def convert_megatron_to_HF_naming_style_single(saved_key, name_mapping):
saved_key = saved_key.replace("layers","block")
mapping_key = saved_key.rsplit(sep=".", maxsplit=1)[0]
mapping_key_no_num = mapping_key[mapping_key.find(".", 6) + 1 :]
block_num = mapping_key[: mapping_key.find(".", 6) + 1]
weight_or_bias = saved_key.rsplit(sep=".", maxsplit=1)[1]
saved_key = block_num + name_mapping[mapping_key_no_num] + "." + weight_or_bias
return saved_key
def convert_megatron_to_HF_naming_style_multiple(saved_key, name_mapping):
saved_key = saved_key.replace("layers","block")
mapping_key = saved_key.rsplit(sep=".", maxsplit=1)[0]
mapping_key_no_num = mapping_key[mapping_key.find(".", 6) + 1 :]
mapping_vals_no_num = name_mapping[mapping_key_no_num]
block_num = mapping_key[: mapping_key.find(".", 6) + 1]
weight_or_bias = saved_key.rsplit(sep=".", maxsplit=1)[1]
saved_keys = [block_num + val + "." + weight_or_bias for val in mapping_vals_no_num]
return saved_keys
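# A hedged illustration (layer index 3 is arbitrary) of the conversion above: a fused
# Megatron QKV key expands into the three Hugging Face style q/k/v names defined in
# encoder_mapping.
def _example_qkv_name_mapping():
    return convert_megatron_to_HF_naming_style_multiple(
        "layers.3.self_attention.query_key_value.weight", encoder_mapping)
    # -> ["block.3.layer.0.SelfAttention.q.weight",
    #     "block.3.layer.0.SelfAttention.k.weight",
    #     "block.3.layer.0.SelfAttention.v.weight"]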
def _gpu_map_location(storage, loc):
if loc.startswith("cuda"):
training_gpu_idx = int(loc.split(":")[1])
inference_gpu_idx = training_gpu_idx % torch.cuda.device_count()
return storage.cuda(inference_gpu_idx)
elif loc.startswith("cpu"):
return storage.cpu()
else:
raise NotImplementedError(f"Not handled {loc}")
# This tool is used to support the new megatron model trained by pipeline parallel + tensor parallel
def merge_and_convert_process(model_type, i, pipeline_para_rank, saved_dir, factor, key, model_args, transformer_model_list, ckpt_ver, np_weight_data_type):
prefix = model_type
name_mapping = megatron_HF_name_mapping[model_type]
saved_dir = Path(saved_dir)
if key.find("layers.") != -1:
layer_index = (int)(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pipeline_para_rank * model_args.num_layers // model_args.pipeline_model_parallel_size))
else:
saved_key = key
major_device = transformer_model_list[0][key].device
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("self_attention.dense.bias") != -1
or key.find("inter_attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_inter_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("post_inter_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1):
# shared weights, only need to convert the weights of rank 0
if i == 0:
val = transformer_model_list[0][key].T.float().cpu().numpy()
saved_key = convert_megatron_to_HF_naming_style_single(saved_key, name_mapping)
saved_path = saved_dir / f"{prefix}.{saved_key}.bin"
np.squeeze(val).astype(np_weight_data_type).tofile(saved_path)
elif (key.find("self_attention.dense.weight") != -1
or key.find("inter_attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1):
vals = []
for k in range(factor):
vals.append(transformer_model_list[k][key].T.float().to(major_device))
saved_key = convert_megatron_to_HF_naming_style_single(saved_key, name_mapping)
saved_path = saved_dir / f"{prefix}.{saved_key}.{i:d}.bin"
torch.cat(vals, dim=0).cpu().numpy().astype(np_weight_data_type).tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
vals = []
for k in range(factor):
vals.append(transformer_model_list[k][key].T.float().to(major_device))
saved_key = convert_megatron_to_HF_naming_style_single(saved_key, name_mapping)
saved_path = saved_dir / f"{prefix}.{saved_key}.{i:d}.bin"
torch.cat(vals, dim=-1).cpu().numpy().astype(np_weight_data_type).tofile(saved_path)
elif (key.find("self_attention.query_key_value.bias") != -1
or key.find("inter_attention.query.bias") != -1
or key.find("inter_attention.key_value.bias") != -1):
num_splits = 3
if key.find("inter_attention.key_value.bias") != -1:
num_splits = 2
if key.find("inter_attention.query.bias") != -1:
num_splits = 1
vals = []
for k in range(factor):
val = transformer_model_list[k][key].T.float()
local_dim = int(val.shape[-1] / num_splits)
if ckpt_ver == 3:
head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size
size_per_head = model_args.kv_channels
val = val.reshape(head_num, num_splits, size_per_head)
val = val.permute(1, 0, 2)
val = val.reshape(num_splits, local_dim)
vals.append(val.to(major_device))
saved_vals = torch.cat(vals, dim=-1)
saved_keys = convert_megatron_to_HF_naming_style_multiple(saved_key, name_mapping)
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{i:d}.bin"
saved_vals.cpu().numpy().astype(np_weight_data_type).tofile(saved_path)
return
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{i:d}.bin"
saved_vals[index,...].cpu().numpy().astype(np_weight_data_type).tofile(saved_path)
elif (key.find("self_attention.query_key_value.weight") != -1
or key.find("inter_attention.query.weight") != -1
or key.find("inter_attention.key_value.weight") != -1):
num_splits = 3
if key.find("inter_attention.key_value.weight") != -1:
num_splits = 2
if key.find("inter_attention.query.weight") != -1:
num_splits = 1
vals = []
for k in range(factor):
val = transformer_model_list[k][key].T.float()
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / num_splits)
if ckpt_ver == 3:
head_num = model_args.num_attention_heads
size_per_head = model_args.kv_channels
head_num = head_num // model_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.permute(0, 2, 1, 3)
val = val.reshape(hidden_dim, num_splits, local_dim)
vals.append(val.to(major_device))
saved_vals = torch.cat(vals, dim=-1)
saved_keys = convert_megatron_to_HF_naming_style_multiple(saved_key, name_mapping)
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{i:d}.bin"
saved_vals.cpu().numpy().astype(np_weight_data_type).tofile(saved_path)
return
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{i:d}.bin"
saved_vals[:, index, ...].cpu().numpy().astype(np_weight_data_type).tofile(saved_path)
else:
print(f"[ERROR] cannot find key '{key}'")
def split_and_convert_process(model_type, i, pipeline_para_rank, saved_dir, factor, key, model_args, transformer_model_list, ckpt_ver, np_weight_data_type):
prefix = model_type
name_mapping = megatron_HF_name_mapping[model_type]
val = transformer_model_list[0][key].T.float().cpu().numpy().astype(np_weight_data_type)
del transformer_model_list[0][key]
if key.find("layers.") != -1:
layer_index = (int)(key[7 : key.find(".", 7)])
saved_key = key.replace(
"layers.%d." % layer_index,
"layers.%d." % (layer_index + pipeline_para_rank * model_args.num_layers // model_args.pipeline_model_parallel_size))
else:
saved_key = key
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("self_attention.dense.bias") != -1
or key.find("inter_attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_inter_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("post_inter_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1):
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_key = convert_megatron_to_HF_naming_style_single(saved_key, name_mapping)
saved_path = saved_dir / f"{prefix}.{saved_key}.bin"
val.tofile(saved_path.as_posix())
elif (key.find("self_attention.dense.weight") != -1
or key.find("inter_attention.dense.weight") != -1
or key.find("mlp.dense_4h_to_h.weight") != -1):
split_vals = np.split(val, factor, axis=0)
saved_key = convert_megatron_to_HF_naming_style_single(saved_key, name_mapping)
for j in range(factor):
saved_path = saved_dir / f"{prefix}.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
saved_key = convert_megatron_to_HF_naming_style_single(saved_key, name_mapping)
for j in range(factor):
saved_path = saved_dir / f"{prefix}.{saved_key}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (key.find("self_attention.query_key_value.bias") != -1
or key.find("inter_attention.query.bias") != -1
or key.find("inter_attention.key_value.bias") != -1):
num_splits = 3
if key.find("inter_attention.key_value.bias") != -1:
num_splits = 2
if key.find("inter_attention.query.bias") != -1:
num_splits = 1
local_dim = int(val.shape[-1] / num_splits)
if ckpt_ver == 3:
head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size
size_per_head = model_args.kv_channels
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(num_splits, local_dim)
split_vals = np.split(val, factor, axis=-1)
saved_keys = convert_megatron_to_HF_naming_style_multiple(saved_key, name_mapping)
for j in range(factor):
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
continue
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{i * factor + j:d}.bin"
split_vals[j][index, ...].tofile(saved_path.as_posix())
elif (key.find("self_attention.query_key_value.weight") != -1
or key.find("inter_attention.query.weight") != -1
or key.find("inter_attention.key_value.weight") != -1):
num_splits = 3
if key.find("inter_attention.key_value.weight") != -1:
num_splits = 2
if key.find("inter_attention.query.weight") != -1:
num_splits = 1
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / num_splits)
if ckpt_ver == 3:
head_num = model_args.num_attention_heads
size_per_head = model_args.kv_channels
head_num = head_num // model_args.tensor_model_parallel_size
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, num_splits, local_dim)
split_vals = np.split(val, factor, axis=-1)
saved_keys = convert_megatron_to_HF_naming_style_multiple(saved_key, name_mapping)
for j in range(factor):
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{i * factor + j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
continue
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{i * factor + j:d}.bin"
split_vals[j][:, index, ...].tofile(saved_path.as_posix())
else:
print(f"[ERROR] cannot find key '{key}'")
def convert_checkpoint(args):
saved_dir = Path(args.saved_dir) / f"{args.infer_gpu_num:d}-gpu"
saved_dir.mkdir(parents=True, exist_ok=True)
if args.vocab_path:
shutil.copy(args.vocab_path, (saved_dir / "vocab.json").as_posix())
if args.merges_path:
shutil.copy(args.merges_path, (saved_dir / "merges.txt").as_posix())
prefix = Path(args.in_file)
ckpt_name = "model_optim_rng.pt"
# load position_embedding from rank 0
if (prefix / "mp_rank_00").is_dir():
model_00 = torch.load((prefix / "mp_rank_00" / ckpt_name).as_posix(), map_location=_gpu_map_location)
elif (prefix / "mp_rank_00_000").is_dir():
model_00 = torch.load((prefix / "mp_rank_00_000" / ckpt_name).as_posix(), map_location=_gpu_map_location)
else:
print(f"[ERROR] Cannot find checkpoint in {prefix}.")
exit(1)
model_args = model_00["args"]
# update model structure config
if not hasattr(model_args, 'position_embedding_type') or model_args.position_embedding_type == "absolute":
model_new_config["structure"]["position_embedding_type"] = "absolute"
config = configparser.ConfigParser()
config["encoder"] = {}
config["decoder"] = {}
for key, val in vars(model_args).items():
if key in encoder_config_mapping.keys():
config["encoder"][encoder_config_mapping[key]] = f"{val}"
if key in decoder_config_mapping.keys():
config["decoder"][decoder_config_mapping[key]] = f"{val}"
for key, val in decoder_new_config.items():
config["decoder"][key] = f"{val}"
for key, val in model_new_config.items():
config[key] = {}
for val_key, val_val in val.items():
config[key][val_key] = f"{val_val}"
# add weight data type
np_weight_data_type = get_weight_data_type(args.weight_data_type)
config["encoder"]["weight_data_type"] = args.weight_data_type
config["decoder"]["weight_data_type"] = args.weight_data_type
# add model name
config["encoder"]["_name_or_path"] = args.model_name
config["decoder"]["_name_or_path"] = args.model_name
with open((saved_dir / f"config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
if "position_embeddings" in model_00["model"]["language_model"]["embedding"].keys():
model_00["model"]["language_model"]["embedding"]["position_embeddings"]["weight"] \
.float().cpu().numpy().astype(np_weight_data_type) \
.tofile((saved_dir / "shared.ape.bin").as_posix())
# inference factor calculation
t_gpu_num = model_args.tensor_model_parallel_size
i_gpu_num = args.infer_gpu_num
if t_gpu_num > i_gpu_num:
assert t_gpu_num % i_gpu_num == 0
is_merge_ckpt = True
factor = int(t_gpu_num / i_gpu_num)
else:
assert i_gpu_num % t_gpu_num == 0
is_merge_ckpt = False
factor = int(i_gpu_num / t_gpu_num)
main_loop = min(t_gpu_num, i_gpu_num)
# split rpe into tensor parallel ranks
if "encoder_relative_position_embedding" in model_00["model"]["language_model"].keys():
encoder_relative_position_embedding = model_00["model"]["language_model"]["encoder_relative_position_embedding"]["weight"] \
.T.float().cpu().numpy().astype(np_weight_data_type)
encoder_relative_position_embedding_split = np.split(encoder_relative_position_embedding, i_gpu_num, axis=0)
for i in range(i_gpu_num):
saved_path = saved_dir / f"encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight.{i}.bin"
encoder_relative_position_embedding_split[i].tofile(saved_path.as_posix())
del encoder_relative_position_embedding, encoder_relative_position_embedding_split
if "decoder_relative_position_embedding" in model_00["model"]["language_model"].keys():
decoder_relative_position_embedding = model_00["model"]["language_model"]["decoder_relative_position_embedding"]["weight"] \
.T.float().cpu().numpy().astype(np_weight_data_type)
decoder_relative_position_embedding_split = np.split(decoder_relative_position_embedding, i_gpu_num, axis=0)
for i in range(i_gpu_num):
saved_path = saved_dir / f"decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight.{i}.bin"
decoder_relative_position_embedding_split[i].tofile(saved_path.as_posix())
del decoder_relative_position_embedding, decoder_relative_position_embedding_split
del model_00
w_e_list = []
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
pool = multiprocessing.Pool(args.processes)
for i in range(main_loop):
for j in range(model_args.pipeline_model_parallel_size):
if model_args.pipeline_model_parallel_size == 1:
layer_rank_num = ""
else:
layer_rank_num = f"_{j:03d}"
encoder_models = []
decoder_models = []
if is_merge_ckpt == True:
for k in range(factor):
m = torch.load((prefix / f"mp_rank_{i * factor + k:02d}{layer_rank_num}" / ckpt_name).as_posix(), map_location=_gpu_map_location)
encoder_models.append(m["model"]["language_model"]["encoder"])
decoder_models.append(m["model"]["language_model"]["decoder"])
if j == 0:
w_e_list.append(m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"].float().cpu().numpy().astype(np_weight_data_type))
else:
m = torch.load((prefix / f"mp_rank_{i:02d}{layer_rank_num}" / ckpt_name).as_posix(), map_location=_gpu_map_location)
if j == 0:
w_e_list.append(
m["model"]["language_model"]["embedding"]["word_embeddings"]["weight"]
.float()
.cpu()
.numpy()
.astype(np_weight_data_type)
)
encoder_models.append(m["model"]["language_model"]["encoder"])
decoder_models.append(m["model"]["language_model"]["decoder"])
pool.starmap(
merge_and_convert_process if is_merge_ckpt == True else split_and_convert_process,
[
(
"encoder",
i,
j,
saved_dir,
factor,
k,
model_args,
encoder_models,
m["checkpoint_version"],
np_weight_data_type
)
for k in encoder_models[0].keys()
],
)
pool.starmap(
merge_and_convert_process if is_merge_ckpt == True else split_and_convert_process,
[
(
"decoder",
i,
j,
saved_dir,
factor,
k,
model_args,
decoder_models,
m["checkpoint_version"],
np_weight_data_type
)
for k in decoder_models[0].keys()
],
)
pool.close()
pool.join()
np.concatenate(w_e_list, axis=0).tofile((saved_dir / "shared.weight_T.bin").as_posix())
m["model"]["lm_head"]["bias"].float().cpu().numpy().tofile((saved_dir / "shared.bias.bin").as_posix())
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-saved_dir", "-o", type=str, help="file name of output file", required=True)
parser.add_argument("-in_file", "-i", type=str, help="file name of input checkpoint file", required=True)
parser.add_argument("-infer_gpu_num", "-i_g", type=int, help="How many gpus for inference", required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 64)", default=64)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
parser.add_argument("-model_name", "-m", type=str, help="model name", required=True)
parser.add_argument(
"--vocab-path",
type=str,
help="Path to vocabulary file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--merges-path", type=str, help="Path to merges file to embed in FasterTransformer checkpoint", required=False
)
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
start_time = datetime.now()
convert_checkpoint(args)
stop_time = datetime.now()
run_time = (stop_time - start_time)
print("[INFO] Spend {} (h:m:s) to convert the model".format(run_time))
|
FasterTransformer-main
|
examples/pytorch/t5/utils/megatron_t5_ckpt_convert.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.distributed as dist
import numpy as np
import os
class FTT5EncoderWeight(object):
def __init__(
self,
config,
tensor_para_size,
pipeline_para_size,
*,
t5_with_bias=False,
use_gated_activation=False,
t5_with_moe=False,
position_embedding_type=0,
weight_data_type
):
self.num_layer = config.num_layers
self.config = config
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.t5_with_bias = t5_with_bias
self.use_gated_activation = use_gated_activation
self.real_weights_num = 23 # assume all weights are allocated
self.t5_with_moe = t5_with_moe
self.position_embedding_type = position_embedding_type
self.weight_data_type = weight_data_type
self.adapter_inter_size = config.adapter_inter_size if hasattr(config, "adapter_inter_size") else 0
self.w = []
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
self.rank = dist.get_rank() if self.use_mpi else 0
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size() if self.use_mpi else 1
assert world_size == tensor_para_size * \
pipeline_para_size, "[ERROR] world_size != tensor_para_size * pipeline_para_size"
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
def load_from_model(self, model): # assume this only applies to huggingface models
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
np_weight_dtype = self.weight_data_type
torch_weight_dtype = {np.float32: torch.float32, np.float16: torch.float16}[np_weight_dtype]
encoder_weight_dict = {}
for name, param in model.named_parameters():
if param.dim() == 2:
param_t = param.transpose(1, 0)
elif param.dim() == 1:
param_t = param
else:
assert False, f"The dimension of param {name} should be 2"
if name.find("encoder.block") != -1 or name.find("encoder.final_layer_norm.weight") != -1:
encoder_weight_dict[name] = param_t
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.0.layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.0.SelfAttention.q.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.0.SelfAttention.k.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.0.SelfAttention.v.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.0.SelfAttention.o.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous())
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.1.layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
if self.use_gated_activation:
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.1.DenseReluDense.wi_0.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.1.DenseReluDense.wi_1.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
else:
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.1.DenseReluDense.wi.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
# add empty wi2
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
t = torch.stack([encoder_weight_dict["encoder.block.{}.layer.1.DenseReluDense.wo.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous())
t = encoder_weight_dict["encoder.final_layer_norm.weight"].contiguous().cuda()
self.w.append(t)
t = encoder_weight_dict["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"].contiguous().cuda()
self.w.append(t.split(t.shape[0] // self.tensor_para_size, dim=0)[self.tensor_para_rank].contiguous())
t = model.get_input_embeddings().weight.contiguous().cuda()
self.w.append(t)
#TODO: pass None Type to Torch Op
for i in range(19):
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
def load_from_bin(self, ckpt_path, model_type): # assume this only applies to Megatron and Megatron-DeepSpeed models
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
np_weight_dtype = self.weight_data_type
torch_weight_dtype = {np.float32: torch.float32, np.float16: torch.float16}[np_weight_dtype]
# load by binary files
if model_type == "Megatron":
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.layer_norm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.q.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.k.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.v.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.o.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.layer_norm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wi.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wi2.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wo.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.final_layer_norm.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
t = None
if (self.position_embedding_type == 0):
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)).contiguous().cuda()
else:
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/shared.ape.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/shared.weight_T.bin", dtype=np_weight_dtype).reshape([self.config.d_model, self.config.vocab_size])).contiguous().cuda()
self.w.append(t)
# add 10 additional bias if it is t5 megatron structure
if self.t5_with_bias:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.layer_norm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.q.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.k.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.v.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.0.SelfAttention.o.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.layer_norm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wi.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wi2.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wo.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.final_layer_norm.bias.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
else:
#TODO: pass None Type to Torch Op
for i in range(10):
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# add empty moe gate weight
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
if self.adapter_inter_size > 0:
ckpt_path_block = f"{ckpt_path}/encoder.block"
for adapter in ["after_attention_adapter", "after_ffn_adapter"]:
for in_out in ["wi", "wo"]:
t = torch.stack([torch.from_numpy(np.fromfile(
f"{ckpt_path_block}.{i}.{adapter}.DenseSiluDense.{in_out}.weight.{self.tensor_para_rank}.bin",
dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
for weight_bias in ["weight", "bias"]:
t = torch.stack([torch.from_numpy(np.fromfile(
f"{ckpt_path_block}.{i}.{adapter}.layer_norm.{weight_bias}.bin",
dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
for i in range(8):
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
else: # Megatron-DeepSpeed, no tensor parallelism currently
#TODO: add tensor parallelism in the conversion script
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.input_layernorm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.query.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.key.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.value.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.dense.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.post_attention_layernorm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_h_to_4h.weight.{self.tensor_para_rank}.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_h_to_4h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_h_to_4h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
# We don't have use_gated_activation in Megatron-DeepSpeed currently, so here weight placeholder is always empty
# If we have it in the future, the binary file name should be modified according to the actual name.
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wi2.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_4h_to_h.weight.{self.tensor_para_rank}.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_4h_to_h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_4h_to_h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.final_layernorm.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
# assume absolute position
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/position_embeddings.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/word_embeddings.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
if self.t5_with_bias:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.input_layernorm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.query.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.key.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.value.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.attention.dense.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.post_attention_layernorm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_h_to_4h.bias.{self.tensor_para_rank}.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_h_to_4h.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_h_to_4h.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
# We don't have use_gated_activation in Megatron-DeepSpeed currently, so here weight placeholder is always empty
# If we have it in the future, the binary file name should be modified according to the actual name.
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.block.{i}.layer.1.DenseReluDense.wi2.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_4h_to_h.bias.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.dense_4h_to_h.bias.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.zeros_like(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_4h_to_h.bias.bin", dtype=np_weight_dtype))))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.final_layernorm.bias.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
else:
for i in range(10):
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
if self.t5_with_moe:
gate_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/encoder.layers.{i}.mlp.deepspeed_moe.gate.wg.weight.bin")):
gate_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/encoder.layers.{i}.mlp.deepspeed_moe.gate.wg.weight.bin", dtype=np_weight_dtype)))
self.w.append(torch.stack(gate_list, 0).contiguous().cuda())
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# adapters are not supported in Megatron-DeepSpeed currently, so here weight placeholder is always empty
for i in range(8):
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
def to_cuda(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].cuda()
def to_float(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_half(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].half()
def to_single(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_bfloat16(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].bfloat16()
class FTT5Encoder(nn.Module):
def __init__(self, encoder_weight_list, lib_path, head_num, head_size, inter_size, d_model, is_remove_padding,
num_layer, num_bucket=32, num_expert=0, moe_layer_index=[], max_distance=128, sparse=False,
q_scaling=1.0, tensor_para_size=1, pipeline_para_size=1, t5_with_bias=False,
position_embedding_type=0, moe_k=0, activation_type="relu", adapter_inter_size=0,
adapter_norm_position="pre"):
super().__init__()
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
torch.classes.load_library(lib_path)
try:
self.encoder = torch.classes.FasterTransformer.T5Encoder(*encoder_weight_list, moe_layer_index, head_num,
head_size, inter_size, d_model,
is_remove_padding, num_layer, num_bucket,
num_expert, max_distance, sparse, q_scaling,
tensor_para_size, pipeline_para_size,
t5_with_bias, position_embedding_type, moe_k,
activation_type, adapter_inter_size,
adapter_norm_position)
except:
self.encoder = torch.classes.FasterTransformerT5Encoder(*encoder_weight_list, moe_layer_index, head_num,
head_size, inter_size, d_model,
is_remove_padding, num_layer, num_bucket,
num_expert, max_distance, sparse, q_scaling,
tensor_para_size, pipeline_para_size,
t5_with_bias, position_embedding_type, moe_k,
activation_type, adapter_inter_size,
adapter_norm_position)
def forward(self, input, seq_len, inputs_embeds=None):
output = self.encoder.forward(input, seq_len, inputs_embeds)
return output
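# A hedged usage sketch (not part of the original file): the model name, library path and
# single-GPU settings below are placeholder assumptions. It loads Hugging Face T5 weights
# into FTT5EncoderWeight and wraps them with the FTT5Encoder op defined above; running it
# requires a GPU and the compiled FasterTransformer torch op library.
def _example_build_ft_encoder(lib_path="./lib/libth_transformer.so"):
    from transformers import T5ForConditionalGeneration  # assumed to be installed
    model = T5ForConditionalGeneration.from_pretrained("t5-small").float()
    cfg = model.encoder.config
    enc_weight = FTT5EncoderWeight(cfg, tensor_para_size=1, pipeline_para_size=1,
                                   t5_with_bias=False, use_gated_activation=False,
                                   t5_with_moe=False, position_embedding_type=0,
                                   weight_data_type=np.float32)
    enc_weight.load_from_model(model)
    return FTT5Encoder(enc_weight.w, lib_path, cfg.num_heads, cfg.d_kv, cfg.d_ff,
                       cfg.d_model, True, cfg.num_layers,
                       cfg.relative_attention_num_buckets)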
|
FasterTransformer-main
|
examples/pytorch/t5/utils/ft_encoder.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import concurrent.futures
import configparser
import datetime
import logging
import os
import pathlib
import shutil
import sys
import tempfile
import typing
import numpy as np
import torch # pytype: disable=import-error
import yaml
# verify if root package is in PYTHONPATH
__root_package_path__ = pathlib.Path(__file__).parent.parent.parent.parent.parent.absolute().as_posix()
if __root_package_path__ not in sys.path:
print(
f"[ERROR] add project root directory to your PYTHONPATH with "
f"'export PYTHONPATH={__root_package_path__}:${{PYTHONPATH}}'"
)
from collections import defaultdict
from examples.pytorch.nemo import unpack_nemo_ckpt, UnpackedNemoCheckpointDir, extract_layers_with_prefix
from examples.pytorch.utils import gpu_map_location, WEIGHT2DTYPE, torch2np, cpu_map_location, safe_transpose
from functools import partial
from multiprocessing import Pool
LOGGER = logging.getLogger(__name__)
def merge_adapters(models):
model_fused = {}
for key in models[0].keys():
model_fused[key] = {}
model_fused[key]["scalers"] = torch.cat([models[i][key]["scalers"] for i in range(len(models))])
return model_fused
def convert_checkpoint(unpacked_checkpoints_dir: UnpackedNemoCheckpointDir, args: argparse.Namespace):
nemo_model_config = unpacked_checkpoints_dir.model_config
infer_tp = args.infer_gpu_num
saved_dir = pathlib.Path(args.saved_dir) / f"{infer_tp:d}-gpu"
saved_config_file = saved_dir / "config.ini"
if not saved_dir.is_dir():
LOGGER.error("No models found at " + str(saved_dir) + ". Run Nemo converter on base model first")
sys.exit(1)
if not saved_config_file.is_file():
LOGGER.error("No model config at " + str(saved_config_file) + ". Run Nemo converter on base model first")
sys.exit(1)
saved_config = configparser.ConfigParser()
with open(saved_config_file) as f:
saved_config.read_file(f)
if ("structure" in saved_config
and saved_config["structure"].get("ia3_adapted", "False") == "True"):
LOGGER.error("Model is already ia3-adapted. Refusing to go further")
sys.exit(1)
if ("structure" in saved_config
and int(saved_config["structure"].get("ia3_num_tasks", "0")) > 0
and args.in_place):
LOGGER.error("Model already has ia3 weights. Refusing to adapt it in-place.")
sys.exit(1)
train_tp = nemo_model_config.get("tensor_model_parallel_size", 1)
train_pp = nemo_model_config.get("pipeline_model_parallel_size", 1)
checkpoint_paths = unpacked_checkpoints_dir.get_checkpoints_paths(train_tp, train_pp)
checkpoint_paths_T = [[checkpoint_paths[i][j] for i in range(train_tp)] for j in range(train_pp)]
if "encoder" in saved_config and "weight_data_type" in saved_config["encoder"]:
weight_dt = WEIGHT2DTYPE[saved_config["encoder"]["weight_data_type"]]
elif "decoder" in saved_config and "weight_data_type" in saved_config["decoder"]:
weight_dt = WEIGHT2DTYPE[saved_config["decoder"]["weight_data_type"]]
else:
LOGGER.info("Could not find existing model data type. Using fp32")
weight_dt = np.float32
for ckpt_tp in checkpoint_paths_T:
model = merge_adapters([torch.load(ckpt, map_location=cpu_map_location) for ckpt in ckpt_tp])
if args.in_place:
grouped_layers = defaultdict(list)
for layer_name, layer in model.items():
target_file = str(saved_dir / corresponding_ft_name(layer_name))
grouped_layers[target_file].append((layer_name, layer))
args_list = grouped_layers.items()
else:
args_list = list(model.items())
with Pool() as p:
call_fn = multiply_weights if args.in_place else add_ia3_task
call_fn = partial(call_fn, saved_dir, saved_config, weight_dt, infer_tp)
ret = p.starmap(call_fn, args_list)
if args.in_place:
saved_config["structure"]["ia3_adapted"] = "True"
else:
ia3_num_tasks = int(saved_config["structure"].get("ia3_num_tasks", "0")) + 1
saved_config["structure"]["ia3_num_tasks"] = str(ia3_num_tasks)
LOGGER.info("Model now has {} IA3 task(s)".format(ia3_num_tasks))
with open(saved_config_file, "w") as f:
saved_config.write(f)
def add_ia3_task(saved_dir, saved_config, weight_dt, infer_tp, layer_name, layer):
ia3_weights_tp = np.array(layer["scalers"], dtype=weight_dt)
for tp, ia3_weights in zip(range(infer_tp), np.split(ia3_weights_tp, infer_tp)):
ia3_name = corresponding_ft_name(layer_name, ia3_name=True).format(tp=tp)
enc_dec = "encoder" if "encoder" in ia3_name else "decoder"
ia3_filename = saved_dir / ia3_name
if ia3_filename.is_file():
previous_weights = np.fromfile(ia3_filename, dtype=weight_dt)
if "DenseReluDense" in ia3_name:
hidden_dim = int(saved_config[enc_dec]["d_ff"])
else:
hidden_dim = int(saved_config[enc_dec]["d_model"])
previous_weights = previous_weights.reshape((-1, hidden_dim))
ia3_weights = np.concatenate((previous_weights, ia3_weights[None, :]))
ia3_weights.tofile(ia3_filename)
def corresponding_ft_name(ia3_weight, ia3_name=False):
ia3_suffix = ".ia3" if ia3_name else ""
name = ""
is_decoder = "decoder" in ia3_weight
if is_decoder:
name += "decoder."
else:
name += "encoder."
layer_id = ia3_weight.split(".")[-1].split(":")[0]
name += "block." + layer_id + ".layer."
if "mlp_infused_adapter" in ia3_weight:
name += ("2" if is_decoder else "1") + ".DenseReluDense"
name += (ia3_suffix if ia3_name else ".wo") + ".weight.{tp}.bin"
else:
is_cross_attention = "inter" in ia3_weight
rank = "1" if is_cross_attention else "0"
base_layer = "EncDecAttention" if is_cross_attention else "SelfAttention"
features = "k" if "key" in ia3_weight else "v"
features = "qkv" if (is_decoder and not is_cross_attention and not ia3_name) else features
features += ia3_suffix
name += ".".join((rank, base_layer, features)) + ".weight.{tp}.bin"
return name
def reshape(config, name, array):
enc_dec = "encoder" if "encoder" in name else "decoder"
if "DenseReluDense" in name:
dims = int(config[enc_dec]["d_ff"]), int(config[enc_dec]["d_model"])
elif enc_dec == "decoder" and "SelfAttention.qkv" in name:
dims = (3, int(config[enc_dec]["d_model"]), int(config[enc_dec]["d_model"]))
elif "SelfAttention" in name or "EncDecAttention" in name:
dims = int(config[enc_dec]["d_model"]), int(config[enc_dec]["d_model"])
return array.reshape(dims)
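# A small numpy sketch (shapes are hypothetical) of the IA3 scaling used in multiply_weights
# below: a wo weight stored as (d_ff, d_model) is scaled row by row by a length-d_ff IA3
# vector, i.e. the column-wise broadcast obtained by adding a trailing axis.
def _example_ia3_broadcast(d_ff=8, d_model=4):
    weights = np.ones((d_ff, d_model), dtype=np.float32)
    ia3 = np.arange(d_ff, dtype=np.float32)
    return weights * ia3[:, None]   # row i of the result is scaled by ia3[i]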
def multiply_weights(saved_dir, saved_config, weight_dt, infer_tp, weight_file, layers):
for tp in range(infer_tp):
# resolve this rank's file without clobbering the "{tp}" template needed for later ranks
weight_file_tp = weight_file.format(tp=tp)
weights = reshape(saved_config, weight_file_tp, np.fromfile(weight_file_tp, dtype=weight_dt))
if len(layers) == 1:
ia3_weights = np.split(np.array(layers[0][1]['scalers'], dtype=weight_dt), infer_tp)[tp]
if "DenseReluDense" in weight_file:
ia3_weights = ia3_weights[:, None] # column-wise broadcast
else:
ia3_weights = ia3_weights[None, :] # row-wise broadcast
else:
if "key_infused_adapter" in layers[0][0]:
key, value = layers[0][1], layers[1][1]
else:
key, value = layers[1][1], layers[0][1]
key, value = np.array(key['scalers'], dtype=weight_dt), np.array(value['scalers'], dtype=weight_dt)
key, value = np.split(key, infer_tp)[tp], np.split(value, infer_tp)[tp]
query = np.ones_like(key)
ia3_weights = np.stack((query, key, value))[:, None, :]
ia3_adapted = weights * ia3_weights
ia3_adapted.tofile(weight_file_tp)
def main():
""" Enhance your model IA3 features.
The converter has two modes:
- Out-of-place: use dedicated IA3 weight files to apply the converters at run-time (default). This allows using multiple IA3 tasks with a single base model. Running this script multiple times with the same output directory and different IA3 tasks will stack IA3 adapters.
- In-place: pre-process existing model by multiplying weights with IA3 adapters. With this scheme, only one fine-tuned task is supported.
"""
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument(
"--in-place",
dest="in_place",
action="store_true",
help="multiply model weights directly"
)
parser.add_argument(
"--saved-dir",
"-o",
dest="saved_dir",
help="folder name of output files",
required=True,
)
parser.add_argument(
"--in-file",
"-i",
dest="in_file",
help="file name of .nemo IA3 checkpoint file",
required=True,
)
parser.add_argument(
"--clean-tasks",
"-c",
dest="clean_tasks",
action="store_true",
help="in Out-of-place mode, clean previous IA3_tasks"
)
parser.add_argument(
"--infer-gpu-num",
"-i_g",
dest="infer_gpu_num",
type=int,
help="how many gpus for inference",
required=True,
)
parser.add_argument("--verbose", "-v", action="store_true", help="increase verbosity")
args = parser.parse_args()
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO, format=log_format)
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
input_path = pathlib.Path(args.in_file)
if not input_path.exists():
        LOGGER.error("%s does not exist", input_path)
sys.exit(1)
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir = pathlib.Path(temp_dir)
# unpack if needed
if input_path.is_file():
checkpoint_dir_path = temp_dir / "unpacked"
start_time = datetime.datetime.now()
unpacked_checkpoint_dir = UnpackedNemoCheckpointDir(
unpack_nemo_ckpt(args.in_file, checkpoint_dir_path),
load_checkpoints_to_cpu=True,
)
LOGGER.info("Spent %s (h:m:s) to unpack NeMo archive", datetime.datetime.now() - start_time)
else:
unpacked_checkpoint_dir = UnpackedNemoCheckpointDir(
input_path, load_checkpoints_to_cpu=True,
)
LOGGER.debug("Unpacked NeMo checkpoint contains:")
for file_path in unpacked_checkpoint_dir.checkpoints_dir.rglob("*"):
LOGGER.debug(" %s", file_path)
start_time = datetime.datetime.now()
convert_checkpoint(unpacked_checkpoint_dir, args)
LOGGER.info("Spent %s (h:m:s) to convert the model", datetime.datetime.now() - start_time)
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/t5/utils/nemo_t5_ia3.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.distributed as dist
import numpy as np
import os
class FTT5DecodingWeight(object):
def __init__(
self,
config,
tensor_para_size,
pipeline_para_size,
*,
t5_with_bias=False,
use_gated_activation=False,
t5_with_moe=False,
position_embedding_type=0,
weight_data_type
):
self.config = config
self.num_layer = config.num_layers
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.t5_with_bias = t5_with_bias
self.use_gated_activation = use_gated_activation
self.t5_with_moe = t5_with_moe
self.position_embedding_type = position_embedding_type
self.real_weights_num = 31 # assume all weights are allocated and converted to specific data type
self.weight_data_type = weight_data_type
self.adapter_inter_size = config.adapter_inter_size if hasattr(config, "adapter_inter_size") else 0
self.w = []
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
self.rank = dist.get_rank() if self.use_mpi else 0
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size() if self.use_mpi else 1
assert world_size == tensor_para_size * \
pipeline_para_size, "[ERROR] world_size != tensor_para_size * pipeline_para_size"
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
def load_from_model(self, model):
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
np_weight_dtype = self.weight_data_type
torch_weight_dtype = {np.float32: torch.float32, np.float16: torch.float16}[np_weight_dtype]
weight_dict = {}
qkv_tmp = []
for name, param in model.state_dict().items():
if param.dim() == 2:
param_t = param.transpose(1, 0)
elif param.dim() == 1:
param_t = param
else:
                assert False, f"The dimension of param {name} should be 1 or 2"
if name.find("decoder.block") != -1:
if name.find(".SelfAttention.q.weight") != -1 or name.find(".SelfAttention.k.weight") != -1 or name.find(".SelfAttention.v.weight") != -1:
qkv_tmp.append(param_t)
if len(qkv_tmp) == 3:
qkv = torch.cat(qkv_tmp, dim=-1)
weight_dict[name.replace("v.weight", "qkv.weight")] = qkv
qkv_tmp = []
else:
weight_dict[name] = param_t
elif name.find("decoder") != -1:
weight_dict[name] = param_t
else:
weight_dict[name] = param_t
# load by torch model directly
t = torch.stack([weight_dict["decoder.block.{}.layer.0.layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.0.SelfAttention.qkv.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.reshape([t.shape[0], t.shape[1], 3, t.shape[2] // 3])
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.0.SelfAttention.o.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.1.layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.1.EncDecAttention.q.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.1.EncDecAttention.k.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.1.EncDecAttention.v.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.1.EncDecAttention.o.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.2.layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
if self.use_gated_activation:
t = torch.stack([weight_dict["decoder.block.{}.layer.2.DenseReluDense.wi_0.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = torch.stack([weight_dict["decoder.block.{}.layer.2.DenseReluDense.wi_1.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
else:
t = torch.stack([weight_dict["decoder.block.{}.layer.2.DenseReluDense.wi.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
## empty wi2 weight
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
t = torch.stack([weight_dict["decoder.block.{}.layer.2.DenseReluDense.wo.weight".format(i)] for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous()
self.w.append(t)
t = weight_dict["decoder.final_layer_norm.weight"].contiguous().cuda()
self.w.append(t)
t = weight_dict["shared.weight"].transpose(1, 0).contiguous().cuda()
self.w.append(t)
t = weight_dict["lm_head.weight"].transpose(1, 0).contiguous().cuda() # Transpose back to [vocab, hidden]
self.w.append(t)
t = weight_dict["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"].contiguous().cuda()
t = t.split(t.shape[0] // self.tensor_para_size, dim=0)[self.tensor_para_rank].contiguous()
self.w.append(t)
#TODO: pass None Type to Torch Op
for i in range(23):
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
def load_from_bin(self, ckpt_path, model_type):
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
np_weight_dtype = self.weight_data_type
torch_weight_dtype = {np.float32: torch.float32, np.float16: torch.float16}[np_weight_dtype]
# load by binary files
if model_type == "Megatron":
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.0.layer_norm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.0.SelfAttention.qkv.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.0.SelfAttention.o.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.layer_norm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.q.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.k.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.v.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.o.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.layer_norm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wi.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wi2.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wo.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.final_layer_norm.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/shared.weight_T.bin", dtype=np_weight_dtype).reshape([self.config.d_model, self.config.vocab_size])).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/lm_head.weight.bin", dtype=np_weight_dtype).reshape(
[self.config.d_model, self.config.vocab_size])).contiguous().cuda()
self.w.append(t)
t = None
if (self.position_embedding_type == 0):
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)).contiguous().cuda()
else:
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/shared.ape.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
# add 14 additional bias if it is t5 megatron structure
if self.t5_with_bias:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.0.layer_norm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.0.SelfAttention.qkv.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.0.SelfAttention.o.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.layer_norm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.q.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.k.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.v.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.1.EncDecAttention.o.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.layer_norm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wi.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wi2.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wo.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.final_layer_norm.bias.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/shared.bias.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
else:
#TODO: pass None Type to Torch Op
for i in range(14):
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# add empty moe gate weight
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
if self.adapter_inter_size > 0:
ckpt_path_block = f"{ckpt_path}/decoder.block"
for adapter in ["after_attention_adapter", "after_ffn_adapter"]:
for in_out in ["wi", "wo"]:
t = torch.stack([torch.from_numpy(np.fromfile(
f"{ckpt_path_block}.{i}.{adapter}.DenseSiluDense.{in_out}.weight.{self.tensor_para_rank}.bin",
dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
for weight_bias in ["weight", "bias"]:
t = torch.stack([torch.from_numpy(np.fromfile(
f"{ckpt_path_block}.{i}.{adapter}.layer_norm.{weight_bias}.bin",
dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
for i in range(8):
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
else:
# Megatron-DeepSpeed, no tensor parallelism currently
#TODO: add tensor parallelism in the conversion script
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.input_layernorm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.attention.query_key_value.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.attention.dense.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.post_attention_layernorm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.query.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.key.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.value.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.dense.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.post_inter_attention_layernorm.weight.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_h_to_4h.weight.{self.tensor_para_rank}.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_h_to_4h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_h_to_4h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
# We don't have use_gated_activation in Megatron-DeepSpeed currently, so here weight placeholder is always empty
# If we have it in the future, the binary file name should be modified according to the actual name.
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wi2.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_4h_to_h.weight.{self.tensor_para_rank}.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_4h_to_h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_4h_to_h.weight.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.final_layernorm.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/word_embeddings.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
# lm_head weight
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/word_embeddings.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
# assume absolute position
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/position_embeddings.weight.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
if self.t5_with_bias:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.input_layernorm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.attention.query_key_value.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.attention.dense.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.post_attention_layernorm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.query.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.key.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.value.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.inter_attention.dense.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.post_inter_attention_layernorm.bias.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_h_to_4h.bias.{self.tensor_para_rank}.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_h_to_4h.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_h_to_4h.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
# We don't have use_gated_activation in Megatron-DeepSpeed currently, so here weight placeholder is always empty
# If we have it in the future, the binary file name should be modified according to the actual name.
if self.use_gated_activation:
t = torch.stack([torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.block.{i}.layer.2.DenseReluDense.wi2.bias.{self.tensor_para_rank}.bin", dtype=np_weight_dtype)) for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# =========== process normal and moe dense layer =================
t_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_4h_to_h.bias.bin")):
t_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.dense_4h_to_h.bias.bin", dtype=np_weight_dtype)))
else:
t_list.append(torch.zeros_like(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.deepspeed_moe.experts.deepspeed_experts.dense_4h_to_h.bias.bin", dtype=np_weight_dtype))))
self.w.append(torch.cat(t_list, 0).contiguous().cuda())
# ================================================================
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.final_layernorm.bias.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
t = torch.from_numpy(np.fromfile(f"{ckpt_path}/shared.bias.bin", dtype=np_weight_dtype)).contiguous().cuda()
self.w.append(t)
else:
for i in range(14):
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
if self.t5_with_moe:
gate_list = []
for i in range(start_layer, end_layer):
if (os.path.isfile(f"{ckpt_path}/decoder.layers.{i}.mlp.deepspeed_moe.gate.wg.weight.bin")):
gate_list.append(torch.from_numpy(np.fromfile(f"{ckpt_path}/decoder.layers.{i}.mlp.deepspeed_moe.gate.wg.weight.bin", dtype=np_weight_dtype)))
self.w.append(torch.stack(gate_list, 0).contiguous().cuda())
else:
self.w.append(torch.empty((1,1), dtype=torch_weight_dtype).contiguous().cuda())
# adapters are not supported in Megatron-DeepSpeed currently, so here weight placeholder is always empty
for i in range(8):
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
def to_cuda(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].cuda()
def to_float(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_half(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].half()
def to_single(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_bfloat16(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].bfloat16()
class FTT5Decoding(nn.Module):
def __init__(self, decoding_weight_list, lib_path, head_num, head_size, inter_size,
mem_d_model, d_model, num_layer, start_id, end_id, vocab_size, q_scaling=1.0, num_bucket=32,
num_expert=0, moe_layer_index=[],
max_distance=128, tensor_para_size=1, pipeline_para_size=1, t5_with_bias=False,
position_embedding_type=0, moe_k=0,
activation_type="relu", tie_word_embeddings=True, adapter_inter_size=0, adapter_norm_position="pre"):
super().__init__()
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
torch.classes.load_library(lib_path)
try:
self.decoding = torch.classes.FasterTransformer.T5Decoding(head_num, head_size, inter_size, mem_d_model,
d_model, num_layer,
vocab_size, num_bucket, num_expert, max_distance,
q_scaling, start_id, end_id,
tensor_para_size, pipeline_para_size,
t5_with_bias,
position_embedding_type, moe_k, activation_type,
tie_word_embeddings, adapter_inter_size,
adapter_norm_position,
moe_layer_index, *decoding_weight_list)
except:
self.decoding = torch.classes.FasterTransformerT5Decoding(head_num, head_size, inter_size, mem_d_model,
d_model, num_layer,
vocab_size, num_bucket, num_expert, max_distance,
q_scaling, start_id, end_id,
tensor_para_size, pipeline_para_size,
t5_with_bias,
position_embedding_type, moe_k, activation_type,
tie_word_embeddings, adapter_inter_size,
adapter_norm_position,
moe_layer_index, *decoding_weight_list)
def forward(self, beam_width, max_seq_len, top_k, top_p,
beam_search_diversity_rate, temperature,
len_penalty, repetition_penalty, presence_penalty, min_length, random_seed,
mem_hidden_states, mem_seq_len,
is_return_output_log_probs, is_return_cum_log_probs, is_return_cross_attentions=False,
bad_words_list=None, stop_words_list=None):
        # TODO (bhsueh): have not found a method to pass a None type into the op forward function,
        # so top_k and top_p must be concrete values for now.
results = self.decoding.forward(beam_width, max_seq_len,
top_k, top_p, beam_search_diversity_rate,
temperature, len_penalty, repetition_penalty, presence_penalty, min_length,
random_seed, mem_hidden_states, mem_seq_len,
is_return_output_log_probs, is_return_cum_log_probs, is_return_cross_attentions,
bad_words_list, stop_words_list)
return results
class FTT5(nn.Module):
def __init__(self, encoder, decoding):
super().__init__()
self.encoder = encoder
self.decoding = decoding
def forward(self, input_token, inputs_embeds, beam_size, max_seq_len,
top_k, top_p, beam_search_diversity_rate = 0.0,
temperature=1.0, len_penalty=0.0, repetition_penalty=None, presence_penalty=None, min_length=0, random_seed=0,
is_return_output_log_probs=False, is_return_cum_log_probs=False, is_return_cross_attentions=False,
bad_words_list=None, stop_words_list=None):
input_ids = input_token.input_ids.to("cuda").type(torch.int32)
mem_seq_len = 0
if hasattr(input_token, "attention_mask"):
mem_seq_len = torch.sum(input_token.attention_mask, dim=1).type(torch.int32).to("cuda")
else:
mem_seq_len = input_token.seq_len.type(torch.int32).to("cuda")
ft_encoder_outputs = self.encoder.forward(input_ids, mem_seq_len, inputs_embeds)
results = self.decoding.forward(beam_size, # optional, can be None
max_seq_len,
top_k, # optional, can be None
top_p, # optional, can be None
beam_search_diversity_rate, # optional, can be None
temperature, # optional, can be None
len_penalty, # optional, can be None
repetition_penalty, # optional, can be None
presence_penalty, # optional, can be None
min_length, # optional, can be None
random_seed, # optional, can be None
ft_encoder_outputs,
mem_seq_len,
is_return_output_log_probs, # optional, can be None
is_return_cum_log_probs, # optional, can be None
is_return_cross_attentions, # optional, can be None
bad_words_list, # optional, can be None
stop_words_list, # optional, can be None
)
ft_decoding_outputs = results.pop(0).reshape([-1, beam_size, max_seq_len])
ft_decoding_seq_lens = results.pop(0).reshape([-1, beam_size])
if is_return_output_log_probs:
ft_output_log_probs = results.pop(0)
if is_return_cum_log_probs:
ft_cum_log_probs = results.pop(0)
if is_return_cross_attentions:
ft_cross_attentions = results.pop(0)
return ft_decoding_outputs.cpu().numpy(), ft_decoding_seq_lens.cpu().numpy(), ft_cross_attentions.cpu().numpy()
return ft_decoding_outputs.cpu().numpy(), ft_decoding_seq_lens.cpu().numpy()
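# Minimal usage sketch (assumed objects: `ft_encoder` and `ft_decoding` are already-built FT modules
# and `tokens` is a tokenizer output exposing `input_ids` and `attention_mask`):
#   ft_t5 = FTT5(ft_encoder, ft_decoding)
#   output_ids, seq_lens = ft_t5(tokens, None, beam_size=1, max_seq_len=32, top_k=1, top_p=0.0)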
|
FasterTransformer-main
|
examples/pytorch/t5/utils/ft_decoding.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import multiprocessing
from datetime import datetime
import logging
from pathlib import Path
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../../3rdparty/transformers/src/")
from transformers import T5ForConditionalGeneration, T5EncoderModel
import numpy as np
import torch # pytype: disable=import-error
LOGGER = logging.getLogger(__name__)
rename_mapping = {"relative_attention_num_buckets": "relative_attention_num_buckets_or_max_pos_seq_len"}
new_configs = {
"structure": {"t5_with_bias": "false", "use_gated_activation": "false", "position_embedding_type": "relative"}}
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def fuse_decoder_qkv(model, factor, saved_dir, np_weight_data_type):
model_dict = {}
for name, param in model.named_parameters():
if name.find("decoder") != -1 and name.find("SelfAttention") != -1:
model_dict[name] = param
for i in range(model.decoder.config.num_layers):
shape = model_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"].T.shape
qkv = torch.cat([model_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"].T,
model_dict[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"].T,
model_dict[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"].T], dim=-1)
qkv = qkv.reshape([shape[0], 3, shape[1]])
qkv = qkv.cpu().detach().numpy().astype(np_weight_data_type)
split_vals = np.split(qkv, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"decoder.block.{i}.layer.0.SelfAttention.qkv.weight.{j}.bin"
split_vals[j].tofile(saved_path.as_posix())
def split_and_convert_process(key, val, factor, saved_dir):
if val.ndim == 2:
val = val.transpose(1, 0)
saved_key = key
LOGGER.debug(f"key: {key}, val.shape: {val.shape}")
if key.find("shared.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{saved_key}.bin"
val.tofile(saved_path.as_posix())
saved_path = saved_dir / f"{saved_key}_T.bin"
val.T.tofile(saved_path.as_posix())
elif key.find("lm_head.weight") != -1:
# lm_head weights, only need to convert the weights of rank 0
        val = val.transpose(1, 0)  # lm_head uses a TN GEMM, so transpose back: unlike the shared embedding, it keeps the original [vocab, hidden] layout
saved_path = saved_dir / f"{saved_key}.bin"
val.tofile(saved_path.as_posix())
elif key.find("layer_norm.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{saved_key}.bin"
val.tofile(saved_path.as_posix())
elif (
key.find("SelfAttention.o.weight") != -1
or key.find("EncDecAttention.o.weight") != -1
or key.find("DenseReluDense.wo.weight") != -1
):
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{saved_key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("DenseReluDense.wi.weight") != -1
or (key.find("encoder") != -1 and (
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
)
or key.find("EncDecAttention.q.weight") != -1
or key.find("EncDecAttention.k.weight") != -1
or key.find("EncDecAttention.v.weight") != -1
):
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"{saved_key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("DenseReluDense.wi_0.weight") != -1
or key.find("DenseReluDense.wi_1.weight") != -1
):
# For gated activation.
if key.find("DenseReluDense.wi_0.weight") != -1:
saved_key = key.replace("wi_0", "wi")
elif key.find("DenseReluDense.wi_1.weight") != -1:
saved_key = key.replace("wi_1", "wi2")
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"{saved_key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("relative_attention_bias") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{saved_key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("decoder") != -1 and
(
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
):
pass
elif key.find("encoder.embed_tokens.weight") != -1 or \
key.find("decoder.embed_tokens.weight") != -1:
LOGGER.warning(f"Not save {key}, using shared.weight directly.")
else:
LOGGER.warning(f"cannot find key '{key}' with shape {val.shape}")
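# For reference: each split weight is written once per tensor-parallel rank, e.g. with factor=2 the
# key "encoder.block.0.layer.0.SelfAttention.q.weight" produces
# "encoder.block.0.layer.0.SelfAttention.q.weight.0.bin" and "encoder.block.0.layer.0.SelfAttention.q.weight.1.bin"
# (split along the last axis), while shared weights such as "shared.weight" are written only once.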
def convert_checkpoint(args):
saved_dir = Path(args.saved_dir) / f"{args.inference_tensor_para_size:d}-gpu"
saved_dir.mkdir(parents=True, exist_ok=True)
if args.encoder_only:
t5_model = T5EncoderModel.from_pretrained(args.in_file)
else:
t5_model = T5ForConditionalGeneration.from_pretrained(args.in_file)
config = configparser.ConfigParser()
if t5_model.encoder.config.feed_forward_proj.find("gated") != -1:
new_configs["structure"]["use_gated_activation"] = "1"
config["encoder"] = {}
for key, val in t5_model.encoder.config.to_dict().items():
config["encoder"][key] = f"{val}"
config["encoder"]["weight_data_type"] = args.weight_data_type
config["decoder"] = {}
if not args.encoder_only:
for key, val in t5_model.decoder.config.to_dict().items():
config["decoder"][key] = f"{val}"
config["decoder"]["weight_data_type"] = args.weight_data_type
for key, val in rename_mapping.items():
config['encoder'][val] = config['encoder'].pop(key)
if not args.encoder_only:
config['decoder'][val] = config['decoder'].pop(key)
for key, val in new_configs.items():
config[key] = {}
for val_key, val_val in val.items():
config[key][val_key] = val_val
with open((saved_dir / f"config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
np_weight_data_type = get_weight_data_type(args.weight_data_type)
i_gpu_num = args.inference_tensor_para_size
pool = multiprocessing.Pool(args.processes)
pool.starmap_async(split_and_convert_process,
[(name, param.cpu().detach().numpy().astype(np_weight_data_type), i_gpu_num, saved_dir)
for name, param in t5_model.state_dict().items()])
pool.close()
pool.join()
if not args.encoder_only:
fuse_decoder_qkv(t5_model, i_gpu_num, saved_dir, np_weight_data_type)
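# Illustrative invocation (model name and output path are placeholders; flags are defined in the
# __main__ block below):
#   python3 huggingface_t5_ckpt_convert.py -i t5-small -o ./ft_t5 -i_g 1 -weight_data_type fp32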
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-saved_dir", "-o", type=str, help="file name of output file", required=True)
parser.add_argument("-in_file", "-i", type=str, help="file name of input checkpoint file", required=True)
parser.add_argument("-inference_tensor_para_size", "-i_g", type=int, help="How many gpus for inference",
required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)",
default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
parser.add_argument("--encoder_only", "-e", action="store_true")
parser.add_argument("--verbose", action="store_true", help="Provide verbose messages")
args = parser.parse_args()
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO, format=log_format)
LOGGER.info("\n=============== Argument ===============")
for key in vars(args):
LOGGER.info(f"{key}: {vars(args)[key]}")
LOGGER.info("========================================")
start_time = datetime.now()
convert_checkpoint(args)
stop_time = datetime.now()
run_time = (stop_time - start_time)
LOGGER.info("Spend {} (h:m:s) to convert the model".format(run_time))
|
FasterTransformer-main
|
examples/pytorch/t5/utils/huggingface_t5_ckpt_convert.py
|
import configparser
import torch
import argparse
import os
import numpy as np
device = torch.device('cpu')
encoder_config_mapping = {
"num_attention_heads":"num_heads",
"kv_channels":"d_kv",
"hidden_size":"d_model",
"ffn_hidden_size":"d_ff",
"num_layers":"num_layers",
"num_experts":"num_experts",
"padded_vocab_size":"vocab_size",
"max_position_embeddings":"relative_attention_num_buckets_or_max_pos_seq_len",
"relative_position_num_buckets":"relative_attention_num_buckets_or_max_pos_seq_len"
}
decoder_config_mapping = {
"num_attention_heads":"num_heads",
"kv_channels":"d_kv",
"hidden_size":"d_model",
"ffn_hidden_size":"d_ff",
"num_layers":"num_layers",
"num_experts":"num_experts",
"padded_vocab_size":"vocab_size",
"max_position_embeddings":"relative_attention_num_buckets_or_max_pos_seq_len",
"relative_position_num_buckets":"relative_attention_num_buckets_or_max_pos_seq_len"
}
decoder_new_config = {
"decoder_start_token_id":250104, ## need to adjust
"eos_token_id":1 ## need to adjust
}
model_new_config = {"structure":{"t5_with_bias":1, "t5_with_moe":0, "moe_layers_in_encoder":[], "moe_layers_in_decoder":[], "use_gated_activation": 0, "position_embedding_type":0}}
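# Illustrative invocation (paths are placeholders; flags are defined in the __main__ block below):
#   python3 megatron-deepspeed_t5_ckpt_convert.py --checkpoint_dir /path/to/ckpt \
#       --output_dir /path/to/output --tensor_para_size 1 --weight_data_type fp32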
if __name__=='__main__':
parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_dir", type=str, help="path to the deepspeed-megatron checkpoint")
parser.add_argument("--output_dir", type=str, help="path to the output directory to store binary files")
parser.add_argument("--tensor_para_size", type=int, help="tensor parallelism size")
parser.add_argument("--weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"])
args = parser.parse_args()
checkpoint_dir = args.checkpoint_dir
output_dir = args.output_dir
tensor_para_size = args.tensor_para_size
print("=========================")
print(f"checkpoint: '{checkpoint_dir}'")
print(f"output: '{output_dir}'")
print(f"tensor_para_size: '{tensor_para_size}'")
print("=========================")
if not os.path.isdir(checkpoint_dir):
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
    # for the Tencent T5 model, which is trained under ZeRO-2
files = [f for f in os.listdir(checkpoint_dir) if f.find('model_states') != -1]
model_states_file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
#model_states_file = os.path.join(checkpoint_dir, "model_optim_rng.pt")
model_states = torch.load(model_states_file, map_location=device)
model_args = vars(model_states['args'])
num_layers = model_args['num_layers']
num_experts = 0
if 'num_experts' in model_args.keys():
num_experts = model_args['num_experts'][0]
# update model structure config
    if 'position_embedding_type' not in model_args or model_args['position_embedding_type'] == "absolute":
model_new_config["structure"]["position_embedding_type"] = 1
if num_experts != 0:
model_new_config["structure"]["t5_with_moe"] = 1
model = model_states['module']['language_model']
embedding = model['embedding']
encoder = model['encoder']
decoder = model['decoder']
word_embeddings = embedding['word_embeddings']
position_embeddings = embedding['position_embeddings']
word_embeddings_weight = word_embeddings['weight'].float().detach().numpy()
file_name = os.path.join(output_dir, "word_embeddings.weight.bin")
print(f"Saving word_embeddings_weight to '{file_name}'")
print(f"Shape: '{word_embeddings_weight.shape}'")
word_embeddings_weight.tofile(file_name)
position_embeddings_weight = position_embeddings['weight'].float().detach().numpy()
file_name = os.path.join(output_dir, "position_embeddings.weight.bin")
print(f"Saving position_embeddings_weight to '{file_name}'")
print(f"Shape: '{position_embeddings_weight.shape}'")
position_embeddings_weight.tofile(file_name)
shared_bias = model_states['module']['lm_head']['bias'].float().detach().numpy()
file_name = os.path.join(output_dir, "shared.bias.bin")
print(f"Saving shared.bias to '{file_name}'")
print(f"Shape: '{shared_bias.shape}'")
shared_bias.tofile(file_name)
moe_layers_num = 0
moe_layers_in_encoder = []
moe_layers_in_decoder = []
for k, v in encoder.items():
val = v.T.float().cpu().numpy().astype(np.float32)
if k.find("attention.query_key_value.weight") != -1:
num_splits = 3
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / num_splits)
head_num = model_args['num_attention_heads']
size_per_head = model_args['kv_channels']
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, num_splits, local_dim)
factor = 1
split_vals = np.split(val, factor, axis=-1)
query = split_vals[0][:, 0, ...]
key = split_vals[0][:, 1, ...]
value = split_vals[0][:, 2, ...]
prefix = k[:-22] + "query.weight"
query_tensor_para = np.split(query, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "encoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
query_to_save = query_tensor_para[i]
print(f"Shape: '{query_to_save.shape}'")
query_to_save.tofile(file_name)
prefix = k[:-22] + "key.weight"
key_tensor_para = np.split(key, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "encoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
key_to_save = key_tensor_para[i]
print(f"Shape: '{key_to_save.shape}'")
key_to_save.tofile(file_name)
prefix = k[:-22] + "value.weight"
value_tensor_para = np.split(value, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "encoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
value_to_save = value_tensor_para[i]
print(f"Shape: '{value_to_save.shape}'")
value_to_save.tofile(file_name)
elif k.find("attention.query_key_value.bias") != -1:
num_splits = 3
local_dim = int(val.shape[-1] / num_splits)
head_num = model_args['num_attention_heads']
size_per_head = model_args['kv_channels']
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(num_splits, local_dim)
factor = 1
split_vals = np.split(val, factor, axis=-1)
q_bias = split_vals[0][0, ...]
k_bias = split_vals[0][1, ...]
v_bias = split_vals[0][2, ...]
prefix = k[:-20] + "query.bias"
q_bias_tensor_para = np.split(q_bias, tensor_para_size, axis=0)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "encoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
q_bias_to_save = q_bias_tensor_para[i]
print(f"Shape: '{q_bias_to_save.shape}'")
q_bias_to_save.tofile(file_name)
prefix = k[:-20] + "key.bias"
k_bias_tensor_para = np.split(k_bias, tensor_para_size, axis=0)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "encoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
k_bias_to_save = k_bias_tensor_para[i]
print(f"Shape: '{k_bias_to_save.shape}'")
k_bias_to_save.tofile(file_name)
prefix = k[:-20] + "value.bias"
v_bias_tensor_para = np.split(v_bias, tensor_para_size, axis=0)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "encoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
v_bias_to_save = v_bias_tensor_para[i]
print(f"Shape: '{v_bias_to_save.shape}'")
v_bias_to_save.tofile(file_name)
elif k.find('experts') == -1:
if k.find('deepspeed_moe.gate') != -1:
layer_id = int(k[7:k.find('.', 7)])
moe_layers_in_encoder.append(layer_id)
moe_layers_num += 1
prefix = k
if k.find('layernorm') != -1 or k.find('gate') != -1 or k.find("attention.dense.bias") != -1 or k.find("dense_4h_to_h.bias") != -1:
file_name = os.path.join(output_dir, "encoder." + prefix + ".bin")
print(f"Saving '{prefix}' to '{file_name}'")
print(f"Shape: '{val.shape}'")
val.tofile(file_name)
else:
val_tensor_para = []
if k.find("attention.dense.weight") != -1 or k.find("dense_4h_to_h.weight") != -1 or k.find("dense_h_to_4h.bias") != -1:
val_tensor_para = np.split(val, tensor_para_size, axis=0)
elif k.find("dense_h_to_4h.weight") != -1:
val_tensor_para = np.split(val, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "encoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
print('moe_layers_in_encoder: ', moe_layers_in_encoder)
model_new_config["structure"]["moe_layers_in_encoder"] = moe_layers_in_encoder
for k, v in decoder.items():
val = v.T.float().cpu().numpy().astype(np.float32)
if (k.find("attention.query_key_value.weight") != -1
or k.find("inter_attention.key_value.weight") != -1
or k.find("inter_attention.query.weight") != -1): #(d_model * 3, d_model)
num_splits = 3
if k.find("inter_attention.key_value.weight") != -1:
num_splits = 2
if k.find("inter_attention.query.weight") != -1:
num_splits = 1
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / num_splits)
head_num = model_args['num_attention_heads']
size_per_head = model_args['kv_channels']
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, num_splits, local_dim)
factor = 1
split_vals = np.split(val, factor, axis=-1)
if k.find("attention.query_key_value.weight") != -1:
query_key_value = split_vals[0]
prefix = k
query_key_value_tensor_para = np.split(query_key_value, tensor_para_size, axis=2)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
query_key_value_to_save = query_key_value_tensor_para[i]
print(f"Shape: '{query_key_value_to_save.shape}'")
query_key_value_to_save.tofile(file_name)
if k.find("inter_attention.key_value.weight") != -1:
key = split_vals[0][:, 0, ...]
value = split_vals[0][:, 1, ...]
prefix = k[:-16] + "key.weight"
key_tensor_para = np.split(key, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
key_to_save = key_tensor_para[i]
print(f"Shape: '{key_to_save.shape}'")
key_to_save.tofile(file_name)
prefix = k[:-16] + "value.weight"
value_tensor_para = np.split(value, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
value_to_save = value_tensor_para[i]
print(f"Shape: '{value_to_save.shape}'")
value_to_save.tofile(file_name)
if k.find("inter_attention.query.weight") != -1:
query = split_vals[0]
prefix = k
query_tensor_para = np.split(query, tensor_para_size, axis=2)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
query_to_save = query_tensor_para[i]
print(f"Shape: '{query_to_save.shape}'")
query_to_save.tofile(file_name)
elif (k.find("attention.query_key_value.bias") != -1
or k.find("inter_attention.key_value.bias") != -1
or k.find("inter_attention.query.bias") != -1):
num_splits = 3
if k.find("inter_attention.key_value.bias") != -1:
num_splits = 2
if k.find("inter_attention.query.bias") != -1:
num_splits = 1
local_dim = int(val.shape[-1] / num_splits)
head_num = model_args['num_attention_heads']
size_per_head = model_args['kv_channels']
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(num_splits, local_dim)
factor = 1
split_vals = np.split(val, factor, axis=-1)
if k.find("attention.query_key_value.bias") != -1:
query_key_value_bias = split_vals[0]
prefix = k
query_key_value_bias_tensor_para = np.split(query_key_value_bias, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
query_key_value_bias_to_save = query_key_value_bias_tensor_para[i]
print(f"Shape: '{query_key_value_bias_to_save.shape}'")
query_key_value_bias_to_save.tofile(file_name)
if k.find("inter_attention.key_value.bias") != -1:
key_bias = split_vals[0][0, ...]
value_bias = split_vals[0][1, ...]
prefix = k[:-14] + "key.bias"
key_bias_tensor_para = np.split(key_bias, tensor_para_size, axis=0)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
key_bias_to_save = key_bias_tensor_para[i]
print(f"Shape: '{key_bias_to_save.shape}'")
key_bias_to_save.tofile(file_name)
prefix = k[:-14] + "value.bias"
value_bias_tensor_para = np.split(value_bias, tensor_para_size, axis=0)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
value_bias_to_save = value_bias_tensor_para[i]
print(f"Shape: '{value_bias_to_save.shape}'")
value_bias_to_save.tofile(file_name)
if k.find("inter_attention.query.bias") != -1:
query_bias = split_vals[0]
prefix = k
query_bias_tensor_para = np.split(query_bias, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
query_bias_to_save = query_bias_tensor_para[i]
print(f"Shape: '{query_bias_to_save.shape}'")
query_bias_to_save.tofile(file_name)
elif k.find('experts') == -1:
if k.find('deepspeed_moe.gate') != -1:
layer_id = int(k[7:k.find('.', 7)])
moe_layers_in_decoder.append(layer_id)
moe_layers_num += 1
prefix = k
if k.find('layernorm') != -1 or k.find('gate') != -1 or k.find("attention.dense.bias") != -1 or k.find("dense_4h_to_h.bias") != -1 or \
k.find("inter_attention.dense.bias") != -1:
file_name = os.path.join(output_dir, "decoder." + prefix + ".bin")
print(f"Saving '{prefix}' to '{file_name}'")
print(f"Shape: '{val.shape}'")
val.tofile(file_name)
else:
val_tensor_para = []
if k.find("attention.dense.weight") != -1 or k.find("dense_4h_to_h.weight") != -1 or k.find("dense_h_to_4h.bias") != -1:
val_tensor_para = np.split(val, tensor_para_size, axis=0)
elif k.find("dense_h_to_4h.weight") != -1:
val_tensor_para = np.split(val, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, "decoder." + prefix + "." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
print('moe_layers_in_decoder: ', moe_layers_in_decoder)
model_new_config["structure"]["moe_layers_in_decoder"] = moe_layers_in_decoder
# Saving experts weight
print(f"The number of moe layers is '{moe_layers_num}'")
for n in range(moe_layers_num):
experts_in_layer = []
fc1_weight = []
fc1_bias = []
fc2_weight = []
fc2_bias = []
prefix = None
for e in range(num_experts):
file_name = f"layer_{n}_expert_{e}_mp_rank_00_model_states.pt"
file_path = os.path.join(checkpoint_dir, file_name)
expert_dict = torch.load(file_path, map_location=device)
for k, v in expert_dict.items():
#val = v.T.float().cpu()#.numpy().astype(np.float32)
if k.find('dense_h_to_4h.weight') != -1:
if prefix is None:
prefix = k[15:-22]
fc1_weight.append(v)
if k.find('dense_h_to_4h.bias') != -1:
fc1_bias.append(v)
if k.find('dense_4h_to_h.weight') != -1:
fc2_weight.append(v)
if k.find('dense_4h_to_h.bias') != -1:
fc2_bias.append(v)
stacked_fc1_weight = torch.stack(fc1_weight, 0).transpose(-1, -2).contiguous()
val = stacked_fc1_weight.float().cpu().numpy() # (num_experts, d_model, d_ff)
val_tensor_para = np.split(val, tensor_para_size, axis=2)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, prefix + "dense_h_to_4h.weight." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
stacked_fc1_bias = torch.stack(fc1_bias, 0).contiguous()
val = stacked_fc1_bias.float().cpu().numpy() # (num_experts, d_ff)
val_tensor_para = np.split(val, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, prefix + "dense_h_to_4h.bias." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
stacked_fc2_weight = torch.stack(fc2_weight, 0).transpose(-1, -2).contiguous()
val = stacked_fc2_weight.float().cpu().numpy() # (num_experts, d_ff, d_model)
val_tensor_para = np.split(val, tensor_para_size, axis=1)
for i in range(tensor_para_size):
file_name = os.path.join(output_dir, prefix + "dense_4h_to_h.weight." + str(i) + ".bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
val_to_save = val_tensor_para[i]
print(f"Shape: '{val_to_save.shape}'")
val_to_save.tofile(file_name)
stacked_fc2_bias = torch.stack(fc2_bias, 0)
val = stacked_fc2_bias.float().cpu().numpy()
file_name = os.path.join(output_dir, prefix + "dense_4h_to_h.bias.bin")
print(f"Saving '{i}' '{prefix}' to '{file_name}'")
print(f"Shape: '{val_to_save.shape}'")
val.tofile(file_name)
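# Layout of the exported expert tensors (derived from the stacking/splitting above):
#   fc1 weight -> (num_experts, d_model, d_ff), split across tensor-parallel ranks on the d_ff axis
#   fc1 bias   -> (num_experts, d_ff),          split on the d_ff axis
#   fc2 weight -> (num_experts, d_ff, d_model), split on the d_ff axis
#   fc2 bias   -> (num_experts, d_model),       shared, saved once without a rank suffix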
config = configparser.ConfigParser()
config["encoder"] = {}
config["decoder"] = {}
config["encoder"]["weight_data_type"] = args.weight_data_type
config["decoder"]["weight_data_type"] = args.weight_data_type
for key, val in model_args.items():
if key in encoder_config_mapping.keys():
if key == 'num_experts' and type(val) is list:
val = val[0]
config["encoder"][encoder_config_mapping[key]] = f"{val}"
if key in decoder_config_mapping.keys():
if key == 'num_experts' and type(val) is list:
val = val[0]
config["decoder"][decoder_config_mapping[key]] = f"{val}"
for key, val in decoder_new_config.items():
config["decoder"][key] = f"{val}"
for key, val in model_new_config.items():
config[key] = {}
for val_key, val_val in val.items():
config[key][val_key] = f"{val_val}"
config_save_path = os.path.join(output_dir, "config.ini")
with open(config_save_path, 'w') as configfile:
config.write(configfile)
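# The generated config.ini plus the per-tensor .bin files (one file per tensor-parallel
# shard for split tensors) form the checkpoint layout expected by the FasterTransformer
# T5 weight loader.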
|
FasterTransformer-main
|
examples/pytorch/t5/utils/megatron-deepspeed_t5_ckpt_convert.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import concurrent.futures
import configparser
import datetime
import logging
import os
import pathlib
import shutil
import sys
import tempfile
import typing
import numpy as np
import torch # pytype: disable=import-error
import yaml
# verify if root package is in PYTHONPATH
__root_package_path__ = pathlib.Path(__file__).parent.parent.parent.parent.parent.absolute().as_posix()
if __root_package_path__ not in sys.path:
print(
f"[ERROR] add project root directory to your PYTHONPATH with "
f"'export PYTHONPATH={__root_package_path__}:${{PYTHONPATH}}'"
)
from examples.pytorch.nemo import unpack_nemo_ckpt, UnpackedNemoCheckpointDir, extract_layers_with_prefix
from examples.pytorch.utils import gpu_map_location, WEIGHT2DTYPE, torch2np, cpu_map_location, safe_transpose
LOGGER = logging.getLogger(__name__)
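# Name-mapping tables: keys are Megatron/NeMo parameter-name fragments, values are the
# HuggingFace-T5-style names used for the exported FasterTransformer .bin files.
# A list value means a single fused Megatron tensor is exported as several tensors
# (e.g. the fused query_key_value projection becomes separate q/k/v files).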
shared_mapping = {
"wte": "shared.weight",
"wte_T": "shared.weight_T",
"ape": "shared.ape",
"encoder_rpe": "block.0.layer.0.SelfAttention.relative_attention_bias",
"decoder_rpe": "block.0.layer.0.SelfAttention.relative_attention_bias",
}
common_mapping = {
"input_layernorm": "layer.0.layer_norm",
"self_attention.dense": "layer.0.SelfAttention.o",
"post_attention_layernorm": "layer.1.layer_norm",
"final_layernorm": "final_layer_norm",
"adapter_1.layernorm": "after_attention_adapter.layer_norm",
"adapter_1.mlp.dense_h_to_4h": "after_attention_adapter.DenseSiluDense.wi",
"adapter_1.mlp.dense_4h_to_h": "after_attention_adapter.DenseSiluDense.wo",
"adapter_2.layernorm": "after_ffn_adapter.layer_norm",
"adapter_2.mlp.dense_h_to_4h": "after_ffn_adapter.DenseSiluDense.wi",
"adapter_2.mlp.dense_4h_to_h": "after_ffn_adapter.DenseSiluDense.wo",
}
encoder_mapping = {
**common_mapping,
"self_attention.query_key_value": ["layer.0.SelfAttention.q", "layer.0.SelfAttention.k", "layer.0.SelfAttention.v"],
"mlp.dense_h_to_4h": "layer.1.DenseReluDense.wi",
"mlp.dense_h_to_4h_2": "layer.1.DenseReluDense.wi2", ## gated activation
"mlp.dense_4h_to_h": "layer.1.DenseReluDense.wo",
}
decoder_mapping = {
**common_mapping,
"self_attention.query_key_value": ["layer.0.SelfAttention.qkv"],
"inter_attention.query": ["layer.1.EncDecAttention.q"],
"inter_attention.key_value": ["layer.1.EncDecAttention.k", "layer.1.EncDecAttention.v"],
"inter_attention.dense": "layer.1.EncDecAttention.o",
"post_inter_attention_layernorm": "layer.2.layer_norm",
"mlp.dense_h_to_4h": "layer.2.DenseReluDense.wi",
"mlp.dense_h_to_4h_2": "layer.2.DenseReluDense.wi2",
"mlp.dense_4h_to_h": "layer.2.DenseReluDense.wo",
}
megatron_HF_name_mapping = {"shared": shared_mapping, "encoder": encoder_mapping, "decoder": decoder_mapping}
encoder_config_mapping = {
"num_attention_heads": "num_heads",
"hidden_size": "d_model",
"kv_channels": "d_kv",
"ffn_hidden_size": "d_ff",
"num_layers": "num_layers",
"max_position_embeddings": "relative_attention_num_buckets_or_max_pos_seq_len",
"relative_attention_num_buckets": "relative_attention_num_buckets_or_max_pos_seq_len",
"activation": "feed_forward_proj",
}
decoder_config_mapping = {
"num_attention_heads": "num_heads",
"hidden_size": "d_model",
"kv_channels": "d_kv",
"ffn_hidden_size": "d_ff",
"num_layers": "num_layers",
"max_position_embeddings": "relative_attention_num_buckets_or_max_pos_seq_len",
"relative_attention_num_buckets": "relative_attention_num_buckets_or_max_pos_seq_len",
"activation": "feed_forward_proj",
}
def megatron2hf_name(saved_key, name_mapping):
saved_key = saved_key.replace("layers", "block")
split_last_dot = saved_key.rsplit(sep=".", maxsplit=1)
mapping_key = split_last_dot[0]
weight_or_bias = split_last_dot[1]
split_idx = mapping_key.find(".", 6) + 1
mapping_key_no_num = mapping_key[split_idx:]
block_num = mapping_key[: split_idx]
mapping_vals_no_num = name_mapping[mapping_key_no_num]
if not isinstance(mapping_vals_no_num, list):
mapping_vals_no_num = [mapping_vals_no_num]
saved_keys = [block_num + val + "." + weight_or_bias for val in mapping_vals_no_num]
return saved_keys
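# Illustrative examples, assuming the mapping tables above:
#   megatron2hf_name("layers.3.self_attention.dense.weight", encoder_mapping)
#     -> ["block.3.layer.0.SelfAttention.o.weight"]
#   megatron2hf_name("layers.0.self_attention.query_key_value.bias", encoder_mapping)
#     -> ["block.0.layer.0.SelfAttention.q.bias",
#         "block.0.layer.0.SelfAttention.k.bias",
#         "block.0.layer.0.SelfAttention.v.bias"]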
def prompt_convert(args, prompt_config, prompt_weights):
prompt_templates = prompt_config["task_templates"]
# model config save dir
config_saved_dir = pathlib.Path(args.saved_dir) / f"{args.infer_gpu_num:d}-gpu"
# Configuration for the model (loaded by the Triton backend)
config_path = config_saved_dir / "config.ini"
config = configparser.ConfigParser()
with config_path.open("r") as config_file:
config.read_file(config_file)
num_tasks = len(prompt_templates)
prompt_learning_type = 3 # p_prompt_tuning
prompt_learning_start_id = config["encoder"]["vocab_size"] # hard-coded: prompt token ids start right after the base vocabulary
config["encoder"]["num_tasks"] = str(num_tasks)
config["encoder"]["prompt_learning_start_id"] = str(prompt_learning_start_id)
config["encoder"]["prompt_learning_type"] = str(prompt_learning_type)
for task_name_id, prompt_task in enumerate(prompt_templates):
prompt_task_name = prompt_task["taskname"]
prompt_length = int(prompt_task["total_virtual_tokens"])
config[f"task_{task_name_id:d}"] = {}
config[f"task_{task_name_id:d}"]["task_name"] = prompt_task_name
config[f"task_{task_name_id:d}"]["prompt_length"] = str(prompt_length)
prompt_task_weights = prompt_weights["prompt_table"][
f"prompt_table.{prompt_task_name}.prompt_embeddings.weight"
]
# put converted prompts weights to the model weights saved dir
prompt_task_weights_output_path = config_saved_dir / f"model.prompt_table.{prompt_task_name}.weight.bin"
val = torch2np(prompt_task_weights)
val.tofile(prompt_task_weights_output_path)
if prompt_config["data"]["decoder_starts_with_pad"]:
config["decoder"]["decoder_start_token_id"] = config["decoder"]["pad_id"]
with config_path.open("w") as config_file:
config.write(config_file)
LOGGER.info(">>>>>>>>>>>>>>>> model saved config")
LOGGER.info(config_path.read_text())
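# prompt_convert appends to the already-written config.ini (illustrative summary):
#   [encoder] gains num_tasks, prompt_learning_start_id and prompt_learning_type,
#   one [task_<i>] section per task with task_name and prompt_length,
# and writes one model.prompt_table.<taskname>.weight.bin file per task.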
# This tool supports the new Megatron models trained with pipeline parallelism + tensor parallelism
def merge_and_convert_process(
model_type,
tensor_para_rank,
pipeline_para_rank,
saved_dir,
factor,
key,
nemo_model_config,
models_list,
np_weight_data_type,
):
try:
assert model_type == "encoder" or model_type == "decoder"
model_config = nemo_model_config.get(model_type, None)
num_layers = model_config["num_layers"] if model_config is not None else nemo_model_config["num_layers"]
prefix = model_type
pipeline_para_size = nemo_model_config["pipeline_model_parallel_size"]
pipeline_model_parallel_split_rank = nemo_model_config.get("pipeline_model_parallel_split_rank", 0)
major_device = models_list[0][key].device
name_mapping = megatron_HF_name_mapping[model_type]
saved_dir = pathlib.Path(saved_dir)
if key.startswith("layers."):
layer_index = int(key[7 : key.find(".", 7)])
encoder_num_pipeline_stages = pipeline_model_parallel_split_rank
decoder_num_pipeline_stages = pipeline_para_size - pipeline_model_parallel_split_rank
offset = 0
if model_type == "encoder" and pipeline_para_size > 1:
offset = pipeline_para_rank * (num_layers // encoder_num_pipeline_stages)
elif model_type == "decoder" and pipeline_para_size > 1:
offset = (pipeline_para_rank - pipeline_model_parallel_split_rank) * (
num_layers // decoder_num_pipeline_stages
)
saved_key = key.replace(f"layers.{layer_index}.", f"layers.{layer_index + offset}.")
else:
saved_key = key
is_not_legacy = model_config is None or not model_config.get("megatron_legacy", False)
if any(k in key for k in (
"input_layernorm.weight",
"input_layernorm.bias",
"self_attention.dense.bias",
"inter_attention.dense.bias",
"post_attention_layernorm.weight",
"post_inter_attention_layernorm.weight",
"post_attention_layernorm.bias",
"post_inter_attention_layernorm.bias",
"mlp.dense_4h_to_h.bias",
"final_layernorm.weight",
"final_layernorm.bias",
"adapter_1.layernorm.weight",
"adapter_1.layernorm.bias",
"adapter_2.layernorm.weight",
"adapter_2.layernorm.bias",
)):
# shared weights, only need to convert the weights of rank 0
if tensor_para_rank == 0:
saved_keys = megatron2hf_name(saved_key, name_mapping)
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.bin"
val = safe_transpose(models_list[0][key])
val = torch2np(val, np_weight_data_type)
val = np.squeeze(val)
LOGGER.debug(
"merge for pp_rank=%d tp_rank=%d only for tp_rank=0 src_key=%s filename=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
elif any(k in key for k in (
# split along the first dimension
"self_attention.dense.weight",
"inter_attention.dense.weight",
"mlp.dense_4h_to_h.weight",
# split along the last dimension
"mlp.dense_h_to_4h.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h_2.weight",
"mlp.dense_h_to_4h_2.bias",
)):
axis = 0 if any(k in key for k in (
"self_attention.dense.weight",
"inter_attention.dense.weight",
"mlp.dense_4h_to_h.weight",
)) else -1
vals = []
for k in range(factor):
val = safe_transpose(models_list[k][key])
val = val.float().to(major_device)
vals.append(val)
saved_keys = megatron2hf_name(saved_key, name_mapping)
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{tensor_para_rank:d}.bin"
val = torch.cat(vals, dim=axis)
val = torch2np(val, np_weight_data_type)
LOGGER.debug(
"merge for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
elif any(k in key for k in (
"self_attention.query_key_value.bias",
"inter_attention.query.bias",
"inter_attention.key_value.bias",
)):
num_splits = 3
if "inter_attention.key_value.bias" in key:
num_splits = 2
if "inter_attention.query.bias" in key:
num_splits = 1
vals = []
for k in range(factor):
val = safe_transpose(models_list[k][key])
val = val.float()
local_dim = int(val.shape[-1] / num_splits)
if model_config is not None:
head_num = model_config["num_attention_heads"] // nemo_model_config["tensor_model_parallel_size"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = model_config["kv_channels"]
else:
head_num = nemo_model_config["num_attention_heads"] // nemo_model_config["tensor_model_parallel_size"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = nemo_model_config["kv_channels"]
if is_not_legacy:
val = val.reshape(head_num, num_splits, size_per_head)
val = val.permute(1, 0, 2)
val = val.reshape(num_splits, local_dim)
vals.append(val.to(major_device))
saved_vals = torch.cat(vals, dim=-1)
saved_keys = megatron2hf_name(saved_key, name_mapping)
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{tensor_para_rank:d}.bin"
val = torch2np(saved_vals, np_weight_data_type)
LOGGER.debug(
"merge for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
else:
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{tensor_para_rank:d}.bin"
val = torch2np(saved_vals[index, ...], np_weight_data_type)
LOGGER.debug(
"merge for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
elif any(k in key for k in (
"self_attention.query_key_value.weight",
"inter_attention.query.weight",
"inter_attention.key_value.weight",
)):
num_splits = 3
if "inter_attention.key_value.weight" in key:
num_splits = 2
if "inter_attention.query.weight" in key:
num_splits = 1
vals = []
for k in range(factor):
val = safe_transpose(models_list[k][key])
val = val.float()
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / num_splits)
if model_config is not None:
head_num = model_config["num_attention_heads"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = model_config["kv_channels"]
head_num = head_num // nemo_model_config["tensor_model_parallel_size"]
else:
head_num = nemo_model_config["num_attention_heads"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = nemo_model_config["kv_channels"]
head_num = head_num // nemo_model_config["tensor_model_parallel_size"]
if is_not_legacy:
# shape of self_attention.query_key_value.weight is [hidden_dim, head_num * 3 * size_per_head]
# convert to [hidden_dim, 3, head_num, size_per_head]
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.permute(0, 2, 1, 3)
val = val.reshape(hidden_dim, num_splits, local_dim)
vals.append(val.to(major_device))
saved_vals = torch.cat(vals, dim=-1)
saved_keys = megatron2hf_name(saved_key, name_mapping)
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{tensor_para_rank:d}.bin"
val = torch2np(saved_vals, np_weight_data_type)
LOGGER.debug(
"merge for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
else:
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{tensor_para_rank:d}.bin"
val = torch2np(saved_vals[:, index, ...], np_weight_data_type)
LOGGER.debug(
"merge for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
else:
LOGGER.error(f"cannot find key '{key}'")
except Exception as e:
LOGGER.error(f"fail to convert {key} with error {e}.")
def split_and_convert_process(
model_type,
tensor_para_rank,
pipeline_para_rank,
saved_dir,
factor,
key,
nemo_model_config,
models_list,
np_weight_data_type,
):
try:
assert model_type == "encoder" or model_type == "decoder"
model_config = nemo_model_config.get(model_type, None)
num_layers = model_config["num_layers"] if model_config is not None else nemo_model_config["num_layers"]
prefix = model_type
pipeline_para_size = nemo_model_config["pipeline_model_parallel_size"]
pipeline_model_parallel_split_rank = nemo_model_config.get("pipeline_model_parallel_split_rank", 0)
name_mapping = megatron_HF_name_mapping[model_type]
val = safe_transpose(models_list[0][key])
val = torch2np(val, np_weight_data_type)
if key.startswith("layers."):
layer_index = int(key[7 : key.find(".", 7)])
encoder_num_pipeline_stages = pipeline_model_parallel_split_rank
decoder_num_pipeline_stages = pipeline_para_size - pipeline_model_parallel_split_rank
offset = 0
if model_type == "encoder" and pipeline_para_size > 1:
offset = pipeline_para_rank * (num_layers // encoder_num_pipeline_stages)
elif model_type == "decoder" and pipeline_para_size > 1:
offset = (pipeline_para_rank - pipeline_model_parallel_split_rank) * (
num_layers // decoder_num_pipeline_stages
)
saved_key = key.replace(f"layers.{layer_index}.", f"layers.{layer_index + offset}.")
else:
saved_key = key
is_not_legacy = model_config is None or not model_config.get("megatron_legacy", False)
if any(k in key for k in (
"input_layernorm.weight",
"input_layernorm.bias",
"self_attention.dense.bias",
"inter_attention.dense.bias",
"post_attention_layernorm.weight",
"post_inter_attention_layernorm.weight",
"post_attention_layernorm.bias",
"post_inter_attention_layernorm.bias",
"mlp.dense_4h_to_h.bias",
"final_layernorm.weight",
"final_layernorm.bias",
"adapter_1.layernorm.weight",
"adapter_1.layernorm.bias",
"adapter_2.layernorm.weight",
"adapter_2.layernorm.bias",
)):
# shared weights, only need to convert the weights of rank 0
if tensor_para_rank == 0:
saved_keys = megatron2hf_name(saved_key, name_mapping)
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.bin"
LOGGER.debug(
"split for pp_rank=%d tp_rank=%d only for tp_rank=0 src_key=%s filename=%s "
"shape=%s (same as original) dtype=%s",
pipeline_para_rank,
tensor_para_rank,
key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
elif any(k in key for k in (
# split along the first dimension
"self_attention.dense.weight",
"inter_attention.dense.weight",
"mlp.dense_4h_to_h.weight",
# split along the last dimension
"mlp.dense_h_to_4h.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h_2.weight",
"mlp.dense_h_to_4h_2.bias",
)):
axis = 0 if any(k in key for k in (
"self_attention.dense.weight",
"inter_attention.dense.weight",
"mlp.dense_4h_to_h.weight",
)) else -1
split_vals = np.split(val, factor, axis=axis)
saved_keys = megatron2hf_name(saved_key, name_mapping)
for j in range(factor):
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{tensor_para_rank * factor + j:d}.bin"
LOGGER.debug(
"split for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s original_shape=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
split_vals[j].shape,
split_vals[j].dtype,
)
split_vals[j].tofile(saved_path)
elif any(k in key for k in (
"self_attention.query_key_value.bias",
"inter_attention.query.bias",
"inter_attention.key_value.bias",
)):
num_splits = 3
if "inter_attention.key_value.bias" in key:
num_splits = 2
if "inter_attention.query.bias" in key:
num_splits = 1
local_dim = int(val.shape[-1] / num_splits)
if model_config is not None:
head_num = model_config["num_attention_heads"] // nemo_model_config["tensor_model_parallel_size"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = model_config["kv_channels"]
else:
head_num = nemo_model_config["num_attention_heads"] // nemo_model_config["tensor_model_parallel_size"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = nemo_model_config["kv_channels"]
if is_not_legacy:
val = val.reshape(head_num, num_splits, size_per_head)
val = val.transpose(1, 0, 2)
val = val.reshape(num_splits, local_dim)
split_vals = np.split(val, factor, axis=-1)
saved_keys = megatron2hf_name(saved_key, name_mapping)
for j in range(factor):
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{tensor_para_rank * factor + j:d}.bin"
LOGGER.debug(
"split for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s "
"preprocessed_shape=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
split_vals[j].shape,
split_vals[j].dtype,
)
split_vals[j].tofile(saved_path)
continue
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{tensor_para_rank * factor + j:d}.bin"
split_val_idxed = split_vals[j][index, ...]
LOGGER.debug(
"split for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s "
"preprocessed_shape=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
split_val_idxed.shape,
split_val_idxed.dtype,
)
split_val_idxed.tofile(saved_path)
elif any(k in key for k in (
"self_attention.query_key_value.weight",
"inter_attention.query.weight",
"inter_attention.key_value.weight",
)):
num_splits = 3
if "inter_attention.key_value.weight" in key:
num_splits = 2
if "inter_attention.query.weight" in key:
num_splits = 1
hidden_dim = val.shape[0]
local_dim = int(val.shape[-1] / num_splits)
if model_config is not None:
head_num = model_config["num_attention_heads"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = model_config["kv_channels"]
head_num = head_num // nemo_model_config["tensor_model_parallel_size"]
else:
head_num = nemo_model_config["num_attention_heads"]
# t5 kv_channels may not be equal to hidden_size // head_num
size_per_head = nemo_model_config["kv_channels"]
head_num = head_num // nemo_model_config["tensor_model_parallel_size"]
if is_not_legacy:
# shape of self_attention.query_key_value.weight is [hidden_dim, head_num * 3 * size_per_head]
# convert to [hidden_dim, 3, head_num, size_per_head]
val = val.reshape(hidden_dim, head_num, num_splits, size_per_head)
val = val.transpose(0, 2, 1, 3)
val = val.reshape(hidden_dim, num_splits, local_dim)
split_vals = np.split(val, factor, axis=-1)
saved_keys = megatron2hf_name(saved_key, name_mapping)
for j in range(factor):
if len(saved_keys) == 1:
saved_path = saved_dir / f"{prefix}.{saved_keys[0]}.{tensor_para_rank * factor + j:d}.bin"
LOGGER.debug(
"split for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s "
"preprocessed_shape=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
split_vals[j].shape,
split_vals[j].dtype,
)
split_vals[j].tofile(saved_path)
continue
for index in range(len(saved_keys)):
saved_path = saved_dir / f"{prefix}.{saved_keys[index]}.{tensor_para_rank * factor + j:d}.bin"
split_val_idxed = split_vals[j][:, index, ...]
LOGGER.debug(
"split for pp_rank=%d tp_rank=%d factor=%d src_key=%s filename=%s "
"preprocessed_shape=%s shape=%s dtype=%s",
pipeline_para_rank,
tensor_para_rank,
factor,
key,
saved_path.name,
val.shape,
split_val_idxed.shape,
split_val_idxed.dtype,
)
split_val_idxed.tofile(saved_path)
else:
LOGGER.error(f"cannot find key '{key}'")
except Exception as e:
LOGGER.error(f"fail to convert {key} with error {e}.")
def flatten_adapter(adapter: dict) -> dict:
"""Flatten adapter weights."""
adapter = adapter.get("state_dict", adapter)
result = {}
for key, val in adapter.items():
assert isinstance(key, str)
assert isinstance(val, dict)
pos_adapter = key.find(":adapter")
assert pos_adapter != -1
prefix = key[:pos_adapter]
suffix = key[pos_adapter + 1:]
assert suffix.endswith("_1") or suffix.endswith("_2")
key_prefix = f"{prefix}.{suffix}."
name_mapping = {
"module.0.bias": "layernorm.bias",
"module.0.weight": "layernorm.weight",
"module.1.weight": "mlp.dense_h_to_4h.weight",
"module.3.weight": "mlp.dense_4h_to_h.weight",
}
result.update({key_prefix + name_mapping[k]: v for k, v in val.items()})
return result
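# Illustrative example, assuming NeMo linear-adapter keys of the form "<prefix>:adapter_1":
#   {"...layers.0:adapter_1": {"module.0.weight": w}} is flattened to
#   {"...layers.0.adapter_1.layernorm.weight": w}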
def convert_checkpoint(unpacked_checkpoints_dir: UnpackedNemoCheckpointDir, args: argparse.Namespace,
unpacked_adapter_dir: typing.Optional[UnpackedNemoCheckpointDir] = None):
nemo_model_config = unpacked_checkpoints_dir.model_config
has_adapters = unpacked_adapter_dir is not None
adapter_model_config = unpacked_adapter_dir.model_config if has_adapters else None
if has_adapters:
tuning_config = adapter_model_config['adapter_tuning']
# TODO: support type == 'parallel_adapter'
assert tuning_config['type'] == 'linear_adapter'
adapter_norm_position = tuning_config['norm_position']
# TODO: support norm_position == 'post'
assert adapter_norm_position == 'pre'
assert tuning_config['norm_type'] == 'mixedfusedlayernorm'
adapter_inter_size = tuning_config["adapter_dim"]
else:
adapter_norm_position = None
adapter_inter_size = None
encoder_config = nemo_model_config.get("encoder", None)
decoder_config = nemo_model_config.get("decoder", None)
assert (encoder_config is None and decoder_config is None) or (
encoder_config is not None and decoder_config is not None)
if encoder_config is not None:
if encoder_config.get("kv_channels", None) is None:
encoder_config["kv_channels"] = encoder_config["hidden_size"] // encoder_config["num_attention_heads"]
if decoder_config.get("kv_channels", None) is None:
decoder_config["kv_channels"] = decoder_config["hidden_size"] // decoder_config["num_attention_heads"]
else:
# shared config for encoder and decoder
if nemo_model_config.get("kv_channels", None) is None:
nemo_model_config["kv_channels"] = nemo_model_config["hidden_size"] // nemo_model_config[
"num_attention_heads"]
inference_tensor_para_size = args.infer_gpu_num
# if checkpoint files could be found, start preparing the output dir
saved_dir = pathlib.Path(args.saved_dir) / f"{inference_tensor_para_size:d}-gpu"
if saved_dir.exists():
LOGGER.error("Remove %s target directory before running conversion", saved_dir)
for file_path in saved_dir.rglob("*"):
LOGGER.debug(" %s", file_path.relative_to(saved_dir))
sys.exit(1)
saved_dir.mkdir(parents=True)
training_tensor_para_size = nemo_model_config.get("tensor_model_parallel_size", 1)
training_pipeline_para_size = nemo_model_config.get("pipeline_model_parallel_size", 1)
checkpoints_paths = unpacked_checkpoints_dir.get_checkpoints_paths(
training_tensor_para_size,
training_pipeline_para_size,
)
LOGGER.debug("Expecting checkpoints paths in:")
for tp_rank_checkpoints_paths in checkpoints_paths:
for checkpoint_path in tp_rank_checkpoints_paths:
LOGGER.debug(" %s", checkpoint_path)
if has_adapters:
adapter_tensor_para_size = adapter_model_config.get("tensor_model_parallel_size", 1)
assert adapter_tensor_para_size == training_tensor_para_size
adapter_pipeline_para_size = adapter_model_config.get("pipeline_model_parallel_size", 1)
assert adapter_pipeline_para_size == training_pipeline_para_size
adapter_paths = unpacked_adapter_dir.get_checkpoints_paths(
training_tensor_para_size,
training_pipeline_para_size,
)
LOGGER.debug("Expecting adapter checkpoints paths in:")
for tp_rank_adapter_paths in adapter_paths:
for adapter_path in tp_rank_adapter_paths:
LOGGER.debug(" %s", adapter_path)
else:
adapter_paths = None
map_location_fn = cpu_map_location if bool(args.load_checkpoints_to_cpu) else gpu_map_location
np_weight_data_type = WEIGHT2DTYPE[args.weight_data_type]
has_gated_activations = False
for pipeline_rank in range(len(checkpoints_paths[0])):
model_from_selected_pipeline = torch.load(checkpoints_paths[0][pipeline_rank], map_location=map_location_fn)
model_from_selected_pipeline = model_from_selected_pipeline.get("state_dict", model_from_selected_pipeline)
LOGGER.debug(f"Existent pipeline_rank={pipeline_rank} keys:")
for key in model_from_selected_pipeline.keys():
LOGGER.debug(" %s", key)
if adapter_paths is not None:
adapter_from_selected_pipeline = torch.load(adapter_paths[0][pipeline_rank], map_location=map_location_fn)
adapter_from_selected_pipeline = adapter_from_selected_pipeline.get("state_dict", adapter_from_selected_pipeline)
LOGGER.debug(f"Existent adapter pipeline_rank={pipeline_rank} keys:")
for key in adapter_from_selected_pipeline.keys():
LOGGER.debug(" %s", key)
encoder_ape_key = "enc_dec_model.encoder_embedding.position_embeddings.weight"
if encoder_ape_key in model_from_selected_pipeline.keys():
saved_path = saved_dir / "shared.ape.bin"
# embedding table, not a projection weight, so no transpose is needed
val = model_from_selected_pipeline[encoder_ape_key]
val = torch2np(val, np_weight_data_type)
LOGGER.debug(
"save for pp_rank=%d src_key=%s saved_keys=%s shape=%s dtype=%s",
pipeline_rank,
encoder_ape_key,
saved_path.name,
val.shape,
val.dtype,
)
val.tofile(saved_path)
has_gated_activations |= any("mlp.dense_h_to_4h_2" in key for key in model_from_selected_pipeline.keys())
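# The presence of a dense_h_to_4h_2 projection in any layer indicates a gated activation
# (e.g. a gated GELU/SiLU FFN); this is recorded in config.ini as use_gated_activation.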
def _split(src_key, dst_filename_fn):
if src_key in model_from_selected_pipeline.keys():
_val = model_from_selected_pipeline[src_key]
_val = safe_transpose(_val)
_val = torch2np(_val, np_weight_data_type)
_val = np.split(_val, inference_tensor_para_size, axis=0)
for tensor_idx in range(inference_tensor_para_size):
saved_path = saved_dir / dst_filename_fn(tensor_idx)
LOGGER.debug(
"save for pp_rank=%d src_key=%s filename=%s shape=%s dtype=%s",
pipeline_rank,
src_key,
saved_path.name,
_val[tensor_idx].shape,
_val[tensor_idx].dtype,
)
_val[tensor_idx].tofile(saved_path)
del _val
# split rpe into tensor parallel ranks
_split(
"enc_dec_model.encoder_relative_position_embedding.relative_position_embedding.weight",
lambda idx: f"encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight.{idx}.bin",
)
_split(
"enc_dec_model.decoder_relative_position_embedding.relative_position_embedding.weight",
lambda idx: f"decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight.{idx}.bin",
)
del model_from_selected_pipeline
if encoder_config is not None:
nemo_position_embedding_type = encoder_config.get("position_embedding_type", "absolute")
assert encoder_config.get("position_embedding_type", "absolute") == decoder_config.get("position_embedding_type", "absolute")
else:
nemo_position_embedding_type = nemo_model_config.get("position_embedding_type", "absolute")
nemo_position_embedding_type = (
"absolute" if nemo_position_embedding_type == "learned_absolute" else nemo_position_embedding_type
)
with_bias = nemo_model_config.get("tokens_head_bias", True)
model_new_config = {
"structure": {
"t5_with_bias": str(with_bias),
"position_embedding_type": nemo_position_embedding_type,
"use_gated_activation": str(has_gated_activations),
}
}
if training_tensor_para_size > inference_tensor_para_size:
assert training_tensor_para_size % inference_tensor_para_size == 0
is_merge_ckpt = True
factor = int(training_tensor_para_size / inference_tensor_para_size)
else:
assert inference_tensor_para_size % training_tensor_para_size == 0
is_merge_ckpt = False
factor = int(inference_tensor_para_size / training_tensor_para_size)
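# Example: training TP=8, inference TP=2 -> merge with factor=4 (four training shards per
# inference shard); training TP=2, inference TP=8 -> split with factor=4 (each training
# shard is cut into four inference shards).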
if encoder_config is not None:
assert encoder_config["ffn_hidden_size"] % inference_tensor_para_size == 0
assert encoder_config["num_attention_heads"] % inference_tensor_para_size == 0
assert encoder_config["ffn_hidden_size"] == decoder_config["ffn_hidden_size"]
assert encoder_config["num_attention_heads"] == decoder_config["num_attention_heads"]
else:
assert nemo_model_config["ffn_hidden_size"] % inference_tensor_para_size == 0
assert nemo_model_config["num_attention_heads"] % inference_tensor_para_size == 0
main_loop = min(training_tensor_para_size, inference_tensor_para_size)
word_embedding_key = "enc_dec_model.encoder_embedding.word_embeddings.weight"
w_e_list = []
lm_head_bias_key = "enc_dec_model.tokens_head.bias"
lm_head_bias_list = []
lm_head_weight_key = "enc_dec_model.tokens_head.weight"
lm_head_weight_list = []
def _extract_pp_weights(model, pp_idx: int):
if pp_idx == 0:
word_embedding_val = model[word_embedding_key]
word_embedding_val = torch2np(word_embedding_val, np_weight_data_type)
w_e_list.append(word_embedding_val)
if pp_idx == training_pipeline_para_size - 1 and with_bias:
lm_hb_val = model[lm_head_bias_key]
lm_hb_val = torch2np(lm_hb_val, np_weight_data_type)
lm_head_bias_list.append(lm_hb_val)
if lm_head_weight_key in model:
if pp_idx == training_pipeline_para_size - 1:
lm_hw_val = model[lm_head_weight_key]
lm_hw_val = torch2np(lm_hw_val, np_weight_data_type)
lm_head_weight_list.append(lm_hw_val)
else:
if pp_idx == 0:
lm_hw_val = model[word_embedding_key]
lm_hw_val = torch2np(lm_hw_val, np_weight_data_type)
lm_head_weight_list.append(lm_hw_val)
torch.multiprocessing.set_start_method("spawn")
torch.multiprocessing.set_sharing_strategy("file_system")
with concurrent.futures.ProcessPoolExecutor(args.processes) as pool:
for tp_idx in range(main_loop):
for pp_idx in range(training_pipeline_para_size):
encoder_models = []
decoder_models = []
def _append_to_models(model, adapter, rank_weights, is_merge: bool):
prefix_encoder = "enc_dec_model.enc_dec_model.encoder.model."
prefix_decoder = "enc_dec_model.enc_dec_model.decoder.model."
encoder_models.append(
extract_layers_with_prefix(model, prefix_encoder)
)
decoder_models.append(
extract_layers_with_prefix(model, prefix_decoder)
)
if adapter is not None:
encoder_models[-1].update(extract_layers_with_prefix(adapter, prefix_encoder))
decoder_models[-1].update(extract_layers_with_prefix(adapter, prefix_decoder))
operation = "merging" if is_merge else "copy/splitting"
LOGGER.debug(
"For pp_idx=%d tp_id=%d %s weights from %s extracted:", pp_idx, tp_idx, operation, rank_weights
)
LOGGER.debug(" encoder layers")
for name in encoder_models[-1]:
LOGGER.debug(" %s", name)
LOGGER.debug(" decoder layers")
for name in decoder_models[-1]:
LOGGER.debug(" %s", name)
if is_merge_ckpt:
for k in range(factor):
rank_weights = checkpoints_paths[tp_idx * factor + k][pp_idx]
model = torch.load(rank_weights, map_location=map_location_fn)
model = model.get("state_dict", model)
_extract_pp_weights(model, pp_idx)
if adapter_paths is not None:
adapter = flatten_adapter(
torch.load(adapter_paths[tp_idx][pp_idx], map_location=map_location_fn))
else:
adapter = None
_append_to_models(model, adapter, rank_weights, is_merge_ckpt)
else:
rank_weights = checkpoints_paths[tp_idx][pp_idx]
model = torch.load(rank_weights, map_location=map_location_fn)
model = model.get("state_dict", model)
_extract_pp_weights(model, pp_idx)
if adapter_paths is not None:
adapter = flatten_adapter(
torch.load(adapter_paths[tp_idx][pp_idx], map_location=map_location_fn))
else:
adapter = None
_append_to_models(model, adapter, rank_weights, is_merge_ckpt)
process_fn = merge_and_convert_process if is_merge_ckpt else split_and_convert_process
for key in encoder_models[0]:
pool.submit(
process_fn,
"encoder",
tp_idx, # tp_rank
pp_idx, # pp_rank
saved_dir,
factor,
key,
nemo_model_config,
encoder_models,
np_weight_data_type,
)
for key in decoder_models[0]:
pool.submit(
process_fn,
"decoder",
tp_idx, # tp_rank
pp_idx, # pp_rank
saved_dir,
factor,
key,
nemo_model_config,
decoder_models,
np_weight_data_type,
)
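# Each pool.submit call converts one parameter tensor in a worker process. The futures are
# not collected, so per-tensor failures only surface through the LOGGER.error calls inside
# the worker functions; the `with` block waits for all submitted work to finish before the
# shared embeddings below are written.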
w_e_saved_path = saved_dir / "shared.weight_T.bin"
lm_head_weight_saved_path = saved_dir / "lm_head.weight.bin"
lm_head_saved_path = saved_dir / "shared.bias.bin"
w_e_val = np.concatenate(w_e_list, axis=0)
lm_head_bias_val = np.concatenate(lm_head_bias_list, axis=0) if with_bias else None
lm_head_weight_val = np.concatenate(lm_head_weight_list, axis=0)
LOGGER.debug(
"save for src_key=%s filename=%s shape=%s dtype=%s",
word_embedding_key,
w_e_saved_path.name,
w_e_val.shape,
w_e_val.dtype,
)
if lm_head_bias_val is not None:
LOGGER.debug(
"save for src_key=%s filename=%s shape=%s dtype=%s",
lm_head_bias_key,
lm_head_saved_path.name,
lm_head_bias_val.shape,
lm_head_bias_val.dtype,
)
LOGGER.debug(
"save for src_key=%s filename=%s shape=%s dtype=%s",
lm_head_weight_key,
lm_head_weight_saved_path.name,
lm_head_weight_val.shape,
lm_head_weight_val.dtype,
)
w_e_val.tofile(w_e_saved_path)
if lm_head_bias_val is not None:
lm_head_bias_val.tofile(lm_head_saved_path)
lm_head_weight_val.tofile(lm_head_weight_saved_path)
vocab_size = w_e_val.shape[0]
config = configparser.ConfigParser()
if nemo_position_embedding_type == "absolute":
encoder_config_mapping.pop("relative_attention_num_buckets", None)
decoder_config_mapping.pop("relative_attention_num_buckets", None)
elif nemo_position_embedding_type == "relative":
encoder_config_mapping.pop("max_position_embeddings", None)
decoder_config_mapping.pop("max_position_embeddings", None)
else:
LOGGER.error(f"nemo_position_embedding_type should be absolute or relative")
# TODO adjust config for adapters
if encoder_config is not None:
merge_config = {}
for key, val in nemo_model_config.items():
if key not in encoder_config:
merge_config[key] = val
for key, val in encoder_config.items():
merge_config[key] = val
config["encoder"] = {
**{
"_name_or_path": args.model_name,
"model_type": "T5",
"weight_data_type": args.weight_data_type,
"tensor_para_size": str(inference_tensor_para_size),
"vocab_size": str(vocab_size),
},
**{
encoder_config_mapping[key]: str(val)
for key, val in merge_config.items()
if key in encoder_config_mapping
},
}
else:
config["encoder"] = {
**{
"_name_or_path": args.model_name,
"model_type": "T5",
"weight_data_type": args.weight_data_type,
"tensor_para_size": str(inference_tensor_para_size),
"vocab_size": str(vocab_size),
},
**{
encoder_config_mapping[key]: str(val)
for key, val in nemo_model_config.items()
if key in encoder_config_mapping
},
}
tokenizer_config = nemo_model_config["tokenizer"]
tokenizer_config = _update_tokenizer_config(tokenizer_config, unpacked_checkpoints_dir)
if args.tokenizer_model_path:
LOGGER.debug("Use tokenizer model passed from CLI: %s", args.tokenizer_model_path)
tokenizer_config["model"] = args.tokenizer_model_path
if args.vocab_path:
LOGGER.debug("Use tokenizer vocab passed from CLI: %s", args.vocab_path)
tokenizer_config["vocab_file"] = args.vocab_path
if args.merges_path:
LOGGER.debug("Use tokenizer merge passed from CLI: %s", args.merges_path)
tokenizer_config["merge_file"] = args.merges_path
_copy_tokenizer_file_if_defined("model", tokenizer_config["model"], saved_dir)
_copy_tokenizer_file_if_defined("vocab_file", tokenizer_config["vocab_file"], saved_dir)
_copy_tokenizer_file_if_defined("merge_file", tokenizer_config["merge_file"], saved_dir)
bos_id, eos_id, pad_id = _get_special_tokens_ids(tokenizer_config)
if decoder_config is not None:
merge_config = {}
for key, val in nemo_model_config.items():
if key not in decoder_config:
merge_config[key] = val
for key, val in decoder_config.items():
merge_config[key] = val
config["decoder"] = {
**{
"_name_or_path": args.model_name,
"model_type": "T5",
"weight_data_type": args.weight_data_type,
"tensor_para_size": str(inference_tensor_para_size),
"vocab_size": str(vocab_size),
"decoder_start_token_id": str(bos_id),
"eos_token_id": str(eos_id),
"pad_id": str(pad_id),
},
**{
decoder_config_mapping[key]: str(val)
for key, val in merge_config.items()
if key in decoder_config_mapping
},
}
else:
config["decoder"] = {
**{
"_name_or_path": args.model_name,
"model_type": "T5",
"weight_data_type": args.weight_data_type,
"tensor_para_size": str(inference_tensor_para_size),
"vocab_size": str(vocab_size),
"decoder_start_token_id": str(bos_id),
"eos_token_id": str(eos_id),
"pad_id": str(pad_id),
},
**{
decoder_config_mapping[key]: str(val)
for key, val in nemo_model_config.items()
if key in decoder_config_mapping
},
}
def _config_update_adapter(side: str):
config_side = config[side]
config_side['has_adapter'] = str(has_adapters)
config_side['adapter_inter_size'] = str(adapter_inter_size)
config_side['adapter_norm_position'] = adapter_norm_position
if has_adapters:
_config_update_adapter('encoder')
_config_update_adapter('decoder')
for section, section_dict in model_new_config.items():
config[section] = {k: str(v) for k, v in section_dict.items()}
with (saved_dir / "config.ini").open("w") as configfile:
config.write(configfile)
def _update_tokenizer_config(tokenizer_config: typing.Dict, unpacked_checkpoints_dir):
def _update_config_entry(key, file_pattern):
old_file_path = tokenizer_config[key]
if old_file_path:
LOGGER.debug("tokenizer %s %s type %s", key, old_file_path, type(old_file_path))
old_file_path = pathlib.Path(old_file_path)
new_file_path = unpacked_checkpoints_dir.get_tokenizer_file_path("tokenizer", key, file_pattern)
if new_file_path:
LOGGER.debug("Update tokenizer %s %s -> %s", key, old_file_path, new_file_path)
tokenizer_config[key] = new_file_path.as_posix()
elif not old_file_path.exists():
LOGGER.warning("Because tokenizer %s %s does not exists - set it as None", key, old_file_path)
tokenizer_config[key] = None
_update_config_entry("model", "*.model")
_update_config_entry("vocab_file", "*vocab*")
_update_config_entry("merge_file", "*merge*.txt")
return tokenizer_config
def _copy_tokenizer_file_if_defined(key_name, tokenizer_file_path, saved_dir):
if tokenizer_file_path:
tokenizer_file_path = pathlib.Path(tokenizer_file_path)
if tokenizer_file_path.exists():
tokenizer_basename = {
"model": "tokenizer",
"vocab_file": "vocab",
"merge_file": "merges",
}[key_name]
dst_path = saved_dir / f"{tokenizer_basename}{tokenizer_file_path.suffix}"
LOGGER.debug("Copy of %s %s file as %s", tokenizer_file_path, key_name, dst_path)
shutil.copy(tokenizer_file_path.as_posix(), dst_path.as_posix())
else:
LOGGER.debug("%s %s file does not exists", tokenizer_file_path, key_name)
def _get_special_tokens_ids(tokenizer_config: typing.Dict):
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from examples.pytorch.tokenizer import add_special_tokens_to_tokenizer
logging.getLogger("git.cmd").setLevel(logging.INFO)
logging.getLogger("h5py._conv").setLevel(logging.INFO)
logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("matplotlib.font_manager").setLevel(logging.INFO)
logging.getLogger("matplotlib.pyplot").setLevel(logging.INFO)
tokenizer = get_nmt_tokenizer(
library=tokenizer_config["library"],
model_name=tokenizer_config["type"],
tokenizer_model=tokenizer_config["model"],
vocab_file=tokenizer_config["vocab_file"],
merges_file=tokenizer_config["merge_file"],
legacy=True,
)
if tokenizer_config["library"] == "sentencepiece":
add_special_tokens_to_tokenizer(tokenizer)
bos_id = tokenizer.bos_id
eos_id = tokenizer.eos_id
pad_id = tokenizer.pad_id
LOGGER.debug("for %s obtained tokenizer tokens ids bos_id=%d eos_id=%d", tokenizer_config, bos_id, eos_id)
return bos_id, eos_id, pad_id
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--saved-dir",
"-saved_dir",
"-o",
help="folder name of output files",
required=True,
)
parser.add_argument(
"--in-file",
"-in_file",
"-i",
help="file name of .nemo checkpoint file or checkpoint dir",
required=True,
)
parser.add_argument(
"--infer-gpu-num",
"-infer_gpu_num",
"-i_g",
type=int,
help="How many gpus for inference",
required=True,
)
parser.add_argument(
"--processes",
"-processes",
"-p",
type=int,
default=64,
help="How many processes to spawn for conversion",
)
parser.add_argument(
"--weight-data-type",
"-weight_data_type",
choices=["fp32", "fp16"],
default="fp32",
help="Data type of results weights",
)
parser.add_argument(
"--model-name",
"-model_name",
"-m",
help="model name",
required=True,
)
parser.add_argument(
"--vocab-path",
help="Path to vocabulary file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--merges-path",
help="Path to merges file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--tokenizer-model-path",
help="Path to tokenizer model file to embed in FasterTransformer checkpoint",
required=False,
)
parser.add_argument(
"--load-checkpoints-to-cpu",
"-load_checkpoints_to_cpu",
"-cpu",
type=int,
choices=[0, 1],
default=1,
help="Whether to load model weights to CPU",
)
parser.add_argument(
"--prompt-in-file",
"-prompt_in_file",
"-p_i",
help="file name of .nemo prompt checkpoint file",
)
parser.add_argument(
"--prompt-saved-dir",
"-prompt_saved_dir",
"-p_o",
help="folder name of prompt checkpoint output files",
)
parser.add_argument(
"--adapter-in-file",
"-adapter_in_file",
"-a_i",
help="file name of .nemo adapter checkpoint file",
)
parser.add_argument("--verbose", action="store_true", help="Provide verbose messages")
args = parser.parse_args()
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO, format=log_format)
print("\n=============== Argument ===============")
for key in vars(args):
print(f"{key}: {vars(args)[key]}")
print("========================================")
input_path = pathlib.Path(args.in_file)
if not input_path.exists():
LOGGER.error("%s does not exists", input_path)
sys.exit(1)
adapter_input_path = pathlib.Path(args.adapter_in_file) if args.adapter_in_file else None
if adapter_input_path and not adapter_input_path.exists():
LOGGER.error("%s does not exists", adapter_input_path)
sys.exit(1)
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir = pathlib.Path(temp_dir)
# unpack if needed
if input_path.is_file():
checkpoint_dir_path = temp_dir / "unpacked"
start_time = datetime.datetime.now()
unpacked_checkpoint_dir = UnpackedNemoCheckpointDir(
unpack_nemo_ckpt(input_path, checkpoint_dir_path),
load_checkpoints_to_cpu=bool(args.load_checkpoints_to_cpu),
)
LOGGER.info("Spent %s (h:m:s) to unpack NeMo archive", datetime.datetime.now() - start_time)
else:
unpacked_checkpoint_dir = UnpackedNemoCheckpointDir(
input_path, load_checkpoints_to_cpu=bool(args.load_checkpoints_to_cpu)
)
LOGGER.debug("Unpacked NeMo checkpoint contains:")
for file_path in unpacked_checkpoint_dir.checkpoints_dir.rglob("*"):
LOGGER.debug(" %s", file_path)
if adapter_input_path and adapter_input_path.is_file():
adapter_dir_path = temp_dir / "adapter"
start_time = datetime.datetime.now()
unpacked_adapter_dir = UnpackedNemoCheckpointDir(
unpack_nemo_ckpt(adapter_input_path, adapter_dir_path),
load_checkpoints_to_cpu=bool(args.load_checkpoints_to_cpu),
)
LOGGER.info("Spent %s (h:m:s) to unpack NeMo adapter archive", datetime.datetime.now() - start_time)
elif adapter_input_path:
unpacked_adapter_dir = UnpackedNemoCheckpointDir(
adapter_input_path, load_checkpoints_to_cpu=bool(args.load_checkpoints_to_cpu)
)
else:
unpacked_adapter_dir = None
if unpacked_adapter_dir is not None:
LOGGER.debug("Unpacked NeMo adapter checkpoint contains:")
for file_path in unpacked_adapter_dir.checkpoints_dir.rglob("*"):
LOGGER.debug(" %s", file_path)
start_time = datetime.datetime.now()
convert_checkpoint(unpacked_checkpoint_dir, args, unpacked_adapter_dir)
LOGGER.info("Spent %s (h:m:s) to convert the model", datetime.datetime.now() - start_time)
map_location_fn = cpu_map_location if bool(args.load_checkpoints_to_cpu) else gpu_map_location
model_config_yaml = "model_config.yaml"
model_weights_ckpt = "model_weights.ckpt"
if args.prompt_in_file is not None:
start_time = datetime.datetime.now()
assert args.prompt_saved_dir is not None
unpack_nemo_ckpt(args.prompt_in_file, args.prompt_saved_dir)
LOGGER.info("Spent %s (h:m:s) to unpack NeMo prompt archive", datetime.datetime.now() - start_time)
prompt_config_file = open(os.path.join(args.prompt_saved_dir, model_config_yaml), "r")
prompt_config = yaml.full_load(prompt_config_file)
LOGGER.info(prompt_config)
start_time = datetime.datetime.now()
prompt_weights = torch.load(
os.path.join(args.prompt_saved_dir, model_weights_ckpt),
map_location=map_location_fn,
)
prompt_convert(args, prompt_config, prompt_weights)
LOGGER.info(f"Spent %s (h:m:s) to unpack convert prompt model", datetime.datetime.now() - start_time)
if __name__ == "__main__":
main()
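# Example invocation (illustrative; paths, model name and sizes depend on your checkpoint):
#   python nemo_t5_ckpt_convert.py \
#       -i /path/to/megatron_t5.nemo -o /path/to/ft_checkpoint \
#       -i_g 1 -m t5-nemo -weight_data_type fp32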
|
FasterTransformer-main
|
examples/pytorch/t5/utils/nemo_t5_ckpt_convert.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import codecs
from onmt.translate import GNMTGlobalScorer
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from utils.translation_model import load_test_model
from utils.translator import Translator
import logging
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--beam_size", type=int, default=4, help="beam size")
parser.add_argument("--max_seq_len", type=int, default=100, help="max_seq_len")
parser.add_argument("--model_type", type=str, help="decoding_ext, torch_decoding, torch_decoding_with_decoder_ext",
choices=['decoding_ext', 'torch_decoding', 'torch_decoding_with_decoder_ext'], required=True)
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument('--model_path', type=str, default='./pytorch/translation/models/averaged-10-epoch.pt',
help='path for model checkpoint')
parser.add_argument('--decoding_ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('--decoder_ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('--encoder_ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('--input_file', type=str, default='../examples/tensorflow/decoding/utils/translation/test.en',
help='input file path')
parser.add_argument('--output_file', type=str, default='',
help='output file path')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
help='diversity rate of beam search. Default is 0.0. When the diversity rate is 0, it is equivalent to the naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
args = parser.parse_args()
opt = argparse.Namespace(models=[args.model_path],
fp32=False, data_type='text', output='/dev/null', report_align=False, report_time=True,
random_sampling_topk=args.sampling_topk, random_sampling_temp=1.0, seed=829,
beam_size=args.beam_size, min_length=0, max_length=args.max_seq_len,
stepwise_penalty=False, length_penalty='none', ratio=-0.0, coverage_penalty='none', alpha=0.0, beta=-0.0,
block_ngram_repeat=0, ignore_when_blocking=[], replace_unk=False, phrase_table='',
verbose=True, dump_beam='', n_best=1, batch_type='sents', gpu=0)
fields, model, model_opt = load_test_model(opt, args)
scorer = GNMTGlobalScorer.from_opt(opt)
out_file = codecs.open(opt.output, 'w+', 'utf-8')
logger = logging.getLogger()
translator = Translator.from_opt(
model,
fields,
opt,
model_opt,
args,
global_scorer=scorer,
out_file=out_file,
report_align=opt.report_align,
report_score=False,
logger=logger
)
res = []
n = 1
with open(args.input_file, 'r') as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
translated = translator.translate(lines, batch_size=args.batch_size)
for i in range(len(translated[1])):
res.append(translated[1][i][0])
if args.output_file:
with open(args.output_file, 'w') as f:
for line in res:
f.write(line + '\n')
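# Example invocation (illustrative; paths depend on your setup):
#   python translate_example.py --batch_size 32 --beam_size 4 --max_seq_len 100 \
#       --model_type decoding_ext --data_type fp32 \
#       --model_path ./pytorch/translation/models/averaged-10-epoch.pt \
#       --input_file ../examples/tensorflow/decoding/utils/translation/test.en \
#       --output_file translation.out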
|
FasterTransformer-main
|
examples/pytorch/decoding/translate_example.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import argparse
import timeit
import torch
import random
import numpy as np
# import torch.cuda.nvtx as nvtx
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.decoding.utils.decoding import DecodingWeights, TorchDecoding, ArgHelper
from examples.pytorch.decoding.utils.ft_decoding import FtDecodingWeights, CustomDecoding
def main():
parser = argparse.ArgumentParser()
parser.add_argument('batch_size', type=int,
help='batch size')
parser.add_argument('layer_num', type=int,
help='number of layers')
parser.add_argument('seq_len', type=int,
help='sequence length')
parser.add_argument('head_num', type=int,
help='head number')
parser.add_argument('head_size', type=int,
help='size per head')
parser.add_argument('-inter_size', '--inter_size', type=int, default=0, metavar='NUMBER',
help='inter_size (default: 0)')
parser.add_argument('-mem_hidden', '--memory_hidden_dim', type=int, default=512, metavar='NUMBER',
help='memory hidden dim (default: 512)')
parser.add_argument('beam_size', type=int,
help='beam size')
parser.add_argument('vocab_size', type=int,
help='vocab size')
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument('--time', action='store_true',
help='test the time or not.')
parser.add_argument('--use_pretrained', action='store_true',
help='use pretrained weights or not.')
parser.add_argument('--decoding_ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('--decoder_ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
help='diversity rate of beam search. Default is 0.0. When the diversity rate is 0, it is equivalent to the naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
args = parser.parse_args()
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
if args.use_pretrained:
layer_num = 6
head_num = 8
head_size = 64
inter_size = head_num * head_size * 4
vocab_size = 31538
else:
layer_num = args.layer_num
head_num = args.head_num
head_size = args.head_size
inter_size = args.inter_size
if inter_size == 0:
inter_size = 4 * head_num * head_size
vocab_size = args.vocab_size
hidden_dim = head_num * head_size
start_id = 2
end_id = 3
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
decodingargs1 = ArgHelper('torch_decoding', args.data_type, os.path.abspath(args.decoder_ths_path), os.path.abspath(args.decoding_ths_path))
decodingargs2 = ArgHelper('torch_decoding_with_decoder_ext', args.data_type, os.path.abspath(args.decoder_ths_path), os.path.abspath(args.decoding_ths_path))
mem = torch.empty(args.batch_size, args.seq_len, args.memory_hidden_dim).cuda()
torch.nn.init.uniform_(mem, -1, 1)
if args.data_type == "fp16":
mem = mem.half()
elif args.data_type == "bf16":
mem = mem.bfloat16()
mem_seq_lens = torch.randint(1, args.seq_len+1, (args.batch_size,), dtype=torch.int32).cuda()
if args.use_pretrained:
ckpt = torch.load('./pytorch/translation/models/averaged-10-epoch.pt')
import re
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
ckpt['model'] = {fix_key(k): v for k, v in ckpt['model'].items()}
weights = DecodingWeights(layer_num, hidden_dim, vocab_size, ckpt)
else:
weights = DecodingWeights(layer_num, hidden_dim, vocab_size)
ft_weights = FtDecodingWeights(layer_num, hidden_dim, weights.w)
# TODO(bhsueh) Add decoder op
torch_decoding = TorchDecoding(layer_num, head_num, head_size, vocab_size, start_id, end_id, weights, args=decodingargs1)
torch_decoding_with_decoder_ext = TorchDecoding(layer_num, head_num, head_size, vocab_size, start_id, end_id, weights, args=decodingargs2)
torch_decoding.cuda()
torch_decoding_with_decoder_ext.cuda()
if args.data_type == "fp16":
torch_decoding.half()
torch_decoding_with_decoder_ext.half()
elif args.data_type == "bf16":
        torch_decoding.bfloat16()
        torch_decoding_with_decoder_ext.bfloat16()
torch_decoding.eval()
torch_decoding_with_decoder_ext.eval()
ft_weights.to_cuda()
if args.data_type == "fp16":
ft_weights.to_half()
elif args.data_type == "bf16":
ft_weights.to_bfloat16()
custom_decoding = CustomDecoding(head_num, head_size,
inter_size, args.memory_hidden_dim, layer_num, vocab_size,
start_id, end_id, args.beam_search_diversity_rate,
args.sampling_topk, args.sampling_topp, 1.0,
1.0, 1.0, ft_weights, args=decodingargs1)
with torch.no_grad():
output0, lens0 = torch_decoding(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
print(output0)
print(lens0)
# return
output1, lens1 = torch_decoding_with_decoder_ext(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
print(output1)
print(lens1)
output2, lens2 = custom_decoding(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
print(output2)
print(lens2)
diff = torch.abs((output0 - output1) / output0)
print('FT Decoder Mean relative diff: {} Max relative diff: {} Min relative diff: {}'.format(
torch.mean(diff), torch.max(diff), torch.min(diff)))
diff = torch.abs((output0 - output2) / output0)
print('FT Decoding Mean relative diff: {} Max relative diff: {} Min relative diff: {}'.format(
torch.mean(diff), torch.max(diff), torch.min(diff)))
if args.time:
iterations = 10
for i in range(iterations):
output, lens = torch_decoding(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
t00 = timeit.default_timer()
for i in range(iterations):
output, lens = torch_decoding(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
t0 = timeit.default_timer() - t00
# for i in range(iterations):
# output, lens = torch_decoding_with_decoder_ext(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
# t10 = timeit.default_timer()
# for i in range(iterations):
# output, lens = torch_decoding_with_decoder_ext(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
# t1 = timeit.default_timer() - t10
for i in range(iterations):
output2, lens2 = custom_decoding(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
t20 = timeit.default_timer()
for i in range(iterations):
output2, lens2 = custom_decoding(args.batch_size, args.beam_size, args.seq_len, mem, mem_seq_lens)
t2 = timeit.default_timer() - t20
print("[INFO] TorchDecoding time costs: {:.2f} ms".format(t0*1000/iterations))
# print("[INFO] TorchDecoding (with FTDecoder) time costs: {:.2f} ms".format(t1*1000/iterations))
print("[INFO] FTDecoding time costs: {:.2f} ms".format(t2*1000/iterations))
if __name__ == '__main__':
main()
|
FasterTransformer-main
|
examples/pytorch/decoding/decoding_example.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from sacrebleu import corpus_bleu
def bleu_score(pred_file, ref_file, bleu_score_threshold=None):
with open(pred_file, "r") as pred_stream, open(ref_file, "r") as ref_stream:
pred_stream_txt = pred_stream.readlines()
ref_stream_txt = ref_stream.readlines()
bleu = corpus_bleu(pred_stream_txt, [ref_stream_txt], force=True)
print(" bleu score: {:6.2f}".format(bleu.score))
print(" bleu counts: {}".format(bleu.counts))
print(" bleu totals: {}".format(bleu.totals))
print(" bleu precisions: {}".format(bleu.precisions))
print(" bleu sys_len: {}; ref_len: {}".format(bleu.sys_len, bleu.ref_len))
        if bleu_score_threshold is not None:
            assert bleu.score >= bleu_score_threshold, "TEST FAIL !"
print("[INFO] TEST PASS !")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--pred_file', type=str, metavar='FILE',
                        help='The prediction file.', required=True)
    parser.add_argument('--ref_file', type=str, metavar='FILE',
                        help='The reference file.', required=True)
parser.add_argument('--bleu_score_threshold', type=float, metavar='NUMBER',
help='The threshold of bleu score.')
args = parser.parse_args()
bleu_score(args.pred_file, args.ref_file, args.bleu_score_threshold)
|
FasterTransformer-main
|
examples/pytorch/decoding/utils/bleu_score.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
FasterTransformer-main
|
examples/pytorch/decoding/utils/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def recover_bpe(src):
dst = []
for line in src:
line = line.strip().split()
if line[-1] == '</s>':
line.pop()
if line[0][0] == '▁':
s = line[0][1:]
else:
s = line[0]
for w in line[1:]:
if w[0] == '▁':
s += ' ' + w[1:]
else:
s += w
s += '\n'
dst.append(s)
return dst
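# Illustrative behaviour of recover_bpe on toy SentencePiece output (not taken
# from a real run): a leading '▁' starts a new word, other pieces are appended to
# the previous word, and a trailing '</s>' is dropped.
#   recover_bpe(['▁Hel lo ▁wor ld </s>'])  ->  ['Hello world\n']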
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str)
parser.add_argument('outfile', type=str)
args = parser.parse_args()
with open(args.infile, 'r') as infile:
with open(args.outfile, 'w') as outfile:
dst = recover_bpe(infile.readlines())
for line in dst:
outfile.write(line)
|
FasterTransformer-main
|
examples/pytorch/decoding/utils/recover_bpe.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import os
import math
import torch
import torch.nn as nn
import torch.cuda.nvtx as nvtx
from onmt.modules import Embeddings, AverageAttention
from onmt.decoders.decoder import DecoderBase
from onmt.decoders.transformer import TransformerDecoderLayer
from onmt.utils.misc import tile, sequence_mask
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
from examples.pytorch.decoder.utils.ft_decoder import FTDecoder, FtDecoderWeights
USE_CACHE_BATCH_MAJOR_ATTENTION = True
to_torch_type = {'fp32' : torch.float32, 'fp16' : torch.float16, 'bf16' : torch.bfloat16}
def get_op_cache_config(size_per_head, is_fp32):
x = 4 if is_fp32 else 8
    use_batch_major_op_cache = USE_CACHE_BATCH_MAJOR_ATTENTION and size_per_head % x == 0
x = x if use_batch_major_op_cache else 1
return use_batch_major_op_cache, x
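# Illustrative values for the rule above (hypothetical configurations): with fp16
# and size_per_head = 64, x = 8 and 64 % 8 == 0, so the batch-major op cache is
# used with chunks of 8 elements; with fp32 and size_per_head = 30, 30 % 4 != 0,
# so the layout falls back to the non-batch-major cache and x = 1.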
class DecodingWeights(object):
def __init__(self, layer_num, hidden_dim, vocab_size, onmtcheckpoint=None, max_step_for_pe=2048):
self.hidden_dim = hidden_dim
self.max_step_for_pe = max_step_for_pe
# self.w = []
if onmtcheckpoint:
self.w = {}
for key in onmtcheckpoint:
if key == 'model' or key == 'generator':
self.w[key] = onmtcheckpoint[key]
else:
self.w = {}
self.w['model'] = {}
self.w['generator'] = {}
for i in range(layer_num):
prefix = 'decoder.transformer_layers.' + str(i)
self.w['model'][prefix + '.layer_norm_1.weight'] = torch.zeros(hidden_dim) # self_layernorm_gamma
self.w['model'][prefix + '.layer_norm_1.bias'] = torch.zeros(hidden_dim) # self_layernorm_beta
self.w['model'][prefix + '.self_attn.linear_query.weight'] = torch.zeros(hidden_dim, hidden_dim) # self_kernel_q
self.w['model'][prefix + '.self_attn.linear_keys.weight'] = torch.zeros(hidden_dim, hidden_dim) # self_kernel_k
self.w['model'][prefix + '.self_attn.linear_values.weight'] = torch.zeros(hidden_dim, hidden_dim) # self_kernel_v
self.w['model'][prefix + '.self_attn.linear_query.bias'] = torch.zeros(hidden_dim) # self_bias_q
self.w['model'][prefix + '.self_attn.linear_keys.bias'] = torch.zeros(hidden_dim) # self_bias_k
self.w['model'][prefix + '.self_attn.linear_values.bias'] = torch.zeros(hidden_dim) # self_bias_v
self.w['model'][prefix + '.self_attn.final_linear.weight'] = torch.zeros(hidden_dim, hidden_dim) # self_output_kernel
self.w['model'][prefix + '.self_attn.final_linear.bias'] = torch.zeros(hidden_dim) # self_output_bias
self.w['model'][prefix + '.layer_norm_2.weight'] = torch.zeros(hidden_dim) # cross_layernorm_gamma
self.w['model'][prefix + '.layer_norm_2.bias'] = torch.zeros(hidden_dim) # cross_layernorm_beta
self.w['model'][prefix + '.context_attn.linear_query.weight'] = torch.zeros(hidden_dim, hidden_dim) # cross_kernel_q
self.w['model'][prefix + '.context_attn.linear_keys.weight'] = torch.zeros(hidden_dim, hidden_dim) # cross_kernel_k
self.w['model'][prefix + '.context_attn.linear_values.weight'] = torch.zeros(hidden_dim, hidden_dim) # cross_kernel_v
self.w['model'][prefix + '.context_attn.linear_query.bias'] = torch.zeros(hidden_dim) # cross_bias_q
self.w['model'][prefix + '.context_attn.linear_keys.bias'] = torch.zeros(hidden_dim) # cross_bias_k
self.w['model'][prefix + '.context_attn.linear_values.bias'] = torch.zeros(hidden_dim) # cross_bias_v
self.w['model'][prefix + '.context_attn.final_linear.weight'] = torch.zeros(hidden_dim, hidden_dim) # cross_output_kernel
self.w['model'][prefix + '.context_attn.final_linear.bias'] = torch.zeros(hidden_dim) # cross_output_bias
self.w['model'][prefix + '.feed_forward.layer_norm.weight'] = torch.zeros(hidden_dim) # ffn_layernorm_gamma
self.w['model'][prefix + '.feed_forward.layer_norm.bias'] = torch.zeros(hidden_dim) # ffn_layernorm_beta
self.w['model'][prefix + '.feed_forward.w_1.weight'] = torch.zeros(4 * hidden_dim, hidden_dim) # inter_kernel
self.w['model'][prefix + '.feed_forward.w_1.bias'] = torch.zeros(4 * hidden_dim) # inter_bias
self.w['model'][prefix + '.feed_forward.w_2.weight'] = torch.zeros(hidden_dim, 4 * hidden_dim) # output_kernel
self.w['model'][prefix + '.feed_forward.w_2.bias'] = torch.zeros(hidden_dim) # output_bias
self.w['model']['decoder.layer_norm.weight'] = torch.zeros(hidden_dim) # decoding_gamma
self.w['model']['decoder.layer_norm.bias'] = torch.zeros(hidden_dim) # decoding_beta
self.w['model']['decoder.embeddings.make_embedding.emb_luts.0.weight'] = torch.zeros(vocab_size, hidden_dim) # embedding_table
self.w['generator']['0.weight'] = torch.zeros(vocab_size, hidden_dim)
self.w['generator']['0.bias'] = torch.zeros(vocab_size)
for key in self.w:
if isinstance(self.w[key], dict):
for next_key in self.w[key]:
torch.nn.init.uniform_(self.w[key][next_key], -0.5, 0.5)
else:
torch.nn.init.uniform_(self.w[key], -0.5, 0.5)
def to_cuda(self):
for key in self.w:
if isinstance(self.w[key], dict):
for next_key in self.w[key]:
self.w[key][next_key] = self.w[key][next_key].cuda()
else:
self.w[key] = self.w[key].cuda()
def to_half(self):
for key in self.w:
if isinstance(self.w[key], dict):
for next_key in self.w[key]:
self.w[key][next_key] = self.w[key][next_key].half()
else:
self.w[key] = self.w[key].half()
def to_bfloat16(self):
for key in self.w:
if isinstance(self.w[key], dict):
for next_key in self.w[key]:
self.w[key][next_key] = self.w[key][next_key].bfloat16()
else:
self.w[key] = self.w[key].bfloat16()
def _get_position_encoding(self):
pe = torch.zeros(self.max_step_for_pe, self.hidden_dim)
position = torch.arange(0, self.max_step_for_pe).unsqueeze(1)
div_term = torch.exp((torch.arange(0, self.hidden_dim, 2, dtype=torch.float) *
-(math.log(10000.0) / self.hidden_dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
return pe
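# The table built by _get_position_encoding above is the standard sinusoidal
# positional encoding from "Attention Is All You Need":
#   pe[pos, 2i]   = sin(pos / 10000^(2i / hidden_dim))
#   pe[pos, 2i+1] = cos(pos / 10000^(2i / hidden_dim))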
def gather_nd(params, indices):
indices = indices.t().long()
ndim = indices.size(0)
idx = torch.zeros_like(indices[0]).long()
m = 1
for i in range(ndim)[::-1]:
idx += indices[i] * m
m *= params.size(i)
    params = params.reshape((-1, *params.size()[ndim:]))
return params[idx]
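# Toy example of gather_nd (hypothetical tensors): it mimics TensorFlow's
# gather_nd, picking the entries of params addressed by each row of indices.
#   params  = torch.arange(6.).reshape(2, 3)   # [[0., 1., 2.], [3., 4., 5.]]
#   indices = torch.tensor([[0, 2], [1, 0]])
#   gather_nd(params, indices)                  # -> tensor([2., 3.])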
def gather_tree(step_ids, parent_ids, max_sequence_lengths, end_token):
beams = torch.empty_like(step_ids)
beams.fill_(end_token)
max_len = step_ids.size(0)
batch_size = step_ids.size(1)
beam_size = step_ids.size(-1)
batch_beam = batch_size * beam_size
for i in range(batch_beam):
batch = i // beam_size
beam = i % beam_size
max_seq_len_b = min(max_len, max_sequence_lengths[batch])
if max_seq_len_b <= 0:
continue
beams[max_seq_len_b - 1, batch, beam] = step_ids[max_seq_len_b - 1, batch, beam]
parent = parent_ids[max_seq_len_b - 1, batch, beam]
for level in range(max_seq_len_b - 2, -1, -1):
if parent < 0 or parent > beam_size:
raise ValueError("wrong parent id")
beams[level, batch, beam] = step_ids[level, batch, parent]
parent = parent_ids[level, batch, parent]
finished = False
for time in range(max_seq_len_b):
if finished:
beams[time, batch, beam] = end_token
elif beams[time, batch, beam] == end_token:
finished = True
return beams
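# gather_tree is a pure-PyTorch fallback for the fastertransformer.gather_tree op
# used in finalize below: starting from the last valid step of each (batch, beam)
# pair it walks parent_ids backwards to recover the actual token path, then
# overwrites everything after the first end_token with end_token.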
def finalize(beam_size, output_ids, parent_ids, out_seq_lens, end_id, max_seq_len=None, args=None):
out_seq_lens = torch.reshape(out_seq_lens, (-1, beam_size))
max_lens = torch.max(out_seq_lens, 1)[0]
if max_seq_len:
shape = (max_seq_len, -1, beam_size)
else:
shape = (torch.max(max_lens), -1, beam_size)
output_ids = torch.reshape(output_ids, shape)
parent_ids = torch.reshape(parent_ids, shape)
if output_ids.is_cuda:
torch.classes.load_library("./lib/libth_transformer.so")
batch_size = output_ids.shape[1]
end_ids = end_id * torch.ones(batch_size, dtype=torch.int32, device=output_ids.device)
ids = torch.ops.fastertransformer.gather_tree(output_ids.to(torch.int32), parent_ids.to(torch.int32), out_seq_lens.to(torch.int32), end_ids)
else:
ids = gather_tree(output_ids, parent_ids, max_lens, end_id)
ids = torch.einsum('ijk->jki', ids) # batch_size, beam_size, max_seq_len
lengths = torch.eq(ids, end_id)
lengths = 1 - lengths.to(output_ids.dtype)
lengths = torch.sum(lengths, -1)
return ids, lengths
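# finalize reshapes the flat per-step ids into (seq_len, batch, beam), backtracks
# the beams (via the fastertransformer.gather_tree op on GPU, or the Python
# gather_tree above on CPU), and returns ids of shape [batch_size, beam_size,
# seq_len] together with per-beam lengths counting the non-end_token positions.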
class TransformerDecoder(DecoderBase):
"""The Transformer decoder from "Attention is All You Need".
Args:
num_layers (int): number of encoder layers.
d_model (int): size of the model
heads (int): number of heads
d_ff (int): size of the inner FF layer
copy_attn (bool): if using a separate copy attention
self_attn_type (str): type of self-attention scaled-dot, average
dropout (float): dropout in residual, self-attn(dot) and feed-forward
attention_dropout (float): dropout in context_attn (and self-attn(avg))
embeddings (onmt.modules.Embeddings):
embeddings to use, should have positional encodings
max_relative_positions (int):
Max distance between inputs in relative positions representations
aan_useffn (bool): Turn on the FFN layer in the AAN decoder
full_context_alignment (bool):
whether enable an extra full context decoder forward for alignment
alignment_layer (int): N° Layer to supervise with for alignment guiding
alignment_heads (int):
N. of cross attention heads to use for alignment guiding
"""
def __init__(self, num_layers, d_model, heads, head_size, d_ff,
copy_attn, self_attn_type, dropout, attention_dropout,
embeddings, max_relative_positions, aan_useffn,
full_context_alignment, alignment_layer,
alignment_heads, args):
super(TransformerDecoder, self).__init__()
self.args = args
if not self.args.model_type:
raise ValueError("no model_type is supplied.")
self.embeddings = embeddings
# relevant to custom cache config
# self.use_batch_major_op_cache = False
# self.op_cache_dim_x = 1
        self.is_fp32 = (self.args.data_type == 'fp32')
self.use_batch_major_op_cache, self.op_cache_dim_x = get_op_cache_config(head_size, self.is_fp32)
self.head_num = heads
self.size_per_head = head_size
# Decoder State
self.state = {}
self.transformer_layers = nn.ModuleList(
[TransformerDecoderLayer(d_model, heads, d_ff, dropout,
attention_dropout, self_attn_type=self_attn_type,
max_relative_positions=max_relative_positions,
aan_useffn=aan_useffn,
full_context_alignment=full_context_alignment,
alignment_heads=alignment_heads)
for i in range(num_layers)])
# previously, there was a GlobalAttention module here for copy
# attention. But it was never actually used -- the "copy" attention
# just reuses the context attention.
self._copy = copy_attn
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.alignment_layer = alignment_layer
@classmethod
def from_opt(cls, opt, embeddings, args):
"""Alternate constructor."""
return cls(
opt.dec_layers,
opt.dec_rnn_size,
opt.heads,
opt.dec_rnn_size // opt.heads,
opt.transformer_ff,
opt.copy_attn,
opt.self_attn_type,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            opt.attention_dropout[0] if type(opt.attention_dropout) is list else opt.attention_dropout,
embeddings,
opt.max_relative_positions,
opt.aan_useffn,
opt.full_context_alignment,
opt.alignment_layer,
alignment_heads=opt.alignment_heads,
args=args)
def init_state(self, src, memory_bank, enc_hidden):
"""Initialize decoder state."""
self.state["src"] = src
self.state["cache"] = None
def map_state(self, fn):
def _recursive_map(struct, batch_dim=0, use_batch_major_op_cache=False):
for k, v in struct.items():
if v is not None:
if isinstance(v, dict):
_recursive_map(v, batch_dim, use_batch_major_op_cache)
else:
if isinstance(v, list):
# only custom cache is passed as a list, so we know its batch dim == 0 or 1
batch_dim_ = 0 if use_batch_major_op_cache else 1
struct[k] = [fn(vv, batch_dim_) for vv in struct[k]]
else:
struct[k] = fn(v, batch_dim)
self.state["src"] = fn(self.state["src"], 1)
if self.args.model_type == 'ori' or self.args.model_type == 'torch_decoding':
if self.state["cache"] is not None:
_recursive_map(self.state["cache"], 0)
if self.args.model_type == 'decoder_ext' or self.args.model_type == 'torch_decoding_with_decoder_ext':
if self.state["cache"] is not None:
self.state["cache"]["self"][0] = fn(self.state["cache"]["self"][0], 1)
self.state["cache"]["self"][1] = fn(self.state["cache"]["self"][1], 1)
def detach_state(self):
self.state["src"] = self.state["src"].detach()
def forward(self, tgt, memory_bank, step=None, **kwargs):
"""Decode, possibly stepwise."""
decoding_max_seq_len = kwargs["decoding_max_seq_len"]
if step == 0:
self._init_cache(memory_bank, decoding_max_seq_len)
tgt_words = tgt[:, :, 0].transpose(0, 1)
emb = self.embeddings(tgt, step=step)
assert emb.dim() == 3 # len x batch x embedding_dim
output = emb.transpose(0, 1).contiguous()
src_memory_bank = memory_bank.transpose(0, 1).contiguous()
pad_idx = self.embeddings.word_padding_idx
src_lens = kwargs["memory_lengths"]
if self.args.model_type == 'ori' or self.args.model_type == 'torch_decoding':
src_max_len = self.state["src"].shape[0]
src_pad_mask = ~sequence_mask(src_lens, src_max_len).unsqueeze(1)
tgt_pad_mask = tgt_words.data.eq(pad_idx).unsqueeze(1) # [B, 1, T_tgt]
with_align = kwargs.pop('with_align', False)
attn_aligns = []
for i, layer in enumerate(self.transformer_layers):
layer_cache = self.state["cache"]["layer_{}".format(i)] \
if step is not None else None
output, attn, attn_align = layer(
output,
src_memory_bank,
src_pad_mask,
tgt_pad_mask,
layer_cache=layer_cache,
step=step,
with_align=with_align)
if attn_align is not None:
attn_aligns.append(attn_align)
elif self.args.model_type == 'decoder_ext' or self.args.model_type == 'torch_decoding_with_decoder_ext':
src_lens_ = src_lens.to(torch.int)
output, self_cache_, mem_cache_ = self.transformer_layers[0](output, src_memory_bank, src_lens_,
self.state["cache"]['self'], self.state["cache"]['mem'],
kwargs['sequence_lengths'], step)
self.state["cache"]['self'] = self_cache_
self.state["cache"]['mem'] = mem_cache_
output = self.layer_norm(output)
dec_outs = output.transpose(0, 1).contiguous()
attns = {}
# attn = attn.transpose(0, 1).contiguous()
# attns = {"std": attn}
# if self._copy:
# attns["copy"] = attn
# if with_align:
# attns["align"] = attn_aligns[self.alignment_layer] # `(B, Q, K)`
# # attns["align"] = torch.stack(attn_aligns, 0).mean(0) # All avg
# TODO change the way attns is returned dict => list or tuple (onnx)
return dec_outs, attns
def _init_cache(self, memory_bank, decoding_max_seq_len):
self.state["cache"] = {}
batch_size = memory_bank.size(1)
depth = memory_bank.size(-1)
if self.args.model_type == 'ori' or self.args.model_type == 'torch_decoding':
for i, layer in enumerate(self.transformer_layers):
layer_cache = {"memory_keys": None, "memory_values": None}
if isinstance(layer.self_attn, AverageAttention):
layer_cache["prev_g"] = torch.zeros((batch_size, 1, depth),
device=memory_bank.device)
else:
layer_cache["self_keys"] = None
layer_cache["self_values"] = None
self.state["cache"]["layer_{}".format(i)] = layer_cache
elif self.args.model_type == 'decoder_ext' or self.args.model_type == 'torch_decoding_with_decoder_ext':
max_seq_len = memory_bank.size(0)
dtype = to_torch_type[self.args.data_type]
self.state['cache']['mem'] = [torch.zeros(self.transformer_layers[0].layer_num, batch_size, max_seq_len, depth, dtype=dtype, device='cuda'),
torch.zeros(self.transformer_layers[0].layer_num, batch_size, max_seq_len, depth, dtype=dtype, device='cuda')]
self.state['cache']['self'] = [ torch.zeros(self.transformer_layers[0].layer_num, batch_size, self.head_num, self.size_per_head // self.op_cache_dim_x,
max_seq_len, self.op_cache_dim_x, dtype=dtype, device='cuda'),
torch.zeros(self.transformer_layers[0].layer_num, batch_size, self.head_num,
max_seq_len, self.size_per_head, dtype=dtype, device='cuda') ]
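        # Cache layout allocated above: the 'mem' caches hold the per-layer
        # cross-attention keys/values of shape [layer_num, batch, max_seq_len, depth];
        # the self-attention key cache is
        # [layer_num, batch, head_num, size_per_head / x, max_seq_len, x]
        # (x = op_cache_dim_x) and the value cache is
        # [layer_num, batch, head_num, max_seq_len, size_per_head].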
def update_dropout(self, dropout, attention_dropout):
self.embeddings.update_dropout(dropout)
for layer in self.transformer_layers:
layer.update_dropout(dropout, attention_dropout)
class ArgHelper(object):
def __init__(self, model_type=None, data_type=None, decoder_ths_path=None, decoding_ths_path=None):
self.model_type = model_type
self.data_type = data_type
self.decoder_ths_path = decoder_ths_path
self.decoding_ths_path = decoding_ths_path
class TorchDecoding(nn.Module):
def __init__(self, layer_num, head_num, head_size, vocab_size, start_id, end_id, weights,
beam_search_diversity_rate=0.0, args=None):
super().__init__()
self.layer_num = layer_num
self.hidden_dim = head_num * head_size
self.start_id = start_id
self.end_id = end_id
self.vocab_size = vocab_size
self.diversity_rate = beam_search_diversity_rate
self.args = args
emb = Embeddings(self.hidden_dim, vocab_size, 1, position_encoding=True)
self.decoder = TransformerDecoder(layer_num, self.hidden_dim, head_num, head_size, 4*self.hidden_dim,
False, 'scaled-dot', 0, 0, emb, 0, False, False, -3, 0, args)
self.generator = nn.Linear(self.hidden_dim, vocab_size)
self.logsoftmax = nn.LogSoftmax(dim=-1)
if args.model_type == 'torch_decoding':
for i in range(layer_num):
prefix = 'decoder.transformer_layers.' + str(i)
self.decoder.transformer_layers[i].layer_norm_1.weight.data = weights.w['model'][prefix + '.layer_norm_1.weight']
self.decoder.transformer_layers[i].layer_norm_1.bias.data = weights.w['model'][prefix + '.layer_norm_1.bias']
self.decoder.transformer_layers[i].self_attn.linear_query.weight.data = weights.w['model'][prefix + '.self_attn.linear_query.weight']
self.decoder.transformer_layers[i].self_attn.linear_keys.weight.data = weights.w['model'][prefix + '.self_attn.linear_keys.weight']
self.decoder.transformer_layers[i].self_attn.linear_values.weight.data = weights.w['model'][prefix + '.self_attn.linear_values.weight']
self.decoder.transformer_layers[i].self_attn.linear_query.bias.data = weights.w['model'][prefix + '.self_attn.linear_query.bias']
self.decoder.transformer_layers[i].self_attn.linear_keys.bias.data = weights.w['model'][prefix + '.self_attn.linear_keys.bias']
self.decoder.transformer_layers[i].self_attn.linear_values.bias.data = weights.w['model'][prefix + '.self_attn.linear_values.bias']
self.decoder.transformer_layers[i].self_attn.final_linear.weight.data = weights.w['model'][prefix + '.self_attn.final_linear.weight']
self.decoder.transformer_layers[i].self_attn.final_linear.bias.data = weights.w['model'][prefix + '.self_attn.final_linear.bias']
self.decoder.transformer_layers[i].layer_norm_2.weight.data = weights.w['model'][prefix + '.layer_norm_2.weight']
self.decoder.transformer_layers[i].layer_norm_2.bias.data = weights.w['model'][prefix + '.layer_norm_2.bias']
self.decoder.transformer_layers[i].context_attn.linear_query.weight.data = weights.w['model'][prefix + '.context_attn.linear_query.weight']
self.decoder.transformer_layers[i].context_attn.linear_keys.weight.data = weights.w['model'][prefix + '.context_attn.linear_keys.weight']
self.decoder.transformer_layers[i].context_attn.linear_values.weight.data = weights.w['model'][prefix + '.context_attn.linear_values.weight']
self.decoder.transformer_layers[i].context_attn.linear_query.bias.data = weights.w['model'][prefix + '.context_attn.linear_query.bias']
self.decoder.transformer_layers[i].context_attn.linear_keys.bias.data = weights.w['model'][prefix + '.context_attn.linear_keys.bias']
self.decoder.transformer_layers[i].context_attn.linear_values.bias.data = weights.w['model'][prefix + '.context_attn.linear_values.bias']
self.decoder.transformer_layers[i].context_attn.final_linear.weight.data = weights.w['model'][prefix + '.context_attn.final_linear.weight']
self.decoder.transformer_layers[i].context_attn.final_linear.bias.data = weights.w['model'][prefix + '.context_attn.final_linear.bias']
self.decoder.transformer_layers[i].feed_forward.layer_norm.weight.data = weights.w['model'][prefix + '.feed_forward.layer_norm.weight']
self.decoder.transformer_layers[i].feed_forward.layer_norm.bias.data = weights.w['model'][prefix + '.feed_forward.layer_norm.bias']
self.decoder.transformer_layers[i].feed_forward.w_1.weight.data = weights.w['model'][prefix + '.feed_forward.w_1.weight']
self.decoder.transformer_layers[i].feed_forward.w_1.bias.data = weights.w['model'][prefix + '.feed_forward.w_1.bias']
self.decoder.transformer_layers[i].feed_forward.w_2.weight.data = weights.w['model'][prefix + '.feed_forward.w_2.weight']
self.decoder.transformer_layers[i].feed_forward.w_2.bias.data = weights.w['model'][prefix + '.feed_forward.w_2.bias']
elif args.model_type == 'torch_decoding_with_decoder_ext':
w = []
ft_decoder_weights = FtDecoderWeights(layer_num, self.hidden_dim, weights.w)
ft_decoder_weights.to_cuda()
if args.data_type == 'fp16':
ft_decoder_weights.to_half()
elif args.data_type == 'bf16':
ft_decoder_weights.to_bfloat16()
self.decoder.transformer_layers = nn.ModuleList(
[FTDecoder(head_num, head_size, head_num * head_size, layer_num, ft_decoder_weights, args)])
else:
raise ValueError('wrong model_type')
self.decoder.layer_norm.weight.data = weights.w['model']['decoder.layer_norm.weight']
self.decoder.layer_norm.bias.data = weights.w['model']['decoder.layer_norm.bias']
self.decoder.embeddings.make_embedding.emb_luts[0].weight.data = weights.w['model']['decoder.embeddings.make_embedding.emb_luts.0.weight']
self.generator.weight.data = weights.w['generator']['0.weight']
self.generator.bias.data = weights.w['generator']['0.bias']
def forward(self, batch_size, beam_size, max_seq_len, memory, memory_seq_lens):
# nvtx.range_push("torch_decoding")
extended_memory = tile(memory, beam_size)
batchxbeam = extended_memory.size(0)
extended_memory = extended_memory.transpose(0, 1).contiguous()
extended_memory_seq_lens = tile(memory_seq_lens, beam_size)
start_ids = extended_memory_seq_lens.new_full((batchxbeam,), self.start_id, dtype=torch.int64)
initial_log_probs = extended_memory.new_full((beam_size,), -float("inf"), dtype=torch.float32)
initial_log_probs[0] = 0.
initial_log_probs = initial_log_probs.repeat(batch_size)
sequence_lengths = extended_memory_seq_lens.new_full((batchxbeam,), 0)
finished = extended_memory_seq_lens.new_full((batchxbeam,), 0, dtype=torch.bool)
dtype_info = torch.finfo(extended_memory.dtype)
eos_max_prob = extended_memory.new_full((batchxbeam, self.vocab_size), dtype_info.min)
eos_max_prob[:, self.end_id] = dtype_info.max
self.decoder.init_state(extended_memory, extended_memory, None)
word_ids = start_ids
cum_log_probs = initial_log_probs
for step in range(max_seq_len):
if not torch.bitwise_not(finished).any():
break
word_ids = word_ids.view(1, -1, 1)
dec_out, dec_attn = self.decoder(word_ids, extended_memory, memory_lengths=extended_memory_seq_lens,
step=step, decoding_max_seq_len=max_seq_len, sequence_lengths=sequence_lengths)
logits = self.generator(dec_out.squeeze(0))
logits = torch.where(finished.view(-1, 1), eos_max_prob, logits).to(torch.float32)
log_probs = self.logsoftmax(logits.to(torch.float32))
total_probs = log_probs + torch.unsqueeze(cum_log_probs, 1)
total_probs = total_probs.view(-1, beam_size * self.vocab_size)
# beamsearch
# _, sample_ids = torch.topk(total_probs, beam_size)
# sample_ids = sample_ids.view(-1)
            # diverse sibling search
sibling_score = torch.arange(1, beam_size+1).to(total_probs.dtype).to(extended_memory.device) * self.diversity_rate # [beam_size]
scores, ids = torch.topk(total_probs.view(-1, beam_size, self.vocab_size), beam_size) # [batch size, beam width, beam width]
scores = scores + sibling_score # [batch size, beam width, beam width]
scores = scores.view(-1, beam_size * beam_size)
ids = ids + torch.unsqueeze(torch.unsqueeze(torch.arange(0, beam_size).to(extended_memory.device) * self.vocab_size, 0), -1)
ids = ids.view(-1, beam_size * beam_size)
_, final_ids = torch.topk(scores, beam_size) # [batch size, beam size]
final_ids = final_ids.view(-1, 1)
batch_index = torch.arange(0, batch_size).to(extended_memory.device).view(-1, 1).repeat(1, beam_size).view(-1, 1)
index = torch.cat([batch_index, final_ids], 1)
sample_ids = gather_nd(ids, index)
word_ids = sample_ids % self.vocab_size # [batch_size * beam_size]
beam_ids = sample_ids // self.vocab_size # [batch_size * beam_size]
beam_indices = (torch.arange(batchxbeam).to(extended_memory.device) // beam_size) * beam_size + beam_ids
sequence_lengths = torch.where(finished, sequence_lengths, sequence_lengths + 1)
batch_pos = torch.arange(batchxbeam).to(extended_memory.device) // beam_size
next_cum_log_probs = gather_nd(total_probs, torch.stack([batch_pos, sample_ids], -1)) # [batch_size * beam_size]
finished = finished.index_select(0, beam_indices)
sequence_lengths = sequence_lengths.index_select(0, beam_indices)
self.decoder.map_state(lambda state, dim: state.index_select(dim, beam_indices))
if step == 0:
parent_ids = beam_ids.view(1, -1)
output_ids = word_ids.view(1, -1)
else:
parent_ids = torch.cat((parent_ids, beam_ids.view(1, -1)))
output_ids = torch.cat((output_ids, word_ids.view(1, -1)))
cum_log_probs = torch.where(finished, cum_log_probs, next_cum_log_probs)
finished = torch.bitwise_or(finished, torch.eq(word_ids, self.end_id))
# nvtx.range_push("finalize")
beams, lengths = finalize(beam_size, output_ids, parent_ids, sequence_lengths, self.end_id, args=self.args)
# nvtx.range_pop()
# nvtx.range_pop()
return beams, lengths
|
FasterTransformer-main
|
examples/pytorch/decoding/utils/decoding.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import os
import math
import torch
import torch.nn as nn
import torch.cuda.nvtx as nvtx
from onmt.utils.misc import tile
class FtDecodingWeights(object):
def __init__(self, layer_num, hidden_dim, onmtcheckpoint, max_step_for_pe=2048):
self.max_step_for_pe = max_step_for_pe
self.hidden_dim = hidden_dim
self.w = []
prefix = 'decoder.transformer_layers.'
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_1.weight'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_1.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[torch.stack([onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_query.weight'].transpose(-1, -2),
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_keys.weight'].transpose(-1, -2),
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_values.weight'].transpose(-1, -2)], -2)
for i in range(layer_num)], 0).contiguous())
self.w.append(torch.stack(
[torch.stack([onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_query.bias'],
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_keys.bias'],
onmtcheckpoint['model'][prefix + str(i) + '.self_attn.linear_values.bias']], -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.self_attn.final_linear.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.self_attn.final_linear.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_2.weight'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.layer_norm_2.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_query.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_keys.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_values.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_query.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_keys.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.linear_values.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.final_linear.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.context_attn.final_linear.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.layer_norm.weight'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.layer_norm.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_1.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_1.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_2.weight'].transpose(-1, -2) for i in range(layer_num)],
0).contiguous())
self.w.append(torch.stack(
[onmtcheckpoint['model'][prefix + str(i) + '.feed_forward.w_2.bias'] for i in range(layer_num)],
0).contiguous())
self.w.append(onmtcheckpoint['model']['decoder.layer_norm.weight'])
self.w.append(onmtcheckpoint['model']['decoder.layer_norm.bias'])
self.w.append(onmtcheckpoint['model']['decoder.embeddings.make_embedding.emb_luts.0.weight'])
self.w.append(self._get_position_encoding()) # pe_encoding
self.w.append(onmtcheckpoint['generator']['0.weight'].transpose(-1, -2).contiguous())
self.w.append(onmtcheckpoint['generator']['0.bias'])
def to_cuda(self):
for i in range(len(self.w)):
self.w[i] = self.w[i].cuda()
def to_half(self):
for i in range(len(self.w)):
self.w[i] = self.w[i].half()
def to_bfloat16(self):
for i in range(len(self.w)):
self.w[i] = self.w[i].bfloat16()
def _get_position_encoding(self):
pe = torch.zeros(self.max_step_for_pe, self.hidden_dim)
position = torch.arange(0, self.max_step_for_pe).unsqueeze(1)
div_term = torch.exp((torch.arange(0, self.hidden_dim, 2, dtype=torch.float) *
-(math.log(10000.0) / self.hidden_dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
return pe.cuda().contiguous()
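# The flat list self.w built above is passed positionally to the FasterTransformer
# Decoding op (see CustomDecoding below), so its ordering is assumed to match the
# weight order expected by that op: per-layer self-attention, cross-attention and
# FFN tensors first, followed by the final layer norm, the embedding table, the
# positional encoding and the generator projection.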
class CustomDecoding(nn.Module):
def __init__(self, head_num, head_size,
inter_size, mem_hidden_dim, layer_num, vocab_size, start_id, end_id,
beam_search_diversity_rate, top_k, top_p, temperature,
len_penalty, repetition_penalty, weights, args=None):
super().__init__()
self.end_id = end_id
self.args = args
torch.classes.load_library(os.path.abspath(args.decoding_ths_path))
try:
self.decoding = torch.classes.FasterTransformer.Decoding(head_num, head_size,
inter_size, mem_hidden_dim, layer_num, vocab_size, start_id, end_id,
beam_search_diversity_rate, top_k, top_p, temperature,
len_penalty, repetition_penalty, *weights.w)
        except Exception:
# legacy ths for 20.03 image
self.decoding = torch.classes.FasterTransformerDecoding(head_num, head_size,
inter_size, mem_hidden_dim, layer_num, vocab_size, start_id, end_id,
beam_search_diversity_rate, top_k, top_p, temperature,
len_penalty, repetition_penalty, *weights.w)
self.is_clean_cache = False
def forward(self, batch_size, beam_size, seq_len, memory, memory_seq_lens):
        if not self.is_clean_cache:
torch.cuda.empty_cache()
self.is_clean_cache = True
extended_memory = tile(memory, beam_size)
extended_memory_seq_lens = tile(memory_seq_lens, beam_size)
output_ids, parent_ids, out_seq_lens = self.decoding.forward(beam_size, seq_len, extended_memory, extended_memory_seq_lens)
output_ids = output_ids.reshape([seq_len, memory.size(0), beam_size])
output_ids = output_ids.permute(1, 2, 0)
return output_ids, out_seq_lens
class ArgHelper(object):
def __init__(self, model_type=None, data_type=None, ths_path=None):
self.model_type = model_type
self.data_type = data_type
self.ths_path = ths_path
|
FasterTransformer-main
|
examples/pytorch/decoding/utils/ft_decoding.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.transformer import TransformerEncoder
from onmt.modules import Embeddings, VecEmbedding, CopyGenerator
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.parse import ArgumentParser
from examples.pytorch.encoder.utils.ft_encoder import EncoderWeights, CustomEncoder
from .decoding import FTDecoder, DecodingWeights, TorchDecoding, TransformerDecoder
from .ft_decoding import FtDecodingWeights, CustomDecoding
def build_embeddings(opt, text_field, for_encoder=True):
"""
Args:
opt: the option in current environment.
text_field(TextMultiField): word and feats field.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
if opt.model_type == "vec" and for_encoder:
return VecEmbedding(
opt.feat_vec_size,
emb_dim,
position_encoding=opt.position_encoding,
dropout=(opt.dropout[0] if type(opt.dropout) is list
else opt.dropout),
)
pad_indices = [f.vocab.stoi[f.pad_token] for _, f in text_field]
word_padding_idx, feat_pad_indices = pad_indices[0], pad_indices[1:]
num_embs = [len(f.vocab) for _, f in text_field]
num_word_embeddings, num_feat_embeddings = num_embs[0], num_embs[1:]
fix_word_vecs = opt.fix_word_vecs_enc if for_encoder \
else opt.fix_word_vecs_dec
emb = Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam",
fix_word_vecs=fix_word_vecs
)
return emb
def load_test_model(opt, args):
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = inputters.load_old_vocab(
vocab, opt.data_type, dynamic_dict=model_opt.copy_attn
)
else:
fields = vocab
model = build_base_model(model_opt, fields, use_gpu(opt), args, checkpoint,
opt.gpu)
if args.data_type == 'fp32':
model.float()
elif args.data_type == 'fp16':
model.half()
elif args.data_type == 'bf16':
model.bfloat16()
else:
raise ValueError('wrong data_type argument {}'.format(args.data_type))
model.eval()
model.generator.eval()
return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, args, checkpoint=None, gpu_id=None):
"""Build a model from opts.
Args:
model_opt: the option loaded from checkpoint. It's important that
the opts have been updated and validated. See
:class:`onmt.utils.parse.ArgumentParser`.
fields (dict[str, torchtext.data.Field]):
`Field` objects for the model.
gpu (bool): whether to use gpu.
checkpoint: the model generated by train phase, or a resumed snapshot
model from a stopped training.
gpu_id (int or NoneType): Which GPU to use.
Returns:
the NMTModel.
"""
# for back compat when attention_dropout was not defined
try:
model_opt.attention_dropout
except AttributeError:
model_opt.attention_dropout = model_opt.dropout
# Build embeddings.
if model_opt.model_type == "text" or model_opt.model_type == "vec":
src_field = fields["src"]
src_emb = build_embeddings(model_opt, src_field)
else:
src_emb = None
# Build encoder.
encoder = TransformerEncoder.from_opt(model_opt, src_emb)
# Build decoder.
tgt_field = fields["tgt"]
tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
assert src_field.base_field.vocab == tgt_field.base_field.vocab, \
"preprocess with -share_vocab if you use share_embeddings"
tgt_emb.word_lut.weight = src_emb.word_lut.weight
decoder = TransformerDecoder.from_opt(model_opt, tgt_emb, args)
# Build NMTModel(= encoder + decoder).
if gpu and gpu_id is not None:
device = torch.device("cuda", gpu_id)
elif gpu and not gpu_id:
device = torch.device("cuda")
elif not gpu:
device = torch.device("cpu")
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size,
len(fields["tgt"].base_field.vocab)),
Cast(torch.float32),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
tgt_base_field = fields["tgt"].base_field
vocab_size = len(tgt_base_field.vocab)
pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
if model_opt.share_decoder_embeddings:
generator.linear.weight = decoder.embeddings.word_lut.weight
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
        # This preserves backward-compat for models using custom layernorm
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = {fix_key(k): v
for k, v in checkpoint['model'].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
if args.model_type != 'torch_decoding':
encoder_weights = EncoderWeights(model_opt.enc_layers, model_opt.enc_rnn_size, checkpoint['model'])
if args.data_type == 'fp16':
encoder_weights.to_half()
elif args.data_type == 'bf16':
encoder_weights.to_bfloat16()
encoder_weights.to_cuda()
encoder = CustomEncoder(model_opt.enc_layers, model_opt.heads, model_opt.enc_rnn_size // model_opt.heads, encoder_weights,
path=args.encoder_ths_path, embedding=model.encoder.embeddings)
model.encoder = encoder
if args.model_type == 'decoding_ext':
vocab_size = len(fields["tgt"].base_field.vocab)
bos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.init_token]
eos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.eos_token]
decoding_weights = DecodingWeights(model_opt.dec_layers, model_opt.dec_rnn_size, vocab_size, checkpoint)
ft_decoding_weights = FtDecodingWeights(model_opt.dec_layers, model_opt.dec_rnn_size, decoding_weights.w)
if args.data_type == 'fp16':
ft_decoding_weights.to_half()
elif args.data_type == 'bf16':
ft_decoding_weights.to_bfloat16()
ft_decoding_weights.to_cuda()
model.decoder = CustomDecoding(model_opt.heads, model_opt.dec_rnn_size // model_opt.heads,
model_opt.dec_rnn_size * 4, model_opt.dec_rnn_size, model_opt.dec_layers,
vocab_size, bos_idx, eos_idx, args.beam_search_diversity_rate,
args.sampling_topk, args.sampling_topp, 1.0, 1.0, 1.0, ft_decoding_weights, args=args)
elif args.model_type == 'torch_decoding' or args.model_type == 'torch_decoding_with_decoder_ext':
vocab_size = len(fields["tgt"].base_field.vocab)
bos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.init_token]
eos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.eos_token]
decoding_weights = DecodingWeights(model_opt.dec_layers, model_opt.dec_rnn_size, vocab_size, checkpoint)
decoding_weights.to_cuda()
if args.data_type == 'fp16':
decoding_weights.to_half()
elif args.data_type == 'bf16':
decoding_weights.to_bfloat16()
model.decoder = TorchDecoding(model_opt.dec_layers, model_opt.heads, model_opt.dec_rnn_size // model_opt.heads,
vocab_size, bos_idx, eos_idx, decoding_weights, args=args)
else:
raise ValueError("Wrong model_type argument, must be one of [decoding_ext, torch_decoding, torch_decoding_with_decoder_ext]")
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec)
model.generator = generator
model.to(device)
if model_opt.model_dtype == 'fp16' and model_opt.optim == 'fusedadam':
model.half()
elif model_opt.model_dtype == 'bf16' and model_opt.optim == 'fusedadam':
model.bfloat16()
return model
|
FasterTransformer-main
|
examples/pytorch/decoding/utils/translation_model.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import codecs
import os
import time
import numpy as np
from itertools import count, zip_longest
import torch
import onmt.model_builder
import onmt.inputters as inputters
import onmt.decoders.ensemble
from onmt.translate.beam_search import BeamSearch
from onmt.translate.greedy_search import GreedySearch
from onmt.utils.misc import tile, set_random_seed, report_matrix
from onmt.utils.alignment import extract_alignment, build_align_pharaoh
from onmt.modules.copy_generator import collapse_copy_scores
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
# max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
src_elements = count * max_src_in_batch
return src_elements
class Translator(object):
"""Translate a batch of sentences with a saved model.
Args:
model (onmt.modules.NMTModel): NMT model to use for translation
fields (dict[str, torchtext.data.Field]): A dict
mapping each side to its list of name-Field pairs.
src_reader (onmt.inputters.DataReaderBase): Source reader.
tgt_reader (onmt.inputters.TextDataReader): Target reader.
gpu (int): GPU device. Set to negative for no GPU.
n_best (int): How many beams to wait for.
min_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
max_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
beam_size (int): Number of beams.
random_sampling_topk (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
random_sampling_temp (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
stepwise_penalty (bool): Whether coverage penalty is applied every step
or not.
dump_beam (bool): Debugging option.
block_ngram_repeat (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
ignore_when_blocking (set or frozenset): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
replace_unk (bool): Replace unknown token.
data_type (str): Source data type.
verbose (bool): Print/log every translation.
report_time (bool): Print/log total time/frequency.
copy_attn (bool): Use copy attention.
global_scorer (onmt.translate.GNMTGlobalScorer): Translation
scoring/reranking object.
out_file (TextIO or codecs.StreamReaderWriter): Output file.
report_score (bool) : Whether to report scores
logger (logging.Logger or NoneType): Logger.
"""
def __init__(
self,
model,
fields,
src_reader,
tgt_reader,
model_type='torch_decoding',
gpu=-1,
n_best=1,
min_length=0,
max_length=100,
ratio=0.,
beam_size=30,
random_sampling_topk=1,
random_sampling_temp=1,
stepwise_penalty=None,
dump_beam=False,
block_ngram_repeat=0,
ignore_when_blocking=frozenset(),
replace_unk=False,
phrase_table="",
data_type="text",
verbose=False,
report_time=False,
copy_attn=False,
global_scorer=None,
out_file=None,
report_align=False,
report_score=True,
logger=None,
seed=-1):
self.model = model
self.fields = fields
tgt_field = dict(self.fields)["tgt"].base_field
self._tgt_vocab = tgt_field.vocab
self._tgt_eos_idx = self._tgt_vocab.stoi[tgt_field.eos_token]
self._tgt_pad_idx = self._tgt_vocab.stoi[tgt_field.pad_token]
self._tgt_bos_idx = self._tgt_vocab.stoi[tgt_field.init_token]
self._tgt_unk_idx = self._tgt_vocab.stoi[tgt_field.unk_token]
self._tgt_vocab_len = len(self._tgt_vocab)
self.model_type = model_type
self._gpu = gpu
self._use_cuda = gpu > -1
self._dev = torch.device("cuda", self._gpu) \
if self._use_cuda else torch.device("cpu")
self.n_best = n_best
self.max_length = max_length
self.beam_size = beam_size
self.random_sampling_temp = random_sampling_temp
self.sample_from_topk = random_sampling_topk
self.min_length = min_length
self.ratio = ratio
self.stepwise_penalty = stepwise_penalty
self.dump_beam = dump_beam
self.block_ngram_repeat = block_ngram_repeat
self.ignore_when_blocking = ignore_when_blocking
self._exclusion_idxs = {
self._tgt_vocab.stoi[t] for t in self.ignore_when_blocking}
self.src_reader = src_reader
self.tgt_reader = tgt_reader
self.replace_unk = replace_unk
if self.replace_unk and not self.model.decoder.attentional:
raise ValueError(
"replace_unk requires an attentional decoder.")
self.phrase_table = phrase_table
self.data_type = data_type
self.verbose = verbose
self.report_time = report_time
self.copy_attn = copy_attn
self.global_scorer = global_scorer
if self.global_scorer.has_cov_pen and \
not self.model.decoder.attentional:
raise ValueError(
"Coverage penalty requires an attentional decoder.")
self.out_file = out_file
self.report_align = report_align
self.report_score = report_score
self.logger = logger
self.use_filter_pred = False
self._filter_pred = None
set_random_seed(seed, self._use_cuda)
@classmethod
def from_opt(
cls,
model,
fields,
opt,
model_opt,
args,
global_scorer=None,
out_file=None,
report_align=False,
report_score=True,
logger=None):
"""Alternate constructor.
Args:
model (onmt.modules.NMTModel): See :func:`__init__()`.
fields (dict[str, torchtext.data.Field]): See
:func:`__init__()`.
opt (argparse.Namespace): Command line options
model_opt (argparse.Namespace): Command line options saved with
the model checkpoint.
global_scorer (onmt.translate.GNMTGlobalScorer): See
:func:`__init__()`..
out_file (TextIO or codecs.StreamReaderWriter): See
:func:`__init__()`.
report_align (bool) : See :func:`__init__()`.
report_score (bool) : See :func:`__init__()`.
logger (logging.Logger or NoneType): See :func:`__init__()`.
"""
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader["text"].from_opt(opt)
return cls(
model,
fields,
src_reader,
tgt_reader,
model_type=args.model_type,
gpu=opt.gpu,
n_best=opt.n_best,
min_length=opt.min_length,
max_length=opt.max_length,
ratio=opt.ratio,
beam_size=opt.beam_size,
random_sampling_topk=opt.random_sampling_topk,
random_sampling_temp=opt.random_sampling_temp,
stepwise_penalty=opt.stepwise_penalty,
dump_beam=opt.dump_beam,
block_ngram_repeat=opt.block_ngram_repeat,
ignore_when_blocking=set(opt.ignore_when_blocking),
replace_unk=opt.replace_unk,
phrase_table=opt.phrase_table,
data_type=opt.data_type,
verbose=opt.verbose,
report_time=opt.report_time,
copy_attn=model_opt.copy_attn,
global_scorer=global_scorer,
out_file=out_file,
report_align=report_align,
report_score=report_score,
logger=logger,
seed=opt.seed)
def _log(self, msg):
if self.logger:
self.logger.info(msg)
else:
print(msg)
def _gold_score(self, batch, memory_bank, src_lengths, src_vocabs,
use_src_map, enc_states, batch_size, src):
if "tgt" in batch.__dict__:
gs = self._score_target(
batch, memory_bank, src_lengths, src_vocabs,
batch.src_map if use_src_map else None)
self.model.decoder.init_state(src, memory_bank, enc_states)
else:
gs = [0] * batch_size
return gs
def translate(
self,
src,
tgt=None,
src_dir=None,
batch_size=None,
batch_type="sents",
attn_debug=False,
align_debug=False,
phrase_table=""):
"""Translate content of ``src`` and get gold scores from ``tgt``.
Args:
src: See :func:`self.src_reader.read()`.
tgt: See :func:`self.tgt_reader.read()`.
src_dir: See :func:`self.src_reader.read()` (only relevant
for certain types of data).
            batch_size (int): number of examples per mini-batch
            batch_type (str): batch examples by number of "sents" or "tokens"
            attn_debug (bool): enables the attention logging
            align_debug (bool): enables the word alignment logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions
"""
if batch_size is None:
raise ValueError("batch_size must be set")
src_data = {"reader": self.src_reader, "data": src, "dir": src_dir}
tgt_data = {"reader": self.tgt_reader, "data": tgt, "dir": None}
_readers, _data, _dir = inputters.Dataset.config(
[('src', src_data), ('tgt', tgt_data)])
# corpus_id field is useless here
if self.fields.get("corpus_id", None) is not None:
self.fields.pop('corpus_id')
data = inputters.Dataset(
self.fields, readers=_readers, data=_data, dirs=_dir,
sort_key=inputters.str2sortkey[self.data_type],
filter_pred=self._filter_pred
)
data_iter = inputters.OrderedIterator(
dataset=data,
device=self._dev,
batch_size=batch_size,
batch_size_fn=max_tok_len if batch_type == "tokens" else None,
train=False,
sort=False,
sort_within_batch=True,
shuffle=False
)
xlation_builder = onmt.translate.TranslationBuilder(
data, self.fields, self.n_best, self.replace_unk, tgt,
self.phrase_table
)
# Statistics
counter = count(1)
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
all_scores = []
all_predictions = []
start_time = time.time()
for batch in data_iter:
            if self.model_type in ('decoding_ext', 'torch_decoding', 'torch_decoding_with_decoder_ext'):
batch_data = self.translate_batch_ftdecoding(batch, data.src_vocabs)
else:
batch_data = self.translate_batch(
batch, data.src_vocabs, attn_debug
)
batch_data["batch"].indices = batch_data["batch"].indices.cpu()
batch_data["batch"].src = (batch_data["batch"].src[0].cpu(), batch_data["batch"].src[1].cpu())
if isinstance(batch_data["predictions"], torch.Tensor):
batch_data["predictions"] = batch_data["predictions"].cpu()
translations = xlation_builder.from_batch(batch_data)
for trans in translations:
all_scores += [trans.pred_scores[:self.n_best]]
pred_score_total += trans.pred_scores[0]
pred_words_total += len(trans.pred_sents[0])
if tgt is not None:
gold_score_total += trans.gold_score
gold_words_total += len(trans.gold_sent) + 1
n_best_preds = [" ".join(pred)
for pred in trans.pred_sents[:self.n_best]]
if self.report_align:
align_pharaohs = [build_align_pharaoh(align) for align
in trans.word_aligns[:self.n_best]]
n_best_preds_align = [" ".join(align) for align
in align_pharaohs]
n_best_preds = [pred + " ||| " + align
for pred, align in zip(
n_best_preds, n_best_preds_align)]
all_predictions += [n_best_preds]
self.out_file.write('\n'.join(n_best_preds) + '\n')
self.out_file.flush()
if self.verbose:
sent_number = next(counter)
output = trans.log(sent_number)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if attn_debug:
preds = trans.pred_sents[0]
preds.append('</s>')
attns = trans.attns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(attns[0]))]
output = report_matrix(srcs, preds, attns)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if align_debug:
if trans.gold_sent is not None:
tgts = trans.gold_sent
else:
tgts = trans.pred_sents[0]
align = trans.word_aligns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(align[0]))]
output = report_matrix(srcs, tgts, align)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
end_time = time.time()
if self.report_score:
msg = self._report_score('PRED', pred_score_total,
pred_words_total)
self._log(msg)
if tgt is not None:
msg = self._report_score('GOLD', gold_score_total,
gold_words_total)
self._log(msg)
if self.report_time:
total_time = end_time - start_time
print("Total translation time (s): %f" % total_time)
print("Average translation time (s): %f" % (
total_time / len(all_predictions)))
print("Tokens per second: %f" % (
pred_words_total / total_time))
if self.dump_beam:
import json
json.dump(self.translator.beam_accum,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions
def translate_batch(self, batch, src_vocabs, attn_debug):
"""Translate a batch of sentences."""
with torch.no_grad():
if self.beam_size == 1:
decode_strategy = GreedySearch(
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
batch_size=batch.batch_size,
min_length=self.min_length, max_length=self.max_length,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
return_attention=attn_debug or self.replace_unk,
sampling_temp=self.random_sampling_temp,
keep_topk=self.sample_from_topk)
else:
# TODO: support these blacklisted features
assert not self.dump_beam
decode_strategy = BeamSearch(
self.beam_size,
batch_size=batch.batch_size,
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
n_best=self.n_best,
global_scorer=self.global_scorer,
min_length=self.min_length, max_length=self.max_length,
return_attention=attn_debug or self.replace_unk,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
stepwise_penalty=self.stepwise_penalty,
ratio=self.ratio)
return self._translate_batch_with_strategy(batch, src_vocabs,
decode_strategy)
def translate_batch_ftdecoding(self, batch, src_vocabs):
with torch.no_grad():
use_src_map = self.copy_attn
batch_size = batch.batch_size
src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)
results = {
"predictions": None,
"scores": None,
"attention": None,
"batch": batch,
"gold_score": self._gold_score(
batch, memory_bank, src_lengths, src_vocabs, use_src_map,
enc_states, batch_size, src)}
src_lengths_ = src_lengths.to(torch.int32)
memory_bank_ = memory_bank.transpose(0, 1).contiguous()
output, lengths = self.model.decoder(batch_size, self.beam_size, self.max_length, memory_bank_, src_lengths_)
results["scores"] = [(0,) for _ in range(batch_size)]
results["predictions"] = output
results["attention"] = [[None] * self.n_best for _ in range(batch_size)]
results["alignment"] = [[] for _ in range(batch_size)]
return results
def _run_encoder(self, batch):
src, src_lengths = batch.src if isinstance(batch.src, tuple) \
else (batch.src, None)
enc_states, memory_bank, src_lengths = self.model.encoder(
src, src_lengths.to(torch.int32))
if src_lengths is None:
assert not isinstance(memory_bank, tuple), \
'Ensemble decoding only supported for text data'
src_lengths = torch.Tensor(batch.batch_size) \
.type_as(memory_bank) \
.long() \
.fill_(memory_bank.size(0))
return src, enc_states, memory_bank, src_lengths
def _decode_and_generate(
self,
decoder_in,
memory_bank,
batch,
src_vocabs,
memory_lengths,
src_map=None,
step=None,
batch_offset=None):
if self.copy_attn:
# Turn any copied words into UNKs.
decoder_in = decoder_in.masked_fill(
decoder_in.gt(self._tgt_vocab_len - 1), self._tgt_unk_idx
)
# Decoder forward, takes [tgt_len, batch, nfeats] as input
# and [src_len, batch, hidden] as memory_bank
# in case of inference tgt_len = 1, batch = beam times batch_size
# in case of Gold Scoring tgt_len = actual length, batch = 1 batch
dec_out, dec_attn = self.model.decoder(
decoder_in, memory_bank, memory_lengths=memory_lengths, step=step
)
# Generator forward.
if not self.copy_attn:
if "std" in dec_attn:
attn = dec_attn["std"]
else:
attn = None
log_probs = self.model.generator(dec_out.squeeze(0))
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
else:
attn = dec_attn["copy"]
scores = self.model.generator(dec_out.view(-1, dec_out.size(2)),
attn.view(-1, attn.size(2)),
src_map)
# here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]
if batch_offset is None:
scores = scores.view(-1, batch.batch_size, scores.size(-1))
scores = scores.transpose(0, 1).contiguous()
else:
scores = scores.view(-1, self.beam_size, scores.size(-1))
scores = collapse_copy_scores(
scores,
batch,
self._tgt_vocab,
src_vocabs,
batch_dim=0,
batch_offset=batch_offset
)
scores = scores.view(decoder_in.size(0), -1, scores.size(-1))
log_probs = scores.squeeze(0).log()
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
return log_probs, attn
def _translate_batch_with_strategy(
self,
batch,
src_vocabs,
decode_strategy):
"""Translate a batch of sentences step by step using cache.
Args:
            batch: a batch of sentences, yielded by the data iterator.
            src_vocabs (list): list of torchtext.data.Vocab if can_copy.
            decode_strategy (DecodeStrategy): the decode strategy used to
                generate the translation step by step.
Returns:
results (dict): The translation results.
"""
# (0) Prep the components of the search.
use_src_map = self.copy_attn
parallel_paths = decode_strategy.parallel_paths # beam_size
batch_size = batch.batch_size
# (1) Run the encoder on the src.
src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)
self.model.decoder.init_state(src, memory_bank, enc_states)
results = {
"predictions": None,
"scores": None,
"attention": None,
"batch": batch,
"gold_score": self._gold_score(
batch, memory_bank, src_lengths, src_vocabs, use_src_map,
enc_states, batch_size, src)}
# (2) prep decode_strategy. Possibly repeat src objects.
src_map = batch.src_map if use_src_map else None
fn_map_state, memory_bank, memory_lengths, src_map = \
decode_strategy.initialize(memory_bank, src_lengths, src_map)
if fn_map_state is not None:
self.model.decoder.map_state(fn_map_state)
# (3) Begin decoding step by step:
for step in range(decode_strategy.max_length):
decoder_input = decode_strategy.current_predictions.view(1, -1, 1)
log_probs, attn = self._decode_and_generate(
decoder_input,
memory_bank,
batch,
src_vocabs,
memory_lengths=memory_lengths,
src_map=src_map,
step=step,
batch_offset=decode_strategy.batch_offset)
decode_strategy.advance(log_probs, attn)
any_finished = decode_strategy.is_finished.any()
if any_finished:
decode_strategy.update_finished()
if decode_strategy.done:
break
select_indices = decode_strategy.select_indices
if any_finished:
# Reorder states.
if isinstance(memory_bank, tuple):
memory_bank = tuple(x.index_select(1, select_indices)
for x in memory_bank)
else:
memory_bank = memory_bank.index_select(1, select_indices)
memory_lengths = memory_lengths.index_select(0, select_indices)
if src_map is not None:
src_map = src_map.index_select(1, select_indices)
if parallel_paths > 1 or any_finished:
self.model.decoder.map_state(
lambda state, dim: state.index_select(dim, select_indices))
results["scores"] = decode_strategy.scores
results["predictions"] = decode_strategy.predictions
results["attention"] = decode_strategy.attention
results["alignment"] = [[] for _ in range(batch_size)]
return results
def _score_target(self, batch, memory_bank, src_lengths,
src_vocabs, src_map):
tgt = batch.tgt
tgt_in = tgt[:-1]
log_probs, attn = self._decode_and_generate(
tgt_in, memory_bank, batch, src_vocabs,
memory_lengths=src_lengths, src_map=src_map)
log_probs[:, :, self._tgt_pad_idx] = 0
gold = tgt[1:]
gold_scores = log_probs.gather(2, gold)
gold_scores = gold_scores.sum(dim=0).view(-1)
return gold_scores
def _report_score(self, name, score_total, words_total):
if words_total == 0:
msg = "%s No words predicted" % (name,)
else:
avg_score = score_total / words_total
ppl = np.exp(-score_total.item() / words_total)
msg = ("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, avg_score,
name, ppl))
return msg
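# ----------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original file).
# It shows how the translator class above might be driven, assuming the
# class is exported as `Translator`, that `model`, `fields`, `opt`,
# `model_opt` and `args` have already been loaded by the caller, and that
# `opt` carries the usual OpenNMT-style attributes (`src`, `batch_size`,
# `batch_type`):
#
#   translator = Translator.from_opt(
#       model, fields, opt, model_opt, args,
#       global_scorer=onmt.translate.GNMTGlobalScorer.from_opt(opt),
#       out_file=codecs.open("pred.txt", "w", "utf-8"))
#   all_scores, all_predictions = translator.translate(
#       src=open(opt.src, "rb").readlines(),
#       batch_size=opt.batch_size,
#       batch_type=opt.batch_type)
# ----------------------------------------------------------------------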
|
FasterTransformer-main
|
examples/pytorch/decoding/utils/translator.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import math
import logging
from datetime import datetime
import numpy as np
import torch
import torch.distributed as dist
from transformers import BartForConditionalGeneration, BartTokenizer
from transformers import MBartForConditionalGeneration, MBartTokenizer
from transformers import PreTrainedTokenizerFast
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.bart.utils.ft_encoder import FTBartEncoderWeight, FTBartEncoder
from examples.pytorch.bart.utils.ft_decoding import FTBartDecodingWeight, FTBartDecoding, FTBart
from examples.pytorch.decoding.utils.recover_bpe import recover_bpe
LOGGER = logging.getLogger(__name__)
gemm_data_type_mapping = {"fp32":0, "fp16":1, "bf16":2}
def bleu_score(pred, ref):
from sacrebleu import corpus_bleu
bleu = corpus_bleu(pred, [ref], force=True)
LOGGER.info(" bleu score: {:6.2f}".format(bleu.score))
LOGGER.info(" bleu counts: {}".format(bleu.counts))
LOGGER.info(" bleu totals: {}".format(bleu.totals))
LOGGER.info(" bleu precisions: {}".format(bleu.precisions))
LOGGER.info(" bleu sys_len: {}; ref_len: {}".format(bleu.sys_len, bleu.ref_len))
return bleu
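# Hedged usage note (illustrative, not part of the original example): bleu_score
# expects parallel lists of detokenized hypothesis and reference strings, e.g.
#   hyps = ["the cat sat on the mat"]
#   refs = ["the cat sat on the mat"]
#   result = bleu_score(hyps, refs)   # result.score == 100.0 for an exact match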
class TranslationResult(object):
def __init__(self, name, frame_work):
self.name = name
self.frame_work = frame_work # FT or HF
self.file_name = name + ".txt"
self.token_list = []
self.batch_ids_list = []
self.batch_seq_len_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.token_num = 0
self.bleu_score = None
def translate(args_dict):
torch.set_printoptions(precision=6)
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
topk = args_dict['sampling_topk']
topp = args_dict['sampling_topp']
tensor_para_size = args_dict['tensor_para_size']
pipeline_para_size = args_dict['pipeline_para_size']
max_ite = args_dict['max_iteration']
# repetition_penalty = args_dict["repetition_penalty"]
# temperature = args_dict["temperature"]
# len_penalty = args_dict["len_penalty"]
    model_path = args_dict['model_path'] if args_dict['model_path'] is not None else args_dict['model']
lib_path = args_dict['lib_path']
    if dist.is_mpi_available():
        try:
            dist.init_process_group(backend='mpi')
            rank = dist.get_rank()
        except:
            # The process group may already have been initialized elsewhere;
            # the rank is still retrievable in that case.
            rank = dist.get_rank()
else:
rank = 0
if rank == 0:
LOGGER.info("\n=============== Argument ===============")
for key in args_dict:
LOGGER.info("{}: {}".format(key, args_dict[key]))
LOGGER.info("========================================")
if 'mbart' not in model_path:
hf_bart_model = BartForConditionalGeneration.from_pretrained(model_path)
tokenizer = BartTokenizer.from_pretrained(model_path)
layernorm_type = "post_layernorm"
else:
hf_bart_model = MBartForConditionalGeneration.from_pretrained(model_path)
tokenizer = MBartTokenizer.from_pretrained(model_path)
layernorm_type = "pre_layernorm"
is_mbart = hf_bart_model.config.add_final_layer_norm
hf_bart_model = hf_bart_model.eval().to('cuda')
try:
fast_tokenizer = PreTrainedTokenizerFast.from_pretrained(model_path)
except:
fast_tokenizer = tokenizer
config = hf_bart_model.config
activation_type = config.activation_function
bart_with_bias = True
use_gated_activation = False
position_embedding_type = 1 # absolute positional embedding
weight_data_type = np.float32
encoder_head_size = config.d_model // config.encoder_attention_heads
decoder_head_size = config.d_model // config.decoder_attention_heads
if time_args.find("0") != -1 or time_args.find("2") != -1:
hf_bart_model = hf_bart_model.to(rank)
if args_dict['inference_data_type'] == 'fp16':
hf_bart_model = hf_bart_model.half()
        elif args_dict['inference_data_type'] == 'bf16':
            hf_bart_model = hf_bart_model  # bf16 HF inference is not supported yet; keep the fp32 weights
if rank == 0:
LOGGER.debug(f"config: {config}")
if os.path.isfile("gemm_config.in") and rank == 0:
cmd = f"rm gemm_config.in"
LOGGER.info(f"Run {cmd}")
os.system(cmd)
translation_result_list = []
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult("hf-beamsearch-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-beamsearch", "HF"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult("ft-beamsearch-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-beamsearch", "FT"))
if rank == 0 and not args_dict["skip_gemm_test"]:
inference_data_type = gemm_data_type_mapping[args_dict['inference_data_type']]
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {beam_size} {max_seq_len} " \
f"{config.d_model} {config.encoder_attention_heads} {encoder_head_size} {config.encoder_ffn_dim} " \
f"{config.d_model} {config.decoder_attention_heads} {decoder_head_size} {config.decoder_ffn_dim} " \
f"{config.vocab_size} {inference_data_type} {tensor_para_size} 0 > .tmp_gemm.log"
LOGGER.info(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult("hf-sampling-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-sampling", "HF"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult("ft-sampling-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-sampling", "FT"))
if rank == 0 and not args_dict["skip_gemm_test"]:
inference_data_type = gemm_data_type_mapping[args_dict['inference_data_type']]
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {1} {max_seq_len} " \
f"{config.d_model} {config.encoder_attention_heads} {encoder_head_size} {config.encoder_ffn_dim} " \
f"{config.d_model} {config.decoder_attention_heads} {decoder_head_size} {config.decoder_ffn_dim} " \
f"{config.vocab_size} {inference_data_type} {tensor_para_size} 1 > .tmp_gemm.log"
LOGGER.info(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("1") != -1 or time_args.find("3") != -1:
        remove_padding = batch_size > 32
ft_encoder_weight = FTBartEncoderWeight(
config,
tensor_para_size,
pipeline_para_size,
bart_with_bias=bart_with_bias,
mbart=is_mbart,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
ft_encoder_weight.load_from_model(hf_bart_model.float())
ft_decoding_weight = FTBartDecodingWeight(
config,
tensor_para_size,
pipeline_para_size,
bart_with_bias=bart_with_bias,
mbart=is_mbart,
use_gated_activation=use_gated_activation,
position_embedding_type=position_embedding_type,
weight_data_type=weight_data_type,
)
ft_decoding_weight.load_from_model(hf_bart_model.float())
if args_dict['inference_data_type'] == "fp16":
ft_encoder_weight.to_half()
ft_decoding_weight.to_half()
elif args_dict['inference_data_type'] == "bf16":
ft_encoder_weight.to_bfloat16()
ft_decoding_weight.to_bfloat16()
ft_encoder = FTBartEncoder(ft_encoder_weight.w, lib_path, config.encoder_attention_heads,
encoder_head_size, config.encoder_ffn_dim,
config.d_model, remove_padding, config.encoder_layers,
tensor_para_size=tensor_para_size, pipeline_para_size=pipeline_para_size,
bart_with_bias=bart_with_bias, mbart=is_mbart,
position_embedding_type=position_embedding_type,
activation_type=activation_type, layernorm_type=layernorm_type)
ft_decoding = FTBartDecoding(ft_decoding_weight.w, lib_path,
config.decoder_attention_heads, decoder_head_size,
config.decoder_ffn_dim, config.d_model,
config.d_model, config.decoder_layers,
config.decoder_start_token_id, config.eos_token_id, config.vocab_size,
tensor_para_size=tensor_para_size, pipeline_para_size=pipeline_para_size,
bart_with_bias=bart_with_bias, mbart=is_mbart,
position_embedding_type=position_embedding_type,
activation_type=activation_type, layernorm_type=layernorm_type)
ft_bart = FTBart(ft_encoder, ft_decoding)
with open(source_file, 'r') as f:
src_text = recover_bpe(f.readlines())
src_text = ["translate English to German: " + line.strip() for line in src_text]
with open(tgt_file, 'r') as f:
tgt_text = recover_bpe(f.readlines())
for i in range(len(translation_result_list)):
sys.stdout.flush()
prev = 0
start_time = datetime.now()
while prev < len(src_text):
input_texts = src_text[prev:prev+batch_size]
prev += batch_size
input_token = tokenizer(input_texts, return_tensors='pt', padding=True)
if translation_result_list[i].frame_work == "HF":
if translation_result_list[i].name.find("beamsearch") != -1:
hf_outputs = hf_bart_model.generate(input_token.input_ids.to("cuda"),
max_length=max_seq_len,
early_stopping=True,
num_beams=beam_size)
elif translation_result_list[i].name.find("sampling") != -1:
hf_outputs = hf_bart_model.generate(input_token.input_ids.to("cuda"),
max_length=max_seq_len,
early_stopping=True,
do_sample=True,
top_k=topk if topk > 0 else None,
top_p=topp if topp > 0.0 else None)
translation_result_list[i].batch_ids_list.append(hf_outputs)
translation_result_list[i].batch_seq_len_list.append(np.ones(len(input_texts)) * max_seq_len)
elif translation_result_list[i].frame_work == "FT":
tmp_beam_size = beam_size
if translation_result_list[i].name.find("sampling") != -1:
tmp_beam_size = 1
return_dict = ft_bart(input_token['input_ids'],
input_token['attention_mask'],
inputs_embeds=None,
beam_size=tmp_beam_size,
max_seq_len=max_seq_len,
top_k=topk,
top_p=topp,
beam_search_diversity_rate=beam_search_diversity_rate,
is_return_output_log_probs=args_dict["return_output_log_probs"],
is_return_cum_log_probs=args_dict["return_cum_log_probs"],)
ft_output_ids = return_dict['output_ids']
ft_sequence_length = return_dict['sequence_lengths']
translation_result_list[i].batch_ids_list.append(ft_output_ids)
translation_result_list[i].batch_seq_len_list.append(ft_sequence_length)
            translation_result_list[i].sentence_num += len(input_texts)
translation_result_list[i].batch_num += 1
if translation_result_list[i].name.find("warmup") != -1 and \
(translation_result_list[i].batch_num > 10 or translation_result_list[i].sentence_num > 300):
break
if translation_result_list[i].batch_num >= max_ite:
break
stop_time = datetime.now()
translation_result_list[i].execution_time = (stop_time - start_time).total_seconds()
if translation_result_list[i].name.find("warmup") != -1:
continue
for batch_token, batch_seq_len in zip(translation_result_list[i].batch_ids_list, translation_result_list[i].batch_seq_len_list):
for j in range(len(batch_token)):
if translation_result_list[i].frame_work == "HF":
translation_result_list[i].token_list.append(fast_tokenizer.decode(batch_token[j][1:], skip_special_tokens=True))
translation_result_list[i].token_num += sum(batch_token[j][:] != 0)
elif translation_result_list[i].frame_work == "FT":
translation_result_list[i].token_list.append(fast_tokenizer.decode(batch_token[j][0][:batch_seq_len[j][0]], skip_special_tokens=True))
translation_result_list[i].token_num += batch_seq_len[j][0]
if rank == 0:
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].token_list, tgt_text[:len(translation_result_list[i].token_list)])
with open(translation_result_list[i].name + ".txt", 'w') as f:
for line in translation_result_list[i].token_list:
                    f.write(line + "\n")
if rank == 0:
for t in translation_result_list:
if t.name.find("warmup") != -1:
continue
LOGGER.info(f"{t.name} translates {t.batch_num} batches taking {t.execution_time:.2f} sec to translate "
f"{t.token_num} tokens, BLEU score: {t.bleu_score.score:.2f}, {(t.token_num / t.execution_time):.0f} tokens/sec."
f" ({t.bleu_score.sys_len} words, {(t.bleu_score.sys_len / t.execution_time):.0f} words/sec)")
if t.name == "ft-beamsearch" and args_dict["ft_beamsearch_BLEU_threshold"] != None:
print(t.bleu_score.score, args_dict["ft_beamsearch_BLEU_threshold"])
assert t.bleu_score.score >= args_dict["ft_beamsearch_BLEU_threshold"], f"[ERROR] {t.name} test fail !"
LOGGER.info(f"{t.name} PASS !")
if t.name == "ft-sampling" and args_dict["ft_sampling_BLEU_threshold"] != None:
print(t.bleu_score.score, args_dict["ft_sampling_BLEU_threshold"])
assert t.bleu_score.score >= args_dict["ft_sampling_BLEU_threshold"], f"[ERROR] {t.name} test fail !"
LOGGER.info(f"{t.name} PASS !")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
help='max sequence length (default: 200)')
parser.add_argument("--source", default="../examples/pytorch/decoding/utils/translation/test.en",
help="Path to the source file.")
parser.add_argument("--target", default="../examples/pytorch/decoding/utils/translation/test.de",
help="Path to the target file.")
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
                        help='''
                            Which pipelines to benchmark (default: '', i.e. none):
                            '': benchmark nothing
                            '0': test hf_beamsearch
                            '1': test ft_beamsearch
                            '2': test hf_sampling
                            '3': test ft_sampling
                            e.g., to test hf_beamsearch and ft_sampling,
                            use -time '03' ''')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='Diversity rate of beam search. Default is 0. When the diversity rate is 0, it is equivalent to naive beam search.')
# parser.add_argument('-repeat_penalty', '--repetition_penalty', type=float, default=1.0, metavar='NUMBER',
# help='Repetition penalty for generating tokens. Default is 1.0.')
# parser.add_argument('-temperature', '--temperature', type=float, default=1.0, metavar='NUMBER',
# help='Temperature penalty for generating tokens. Default is 1.0.')
# parser.add_argument('-len_penalty', '--len_penalty', type=float, default=0.0, metavar='NUMBER',
# help='Length penalty for generating tokens. Default is 0.0.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--inference_data_type', type=str, default="fp32", metavar='STRING',
help='data type for inference (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-lib_path', '--lib_path', type=str, default="lib/libth_transformer.so", metavar='STRING',
help='the path of FasterTransformer pytorch bart op library.')
parser.add_argument('-model_path', '--model_path', type=str, default=None, metavar='STRING',
                        help='BART model path.')
parser.add_argument('-model', '--model', type=str, default="bart-base", metavar='STRING',
                        help='BART model name (e.g. bart-base). Only used when --model_path is None.')
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='size of tensor parallelism (default: 1)')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='size of pipeline parallelism (default: 1)')
parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
                        help='Maximum number of iterations for translation; default is 100000 (large enough to run the whole test set).')
parser.add_argument('--return_output_log_probs', action='store_true',
help='Return the log probability of generated tokens.')
parser.add_argument('--return_cum_log_probs', action='store_true',
help='Return the cumulative log probability of generated tokens.')
parser.add_argument('--ft_beamsearch_BLEU_threshold', type=float,
help='Threshold of FT beam search BLEU score')
parser.add_argument('--ft_sampling_BLEU_threshold', type=float,
                        help='Threshold of FT sampling BLEU score')
parser.add_argument("--verbose", action="store_true", help="Provide verbose messages")
parser.add_argument('--skip_gemm_test', action='store_true')
args = parser.parse_args()
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO, format=log_format)
translate(vars(args))
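# Hedged invocation sketch (illustrative; the model identifier and library path are
# assumptions the user must adapt). '-time 13' benchmarks both the FT beam-search
# ('1') and FT sampling ('3') pipelines; --source/--target default to the bundled
# test.en/test.de files:
#
#   python translate_example.py -batch 32 -beam 4 -s 128 -time 13 \
#       -model_path facebook/bart-base -d fp16 \
#       -lib_path lib/libth_transformer.so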
|
FasterTransformer-main
|
examples/pytorch/bart/translate_example.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.distributed as dist
import numpy as np
class FTBartEncoderWeight(object):
def __init__(
self,
config,
tensor_para_size,
pipeline_para_size,
*,
bart_with_bias=True,
mbart=False,
use_gated_activation=False,
position_embedding_type=1,
weight_data_type
):
self.num_layer = config.encoder_layers
self.config = config
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.bart_with_bias = bart_with_bias
self.mbart = mbart
self.use_gated_activation = use_gated_activation
self.real_weights_num = 24 # assume all weights are allocated
self.position_embedding_type = position_embedding_type
self.weight_data_type = weight_data_type
self.w = []
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
self.rank = dist.get_rank() if self.use_mpi else 0
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size() if self.use_mpi else 1
assert world_size == tensor_para_size * \
pipeline_para_size, "[ERROR] world_size != tensor_para_size * pipeline_para_size"
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
def load_from_model(self, model):
'''Only applies to HuggingFace models.
Weight loading order: PyTorch tensor order should conform to src/fastertransformer/th_op/BartEncoderOp.h:FasterTransformerBartEncoder. For per-layer weights, the tensor is a stack of the weight across all layers.
'''
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
np_weight_dtype = self.weight_data_type
torch_weight_dtype = {np.float32: torch.float32, np.float16: torch.float16}[np_weight_dtype]
encoder_weight_dict = {}
for name, param in model.named_parameters():
# HF BART/mBART model's weight names are prepended with "model.", remove for consistency
name = name.replace("model.", "")
if param.dim() == 2:
param_t = param.transpose(1, 0) # PyTorch --> FT weight loading needs transpose
elif param.dim() == 1:
param_t = param
else:
assert False, f"The dimension of param {name} should be 1 or 2"
if name.find("encoder.layers") != -1 or name.find("encoder.layernorm_embedding") != -1 or name.find("encoder.layer_norm") != -1:
encoder_weight_dict[name] = param_t
if name.find("encoder.embed_positions") != -1:
encoder_weight_dict[name] = param # positional embedding table should NOT be transposed
# [0]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn_layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [1]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.q_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
# [2]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.k_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
# [3]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.v_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
# [4]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.out_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous())
# [5]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.final_layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [6]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.fc1.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
        # [7] add an empty weight for gated activation for now (BART/mBART models do not use gated activation by default)
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
# [8]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.fc2.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous())
# [9] (1) positional embedding table should NOT be transposed, [max position embeddings, hidden size] (2) need to apply offset of 2 for absolute position embeddings in BART/mBART
t = encoder_weight_dict["encoder.embed_positions.weight"][2:, :].contiguous().cuda()
self.w.append(t)
# [10] input embedding table should NOT be transposed, [vocab, hidden size]. Directly obtained from raw weight is untransposed
t = model.get_input_embeddings().weight.contiguous().cuda()
        # the input word embedding may be scaled (mBART); rather than customizing this inside FT,
        # it is simpler to apply the scaling here while loading the embedding in PyTorch
embedding_scale = np.sqrt(model.config.d_model) if model.config.scale_embedding else 1.0
t = t * embedding_scale
self.w.append(t)
# [11] LayerNorm after embedding & before transformer block, special in BART/mBART
t = encoder_weight_dict["encoder.layernorm_embedding.weight"].contiguous().cuda()
self.w.append(t)
# [12] LayerNorm after transformer block, special in mBART
if self.mbart:
t = encoder_weight_dict["encoder.layer_norm.weight"].contiguous().cuda()
else:
t = torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda()
self.w.append(t)
if self.bart_with_bias:
# [13]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn_layer_norm.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [14]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.q_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [15]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.k_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [16]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.v_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [17]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.self_attn.out_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [18]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.final_layer_norm.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [19]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.fc1.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
            # [20] add an empty bias for gated activation for now (BART/mBART models do not use gated activation by default)
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
# [21]
t = torch.stack([encoder_weight_dict["encoder.layers.{}.fc2.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [22]
t = encoder_weight_dict["encoder.layernorm_embedding.bias"].contiguous().cuda()
self.w.append(t)
# [23] LayerNorm after transformer block, special in mBART
if self.mbart:
t = encoder_weight_dict["encoder.layer_norm.bias"].contiguous().cuda()
else:
t = torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda()
self.w.append(t)
else:
# TODO: pass None Type to Torch Op
for i in range(11):
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
def to_cuda(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].cuda()
def to_float(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_half(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].half()
def to_single(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_bfloat16(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].bfloat16()
class FTBartEncoder(nn.Module):
def __init__(self, encoder_weight_list, lib_path, head_num, head_size, inter_size, d_model, is_remove_padding,
num_layer, num_bucket=32, max_distance=128, sparse=False, q_scaling=1.0, tensor_para_size=1, pipeline_para_size=1,
bart_with_bias=True, mbart=False, position_embedding_type=1, activation_type="gelu", layernorm_type="post_layernorm"):
super().__init__()
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
torch.classes.load_library(lib_path)
try:
self.encoder = torch.classes.FasterTransformer.BartEncoder(*encoder_weight_list, head_num, head_size, inter_size, d_model,
is_remove_padding, num_layer, num_bucket, max_distance, sparse, q_scaling, tensor_para_size, pipeline_para_size,
bart_with_bias, mbart, position_embedding_type, activation_type, layernorm_type)
except:
self.encoder = torch.classes.FasterTransformerBartEncoder(*encoder_weight_list, head_num, head_size, inter_size, d_model,
is_remove_padding, num_layer, num_bucket, max_distance, sparse, q_scaling, tensor_para_size, pipeline_para_size,
bart_with_bias, mbart, position_embedding_type, activation_type, layernorm_type)
def forward(self, input, seq_len, inputs_embeds=None):
output = self.encoder.forward(input, seq_len, inputs_embeds)
return output
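# ----------------------------------------------------------------------
# Hedged usage sketch (mirrors examples/pytorch/bart/translate_example.py in this
# repo; `config`, `hf_bart_model`, `lib_path`, `input_ids` and `seq_len` are
# assumptions supplied by the caller):
#
#   weight = FTBartEncoderWeight(config, tensor_para_size=1, pipeline_para_size=1,
#                                weight_data_type=np.float32)
#   weight.load_from_model(hf_bart_model.float())
#   encoder = FTBartEncoder(weight.w, lib_path, config.encoder_attention_heads,
#                           config.d_model // config.encoder_attention_heads,
#                           config.encoder_ffn_dim, config.d_model,
#                           is_remove_padding=False, num_layer=config.encoder_layers)
#   hidden_states = encoder(input_ids, seq_len)  # int32 token ids and lengths on the GPU
# ----------------------------------------------------------------------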
|
FasterTransformer-main
|
examples/pytorch/bart/utils/ft_encoder.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.distributed as dist
import numpy as np
class FTBartDecodingWeight(object):
def __init__(
self,
config,
tensor_para_size,
pipeline_para_size,
*,
bart_with_bias=True,
mbart=False,
use_gated_activation=False,
position_embedding_type=1,
weight_data_type
):
self.config = config
self.num_layer = config.decoder_layers
self.tensor_para_size = tensor_para_size
self.pipeline_para_size = pipeline_para_size
self.bart_with_bias = bart_with_bias
self.mbart = mbart
self.use_gated_activation = use_gated_activation
self.position_embedding_type = position_embedding_type
self.real_weights_num = 32 # assume all weights are allocated and converted to specific data type
self.weight_data_type = weight_data_type
self.w = []
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
self.rank = dist.get_rank() if self.use_mpi else 0
self.device_count = torch.cuda.device_count()
self.device = self.rank % self.device_count
torch.cuda.set_device(self.device)
world_size = dist.get_world_size() if self.use_mpi else 1
assert world_size == tensor_para_size * \
pipeline_para_size, "[ERROR] world_size != tensor_para_size * pipeline_para_size"
self.tensor_para_rank = self.rank % self.tensor_para_size
self.pipeline_para_rank = self.rank // self.tensor_para_size
def load_from_model(self, model):
'''Only applies to HuggingFace models.
Weight loading order: PyTorch tensor order should conform to src/fastertransformer/th_op/BartDecodingOp.h:FasterTransformerBartDecoding. For per-layer weights, the tensor is a stack of the weight across all layers.
'''
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
np_weight_dtype = self.weight_data_type
torch_weight_dtype = {np.float32: torch.float32, np.float16: torch.float16}[np_weight_dtype]
weight_dict = {}
qkv_weight_tmp = ["q", "k", "v"] # must respect this order
qkv_weight_len = 0
qkv_bias_tmp = ["q", "k", "v"]
qkv_bias_len = 0
for name, param in model.state_dict().items():
name = name.replace("model.", "")
if param.dim() == 2:
param_t = param.transpose(1, 0)
elif param.dim() == 1:
param_t = param
else:
assert False, f"The dimension of param {name} should be 2"
if name.find("decoder.layers") != -1:
if name.find(".self_attn.q_proj.weight") != -1 or name.find(".self_attn.k_proj.weight") != -1 or name.find(".self_attn.v_proj.weight") != -1:
qkv_weight_tmp[0 if "q_proj" in name else 1 if "k_proj" in name else 2] = param_t # qkv order in weight dict is not guaranteed
qkv_weight_len += 1
if qkv_weight_len == 3:
qkv_weight = torch.cat(qkv_weight_tmp, dim=-1)
weight_dict[name.partition("self_attn")[0] + "self_attn.qkv_proj.weight"] = qkv_weight
qkv_weight_tmp = ["q", "k", "v"]
qkv_weight_len = 0
elif name.find(".self_attn.q_proj.bias") != -1 or name.find(".self_attn.k_proj.bias") != -1 or name.find(".self_attn.v_proj.bias") != -1:
qkv_bias_tmp[0 if "q_proj" in name else 1 if "k_proj" in name else 2] = param_t # qkv order in weight dict is not guaranteed
qkv_bias_len += 1
if qkv_bias_len == 3:
qkv_bias = torch.cat(qkv_bias_tmp, dim=-1)
weight_dict[name.partition("self_attn")[0] + "self_attn.qkv_proj.bias"] = qkv_bias
qkv_bias_tmp = ["q", "k", "v"]
qkv_bias_len = 0
else:
weight_dict[name] = param_t
elif name.find("decoder.layernorm_embedding") != -1 or name.find("decoder.layer_norm") != -1 or name.find("final_logits_bias") != -1 or name.find("lm_head") != -1:
weight_dict[name] = param_t
elif name.find("decoder.embed_positions") != -1:
weight_dict[name] = param
# load by torch model directly
# [0] self-attention
t = torch.stack([weight_dict["decoder.layers.{}.self_attn_layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [1] QKV weight concatenated
t = torch.stack([weight_dict["decoder.layers.{}.self_attn.qkv_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.reshape([t.shape[0], t.shape[1], 3, t.shape[2] // 3])
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [2]
t = torch.stack([weight_dict["decoder.layers.{}.self_attn.out_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous())
# [3] cross-attention
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn_layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [4]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.q_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
# [5]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.k_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
# [6]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.v_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
# [7]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.out_proj.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous())
# [8]
t = torch.stack([weight_dict["decoder.layers.{}.final_layer_norm.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [9]
t = torch.stack([weight_dict["decoder.layers.{}.fc1.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous())
        # [10] add an empty weight for gated activation for now (BART/mBART models do not use gated activation by default)
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
# [11]
t = torch.stack([weight_dict["decoder.layers.{}.fc2.weight".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t.split(t.shape[1] // self.tensor_para_size, dim=1)[self.tensor_para_rank].contiguous())
# [12] (1) positional embedding table should NOT be transposed, [max position embeddings, hidden size] (2) need to apply offset of 2 for absolute position embeddings in BART/mBART
t = weight_dict["decoder.embed_positions.weight"][2:, :].contiguous().cuda()
self.w.append(t)
# [13] input embedding table should NOT be transposed, [vocab, hidden size]. Directly obtained from raw weight is untransposed
t = model.get_input_embeddings().weight.contiguous().cuda()
        # the input word embedding may be scaled (mBART); rather than customizing this inside FT,
        # it is simpler to apply the scaling here while loading the embedding in PyTorch
embedding_scale = np.sqrt(model.config.d_model) if model.config.scale_embedding else 1.0
t = t * embedding_scale
self.w.append(t)
# [14] output embedding table should NOT be transposed, [vocab, hidden size]. Directly obtained from raw weight is untransposed
t = model.get_output_embeddings().weight.contiguous().cuda() # same as weight_dict["lm_head.weight"].transpose(1, 0).contiguous().cuda()
self.w.append(t)
# [15] LayerNorm after embedding & before transformer block, special in BART/mBART
t = weight_dict["decoder.layernorm_embedding.weight"].contiguous().cuda()
self.w.append(t)
# [16] LayerNorm after transformer block, special in mBART
if self.mbart:
t = weight_dict["decoder.layer_norm.weight"].contiguous().cuda()
else:
t = torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda()
self.w.append(t)
if self.bart_with_bias:
# [17]
t = torch.stack([weight_dict["decoder.layers.{}.self_attn_layer_norm.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [18]
t = torch.stack([weight_dict["decoder.layers.{}.self_attn.qkv_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.reshape([t.shape[0], 3, t.shape[-1] // 3])
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [19]
t = torch.stack([weight_dict["decoder.layers.{}.self_attn.out_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [20]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn_layer_norm.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [21]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.q_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [22]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.k_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [23]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.v_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
# [24]
t = torch.stack([weight_dict["decoder.layers.{}.encoder_attn.out_proj.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [25]
t = torch.stack([weight_dict["decoder.layers.{}.final_layer_norm.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [26]
t = torch.stack([weight_dict["decoder.layers.{}.fc1.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
t = t.split(t.shape[-1] // self.tensor_para_size, dim=-1)[self.tensor_para_rank].contiguous()
self.w.append(t)
            # [27] add an empty bias for gated activation for now (BART/mBART models do not use gated activation by default)
t = torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda()
self.w.append(t)
# [28]
t = torch.stack([weight_dict["decoder.layers.{}.fc2.bias".format(i)]
for i in range(start_layer, end_layer)], 0).contiguous().cuda()
self.w.append(t)
# [29]
t = weight_dict["decoder.layernorm_embedding.bias"].contiguous().cuda()
self.w.append(t)
# [30]
if self.mbart:
t = weight_dict["decoder.layer_norm.bias"].contiguous().cuda()
else:
t = torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda()
self.w.append(t)
# [31] embedding bias aka final_logits_bias (may not exist, keys to ignore)
t = weight_dict.get("final_logits_bias", torch.zeros((1, self.config.vocab_size), dtype=torch_weight_dtype)).contiguous().cuda()
self.w.append(t)
else:
# TODO: pass None Type to Torch Op
for i in range(15):
self.w.append(torch.empty((1, 1), dtype=torch_weight_dtype).contiguous().cuda())
def to_cuda(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].cuda()
def to_float(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_half(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].half()
def to_single(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].float()
def to_bfloat16(self):
for i in range(self.real_weights_num):
self.w[i] = self.w[i].bfloat16()
class FTBartDecoding(nn.Module):
def __init__(self, decoding_weight_list, lib_path, head_num, head_size, inter_size,
mem_d_model, d_model, num_layer, start_id, end_id, vocab_size, q_scaling=1.0, num_bucket=32,
max_distance=128, tensor_para_size=1, pipeline_para_size=1, bart_with_bias=True, mbart=False, position_embedding_type=1,
activation_type="gelu", layernorm_type="post_layernorm"):
super().__init__()
self.use_mpi = dist.is_mpi_available()
if self.use_mpi:
try:
dist.init_process_group(backend='mpi')
except:
print("[INFO] WARNING: Exception occurred in dist.init_process_group(backend = 'mpi'). Maybe the process group has been initialized somewhere else.")
else:
print("[INFO] MPI is not available in this PyTorch build.")
assert tensor_para_size == 1, "[FATAL] MPI is required for tensor_para_size > 1."
assert pipeline_para_size == 1, "[FATAL] MPI is required for pipeline_para_size > 1."
torch.classes.load_library(lib_path)
try:
self.decoding = torch.classes.FasterTransformer.BartDecoding(head_num, head_size, inter_size, mem_d_model, d_model, num_layer,
vocab_size, num_bucket, max_distance, q_scaling, start_id, end_id,
tensor_para_size, pipeline_para_size, bart_with_bias, mbart,
position_embedding_type, activation_type, layernorm_type, *decoding_weight_list)
except:
self.decoding = torch.classes.FasterTransformerBartDecoding(head_num, head_size, inter_size, mem_d_model, d_model, num_layer,
vocab_size, num_bucket, max_distance, q_scaling, start_id, end_id,
tensor_para_size, pipeline_para_size, bart_with_bias, mbart,
position_embedding_type, activation_type, layernorm_type, *decoding_weight_list)
def forward(self, beam_width, max_seq_len, top_k, top_p,
beam_search_diversity_rate, temperature,
len_penalty, repetition_penalty, random_seed,
mem_hidden_states, mem_seq_len,
is_return_output_log_probs, is_return_cum_log_probs, is_return_cross_attentions=False):
        # TODO (bhsueh) No way has been found yet to pass a None type into the op forward function,
        # so top_k and top_p must be given concrete values for now.
results = self.decoding.forward(beam_width, max_seq_len,
top_k, top_p, beam_search_diversity_rate,
temperature, len_penalty, repetition_penalty,
random_seed, mem_hidden_states, mem_seq_len,
is_return_output_log_probs, is_return_cum_log_probs, is_return_cross_attentions)
return results
class FTBart(nn.Module):
def __init__(self, encoder, decoding):
super().__init__()
self.encoder = encoder
self.decoding = decoding
def forward(self, input_ids, attention_mask, inputs_embeds, beam_size, max_seq_len,
top_k, top_p, beam_search_diversity_rate,
temperature=1.0, len_penalty=0.0, repetition_penalty=1.0, random_seed=0,
is_return_output_log_probs=False, is_return_cum_log_probs=False, is_return_cross_attentions=False):
input_ids = input_ids.to("cuda").type(torch.int32)
mem_seq_len = torch.sum(attention_mask, dim=1).type(torch.int32).to("cuda")
ft_encoder_outputs = self.encoder.forward(input_ids, mem_seq_len, inputs_embeds)
results = self.decoding.forward(beam_size, # optional, can be None
max_seq_len,
top_k, # optional, can be None
top_p, # optional, can be None
beam_search_diversity_rate, # optional, can be None
temperature, # optional, can be None
len_penalty, # optional, can be None
repetition_penalty, # optional, can be None
random_seed, # optional, can be None
is_return_output_log_probs, # optional, can be None
is_return_cum_log_probs, # optional, can be None
is_return_cross_attentions, # optional, can be None
ft_encoder_outputs,
mem_seq_len)
return_dict = {}
return_dict['output_ids'] = results.pop(0).reshape([-1, beam_size, max_seq_len]).cpu().numpy()
return_dict['sequence_lengths'] = results.pop(0).reshape([-1, beam_size]).cpu().numpy()
if is_return_output_log_probs:
return_dict['output_log_probs'] = results.pop(0).cpu().numpy()
if is_return_cum_log_probs:
return_dict['cum_log_probs'] = results.pop(0).cpu().numpy()
if is_return_cross_attentions:
return_dict['cross_attentions'] = results.pop(0).cpu().numpy()
return return_dict
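# Minimal, hypothetical usage sketch of the FTBart wrapper above. The encoder
# object (`ft_encoder`) is assumed to expose forward(input_ids, mem_seq_len,
# inputs_embeds) as FTBart.forward requires; the model sizes, library path, and
# sampling settings below are illustrative only, not taken from a real run.
#
#   decoding = FTBartDecoding(decoding_weight_list, lib_path='lib/libth_transformer.so',
#                             head_num=12, head_size=64, inter_size=3072,
#                             mem_d_model=768, d_model=768, num_layer=6,
#                             start_id=2, end_id=2, vocab_size=50265)
#   model = FTBart(ft_encoder, decoding)
#   out = model(input_ids, attention_mask, None, beam_size=4, max_seq_len=64,
#               top_k=1, top_p=0.0, beam_search_diversity_rate=0.0)
#   out['output_ids'].shape  # (batch, beam_size, max_seq_len)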
|
FasterTransformer-main
|
examples/pytorch/bart/utils/ft_decoding.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import re
import numpy as np
import torch
ACTIVATION_AMAX_NUM = 72
INT8O_GEMM_NUM = 8
TRT_FUSED_MHA_AMAX_NUM = 3
SCALE_RESERVE_NUM = 21
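# Layout of the per-layer "amaxList" packed by checkpoint_quantization() below,
# derived from how amax_id advances in the code:
#   [0, 64)                       : 16 activation quantizers x 4 values each
#                                   (amax, amax/127, amax/127/127, 127/amax)
#   [64, ACTIVATION_AMAX_NUM)     : unused padding (left at zero)
#   [72, 72 + 9*hidden)           : per-output-channel kernel amax for the 6 GEMM
#                                   weights (4 attention projections with `hidden`
#                                   channels each, fc1 with mlp_dim channels, fc2
#                                   with hidden channels; the 9*hidden total assumes
#                                   mlp_dim == 4*hidden, as in standard ViT configs)
#   next INT8O_GEMM_NUM entries   : INT8-output GEMM dequant scales, computed as
#                                   amax_in * amax_weight / (127 * amax_out)
#   next TRT_FUSED_MHA_AMAX_NUM   : fused-MHA amax (QKV add-bias, softmax, bmm2)
#   last SCALE_RESERVE_NUM entries: reserved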
def checkpoint_quantization(init_dict, ths_path='../../../build/lib/libth_transformer.so', verbose=True):
print("Quantizing checkpoint ...")
torch.classes.load_library(ths_path)
weight_quantize = torch.ops.fastertransformer.vit_weight_quantize
def init_graph():
layer_num = 0
regex = re.compile(r'layer.\d+')
amaxTotalNum = 0
for name, tensor_value in init_dict.items():
if "ffn.fc1" in name and amaxTotalNum == 0:
amaxTotalNum = ACTIVATION_AMAX_NUM + 9 * tensor_value.size(1) + INT8O_GEMM_NUM + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM
if verbose:
print("amaxTotalNum", amaxTotalNum)
print("Hidden size:", tensor_value.size(1))
tmp = regex.findall(name)
if len(tmp) < 1:
continue
num_tmp = int(tmp[0].replace("layer.", ""))
if layer_num < num_tmp:
layer_num = num_tmp
layer_num = layer_num + 1
#add new var for amax
for i in range(layer_num):
init_dict["transformer.encoder.layer.{}.amaxList".format(i)] = torch.zeros((amaxTotalNum,), dtype=torch.float32)
return layer_num, amaxTotalNum
layer_num, amaxTotalNum = init_graph()
kernel_name_list = ["attn.query",
"attn.key",
"attn.value",
"attn.out",
"ffn.fc1",
"ffn.fc2"]
amax_name_list = ["attn.query._input_quantizer",
"attn.query._aftergemm_quantizer",
"attn.matmul_q_input_quantizer",
"attn.key._aftergemm_quantizer",
"attn.matmul_k_input_quantizer",
"attn.value._aftergemm_quantizer",
"attn.matmul_v_input_quantizer",
"attn.softmax_input_quantizer",
"attn.matmul_a_input_quantizer",
"attn.out._input_quantizer",
"attn.out._aftergemm_quantizer",
"ffn.fc1._input_quantizer",
"ffn.fc1._aftergemm_quantizer",
"ffn.fc2._input_quantizer",
"ffn.fc2._aftergemm_quantizer",
"special_F2Bias_scale",
]
int8O_gemm_weight_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_weight_list = ["attn.query",
"attn.key",
"attn.value",
"attn.matmul_k_input_quantizer",
"attn.matmul_v_input_quantizer",
"attn.out",
"ffn.fc1",
"ffn.fc2"]
int8O_gemm_input_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_input_list = ["attn.query._input_quantizer",
"attn.key._input_quantizer",
"attn.value._input_quantizer",
"attn.matmul_q_input_quantizer",
"attn.matmul_a_input_quantizer",
"attn.out._input_quantizer",
"ffn.fc1._input_quantizer",
"ffn.fc2._input_quantizer"]
int8O_gemm_output_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_output_list = ["attn.query._aftergemm_quantizer",
"attn.key._aftergemm_quantizer",
"attn.value._aftergemm_quantizer",
"attn.softmax_input_quantizer",
"attn.out._input_quantizer",
"attn.out._aftergemm_quantizer",
"ffn.fc1._aftergemm_quantizer",
"ffn.fc2._aftergemm_quantizer"]
same_value_tuple_list = [("attn.query._input_quantizer",
"attn.key._input_quantizer",
"attn.value._input_quantizer")]
factor = 1000000.0
for i in range(layer_num):
amaxList = np.zeros([amaxTotalNum]).astype(np.float32)
amax_id = 0
# verify that these quantizers share the same value; input quantizers use per-tensor quantization
for same_value_tuple in same_value_tuple_list:
tmp_v = init_dict["transformer.encoder.layer.{}.{}._amax".format(i, same_value_tuple[0])].numpy()
for same_value_name in same_value_tuple:
tmp_v_2 = init_dict["transformer.encoder.layer.{}.{}._amax".format(i, same_value_name)].numpy()
assert(np.allclose(tmp_v, tmp_v_2))
for amax_name in amax_name_list:
if amax_name == "special_F2Bias_scale":
if i != layer_num - 1:
quant_max = init_dict["transformer.encoder.layer.{}.{}._amax".format(i+1, amax_name_list[0])].item()
amax = abs(quant_max)
else:
#not used, placeholder
amax = 1.0
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
continue
quant_max = init_dict["transformer.encoder.layer.{}.{}._amax".format(i, amax_name)].item()
amax = abs(quant_max)#round(abs(quant_max)*factor)/factor
if amax_name in int8O_gemm_input_list:
int8O_gemm_input_amax_list[int8O_gemm_input_list.index(amax_name)] = amax
if amax_name == "attn.query._input_quantizer":
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attn.key._input_quantizer")] = amax
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attn.value._input_quantizer")] = amax
if amax_name in int8O_gemm_output_list:
int8O_gemm_output_amax_list[int8O_gemm_output_list.index(amax_name)] = amax
if amax_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(amax_name)] = amax
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
if verbose:
print(i, amax_name)
print('quant_max:', quant_max)
print('amax:', amax)
if verbose:
print("done process layer_{} activation amax".format(i))
#kernel amax starts from ACTIVATION_AMAX_NUM
assert amax_id == 64
amax_id = ACTIVATION_AMAX_NUM
for kernel_id, kernel_name in enumerate(kernel_name_list):
kernel = init_dict["transformer.encoder.layer.{}.{}.weight".format(i, kernel_name)].transpose(-1, -2).contiguous()
quant_max2 = init_dict["transformer.encoder.layer.{}.{}._weight_quantizer._amax".format(i, kernel_name)]
amax2 = abs(quant_max2)
if (amax2.dim() == 0):
quant_max_processed = torch.full((kernel.size(1),), amax2.item(), dtype=amax2.dtype, device=amax2.device)
else:
quant_max_processed = amax2.view(-1)
kernel_processed = weight_quantize(kernel.cuda(), quant_max_processed.cuda())
init_dict["transformer.encoder.layer.{}.{}.weight".format(i, kernel_name)] = kernel_processed
if kernel_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(kernel_name)] = quant_max_processed[0]
for e in quant_max_processed:
amaxList[amax_id] = e
amax_id += 1
# if verbose:
# print(i, kernel_name)
# print('kernel:', kernel)
# print('quant_max2:', quant_max2)
# print('quant_max_processed_:', quant_max_processed)
#for int8O gemm deQuant
for j in range(INT8O_GEMM_NUM):
amaxList[amax_id] = (int8O_gemm_input_amax_list[j]*int8O_gemm_weight_amax_list[j])/(127.0*int8O_gemm_output_amax_list[j])
amax_id += 1
#for trt fused MHA amax
#### QKV_addBias_amax
amaxList[amax_id] = np.maximum(np.maximum(amaxList[8],amaxList[16]), amaxList[24])
amax_id += 1
#### softmax amax
amaxList[amax_id] = amaxList[32]
amax_id += 1
#### bmm2 amax
amaxList[amax_id] = amaxList[36]
amax_id += 1
init_dict["transformer.encoder.layer.{}.amaxList".format(i)] = torch.tensor(amaxList, dtype=torch.float32)
if verbose:
print("done process layer_{} kernel weight".format(i))
print("Quantizing checkpoint done.")
return init_dict
if __name__ == '__main__':
model_dict = torch.load('checkpoint/ViT-B_16_calib.pth', map_location='cpu')
checkpoint_quantization(model_dict, '../../../build/lib/libth_transformer.so', verbose=True)
|
FasterTransformer-main
|
examples/pytorch/vit/checkpoint_quantization.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.utils import accuracy, AverageMeter
from VisionTransformerINT8WeightLoader import ViTINT8WeightLoader
import sys
sys.path.insert(0, "./ViT-quantization/ViT-pytorch")
# from config import get_config
# from models import build_model
from models.modeling import CONFIGS
sys.path.insert(0, "./ViT-quantization")
from vit_int8 import VisionTransformerINT8
import quant_utils
from config import get_config
from data import build_val_loader
test_time = 100
warmup_time = 10
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
print(config)
num_classes = 1000 if args.dataset == "ImageNet" else 100
model = VisionTransformerINT8(config, args.img_size, zero_head=False, num_classes=num_classes)
model.load_state_dict(torch.load(args.calibrated_dir))
quant_utils.configure_model(model, args, calib=False)
model.to(args.device)
return config, model
def parse_option():
parser = argparse.ArgumentParser('ViT evaluation script', add_help=False)
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--img_size", default=384, type=int,
help="Resolution size")
parser.add_argument("--dataset", choices=["ImageNet"], default="ImageNet",
help="Which downstream task.")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--calibrated_dir", type=str, default="checkpoint/ViT-B_16_calib.pth",
help="Where to search for calibrated ViT models.")
parser.add_argument("--data-path", type=str, default="/workspace/imagenet",
help="Root directory for datasets.")
# easy config modification
parser.add_argument('--th-path', type=str, help='path to pytorch library', required=True)
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--int8-mode', type=int, choices=[1,2,3], default=2,
help="Which int8 mode to use, choices=[1,2,3], default=2")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1, help='local rank for DistributedDataParallel')
parser.add_argument("--validate", action="store_true", help='If true, validate on ImageNet, else just profile')
quant_utils.add_arguments(parser)
args, unparsed = parser.parse_known_args()
if args.quant_mode is not None:
args = quant_utils.set_args(args)
quant_utils.set_default_quantizers(args)
config = get_config(args)
if args.quant_mode == 'ft1':
args.int8_mode = 1
elif args.quant_mode == 'ft2':
args.int8_mode = 2
else:
raise NotImplementedError("For ViT-INT8, we only support ft1/ft2 as quant_mode")
return args, config
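# Illustrative invocation of this script (paths are placeholders; the exact
# quantization-mode flag name is added by quant_utils.add_arguments and is
# assumed to be --quant-mode here):
#
#   python infer_visiontransformer_int8_op.py \
#       --model_type ViT-B_16 --img_size 384 --batch-size 32 \
#       --calibrated_dir checkpoint/ViT-B_16_calib.pth \
#       --data-path /workspace/imagenet \
#       --th-path ../../../build/lib/libth_transformer.so \
#       --quant-mode ft2 --validate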
def main(args, data_config):
model_cfg, model = setup(args)
if args.validate:
validate(args, data_config, model_cfg, model)
else:
validate_with_random_data(args, model_cfg, model)
@torch.no_grad()
def validate(args, data_config, config, model):
dataset, data_loader = build_val_loader(data_config)
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
th_path = args.th_path
patch_size = config.patches.size[0]
num_heads = config.transformer.num_heads
layer_num = config.transformer.num_layers
inter_size = config.transformer.mlp_dim
embed_dim = config.hidden_size
max_batch = args.batch_size
img_size = args.img_size
int8_mode = args.int8_mode
with_cls_token = config.classifier == 'token'
in_chans = 3
model.half()
vit_weights = ViTINT8WeightLoader(layer_num, args.img_size, patch_size, model.state_dict())
vit_weights.to_int8(args.th_path)
vit_weights.to_cuda()
weights = vit_weights.listed_weights()
torch.classes.load_library(th_path)
try:
vit = torch.classes.VisionTransformerINT8.Class(weights,
max_batch,
img_size,
patch_size,
in_chans,
embed_dim,
num_heads,
inter_size,
layer_num,
int8_mode,
with_cls_token
)
except:
# legacy ths for 20.03 image
vit = torch.classes.VisionTransformerINT8Class(weights,
max_batch,
img_size,
patch_size,
in_chans,
embed_dim,
num_heads,
inter_size,
layer_num,
int8_mode,
with_cls_token
)
end = time.time()
for idx, (images, target) in enumerate(data_loader):
images_half = images.to(torch.half)
images_half = images_half.cuda(non_blocking=True)
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
op_tmp = vit.forward(images_half)
# op_tmp,_ = model.transformer(images_half)
output = model.head(op_tmp[:, 0])
# output_th, _ = model(images_half)
# diff = abs(output - output_th).cpu().numpy()
# print(diff.mean(), diff.max(), diff.min())
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % data_config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
print(
f'Test: [{idx:4}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
print(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def run_vitnv_op(args, config, model, images):
th_path = args.th_path
patch_size = config.patches.size[0]
num_heads = config.transformer.num_heads
layer_num = config.transformer.num_layers
inter_size = config.transformer.mlp_dim
embed_dim = config.hidden_size
max_batch = args.batch_size
img_size = args.img_size
int8_mode = args.int8_mode
with_cls_token = config.classifier == 'token'
in_chans = 3
model.half()
torch.classes.load_library(th_path)
vit_weights = ViTINT8WeightLoader(layer_num, args.img_size, patch_size, model.state_dict(), config.classifier)
vit_weights.to_int8(args.th_path)
vit_weights.to_cuda()
weights = vit_weights.listed_weights()
##run pytorch op
try:
vit = torch.classes.VisionTransformerINT8.Class(weights,
max_batch,
img_size,
patch_size,
in_chans,
embed_dim,
num_heads,
inter_size,
layer_num,
int8_mode,
with_cls_token
)
except:
# legacy ths for 20.03 image
vit = torch.classes.VisionTransformerINT8Class(weights,
max_batch,
img_size,
patch_size,
in_chans,
embed_dim,
num_heads,
inter_size,
layer_num,
int8_mode,
with_cls_token
)
# warm up
for i in range(warmup_time):
op_tmp = vit.forward(images)
# op_output = model.head(op_tmp[:,0])
torch.cuda.synchronize()
op_begin = time.time()
#_nvtx.rangePushA("op")
for i in range(test_time):
op_tmp = vit.forward(images)
op_output = model.head(op_tmp[:,0])
#_nvtx.rangePop()
torch.cuda.synchronize()
op_end = time.time()
op_output = op_output.cpu().numpy()
print("INT8 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
return op_output
@torch.no_grad()
def run_torch(model, images, mark):
# warm up
# for i in range(warmup_time):
# output = model(images)
# torch.cuda.synchronize()
# torch_start = time.time()
#_nvtx.rangePushA("torch")
# for i in range(test_time):
torch_output = model(images)
#_nvtx.rangePop()
# torch.cuda.synchronize()
# torch_end = time.time()
torch_output = torch_output[0].cpu().numpy()
# print(mark + " time : ", (torch_end - torch_start)/test_time*1000.0, "ms")
return torch_output
@torch.no_grad()
def validate_with_random_data(args, model_cfg, model):
model.eval()
max_batch = args.batch_size
img_size = args.img_size
in_chans = 3
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
images_half = torch.tensor(images, dtype=torch.half).cuda(non_blocking=True)
##run original swin-transformer
# traced_module_float = torch.jit.trace(model, images_float)
# FP32_torch_traced_output = run_torch(traced_module_float, images_float, "FP32 torch trace")
model.half()
INT8_torch_output = run_torch(model, images_half, "INT8 torch")
print(INT8_torch_output.shape)
# run pytorch op
INT8_op_output = run_vitnv_op(args, model_cfg, model, images_half)
print(INT8_op_output.shape)
# diff = abs(FP16_torch_traced_output - FP16_op_output)
diff = abs(INT8_torch_output - INT8_op_output)
print("INT8_torch_output vs INT8_op_output , avg diff : ", diff.mean((1)), "max diff : ", diff.max((1)))
if __name__ == '__main__':
args, data_config = parse_option()
seed = args.seed #+ int(time.time())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl',
timeout=datetime.timedelta(minutes=60))
args.n_gpu = 1
args.device = device
main(args, data_config)
|
FasterTransformer-main
|
examples/pytorch/vit/infer_visiontransformer_int8_op.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import sys
sys.path.insert(0, "./ViT-quantization/ViT-pytorch")
# from config import get_config
# from models import build_model
from models.modeling import VisionTransformer, CONFIGS
from VisionTransformerWeightLoader import ViTWeightLoader
#from torch._C import _nvtx
test_time = 100
warmup_time = 10
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
print(config)
model = VisionTransformer(config, args.img_size, zero_head=False, num_classes=1000)
model.load_from(np.load(args.pretrained_dir))
model.to(args.device)
return config, model
def parse_option():
parser = argparse.ArgumentParser('ViT evaluation script', add_help=False)
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--img_size", default=384, type=int,
help="Resolution size")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
# easy config modification
parser.add_argument('--th-path', type=str, help='path to pytorch library', required=True)
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
args, unparsed = parser.parse_known_args()
return args
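# Illustrative invocation of this script (the library path is a placeholder):
#
#   python infer_visiontransformer_op.py \
#       --model_type ViT-B_16 --img_size 384 --batch-size 32 \
#       --pretrained_dir checkpoint/ViT-B_16.npz \
#       --th-path ../../../build/lib/libth_transformer.so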
def main(args):
config, model = setup(args)
validate_with_random_data(args, config, model)
@torch.no_grad()
def run_vitnv_op(args, config, model, images, use_fp16):
th_path = args.th_path
patch_size = config.patches.size[0]
num_heads = config.transformer.num_heads
layer_num = config.transformer.num_layers
inter_size = config.transformer.mlp_dim
embed_dim = config.hidden_size
max_batch = args.batch_size
img_size = args.img_size
with_cls_token = int(config.classifier == 'token')
in_chans = 3
torch.classes.load_library(th_path)
vit_weights = ViTWeightLoader(layer_num, args.img_size, patch_size, args.pretrained_dir, config.classifier)
if use_fp16:
vit_weights.to_half()
model.half()
vit_weights.to_cuda()
##run pytorch op
try:
vit = torch.classes.VisionTransformer.Class(vit_weights.weights,
max_batch,
img_size,
patch_size,
in_chans,
embed_dim,
num_heads,
inter_size,
layer_num,
with_cls_token
)
except:
# legacy ths for 20.03 image
vit = torch.classes.VisionTransformerClass(vit_weights.weights,
max_batch,
img_size,
patch_size,
in_chans,
embed_dim,
num_heads,
inter_size,
layer_num,
with_cls_token
)
# warm up
for i in range(warmup_time):
op_tmp = vit.forward(images)
op_output = model.head(op_tmp[:,0])
torch.cuda.synchronize()
op_begin = time.time()
#_nvtx.rangePushA("op")
for i in range(test_time):
op_tmp = vit.forward(images)
op_output = model.head(op_tmp[:,0])
#_nvtx.rangePop()
torch.cuda.synchronize()
op_end = time.time()
op_output = op_output.cpu().numpy()
if use_fp16:
print("FP16 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
else:
print("FP32 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
return op_output
@torch.no_grad()
def run_torch(model, images, mark):
# warm up
for i in range(warmup_time):
output = model(images)
torch.cuda.synchronize()
torch_start = time.time()
#_nvtx.rangePushA("torch")
for i in range(test_time):
torch_output = model(images)
#_nvtx.rangePop()
torch.cuda.synchronize()
torch_end = time.time()
torch_output = torch_output[0].cpu().numpy()
print(mark + " time : ", (torch_end - torch_start)/test_time*1000.0, "ms")
return torch_output
@torch.no_grad()
def validate_with_random_data(args, config, model):
model.eval()
max_batch = args.batch_size
img_size = args.img_size
in_chans = 3
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
images_half = torch.tensor(images, dtype=torch.half)
images_float = torch.tensor(images, dtype=torch.float)
images_half = images_half.cuda(non_blocking=True)
images_float = images_float.cuda(non_blocking=True)
# run pytorch op
FP32_op_output = run_vitnv_op(args, config, model, images_float, False)
# traced_module_float = torch.jit.trace(model, images_float)
# FP32_torch_traced_output = run_torch(traced_module_float, images_float, "FP32 torch trace")
FP32_torch_output = run_torch(model, images_float, "FP32 torch")
FP16_op_output = run_vitnv_op(args, config, model, images_half, True)
# traced_module_half = torch.jit.trace(model, images_half)
# FP16_torch_traced_output = run_torch(traced_module_half, images_half, "FP16 torch trace")
FP16_torch_output = run_torch(model, images_half, "FP16 torch")
# diff = abs(FP32_torch_traced_output - FP32_op_output)
diff = abs(FP32_torch_output - FP32_op_output)
print("FP32_torch_traced_output vs FP32_op_output , avg diff : ", diff.mean(), "max diff : ", diff.max())
assert diff.mean() < 0.004, "[ERROR] VIT OP TEST FAIL !"
# diff = abs(FP16_torch_traced_output - FP16_op_output)
diff = abs(FP16_torch_output - FP16_op_output)
print("FP16_torch_traced_output vs FP16_op_output , avg diff : ", diff.mean(), "max diff : ", diff.max())
assert diff.mean() < 0.005, "[ERROR] VIT OP TEST FAIL !"
print("[INFO] VIT OP TEST PASS !")
if __name__ == '__main__':
args = parse_option()
# seed = args.seed + int(time.time())
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
main(args)
|
FasterTransformer-main
|
examples/pytorch/vit/infer_visiontransformer_op.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch as th
import math
import numpy as np
import os
from scipy import ndimage
L_ROOT = 'Transformer/encoderblock_{}'
ATT_Q = 'MultiHeadDotProductAttention_1/query'
ATT_K = 'MultiHeadDotProductAttention_1/key'
ATT_V = 'MultiHeadDotProductAttention_1/value'
ATT_OUT = 'MultiHeadDotProductAttention_1/out'
ATT_NORM = 'LayerNorm_0'
FFN_NORM = 'LayerNorm_2'
FFN_IN = 'MlpBlock_3/Dense_0'
FFN_OUT = 'MlpBlock_3/Dense_1'
def np2th(weights, is_conv=False):
if is_conv:
""" convert HWIO to OIHW."""
weights = weights.transpose([3, 2, 0, 1])
return th.from_numpy(weights).contiguous()
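# Example (illustrative shapes): a patch-embedding kernel stored as HWIO in the
# .npz checkpoint becomes an OIHW tensor suitable for torch convolutions:
#   w = np.zeros((16, 16, 3, 768), dtype=np.float32)  # (H, W, I, O)
#   np2th(w, is_conv=True).shape                      # torch.Size([768, 3, 16, 16])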
class ViTWeightLoader(object):
def __init__(self, layer_num, img_size, patch_size, weight_path=None, classifier='token'):
"""weights need be a state_dict of swin transformer model"""
layer_weight_names = [
os.path.join(L_ROOT, ATT_NORM, 'scale' ),
os.path.join(L_ROOT, ATT_NORM, 'bias' ),
os.path.join(L_ROOT, ATT_Q, 'kernel' ),
os.path.join(L_ROOT, ATT_Q, 'bias' ),
os.path.join(L_ROOT, ATT_K, 'kernel' ),
os.path.join(L_ROOT, ATT_K, 'bias' ),
os.path.join(L_ROOT, ATT_V, 'kernel' ),
os.path.join(L_ROOT, ATT_V, 'bias' ),
os.path.join(L_ROOT, ATT_OUT, 'kernel' ),
os.path.join(L_ROOT, ATT_OUT, 'bias' ),
os.path.join(L_ROOT, FFN_NORM, 'scale' ),
os.path.join(L_ROOT, FFN_NORM, 'bias' ),
os.path.join(L_ROOT, FFN_IN, 'kernel' ),
os.path.join(L_ROOT, FFN_IN, 'bias' ),
os.path.join(L_ROOT, FFN_OUT, 'kernel' ),
os.path.join(L_ROOT, FFN_OUT, 'bias' )
]
pre_layer_weight_names = [
'embedding/kernel',
'embedding/bias',
'cls',
'Transformer/posembed_input/pos_embedding'
]
post_layer_weight_names = [
'Transformer/encoder_norm/scale',
'Transformer/encoder_norm/bias'
]
self.layer_num = layer_num
self.weights = []
if weight_path is None:
print("[ERROR][SwinTransformerWeights::__init__] weights should not be empty!")
exit(-1)
else:
self._generated_weights = False
weight_dict = self.load_weights(weight_path)
for name in pre_layer_weight_names:
if name not in weight_dict.files:
print("Unsupported weight file: Missing weights %s" % name)
is_conv = name == 'embedding/kernel'
if classifier != 'token' and name == 'cls':
continue
th_weight = np2th(weight_dict[name], is_conv)
if name.split('/')[-1] == "pos_embedding":
posemb_new_size = pow(img_size//patch_size, 2) + 1
if th_weight.size(1) != posemb_new_size:
print("load_pretrained: resized variant: %s to %s" % (th_weight.size(1), posemb_new_size))
ntok_new = posemb_new_size
if classifier == "token":
posemb_tok, posemb_grid = th_weight[:, :1], th_weight[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = th_weight[:, :0], th_weight[0]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
th_weight = np2th(posemb)
self.weights.append(th_weight)
#loop over layers
for layer_idx in range(layer_num):
for name in layer_weight_names:
w_name = name.format(layer_idx)
if w_name not in weight_dict.files:
print("Unsupported weight file: Missing weights %s" % w_name)
th_weight = np2th(weight_dict[w_name])
self.weights.append(th_weight)
for name in post_layer_weight_names:
if name not in weight_dict.files:
print("Unsupported weight file: Missing weights %s" % name)
th_weight = np2th(weight_dict[name])
self.weights.append(th_weight)
def load_weights(self, weight_path:str):
suffix = weight_path.split('.')[-1]
if suffix != 'npz':
print("Unsupported weight file: Unrecognized format %s " % suffix)
exit(-1)
return np.load(weight_path)
def to_cuda(self):
for idx, v in enumerate(self.weights):
self.weights[idx] = v.cuda()
def to_half(self):
for idx, v in enumerate(self.weights):
self.weights[idx] = v.half()
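# Minimal usage sketch (checkpoint path and layer count are illustrative; ViT-B
# has 12 encoder blocks):
#   loader = ViTWeightLoader(layer_num=12, img_size=384, patch_size=16,
#                            weight_path='checkpoint/ViT-B_16.npz', classifier='token')
#   loader.to_half()
#   loader.to_cuda()
#   weights = loader.weights  # flat list in the order expected by the FT ViT op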
|
FasterTransformer-main
|
examples/pytorch/vit/VisionTransformerWeightLoader.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch as th
import math
import numpy as np
import os
from scipy import ndimage
from checkpoint_quantization import checkpoint_quantization
L_ROOT = 'Transformer/encoderblock_{}'
ATT_Q = 'MultiHeadDotProductAttention_1/query'
ATT_K = 'MultiHeadDotProductAttention_1/key'
ATT_V = 'MultiHeadDotProductAttention_1/value'
ATT_OUT = 'MultiHeadDotProductAttention_1/out'
ATT_NORM = 'LayerNorm_0'
FFN_NORM = 'LayerNorm_2'
FFN_IN = 'MlpBlock_3/Dense_0'
FFN_OUT = 'MlpBlock_3/Dense_1'
def np2th(weights, is_conv=False):
if is_conv:
""" convert HWIO to OIHW."""
weights = weights.transpose([3, 2, 0, 1])
return th.from_numpy(weights).contiguous()
class ViTINT8WeightLoader(object):
def __init__(self, layer_num, img_size, patch_size, weight_dict=None, classifier='token'):
"""weights need be a pytorch state_dict of swin transformer model"""
pre_layer_weight_names = [
'transformer.embeddings.patch_embeddings.weight',
'transformer.embeddings.patch_embeddings.bias',
'transformer.embeddings.cls_token',
'transformer.embeddings.position_embeddings'
]
self.layer_num = layer_num
self.weights = []
self.int8 = False
if weight_dict is None:
print("[ERROR][SwinTransformerWeights::__init__] weights should not be empty!")
exit(-1)
self._generated_weights = False
for name in pre_layer_weight_names:
if name not in weight_dict.keys():
print("Unsupported weight file: Missing weights %s" % name)
th_weight = weight_dict[name]
if name.split('.')[-1] == "position_embeddings":
posemb_new_size = pow(img_size//patch_size, 2) + 1
if th_weight.size(1) != posemb_new_size:
print("load_pretrained: resized variant: %s to %s" % (th_weight.size(1), posemb_new_size))
ntok_new = posemb_new_size
if classifier == "token":
posemb_tok, posemb_grid = th_weight[:, :1], th_weight[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = th_weight[:, :0], th_weight[0]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
th_weight = np2th(posemb)
weight_dict[name] = th_weight
self.weights = weight_dict
# def load_weights(self, weight_path:str):
# suffix = weight_path.split('.')[-1]
# if suffix != 'pth':
# print("Unsupported weight file: Unrecognized format %s " % suffix)
# exit(-1)
# return th.load(weight_path)
def to_cuda(self):
if not self.int8:
for k, v in self.weights.items():
self.weights[k] = v.cuda()
else:
h_scale_list = {}
for k, v in self.weights.items():
if "amaxList" in k:
k_h = k.replace("amaxList", "h_amaxList")
h_scale_list[k_h] = v
self.weights[k] = v.cuda()
for k, v in h_scale_list.items():
self.weights[k] = v
def to_half(self):
for k, v in self.weights.items():
self.weights[k] = v.half()
def listed_weights(self):
ret = []
for k, v in self.weights.items():
if k.split('.')[-1] == '_amax' or k.endswith('amaxList'):
continue
if k.split('.')[0] == 'head':
continue
ret.append(v)
for i in range(self.layer_num):
name = 'transformer.encoder.layer.{}.amaxList'.format(i)
ret.append(self.weights[name])
name = 'transformer.encoder.layer.{}.h_amaxList'.format(i)
ret.append(self.weights[name])
return ret
def listed_weight_to_dict(self):
ret = {}
for k, v in self.weights.items():
if k.split('.')[-1] == '_amax' or k.endswith('amaxList'):
continue
if k.split('.')[0] == 'head':
continue
ret[k] = v
for i in range(self.layer_num):
name = 'transformer.encoder.layer.{}.amaxList'.format(i)
ret[name] = self.weights[name]
name = 'transformer.encoder.layer.{}.h_amaxList'.format(i)
ret[name] = self.weights[name]
return ret
def to_int8(self, ths_path='../../../lib/libth_transformer.so'):
if 'transformer.encoder.layer.0.attn.query._input_quantizer._amax' not in self.weights:
raise RuntimeError("There is no quantization node in the checkpoint, cannot be quantized to int8.")
if self.int8:
return
self.int8 = True
for k, v in self.weights.items():
if k.endswith('bias') or k.endswith('norm.weight') or 'embeddings' in k:
self.weights[k] = v.half()
elif k.endswith('weight'):
self.weights[k] = v.float().cuda()
else:
self.weights[k] = v.float().cpu()
self.weights = checkpoint_quantization(self.weights, ths_path, verbose=False)
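# Minimal usage sketch, mirroring validate() in infer_visiontransformer_int8_op.py
# (the library path is illustrative):
#   vit_weights = ViTINT8WeightLoader(layer_num=12, img_size=384, patch_size=16,
#                                     weight_dict=model.state_dict())
#   vit_weights.to_int8('../../../lib/libth_transformer.so')  # quantize kernels, build amaxList
#   vit_weights.to_cuda()
#   weights = vit_weights.listed_weights()  # flat list passed to the INT8 ViT op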
|
FasterTransformer-main
|
examples/pytorch/vit/VisionTransformerINT8WeightLoader.py
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from yacs.config import CfgNode as CN
_C = CN()
# Base config files
_C.BASE = ['']
# -----------------------------------------------------------------------------
# Data settings
# -----------------------------------------------------------------------------
_C.DATA = CN()
# Batch size for a single GPU, could be overwritten by command line argument
_C.DATA.BATCH_SIZE = 128
# Path to dataset, could be overwritten by command line argument
_C.DATA.DATA_PATH = ''
# Dataset name
_C.DATA.DATASET = 'imagenet'
# Input image size
_C.DATA.IMG_SIZE = 224
# Interpolation to resize image (random, bilinear, bicubic)
_C.DATA.INTERPOLATION = 'bilinear'
# Use zipped dataset instead of folder dataset
# could be overwritten by command line argument
_C.DATA.ZIP_MODE = False
# Cache Data in Memory, could be overwritten by command line argument
_C.DATA.CACHE_MODE = 'part'
# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
_C.DATA.PIN_MEMORY = True
# Number of data loading threads
_C.DATA.NUM_WORKERS = 8
# -----------------------------------------------------------------------------
# Model settings
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Pretrained weight from checkpoint, could be imagenet22k pretrained weight
# could be overwritten by command line argument
_C.MODEL.PRETRAINED = ''
# Checkpoint to resume, could be overwritten by command line argument
_C.MODEL.RESUME = ''
# Number of classes, overwritten in data preparation
_C.MODEL.NUM_CLASSES = 1000
# Dropout rate
_C.MODEL.DROP_RATE = 0.0
# Drop path rate
_C.MODEL.DROP_PATH_RATE = 0.1
# Label Smoothing
_C.MODEL.LABEL_SMOOTHING = 0.1
# -----------------------------------------------------------------------------
# Training settings
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.START_EPOCH = 0
_C.TRAIN.EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 5e-4
_C.TRAIN.WARMUP_LR = 5e-7
_C.TRAIN.MIN_LR = 5e-6
# Clip gradient norm
_C.TRAIN.CLIP_GRAD = 5.0
# Auto resume from latest checkpoint
_C.TRAIN.AUTO_RESUME = True
# Gradient accumulation steps
# could be overwritten by command line argument
_C.TRAIN.ACCUMULATION_STEPS = 0
# Whether to use gradient checkpointing to save memory
# could be overwritten by command line argument
_C.TRAIN.USE_CHECKPOINT = False
# LR scheduler
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
# Epoch interval to decay LR, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
# LR decay rate, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1
# Optimizer
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'adamw'
# Optimizer Epsilon
_C.TRAIN.OPTIMIZER.EPS = 1e-8
# Optimizer Betas
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
# SGD momentum
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# -----------------------------------------------------------------------------
# Augmentation settings
# -----------------------------------------------------------------------------
_C.AUG = CN()
# Color jitter factor
_C.AUG.COLOR_JITTER = 0.4
# Use AutoAugment policy. "v0" or "original"
_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
# Random erase prob
_C.AUG.REPROB = 0.25
# Random erase mode
_C.AUG.REMODE = 'pixel'
# Random erase count
_C.AUG.RECOUNT = 1
# Mixup alpha, mixup enabled if > 0
_C.AUG.MIXUP = 0.8
# Cutmix alpha, cutmix enabled if > 0
_C.AUG.CUTMIX = 1.0
# Cutmix min/max ratio, overrides alpha and enables cutmix if set
_C.AUG.CUTMIX_MINMAX = None
# Probability of performing mixup or cutmix when either/both is enabled
_C.AUG.MIXUP_PROB = 1.0
# Probability of switching to cutmix when both mixup and cutmix enabled
_C.AUG.MIXUP_SWITCH_PROB = 0.5
# How to apply mixup/cutmix params. Per "batch", "pair", or "elem"
_C.AUG.MIXUP_MODE = 'batch'
# -----------------------------------------------------------------------------
# Testing settings
# -----------------------------------------------------------------------------
_C.TEST = CN()
# Whether to use center crop when testing
_C.TEST.CROP = False
# Whether to use SequentialSampler as validation sampler
_C.TEST.SEQUENTIAL = True
# -----------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------
# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2')
# overwritten by command line argument
_C.AMP_OPT_LEVEL = ''
# Path to output folder, overwritten by command line argument
_C.OUTPUT = ''
# Tag of experiment, overwritten by command line argument
_C.TAG = 'default'
# Frequency to save checkpoint
_C.SAVE_FREQ = 1
# Frequency to logging info
_C.PRINT_FREQ = 10
# Fixed random seed
_C.SEED = 0
# Perform evaluation only, overwritten by command line argument
_C.EVAL_MODE = False
# Test throughput only, overwritten by command line argument
_C.THROUGHPUT_MODE = False
# local rank for DistributedDataParallel, given by command line argument
_C.LOCAL_RANK = 0
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('=> merge config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
config.defrost()
# merge from specific arguments
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.data_path:
config.DATA.DATA_PATH = args.data_path
if args.pretrained_dir:
config.MODEL.PRETRAINED = args.pretrained_dir
if args.img_size:
config.DATA.IMG_SIZE = args.img_size
# if args.resume:
# config.MODEL.RESUME = args.resume
# if args.accumulation_steps:
# config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
# if args.use_checkpoint:
# config.TRAIN.USE_CHECKPOINT = True
# if args.amp_opt_level:
# config.AMP_OPT_LEVEL = args.amp_opt_level
# if args.output:
# config.OUTPUT = args.output
# set local rank for distributed training
config.LOCAL_RANK = args.local_rank
# output folder
# config.OUTPUT = os.path.join(config.OUTPUT, config.MODEL.NAME, config.TAG)
config.freeze()
def get_config(args):
"""Get a yacs CfgNode object with default values."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
config = _C.clone()
update_config(config, args)
return config
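# Minimal example of building a config from an argparse-style namespace; the
# attribute names below are exactly those read by update_config():
#   from argparse import Namespace
#   args = Namespace(batch_size=64, data_path='/workspace/imagenet',
#                    pretrained_dir='checkpoint/ViT-B_16.npz',
#                    img_size=384, local_rank=-1)
#   cfg = get_config(args)
#   cfg.DATA.BATCH_SIZE  # 64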
|
FasterTransformer-main
|
examples/pytorch/vit/ViT-quantization/config.py
|
# coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import argparse
import os
import random
import numpy as np
from datetime import timedelta
import torch
import torch.distributed as dist
import tensorrt as trt
import ctypes
from tqdm import tqdm
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
import sys
sys.path.insert(0, "./ViT-pytorch")
from models.modeling import CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.dist_util import get_world_size
from data import build_loader
from config import get_config
import quant_utils
from vit_int8 import VisionTransformerINT8
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
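# Example: averages are weighted by the sample count passed as `n`:
#   meter = AverageMeter()
#   meter.update(0.5, n=8)
#   meter.update(1.0, n=8)
#   meter.avg  # 0.75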
class Knowledge_Distillation_Loss(torch.nn.Module):
def __init__(self, scale, T = 3):
super(Knowledge_Distillation_Loss, self).__init__()
self.KLdiv = torch.nn.KLDivLoss()
self.T = T
self.scale = scale
def get_knowledge_distillation_loss(self, output_student, output_teacher):
loss_kl = self.KLdiv(torch.nn.functional.log_softmax(output_student / self.T, dim=1), torch.nn.functional.softmax(output_teacher / self.T, dim=1))
loss = loss_kl
return self.scale * loss
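# Illustrative use of the distillation loss with random logits (batch of 8,
# 1000 classes); in train() below the scale comes from --distillation_loss_scale:
#   kd = Knowledge_Distillation_Loss(scale=10000.0, T=3)
#   student_logits = torch.randn(8, 1000)
#   teacher_logits = torch.randn(8, 1000)
#   loss_kd = kd.get_knowledge_distillation_loss(student_logits, teacher_logits)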
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
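# Example with 3 classes: sample 0 is ranked correctly at top-1, while sample 1's
# label falls outside its top-2, so both metrics come out to 50%:
#   logits = torch.tensor([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
#   target = torch.tensor([1, 2])
#   accuracy(logits, target, topk=(1, 2))  # [tensor(50.), tensor(50.)]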
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def save_model(args, model):
model_to_save = model.module if hasattr(model, 'module') else model
model_checkpoint = os.path.join(args.output_dir, "%s_checkpoint.bin" % args.name)
torch.save(model_to_save.state_dict(), model_checkpoint)
logger.info("Saved model checkpoint to %s", model_checkpoint)
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 1000
model = VisionTransformerINT8(config, args.img_size, zero_head=False, num_classes=num_classes)
model.load_from(np.load(args.pretrained_dir))
model.to(args.device)
num_params = count_parameters(model)
logger.info("{}".format(config))
logger.info("Training parameters %s", args)
logger.info("Total Parameter: \t%2.1fM" % num_params)
print(num_params)
return args, model
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
@torch.no_grad()
def valid(args, config, model, test_loader):
# Validation!
eval_losses = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
logger.info("***** Running Validation *****")
logger.info(" Num steps = %d", len(test_loader))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
all_preds, all_label = [], []
loss_fct = torch.nn.CrossEntropyLoss()
for step, batch in enumerate(test_loader):
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
logits, _ = model(x)
eval_loss = loss_fct(logits, y)
acc1, acc5 = accuracy(logits, y, topk=(1, 5))
eval_losses.update(eval_loss.item(), y.size(0))
acc1_meter.update(acc1.item(), y.size(0))
acc5_meter.update(acc5.item(), y.size(0))
if step % config.PRINT_FREQ == 0:
logger.info(
f'Test: [{step}/{len(test_loader)}]\t'
f'Loss {eval_losses.val:.4f} ({eval_losses.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg
def calib(args, config, model):
""" Calibrate the model """
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
dataset_train, dataset_val, train_loader, test_loader = build_loader(config, args)
# Calibration
quant_utils.configure_model(model, args, calib=True)
model.eval()
quant_utils.enable_calibration(model)
# Run forward passes on a sample of the training set
for step, (samples, targets) in enumerate(tqdm(train_loader, desc='Calibration', total=args.num_calib_batch)):
if step > args.num_calib_batch:
break
samples = samples.to(args.device)
outputs = model(samples)
quant_utils.finish_calibration(model, args)
# model.load_state_dict(torch.load('checkpoint/{}_{}_{}.pth'.format(args.model_type, args.quant_mode, args.percentile)))
quant_utils.configure_model(model, args, calib=False)
if args.local_rank in [-1, 0]:
accuracy = valid(args, config, model, test_loader)
logger.info("Test Accuracy: \t%f" %accuracy)
output_model_path = os.path.join(args.calib_output_path, '{}_calib.pth'.format(args.model_type))
if not os.path.exists(args.calib_output_path):
os.mkdir(args.calib_output_path)
torch.save(model.state_dict(), output_model_path)
logger.info(f'Model is saved to {output_model_path}')
def validate_trt(args, config):
num_classes = 1000
model_config = CONFIGS[args.model_type]
model = VisionTransformerINT8(model_config, args.img_size, zero_head=False, num_classes=num_classes)
model_ckpt = torch.load(args.pretrained_dir, map_location="cpu")
model.load_state_dict(model_ckpt["model"] if "model" in model_ckpt else model_ckpt, strict=False)
model.cuda()
model.eval()
quant_utils.configure_model(model, args, calib=False)
dataset_train, dataset_val, train_loader, test_loader = build_loader(config, args)
# Validation!
eval_losses = AverageMeter()
logger.info("***** Running Validation *****")
logger.info(" Num steps = %d", len(test_loader))
logger.info(" Batch size = %d", args.eval_batch_size)
# _, test_loader = get_loader(args)
model.eval()
all_preds, all_label = [], []
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
loss_fct = torch.nn.CrossEntropyLoss()
print('Eval batchsize', args.eval_batch_size)
# Import necessary plugins for BERT TensorRT
plugin_path = "/workspace/FasterTransformer/build/lib/libvit_plugin.so"
# handle = ctypes.CDLL(plugin_path, mode=ctypes.RTLD_GLOBAL)
trtlogger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(trtlogger, '')
ctypes.cdll.LoadLibrary(plugin_path)
# if not handle:
# raise RuntimeError("Fail to load plugin library: %s" % plugin_path)
with open(args.engine, 'rb') as f, trt.Runtime(trt.Logger(trt.Logger.INFO)) as runtime,\
runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
if engine is None:
print('engine is None')
context.active_optimization_profile = 0
stream = 0 #torch.cuda.Stream()
context.set_binding_shape(0, (args.eval_batch_size, 3, args.img_size, args.img_size))
output_shape = tuple(context.get_binding_shape(1))
print(output_shape)
d_output = torch.empty(output_shape, dtype=torch.float32).cuda()
for idx, (images, target) in enumerate(epoch_iterator):
images_half = images.to(torch.half)
images_half = images_half.cuda(non_blocking=True)
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# print(images.shape)
context.execute_async_v2([images_half.data_ptr()] + [d_output.data_ptr()], stream)
torch.cuda.synchronize()
# torch_embed = model.transformer(images)
# torch.save(images_half, 'images_half.pt')
# torch.save(d_output, 'd_output2.pt')
# torch.save(torch_embed, 'torch_embed.pt')
# exit(0)
with torch.no_grad():
logits = model.head(d_output.float()[:, 0])
# torch_pred, _ = model(images)
# if step % 10 == 0:
# diff = abs(torch_pred - logits)
# print(diff.shape, diff.mean(1))
eval_loss = loss_fct(logits, target)
eval_losses.update(eval_loss.item())
preds = torch.argmax(logits, dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(target.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0
)
all_label[0] = np.append(
all_label[0], target.detach().cpu().numpy(), axis=0
)
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
logger.info("\n")
logger.info("Validation Results")
logger.info("Valid Loss: %2.5f" % eval_losses.avg)
logger.info("Valid Accuracy: %2.5f" % accuracy)
return accuracy
def train(args, config):
num_classes = 1000
model_config = CONFIGS[args.model_type]
model = VisionTransformerINT8(model_config, args.img_size, zero_head=False, num_classes=num_classes)
model_ckpt = torch.load(args.pretrained_dir, map_location="cpu")
model.load_state_dict(model_ckpt["model"] if "model" in model_ckpt else model_ckpt, strict=False)
model.cuda()
model.train()
teacher = None
dis_loss = None
if args.distill:
teacher = VisionTransformerINT8(model_config, args.img_size, zero_head=False, num_classes=num_classes)
teacher.load_from(np.load(args.teacher))
dis_loss = Knowledge_Distillation_Loss(scale=args.distillation_loss_scale).cuda()
teacher.cuda()
teacher.eval()
quant_utils.set_quantizer_by_name(teacher, [''], _disabled=True)
""" Train the model """
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# Prepare dataset
# train_loader, test_loader = get_loader(args)
dataset_train, dataset_val, train_loader, test_loader = build_loader(config, args)
# Prepare optimizer and scheduler
optimizer = torch.optim.SGD(model.parameters(),
lr=args.qat_lr,
momentum=0.9,
weight_decay=args.weight_decay)
print('args.qat_lr: %.6f' % (args.qat_lr))
print('optimizer.lr: %.6f' % optimizer.state_dict()['param_groups'][0]['lr'])
t_total = args.num_steps
# if args.decay_type == "cosine":
# scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
# else:
# scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
print('Inspecting the structure of optimizer.param_groups:')
print(list(optimizer.param_groups[0].keys()))
if args.fp16:
model, optimizer = amp.initialize(models=model,
optimizers=optimizer,
opt_level=args.fp16_opt_level)
amp._amp_state.loss_scalers[0]._loss_scale = 2**20
# Distributed training
if args.local_rank != -1:
model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
# Train!
logger.info("***** Running training *****")
logger.info(" Total optimization epochs = %d", args.num_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
losses = AverageMeter()
global_step, best_acc = 0, 0
quant_utils.configure_model(model, args, calib=False)
for epoch_i in range(args.num_epochs):
model.train()
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
outputs, loss = model(x, y)
if teacher:
with torch.no_grad():
teacher_outputs, _ = teacher(x)
loss_t = dis_loss.get_knowledge_distillation_loss(outputs, teacher_outputs)
loss = loss + loss_t
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# scheduler.step()
optimizer.step()
lr = optimizer.param_groups[0]['lr']
optimizer.zero_grad()
global_step += 1
epoch_iterator.set_description(
"EPOCH [%d/%d] (%d / %d Steps) (loss=%2.5f) (lr=%.7f)" %
(epoch_i, args.num_epochs, global_step, len(epoch_iterator), losses.val, lr)
)
if args.local_rank in [-1, 0]:
accuracy = valid(args, config, model, test_loader)
if best_acc < accuracy:
save_model(args, model)
best_acc = accuracy
model.train()
# if global_step % t_total == 0:
# break
losses.reset()
# if global_step % t_total == 0:
# break
logger.info("Best Accuracy: \t%f" % best_acc)
logger.info("End Training!")
def parse_option():
parser = argparse.ArgumentParser()
# Required parameters
# easy config modification
parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--calib', action='store_true', help='Perform calibration only')
parser.add_argument('--train', action='store_true', help='Perform training only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--num-calib-batch', type=int, default=10, help='Number of batches for calibration. 0 will disable calibration.')
parser.add_argument('--calib-batchsz', type=int, default=8, help='Batch size when doing calibration')
parser.add_argument('--calib-output-path', type=str, default='calib-checkpoint', help='Output directory to save calibrated model')
parser.add_argument("--num-epochs", type=int, default=10, help="Number of epochs to run QAT fintuning.")
parser.add_argument("--qat-lr", type=float, default=1e-6, help="learning rate for QAT.")
parser.add_argument("--distill", action='store_true', help='Using distillation')
parser.add_argument("--teacher", type=str, help='teacher model path')
parser.add_argument('--distillation_loss_scale', type=float, default=10000., help="scale applied to distillation component of loss")
# distributed training
# parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')
parser.add_argument("--name", required=True,
help="Name of this run. Used for monitoring.")
parser.add_argument("--dataset", choices=["cifar10", "cifar100"], default="cifar100",
help="Which downstream task.")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--output_dir", default="output", type=str,
help="The output directory where checkpoints will be written.")
parser.add_argument("--engine", type=str,
help="The directory of vit tensorrt engine.")
parser.add_argument("--img_size", default=384, type=int,
help="Resolution size")
parser.add_argument("--train_batch_size", default=64, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=32, type=int,
help="Total batch size for eval.")
parser.add_argument("--eval_every", default=2000, type=int,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
parser.add_argument("--learning_rate", default=3e-2, type=float,
help="The initial learning rate for SGD.")
parser.add_argument("--weight_decay", default=0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--num_steps", default=10000, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
help="How to decay the learning rate.")
parser.add_argument("--warmup_steps", default=500, type=int,
help="Step of training to perform learning rate warmup for.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
quant_utils.add_arguments(parser)
args, unparsed = parser.parse_known_args()
if args.quant_mode is not None:
args = quant_utils.set_args(args)
quant_utils.set_default_quantizers(args)
config = get_config(args)
return args, config
def main():
args, config = parse_option()
# print(config.dump())
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl',
timeout=timedelta(minutes=60))
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" %
(args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
# Set seed
set_seed(args)
if args.engine:
validate_trt(args, config)
# Calibration
if args.calib:
args, model = setup(args)
calib(args, config, model)
# Quantization-Aware Training
if args.train:
# args, model = setup(args)
train(args, config)
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/vit/ViT-quantization/eval_engine.py
|
# coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
import sys
from os.path import join as pjoin
import torch
import torch.nn as nn
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
import torch.nn.init as init
import torch.nn.functional as F
from scipy import ndimage
import models.configs as configs
from models.modeling_resnet import ResNetV2
logger = logging.getLogger(__name__)
ATTENTION_Q = "MultiHeadDotProductAttention_1/query"
ATTENTION_K = "MultiHeadDotProductAttention_1/key"
ATTENTION_V = "MultiHeadDotProductAttention_1/value"
ATTENTION_OUT = "MultiHeadDotProductAttention_1/out"
FC_0 = "MlpBlock_3/Dense_0"
FC_1 = "MlpBlock_3/Dense_1"
ATTENTION_NORM = "LayerNorm_0"
MLP_NORM = "LayerNorm_2"
QUANT = True
if QUANT:
from pytorch_quantization.nn import QuantLinear, TensorQuantizer
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
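# Note: 1.41421 approximates sqrt(2), so bias_gelu applies the exact erf-based
# GELU to (y + bias): GELU(x) = x * 0.5 * (1 + erf(x / sqrt(2))).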
def bias_noact(bias, y):
return bias + y
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": torch.nn.functional.gelu,
"bias_gelu": bias_gelu,
"relu": torch.nn.functional.relu,
"swish": swish,
"bias_noact": bias_noact}
class LinearActivation(nn.Module):
r"""Fused Linear and Activation Module.
"""
__constants__ = ['bias']
def __init__(self, in_features, out_features, act='noact', bias=True, do_quant=True):
super(LinearActivation, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.act_fn = nn.Identity()
self.biased_act_fn = None
if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)):
if bias and not 'bias' in act:
act = 'bias_' + act
self.biased_act_fn = ACT2FN[act]
else:
self.act_fn = ACT2FN[act]
else:
self.act_fn = act
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.do_quant = do_quant
if QUANT and do_quant:
self._input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self._weight_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_weight)
self._aftergemm_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_normal_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
if QUANT and self.do_quant:
input = self._input_quantizer(input)
weight = self._weight_quantizer(self.weight)
else:
weight = self.weight
if not self.bias is None:
if QUANT and self.do_quant:
return self.biased_act_fn(self.bias, self._aftergemm_quantizer(F.linear(input, weight, None)))
else:
return self.biased_act_fn(self.bias, F.linear(input, weight, None))
else:
if QUANT and self.do_quant:
return self.act_fn(self._aftergemm_quantizer(F.linear(input, weight, None)))
else:
return self.act_fn(F.linear(input, weight, None))
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
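# Illustrative sketch of the quantized forward pass above (assuming QUANT and
# do_quant are both enabled and a bias is present); the names mirror the
# module's own quantizers rather than introducing new API:
#   q_in = self._input_quantizer(input)          # fake-quantize the activation
#   q_w = self._weight_quantizer(self.weight)    # fake-quantize the weight
#   gemm = F.linear(q_in, q_w, None)             # INT8-simulated GEMM
#   out = self.biased_act_fn(self.bias, self._aftergemm_quantizer(gemm))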
class Attention(nn.Module):
def __init__(self, config, vis):
super(Attention, self).__init__()
self.vis = vis
self.num_attention_heads = config.transformer["num_heads"]
self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = LinearActivation(config.hidden_size, self.all_head_size)
self.key = LinearActivation(config.hidden_size, self.all_head_size)
self.value = LinearActivation(config.hidden_size, self.all_head_size)
self.out = LinearActivation(config.hidden_size, config.hidden_size)
self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.softmax = Softmax(dim=-1)
if QUANT:
self.matmul_q_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_k_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_v_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_a_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.softmax_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
if QUANT:
attention_scores = torch.matmul(self.matmul_q_input_quantizer(query_layer),
self.matmul_k_input_quantizer(key_layer.transpose(-1, -2)))
attention_scores = self.softmax_input_quantizer(attention_scores)
else:
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = self.softmax(attention_scores)
weights = attention_probs if self.vis else None
attention_probs = self.attn_dropout(attention_probs)
if QUANT:
context_layer = torch.matmul(self.matmul_a_input_quantizer(attention_probs),
self.matmul_v_input_quantizer(value_layer))
else:
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
attention_output = self.out(context_layer)
attention_output = self.proj_dropout(attention_output)
return attention_output, weights
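# Example shape flow (assuming the ViT-B_16 defaults used by this script:
# hidden_size=768, num_heads=12, head_size=64, img_size=384 -> 577 tokens),
# with B denoting the batch size:
#   hidden_states                 (B, 577, 768)
#   query/key/value_layer         (B, 12, 577, 64)   after transpose_for_scores
#   attention_scores/probs        (B, 12, 577, 577)
#   context_layer (heads merged)  (B, 577, 768)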
class Mlp(nn.Module):
def __init__(self, config):
super(Mlp, self).__init__()
self.fc1 = LinearActivation(config.hidden_size, config.transformer["mlp_dim"], act='noact')
self.fc2 = LinearActivation(config.transformer["mlp_dim"], config.hidden_size, act='noact')
self.act_fn = ACT2FN["gelu"]
self.dropout = Dropout(config.transformer["dropout_rate"])
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.normal_(self.fc1.bias, std=1e-6)
nn.init.normal_(self.fc2.bias, std=1e-6)
def forward(self, x):
x = self.fc1(x)
x = self.act_fn(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Embeddings(nn.Module):
"""Construct the embeddings from patch, position embeddings.
"""
def __init__(self, config, img_size, in_channels=3):
super(Embeddings, self).__init__()
self.hybrid = None
img_size = _pair(img_size)
if config.patches.get("grid") is not None:
grid_size = config.patches["grid"]
patch_size = (img_size[0] // 16 // grid_size[0], img_size[1] // 16 // grid_size[1])
n_patches = (img_size[0] // 16) * (img_size[1] // 16)
self.hybrid = True
else:
patch_size = _pair(config.patches["size"])
n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1])
self.hybrid = False
if self.hybrid:
self.hybrid_model = ResNetV2(block_units=config.resnet.num_layers,
width_factor=config.resnet.width_factor)
in_channels = self.hybrid_model.width * 16
self.patch_embeddings = Conv2d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=patch_size)
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches+1, config.hidden_size))
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.dropout = Dropout(config.transformer["dropout_rate"])
def forward(self, x):
B = x.shape[0]
cls_tokens = self.cls_token.expand(B, -1, -1)
if self.hybrid:
x = self.hybrid_model(x)
x = self.patch_embeddings(x)
x = x.flatten(2)
x = x.transpose(-1, -2)
x = torch.cat((cls_tokens, x), dim=1)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
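# Worked example (assuming the non-hybrid ViT-B_16 config with img_size=384 and
# patch_size=16): the Conv2d yields (384 // 16) ** 2 = 576 patch embeddings, the
# cls token is prepended, and position_embeddings has shape (1, 577, hidden_size).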
class Block(nn.Module):
def __init__(self, config, vis):
super(Block, self).__init__()
self.hidden_size = config.hidden_size
self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn = Mlp(config)
self.attn = Attention(config, vis)
if QUANT:
self.layernorm_input1_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.layernorm_input2_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def forward(self, x):
h = x
if QUANT:
x = self.attention_norm(self.layernorm_input1_quantizer(x))
else:
x = self.attention_norm(x)
x, weights = self.attn(x)
if QUANT:
x = self.add1_local_input_quantizer(x) + self.add1_residual_input_quantizer(h)
else:
x = x + h
h = x
if QUANT:
x = self.ffn_norm(self.layernorm_input2_quantizer(x))
else:
x = self.ffn_norm(x)
x = self.ffn(x)
# print('adding bias', x[0,0,:8])
if QUANT:
x = self.add2_local_input_quantizer(x) + self.add2_residual_input_quantizer(h)
else:
x = x + h
# print('residual:', h[0,0,:8])
# print('adding bias+res', x[0,0,:8])
return x, weights
def load_from(self, weights, n_block):
ROOT = f"Transformer/encoderblock_{n_block}"
with torch.no_grad():
query_weight = np2th(weights[pjoin(ROOT, ATTENTION_Q, "kernel")]).view(self.hidden_size, self.hidden_size).t()
key_weight = np2th(weights[pjoin(ROOT, ATTENTION_K, "kernel")]).view(self.hidden_size, self.hidden_size).t()
value_weight = np2th(weights[pjoin(ROOT, ATTENTION_V, "kernel")]).view(self.hidden_size, self.hidden_size).t()
out_weight = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "kernel")]).view(self.hidden_size, self.hidden_size).t()
query_bias = np2th(weights[pjoin(ROOT, ATTENTION_Q, "bias")]).view(-1)
key_bias = np2th(weights[pjoin(ROOT, ATTENTION_K, "bias")]).view(-1)
value_bias = np2th(weights[pjoin(ROOT, ATTENTION_V, "bias")]).view(-1)
out_bias = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "bias")]).view(-1)
self.attn.query.weight.copy_(query_weight)
self.attn.key.weight.copy_(key_weight)
self.attn.value.weight.copy_(value_weight)
self.attn.out.weight.copy_(out_weight)
self.attn.query.bias.copy_(query_bias)
self.attn.key.bias.copy_(key_bias)
self.attn.value.bias.copy_(value_bias)
self.attn.out.bias.copy_(out_bias)
mlp_weight_0 = np2th(weights[pjoin(ROOT, FC_0, "kernel")]).t()
mlp_weight_1 = np2th(weights[pjoin(ROOT, FC_1, "kernel")]).t()
mlp_bias_0 = np2th(weights[pjoin(ROOT, FC_0, "bias")]).t()
mlp_bias_1 = np2th(weights[pjoin(ROOT, FC_1, "bias")]).t()
self.ffn.fc1.weight.copy_(mlp_weight_0)
self.ffn.fc2.weight.copy_(mlp_weight_1)
self.ffn.fc1.bias.copy_(mlp_bias_0)
self.ffn.fc2.bias.copy_(mlp_bias_1)
self.attention_norm.weight.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "scale")]))
self.attention_norm.bias.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "bias")]))
self.ffn_norm.weight.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "scale")]))
self.ffn_norm.bias.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "bias")]))
class Encoder(nn.Module):
def __init__(self, config, vis):
super(Encoder, self).__init__()
self.vis = vis
self.layer = nn.ModuleList()
self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
for _ in range(config.transformer["num_layers"]):
layer = Block(config, vis)
self.layer.append(copy.deepcopy(layer))
def forward(self, hidden_states):
attn_weights = []
for idx, layer_block in enumerate(self.layer):
hidden_states, weights = layer_block(hidden_states)
if self.vis:
attn_weights.append(weights)
encoded = self.encoder_norm(hidden_states)
return encoded, attn_weights
class Transformer(nn.Module):
def __init__(self, config, img_size, vis):
super(Transformer, self).__init__()
self.embeddings = Embeddings(config, img_size=img_size)
self.encoder = Encoder(config, vis)
def forward(self, input_ids):
embedding_output = self.embeddings(input_ids)
encoded, attn_weights = self.encoder(embedding_output)
return encoded, attn_weights
class VisionTransformerINT8(nn.Module):
def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
super(VisionTransformerINT8, self).__init__()
self.num_classes = num_classes
self.zero_head = zero_head
self.classifier = config.classifier
self.transformer = Transformer(config, img_size, vis)
self.head = Linear(config.hidden_size, num_classes)
def forward(self, x, labels=None):
x, attn_weights = self.transformer(x)
logits = self.head(x[:, 0])
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_classes), labels.view(-1))
return logits, loss
else:
return logits, attn_weights
def load_from(self, weights):
with torch.no_grad():
if self.zero_head:
nn.init.zeros_(self.head.weight)
nn.init.zeros_(self.head.bias)
else:
self.head.weight.copy_(np2th(weights["head/kernel"]).t())
self.head.bias.copy_(np2th(weights["head/bias"]).t())
self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True))
self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"]))
self.transformer.embeddings.cls_token.copy_(np2th(weights["cls"]))
self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"]))
self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"]))
posemb = np2th(weights["Transformer/posembed_input/pos_embedding"])
posemb_new = self.transformer.embeddings.position_embeddings
if posemb.size() == posemb_new.size():
self.transformer.embeddings.position_embeddings.copy_(posemb)
else:
logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size()))
ntok_new = posemb_new.size(1)
if self.classifier == "token":
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))
for bname, block in self.transformer.encoder.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, n_block=uname)
if self.transformer.embeddings.hybrid:
self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights["conv_root/kernel"], conv=True))
gn_weight = np2th(weights["gn_root/scale"]).view(-1)
gn_bias = np2th(weights["gn_root/bias"]).view(-1)
self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)
self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)
for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, n_block=bname, n_unit=uname)
CONFIGS = {
'ViT-B_16': configs.get_b16_config(),
'ViT-B_32': configs.get_b32_config(),
'ViT-L_16': configs.get_l16_config(),
'ViT-L_32': configs.get_l32_config(),
'ViT-H_14': configs.get_h14_config(),
'R50-ViT-B_16': configs.get_r50_b16_config(),
'testing': configs.get_testing(),
}
|
FasterTransformer-main
|
examples/pytorch/vit/ViT-quantization/vit_int8.py
|
# coding=utf-8
# Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for training models with pytorch-quantization"""
import pickle
import re
import time
import numpy as np
import torch
import random
import pytorch_quantization as quantization
import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization import calib
class Logger:
def info(self, s):
print("INFO:", s)
def warn(self, s):
print("WARN:", s)
logger = Logger()
name_width = 50 # max width of layer names
qname_width = name_width + 20 # max width of quantizer names
def add_arguments(parser):
"""Add arguments to parser for functions defined in quant_trainer."""
group = parser.add_argument_group('quant_trainer arguments')
group.add_argument('--wprec', type=int, default=8,
help='weight precision')
group.add_argument('--aprec', type=int, default=8,
help='activation precision')
group.add_argument('--quant-per-tensor', action='store_true',
help='per tensor weight scaling')
group.add_argument('--quant-disable', action='store_true',
help='disable all quantizers')
group.add_argument('--quant-disable-keyword', type=str, nargs='+',
help='disable quantizers by keyword')
group.add_argument('--calibrator', default='percentile',
help='which quantization range calibrator to use')
group.add_argument('--percentile', default=99.999, type=float,
help='percentile for PercentileCalibrator')
group.add_argument('--fuse-qkv', action='store_true',
help='use the same scale factor for qkv')
group.add_argument('--narrow-range', action='store_true',
help='use [-127, 127] range for activations rather than [-128, 127]')
group.add_argument('--quant-mode', type=str, default="ft2",
help='predefined quantization mode, choices: ["ft1", "ft2", "ft3", "trt"]')
def set_args(args):
if args.quant_mode == 'ft1':
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = False
args.quant_disable = False
args.quant_disable_keyword = ['final_input', 'layernorm_input', 'softmax_input', 'residual_input', 'local_input', 'aftergemm']
args.fuse_qkv = False
args.narrow_range = False
elif args.quant_mode == 'ft2':
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = True
args.quant_disable = False
args.quant_disable_keyword = ['final_input', 'layernorm_input', 'residual_input', 'local_input']
args.fuse_qkv = True
args.narrow_range = False
elif args.quant_mode == 'ft3':
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = True
args.quant_disable = False
args.quant_disable_keyword = ['final_input', 'layernorm_input', 'local_input']
args.fuse_qkv = True
args.narrow_range = False
elif args.quant_mode == 'trt':
# for demobert
args.wprec = 8
args.aprec = 8
args.quant_per_tensor = True
args.quant_disable = False
args.quant_disable_keyword = ['layernorm_input', 'softmax_input', 'aftergemm']
args.fuse_qkv = True
args.narrow_range = False
else:
raise ValueError("wrong argument value for 'quant_mode'")
return args
def set_default_quantizers(args):
"""Set default quantizers before creating the model."""
if args.calibrator == 'max':
calib_method = 'max'
elif args.calibrator == 'percentile':
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator')
calib_method = 'histogram'
elif args.calibrator == 'mse':
calib_method = 'histogram'
elif args.calibrator == 'entropy':
calib_method = 'histogram'
else:
raise ValueError(F'Invalid calibrator {args.calibrator}')
input_desc = QuantDescriptor(num_bits=args.aprec,
calib_method=calib_method,
narrow_range=args.narrow_range,
)
weight_desc = QuantDescriptor(num_bits=args.wprec,
axis=(None if args.quant_per_tensor else (0,)),
)
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False):
"""Function called before the training loop."""
logger.info('Configuring Model for Quantization')
logger.info(F'using quantization package {quantization.__file__}')
if not calib:
if args.quant_disable:
set_quantizer_by_name(model, [''], _disabled=True)
if args.quant_disable_keyword:
set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
if args.fuse_qkv:
fuse_qkv(model, args)
if args.local_rank in [-1, 0] and not calib:
print_quant_summary(model)
time.sleep(1) # prevent eval printing overlap
def enable_calibration(model):
"""Enable calibration of all *_input_quantizer modules in model."""
logger.info("Enabling Calibration")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:{qname_width}}: {module}")
def finish_calibration(model, args):
"""Disable calibration and load amax for all "*_input_quantizer modules in model."""
logger.info("Loading calibrated amax")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
elif args.calibrator == "percentile":
module.load_calib_amax("percentile", percentile=args.percentile)
else:
module.load_calib_amax(args.calibrator)
module.enable_quant()
module.disable_calib()
else:
module.enable()
if args.fuse_qkv:
fuse_qkv(model, args)
model.cuda()
print_quant_summary(model)
def fuse_qkv(model, args):
"""Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM.
Force the weight and output scale factors to match by taking the max of (Q,K,V).
"""
def fuse3(qq, qk, qv):
if not hasattr(qq, '_amax') or not hasattr(qk, '_amax') or not hasattr(qv, '_amax'):
logger.warn('missing amax buffer, unable to fuse')
return
q = qq._amax.detach().item()
k = qk._amax.detach().item()
v = qv._amax.detach().item()
amax = max(q, k, v)
qq._amax.fill_(amax)
qk._amax.fill_(amax)
qv._amax.fill_(amax)
logger.info(f' q={q:7.4f} k={k:7.4f} v={v:7.4f} -> {amax:7.4f}')
for name, mod in model.named_modules():
if name.endswith('.attn'):
logger.info(f'FUSE_QKV: {name:{name_width}}')
fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
fuse3(mod.query._aftergemm_quantizer, mod.key._aftergemm_quantizer, mod.value._aftergemm_quantizer)
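# Numeric sketch of fuse3 (hypothetical amax values): if the calibrated amax of
# Q, K and V are 1.2, 0.8 and 2.0, all three buffers are filled with 2.0, so the
# three projections share one scale and can be fused into a single INT8 GEMM.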
def print_quant_summary(model):
"""Print summary of all quantizer modules in the model."""
counters = {'quantizers': 0, 'enabled_quantizers': 0,
'weights': 0, 'quant_weights': 0, 'sparse_weights': 0,
'params': 0, 'sparse_params': 0}
for name, mod in model.named_modules():
if isinstance(mod, quantization.nn.TensorQuantizer):
print(f'{name:80} {mod}')
counters['quantizers'] += 1
if not mod._disabled:
counters['enabled_quantizers'] += 1
for pname, param in mod.named_parameters():
if '.' in pname:
continue
counters['params'] += param.numel()
# fullname = f'{name}.{pname}'
# print(f'{fullname:80} {param.numel():12}')
weight_quantizer = getattr(mod, '_weight_quantizer', None)
if pname == 'weight':
counters['weights'] += param.numel()
if weight_quantizer is not None and not weight_quantizer._disabled:
counters['quant_weights'] += param.numel()
counters['sparse_weights'] += param.eq(0).sum().item()
counters['sparse_params'] += param.eq(0).sum().item()
def print_fraction(a, b, counters, desc):
va = counters[a]
vb = counters[b]
pct = va/vb * 100 if vb != 0 else float('NaN')
print(f'{counters[a]:12}/{vb:12} ({pct:6.2f}%) {desc}')
print_fraction('enabled_quantizers', 'quantizers', counters, 'TensorQuantizers enabled')
print_fraction('quant_weights', 'weights', counters, 'Quantized weights')
print_fraction('sparse_weights', 'weights', counters, 'Zero weights')
print_fraction('weights', 'params', counters, 'Weight parameters')
print('\n\n')
def set_quantizer(name, mod, quantizer, k, v):
"""Set attributes for mod.quantizer."""
quantizer_mod = getattr(mod, quantizer, None)
if quantizer_mod is not None:
assert hasattr(quantizer_mod, k)
setattr(quantizer_mod, k, v)
else:
logger.warn(f'{name} has no {quantizer}')
def set_quantizers(name, mod, which='both', **kwargs):
"""Set quantizer attributes for mod."""
s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += (f' {k}={v}')
if which in ['input', 'both']:
set_quantizer(name, mod, '_input_quantizer', k, v)
if which in ['weight', 'both']:
set_quantizer(name, mod, '_weight_quantizer', k, v)
logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
"""Set quantizer attributes for layers where name contains a substring in names."""
for name, mod in model.named_modules():
if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
for n in names:
if re.search(n, name):
set_quantizers(name, mod, **kwargs)
elif name.endswith('_quantizer'):
for n in names:
if re.search(n, name):
s = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += (f' {k}={v}')
setattr(mod, k, v)
logger.info(s)
|
FasterTransformer-main
|
examples/pytorch/vit/ViT-quantization/quant_utils.py
|
# coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import argparse
import os
import random
import numpy as np
from datetime import timedelta
import torch
import torch.distributed as dist
from tqdm import tqdm
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
import sys
sys.path.insert(0, "./ViT-pytorch")
from models.modeling import CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.dist_util import get_world_size
from data import build_loader
from config import get_config
import quant_utils
from vit_int8 import VisionTransformerINT8
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class Knowledge_Distillation_Loss(torch.nn.Module):
def __init__(self, scale, T = 3):
super(Knowledge_Distillation_Loss, self).__init__()
self.KLdiv = torch.nn.KLDivLoss()
self.T = T
self.scale = scale
def get_knowledge_distillation_loss(self, output_student, output_teacher):
loss_kl = self.KLdiv(torch.nn.functional.log_softmax(output_student / self.T, dim=1), torch.nn.functional.softmax(output_teacher / self.T, dim=1))
loss = loss_kl
return self.scale * loss
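# Minimal usage sketch (hypothetical tensors), mirroring how train() below
# combines this loss with the hard-label cross entropy:
#   dis_loss = Knowledge_Distillation_Loss(scale=args.distillation_loss_scale)
#   student_logits, ce_loss = model(x, y)
#   with torch.no_grad():
#       teacher_logits, _ = teacher(x)
#   loss = ce_loss + dis_loss.get_knowledge_distillation_loss(student_logits, teacher_logits)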
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
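# Worked example: for output of shape (2, 5) and target [3, 1] with topk=(1, 5),
# Acc@1 is 100 * (#samples whose argmax matches the target) / 2, while Acc@5 is
# trivially 100.0 because every target falls within the top 5 of 5 classes.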
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def save_model(args, model):
model_to_save = model.module if hasattr(model, 'module') else model
model_checkpoint = os.path.join(args.output_dir, "%s_checkpoint.bin" % args.name)
torch.save(model_to_save.state_dict(), model_checkpoint)
logger.info("Saved model checkpoint to %s", model_checkpoint)
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 1000
model = VisionTransformerINT8(config, args.img_size, zero_head=False, num_classes=num_classes)
model.load_from(np.load(args.pretrained_dir))
model.to(args.device)
num_params = count_parameters(model)
logger.info("{}".format(config))
logger.info("Training parameters %s", args)
logger.info("Total Parameter: \t%2.1fM" % num_params)
print(num_params)
return args, model
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
@torch.no_grad()
def valid(args, config, model, test_loader):
# Validation!
eval_losses = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
logger.info("***** Running Validation *****")
logger.info(" Num steps = %d", len(test_loader))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
all_preds, all_label = [], []
loss_fct = torch.nn.CrossEntropyLoss()
for step, batch in enumerate(test_loader):
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
logits, _ = model(x)
eval_loss = loss_fct(logits, y)
acc1, acc5 = accuracy(logits, y, topk=(1, 5))
eval_losses.update(eval_loss.item(), y.size(0))
acc1_meter.update(acc1.item(), y.size(0))
acc5_meter.update(acc5.item(), y.size(0))
if step % config.PRINT_FREQ == 0:
logger.info(
f'Test: [{step}/{len(test_loader)}]\t'
f'Loss {eval_losses.val:.4f} ({eval_losses.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg
def calib(args, config, model):
""" Calibrate the model """
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
dataset_train, dataset_val, train_loader, test_loader = build_loader(config, args)
# Calibration
quant_utils.configure_model(model, args, calib=True)
model.eval()
quant_utils.enable_calibration(model)
# Run forward passes on a sample of the training set
for step, (samples, targets) in enumerate(tqdm(train_loader, desc='Calibration', total=args.num_calib_batch)):
if step > args.num_calib_batch:
break
samples = samples.to(args.device)
outputs = model(samples)
quant_utils.finish_calibration(model, args)
# model.load_state_dict(torch.load('checkpoint/{}_{}_{}.pth'.format(args.model_type, args.quant_mode, args.percentile)))
quant_utils.configure_model(model, args, calib=False)
if args.local_rank in [-1, 0]:
accuracy = valid(args, config, model, test_loader)
logger.info("Test Accuracy: \t%f" %accuracy)
output_model_path = os.path.join(args.calib_output_path, '{}_calib.pth'.format(args.model_type))
if not os.path.exists(args.calib_output_path):
os.mkdir(args.calib_output_path)
torch.save(model.state_dict(), output_model_path)
logger.info(f'Model is saved to {output_model_path}')
def train(args, config):
num_classes = 1000
model_config = CONFIGS[args.model_type]
model = VisionTransformerINT8(model_config, args.img_size, zero_head=False, num_classes=num_classes)
model_ckpt = torch.load(args.pretrained_dir, map_location="cpu")
model.load_state_dict(model_ckpt["model"] if "model" in model_ckpt else model_ckpt, strict=False)
model.cuda()
model.train()
teacher = None
dis_loss = None
if args.distill:
teacher = VisionTransformerINT8(model_config, args.img_size, zero_head=False, num_classes=num_classes)
teacher.load_from(np.load(args.teacher))
dis_loss = Knowledge_Distillation_Loss(scale=args.distillation_loss_scale).cuda()
teacher.cuda()
teacher.eval()
quant_utils.set_quantizer_by_name(teacher, [''], _disabled=True)
""" Train the model """
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# Prepare dataset
# train_loader, test_loader = get_loader(args)
dataset_train, dataset_val, train_loader, test_loader = build_loader(config, args)
# Prepare optimizer and scheduler
optimizer = torch.optim.SGD(model.parameters(),
lr=args.qat_lr,
momentum=0.9,
weight_decay=args.weight_decay)
print('args.qat_lr: %.6f' % (args.qat_lr))
print('optimizer.lr: %.6f' % optimizer.state_dict()['param_groups'][0]['lr'])
t_total = args.num_steps
# if args.decay_type == "cosine":
# scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
# else:
# scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
print('Inspecting the keys of optimizer.param_groups[0]:')
i_list = list(optimizer.param_groups[0].keys())
print(i_list)
if args.fp16:
model, optimizer = amp.initialize(models=model,
optimizers=optimizer,
opt_level=args.fp16_opt_level)
amp._amp_state.loss_scalers[0]._loss_scale = 2**20
# Distributed training
if args.local_rank != -1:
model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
# Train!
logger.info("***** Running training *****")
logger.info(" Total optimization epochs = %d", args.num_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
losses = AverageMeter()
global_step, best_acc = 0, 0
quant_utils.configure_model(model, args, calib=False)
for epoch_i in range(args.num_epochs):
model.train()
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
outputs, loss = model(x, y)
if teacher:
with torch.no_grad():
teacher_outputs, _ = teacher(x)
loss_t = dis_loss.get_knowledge_distillation_loss(outputs, teacher_outputs)
loss = loss + loss_t
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# scheduler.step()
optimizer.step()
lr = optimizer.param_groups[0]['lr']
optimizer.zero_grad()
global_step += 1
epoch_iterator.set_description(
"EPOCH [%d/%d] (%d / %d Steps) (loss=%2.5f) (lr=%.7f)" %
(epoch_i, args.num_epochs, global_step, len(epoch_iterator), losses.val, lr)
)
if args.local_rank in [-1, 0]:
accuracy = valid(args, config, model, test_loader)
if best_acc < accuracy:
save_model(args, model)
best_acc = accuracy
model.train()
# if global_step % t_total == 0:
# break
losses.reset()
# if global_step % t_total == 0:
# break
logger.info("Best Accuracy: \t%f" % best_acc)
logger.info("End Training!")
def parse_option():
parser = argparse.ArgumentParser()
# Required parameters
# easy config modification
parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--calib', action='store_true', help='Perform calibration only')
parser.add_argument('--train', action='store_true', help='Perform training only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--num-calib-batch', type=int, default=10, help='Number of batches for calibration. 0 will disable calibration.')
parser.add_argument('--calib-batchsz', type=int, default=8, help='Batch size when doing calibration')
parser.add_argument('--calib-output-path', type=str, default='calib-checkpoint', help='Output directory to save calibrated model')
parser.add_argument("--num-epochs", type=int, default=10, help="Number of epochs to run QAT fintuning.")
parser.add_argument("--qat-lr", type=float, default=1e-6, help="learning rate for QAT.")
parser.add_argument("--distill", action='store_true', help='Using distillation')
parser.add_argument("--teacher", type=str, help='teacher model path')
parser.add_argument('--distillation_loss_scale', type=float, default=10000., help="scale applied to distillation component of loss")
# distributed training
# parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')
parser.add_argument("--name", required=True,
help="Name of this run. Used for monitoring.")
parser.add_argument("--dataset", choices=["cifar10", "cifar100"], default="cifar100",
help="Which downstream task.")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--output_dir", default="output", type=str,
help="The output directory where checkpoints will be written.")
parser.add_argument("--img_size", default=384, type=int,
help="Resolution size")
parser.add_argument("--train_batch_size", default=64, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int,
help="Total batch size for eval.")
parser.add_argument("--eval_every", default=2000, type=int,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
parser.add_argument("--learning_rate", default=3e-2, type=float,
help="The initial learning rate for SGD.")
parser.add_argument("--weight_decay", default=0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--num_steps", default=10000, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
help="How to decay the learning rate.")
parser.add_argument("--warmup_steps", default=500, type=int,
help="Step of training to perform learning rate warmup for.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
quant_utils.add_arguments(parser)
args, unparsed = parser.parse_known_args()
if args.quant_mode is not None:
args = quant_utils.set_args(args)
quant_utils.set_default_quantizers(args)
config = get_config(args)
return args, config
def main():
args, config = parse_option()
# print(config.dump())
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl',
timeout=timedelta(minutes=60))
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" %
(args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
# Set seed
set_seed(args)
# Calibration
if args.calib:
args, model = setup(args)
calib(args, config, model)
# Quantization-Aware Training
if args.train:
# args, model = setup(args)
train(args, config)
if __name__ == "__main__":
main()
|
FasterTransformer-main
|
examples/pytorch/vit/ViT-quantization/main.py
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import numpy as np
import torch.distributed as dist
from torchvision import datasets, transforms
from timm.data import create_transform
try:
from torchvision.transforms import InterpolationMode
def _pil_interp(method):
if method == 'bicubic':
return InterpolationMode.BICUBIC
elif method == 'lanczos':
return InterpolationMode.LANCZOS
elif method == 'hamming':
return InterpolationMode.HAMMING
else:
# default bilinear, do we want to allow nearest?
return InterpolationMode.BILINEAR
except:
from timm.data.transforms import _pil_interp
def build_val_loader(config):
config.freeze()
dataset_val, _ = build_dataset(is_train=False, config=config)
# print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
indices = np.arange(0, len(dataset_val), 1)
if config.TEST.SEQUENTIAL:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_val = torch.utils.data.distributed.DistributedSampler(
dataset_val, shuffle=False
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=config.DATA.BATCH_SIZE,
shuffle=False,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=False
)
return dataset_val, data_loader_val
def build_loader(config, args):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(is_train=True, config=config)
config.freeze()
# print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
# print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
num_tasks = dist.get_world_size()
global_rank = dist.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
if config.TEST.SEQUENTIAL:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_val = torch.utils.data.distributed.DistributedSampler(
dataset_val, shuffle=False
)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.calib_batchsz if args.calib else config.DATA.BATCH_SIZE,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=config.DATA.BATCH_SIZE,
shuffle=False,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=True
)
return dataset_train, dataset_val, data_loader_train, data_loader_val
def build_dataset(is_train, config):
transform = build_transform(is_train, config)
if config.DATA.DATASET == 'imagenet':
prefix = 'train' if is_train else 'val'
root = os.path.join(config.DATA.DATA_PATH, prefix)
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif config.DATA.DATASET == 'imagenet22K':
raise NotImplementedError("Imagenet-22K will come soon.")
else:
raise NotImplementedError("We only support ImageNet Now.")
return dataset, nb_classes
def build_transform(is_train, config):
resize_im = config.DATA.IMG_SIZE > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=config.DATA.IMG_SIZE,
is_training=True,
color_jitter=config.AUG.COLOR_JITTER if config.AUG.COLOR_JITTER > 0 else None,
auto_augment=config.AUG.AUTO_AUGMENT if config.AUG.AUTO_AUGMENT != 'none' else None,
re_prob=config.AUG.REPROB,
re_mode=config.AUG.REMODE,
re_count=config.AUG.RECOUNT,
interpolation=config.DATA.INTERPOLATION,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(config.DATA.IMG_SIZE, padding=4)
return transform
t = []
if resize_im:
if config.TEST.CROP:
size = int((256 / 224) * config.DATA.IMG_SIZE)
t.append(
transforms.Resize(size, interpolation=_pil_interp(config.DATA.INTERPOLATION)),
# to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(config.DATA.IMG_SIZE))
else:
t.append(
transforms.Resize((config.DATA.IMG_SIZE, config.DATA.IMG_SIZE),
interpolation=_pil_interp(config.DATA.INTERPOLATION))
)
t.append(transforms.ToTensor())
t.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
return transforms.Compose(t)
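# Worked example for the eval branch above (assuming config.DATA.IMG_SIZE = 384
# and config.TEST.CROP enabled): images are resized to int((256 / 224) * 384) = 438
# on the short side, then center-cropped to 384x384 before ToTensor/Normalize.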
|
FasterTransformer-main
|
examples/pytorch/vit/ViT-quantization/data.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import threading
import os
import argparse
import timeit
import torch
import torch.distributed as dist
import torch.cuda.nvtx as nvtx
import time
import sys
import numpy as np
import random
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.utils import print_memory_usage
from examples.pytorch.bert.utils.encoder import HuggingFaceEncoder
from examples.pytorch.bert.utils.encoder import CustomEncoder
from examples.pytorch.bert.utils.encoder import EncoderWeights
def sequence_mask(lengths, max_len=None, is_2d=True):
batch_size = lengths.numel()
max_len = max_len or lengths.max()
mask = (torch.arange(0, max_len, device=lengths.device)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
if is_2d:
return mask
else:
mask = mask.view(-1, 1, 1, max_len)
m2 = mask.transpose(2, 3)
return mask * m2
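# Example (hypothetical lengths): lengths = [2, 4], max_len = 4
#   is_2d=True  -> (2, 4) boolean mask: [[1, 1, 0, 0], [1, 1, 1, 1]]
#   is_2d=False -> (2, 1, 4, 4) attention mask formed by the outer product
#                  mask * mask^T, i.e. 1 only where both positions are valid.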
def main():
parser = argparse.ArgumentParser()
parser.add_argument('batch_size', type=int,
help='batch size')
parser.add_argument('layer_num', type=int,
help='number of layers')
parser.add_argument('seq_len', type=int,
help='sequence length')
parser.add_argument('head_num', type=int,
help='head number')
parser.add_argument('head_size', type=int,
help='size per head')
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument('--int8_mode', type=int, default=0, metavar='NUMBER',
help='int8 mode (default: 0)', choices=[0, 1, 2, 3])
parser.add_argument('--time', action='store_true',
help='test the time or not.')
parser.add_argument('--avg_seq_len', type=int, default=-1, metavar='NUMBER',
help='average sequence length (default: -1)')
parser.add_argument('--sparse', action='store_true',
help='Whether use sparse feature (only support SM 8.0 and 8.6, and SPARSITY_SUPPORT need be ON).')
parser.add_argument('--weight_path', type=str,
default=None,
help='path containing the pretrained weights')
parser.add_argument('--ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('-thread_num', '--thread_num', type=int, default=1, metavar='int',
help='Testing multithread if thread_num > 1.')
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='int',
help='Size of tensor parallelism.')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='int',
help='Size of pipeline parallelism.')
parser.add_argument('--error-threshold', type=float,
help='Maximum allowed mean difference between the HuggingFace and FasterTransformer outputs.')
args = parser.parse_args()
bert_example(vars(args))
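# Illustrative invocation (values chosen only as an example):
#   python bert_example.py 8 12 32 12 64 --data_type fp16 --time
# i.e. batch_size=8, layer_num=12, seq_len=32, head_num=12, head_size=64,
# running in fp16 and printing per-iteration timings.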
def bert_example(args):
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
if dist.is_mpi_available():
try:
dist.init_process_group(backend='mpi')
rank = dist.get_rank()
world_size = dist.get_world_size()
except:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
batch_size = args['batch_size']
seq_len = args['seq_len']
if args['weight_path'] is not None:
if 'large' in args['weight_path']:
layer_num = 24
head_num = 16
head_size = 64
elif 'base' in args['weight_path']:
layer_num = 12
head_num = 12
head_size = 64
else:
layer_num = args['layer_num']
head_num = args['head_num']
head_size = args['head_size']
else:
layer_num = args['layer_num']
head_num = args['head_num']
head_size = args['head_size']
hidden_dim = head_num * head_size
if args['int8_mode'] == 1:
per_channel = True
elif args['int8_mode'] == 2 or args['int8_mode'] == 3:
per_channel = False
elif args['int8_mode'] != 0:
raise ValueError("wrong int8_mode argument")
if rank == 0:
print("\n=============== Argument ===============")
for key in args:
print("{}: {}".format(key, args[key]))
print("========================================\n")
inp = torch.empty(batch_size, seq_len, hidden_dim).cuda()
torch.nn.init.normal_(inp, -0.02, 0.02)
if args['avg_seq_len'] > 0:
mem_seq_lens = torch.ones((batch_size,)) * args['avg_seq_len']
mem_seq_lens = mem_seq_lens.to(torch.int32).cuda()
elif args['avg_seq_len'] == -1:
mem_seq_lens = torch.randint(1, seq_len + 1, (batch_size,), dtype=torch.int32).cuda()
else:
raise ValueError("wrong avg_seq_len")
mask = sequence_mask(mem_seq_lens, args['seq_len'], False).to(torch.float)
# mask = torch.randint(0, 2, (batch_size, seq_len, seq_len), dtype=torch.float32).cuda()
if args['data_type'] == 'fp16' or args['int8_mode'] != 0:
inp = inp.half()
mask = mask.half()
elif args['data_type'] == 'bf16':
inp = inp.bfloat16()
mask = mask.bfloat16()
pretrained_weights = torch.load(args['weight_path']) if (args['weight_path'] is not None) else None
weights = EncoderWeights(layer_num, hidden_dim, pretrained_weights, args['sparse'])
ft_weights = EncoderWeights(layer_num, hidden_dim, weights.weights, args['sparse'],
args["tensor_para_size"], args["pipeline_para_size"])
world_size = dist.get_world_size() if dist.is_mpi_available() else 1
assert world_size == args["tensor_para_size"] * \
args["pipeline_para_size"], f"[ERROR] world_size ({world_size}) != tensor_para_size ({args['tensor_para_size']}) * pipeline_para_size ({args['pipeline_para_size']})"
ft_weights._generated_weights = True # for int8 handling
if rank == 0:
hf_encoder = HuggingFaceEncoder(layer_num, head_num, head_size, weights)
hf_encoder.cuda()
if args['data_type'] == 'fp16' or args['int8_mode'] != 0:
hf_encoder.half()
elif args['data_type'] == 'bf16':
hf_encoder.bfloat16()
hf_encoder.eval()
hf_encoder = torch.jit.trace(hf_encoder, (inp, mask))
if args['int8_mode'] != 0:
ft_weights.to_int8(args['sparse'], args['ths_path'])
elif args['data_type'] == 'fp16':
ft_weights.to_half()
elif args['data_type'] == 'bf16':
ft_weights.to_bfloat16()
ft_weights.to_cuda()
custom_encoder = CustomEncoder(layer_num, head_num, head_size, ft_weights,
int8_mode=args['int8_mode'],
remove_padding=False,
sparse=args['sparse'],
path=args['ths_path'],
tensor_para_size=args["tensor_para_size"],
pipeline_para_size=args["pipeline_para_size"])
custom_encoder = torch.jit.script(custom_encoder)
eff_custom_encoder = CustomEncoder(layer_num, head_num, head_size, ft_weights,
int8_mode=args['int8_mode'],
remove_padding=True,
sparse=args['sparse'],
path=args['ths_path'],
tensor_para_size=args["tensor_para_size"],
pipeline_para_size=args["pipeline_para_size"])
eff_custom_encoder = torch.jit.script(eff_custom_encoder)
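# Note: the two FasterTransformer encoders above differ only in remove_padding;
# the "eff" variant compacts padded tokens away before the transformer layers
# (the effective-transformer path), so both are compared against the same
# HuggingFace reference output below.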
with torch.no_grad():
output_mask = sequence_mask(mem_seq_lens, args['seq_len']).to(mask.dtype).unsqueeze(-1)
if rank == 0:
hf_output = hf_encoder(inp, mask)[0] * output_mask
print(hf_output)
print(hf_output.size())
ft_inp = inp.to(f"cuda:{rank}")
ft_mask = mask.to(f"cuda:{rank}")
ft_mem_seq_lens = mem_seq_lens.to(f"cuda:{rank}")
ft_output_mask = output_mask.to(f"cuda:{rank}")
ft_output = custom_encoder(ft_inp, ft_mask, ft_mem_seq_lens)[0] * ft_output_mask
if rank == 0:
print(ft_output)
print(ft_output.size())
eff_ft_output = eff_custom_encoder(ft_inp, ft_mask, ft_mem_seq_lens)[0] * ft_output_mask
if rank == 0:
print(eff_ft_output)
print(eff_ft_output.size())
if rank == 0:
FT_diff = torch.abs(hf_output - ft_output).float() # Prevent error under bfloat16
print('FT Mean diff: {}'.format(torch.mean(FT_diff)))
print('FT Max diff: {}'.format(torch.max(FT_diff)))
print('FT Min diff: {}'.format(torch.min(FT_diff)))
if rank == 0:
EFF_diff = torch.abs(hf_output - eff_ft_output).float() # Prevent error under bfloat16
print('EFF-FT Mean diff: {}'.format(torch.mean(EFF_diff)))
print('EFF-FT Max diff: {}'.format(torch.max(EFF_diff)))
print('EFF-FT Min diff: {}'.format(torch.min(EFF_diff)))
if args['time']:
iterations = 100
if rank == 0:
for i in range(iterations):
output = hf_encoder(inp, mask)
t10 = timeit.default_timer()
# nvtx.range_push("hf")
for i in range(iterations):
# nvtx.range_push("hf"+str(i))
output = hf_encoder(inp, mask)
# nvtx.range_pop()
# nvtx.range_pop()
t1 = timeit.default_timer() - t10
# time.sleep(60)
for i in range(iterations):
output = custom_encoder(ft_inp, ft_mask, ft_mem_seq_lens)
t20 = timeit.default_timer()
# nvtx.range_push("ext")
for i in range(iterations):
# nvtx.range_push("ext"+str(i))
output = custom_encoder(ft_inp, ft_mask, ft_mem_seq_lens)
# nvtx.range_pop()
# nvtx.range_pop()
t2 = timeit.default_timer() - t20
# time.sleep(60)
for i in range(iterations):
output = eff_custom_encoder(ft_inp, ft_mask, ft_mem_seq_lens)
t30 = timeit.default_timer()
# nvtx.range_push("eff_ext")
for i in range(iterations):
# nvtx.range_push("eff_ext"+str(i))
output = eff_custom_encoder(ft_inp, ft_mask, ft_mem_seq_lens)
# nvtx.range_pop()
# nvtx.range_pop()
t3 = timeit.default_timer() - t30
# time.sleep(60)
if rank == 0:
print("[INFO] HuggingFaceEnocder time costs: {:.2f} ms".format(t1 * 1000 / iterations))
print("[INFO] FasterTransformer time costs: {:.2f} ms".format(t2 * 1000 / iterations))
print("[INFO] EFF-FasterTransformer time costs: {:.2f} ms".format(t3 * 1000 / iterations))
if args['thread_num'] > 1:
# Multi-threading demonstration
assert world_size == 1, "[ERROR] the multi-thread test does not support multi-GPU / multi-node (MGMN) runs"
thread_list = []
thread_num = args['thread_num']
iterations = 100
def run():
t40 = timeit.default_timer()
for i in range(iterations):
ft_output = custom_encoder(ft_inp, ft_mask, ft_mem_seq_lens)[0] * ft_output_mask
t4 = timeit.default_timer() - t40
if rank == 0:
diff = torch.abs(hf_output - ft_output)
print('FT Mean diff: {}'.format(torch.mean(diff)))
print('FT Max diff: {}'.format(torch.max(diff)))
print('FT Min diff: {}'.format(torch.min(diff)))
print("[INFO] batch_size {} max_seq_len {} {} layer FT-OP-time {:6.2f} ms with {} threads".format(batch_size,
seq_len, layer_num, t4, thread_num))
for i in range(thread_num):
thread_list.append(threading.Thread(target=run, name="RunFT"))
for t in thread_list:
t.start()
for t in thread_list:
t.join()
torch.cuda.empty_cache()
sys.stdout.flush()
if rank == 0:
if (args["error_threshold"] != None):
assert max(torch.mean(FT_diff), torch.mean(EFF_diff)) < args["error_threshold"], "[ERROR] TEST FAIL!"
print("[INFO] TEST PASS!")
return max(torch.mean(FT_diff), torch.mean(EFF_diff))
else:
return 0
if __name__ == '__main__':
main()
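# For multi-GPU runs the MPI backend is used, so the script is typically
# launched with an MPI launcher, e.g. (illustrative command, not from the
# original file):
#   mpirun -n 2 python bert_example.py 8 12 32 12 64 --tensor_para_size 2
# which satisfies the assertion world_size == tensor_para_size * pipeline_para_size.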
|
FasterTransformer-main
|
examples/pytorch/bert/bert_example.py
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import random
import timeit
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
BertConfig,
BertTokenizer,
)
from utils.modeling_bert import BertForSequenceClassification
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def convert_type(tensor, data_type):
if data_type == 'fp16':
return tensor.half()
elif data_type == 'fp32':
return tensor.float()
elif data_type == 'bf16':
return tensor.bfloat16()
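# Illustrative usage (hypothetical tensor name): convert_type(mask, 'fp16')
# returns a half-precision copy of `mask`; note that an unrecognized data_type
# falls through every branch and the function implicitly returns None.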
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
# args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
args.eval_batch_size = 1
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
#if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
# model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
# eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
start_time = timeit.default_timer()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = [batch[0], convert_type(batch[1], args.data_type), batch[2]]
outputs = model(*inputs)
# tmp_eval_loss, logits = outputs[:2]
logits = outputs[0]
# eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().float().cpu().numpy()
out_label_ids = batch[3].detach().float().cpu().numpy()
else:
preds = np.append(preds, logits.detach().float().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, batch[3].detach().float().cpu().numpy(), axis=0)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation for " + eval_task + " done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
examples = (
processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=False,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
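# The returned TensorDataset yields tuples of
# (input_ids, attention_mask, token_type_ids, label) in that order, which is
# exactly the order evaluate() relies on when indexing batch[0] ... batch[3].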
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--model_type", type=str, help="ori, ths, thsext")
parser.add_argument('--data_type', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32')
parser.add_argument('--ths_path', type=str, default='./lib/libth_transformer.so',
help='path of the pyt_fastertransformer dynamic lib file')
parser.add_argument('--remove_padding', action='store_true',
help='Remove the padding of sentences inside the encoder.')
args = parser.parse_args()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s",
args.local_rank,
device,
args.n_gpu,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
config = BertConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = BertTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
logger.info("Parameters %s", args)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path)
checkpoints = [args.model_name_or_path]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
use_ths = args.model_type.startswith('ths')
model = BertForSequenceClassification.from_pretrained(checkpoint, torchscript=use_ths)
model.to(args.device)
if args.data_type == 'fp16':
logger.info("Use fp16")
model.half()
elif args.data_type == 'bf16':
logger.info("Use bf16")
model.bfloat16()
if args.model_type == 'thsext':
logger.info("Use custom BERT encoder for TorchScript")
from utils.encoder import EncoderWeights, CustomEncoder
weights = EncoderWeights(
model.config.num_hidden_layers, model.config.hidden_size,
torch.load(os.path.join(checkpoint, 'pytorch_model.bin'), map_location='cpu'))
weights.to_cuda()
if args.data_type == 'fp16':
weights.to_half()
elif args.data_type == 'bf16':
weights.to_bfloat16()
enc = CustomEncoder(model.config.num_hidden_layers,
model.config.num_attention_heads,
model.config.hidden_size//model.config.num_attention_heads,
weights,
remove_padding=args.remove_padding,
path=os.path.abspath(args.ths_path))
enc_ = torch.jit.script(enc)
model.replace_encoder(enc_)
if use_ths:
logger.info("Use TorchScript mode")
fake_input_id = torch.LongTensor(args.per_gpu_eval_batch_size, args.max_seq_length)
fake_input_id.fill_(1)
fake_input_id = fake_input_id.to(args.device)
fake_mask = torch.ones(args.per_gpu_eval_batch_size, args.max_seq_length).to(args.device)
fake_type_id = fake_input_id.clone().detach()
if args.data_type == 'fp16':
fake_mask = fake_mask.half()
elif args.data_type == 'bf16':
fake_mask = fake_mask.bfloat16()
model.eval()
with torch.no_grad():
model_ = torch.jit.trace(model, (fake_input_id, fake_mask, fake_type_id))
model = model_
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
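# Illustrative invocation (paths and task name are placeholders, not from the
# original file):
#   python run_glue.py --task_name mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path ./mrpc_checkpoint --output_dir ./mrpc_output \
#       --do_eval --model_type thsext --data_type fp16 \
#       --ths_path ./lib/libth_transformer.so --remove_padding
# evaluates a fine-tuned BERT checkpoint on GLUE MRPC with the FasterTransformer
# custom encoder swapped in for the stock BERT encoder.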
|
FasterTransformer-main
|
examples/pytorch/bert/run_glue.py
|