# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""GLUE dataset."""
from abc import ABC
from abc import abstractmethod
from torch.utils.data import Dataset
from megatron import print_rank_0
from tasks.data_utils import build_sample
from tasks.data_utils import build_tokens_types_paddings_from_text
class GLUEAbstractDataset(ABC, Dataset):
"""GLUE base dataset class."""
def __init__(self, task_name, dataset_name, datapaths,
tokenizer, max_seq_length):
# Store inputs.
self.task_name = task_name
self.dataset_name = dataset_name
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
print_rank_0(' > building {} dataset for {}:'.format(self.task_name,
self.dataset_name))
# Process the files.
string = ' > paths:'
for path in datapaths:
string += ' ' + path
print_rank_0(string)
self.samples = []
for datapath in datapaths:
self.samples.extend(self.process_samples_from_single_path(datapath))
print_rank_0(' >> total number of samples: {}'.format(
len(self.samples)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
raw_sample = self.samples[idx]
ids, types, paddings = build_tokens_types_paddings_from_text(
raw_sample['text_a'], raw_sample['text_b'],
self.tokenizer, self.max_seq_length)
sample = build_sample(ids, types, paddings,
raw_sample['label'], raw_sample['uid'])
return sample
@abstractmethod
def process_samples_from_single_path(self, datapath):
"""Abstract method that takes a single path / filename and
returns a list of dataset samples, each sample being a dict of
{'text_a': string, 'text_b': string, 'label': int, 'uid': int}
"""
pass
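

# A minimal illustration (not part of the original file) of the subclass
# contract documented above; the three-column TSV layout here is hypothetical.
class _ExampleSentencePairDataset(GLUEAbstractDataset):
    """Hypothetical GLUE-style task reading 'text_a<TAB>text_b<TAB>label' rows."""

    def process_samples_from_single_path(self, datapath):
        samples = []
        with open(datapath, 'r') as f:
            for uid, line in enumerate(f):
                text_a, text_b, label = line.strip().split('\t')
                samples.append({'text_a': text_a, 'text_b': text_b,
                                'label': int(label), 'uid': uid})
        return samples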
Megatron-LM-master | tasks/glue/data.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""MNLI dataset."""
from megatron import print_rank_0
from tasks.data_utils import clean_text
from .data import GLUEAbstractDataset
LABELS = {'contradiction': 0, 'entailment': 1, 'neutral': 2}
class MNLIDataset(GLUEAbstractDataset):
def __init__(self, name, datapaths, tokenizer, max_seq_length,
test_label='contradiction'):
self.test_label = test_label
super().__init__('MNLI', name, datapaths,
tokenizer, max_seq_length)
def process_samples_from_single_path(self, filename):
""""Implement abstract method."""
print_rank_0(' > Processing {} ...'.format(filename))
samples = []
total = 0
first = True
is_test = False
with open(filename, 'r') as f:
for line in f:
row = line.strip().split('\t')
if first:
first = False
if len(row) == 10:
is_test = True
print_rank_0(
' reading {}, {} and {} columns and setting '
'labels to {}'.format(
row[0].strip(), row[8].strip(),
row[9].strip(), self.test_label))
else:
                        print_rank_0(' reading {}, {}, {}, and {} columns '
'...'.format(
row[0].strip(), row[8].strip(),
row[9].strip(), row[-1].strip()))
continue
text_a = clean_text(row[8].strip())
text_b = clean_text(row[9].strip())
unique_id = int(row[0].strip())
label = row[-1].strip()
if is_test:
label = self.test_label
assert len(text_a) > 0
assert len(text_b) > 0
assert label in LABELS
assert unique_id >= 0
sample = {'text_a': text_a,
'text_b': text_b,
'label': LABELS[label],
'uid': unique_id}
total += 1
samples.append(sample)
if total % 50000 == 0:
print_rank_0(' > processed {} so far ...'.format(total))
print_rank_0(' >> processed {} samples.'.format(len(samples)))
return samples
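
# Illustration (hypothetical row, not from the MNLI release): the reader above
# expects tab-separated rows where column 0 is a numeric id, columns 8 and 9
# hold the two sentences, and the last column holds the gold label, e.g.:
#   17<TAB>...<TAB>The cat sat.<TAB>A cat was sitting.<TAB>...<TAB>entailment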
Megatron-LM-master | tasks/glue/mnli.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Detokenization."""
import re
def ptb_detokenizer(string):
string = string.replace(" '", "'")
string = string.replace(" \n", "\n")
string = string.replace("\n ", "\n")
string = string.replace(" n't", "n't")
string = string.replace(" N ", "1 ")
string = string.replace("$ 1", "$1")
string = string.replace("# 1", "#1")
return string
def wikitext_detokenizer(string):
# contractions
string = string.replace("s '", "s'")
string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
# number separators
string = string.replace(" @-@ ", "-")
string = string.replace(" @,@ ", ",")
string = string.replace(" @.@ ", ".")
# punctuation
string = string.replace(" : ", ": ")
string = string.replace(" ; ", "; ")
string = string.replace(" . ", ". ")
string = string.replace(" ! ", "! ")
string = string.replace(" ? ", "? ")
string = string.replace(" , ", ", ")
# double brackets
string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string)
string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string)
string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string)
string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string)
string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
# miscellaneous
string = string.replace("= = = =", "====")
string = string.replace("= = =", "===")
string = string.replace("= =", "==")
string = string.replace(" " + chr(176) + " ", chr(176))
string = string.replace(" \n", "\n")
string = string.replace("\n ", "\n")
string = string.replace(" N ", " 1 ")
string = string.replace(" 's", "'s")
return string
def lambada_detokenizer(string):
return string
_DETOKENIZERS = {
'ptb': ptb_detokenizer,
'wiki': wikitext_detokenizer,
'lambada': lambada_detokenizer,
}
def get_detokenizer(path):
    for key in _DETOKENIZERS.keys():
        if key in path:
            return _DETOKENIZERS[key]
    raise ValueError('no detokenizer found for {}'.format(path))
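

if __name__ == "__main__":
    # Quick sanity check (not part of the original file): WikiText markup such
    # as " @-@ " and spaced punctuation should fold back into plain text.
    assert get_detokenizer("wiki.valid.tokens") is wikitext_detokenizer
    raw = "an 8 @-@ bit bus , built in 1984 . "
    print(wikitext_detokenizer(raw))  # -> "an 8-bit bus, built in 1984. "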
Megatron-LM-master | tasks/zeroshot_gpt/detokenizer.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Zero-shot datasets."""
import json
import math
import numpy as np
import torch
from megatron import get_args
from megatron import print_rank_0
from megatron import get_tokenizer
from .detokenizer import get_detokenizer
def build_dataset(task):
"""Helper function to select and build dataset."""
if task == 'LAMBADA':
return _build_lambada_dataset()
if task == 'WIKITEXT103':
return _build_wikitext103_dataset()
raise NotImplementedError('dataset for {} task is not '
'implemented.'.format(task))
class _LMDataset(torch.utils.data.Dataset):
    def __init__(self, tokens, seq_len, pad_idx, num_original_tokens,
                 num_tokenized_tokens, overlapping_eval=None):
        self.tokens = tokens
        self.seq_len = seq_len
        self.pad_idx = pad_idx
        self.overlapping_eval = overlapping_eval
        if self.overlapping_eval is None:
            self.overlapping_eval = self.seq_len
        self.overlapping_eval = max(1, self.overlapping_eval)
        self.num_original_tokens = num_original_tokens
        self.num_tokenized_tokens = num_tokenized_tokens
        self.total_targets = len(self.tokens) - 1
        # remove first sequence tokens
        targets = max(self.total_targets - self.overlapping_eval, 0)
        self.total_sequences = max(
            math.ceil(targets / self.overlapping_eval) + 1, 1)
def __len__(self):
return self.total_sequences
    def __getitem__(self, idx):
        start_idx = idx * self.overlapping_eval
        end_idx = start_idx + self.seq_len
        tokens = self.tokens[start_idx:end_idx + 1]
        num_tokens = len(tokens)
        pad_mask = [1] * num_tokens
        if num_tokens < self.seq_len + 1:
            num_pad = (self.seq_len + 1 - num_tokens)
            pad_mask += [0] * (num_pad)
            tokens += [self.pad_idx] * num_pad
        pad_mask = np.array(pad_mask[1:])
        if self.overlapping_eval != self.seq_len and idx != 0:
            pad_mask[:-self.overlapping_eval] *= 0
return {'text': np.array(tokens), 'pad_mask': pad_mask}
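
    # Worked example (illustrative numbers, not part of the original file):
    # with len(tokens) = 5001, seq_len = 1024 and overlapping_eval = 512:
    #   total_targets   = 5000
    #   targets         = 5000 - 512 = 4488
    #   total_sequences = ceil(4488 / 512) + 1 = 9 + 1 = 10
    # Window i covers tokens[i*512 : i*512 + 1025]; for i > 0 the pad mask
    # zeroes everything except the final 512 positions, so every target token
    # is scored exactly once across windows.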
class _LambadaDataset(torch.utils.data.Dataset):
def __init__(self, path, pad_idx, tokenizer, seq_len, strict=False):
print_rank_0('> building lambada dataset from {} ...'.format(path))
self.seq_len = seq_len
self.pad_idx = pad_idx
self.tokenizer = tokenizer
self.strict = strict
self.tokens = []
self.labels = []
with open(path, 'r') as f:
for line in f.readlines():
text = json.loads(line)['text']
tokens, labels = self.get_tokens(text)
self.tokens.append(tokens)
self.labels.append(labels)
def get_tokens(self, text):
if not self.strict:
tokens = self.tokenizer.tokenize(text)
return tokens[:-1], [tokens[-1]]
last_token = text.split()[-1]
start_idx = text.rfind(last_token)
beginning_tokens = self.tokenizer.tokenize(text[:start_idx].strip())
last_token = self.tokenizer.tokenize(' ' + last_token)
return beginning_tokens, last_token
def __len__(self):
return len(self.tokens)
def __getitem__(self, idx):
tokens = self.tokens[idx]
num_tokens = len(tokens)
pad_mask = [0] * num_tokens
labels = self.labels[idx]
pad_mask += [1] * len(labels)
tokens = tokens + labels
num_tokens = len(tokens)
if num_tokens < self.seq_len + 1:
num_pad = (self.seq_len + 1 - num_tokens)
pad_mask += [0] * (num_pad)
tokens += [self.pad_idx] * num_pad
pad_mask = np.array(pad_mask[1:])
return {'text': np.array(tokens), 'pad_mask': pad_mask}
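
    # Illustration (not part of the original file): for a passage ending in
    # "... he reached for the door", strict mode tokenizes the prefix and
    # ' door' separately, so the answer span is the whole final word, while
    # non-strict mode simply holds out the last BPE token of the full
    # tokenization (which may be only a word piece).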
def _build_lambada_dataset():
"""Build lambada dataset."""
args = get_args()
tokenizer = get_tokenizer()
assert len(args.valid_data) == 1
val_dataset = _LambadaDataset(args.valid_data[0], tokenizer.eod, tokenizer,
args.seq_length, args.strict_lambada)
print_rank_0(' > found {} samples.'.format(len(val_dataset)))
return val_dataset
def _build_wikitext103_dataset():
""""""
args = get_args()
tokenizer = get_tokenizer()
assert len(args.valid_data) == 1
with open(args.valid_data[0], "rb") as reader:
entire_data = reader.read().decode('utf-8')
num_original_tokens = len(entire_data.strip().split(" "))
entire_data = get_detokenizer(args.valid_data[0])(entire_data)
tokenized_data = tokenizer.tokenize(entire_data)
num_tokenized_tokens = len(tokenized_data)
val_dataset = _LMDataset(tokenized_data, args.seq_length, tokenizer.eod,
num_original_tokens, num_tokenized_tokens,
args.overlapping_eval)
    print_rank_0(' > number of original tokens: {}, number of tokenized '
                 'tokens: {}'.format(num_original_tokens, num_tokenized_tokens))
return val_dataset
Megatron-LM-master | tasks/zeroshot_gpt/datasets.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""GPT zero-shot evaluation."""
import math
import torch
from megatron import get_args
from megatron import print_rank_0, is_last_rank
from megatron import get_tokenizer
from megatron.core import parallel_state, tensor_parallel
from megatron.checkpointing import load_checkpoint
from megatron.model import GPTModel
from megatron.training import get_model
from megatron.utils import get_ltor_masks_and_position_ids, unwrap_model
from megatron.core.pipeline_parallel.p2p_communication import recv_forward, send_forward
from megatron.arguments import core_transformer_config_from_args
from tasks.finetune_utils import build_data_loader
from .datasets import build_dataset
def get_model_provider(eval_metric):
"""Based on evaluation metric set the parallel-output flag and
return the model provider."""
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
config = core_transformer_config_from_args(get_args())
if eval_metric == 'loss':
parallel_output = True
elif eval_metric == 'accuracy':
parallel_output = False
else:
raise NotImplementedError('output type for {} evaluation metric '
'is not supported.'.format(eval_metric))
print_rank_0('building GPT model ...')
model = GPTModel(config, num_tokentypes=0, parallel_output=parallel_output,
pre_process=pre_process, post_process=post_process)
return model
return model_provider
def process_batch(batch):
"""Process batch and produce inputs for the model."""
args = get_args()
tokenizer = get_tokenizer()
loss_mask = batch['pad_mask'].long().cuda().contiguous().byte()
tokens_ = batch['text'].long().cuda().contiguous()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
    # Get the masks and position ids.
attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss)
return tokens, labels, attention_mask, position_ids, loss_mask
def forward_step(batch, model, eval_metric, config):
"""Forward step."""
# Get the batch.
tokens, labels, attention_mask, position_ids, loss_mask = process_batch(
batch)
# Tell the model what our actual batch size will be
args = get_args()
args.micro_batch_size = len(labels)
tensor_shape = (args.seq_length, args.micro_batch_size, args.hidden_size)
input_tensor = recv_forward(tensor_shape, config)
# Forward pass through the model.
unwrapped_model = unwrap_model(model)
unwrapped_model.set_input_tensor(input_tensor)
output = model(tokens, position_ids, attention_mask)
send_forward(output, config)
if parallel_state.is_pipeline_last_stage():
# For loss, return the unreduced loss.
if eval_metric == 'loss':
losses = tensor_parallel.vocab_parallel_cross_entropy(
output.contiguous().float(), labels.contiguous())
loss = torch.sum(
losses.view(-1) * loss_mask.contiguous().view(-1).float())
return loss
# For accuracy, return the number of correctly predicted samples.
if eval_metric == 'accuracy':
outputs = torch.argmax(output, -1)
correct = (outputs == labels).float()
correct[(1 - loss_mask).bool()] = 1
correct = correct.prod(-1)
return correct.sum()
raise NotImplementedError('forward method for evaluation metric {} '
'is not implemented.'.format(eval_metric))
return None
def evaluate(data_loader, model, eval_metric):
"""Evaluation."""
args = get_args()
config = core_transformer_config_from_args(args)
# Turn on evaluation mode which disables dropout.
model.eval()
total_output = 0.0
with torch.no_grad():
# For all the batches in the dataset.
for iteration, batch in enumerate(data_loader):
if iteration % args.log_interval == 0:
print_rank_0('> working on iteration: {}'.format(iteration))
# Forward evaluation.
output = forward_step(batch, model, eval_metric, config)
# Reduce across processes.
if parallel_state.is_pipeline_last_stage():
torch.distributed.all_reduce(output,
group=parallel_state.get_data_parallel_group())
total_output += output
return total_output
def evaluate_and_print_results(task, data_loader, model, eval_metric):
"""Evaluate and print results on screen."""
# Evaluate and get results.
output = evaluate(data_loader, model, eval_metric)
string = ' validation results on {} | '.format(task)
if is_last_rank():
if eval_metric == 'loss':
num_tokenized_tokens = data_loader.dataset.num_tokenized_tokens
num_original_tokens = data_loader.dataset.num_original_tokens
val_loss = output / (num_tokenized_tokens - 1)
ppl = math.exp(min(20, val_loss))
token_ratio = (num_tokenized_tokens - 1) / (num_original_tokens - 1)
adjusted_ppl = math.exp(min(20, val_loss * token_ratio))
string += 'avg loss: {:.4E} | '.format(val_loss)
string += 'ppl: {:.4E} | '.format(ppl)
string += 'adjusted ppl: {:.4E} | '.format(adjusted_ppl)
string += 'token ratio: {} |'.format(token_ratio)
elif eval_metric == 'accuracy':
num_examples = len(data_loader.dataset)
acc = output / num_examples
string += 'number correct: {:.4E} | '.format(output)
string += 'total examples: {:.4E} | '.format(num_examples)
string += 'avg accuracy: {:.4E}'.format(acc)
else:
raise NotImplementedError('evaluation method for {} metric is not '
'implemented yet.'.format(eval_metric))
length = len(string) + 1
print('-' * length)
print(string)
print('-' * length)
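
# Worked example for the loss branch above (illustrative numbers): with
# val_loss = 3.0, num_tokenized_tokens = 120001 and num_original_tokens = 100001:
#   token_ratio  = 120000 / 100000 = 1.2
#   ppl          = exp(3.0)       ~ 20.09
#   adjusted ppl = exp(3.0 * 1.2) ~ 36.60
# The adjusted perplexity normalizes per original whitespace token rather than
# per BPE token, making numbers comparable across tokenizers.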
def main():
"""Main program."""
args = get_args()
if args.num_layers_per_virtual_pipeline_stage is not None:
print("Interleaved pipeline schedule is not yet supported for text generation.")
exit()
if args.task == 'LAMBADA':
eval_metric = 'accuracy'
elif args.task == 'WIKITEXT103':
eval_metric = 'loss'
else:
raise NotImplementedError('{} task is not implemented.'.format(
args.task))
# Set up model and load checkpoint.
model = get_model(get_model_provider(eval_metric), wrap_with_ddp=False)
if args.load is not None:
_ = load_checkpoint(model, None, None)
assert len(model) == 1, "Above condition should have caught this"
model = model[0]
# Data stuff.
dataset = build_dataset(args.task)
dataloader = build_data_loader(dataset, args.micro_batch_size,
args.num_workers, drop_last=False)
# Run evaluation.
evaluate_and_print_results(args.task, dataloader, model, eval_metric)
print_rank_0('done :-)')
Megatron-LM-master | tasks/zeroshot_gpt/evaluate.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Finetune utilities."""
import torch
import torch.nn.functional as F
from megatron import get_args
from megatron import print_rank_0
from megatron import get_timers
from megatron import utils
from megatron.core import mpu
from megatron.checkpointing import load_checkpoint
from megatron.checkpointing import save_checkpoint
from megatron.training import evaluate_and_print_results
from megatron.training import setup_model_and_optimizer
from megatron.training import train_step
from megatron.training import training_log
from megatron.utils import check_adlr_autoresume_termination
from megatron.utils import average_losses_across_data_parallel_group, print_params_min_max_norm
from megatron.core.enums import ModelType
def process_batch(batch):
"""Process batch and produce inputs for the model."""
images = batch[0].cuda().contiguous()
labels = batch[1].cuda().contiguous()
return images, labels
def build_data_loader(dataset, micro_batch_size,
num_workers, drop_last, shuffle):
"""Data loader. Note that batch-size is the local (per GPU) batch-size."""
# Sampler.
world_size = mpu.get_data_parallel_world_size()
rank = mpu.get_data_parallel_rank()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank,
drop_last=drop_last, shuffle=shuffle
)
# Data loader. Note that batch size is the per GPU batch size.
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=micro_batch_size,
sampler=sampler,
shuffle=False,
num_workers=num_workers,
drop_last=drop_last,
pin_memory=True,
)
return data_loader
def _build_infinite_size_dataloader(dataloader):
    """Build a looped dataloader with infinite size."""
    iterator = iter(dataloader)
    while True:
        try:
            yield next(iterator)
        except StopIteration:
            iterator = iter(dataloader)
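
# Usage sketch (not part of the original file): the generator restarts the
# underlying loader transparently once it is exhausted, e.g.
#   loader = torch.utils.data.DataLoader(list(range(10)), batch_size=4)
#   stream = _build_infinite_size_dataloader(loader)
#   batches = [next(stream) for _ in range(5)]  # wraps around after 3 batches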
def _build_train_valid_dataloaders(train_dataset, valid_dataset):
"""Traing and validation dataloaders."""
args = get_args()
print_rank_0('building train and validation dataloaders ...')
# Training dataset.
train_dataloader = build_data_loader(train_dataset, args.micro_batch_size,
args.num_workers, False, True)
# Set the training iterations.
args.train_iters_per_epoch = len(train_dataloader)
args.train_iters = args.epochs * args.train_iters_per_epoch
# Validation dataset. For this dataset, we do not need to set up
# shuffling so we can just use a simple infinite loop.
valid_dataloader_ = build_data_loader(valid_dataset, args.micro_batch_size,
args.num_workers, True, False)
valid_dataloader = _build_infinite_size_dataloader(valid_dataloader_)
# Now that we've built the data loaders, set batch_size arguments
# to the actual batch size the model will see for this dataset.
# This is necessary so pipeline transfers know what size they are
# and the LR schedule, which is based on samples seen, gets set
# correctly.
args.orig_micro_batch_size = args.micro_batch_size
args.orig_global_batch_size = args.global_batch_size
return train_dataloader, valid_dataloader
def _train(
model,
optimizer,
opt_param_scheduler,
forward_step,
train_dataloader,
valid_dataloader,
end_of_epoch_callback,
process_non_loss_data_func=None
):
"""Train the model."""
args = get_args()
timers = get_timers()
# Turn on training mode which enables dropout.
for m in model:
m.train()
# Tracking loss.
losses_dict_sum = {}
# Starting epoch and iteration
start_epoch = args.iteration // args.train_iters_per_epoch
start_iteration = args.iteration % args.train_iters_per_epoch
iteration = args.iteration
# Memory reporting flag.
report_memory_flag = True
# For each remaining epoch
timers("interval-time", log_level=0).start(barrier=True)
for epoch in range(start_epoch, args.epochs):
print_rank_0("working on epoch {} ...".format(epoch + 1))
# Set the data loader epoch to shuffle the index iterator.
train_dataloader.sampler.set_epoch(args.seed + epoch)
train_dataloader.dataset.set_epoch(epoch)
# For all the batches in the dataset.
for iteration_, batch in enumerate(train_dataloader):
# Ignore the iterations before starting value
if iteration_ < start_iteration:
continue
# Set to zero so the next epoch does not skip any batches.
start_iteration = 0
# Train for one step.
losses_dict, skipped_iter, grad_norm, num_zeros_in_grad = train_step(
forward_step, batch, model, optimizer, opt_param_scheduler
)
iteration += 1
# Logging.
params_norm = None
report_memory_flag = training_log(
losses_dict,
losses_dict_sum,
optimizer.param_groups[0]["lr"],
iteration,
optimizer.get_loss_scale().item(),
report_memory_flag,
skipped_iter,
grad_norm,
params_norm,
num_zeros_in_grad
)
# Autoresume
if args.adlr_autoresume and \
iteration % args.adlr_autoresume_interval == 0:
check_adlr_autoresume_termination(iteration, model, optimizer,
opt_param_scheduler)
# Checkpointing
if args.save and args.save_interval and \
iteration % args.save_interval == 0:
save_checkpoint(iteration, model, optimizer,
opt_param_scheduler)
# Evaluation
if args.eval_interval and iteration % args.eval_interval == 0:
prefix = "iteration {}".format(iteration)
evaluate_and_print_results(
prefix,
forward_step,
valid_dataloader,
model,
iteration,
process_non_loss_data_func,
False,
)
# Callback at the end of each epoch.
if end_of_epoch_callback is not None:
end_of_epoch_callback(model, epoch)
def finetune(
train_valid_datasets_provider,
model_provider,
forward_step,
model_type=ModelType.encoder_or_decoder,
process_non_loss_data_func=None,
end_of_epoch_callback_provider=None,
):
"""Main finetune function used across all tasks."""
args = get_args()
timers = get_timers()
# Train and validation data loaders.
timers("train/valid/test dataset/dataloder", log_level=0).start()
if args.epochs > 0:
train_dataset, valid_dataset = train_valid_datasets_provider()
train_dataloader, valid_dataloader = _build_train_valid_dataloaders(
train_dataset, valid_dataset
)
timers("train/valid/test dataset/dataloder").stop()
    # Build callback function.
timers("callback function", log_level=0).start()
end_of_epoch_callback = None
if end_of_epoch_callback_provider is not None:
end_of_epoch_callback = end_of_epoch_callback_provider()
timers("callback function").stop()
# Build model, optimizer and learning rate scheduler.
timers("model and optimizer", log_level=0).start()
model, optimizer, opt_param_scheduler = \
setup_model_and_optimizer(
model_provider,
model_type,
scale_lr_cond=lambda name, param: ".head." in name,
lr_mult=args.head_lr_mult)
timers("model and optimizer").stop()
# If pretrained checkpoint is provided and we have not trained for
# any iteration (i.e., iteration is zero), then load the pretrained
# checkpoint.
timers("pretrained checkpoint", log_level=0).start(barrier=True)
if args.iteration == 0 and args.pretrained_checkpoint is not None:
if args.pretrained_checkpoint_type == 'default':
original_load = args.load
args.load = args.pretrained_checkpoint
_ = load_checkpoint(model, None, None, strict=False)
args.load = original_load
elif args.pretrained_checkpoint_type == 'external':
unwrap_model = utils.unwrap_model(model)
state_dict = torch.load(args.pretrained_checkpoint,
map_location="cpu")
unwrap_model[0].module.backbone.load_state_dict(state_dict,
strict=False)
        elif args.pretrained_checkpoint_type == 'contrastive':
unwrap_model = utils.unwrap_model(model)
state_dict = torch.load(args.pretrained_checkpoint,
map_location="cpu")
state_dict = state_dict["model"]
state_dict = {k.replace("teacher.backbone.", ""): v
for k, v in state_dict.items()
if k.startswith("teacher.backbone.")}
unwrap_model[0].module.backbone.load_state_dict(state_dict,
strict=False)
else:
raise Exception("pretrained checkpoint type {} not supported".format(args.pretrained_checkpoint_type))
# This is critical when only model is loaded. We should make sure
# master parameters are also updated.
optimizer.reload_model_params()
timers("pretrained checkpoint").stop()
# Print setup timing.
print_rank_0("done with setups ...")
timers.log(
[
"train/valid/test dataset/dataloder",
"callback function",
"model and optimizer",
"pretrained checkpoint",
]
)
print_rank_0("training ...")
# Finetune the model.
if args.epochs > 0:
_train(
model,
optimizer,
opt_param_scheduler,
forward_step,
train_dataloader,
valid_dataloader,
end_of_epoch_callback,
process_non_loss_data_func,
)
# Or just evaluate.
else:
if end_of_epoch_callback is not None:
print_rank_0("evaluation only mode, setting epoch to -1")
end_of_epoch_callback(model, epoch=-1)
print_rank_0("done :-)")
Megatron-LM-master | tasks/vision/finetune_utils.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Main tasks functionality."""
import os
import sys
sys.path.append(
os.path.abspath(
os.path.join(
os.path.join(os.path.dirname(__file__), os.path.pardir),
os.path.pardir,
)
)
)
from megatron import get_args
from megatron.initialize import initialize_megatron
def get_tasks_args(parser):
"""Provide extra arguments required for tasks."""
group = parser.add_argument_group(title="tasks")
group.add_argument('--task', type=str, default='segment',
choices=['classify', 'segment_setr', 'segment_segformer'],
help='task name.')
group.add_argument("--epochs", type=int, default=None,
help="Number of finetunning epochs. Zero results in "
"evaluation only.")
group.add_argument('--pretrained-checkpoint-type', type=str, default='default',
                       choices=['default', 'external', 'contrastive'],
help='Type of pretrained checkpoint')
group.add_argument("--pretrained-checkpoint", type=str, default=None,
help="Pretrained checkpoint used for finetunning.")
group.add_argument('--seg-stride', type=int, default=None,
help='sliding window stride during evaluation')
return parser
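
# Example launch (hypothetical paths and arguments, shown for orientation only):
#   python tasks/vision/main.py --task classify --epochs 10 \
#       --pretrained-checkpoint /path/to/ckpt --data-path /path/to/imagenet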
if __name__ == "__main__":
initialize_megatron(extra_args_provider=get_tasks_args)
args = get_args()
if args.task == 'classify':
from tasks.vision.classification.classification import main
main()
elif args.task == 'segment_setr':
from tasks.vision.segmentation.finetune_setr import main
main()
elif args.task == 'segment_segformer':
from tasks.vision.segmentation.finetune_segformer import main
main()
|
Megatron-LM-master
|
tasks/vision/main.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Vision-classification finetuning/evaluation."""
import torch.nn.functional as F
from functools import partial
from megatron import get_args, get_timers
from megatron import print_rank_0
from megatron.model.vision.classification import VitClassificationModel
from megatron.data.vit_dataset import build_train_valid_datasets
from tasks.vision.classification.eval_utils import accuracy_func_provider
from tasks.vision.finetune_utils import finetune
from megatron.utils import average_losses_across_data_parallel_group
def classification():
def train_valid_datasets_provider():
"""Build train and validation dataset."""
args = get_args()
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w),
)
return train_ds, valid_ds
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
print_rank_0("building classification model for ImageNet ...")
return VitClassificationModel(num_classes=args.num_classes, finetune=True,
pre_process=pre_process, post_process=post_process)
def process_batch(batch):
"""Process batch and produce inputs for the model."""
images = batch[0].cuda().contiguous()
labels = batch[1].cuda().contiguous()
return images, labels
def cross_entropy_loss_func(labels, output_tensor):
logits = output_tensor
# Cross-entropy loss.
loss = F.cross_entropy(logits.contiguous().float(), labels)
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {'lm loss': averaged_loss[0]}
def _cross_entropy_forward_step(batch, model):
"""Simple forward step with cross-entropy loss."""
timers = get_timers()
# Get the batch.
timers("batch generator", log_level=2).start()
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
images, labels = process_batch(batch_)
timers("batch generator").stop()
# Forward model.
output_tensor = model(images)
return output_tensor, partial(cross_entropy_loss_func, labels)
"""Finetune/evaluate."""
finetune(
train_valid_datasets_provider,
model_provider,
forward_step=_cross_entropy_forward_step,
end_of_epoch_callback_provider=accuracy_func_provider,
)
def main():
classification()
Megatron-LM-master | tasks/vision/main.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Evaluation utilities."""
import os
from functools import partial
import torch
from megatron import get_args
from megatron import print_rank_0, print_rank_last
from megatron.core import mpu
from megatron.schedules import get_forward_backward_func
from tasks.vision.finetune_utils import build_data_loader
from tasks.vision.finetune_utils import process_batch
from torchvision import datasets, transforms
def accuracy_func_provider():
"""Provide function that calculates accuracies."""
args = get_args()
data_path = args.data_path
crop_size = (args.img_h, args.img_w)
# Build dataloaders.
val_data_path = data_path[1]
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
transform_val = transforms.Compose(
[
transforms.Resize(crop_size),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize,
]
)
dataset = datasets.ImageFolder(root=val_data_path, transform=transform_val)
dataloader = build_data_loader(
dataset,
args.micro_batch_size,
num_workers=args.num_workers,
drop_last=(mpu.get_data_parallel_world_size() > 1),
shuffle=False
)
def metrics_func(model, epoch):
print_rank_0("calculating metrics ...")
correct, total = calculate_correct_answers(model, dataloader, epoch)
percent = float(correct) * 100.0 / float(total)
print_rank_last(
" >> |epoch: {}| overall: correct / total = {} / {} = "
"{:.4f} %".format(epoch, correct, total, percent)
)
return metrics_func
def calculate_correct_answers(model, dataloader, epoch):
"""Calculate correct over total answers"""
forward_backward_func = get_forward_backward_func()
for m in model:
m.eval()
def loss_func(labels, output_tensor):
logits = output_tensor
loss_dict = {}
# Compute the correct answers.
predicted = torch.argmax(logits, dim=-1)
corrects = (predicted == labels).float()
# Add to the counters.
loss_dict['total'] = labels.size(0)
loss_dict['correct'] = corrects.sum().item()
return 0, loss_dict
#defined inside to capture output_predictions
def correct_answers_forward_step(batch, model):
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
images, labels = process_batch(batch_)
# Forward model.
output_tensor = model(images)
return output_tensor, partial(loss_func, labels)
with torch.no_grad():
# For all the batches in the dataset.
total = 0
correct = 0
for _, batch in enumerate(dataloader):
loss_dicts = forward_backward_func(correct_answers_forward_step, batch, model,
optimizer=None, timers=None, forward_only=True)
for loss_dict in loss_dicts:
total += loss_dict['total']
correct += loss_dict['correct']
for m in model:
m.train()
# Reduce.
if mpu.is_pipeline_last_stage():
unreduced = torch.cuda.LongTensor([correct, total])
torch.distributed.all_reduce(unreduced,
group=mpu.get_data_parallel_group())
# Print on screen.
correct_ans = unreduced[0].item()
total_count = unreduced[1].item()
return correct_ans, total_count
Megatron-LM-master | tasks/vision/classification/classification.py
# BSD 3-Clause License
#
# Copyright (c) Soumith Chintala 2016,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# code taken from
# https://github.com/pytorch/vision/blob/main/torchvision/datasets/cityscapes.py
# modified it to change max label index from 255 to 19 (num_classes)
import torch
import json
import os
from collections import namedtuple
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
import numpy as np
from torchvision.datasets.utils import extract_archive, verify_str_arg, iterable_to_str
from torchvision.datasets import VisionDataset
from PIL import Image
from megatron import print_rank_0
class Cityscapes(VisionDataset):
"""`Cityscapes <http://www.cityscapes-dataset.com/>`_ Dataset.
Args:
root (string): Root directory of dataset where directory ``leftImg8bit``
and ``gtFine`` or ``gtCoarse`` are located.
split (string, optional): The image split to use, ``train``, ``test`` or ``val`` if mode="fine"
otherwise ``train``, ``train_extra`` or ``val``
mode (string, optional): The quality mode to use, ``fine`` or ``coarse``
target_type (string or list, optional): Type of target to use, ``instance``, ``semantic``, ``polygon``
or ``color``. Can also be a list to output a tuple with all specified target types.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Examples:
Get semantic segmentation target
.. code-block:: python
dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
target_type='semantic')
img, smnt = dataset[0]
Get multiple targets
.. code-block:: python
dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
target_type=['instance', 'color', 'polygon'])
img, (inst, col, poly) = dataset[0]
Validate on the "coarse" set
.. code-block:: python
dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse',
target_type='semantic')
img, smnt = dataset[0]
"""
num_classes = 19
ignore_index = 19
color_table = torch.tensor(
[[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=torch.float, device='cuda')
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id',
'category', 'category_id', 'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 19, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 19, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 19, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 19, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 19, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 19, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 19, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 19, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 19, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 19, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 19, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 19, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 19, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 19, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 19, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)),
]
# label2trainid
label2trainid = { label.id : label.train_id for label in classes}
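    # e.g. label2trainid[7] == 0 ('road'); all void classes map to 19, the
    # ignore_index.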
def __init__(
self,
root: str,
split: str = "train",
mode: str = "fine",
resolution: int = 1024,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super(Cityscapes, self).__init__(root, transforms, transform, target_transform)
self.mode = 'gtFine' if mode == 'fine' else 'gtCoarse'
self.images_dir = os.path.join(self.root, 'leftImg8bit_trainvaltest/leftImg8bit', split)
self.targets_dir = os.path.join(self.root, 'gtFine_trainvaltest/gtFine', split)
self.split = split
self.resolution = resolution
self.images = []
self.targets = []
for city in sorted(os.listdir(self.images_dir)):
img_dir = os.path.join(self.images_dir, city)
target_dir = os.path.join(self.targets_dir, city)
for file_name in os.listdir(img_dir):
target_name = '{}_{}_labelIds.png'.format(file_name.split('_leftImg8bit')[0], self.mode)
self.images.append(os.path.join(img_dir, file_name))
self.targets.append(os.path.join(target_dir, target_name))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert('RGB')
target = Image.open(self.targets[index])
target = np.array(target)
target_copy = target.copy()
for k, v in Cityscapes.label2trainid.items():
binary_target = (target == k)
target_copy[binary_target] = v
target = target_copy
target = Image.fromarray(target.astype(np.uint8))
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.images)
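
# Usage sketch (hypothetical root; requires the extracted Cityscapes archives):
#   dataset = Cityscapes('/data/cityscapes', split='val', mode='fine')
#   image, target = dataset[0]  # PIL images; target pixels hold train ids,
#                               # with 19 marking ignored classes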
Megatron-LM-master | tasks/vision/segmentation/cityscapes.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#copyright (c) go-hiroaki & Chokurei
#email: guangmingwu2010@gmail.com
# guozhilingty@gmail.com
#
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
eps = 1e-6
def _binarize(y_data, threshold):
"""
args:
y_data : [float] 4-d tensor in [batch_size, channels, img_rows, img_cols]
threshold : [float] [0.0, 1.0]
return 4-d binarized y_data
"""
y_data[y_data < threshold] = 0.0
y_data[y_data >= threshold] = 1.0
return y_data
def _argmax(y_data, dim):
"""
args:
y_data : 4-d tensor in [batch_size, chs, img_rows, img_cols]
dim : int
return 3-d [int] y_data
"""
return torch.argmax(y_data, dim).int()
def _get_tp(y_pred, y_true):
"""
args:
y_true : [int] 3-d in [batch_size, img_rows, img_cols]
y_pred : [int] 3-d in [batch_size, img_rows, img_cols]
return [float] true_positive
"""
return torch.sum(y_true * y_pred).float()
def _get_fp(y_pred, y_true):
"""
args:
y_true : 3-d ndarray in [batch_size, img_rows, img_cols]
y_pred : 3-d ndarray in [batch_size, img_rows, img_cols]
return [float] false_positive
"""
return torch.sum((1 - y_true) * y_pred).float()
def _get_tn(y_pred, y_true):
"""
args:
y_true : 3-d ndarray in [batch_size, img_rows, img_cols]
y_pred : 3-d ndarray in [batch_size, img_rows, img_cols]
return [float] true_negative
"""
return torch.sum((1 - y_true) * (1 - y_pred)).float()
def _get_fn(y_pred, y_true):
"""
args:
y_true : 3-d ndarray in [batch_size, img_rows, img_cols]
y_pred : 3-d ndarray in [batch_size, img_rows, img_cols]
return [float] false_negative
"""
return torch.sum(y_true * (1 - y_pred)).float()
def _get_weights(y_true, nb_ch):
"""
args:
y_true : 3-d ndarray in [batch_size, img_rows, img_cols]
nb_ch : int
return [float] weights
"""
batch_size, img_rows, img_cols = y_true.shape
pixels = batch_size * img_rows * img_cols
weights = [torch.sum(y_true==ch).item() / pixels for ch in range(nb_ch)]
return weights
class CFMatrix(object):
def __init__(self, des=None):
self.des = des
def __repr__(self):
return "ConfusionMatrix"
    def __call__(self, y_pred, y_true, ignore_index, threshold=0.5):
        """
        args:
            y_true : 3-d tensor in [batch_size, img_rows, img_cols]
            y_pred : 3-d tensor in [batch_size, img_rows, img_cols], argmax-ed
            ignore_index : int, equal to the number of valid classes
            threshold : [0.0, 1.0], used only in the binary case
        return confusion matrix
        """
        batch_size, img_rows, img_cols = y_pred.shape
        chs = ignore_index
device = y_true.device
if chs == 1:
y_pred = _binarize(y_pred, threshold)
y_true = _binarize(y_true, threshold)
nb_tp = _get_tp(y_pred, y_true)
nb_fp = _get_fp(y_pred, y_true)
nb_tn = _get_tn(y_pred, y_true)
nb_fn = _get_fn(y_pred, y_true)
mperforms = [nb_tp, nb_fp, nb_tn, nb_fn]
performs = None
else:
performs = torch.zeros(chs, 4).to(device)
weights = _get_weights(y_true, chs)
for ch in range(chs):
                y_true_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
                y_false_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
                y_pred_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
y_true_ch[y_true == ch] = 1
y_false_ch[torch.logical_and((y_true != ch), (y_true != ignore_index))] = 1
y_pred_ch[y_pred == ch] = 1
nb_tp = _get_tp(y_pred_ch, y_true_ch)
nb_fp = torch.sum(y_false_ch * y_pred_ch).float()
nb_tn = torch.sum(y_false_ch * (1 - y_pred_ch)).float()
nb_fn = _get_fn(y_pred_ch, y_true_ch)
performs[int(ch), :] = torch.FloatTensor([nb_tp, nb_fp, nb_tn, nb_fn])
mperforms = sum([i*j for (i, j) in zip(performs, weights)])
return mperforms, performs
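
# Usage sketch (illustrative shapes, not part of the original file):
#   y_pred = torch.randint(0, 19, (2, 64, 64)).float()  # argmax-ed labels
#   y_true = torch.randint(0, 20, (2, 64, 64)).float()  # 19 = ignored pixels
#   mean_counts, per_class = CFMatrix()(y_pred, y_true, ignore_index=19)
# per_class[c] holds [tp, fp, tn, fn] for class c; mean_counts weights the
# rows by class frequency.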
class OAAcc(object):
def __init__(self, des="Overall Accuracy"):
self.des = des
def __repr__(self):
return "OAcc"
def __call__(self, y_pred, y_true, threshold=0.5):
"""
args:
y_true : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
threshold : [0.0, 1.0]
return (tp+tn)/total
"""
batch_size, chs, img_rows, img_cols = y_true.shape
device = y_true.device
if chs == 1:
y_pred = _binarize(y_pred, threshold)
y_true = _binarize(y_true, threshold)
else:
y_pred = _argmax(y_pred, 1)
y_true = _argmax(y_true, 1)
nb_tp_tn = torch.sum(y_true == y_pred).float()
mperforms = nb_tp_tn / (batch_size * img_rows * img_cols)
performs = None
return mperforms, performs
class Precision(object):
def __init__(self, des="Precision"):
self.des = des
def __repr__(self):
return "Prec"
def __call__(self, y_pred, y_true, threshold=0.5):
"""
args:
y_true : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
threshold : [0.0, 1.0]
return tp/(tp+fp)
"""
batch_size, chs, img_rows, img_cols = y_true.shape
device = y_true.device
if chs == 1:
y_pred = _binarize(y_pred, threshold)
y_true = _binarize(y_true, threshold)
nb_tp = _get_tp(y_pred, y_true)
nb_fp = _get_fp(y_pred, y_true)
            mperforms = nb_tp / (nb_tp + nb_fp + eps)
performs = None
else:
y_pred = _argmax(y_pred, 1)
y_true = _argmax(y_true, 1)
performs = torch.zeros(chs, 1).to(device)
weights = _get_weights(y_true, chs)
for ch in range(chs):
                y_true_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
                y_pred_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
y_true_ch[y_true == ch] = 1
y_pred_ch[y_pred == ch] = 1
nb_tp = _get_tp(y_pred_ch, y_true_ch)
nb_fp = _get_fp(y_pred_ch, y_true_ch)
                performs[int(ch)] = nb_tp / (nb_tp + nb_fp + eps)
mperforms = sum([i*j for (i, j) in zip(performs, weights)])
return mperforms, performs
class Recall(object):
def __init__(self, des="Recall"):
self.des = des
def __repr__(self):
return "Reca"
def __call__(self, y_pred, y_true, threshold=0.5):
"""
args:
y_true : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
threshold : [0.0, 1.0]
return tp/(tp+fn)
"""
batch_size, chs, img_rows, img_cols = y_true.shape
device = y_true.device
if chs == 1:
y_pred = _binarize(y_pred, threshold)
y_true = _binarize(y_true, threshold)
nb_tp = _get_tp(y_pred, y_true)
nb_fn = _get_fn(y_pred, y_true)
            mperforms = nb_tp / (nb_tp + nb_fn + eps)
performs = None
else:
y_pred = _argmax(y_pred, 1)
y_true = _argmax(y_true, 1)
performs = torch.zeros(chs, 1).to(device)
weights = _get_weights(y_true, chs)
for ch in range(chs):
                y_true_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
                y_pred_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
y_true_ch[y_true == ch] = 1
y_pred_ch[y_pred == ch] = 1
nb_tp = _get_tp(y_pred_ch, y_true_ch)
nb_fn = _get_fn(y_pred_ch, y_true_ch)
                performs[int(ch)] = nb_tp / (nb_tp + nb_fn + eps)
mperforms = sum([i*j for (i, j) in zip(performs, weights)])
return mperforms, performs
class F1Score(object):
def __init__(self, des="F1Score"):
self.des = des
def __repr__(self):
return "F1Sc"
def __call__(self, y_pred, y_true, threshold=0.5):
"""
args:
y_true : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
threshold : [0.0, 1.0]
return 2*precision*recall/(precision+recall)
"""
batch_size, chs, img_rows, img_cols = y_true.shape
device = y_true.device
if chs == 1:
y_pred = _binarize(y_pred, threshold)
y_true = _binarize(y_true, threshold)
nb_tp = _get_tp(y_pred, y_true)
nb_fp = _get_fp(y_pred, y_true)
nb_fn = _get_fn(y_pred, y_true)
            _precision = nb_tp / (nb_tp + nb_fp + eps)
            _recall = nb_tp / (nb_tp + nb_fn + eps)
            mperforms = 2 * _precision * _recall / (_precision + _recall + eps)
performs = None
else:
y_pred = _argmax(y_pred, 1)
y_true = _argmax(y_true, 1)
performs = torch.zeros(chs, 1).to(device)
weights = _get_weights(y_true, chs)
for ch in range(chs):
                y_true_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
                y_pred_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
y_true_ch[y_true == ch] = 1
y_pred_ch[y_pred == ch] = 1
nb_tp = _get_tp(y_pred_ch, y_true_ch)
nb_fp = _get_fp(y_pred_ch, y_true_ch)
nb_fn = _get_fn(y_pred_ch, y_true_ch)
                _precision = nb_tp / (nb_tp + nb_fp + eps)
                _recall = nb_tp / (nb_tp + nb_fn + eps)
                performs[int(ch)] = 2 * _precision * \
                    _recall / (_precision + _recall + eps)
mperforms = sum([i*j for (i, j) in zip(performs, weights)])
return mperforms, performs
class Kappa(object):
def __init__(self, des="Kappa"):
self.des = des
def __repr__(self):
return "Kapp"
def __call__(self, y_pred, y_true, threshold=0.5):
"""
args:
y_true : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
threshold : [0.0, 1.0]
return (Po-Pe)/(1-Pe)
"""
batch_size, chs, img_rows, img_cols = y_true.shape
device = y_true.device
if chs == 1:
y_pred = _binarize(y_pred, threshold)
y_true = _binarize(y_true, threshold)
nb_tp = _get_tp(y_pred, y_true)
nb_fp = _get_fp(y_pred, y_true)
nb_tn = _get_tn(y_pred, y_true)
nb_fn = _get_fn(y_pred, y_true)
nb_total = nb_tp + nb_fp + nb_tn + nb_fn
Po = (nb_tp + nb_tn) / nb_total
Pe = ((nb_tp + nb_fp) * (nb_tp + nb_fn) +
(nb_fn + nb_tn) * (nb_fp + nb_tn)) / (nb_total**2)
            mperforms = (Po - Pe) / (1 - Pe + eps)
performs = None
else:
y_pred = _argmax(y_pred, 1)
y_true = _argmax(y_true, 1)
performs = torch.zeros(chs, 1).to(device)
weights = _get_weights(y_true, chs)
for ch in range(chs):
                y_true_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
                y_pred_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
y_true_ch[y_true == ch] = 1
y_pred_ch[y_pred == ch] = 1
nb_tp = _get_tp(y_pred_ch, y_true_ch)
nb_fp = _get_fp(y_pred_ch, y_true_ch)
nb_tn = _get_tn(y_pred_ch, y_true_ch)
nb_fn = _get_fn(y_pred_ch, y_true_ch)
nb_total = nb_tp + nb_fp + nb_tn + nb_fn
Po = (nb_tp + nb_tn) / nb_total
Pe = ((nb_tp + nb_fp) * (nb_tp + nb_fn)
+ (nb_fn + nb_tn) * (nb_fp + nb_tn)) / (nb_total**2)
                performs[int(ch)] = (Po - Pe) / (1 - Pe + eps)
mperforms = sum([i*j for (i, j) in zip(performs, weights)])
return mperforms, performs
class Jaccard(object):
def __init__(self, des="Jaccard"):
self.des = des
def __repr__(self):
return "Jacc"
def __call__(self, y_pred, y_true, threshold=0.5):
"""
args:
y_true : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, chs, img_rows, img_cols]
threshold : [0.0, 1.0]
return intersection / (sum-intersection)
"""
batch_size, chs, img_rows, img_cols = y_true.shape
device = y_true.device
if chs == 1:
y_pred = _binarize(y_pred, threshold)
y_true = _binarize(y_true, threshold)
_intersec = torch.sum(y_true * y_pred).float()
_sum = torch.sum(y_true + y_pred).float()
            mperforms = _intersec / (_sum - _intersec + eps)
performs = None
else:
y_pred = _argmax(y_pred, 1)
y_true = _argmax(y_true, 1)
performs = torch.zeros(chs, 1).to(device)
weights = _get_weights(y_true, chs)
for ch in range(chs):
                y_true_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
                y_pred_ch = torch.zeros(batch_size, img_rows, img_cols, device=device)
y_true_ch[y_true == ch] = 1
y_pred_ch[y_pred == ch] = 1
_intersec = torch.sum(y_true_ch * y_pred_ch).float()
_sum = torch.sum(y_true_ch + y_pred_ch).float()
                performs[int(ch)] = _intersec / (_sum - _intersec + eps)
mperforms = sum([i*j for (i, j) in zip(performs, weights)])
return mperforms, performs
class MSE(object):
def __init__(self, des="Mean Square Error"):
self.des = des
def __repr__(self):
return "MSE"
def __call__(self, y_pred, y_true, dim=1, threshold=None):
"""
args:
y_true : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
threshold : [0.0, 1.0]
return mean_squared_error, smaller the better
"""
if threshold:
y_pred = _binarize(y_pred, threshold)
return torch.mean((y_pred - y_true) ** 2)
class PSNR(object):
def __init__(self, des="Peak Signal to Noise Ratio"):
self.des = des
def __repr__(self):
return "PSNR"
def __call__(self, y_pred, y_true, dim=1, threshold=None):
"""
args:
y_true : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
threshold : [0.0, 1.0]
return PSNR, larger the better
"""
if threshold:
y_pred = _binarize(y_pred, threshold)
mse = torch.mean((y_pred - y_true) ** 2)
return 10 * torch.log10(1 / mse)
class SSIM(object):
'''
modified from https://github.com/jorge-pessoa/pytorch-msssim
'''
def __init__(self, des="structural similarity index"):
self.des = des
def __repr__(self):
return "SSIM"
def gaussian(self, w_size, sigma):
gauss = torch.Tensor([math.exp(-(x - w_size//2)**2/float(2*sigma**2)) for x in range(w_size)])
return gauss/gauss.sum()
def create_window(self, w_size, channel=1):
_1D_window = self.gaussian(w_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, w_size, w_size).contiguous()
return window
def __call__(self, y_pred, y_true, w_size=11, size_average=True, full=False):
"""
args:
y_true : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
w_size : int, default 11
size_average : boolean, default True
full : boolean, default False
return ssim, larger the better
"""
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if torch.max(y_pred) > 128:
max_val = 255
else:
max_val = 1
if torch.min(y_pred) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
padd = 0
(_, channel, height, width) = y_pred.size()
window = self.create_window(w_size, channel=channel).to(y_pred.device)
mu1 = F.conv2d(y_pred, window, padding=padd, groups=channel)
mu2 = F.conv2d(y_true, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(y_pred * y_pred, window, padding=padd, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(y_true * y_true, window, padding=padd, groups=channel) - mu2_sq
sigma12 = F.conv2d(y_pred * y_true, window, padding=padd, groups=channel) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
if full:
return ret, cs
return ret
class AE(object):
"""
Modified from matlab : colorangle.m, MATLAB V2019b
angle = acos(RGB1' * RGB2 / (norm(RGB1) * norm(RGB2)));
angle = 180 / pi * angle;
"""
def __init__(self, des='average Angular Error'):
self.des = des
def __repr__(self):
return "AE"
def __call__(self, y_pred, y_true):
"""
args:
y_true : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
y_pred : 4-d ndarray in [batch_size, channels, img_rows, img_cols]
return average AE, smaller the better
"""
dotP = torch.sum(y_pred * y_true, dim=1)
Norm_pred = torch.sqrt(torch.sum(y_pred * y_pred, dim=1))
Norm_true = torch.sqrt(torch.sum(y_true * y_true, dim=1))
ae = 180 / math.pi * torch.acos(dotP / (Norm_pred * Norm_true + eps))
return ae.mean(1).mean(1)
if __name__ == "__main__":
for ch in [3, 1]:
batch_size, img_row, img_col = 1, 224, 224
y_true = torch.rand(batch_size, ch, img_row, img_col)
noise = torch.zeros(y_true.size()).data.normal_(0, std=0.1)
y_pred = y_true + noise
for cuda in [False, True]:
if cuda:
y_pred = y_pred.cuda()
y_true = y_true.cuda()
print('#'*20, 'Cuda : {} ; size : {}'.format(cuda, y_true.size()))
########### similarity metrics
metric = MSE()
acc = metric(y_pred, y_true).item()
print("{} ==> {}".format(repr(metric), acc))
metric = PSNR()
acc = metric(y_pred, y_true).item()
print("{} ==> {}".format(repr(metric), acc))
metric = SSIM()
acc = metric(y_pred, y_true).item()
print("{} ==> {}".format(repr(metric), acc))
            # LPIPS requires an external perceptual network and is not defined
            # in this file, so it is skipped in this sanity check.
metric = AE()
acc = metric(y_pred, y_true).item()
print("{} ==> {}".format(repr(metric), acc))
########### accuracy metrics
metric = OAAcc()
maccu, accu = metric(y_pred, y_true)
print('mAccu:', maccu, 'Accu', accu)
metric = Precision()
mprec, prec = metric(y_pred, y_true)
print('mPrec:', mprec, 'Prec', prec)
metric = Recall()
mreca, reca = metric(y_pred, y_true)
print('mReca:', mreca, 'Reca', reca)
metric = F1Score()
mf1sc, f1sc = metric(y_pred, y_true)
print('mF1sc:', mf1sc, 'F1sc', f1sc)
metric = Kappa()
mkapp, kapp = metric(y_pred, y_true)
print('mKapp:', mkapp, 'Kapp', kapp)
metric = Jaccard()
mjacc, jacc = metric(y_pred, y_true)
print('mJacc:', mjacc, 'Jacc', jacc)
Megatron-LM-master | tasks/vision/segmentation/metrics.py
# Copyright (c) 2020 The MMSegmenation Authors.
#
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this source tree.
import random
import os
import math
import mmcv
import torch
import numpy as np
import torchvision.transforms as T
from torchvision import datasets
from torch.utils.data import Dataset
from megatron import print_rank_0
from megatron import get_args
from PIL import Image, ImageOps, ImageEnhance
import torchvision.transforms as torch_tr
def _is_pil_image(img):
return isinstance(img, Image.Image)
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, img):
"""Brightness distortion."""
if random.randint(0, 1):
return self.convert(
img,
beta=random.uniform(-self.brightness_delta,
self.brightness_delta))
return img
def contrast(self, img):
"""Contrast distortion."""
if random.randint(0, 1):
return self.convert(
img,
alpha=random.uniform(self.contrast_lower, self.contrast_upper))
return img
def saturation(self, img):
"""Saturation distortion."""
if random.randint(0, 1):
img = mmcv.bgr2hsv(img)
img[:, :, 1] = self.convert(
img[:, :, 1],
alpha=random.uniform(self.saturation_lower,
self.saturation_upper))
img = mmcv.hsv2bgr(img)
return img
def hue(self, img):
"""Hue distortion."""
if random.randint(0, 1):
img = mmcv.bgr2hsv(img)
img[:, :,
0] = (img[:, :, 0].astype(int) +
random.randint(-self.hue_delta, self.hue_delta)) % 180
img = mmcv.hsv2bgr(img)
return img
def __call__(self, img):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
img = np.array(img)
# random brightness
img = self.brightness(img)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(0, 1)
if mode == 1:
img = self.contrast(img)
# random saturation
img = self.saturation(img)
# random hue
img = self.hue(img)
# random contrast
if mode == 0:
img = self.contrast(img)
img = Image.fromarray(img.astype(np.uint8)).convert('RGB')
return img
class RandomCrop(object):
"""
Take a random crop from the image.
First the image or crop size may need to be adjusted if the incoming image
is too small...
If the image is smaller than the crop, then:
the image is padded up to the size of the crop
unless 'nopad', in which case the crop size is shrunk to fit the image
A random crop is taken such that the crop fits within the image.
    if cfg.DATASET.TRANSLATION_AUG_FIX is set, we ensure that there's always
    translation randomness of at least that value around the image.
if image < crop_size:
# slide crop within image, random offset
else:
# slide image within crop
"""
def __init__(self, crop_size):
args = get_args()
self.size = crop_size
self.cat_max_ratio = 0.75
self.ignore_index = args.ignore_index
self.pad_color = (0, 0, 0)
def get_crop_bbox(self, img):
"""Randomly get a crop bounding box."""
img_w, img_h = img.size
target_h, target_w = self.size #[H W]
margin_h = max(img_h - target_h, 0)
margin_w = max(img_w - target_w, 0)
offset_h = random.randint(0, margin_h)
offset_w = random.randint(0, margin_w)
crop_y1, crop_y2 = offset_h, offset_h + target_h
crop_x1, crop_x2 = offset_w, offset_w + target_w
return crop_y1, crop_y2, crop_x1, crop_x2
def crop(self, img, crop_bbox):
"""Crop from ``img``"""
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
img = img.crop((crop_x1, crop_y1, crop_x2, crop_y2))
return img
@staticmethod
def crop_in_image(target_w, target_h, w, h, img, mask):
if w == target_w:
x1 = 0
else:
x1 = random.randint(0, w - target_w)
if h == target_h:
y1 = 0
else:
y1 = random.randint(0, h - target_h)
return [img.crop((x1, y1, x1 + target_w, y1 + target_h)),
mask.crop((x1, y1, x1 + target_w, y1 + target_h))]
def __call__(self, img, mask):
w, h = img.size
target_h, target_w = self.size # ASSUME H, W
if w == target_w and h == target_h:
return img, mask
# Pad image if image < crop
if target_h > h:
pad_h = (target_h - h) // 2 + 1
else:
pad_h = 0
if target_w > w:
pad_w = (target_w - w) // 2 + 1
else:
pad_w = 0
border = (pad_w, pad_h, pad_w, pad_h)
if pad_h or pad_w:
img = ImageOps.expand(img, border=border, fill=(0, 0, 0))
mask = ImageOps.expand(mask, border=border, fill=self.ignore_index)
w, h = img.size
crop_bbox = self.get_crop_bbox(img)
if self.cat_max_ratio < 1.:
# Repeat 10 times
for _ in range(10):
seg_temp = self.crop(mask, crop_bbox)
labels, cnt = np.unique(seg_temp, return_counts=True)
cnt = cnt[labels != self.ignore_index]
if len(cnt) > 1 and np.max(cnt) / np.sum(
cnt) < self.cat_max_ratio:
break
crop_bbox = self.get_crop_bbox(img)
# crop the image
img = self.crop(img, crop_bbox)
# crop semantic seg
mask = self.crop(mask, crop_bbox)
assert(img.size[0] == self.size[1] and img.size[1] == self.size[0])
return img, mask
class RandomSizeAndCrop(object):
def __init__(self,
crop_size,
scale_min=0.5,
scale_max=2.0):
self.crop = RandomCrop(crop_size)
self.scale_min = scale_min
self.scale_max = scale_max
def __call__(self, img, mask):
scale_amt = random.uniform(self.scale_min, self.scale_max)
w, h = [int(i * scale_amt) for i in img.size]
resized_img = img.resize((w, h), Image.BICUBIC)
resized_mask = mask.resize((w, h), Image.NEAREST)
img, mask = self.crop(resized_img, resized_mask)
return img, mask
class RandomHorizontallyFlip(object):
def __call__(self, img, mask):
if random.random() < 0.5:
return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(
Image.FLIP_LEFT_RIGHT)
return img, mask
def adjust_brightness(img, brightness_factor):
"""Adjust brightness of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
any non negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image: Brightness adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
def adjust_saturation(img, saturation_factor):
"""Adjust color saturation of an image.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
def adjust_hue(img, hue_factor):
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See https://en.wikipedia.org/wiki/Hue for more details on Hue.
Args:
img (PIL Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image: Hue adjusted image.
"""
if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
input_mode = img.mode
if input_mode in {'L', '1', 'I', 'F'}:
return img
h, s, v = img.convert('HSV').split()
np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
with np.errstate(over='ignore'):
np_h += np.uint8(hue_factor * 255)
h = Image.fromarray(np_h, 'L')
img = Image.merge('HSV', (h, s, v)).convert(input_mode)
return img
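# Illustrative example: the H channel of a PIL 'HSV' image spans 0-255 for a
# full hue circle, so hue_factor = 0.5 adds roughly 127 to every hue value,
# a half-circle shift that maps each color to (nearly) its complement, while
# hue_factor = 0 leaves the image unchanged.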
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness > 0:
brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(
torch_tr.Lambda(lambda img: adjust_brightness(img, brightness_factor)))
if contrast > 0:
contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(
torch_tr.Lambda(lambda img: adjust_contrast(img, contrast_factor)))
if saturation > 0:
saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(
torch_tr.Lambda(lambda img: adjust_saturation(img, saturation_factor)))
if hue > 0:
hue_factor = np.random.uniform(-hue, hue)
transforms.append(
torch_tr.Lambda(lambda img: adjust_hue(img, hue_factor)))
np.random.shuffle(transforms)
transform = torch_tr.Compose(transforms)
return transform
def __call__(self, img):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
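# Minimal smoke test for the stateless transforms above (a hedged sketch,
# not part of the training pipeline); it only runs when this file is
# executed directly, and the synthetic image, mask, and jitter values are
# illustrative choices rather than recommended settings.
if __name__ == "__main__":
    _rgb = Image.fromarray(
        np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8), 'RGB')
    _mask = Image.fromarray(
        np.random.randint(0, 19, (64, 64), dtype=np.uint8), 'L')
    _distorted = PhotoMetricDistortion()(_rgb)
    _jittered = ColorJitter(brightness=0.4, contrast=0.4,
                            saturation=0.4, hue=0.1)(_rgb)
    _flip_img, _flip_mask = RandomHorizontallyFlip()(_rgb, _mask)
    # All transforms preserve the spatial size of their inputs.
    print(_distorted.size, _jittered.size, _flip_img.size, _flip_mask.size)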
|
Megatron-LM-master
|
tasks/vision/segmentation/transforms.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import math
import einops
import torch
import apex
import torch.nn.functional as F
from megatron import get_args
from megatron.model.module import MegatronModule
from megatron.model.vision.vit_backbone import VitBackbone, VitMlpHead
from megatron.model.vision.mit_backbone import mit_b3, mit_b5
from tasks.vision.segmentation.seg_heads import SetrSegmentationHead, SegformerSegmentationHead
class SetrSegmentationModel(MegatronModule):
def __init__(self,
num_classes,
pre_process=True,
post_process=True):
super(SetrSegmentationModel, self).__init__()
args = get_args()
        assert pre_process and post_process
self.hidden_size = args.hidden_size
self.num_classes = num_classes
self.backbone = VitBackbone(
pre_process=pre_process,
post_process=post_process,
class_token=False,
post_layer_norm=False,
drop_path_rate=0.1
)
self.head = SetrSegmentationHead(
self.hidden_size,
self.num_classes
)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
pass
def forward(self, input):
# [b hw c]
hidden_states = self.backbone(input)
result_final = self.head(hidden_states)
return result_final
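# Shape sketch (illustrative): the ViT backbone consumes [b, c, H, W] inputs
# and, with class_token=False, returns [b, (H/patch) * (W/patch), hidden_size]
# token embeddings; SetrSegmentationHead then reshapes them to 2-D feature
# maps and upsamples back to [b, num_classes, H, W].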
class SegformerSegmentationModel(MegatronModule):
def __init__(self,
num_classes,
pre_process=True,
post_process=True):
super(SegformerSegmentationModel, self).__init__()
args = get_args()
self.hidden_size = args.hidden_size
self.num_classes = num_classes
self.pre_process = pre_process
self.post_process = post_process
self.backbone = mit_b5()
self.head = SegformerSegmentationHead(
feature_strides=[4, 8, 16, 32],
in_channels=[64, 128, 320, 512],
embedding_dim=768,
dropout_ratio=0.1
)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
pass
def forward(self, input):
# [b hw c]
hidden_states = self.backbone(input)
hidden_states = self.head(hidden_states)
return hidden_states
|
Megatron-LM-master
|
tasks/vision/segmentation/seg_models.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Vision-classification finetuning/evaluation."""
import torch
import torch.nn.functional as F
from functools import partial
from megatron import get_args, get_timers
from megatron import print_rank_0, print_rank_last
from megatron.core import mpu
from tasks.vision.finetune_utils import finetune
from tasks.vision.finetune_utils import build_data_loader
from megatron.utils import average_losses_across_data_parallel_group
from megatron.schedules import get_forward_backward_func
from tasks.vision.segmentation.metrics import CFMatrix
from tasks.vision.segmentation.data import build_train_valid_datasets
from tasks.vision.segmentation.seg_models import SetrSegmentationModel
from tasks.vision.segmentation.utils import slidingcrops, slidingjoins
def segmentation():
def train_valid_datasets_provider():
"""Build train and validation dataset."""
args = get_args()
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
return train_ds, valid_ds
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
return SetrSegmentationModel(num_classes=args.num_classes,
pre_process=pre_process,
post_process=post_process)
def process_batch(batch):
"""Process batch and produce inputs for the model."""
images = batch[0].cuda().contiguous()
masks = batch[1].cuda().contiguous()
return images, masks
def calculate_weight(masks, num_classes):
bins = torch.histc(masks, bins=num_classes, min=0.0, max=num_classes)
hist_norm = bins.float()/bins.sum()
hist = ((bins != 0).float() * (1. - hist_norm)) + 1.0
return hist
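    # Worked example for calculate_weight (illustrative): with
    # num_classes = 3 and masks covering classes {0, 0, 1}, bins = [2, 1, 0]
    # and hist_norm = [2/3, 1/3, 0], so weight = [4/3, 5/3, 1.0]; rarer
    # (but present) classes receive larger cross-entropy weights.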
def cross_entropy_loss_func(images, masks, output_tensor, non_loss_data=False):
args = get_args()
ignore_index = args.ignore_index
color_table = args.color_table
weight = calculate_weight(masks, args.num_classes)
logits = output_tensor.contiguous().float()
loss = F.cross_entropy(logits, masks, weight=weight, ignore_index=ignore_index)
if not non_loss_data:
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {'lm loss': averaged_loss[0]}
else:
seg_mask = logits.argmax(dim=1)
output_mask = F.embedding(seg_mask, color_table).permute(0, 3, 1, 2)
gt_mask = F.embedding(masks, color_table).permute(0, 3, 1, 2)
return torch.cat((images, output_mask, gt_mask), dim=2), loss
def _cross_entropy_forward_step(batch, model):
"""Simple forward step with cross-entropy loss."""
args = get_args()
timers = get_timers()
# Get the batch.
timers("batch generator", log_level=2).start()
import types
if isinstance(batch, types.GeneratorType):
batch_ = next(batch)
else:
batch_ = batch
images, masks = process_batch(batch_)
timers("batch generator").stop()
# Forward model.
if not model.training:
images, masks, _, _ = slidingcrops(images, masks)
#print_rank_0("images size = {}".format(images.size()))
if not model.training:
output_tensor = torch.cat([model(image) for image in torch.split(images, args.micro_batch_size)])
else:
output_tensor = model(images)
return output_tensor, partial(cross_entropy_loss_func, images, masks)
def calculate_correct_answers(model, dataloader, epoch):
"""Calculate correct over total answers"""
forward_backward_func = get_forward_backward_func()
for m in model:
m.eval()
def loss_func(labels, slices_info, img_size, output_tensor):
args = get_args()
logits = output_tensor
loss_dict = {}
# Compute the correct answers.
probs = logits.contiguous().float().softmax(dim=1)
max_probs, preds = torch.max(probs, 1)
preds = preds.int()
preds, labels = slidingjoins(preds, max_probs, labels, slices_info, img_size)
_, performs = CFMatrix()(preds, labels, args.ignore_index)
loss_dict['performs'] = performs
return 0, loss_dict
# defined inside to capture output_predictions
def correct_answers_forward_step(batch, model):
args = get_args()
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
images, labels = process_batch(batch_)
assert not model.training
images, labels, slices_info, img_size = slidingcrops(images, labels)
# Forward model.
output_tensor = torch.cat([model(image) for image in torch.split(images, args.micro_batch_size)])
return output_tensor, partial(loss_func, labels, slices_info, img_size)
with torch.no_grad():
# For all the batches in the dataset.
performs = None
for _, batch in enumerate(dataloader):
loss_dicts = forward_backward_func(correct_answers_forward_step,
batch, model,
optimizer=None,
timers=None,
forward_only=True)
for loss_dict in loss_dicts:
if performs is None:
performs = loss_dict['performs']
else:
performs += loss_dict['performs']
for m in model:
m.train()
# Reduce.
if mpu.is_pipeline_last_stage():
torch.distributed.all_reduce(performs,
group=mpu.get_data_parallel_group())
# Print on screen.
# performs[int(ch), :] = [nb_tp, nb_fp, nb_tn, nb_fn]
true_positive = performs[:, 0]
false_positive = performs[:, 1]
false_negative = performs[:, 3]
iou = true_positive / (true_positive + false_positive + false_negative)
miou = iou[~torch.isnan(iou)].mean()
return iou.tolist(), miou.item()
def accuracy_func_provider():
"""Provide function that calculates accuracies."""
args = get_args()
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
dataloader = build_data_loader(
valid_ds,
args.micro_batch_size,
num_workers=args.num_workers,
drop_last=(mpu.get_data_parallel_world_size() > 1),
shuffle=False
)
def metrics_func(model, epoch):
print_rank_0("calculating metrics ...")
iou, miou = calculate_correct_answers(model, dataloader, epoch)
print_rank_last(
" >> |epoch: {}| overall: iou = {},"
"miou = {:.4f} %".format(epoch, iou, miou*100.0)
)
return metrics_func
def dump_output_data(data, iteration, writer):
for (output_tb, loss) in data:
# output_tb[output_tb < 0] = 0
# output_tb[output_tb > 1] = 1
writer.add_images("image-outputseg-realseg", output_tb,
global_step=None, walltime=None,
dataformats='NCHW')
"""Finetune/evaluate."""
finetune(
train_valid_datasets_provider,
model_provider,
forward_step=_cross_entropy_forward_step,
process_non_loss_data_func=dump_output_data,
end_of_epoch_callback_provider=accuracy_func_provider,
)
def main():
segmentation()
|
Megatron-LM-master
|
tasks/vision/segmentation/finetune_setr.py
|
import math
import torch
import numpy as np
from megatron import get_args
def slidingcrops(img, mask):
# img: [b c h w]
# mask: [b h w]
args = get_args()
assert args.img_h == args.img_w
crop_size = args.img_h
stride = args.seg_stride
ignore_index = args.ignore_index
n, c, h, w = img.shape
assert h >= crop_size
assert w >= crop_size
long_size = max(h, w)
img_slices, mask_slices, slices_info = [], [], []
if long_size > crop_size:
assert stride <= crop_size
h_step_num = int(math.ceil((h - crop_size) / float(stride))) + 1
w_step_num = int(math.ceil((w - crop_size) / float(stride))) + 1
for yy in range(h_step_num):
for xx in range(w_step_num):
sy, sx = yy * stride, xx * stride
ey, ex = sy + crop_size, sx + crop_size
img_sub = img[:, :, sy: ey, sx: ex]
mask_sub = mask[:, sy: ey, sx: ex]
# padding
sub_h, sub_w = img_sub.shape[2:]
pad_h = max(crop_size - sub_h, 0)
pad_w = max(crop_size - sub_w, 0)
img_sub = torch.nn.functional.pad(img_sub, pad=(0, pad_w, 0, pad_h), value=ignore_index)
mask_sub = torch.nn.functional.pad(mask_sub, pad=(0, pad_w, 0, pad_h))
img_slices.append(img_sub)
mask_slices.append(mask_sub)
slices_info.append([sy, ey, sx, ex, sub_h, sub_w])
return torch.cat(img_slices), torch.cat(mask_slices), slices_info, (h, w)
else:
return img, mask, [[0, h, 0, w, h, w]], (h, w)
def slidingjoins(preds, probs, labels, slices_info, img_size):
args = get_args()
num_slices = len(slices_info)
if num_slices == 1:
return preds, labels
h, w = img_size
split_size = args.micro_batch_size
preds_split = torch.split(preds, split_size)
probs_split = torch.split(probs, split_size)
labels_split = torch.split(labels, split_size)
assert(len(preds_split) == num_slices)
total_max_probs = torch.zeros((split_size, h, w), dtype=torch.float, device='cuda')
total_preds = torch.zeros((split_size, h, w), dtype=torch.int, device='cuda')
total_labels = torch.zeros((split_size, h, w), dtype=torch.int, device='cuda')
for i in range(num_slices):
sy, ey, sx, ex, sub_h, sub_w = slices_info[i]
assert sy + sub_h <= h
assert sx + sub_w <= w
curr_max_probs = total_max_probs[:, sy:sy + sub_h, sx:sx + sub_w]
curr_preds = total_preds[:, sy:sy + sub_h, sx:sx + sub_w]
local_max_probs = probs_split[i][:, :sub_h, : sub_w]
local_preds = preds_split[i][:, :sub_h, :sub_w]
result_max_probs = torch.maximum(curr_max_probs, local_max_probs)
result_preds = torch.where(curr_max_probs >= local_max_probs, curr_preds, local_preds)
total_max_probs[:, sy:sy + sub_h, sx:sx + sub_w] = result_max_probs
total_preds[:, sy:sy + sub_h, sx:sx + sub_w] = result_preds
total_labels[:, sy:sy + sub_h, sx:sx + sub_w] = labels_split[i][0, :sub_h, :sub_w]
return total_preds, total_labels
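# Illustrative crop arithmetic: with crop_size = 768 and seg_stride = 512 on
# a 1024 x 2048 input, h_step_num = ceil((1024 - 768) / 512) + 1 = 2 and
# w_step_num = ceil((2048 - 768) / 512) + 1 = 4, so slidingcrops emits 8
# overlapping 768 x 768 tiles; slidingjoins later stitches them back,
# keeping the most confident prediction wherever tiles overlap.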
|
Megatron-LM-master
|
tasks/vision/segmentation/utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Vision-classification finetuning/evaluation."""
import numpy as np
import torch
import torch.nn.functional as F
from functools import partial
from megatron import get_args, get_timers
from megatron import print_rank_0, print_rank_last
from megatron.core import mpu
from tasks.vision.finetune_utils import finetune
from tasks.vision.finetune_utils import build_data_loader
from megatron.utils import average_losses_across_data_parallel_group
from megatron.schedules import get_forward_backward_func
from tasks.vision.segmentation.data import build_train_valid_datasets
from tasks.vision.segmentation.seg_models import SegformerSegmentationModel
from megatron.model.vision.utils import resize
def calculate_iou(hist_data):
acc = np.diag(hist_data).sum() / hist_data.sum()
acc_cls = np.diag(hist_data) / hist_data.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
divisor = hist_data.sum(axis=1) + hist_data.sum(axis=0) - \
np.diag(hist_data)
iu = np.diag(hist_data) / divisor
return iu, acc, acc_cls
def fast_hist(pred, gtruth, num_classes):
# mask indicates pixels we care about
mask = (gtruth >= 0) & (gtruth < num_classes)
    # stretch ground truth labels by num_classes (e.g. with num_classes = 19):
# class 0 -> 0
# class 1 -> 19
# class 18 -> 342
#
# TP at 0 + 0, 1 + 1, 2 + 2 ...
#
# TP exist where value == num_classes*class_id + class_id
# FP = row[class].sum() - TP
# FN = col[class].sum() - TP
hist = np.bincount(num_classes * gtruth[mask].astype(int) + pred[mask],
minlength=num_classes ** 2)
hist = hist.reshape(num_classes, num_classes)
return hist
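# Worked example (illustrative): with num_classes = 3, gtruth = [0, 1, 2, 1]
# and pred = [0, 2, 2, 1], the bincount indices 3 * gt + pred are
# [0, 5, 8, 4], so hist has ones at (0, 0), (1, 2), (1, 1) and (2, 2).
# Row sums count ground-truth pixels, column sums count predictions, and
# the diagonal holds the true positives consumed by calculate_iou.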
def segmentation():
def train_valid_datasets_provider():
"""Build train and validation dataset."""
args = get_args()
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
return train_ds, valid_ds
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
model = SegformerSegmentationModel(num_classes=args.num_classes,
pre_process=pre_process,
post_process=post_process)
print_rank_0("model = {}".format(model))
return model
def process_batch(batch):
"""Process batch and produce inputs for the model."""
images = batch[0].cuda().contiguous()
masks = batch[1].cuda().contiguous()
return images, masks
def calculate_weight(masks, num_classes):
bins = torch.histc(masks, bins=num_classes, min=0.0, max=num_classes)
hist_norm = bins.float()/bins.sum()
hist = ((bins != 0).float() * (1. - hist_norm)) + 1.0
return hist
def cross_entropy_loss_func(images, masks, output_tensor,
non_loss_data=False):
args = get_args()
ignore_index = args.ignore_index
color_table = args.color_table
logits = output_tensor.contiguous().float()
logits = resize(logits, size=masks.shape[1:],
mode='bilinear', align_corners=False)
# Cross-entropy loss.
# weight = calculate_weight(masks, num_classes)
loss = F.cross_entropy(logits, masks, ignore_index=ignore_index)
if not non_loss_data:
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {'lm loss': averaged_loss[0]}
else:
seg_mask = logits.argmax(dim=1)
output_mask = F.embedding(seg_mask, color_table).permute(0, 3, 1, 2)
gt_mask = F.embedding(masks, color_table).permute(0, 3, 1, 2)
return torch.cat((images, output_mask, gt_mask), dim=2), loss
def _cross_entropy_forward_step(batch, model):
"""Simple forward step with cross-entropy loss."""
timers = get_timers()
# Get the batch.
timers("batch generator", log_level=2).start()
import types
if isinstance(batch, types.GeneratorType):
batch_ = next(batch)
else:
batch_ = batch
images, masks = process_batch(batch_)
timers("batch generator").stop()
# Forward model.
output_tensor = model(images)
return output_tensor, partial(cross_entropy_loss_func, images, masks)
def calculate_correct_answers(model, dataloader, epoch):
"""Calculate correct over total answers"""
forward_backward_func = get_forward_backward_func()
for m in model:
m.eval()
def loss_func(labels, output_tensor):
args = get_args()
logits = output_tensor
logits = resize(logits, size=labels.shape[1:],
mode='bilinear', align_corners=False)
loss_dict = {}
# Compute the correct answers.
probs = logits.contiguous().float().softmax(dim=1)
max_probs, preds = torch.max(probs, 1)
preds = preds.cpu().numpy()
performs = fast_hist(preds.flatten(),
labels.cpu().numpy().flatten(),
args.ignore_index)
loss_dict['performs'] = performs
return 0, loss_dict
# defined inside to capture output_predictions
def correct_answers_forward_step(batch, model):
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
images, labels = process_batch(batch_)
# Forward model.
output_tensor = model(images)
return output_tensor, partial(loss_func, labels)
with torch.no_grad():
# For all the batches in the dataset.
performs = None
for _, batch in enumerate(dataloader):
loss_dicts = forward_backward_func(correct_answers_forward_step,
batch, model,
optimizer=None,
timers=None,
forward_only=True)
for loss_dict in loss_dicts:
if performs is None:
performs = loss_dict['performs']
else:
performs += loss_dict['performs']
for m in model:
m.train()
# Reduce.
if mpu.is_pipeline_last_stage():
performs_tensor = torch.cuda.FloatTensor(performs)
torch.distributed.all_reduce(performs_tensor,
group=mpu.get_data_parallel_group())
hist = performs_tensor.cpu().numpy()
iu, acc, acc_cls = calculate_iou(hist)
miou = np.nanmean(iu)
return iu, miou
def accuracy_func_provider():
"""Provide function that calculates accuracies."""
args = get_args()
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
dataloader = build_data_loader(
valid_ds,
args.micro_batch_size,
num_workers=args.num_workers,
drop_last=(mpu.get_data_parallel_world_size() > 1),
shuffle=False
)
def metrics_func(model, epoch):
print_rank_0("calculating metrics ...")
iou, miou = calculate_correct_answers(model, dataloader, epoch)
print_rank_last(
" >> |epoch: {}| overall: iou = {},"
"miou = {:.4f} %".format(epoch, iou, miou*100.0)
)
return metrics_func
def dump_output_data(data, iteration, writer):
for (output_tb, loss) in data:
# output_tb[output_tb < 0] = 0
# output_tb[output_tb > 1] = 1
writer.add_images("image-outputseg-realseg", output_tb,
global_step=None, walltime=None,
dataformats='NCHW')
"""Finetune/evaluate."""
finetune(
train_valid_datasets_provider,
model_provider,
forward_step=_cross_entropy_forward_step,
process_non_loss_data_func=dump_output_data,
end_of_epoch_callback_provider=accuracy_func_provider,
)
def main():
segmentation()
|
Megatron-LM-master
|
tasks/vision/segmentation/finetune_segformer.py
|
import random
import os
import math
import mmcv
import torch
import numpy as np
import torchvision.transforms as T
from torchvision import datasets
from torch.utils.data import Dataset
from megatron.data.autoaugment import ImageNetPolicy
from tasks.vision.segmentation.cityscapes import Cityscapes
import tasks.vision.segmentation.transforms as ET
from megatron.data.autoaugment import ImageNetPolicy
from megatron import get_args
from PIL import Image, ImageOps
class VitSegmentationJointTransform():
def __init__(self, train=True, resolution=None):
self.train = train
if self.train:
self.transform0 = ET.RandomSizeAndCrop(resolution)
self.transform1 = ET.RandomHorizontallyFlip()
def __call__(self, img, mask):
if self.train:
img, mask = self.transform0(img, mask)
img, mask = self.transform1(img, mask)
return img, mask
class VitSegmentationImageTransform():
def __init__(self, train=True, resolution=None):
args = get_args()
self.train = train
assert args.fp16 or args.bf16
self.data_type = torch.half if args.fp16 else torch.bfloat16
self.mean_std = args.mean_std
if self.train:
assert resolution is not None
self.transform = T.Compose([
ET.PhotoMetricDistortion(),
T.ToTensor(),
T.Normalize(*self.mean_std),
T.ConvertImageDtype(self.data_type)
])
else:
self.transform = T.Compose([
T.ToTensor(),
T.Normalize(*self.mean_std),
T.ConvertImageDtype(self.data_type)
])
def __call__(self, input):
output = self.transform(input)
return output
class VitSegmentationTargetTransform():
def __init__(self, train=True, resolution=None):
self.train = train
def __call__(self, input):
output = torch.from_numpy(np.array(input, dtype=np.int32)).long()
return output
class RandomSeedSegmentationDataset(Dataset):
def __init__(self,
dataset,
joint_transform,
image_transform,
target_transform):
args = get_args()
self.base_seed = args.seed
self.curr_seed = self.base_seed
self.dataset = dataset
self.joint_transform = joint_transform
self.image_transform = image_transform
self.target_transform = target_transform
def __len__(self):
return len(self.dataset)
def set_epoch(self, epoch):
self.curr_seed = self.base_seed + 100 * epoch
def __getitem__(self, idx):
seed = idx + self.curr_seed
img, mask = self.dataset[idx]
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
img, mask = self.joint_transform(img, mask)
img = self.image_transform(img)
mask = self.target_transform(mask)
return img, mask
def build_cityscapes_train_valid_datasets(data_path, image_size):
args = get_args()
args.num_classes = Cityscapes.num_classes
args.ignore_index = Cityscapes.ignore_index
args.color_table = Cityscapes.color_table
args.mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
train_joint_transform = \
VitSegmentationJointTransform(train=True, resolution=image_size)
val_joint_transform = \
VitSegmentationJointTransform(train=False, resolution=image_size)
train_image_transform = \
VitSegmentationImageTransform(train=True, resolution=image_size)
val_image_transform = \
VitSegmentationImageTransform(train=False, resolution=image_size)
train_target_transform = \
VitSegmentationTargetTransform(train=True, resolution=image_size)
val_target_transform = \
VitSegmentationTargetTransform(train=False, resolution=image_size)
# training dataset
train_data = Cityscapes(
root=data_path[0],
split='train',
mode='fine',
resolution=image_size
)
train_data = RandomSeedSegmentationDataset(
train_data,
joint_transform=train_joint_transform,
image_transform=train_image_transform,
target_transform=train_target_transform)
# validation dataset
val_data = Cityscapes(
root=data_path[0],
split='val',
mode='fine',
resolution=image_size
)
val_data = RandomSeedSegmentationDataset(
val_data,
joint_transform=val_joint_transform,
image_transform=val_image_transform,
target_transform=val_target_transform)
return train_data, val_data
def build_train_valid_datasets(data_path, image_size):
return build_cityscapes_train_valid_datasets(data_path, image_size)
|
Megatron-LM-master
|
tasks/vision/segmentation/data.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import math
import einops
import torch
import apex
import torch.nn.functional as F
from megatron import get_args
from megatron.model import LayerNorm
from megatron.model.module import MegatronModule
from megatron.model.vision.utils import resize
class SetrSegmentationHead(MegatronModule):
def __init__(self, hidden_size, num_classes):
super(SetrSegmentationHead, self).__init__()
args = get_args()
self.hidden_size = hidden_size
self.num_classes = num_classes
self.img_h = args.img_h
self.img_w = args.img_w
self.patch_dim = args.patch_dim
self.layernorm = LayerNorm(hidden_size, eps=args.layernorm_epsilon)
self.conv_0 = torch.nn.Conv2d(hidden_size, hidden_size,
1, 1, bias=False)
self.norm_0 = apex.parallel.SyncBatchNorm(hidden_size)
self.conv_1 = torch.nn.Conv2d(hidden_size, num_classes, 1, 1)
def to_2D(self, x):
n, hw, c = x.shape
h = self.img_h // self.patch_dim
w = self.img_w // self.patch_dim
assert(hw == h * w)
x = x.transpose(1, 2).reshape(n, c, h, w)
return x
def forward(self, hidden_states):
# [b c h w]
hidden_states = self.layernorm(hidden_states)
hidden_states = self.to_2D(hidden_states)
hidden_states = self.conv_0(hidden_states)
hidden_states = self.norm_0(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.conv_1(hidden_states)
# [b c h w]
result = F.interpolate(hidden_states,
size=(self.img_h, self.img_w),
mode='bilinear')
return result
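# Shape walk-through (illustrative): for a 512 x 512 input with
# patch_dim = 16, hw = 32 * 32 = 1024, so to_2D turns [b, 1024, hidden]
# into [b, hidden, 32, 32]; the two 1x1 convolutions then produce
# [b, num_classes, 32, 32] and bilinear interpolation restores
# [b, num_classes, 512, 512].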
class MLP(torch.nn.Module):
"""
Linear Embedding
"""
def __init__(self, input_dim=2048, embed_dim=768):
super().__init__()
self.proj = torch.nn.Linear(input_dim, embed_dim)
def forward(self, x):
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class SegformerSegmentationHead(MegatronModule):
def __init__(self, feature_strides, in_channels,
embedding_dim, dropout_ratio):
super(SegformerSegmentationHead, self).__init__()
assert len(feature_strides) == len(in_channels)
assert min(feature_strides) == feature_strides[0]
args = get_args()
self.feature_strides = feature_strides
self.in_channels = in_channels
self.embedding_dim = embedding_dim
self.num_classes = args.num_classes
self.dropout_ratio = dropout_ratio
c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = \
self.in_channels
self.linear_c4 = MLP(input_dim=c4_in_channels,
embed_dim=self.embedding_dim)
self.linear_c3 = MLP(input_dim=c3_in_channels,
embed_dim=self.embedding_dim)
self.linear_c2 = MLP(input_dim=c2_in_channels,
embed_dim=self.embedding_dim)
self.linear_c1 = MLP(input_dim=c1_in_channels,
embed_dim=self.embedding_dim)
self.conv_fuse = torch.nn.Conv2d(self.embedding_dim*4,
self.embedding_dim, 1, 1)
self.norm = apex.parallel.SyncBatchNorm(self.embedding_dim)
self.dropout = torch.nn.Dropout2d(self.dropout_ratio)
self.linear_pred = torch.nn.Conv2d(self.embedding_dim,
self.num_classes,
kernel_size=1)
def forward(self, inputs):
c1, c2, c3, c4 = inputs
############## MLP decoder on C1-C4 ###########
n, _, h, w = c4.shape
_c4 = self.linear_c4(c4).permute(0, 2, 1).reshape(n, -1, c4.shape[2], c4.shape[3])
_c4 = resize(_c4, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c3 = self.linear_c3(c3).permute(0, 2, 1).reshape(n, -1, c3.shape[2], c3.shape[3])
_c3 = resize(_c3, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c2 = self.linear_c2(c2).permute(0, 2, 1).reshape(n, -1, c2.shape[2], c2.shape[3])
_c2 = resize(_c2, size=c1.size()[2:], mode='bilinear', align_corners=False)
_c1 = self.linear_c1(c1).permute(0, 2, 1).reshape(n, -1, c1.shape[2], c1.shape[3])
_c = self.conv_fuse(torch.cat([_c4, _c3, _c2, _c1], dim=1))
x = self.norm(_c)
x = F.relu(x, inplace=True)
x = self.dropout(x)
x = self.linear_pred(x)
return x
|
Megatron-LM-master
|
tasks/vision/segmentation/seg_heads.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Race."""
from megatron import get_args
from megatron import print_rank_0
from megatron import get_tokenizer
from megatron.model.multiple_choice import MultipleChoice
from tasks.eval_utils import accuracy_func_provider
from tasks.finetune_utils import finetune
from tasks.race.data import RaceDataset
from megatron.arguments import core_transformer_config_from_args
def train_valid_datasets_provider():
"""Provide train and validation datasets."""
args = get_args()
tokenizer = get_tokenizer()
train_dataset = RaceDataset('training', args.train_data,
tokenizer, args.seq_length)
valid_dataset = RaceDataset('validation', args.valid_data,
tokenizer, args.seq_length)
return train_dataset, valid_dataset
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
config = core_transformer_config_from_args(get_args())
print_rank_0('building multichoice model for RACE ...')
model = MultipleChoice(config=config,
num_tokentypes=2,
pre_process=pre_process,
post_process=post_process)
return model
def metrics_func_provider():
"""Privde metrics callback function."""
args = get_args()
tokenizer = get_tokenizer()
def single_dataset_provider(datapath):
name = datapath.split('RACE')[-1].strip('/').replace('/', '-')
return RaceDataset(name, [datapath], tokenizer, args.seq_length)
return accuracy_func_provider(single_dataset_provider)
def main():
finetune(train_valid_datasets_provider, model_provider,
end_of_epoch_callback_provider=metrics_func_provider)
|
Megatron-LM-master
|
tasks/race/finetune.py
|
import glob
import json
import os
import time
from torch.utils.data import Dataset
from megatron import print_rank_0
from tasks.data_utils import build_sample
from tasks.data_utils import build_tokens_types_paddings_from_ids
from tasks.data_utils import clean_text
NUM_CHOICES = 4
MAX_QA_LENGTH = 128
class RaceDataset(Dataset):
def __init__(self, dataset_name, datapaths, tokenizer, max_seq_length,
max_qa_length=MAX_QA_LENGTH):
self.dataset_name = dataset_name
print_rank_0(' > building RACE dataset for {}:'.format(
self.dataset_name))
string = ' > paths:'
for path in datapaths:
string += ' ' + path
print_rank_0(string)
self.samples = []
for datapath in datapaths:
self.samples.extend(process_single_datapath(datapath, tokenizer,
max_qa_length,
max_seq_length))
print_rank_0(' >> total number of samples: {}'.format(
len(self.samples)))
        # Each "sample" holds NUM_CHOICES sub-samples that will collapse
        # into the batch dimension.
self.sample_multiplier = NUM_CHOICES
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
return self.samples[idx]
def process_single_datapath(datapath, tokenizer, max_qa_length, max_seq_length):
"""Read in RACE files, combine, clean-up, tokenize, and convert to
samples."""
print_rank_0(' > working on {}'.format(datapath))
start_time = time.time()
# Get list of files.
filenames = glob.glob(os.path.join(datapath, '*.txt'))
samples = []
num_docs = 0
num_questions = 0
num_samples = 0
# Load all the files
for filename in filenames:
with open(filename, 'r') as f:
for line in f:
data = json.loads(line)
num_docs += 1
context = data["article"]
questions = data["questions"]
choices = data["options"]
answers = data["answers"]
# Check the length.
assert len(questions) == len(answers)
assert len(questions) == len(choices)
# Context: clean up and convert to ids.
context = clean_text(context)
context_ids = tokenizer.tokenize(context)
# Loop over questions.
for qi, question in enumerate(questions):
num_questions += 1
# Label.
label = ord(answers[qi]) - ord("A")
assert label >= 0
assert label < NUM_CHOICES
assert len(choices[qi]) == NUM_CHOICES
# For each question, build num-choices samples.
ids_list = []
types_list = []
paddings_list = []
for ci in range(NUM_CHOICES):
choice = choices[qi][ci]
# Merge with choice.
if "_" in question:
qa = question.replace("_", choice)
else:
qa = " ".join([question, choice])
# Clean QA.
qa = clean_text(qa)
# Tokenize.
qa_ids = tokenizer.tokenize(qa)
# Trim if needed.
if len(qa_ids) > max_qa_length:
qa_ids = qa_ids[0:max_qa_length]
# Build the sample.
ids, types, paddings \
= build_tokens_types_paddings_from_ids(
qa_ids, context_ids, max_seq_length,
tokenizer.cls, tokenizer.sep, tokenizer.pad)
ids_list.append(ids)
types_list.append(types)
paddings_list.append(paddings)
# Convert to numpy and add to samples
samples.append(build_sample(ids_list, types_list,
paddings_list, label,
num_samples))
num_samples += 1
elapsed_time = time.time() - start_time
    print_rank_0(' > processed {} documents, {} questions, and {} samples'
' in {:.2f} seconds'.format(num_docs, num_questions,
num_samples, elapsed_time))
return samples
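# Illustrative example of the QA merge above: a cloze question
# "The sky is _ ." with choice "blue" becomes "The sky is blue .", while a
# plain question "What color is the sky?" has the choice appended, giving
# "What color is the sky? blue".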
|
Megatron-LM-master
|
tasks/race/data.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
from megatron import get_args, print_rank_0
from megatron.checkpointing import load_biencoder_checkpoint
from megatron.data.orqa_wiki_dataset import get_open_retrieval_wiki_dataset
from megatron.data.realm_index import OpenRetreivalDataStore, FaissMIPSIndex
from megatron.model.biencoder_model import get_model_provider
from megatron.training import get_model
from tasks.orqa.unsupervised.nq import get_nq_dataset
from tasks.orqa.unsupervised.nq import get_one_epoch_nq_dataloader
from tasks.orqa.unsupervised.nq import process_nq_batch
from tasks.orqa.unsupervised.qa_utils import calculate_matches
class ORQAEvaluator(object):
def __init__(self):
args = get_args()
self.embedding_size = args.hidden_size
self.faiss_use_gpu = args.faiss_use_gpu
self.evidence_embedder_obj = None
self.evidence_dataset = None
self.mips_index = None
self.eval_dataset = None
# Get Evidence (Wikipedia) dataset
self.get_evidence_dataset()
# Load query encoder checkpoint
only_query_model = True
if args.biencoder_shared_query_context_model:
only_query_model = False
model = get_model(get_model_provider(only_query_model=only_query_model,
biencoder_shared_query_context_model=args.biencoder_shared_query_context_model))
self.model = load_biencoder_checkpoint(model,
only_query_model=only_query_model)
assert len(self.model) == 1
self.model[0].eval()
# Load faiss indexer
self.faiss_wrapper()
def get_evidence_embedding(self):
# This will load the embedding from the embedding path
self.evidence_embedder_obj = OpenRetreivalDataStore(load_from_path=True)
def get_evidence_dataset(self):
self.evidence_dataset = get_open_retrieval_wiki_dataset()
def faiss_wrapper(self):
        # Initialize the FAISS wrapper on local rank 0 only, as the evidence
        # embeddings are distributed over all the GPUs in a node and FAISS is
        # not thread-safe.
args = get_args()
if args.local_rank == 0:
# Get evidence embeddings computed using context encoder
self.get_evidence_embedding()
assert self.evidence_embedder_obj is not None
self.mips_index = FaissMIPSIndex(embed_size=self.embedding_size,
embed_data=self.evidence_embedder_obj,
use_gpu=self.faiss_use_gpu)
# Wait for the FAISS index to be initialized in all the nodes
torch.distributed.barrier()
def generate_query_vectors(self, qa_data, split):
self.eval_dataset = get_nq_dataset(qa_data, split)
dataloader = get_one_epoch_nq_dataloader(self.eval_dataset)
query_vectors = []
reference_list = []
for batch in dataloader:
# batch also has query_tokens and query_pad_data
query_tokens, query_mask, query_types, \
query_len, reference = process_nq_batch(batch)
assert len(self.model) == 1
unwrapped_model = self.model[0]
while not hasattr(unwrapped_model, 'embed_text'):
unwrapped_model = unwrapped_model.module
with torch.no_grad():
query_logits = unwrapped_model.embed_text(
unwrapped_model.query_model, query_tokens,
query_mask, query_types)
reference_list.extend(reference)
query_vectors.extend(query_logits.split(1, dim=0))
if len(query_vectors) % 100 == 0:
print_rank_0('Encoded queries {}'.format(len(query_vectors)))
query_tensor = torch.cat(query_vectors, dim=0)
print_rank_0('Total encoded queries tensor {}'.format(query_tensor.size()))
assert query_tensor.size(0) == len(self.eval_dataset)
return query_tensor, reference_list
def evaluate(self, qa_data, split):
args = get_args()
query_tensor, reference_list = self.generate_query_vectors(qa_data, \
split)
local_rank = args.local_rank
rank = torch.distributed.get_rank()
device_count = torch.cuda.device_count()
num_nodes = torch.distributed.get_world_size() // device_count
node_id = rank // device_count
for node in range(num_nodes):
start_rank = node * device_count
end_rank = (node + 1) * device_count
ranks_list = list(range(start_rank, end_rank))
node_group = torch.distributed.new_group(ranks=ranks_list)
if node_id == node:
device_start_rank = start_rank
group = node_group
input_ = torch.empty_like(query_tensor).copy_(query_tensor).detach_()
tensor_list = [torch.empty_like(input_) for _ in range(device_count)]
torch.distributed.all_gather(tensor_list, query_tensor, group=group)
if local_rank == 0 and self.mips_index is not None:
all_query_tensor = torch.cat(tensor_list, dim=0).contiguous()
distance, topkindex = self.mips_index.search_mips_index(
all_query_tensor, top_k=args.faiss_topk_retrievals,
reconstruct=False)
distance = torch.from_numpy(distance).cuda()
topkindex = torch.LongTensor(topkindex).cuda()
if local_rank != 0:
distance = torch.empty(device_count * len(query_tensor), \
args.faiss_topk_retrievals, dtype=torch.float32).cuda()
topkindex = torch.empty(device_count * len(query_tensor), \
args.faiss_topk_retrievals, dtype=torch.int64).cuda()
torch.distributed.broadcast(distance, src=device_start_rank, \
group=group)
torch.distributed.broadcast(topkindex, src=device_start_rank, \
group=group)
distance = torch.split(distance, len(query_tensor), dim=0)\
[local_rank]
topkindex = torch.split(topkindex, len(query_tensor), dim=0)\
[local_rank]
top_ids_and_scores = []
for darray, topkarray in zip(distance, topkindex):
top_ids_and_scores.append((topkarray.tolist(), darray.tolist()))
passages = self.evidence_dataset.id2text
match_stats = calculate_matches(passages,
reference_list,
top_ids_and_scores,
workers_num=args.num_workers,
match_type=args.faiss_match)
top_k_hits = match_stats.top_k_hits
print_rank_0("{} SET RESULTS".format(split))
print_rank_0("topk-{} documents hits {}".format(
args.faiss_topk_retrievals, top_k_hits))
top_k_hits = [v / len(top_ids_and_scores) for v in top_k_hits]
print_rank_0("top-k documents hits accuracy {}".format(top_k_hits))
for i in args.retriever_report_topk_accuracies:
print_rank_0("top-{}: {:.2f}".format(i, top_k_hits[i-1] * 100))
return
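# Illustrative layout of the search above: with 2 nodes of 8 GPUs each,
# ranks 0-7 and ranks 8-15 form separate groups; each group all-gathers its
# query tensors, local rank 0 alone queries the FAISS index, and the top-k
# distances/indices are broadcast back and split so every rank recovers
# exactly the results for its own queries.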
|
Megatron-LM-master
|
tasks/orqa/evaluate_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Main tasks functionality."""
from megatron import get_args, print_rank_0
from megatron.indexer import IndexBuilder
from tasks.orqa.evaluate_utils import ORQAEvaluator
def main():
"""
Main program
"""
args = get_args()
"""
Create a BlockData data structure by running an IndexBuilder over an
ICT Dataset and then evaluate on NQ task
"""
print_rank_0("Starting index builder!")
index_builder = IndexBuilder()
index_builder.build_and_save_index()
print_rank_0("Build and save indices: done!")
print_rank_0("Starting evaluations!")
# Set up the model and evaluator
evaluator = ORQAEvaluator()
# Run evaluation
if args.qa_data_dev is not None:
evaluator.evaluate(args.qa_data_dev, "DEV")
if args.qa_data_test is not None:
evaluator.evaluate(args.qa_data_test, "TEST")
|
Megatron-LM-master
|
tasks/orqa/evaluate_orqa.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# The following code has been taken from
# https://github.com/facebookresearch/DPR, which is CC-BY-NC 4.0
# licensed as of now. More details on the license can be found
# at https://github.com/facebookresearch/DPR/blob/master/LICENSE
"""
Set of utilities for Q&A results validation tasks - Retriver passage
validation and Reader predicted answer validation
"""
import collections
import logging
import string
import unicodedata
from functools import partial
from multiprocessing import Pool as ProcessPool
from typing import Tuple, List, Dict
import regex as re
from tasks.orqa.unsupervised.tokenizers import SimpleTokenizer
logger = logging.getLogger(__name__)
QAMatchStats = collections.namedtuple('QAMatchStats', ['top_k_hits',\
'questions_doc_hits'])
def calculate_matches(all_docs: Dict[object, Tuple[str, str]],
answers: List[List[str]], closest_docs: List[Tuple[List[object],
List[float]]], workers_num: int, match_type: str) -> QAMatchStats:
"""
Evaluates answers presence in the set of documents. This function is
supposed to be used with a large collection of documents and results.
It internally forks multiple sub-processes for evaluation and then
merges results
:param all_docs: dictionary of the entire documents database.
doc_id -> (doc_text, title)
    :param answers: list of answer lists, one list per question
:param closest_docs: document ids of the top results along with their
scores
    :param workers_num: number of parallel processes used for validation
:param match_type: type of answer matching. Refer to has_answer code for
available options
:return: matching information tuple.
    top_k_hits - a list where the index is the number of top documents
    retrieved and the value is the total number of valid matches across
    the entire dataset.
questions_doc_hits - more detailed info with answer matches for every
question and every retrieved document
"""
global dpr_all_documents
dpr_all_documents = all_docs
tok_opts = {}
tokenizer = SimpleTokenizer(**tok_opts)
processes = ProcessPool(
processes=workers_num,
)
logger.info('Matching answers in top docs...')
get_score_partial = partial(check_answer, match_type=match_type,
tokenizer=tokenizer)
questions_answers_docs = zip(answers, closest_docs)
scores = processes.map(get_score_partial, questions_answers_docs)
logger.info('Per question validation results len=%d', len(scores))
n_docs = len(closest_docs[0][0])
top_k_hits = [0] * n_docs
for question_hits in scores:
best_hit = next((i for i, x in enumerate(question_hits) if x), None)
if best_hit is not None:
top_k_hits[best_hit:] = [v + 1 for v in top_k_hits[best_hit:]]
return QAMatchStats(top_k_hits, scores)
def check_answer(questions_answers_docs, tokenizer, match_type) -> List[bool]:
"""
Search through all the top docs to see if they have any of the answers.
"""
answers, (doc_ids, doc_scores) = questions_answers_docs
global dpr_all_documents
hits = []
for i, doc_id in enumerate(doc_ids):
doc = dpr_all_documents[doc_id]
text = doc[0]
answer_found = False
if text is None: # cannot find the document for some reason
logger.warning("no doc in db")
hits.append(False)
continue
if has_answer(answers, text, tokenizer, match_type):
answer_found = True
hits.append(answer_found)
return hits
def has_answer(answers, text, tokenizer, match_type) -> bool:
"""
Check if a document contains an answer string.
If `match_type` is string, token matching is done between the text
and answer.
If `match_type` is regex, we search the whole text with the regex.
"""
text = _normalize(text)
if match_type == 'string':
# Answer is a list of possible strings
text = tokenizer.tokenize(text).words(uncased=True)
for single_answer in answers:
single_answer = _normalize(single_answer)
single_answer = tokenizer.tokenize(single_answer)
single_answer = single_answer.words(uncased=True)
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
elif match_type == 'regex':
# Answer is a regex
for single_answer in answers:
single_answer = _normalize(single_answer)
if regex_match(text, single_answer):
return True
return False
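# Illustrative example: has_answer(["Paris"], "paris is lovely", tokenizer,
# "string") returns True because matching is token-based and uncased; a
# naive substring test would also fire on "comparison" (which contains
# "paris"), a false positive the token alignment avoids.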
def regex_match(text, pattern):
"""Test if a regex pattern is contained within a text."""
try:
pattern = re.compile(
pattern,
flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,
)
except BaseException:
return False
return pattern.search(text) is not None
# function for the reader model answer validation
def exact_match_score(prediction, ground_truth):
return _normalize_answer(prediction) == _normalize_answer(ground_truth)
def _normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _normalize(text):
return unicodedata.normalize('NFD', text)
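# Illustrative example: _normalize_answer("The Eiffel Tower!") and
# _normalize_answer("eiffel tower") both reduce to "eiffel tower", so
# exact_match_score treats them as equal despite casing, the leading
# article, and the punctuation.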
|
Megatron-LM-master
|
tasks/orqa/unsupervised/qa_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""
Data Loader for Google NQ dataset
"""
from abc import ABC
import ast
import csv
from collections import OrderedDict
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset, BatchSampler
from megatron import print_rank_0, get_args, get_tokenizer
from megatron.data.biencoder_dataset_utils import make_attention_mask
def get_nq_dataset(qa_data, split):
args = get_args()
tokenizer = get_tokenizer()
dataset = NQDataset('Google NQ {} Split'.format(split),
'Google Natural Questions',
qa_data,
tokenizer,
args.retriever_seq_length)
return dataset
def process_nq_batch(batch):
query_tokens = batch['token_ids'].long().cuda()
query_mask = (batch['token_mask'] < 0.5).cuda()
query_types = batch['token_types'].long().cuda()
query_len = batch['seq_len'].long().cuda()
reference = batch['reference']
return query_tokens, query_mask, query_types, query_len, reference
class CustomDataLoader(DataLoader):
def __init__(self, dataset, eval=False, **kwargs):
if kwargs.get('collate_fn', None) is None:
kwargs['collate_fn'] = self._collate_fn
self.eval = eval
super().__init__(dataset, **kwargs)
def _collate_fn(self, batch_data):
# generate batch
batch_size = len(batch_data)
tensorized = OrderedDict()
for d in batch_data:
for k, v in d.items():
tensorized.setdefault(k, []).append(v)
assert len(tensorized) == 5
tensorized['token_ids'] = torch.LongTensor(tensorized['token_ids'])
tensorized['token_mask'] = torch.LongTensor(tensorized['token_mask'])
tensorized['token_types'] = torch.LongTensor(tensorized['token_types'])
tensorized['seq_len'] = torch.LongTensor(tensorized['seq_len'])
return tensorized
def get_one_epoch_nq_dataloader(dataset, micro_batch_size=None):
"""Data loader. Note that batch-size is the local (per GPU) batch-size.
NOTE: This dataloader is not distributed !!!
"""
args = get_args()
if micro_batch_size is None:
micro_batch_size = args.micro_batch_size
num_workers = args.num_workers
sampler = torch.utils.data.SequentialSampler(dataset)
# importantly, drop_last must be False to get all the data.
batch_sampler = BatchSampler(sampler,
batch_size=micro_batch_size,
drop_last=False)
# Data loader. Note that batch size is the per GPU batch size.
data_loader = CustomDataLoader(dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=True)
return data_loader
def build_tokens_types_paddings_from_text(src_text, tokenizer, max_seq_length):
"""Build token types and paddings, trim if needed, and pad if needed."""
src_text_ids = tokenizer.tokenize(src_text)
return build_tokens_types_paddings_from_ids(src_text_ids,
max_seq_length,
tokenizer.cls,
tokenizer.sep,
tokenizer.pad)
def build_tokens_types_paddings_from_ids(src_ids, max_seq_length, cls_id, \
sep_id, pad_id):
"""
Build token types and paddings, trim if needed, and pad if needed.
TODO: Design modular interface to reuse this function. This is getting
repeated multiple times in different tasks
"""
enc_ids = []
tokentypes_enc = []
# [CLS].
enc_ids.append(cls_id)
tokentypes_enc.append(0)
# A.
len_src = len(src_ids)
enc_ids.extend(src_ids)
tokentypes_enc.extend([0] * len_src)
# Cap the size.
if len(enc_ids) > max_seq_length - 1:
enc_ids = enc_ids[0: max_seq_length - 1]
tokentypes_enc = tokentypes_enc[0: max_seq_length - 1]
# [SEP].
enc_ids.append(sep_id)
tokentypes_enc.append(0)
num_tokens_enc = len(enc_ids)
# Padding.
padding_length = max_seq_length - len(enc_ids)
if padding_length > 0:
enc_ids.extend([pad_id] * padding_length)
tokentypes_enc.extend([pad_id] * padding_length)
return enc_ids, tokentypes_enc, num_tokens_enc
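# Worked example (illustrative): with max_seq_length = 8, cls_id = 101,
# sep_id = 102, pad_id = 0 and src_ids = [7, 8, 9], the result is
# enc_ids = [101, 7, 8, 9, 102, 0, 0, 0] with num_tokens_enc = 5; a
# 10-id source would first be capped at max_seq_length - 1 tokens
# ([CLS] plus 6 ids) so the trailing [SEP] still fits.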
def build_sample(token_ids, token_types, num_tokens, reference):
"""
Convert to numpy and return a sample consumed by the
batch producer.
"""
token_ids = np.array(token_ids, dtype=np.int64)
token_types = np.array(token_types, dtype=np.int64)
token_mask = make_attention_mask(token_ids, token_ids)
sample = ({
'token_ids': token_ids,
'token_mask': token_mask,
'token_types': token_types,
'seq_len': num_tokens,
'reference': reference
})
return sample
class NQDataset(ABC, Dataset):
"""
Open Retrieval Question Answering evaluation using Google NQ dataset.
"""
def __init__(self, task_name, dataset_name, datapath,
tokenizer, max_seq_length):
# Store inputs.
self.task_name = task_name
self.dataset_name = dataset_name
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
print_rank_0(' > building {} dataset for {}:'.format(self.task_name,
self.dataset_name))
print_rank_0(datapath)
self.samples = self.process_samples_from_single_path(datapath)
print_rank_0(' >> total number of samples: {}'.format(\
len(self.samples)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
raw_sample = self.samples[idx]
ques_tokens, tokentypes_enc, num_tokens_ques = \
build_tokens_types_paddings_from_text(raw_sample['question'],
self.tokenizer, self.max_seq_length)
sample = build_sample(ques_tokens,
tokentypes_enc,
num_tokens_ques,
raw_sample['answers'])
return sample
@staticmethod
def process_samples_from_single_path(filename):
print_rank_0(' > Processing {} ...'.format(filename))
        import ast  # stdlib; used below to parse the answers column safely

        samples = []
        total = 0
        with open(filename, 'r') as ifile:
            reader = csv.reader(ifile, delimiter='\t')
            for row in reader:
                question = row[0]
                # The answers column is a Python-style list literal; parse
                # it with ast.literal_eval instead of the unsafe eval().
                answers = ast.literal_eval(row[1])
sample = {'question': question, 'answers': answers}
total += 1
samples.append(sample)
if total % 1000 == 0:
print_rank_0(' > processed {} so far ...'.format(total))
print_rank_0(' >> processed {} samples.'.format(len(samples)))
return samples
|
Megatron-LM-master
|
tasks/orqa/unsupervised/nq.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# The following code has been taken from
# https://github.com/facebookresearch/DPR, which is CC-BY-NC 4.0
# licensed as of now. More details on the license can be found
# at https://github.com/facebookresearch/DPR/blob/master/LICENSE
"""
Most of the tokenizer code here is copied from the DrQA codebase to avoid adding an extra dependency
"""
import copy
import logging
import regex
import spacy
logger = logging.getLogger(__name__)
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
            as_strings: return each ngram as a joined string instead of a list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
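        # Example: words ['a', 'b', 'c'] with n=2 yield
        # ['a', 'a b', 'b', 'b c', 'c'].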
return ngrams
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
def tokenize(self, text):
raise NotImplementedError
def shutdown(self):
pass
def __del__(self):
self.shutdown()
class SimpleTokenizer(Tokenizer):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
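    # ALPHA_NUM matches runs of letters, digits, and combining marks;
    # NON_WS matches any single character that is neither whitespace
    # nor a control/format character.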
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
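# A minimal usage sketch (illustrative only):
#   SimpleTokenizer().tokenize("Hello, world!").words()
#   -> ['Hello', ',', 'world', '!']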
class SpacyTokenizer(Tokenizer):
def __init__(self, **kwargs):
"""
Args:
annotators: set that can include pos, lemma, and ner.
model: spaCy model to use (either path, or keyword like 'en').
"""
model = kwargs.get('model', 'en')
self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
nlp_kwargs = {'parser': False}
if not any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
nlp_kwargs['tagger'] = False
if 'ner' not in self.annotators:
nlp_kwargs['entity'] = False
self.nlp = spacy.load(model, **nlp_kwargs)
def tokenize(self, text):
# We don't treat new lines as tokens.
clean_text = text.replace('\n', ' ')
tokens = self.nlp.tokenizer(clean_text)
if any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
self.nlp.tagger(tokens)
if 'ner' in self.annotators:
self.nlp.entity(tokens)
data = []
for i in range(len(tokens)):
# Get whitespace
start_ws = tokens[i].idx
if i + 1 < len(tokens):
end_ws = tokens[i + 1].idx
else:
end_ws = tokens[i].idx + len(tokens[i].text)
data.append((
tokens[i].text,
text[start_ws: end_ws],
(tokens[i].idx, tokens[i].idx + len(tokens[i].text)),
tokens[i].tag_,
tokens[i].lemma_,
tokens[i].ent_type_,
))
# Set special option for non-entity tag: '' vs 'O' in spaCy
return Tokens(data, self.annotators, opts={'non_ent': ''})
|
Megatron-LM-master
|
tasks/orqa/unsupervised/tokenizers.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Evaluation utilities."""
from collections import OrderedDict
import math
import numpy as np
import time
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from megatron import get_args, print_rank_0
from megatron.core import mpu
from megatron.utils import average_losses_across_data_parallel_group
from tasks.finetune_utils import build_data_loader
def task_collate_fn(batch_data):
# generate batch
batch_size = len(batch_data)
tensorized = OrderedDict()
for d in batch_data:
for k, v in d.items():
tensorized.setdefault(k, []).append(v)
tensorized['query'] = torch.LongTensor(tensorized['query'])
tensorized['query_mask'] = torch.LongTensor(tensorized['query_mask'])
tensorized['query_types'] = torch.LongTensor(tensorized['query_types'])
tensorized['query_pad_mask'] = \
torch.LongTensor(tensorized['query_pad_mask'])
tensorized['context'] = torch.LongTensor(tensorized['context'])
tensorized['context_mask'] = \
torch.LongTensor(tensorized['context_mask'])
tensorized['context_types'] = \
torch.LongTensor(tensorized['context_types'])
tensorized['context_pad_mask'] = \
torch.LongTensor(tensorized['context_pad_mask'])
if 'neg_context' in tensorized:
tensorized['neg_context'] = \
torch.LongTensor(np.concatenate(tensorized['neg_context']))
tensorized['neg_context_mask'] = \
torch.LongTensor(np.concatenate(tensorized['neg_context_mask']))
tensorized['neg_context_types'] = \
torch.LongTensor(np.concatenate(tensorized['neg_context_types']))
return tensorized
def process_batch(batch):
"""Process batch and produce inputs for the model."""
query_tokens = batch['query'].long().cuda()
query_mask = (batch['query_mask'] < 0.5).cuda()
query_types = batch['query_types'].long().cuda()
query_pad_mask = batch['query_pad_mask'].long().cuda()
context_tokens = batch['context'].long().cuda()
context_mask = (batch['context_mask'] < 0.5).cuda()
context_types = batch['context_types'].long().cuda()
context_pad_mask = batch['context_pad_mask'].long().cuda()
if 'neg_context' in batch:
neg_context_tokens = batch['neg_context'].long().cuda()
neg_context_mask = (batch['neg_context_mask'] < 0.5).cuda()
neg_context_types = batch['neg_context_types'].long().cuda()
else:
neg_context_tokens = None
neg_context_mask = None
neg_context_types = None
reference = batch['reference']
return query_tokens, query_mask, query_types, query_pad_mask, \
context_tokens, context_mask, context_types, context_pad_mask, \
neg_context_tokens, neg_context_mask, neg_context_types, reference
def accuracy_func_provider(single_dataset_provider, rank0sampler=False):
"""Provide function that calculates accuracies."""
args = get_args()
print_rank_0("accuracy_func_provider is CALLED")
# Build dataloaders
datapath = args.valid_data
dataset = single_dataset_provider(datapath)
drop_last = False
if mpu.get_data_parallel_world_size() > 1 and not rank0sampler:
drop_last = True
print_rank_0(datapath)
print_rank_0(rank0sampler)
dataloader = build_data_loader(dataset,
args.eval_micro_batch_size,
num_workers=args.num_workers,
drop_last=drop_last,
task_collate_fn=task_collate_fn)
dataloaders = (dataset.dataset_name, dataloader)
def metrics_func(model, epoch, output_predictions=False):
print_rank_0('calculating metrics by accuracy func in ORQA...')
if output_predictions:
assert rank0sampler
name, dataloader = dataloaders
if args.task == "RET-FINETUNE-NQ":
start_time = time.time()
output = retrieval_loss(model, dataloader)
stats_dict, total = output
format_string = ""
for k, v in stats_dict.items():
format_string += "|{} = {:.2f}".format(k, v / total)
print_rank_0("epoch:{}{}".format(epoch, format_string))
print_rank_0("taken time to calcuate metrics {:.3f}".format(\
time.time() - start_time))
else:
raise AssertionError("{} Task not supported".format(args.task))
return metrics_func
def retrieval_loss(model, dataloader):
args = get_args()
total = 0
topk_stats_dict = {'top{}_acc'.format(k): 0 for k in \
args.retriever_report_topk_accuracies}
stats_dict = dict(rank=0, **topk_stats_dict)
assert len(model) == 1
unwrapped_model = model[0]
unwrapped_model.eval()
with torch.no_grad():
# For all the batches in the dataset.
for batch in dataloader:
# Run the model forward.
query_tokens, query_mask, query_types, _, \
context_tokens, context_mask, context_types, _, \
neg_context_tokens, neg_context_mask, neg_context_types, \
reference = process_batch(batch)
query_logits, context_logits = unwrapped_model(query_tokens,
query_mask, query_types,
torch.cat([context_tokens, neg_context_tokens]),
torch.cat([context_mask, neg_context_mask]),
torch.cat([context_types, neg_context_types]))
retrieval_scores = torch.matmul(query_logits,
torch.transpose(context_logits, 0, 1))
if args.retriever_score_scaling:
retrieval_scores = retrieval_scores / \
math.sqrt(args.hidden_size)
local_batch_size = query_logits.shape[0]
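            # In-batch negatives: the gold context for query i sits at
            # row i of the score matrix.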
labels = torch.arange(local_batch_size).long().cuda()
softmax_scores = F.softmax(retrieval_scores, dim=1)
sorted_vals, sorted_indices = torch.topk(softmax_scores,
k=softmax_scores.shape[1],
sorted=True)
def topk_accuracy(k):
return torch.cuda.FloatTensor(
[sum([int(labels[i] in sorted_indices[i, :k]) for i in \
range(local_batch_size)])])
def get_rank():
return torch.cuda.FloatTensor(
[sum([torch.nonzero(labels[i] == sorted_indices[i])[0][0] \
for i in range(local_batch_size)])])
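            # 'rank' accumulates, over the batch, the 0-based position of
            # the gold context in the sorted retrieval scores.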
topk_accs = [topk_accuracy(k) for k in \
args.retriever_report_topk_accuracies]
rank = get_rank()
losses = average_losses_across_data_parallel_group([rank, \
*topk_accs])
# create stats_dict with retrieval loss and all specified
# top-k accuracies
topk_acc_dict = {'top{}_acc'.format(k): v * 100 for k, v in \
zip(args.retriever_report_topk_accuracies, losses[1:])}
temp_stats_dict = dict(rank=losses[0], **topk_acc_dict)
for k in stats_dict.keys():
stats_dict[k] += temp_stats_dict[k]
total += local_batch_size
unwrapped_model.train()
return stats_dict, total
|
Megatron-LM-master
|
tasks/orqa/supervised/eval_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""ORQA finetuning/evaluation."""
from functools import partial
import sys
import math
import torch
import torch.nn.functional as F
from megatron import get_args, get_timers, get_tokenizer, print_rank_0
from megatron.core import mpu
from megatron.indexer import IndexBuilder
from megatron.model.biencoder_model import biencoder_model_provider
from megatron.utils import average_losses_across_data_parallel_group
from pretrain_ict import get_group_world_size_rank
from tasks.finetune_utils import finetune
from tasks.orqa.supervised.eval_utils import accuracy_func_provider
from tasks.orqa.supervised.eval_utils import process_batch, task_collate_fn
from tasks.orqa.evaluate_utils import ORQAEvaluator
# input_ is a 2D tensor
def check_and_append_tensor_for_gather(group, rank, world_size, input_):
# gather the size of the first dimension of the tensor from all ranks
current_length = input_.size()[0]
first_dim = torch.tensor([[current_length]],
device=torch.cuda.current_device())
input_list = [torch.empty_like(first_dim) for _ in range(world_size)]
input_list[rank].copy_(first_dim)
torch.distributed.all_gather(input_list, first_dim, group=group)
all_input_list = torch.cat(input_list, dim=0).contiguous()
max_length = torch.max(all_input_list)
    # If this rank's tensor is shorter than the longest one in the group,
    # pad its first dimension so all-gather sees equal shapes.
    if max_length > current_length:
        padding = tuple([0] * (input_.dim() * 2 - 1)) + \
            tuple([max_length - current_length])
        input_ = F.pad(input=input_, pad=padding)
return input_
def orqa(Dataset):
def cross_entropy_forward_step(batch, model):
"""Simple forward step with cross-entropy loss."""
timers = get_timers()
tokenizer = get_tokenizer()
# Get the batch.
timers('batch generator', log_level=2).start()
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
group, rank, world_size = get_group_world_size_rank()
query_tokens, query_mask, query_types, query_pad_mask, \
context_tokens, context_mask, context_types, context_pad_mask, \
neg_context_tokens, neg_context_mask, neg_context_types, \
reference = process_batch(batch_)
timers('batch generator').stop()
local_batch_size = query_tokens.shape[0]
# Text representation of query and context
query_list, context_list = [], []
for i in range(local_batch_size):
query_list.append(tokenizer.decode(query_tokens[i].tolist()))
context_list.append(tokenizer.decode(context_tokens[i].tolist()))
if neg_context_tokens is not None:
neg_context_tokens = check_and_append_tensor_for_gather(group,
rank, world_size, neg_context_tokens)
neg_context_mask = check_and_append_tensor_for_gather(group,
rank, world_size, neg_context_mask)
neg_context_types = check_and_append_tensor_for_gather(group,
rank, world_size, neg_context_types)
if neg_context_tokens is not None:
context_tokens = torch.cat([context_tokens, neg_context_tokens])
context_mask = torch.cat([context_mask, neg_context_mask])
context_types = torch.cat([context_types, neg_context_types])
# Forward model.
output_tensor = model(query_tokens, query_mask,
query_types, context_tokens,
context_mask, context_types)
return output_tensor, partial(cross_entropy_loss_func, query_tokens, context_tokens)
def cross_entropy_loss_func(query_tokens, context_tokens, output_tensor):
args = get_args()
local_batch_size = query_tokens.shape[0]
group, rank, world_size = get_group_world_size_rank()
# recall we assert that model_parallel_size == 1
global_batch_size = world_size * local_batch_size
query_logits, context_logits = output_tensor
if world_size > 1:
input_ = torch.empty_like(context_logits).copy_(\
context_logits).detach_()
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank].copy_(input_)
torch.distributed.all_gather(tensor_list, input_, group=group)
# Check if all-gather happens in order
assert tensor_list[rank].sum().item() == \
context_logits.sum().item()
# Preserves the gradient
tensor_list[rank] = context_logits
all_context_logits = torch.cat(tensor_list, dim=0).contiguous()
# Query tensors
input_ = torch.empty_like(query_logits).copy_(\
query_logits).detach_()
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank].copy_(input_)
torch.distributed.all_gather(tensor_list, input_, group=group)
# Check if all-gather happens in order
assert tensor_list[rank].sum().item() == query_logits.sum().item()
# Preserves the gradient
tensor_list[rank] = query_logits
all_query_logits = torch.cat(tensor_list, dim=0).contiguous()
else:
all_query_logits = query_logits
all_context_logits = context_logits
retrieval_scores = torch.matmul(all_query_logits,
torch.transpose(all_context_logits, 0, 1))
# Scaling the retrieval scores
if args.retriever_score_scaling:
retrieval_scores = retrieval_scores / math.sqrt(args.hidden_size)
if args.train_with_neg:
# if the world size is 3, local batch size is 4, and
# local context size is 8, what we want is
# labels = [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19]
labels = []
local_context_size = context_tokens.shape[0]
for i in range(world_size):
j = i * local_context_size
labels.extend(list(range(j, j + local_batch_size)))
labels = torch.LongTensor(labels).cuda()
assert len(labels) == global_batch_size
else:
labels = torch.arange(global_batch_size).long().cuda()
# Cross-entropy loss.
softmax_scores = F.log_softmax(retrieval_scores, dim=1)
loss = F.nll_loss(softmax_scores, labels, reduction='mean')
max_score, max_idxs = torch.max(softmax_scores, 1)
correct_predictions_count = (max_idxs == labels).sum().float()
# Reduce loss for logging.
reduced_loss = average_losses_across_data_parallel_group([loss, \
correct_predictions_count])
        # Scale the loss so gradients are correct after the data-parallel
        # averaging (each rank already computes the loss over the full
        # gathered batch).
loss = loss * mpu.get_data_parallel_world_size()
return loss, {'lm loss': reduced_loss[0],
'correct_prediction_count': reduced_loss[1]}
def train_valid_datasets_provider():
"""Build train and validation dataset."""
args = get_args()
tokenizer = get_tokenizer()
train_dataset = Dataset('training',
args.train_data,
tokenizer,
args.retriever_seq_length,
evaluate=False)
valid_dataset = Dataset('validation',
args.valid_data,
tokenizer,
args.retriever_seq_length,
evaluate=True)
return train_dataset, valid_dataset
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
print_rank_0('building retriever model for {} ...'.format(args.task))
model = biencoder_model_provider(only_context_model=False,
only_query_model=False,
biencoder_shared_query_context_model=\
args.biencoder_shared_query_context_model,
pre_process=pre_process, post_process=post_process)
return model
def single_dataset_provider(datapath):
args = get_args()
tokenizer = get_tokenizer()
name = datapath[0].split('/')[-1].split('.')[0]
return Dataset(name,
datapath,
tokenizer,
args.retriever_seq_length,
evaluate=True)
def metrics_func_provider():
"""Provide metrics callback function."""
return accuracy_func_provider(single_dataset_provider)
"""Finetune/evaluate."""
finetune(train_valid_datasets_provider,
model_provider,
forward_step=cross_entropy_forward_step,
end_of_epoch_callback_provider=metrics_func_provider,
task_collate_fn=task_collate_fn)
def main():
args = get_args()
if args.task == 'RET-FINETUNE-NQ':
from tasks.orqa.supervised.data import NQSupervisedDataset as Dataset
else:
raise NotImplementedError('ORQA task {} is not implemented.'.format(
args.task))
orqa(Dataset)
|
Megatron-LM-master
|
tasks/orqa/supervised/finetune.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""ORQA dataset."""
import json
import random
from abc import ABC
from abc import abstractmethod
import numpy as np
from torch.utils.data import Dataset
from megatron import print_rank_0, get_args
from megatron.data.biencoder_dataset_utils import make_attention_mask
def build_token_types_from_context_list(ctx_list, tokenizer, max_seq_length):
ctx_id_list, ctx_types_list = [], []
for context in ctx_list:
title_ids = tokenizer.tokenize(context['title'])
ctx_ids = tokenizer.tokenize(context['text'])
ctx_ids = title_ids + [tokenizer.sep_id] + ctx_ids
ctx_ids, ctx_types, _ = build_tokens_types_paddings_from_ids(ctx_ids,
max_seq_length, tokenizer.cls,
tokenizer.sep, tokenizer.pad)
ctx_id_list.append(ctx_ids)
ctx_types_list.append(ctx_types)
return ctx_id_list, ctx_types_list
def build_tokens_types_paddings_from_text(query, context,
tokenizer, max_seq_length):
"""Build token types and paddings, trim if needed, and pad if needed."""
query_ids = tokenizer.tokenize(query)
query_ids, query_types, query_pad_mask = \
build_tokens_types_paddings_from_ids(query_ids, max_seq_length, \
tokenizer.cls, tokenizer.sep, tokenizer.pad)
    # Append the title of the context at the front
extended_ctx_ids = None
if context is not None:
title_ids = tokenizer.tokenize(context['title'])
ctx_ids = tokenizer.tokenize(context['text'])
extended_ctx_ids = title_ids + [tokenizer.sep] + ctx_ids
ctx_ids, ctx_types, ctx_pad_mask = \
build_tokens_types_paddings_from_ids(extended_ctx_ids,
max_seq_length, tokenizer.cls, tokenizer.sep, tokenizer.pad)
return query_ids, query_types, query_pad_mask, \
ctx_ids, ctx_types, ctx_pad_mask
# Similar to the code in tasks/data_utils, with some changes
def build_tokens_types_paddings_from_ids(text_ids, max_seq_length,
cls_id, sep_id, pad_id):
"""Build token types and paddings, trim if needed, and pad if needed."""
enc_ids = []
tokentypes_enc = []
# [CLS].
enc_ids.append(cls_id)
tokentypes_enc.append(0)
# A.
len_src = len(text_ids)
enc_ids.extend(text_ids)
tokentypes_enc.extend([0] * len_src)
# Cap the size.
if len(enc_ids) > max_seq_length - 1:
enc_ids = enc_ids[0: max_seq_length - 1]
tokentypes_enc = tokentypes_enc[0: max_seq_length - 1]
# [SEP].
enc_ids.append(sep_id)
tokentypes_enc.append(0)
num_tokens_enc = len(enc_ids)
# Padding.
padding_length = max_seq_length - len(enc_ids)
if padding_length > 0:
enc_ids.extend([pad_id] * padding_length)
tokentypes_enc.extend([pad_id] * padding_length)
pad_mask = ([1] * num_tokens_enc) + ([0] * padding_length)
pad_mask = np.array(pad_mask, dtype=np.int64)
return enc_ids, tokentypes_enc, pad_mask
def build_sample(query_ids, query_types, query_pad_mask,
ctx_ids, ctx_types, ctx_pad_mask, answers,
neg_ctx_id_list=None, neg_ctx_types_list=None,
include_neg=False):
"""Convert to numpy and return a sample consumed by the batch producer."""
query_ids = np.array(query_ids, dtype=np.int64)
query_types = np.array(query_types, dtype=np.int64)
query_mask = make_attention_mask(query_ids, query_ids)
ctx_ids = np.array(ctx_ids, dtype=np.int64)
ctx_types = np.array(ctx_types, dtype=np.int64)
ctx_mask = make_attention_mask(ctx_ids, ctx_ids)
sample = ({
'query': query_ids,
'query_mask': query_mask,
'query_types': query_types,
'query_pad_mask': query_pad_mask,
'context': ctx_ids,
'context_mask': ctx_mask,
'context_types': ctx_types,
'context_pad_mask': ctx_pad_mask,
'reference': answers
})
if include_neg:
neg_ctx_ids = np.array(neg_ctx_id_list, dtype=np.int64)
neg_ctx_id_types = np.array(neg_ctx_types_list, dtype=np.int64)
neg_ctx_mask = np.array([make_attention_mask(ids, ids) \
for ids in neg_ctx_ids], dtype=np.int64)
sample['neg_context'] = neg_ctx_ids
sample['neg_context_types'] = neg_ctx_id_types
sample['neg_context_mask'] = neg_ctx_mask
return sample
class OpenRetrievalAbstractDataset(ABC, Dataset):
"""Open Retrieval base dataset class."""
def __init__(self, task_name, dataset_name, datapaths, tokenizer, \
max_seq_length, evaluate=False):
# Store inputs.
args = get_args()
self.evaluate = evaluate
self.val_av_rank_hard_neg = args.val_av_rank_hard_neg
self.val_av_rank_other_neg = args.val_av_rank_other_neg
self.train_with_neg = args.train_with_neg
self.train_hard_neg = args.train_hard_neg
self.task_name = task_name
self.dataset_name = dataset_name
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
print_rank_0(' > building {} dataset for {}:'.format(self.task_name,
self.dataset_name))
# Process the files.
string = ' > paths:'
for path in datapaths:
string += ' ' + path
print_rank_0(string)
self.samples = []
for datapath in datapaths:
self.samples.extend(self.process_samples_from_single_path(datapath))
args = get_args()
if args.sample_rate < 1: # subsample
k = int(len(self.samples) * args.sample_rate)
self.samples = random.sample(self.samples, k)
print_rank_0(' >> total number of samples: {}'.format(
len(self.samples)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
raw_sample = self.samples[idx]
query_ids, query_types, query_pad_mask, ctx_ids, ctx_types, \
ctx_pad_mask = build_tokens_types_paddings_from_text( \
raw_sample['question'], raw_sample['pos_context'], \
self.tokenizer, self.max_seq_length)
if self.evaluate:
neg_ctx_list = \
raw_sample['negative_context'][:self.val_av_rank_other_neg] + \
raw_sample['hard_negative_context'][:self.val_av_rank_hard_neg]
neg_ctx_id_list, neg_ctx_types_list = \
build_token_types_from_context_list(neg_ctx_list, \
self.tokenizer, self.max_seq_length)
elif self.train_with_neg:
hard_negative_ctx = raw_sample['hard_negative_context']
negative_ctx = raw_sample['negative_context']
            random.shuffle(hard_negative_ctx)
            random.shuffle(negative_ctx)
neg_ctx_list = hard_negative_ctx[:self.train_hard_neg]
            # In the Google NQ data prepared by the DPR codebase, roughly 50
            # training samples are missing hard negatives. In those cases,
            # substitute simple negatives for the missing hard negatives.
if len(neg_ctx_list) < self.train_hard_neg:
neg_ctx_list += negative_ctx[:self.train_hard_neg - \
len(neg_ctx_list)]
neg_ctx_id_list, neg_ctx_types_list = \
build_token_types_from_context_list(neg_ctx_list,
self.tokenizer, self.max_seq_length)
else:
neg_ctx_id_list = None
neg_ctx_types_list = None
sample = build_sample(query_ids, query_types, query_pad_mask,
ctx_ids, ctx_types, ctx_pad_mask,
raw_sample['answers'],
neg_ctx_id_list, neg_ctx_types_list,
include_neg=self.evaluate or self.train_with_neg)
return sample
@staticmethod
@abstractmethod
def process_samples_from_single_path(filename):
"""Abstract method that takes a filename and
returns a list of dataset samples, each sample being a dict of
{'text': string, 'text': string}
"""
pass
def normalize_question(question):
if question[-1] == '?':
question = question[:-1]
return question
# The following class reads the datasets for training retriever as
# prepared by the DPR codebase (https://github.com/facebookresearch/DPR)
class NQSupervisedDataset(OpenRetrievalAbstractDataset):
def __init__(self, name, datapaths, tokenizer, max_seq_length, \
evaluate=False):
super().__init__('natural_questions_ret',
name,
datapaths,
tokenizer,
max_seq_length,
evaluate=evaluate)
@staticmethod
def process_samples_from_single_path(filename):
""""Implement abstract method."""
print_rank_0(' > Processing {} ...'.format(filename))
samples = []
total = 0
with open(filename, 'r', encoding="utf-8") as f:
data = json.load(f)
for row in data:
question = normalize_question(row['question'])
pos_context = row['positive_ctxs'][0]
# Hard Negative Contexts
if len(row['hard_negative_ctxs']) > 0:
hard_neg_context = row['hard_negative_ctxs']
else:
hard_neg_context = []
# Negative Contexts
if len(row['negative_ctxs']) > 0:
neg_context = row['negative_ctxs']
else:
neg_context = []
answers = row['answers']
sample = {'question': question,
'pos_context': pos_context,
'hard_negative_context': hard_neg_context,
'negative_context': neg_context,
'answers': answers}
total += 1
samples.append(sample)
if total % 5000 == 0:
print_rank_0(' > processed {} so far ...'.format(total))
print_rank_0(' >> processed {} samples.'.format(len(samples)))
return samples
|
Megatron-LM-master
|
tasks/orqa/supervised/data.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Processing large data for pretraining."""
import argparse
import math
import json
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import time
import gzip
import glob
import torch
import numpy as np
import multiprocessing
try:
import nltk
nltk_available = True
except ImportError:
nltk_available = False
from megatron.tokenizer import build_tokenizer
from megatron.data import indexed_dataset
# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer
if nltk_available:
    class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars):
        # Guarded by nltk_available so importing this module does not fail
        # with a NameError when NLTK is missing.
        _period_context_fmt = r"""
            \S*                          # some word material
            %(SentEndChars)s             # a potential sentence ending
            \s*                          # <-- THIS is what I changed
            (?=(?P<after_tok>
                %(NonWord)s              # either other punctuation
                |
                (?P<next_tok>\S+)        # <-- Normally you would have \s+ here
            ))"""
class IdentitySplitter(object):
def tokenize(self, *text):
return text
class Encoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
# Use Encoder class as a container for global data
Encoder.tokenizer = build_tokenizer(self.args)
if self.args.split_sentences:
if not nltk_available:
print("NLTK is not available to split sentences.")
exit()
library = "tokenizers/punkt/{}.pickle".format(self.args.lang)
splitter = nltk.load(library)
if self.args.keep_newlines:
# this prevents punkt from eating newlines after sentences
Encoder.splitter = nltk.tokenize.punkt.PunktSentenceTokenizer(
train_text = splitter._params,
lang_vars = CustomLanguageVars())
else:
Encoder.splitter = splitter
else:
Encoder.splitter = IdentitySplitter()
def split(self, json_line):
data = json.loads(json_line)
output = {}
for key in self.args.json_keys:
text = data[key]
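            # punkt is slow and memory-hungry on very long strings, so the
            # text is split into 1M-character chunks before sentence
            # splitting and the per-chunk results are flattened below.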
max_len = 1000000
tokens_list = [Encoder.splitter.tokenize(text[i:i+max_len]) for i in range(0, len(text), max_len)]
output[key] = [tokens for partial in tokens_list for tokens in partial]
return json.dumps(output), len(json_line)
def encode(self, json_line):
data = json.loads(json_line)
ids = {}
lens = {}
for key in self.args.json_keys:
text = data[key]
if isinstance(text, list):
sentences = text
else:
sentences = [text]
doc_ids = []
sentence_lens = []
for sentence in sentences:
sentence_ids = Encoder.tokenizer.tokenize(sentence)
if len(sentence_ids) > 0:
doc_ids.extend(sentence_ids)
sentence_lens.append(len(sentence_ids))
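            # Append the end-of-document token and count it as part of the
            # last sentence.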
if len(doc_ids) > 0 and self.args.append_eod:
doc_ids.append(Encoder.tokenizer.eod)
sentence_lens[-1] += 1
ids[key] = doc_ids
lens[key] = sentence_lens
return ids, lens, len(json_line)
class Partition(object):
def __init__(self, args, workers):
self.args = args
self.workers = workers
def print_processing_stats(self, count, proc_start, total_bytes_processed):
if count % self.args.log_interval == 0:
current = time.time()
elapsed = current - proc_start
mbs = total_bytes_processed/elapsed/1024/1024
print(f"Processed {count} documents",
f"({count/elapsed} docs/s, {mbs} MB/s).",
file=sys.stderr)
def split_sentences(self, file_name):
input_file_name, output_file_name = file_name
print("Opening", input_file_name)
fin = open(input_file_name, 'r', encoding='utf-8')
fout = open(output_file_name, 'w')
encoder = Encoder(self.args)
pool = multiprocessing.Pool(self.workers, initializer=encoder.initializer)
split_docs = pool.imap(encoder.split, fin, 32)
proc_start = time.time()
total_bytes_processed = 0
for i, (doc, bytes_processed) in enumerate(split_docs, start=1):
total_bytes_processed += bytes_processed
fout.write(doc + "\n")
self.print_processing_stats(i, proc_start, total_bytes_processed)
fin.close()
fout.close()
def process_json_file(self, file_name):
input_file_name, output_prefix = file_name
print("Opening", input_file_name)
fin = open(input_file_name, 'r', encoding='utf-8')
startup_start = time.time()
encoder = Encoder(self.args)
tokenizer = build_tokenizer(self.args)
pool = multiprocessing.Pool(self.workers, initializer=encoder.initializer)
encoded_docs = pool.imap(encoder.encode, fin, 32)
level = "document"
if self.args.split_sentences:
level = "sentence"
output_bin_files = {}
output_idx_files = {}
builders = {}
for key in self.args.json_keys:
output_bin_files[key] = "{}_{}_{}.bin".format(output_prefix,
key, level)
output_idx_files[key] = "{}_{}_{}.idx".format(output_prefix,
key, level)
builders[key] = indexed_dataset.MMapIndexedDatasetBuilder(
output_bin_files[key],
dtype=indexed_dataset.DType.optimal_dtype(tokenizer.vocab_size),
)
startup_end = time.time()
proc_start = time.time()
total_bytes_processed = 0
print("Time to startup:", startup_end - startup_start)
for i, (doc, sentence_lens, bytes_processed) in enumerate(encoded_docs, start=1):
total_bytes_processed += bytes_processed
for key in doc.keys():
builders[key].add_doc(doc[key], sentence_lens[key])
self.print_processing_stats(i, proc_start, total_bytes_processed)
        fin.close()
        # Finalize every builder, not just the last key iterated.
        for key in self.args.json_keys:
            builders[key].finalize(output_idx_files[key])
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, required=True,
help='Path to input JSON')
    group.add_argument('--json-keys', nargs='+', default=['text'],
                       help='Space-separated list of keys to extract from the JSON.')
group.add_argument('--split-sentences', action='store_true',
help='Split documents into sentences.')
group.add_argument('--keep-newlines', action='store_true',
help='Keep newlines between sentences when splitting.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True,
choices=['BertWordPieceLowerCase','BertWordPieceCase',
'GPT2BPETokenizer', 'SentencePieceTokenizer',
'GPTSentencePieceTokenizer', 'NullTokenizer'],
help='What type of tokenizer to use.')
    group.add_argument('--tokenizer-model', type=str, default=None,
                       help='Path to the tokenizer model (e.g. a SentencePiece model).')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file')
    group.add_argument('--vocab-size', type=int, default=786,
                       help='Size of vocab to use with NullTokenizer.')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file (if necessary).')
group.add_argument('--append-eod', action='store_true',
help='Append an <eod> token to the end of a document.')
group.add_argument('--lang', type=str, default='english',
help='Language to use for NLTK-powered sentence splitting.')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True,
help='Path to binary output file without suffix')
group = parser.add_argument_group(title='runtime')
    group.add_argument('--workers', type=int, required=True,
                       help=('Number of worker processes to launch. '
                             'A good default for fast pre-processing '
                             'is: (workers * partitions) = available CPU cores.'))
group.add_argument('--partitions', type=int, default=1,
help='Number of file partitions')
group.add_argument('--log-interval', type=int, default=1000,
help='Interval between progress updates')
group.add_argument('--keep-sequential-samples', action='store_true',
help='Ensure ordering of samples in .jsonl files is '
'preserved when using partitions>1.')
args = parser.parse_args()
args.keep_empty = False
if args.tokenizer_type.lower().startswith('bert') and not args.split_sentences:
print("Are you sure you don't want to split sentences?")
# some default/dummy values for the tokenizer
args.rank = 1
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
return args
def get_file_name(args, file_id):
file_name, extension = os.path.splitext(args.input)
input_file_name = file_name + "_" + str(file_id) + extension
sentence_split_file = file_name + "_ss_" + str(file_id) + extension
output_prefix = args.output_prefix + "_" + str(file_id)
file_names = {
'partition': input_file_name,
'sentence_split': sentence_split_file,
'output_prefix': output_prefix}
return file_names
def check_files_exist(in_ss_out_names, key, num_partitions):
for i in range(num_partitions):
if not os.path.exists(in_ss_out_names[i][key]):
return False
return True
def main():
args = get_args()
if args.split_sentences:
if nltk_available:
nltk.download("punkt", quiet=True)
else:
raise Exception(
"nltk library required for sentence splitting is not available.")
in_ss_out_names = []
if args.partitions == 1:
file_name, extension = os.path.splitext(args.input)
sentence_split_file = file_name + "_ss" + extension
file_names = {
'partition': args.input,
'sentence_split': sentence_split_file,
'output_prefix': args.output_prefix}
in_ss_out_names.append(file_names)
else:
in_file_names = glob.glob(args.input)
# Count total number of lines across .jsonl files
if args.keep_sequential_samples:
total_sample_count = 0
for filename in in_file_names:
with open(filename, "r") as fin:
for fc, _ in enumerate(fin):
pass
total_sample_count += (fc + 1)
partition_size = math.ceil(total_sample_count / args.partitions)
    # create .jsonl partition files
for idx in range(args.partitions):
in_ss_out_name = get_file_name(args, idx)
in_ss_out_names.append(in_ss_out_name)
    # check to see if partitions were already created
partitions_present = check_files_exist(in_ss_out_names, 'partition', args.partitions)
    # check to see if partitions with split sentences were already created
split_sentences_present = check_files_exist(in_ss_out_names, 'sentence_split', args.partitions)
if not partitions_present and not split_sentences_present:
# populate .jsonl partition files from parent files
partitioned_input_files = []
for idx in range(args.partitions):
partitioned_input_file = open(in_ss_out_names[idx]['partition'], 'w')
partitioned_input_files.append(partitioned_input_file)
index = 0
        if args.keep_sequential_samples:
            line_count = 0
for in_file_name in in_file_names:
# support for gzip files
if in_file_name.endswith(".gz"):
fin = gzip.open(in_file_name, 'rt')
else:
fin = open(in_file_name, 'r', encoding='utf-8')
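            # Distribute input lines across partition files: contiguous
            # blocks when sample order must be preserved, round-robin
            # otherwise.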
for line in fin:
partitioned_input_files[index].write(line)
if args.keep_sequential_samples:
line_count += 1
if line_count % partition_size == 0:
index += 1
else:
index = (index + 1)%args.partitions
fin.close()
for idx in range(args.partitions):
partitioned_input_files[idx].close()
assert args.workers % args.partitions == 0
partition = Partition(args, args.workers//args.partitions)
    # check to see if partitions with split sentences were already created
split_sentences_present = check_files_exist(in_ss_out_names, 'sentence_split', args.partitions)
# split sentences in partition files
if args.split_sentences and not split_sentences_present:
processes = []
for name in in_ss_out_names:
p = multiprocessing.Process(target=partition.split_sentences,
args=((name['partition'], name['sentence_split']),))
p.start()
processes.append(p)
for p in processes:
p.join()
if args.partitions == 1:
return
# encode partition files in parallel
processes = []
input_key = 'sentence_split' if args.split_sentences else 'partition'
for name in in_ss_out_names:
p = multiprocessing.Process(target=partition.process_json_file,
args=((name[input_key], name['output_prefix']),))
p.start()
processes.append(p)
for p in processes:
p.join()
if args.partitions == 1:
return
# merge bin/idx partitions
level = "document"
if args.split_sentences:
level = "sentence"
output_bin_files = {}
output_idx_files = {}
builders = {}
tokenizer = build_tokenizer(args)
for key in args.json_keys:
output_bin_files[key] = "{}_{}_{}.bin".format(args.output_prefix,
key, level)
output_idx_files[key] = "{}_{}_{}.idx".format(args.output_prefix,
key, level)
builders[key] = indexed_dataset.MMapIndexedDatasetBuilder(
output_bin_files[key],
dtype=indexed_dataset.DType.optimal_dtype(tokenizer.vocab_size),
)
    for key in args.json_keys:
        for name in in_ss_out_names:
            partition_output_prefix = name['output_prefix']
            full_partition_output_prefix = "{}_{}_{}".format(
                partition_output_prefix, key, level)
            builders[key].merge_file_(full_partition_output_prefix)
        builders[key].finalize(output_idx_files[key])
if __name__ == '__main__':
main()
|
Megatron-LM-master
|
tools/preprocess_data.py
|
import os
import sys
import json
import argparse
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
from megatron.data.indexed_dataset import (
MMapIndexedDataset,
MMapIndexedDatasetBuilder,
get_bin_path,
get_idx_path,
)
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title="input data")
group.add_argument(
"--input",
type=str,
required=True,
help="Path to directory containing all document files to merge",
)
group = parser.add_argument_group(title="output data")
group.add_argument(
"--output-prefix",
type=str,
required=True,
help="Path to binary output file without suffix",
)
args = parser.parse_args()
assert os.path.isdir(
args.input
), f"ERROR: {args.input} is not a directory or does not exist"
assert os.path.isdir(
os.path.dirname(args.output_prefix)
), f"ERROR: {os.path.dirname(args.output_prefix)} is not a directory or does not exist"
return args
def main():
args = get_args()
prefixes = set()
for basename in os.listdir(args.input):
prefix, ext = os.path.splitext(basename)
if prefix in prefixes:
continue
if not os.path.isfile(os.path.join(args.input, basename)):
continue
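        # Each dataset is stored as a (.bin, .idx) pair; require the
        # complementary file to exist before registering the prefix.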
ext_pair = ".bin" if ext == ".idx" else ".idx"
assert os.path.isfile(
os.path.join(args.input, prefix) + ext_pair
), f"ERROR: {ext_pair} file not provided for {os.path.join(args.input, prefix)}"
prefixes.add(prefix)
builder = None
for prefix in sorted(prefixes):
if builder is None:
dataset = MMapIndexedDataset(os.path.join(args.input, prefix))
builder = MMapIndexedDatasetBuilder(
get_bin_path(args.output_prefix), dtype=dataset._index.dtype
)
del dataset
builder.merge_file_(os.path.join(args.input, prefix))
builder.finalize(get_idx_path(args.output_prefix))
if __name__ == '__main__':
main()
|
Megatron-LM-master
|
tools/merge_datasets.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Processing nmt data for finetuning."""
import argparse
import json
import multiprocessing
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import time
import torch
from megatron.tokenizer import build_tokenizer
from megatron.data import indexed_dataset
class Encoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
# Use Encoder class as a container for global data
Encoder.tokenizer = build_tokenizer(self.args)
def encode(self, text):
ids = {}
ids = Encoder.tokenizer.tokenize(text)
assert len(ids) > 0
return ids, len(text)
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, required=True,
help='Path to input JSON')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, default='YTTMTokenizer',
choices=['BertWordPieceLowerCase','BertWordPieceCase',
'GPT2BPETokenizer', 'SentencePieceTokenizer'],
help='What type of tokenizer to use.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file (if necessary).')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True,
help='Path to binary output file without suffix')
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, default=1,
help='Number of worker processes to launch')
group.add_argument('--log-interval', type=int, default=100,
help='Interval between progress updates')
args = parser.parse_args()
args.keep_empty = False
# some default/dummy values for the tokenizer
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
return args
def main():
args = get_args()
startup_start = time.time()
print("Opening", args.input)
fin = open(args.input, 'r', encoding='utf-8')
encoder = Encoder(args)
tokenizer = build_tokenizer(args)
pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer)
encoded_sentences = pool.imap(encoder.encode, fin, 25)
print(f"Vocab size: {tokenizer.vocab_size}")
print(f"Output prefix: {args.output_prefix}")
output_bin_file = "{}.bin".format(args.output_prefix)
output_idx_file = "{}.idx".format(args.output_prefix)
builder = indexed_dataset.MMapIndexedDatasetBuilder(
output_bin_file, dtype=indexed_dataset.DType.optimal_dtype(tokenizer.vocab_size)
)
startup_end = time.time()
proc_start = time.time()
total_bytes_processed = 0
print("Time to startup:", startup_end - startup_start)
for i, (sentence, bytes_processed) in enumerate(encoded_sentences, start=1):
total_bytes_processed += bytes_processed
builder.add_item(torch.IntTensor(sentence))
        # each document contains only one sentence.
builder.end_document()
if i % args.log_interval == 0:
current = time.time()
elapsed = current - proc_start
mbs = total_bytes_processed/elapsed/1024/1024
print(f"Processed {i} sentences",
f"({i/elapsed} sentences/s, {mbs} MB/s).",
file=sys.stderr)
builder.finalize(output_idx_file)
if __name__ == '__main__':
main()
|
Megatron-LM-master
|
tools/preprocess_data_nmt.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import sys
import json
import requests
if __name__ == "__main__":
url = sys.argv[1]
url = 'http://' + url + '/api'
headers = {'Content-Type': 'application/json'}
while True:
sentence = input("Enter prompt: ")
        tokens_to_generate = int(input("Enter number of tokens to generate: "))
data = {"prompts": [sentence], "tokens_to_generate": tokens_to_generate}
response = requests.put(url, data=json.dumps(data), headers=headers)
if response.status_code != 200:
print(f"Error {response.status_code}: {response.json()['message']}")
else:
print("Megatron Response: ")
print(response.json()['text'][0])
|
Megatron-LM-master
|
tools/text_generation_cli.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Sample Generate GPT"""
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import socket
from megatron import get_args
from megatron import print_rank_0
from megatron.core import mpu
from megatron.checkpointing import load_checkpoint
from megatron.initialize import initialize_megatron
from megatron.model import GPTModel
from megatron.training import get_model
from megatron.arguments import core_transformer_config_from_args
from megatron.text_generation_server import MegatronServer
from megatron.text_generation import generate_and_post_process
from megatron.text_generation import beam_search_and_post_process
import torch
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
config = core_transformer_config_from_args(get_args())
print_rank_0('building GPT model ...')
model = GPTModel(config, num_tokentypes=0, parallel_output=False, pre_process=pre_process, post_process=post_process)
return model
def add_text_generate_args(parser):
group = parser.add_argument_group(title='text generation')
group.add_argument("--port", type=int, default=5000,
help='port for text generation server to run on')
return parser
if __name__ == "__main__":
initialize_megatron(extra_args_provider=add_text_generate_args,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer',
'no_load_rng': True,
'no_load_optim': True})
args = get_args()
if args.num_layers_per_virtual_pipeline_stage is not None:
print("Interleaved pipeline schedule is not yet supported for text generation.")
exit()
print_rank_0("WARNING: Forcing exit_on_missing_checkpoint to True for text "
"generation.")
args.exit_on_missing_checkpoint = True
# Set up model and load checkpoint
model = get_model(model_provider, wrap_with_ddp=False)
if args.load is not None:
_ = load_checkpoint(model, None, None)
assert len(model) == 1, "Above condition should have caught this"
model = model[0]
if mpu.is_pipeline_first_stage() and mpu.get_tensor_model_parallel_rank() == 0:
server = MegatronServer(model)
server.run("0.0.0.0",port=args.port)
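    # Ranks other than the server rank wait here for a broadcast telling
    # them which generation routine to enter (0 = sampling with post
    # processing, 1 = beam search).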
while True:
choice = torch.cuda.LongTensor(1)
torch.distributed.broadcast(choice, 0)
if choice[0].item() == 0:
try:
generate_and_post_process(model)
except ValueError as ve:
pass
elif choice[0].item() == 1:
try:
beam_search_and_post_process(model)
except ValueError as ve:
pass
|
Megatron-LM-master
|
tools/run_text_generation_server.py
|
import os
import os.path as osp
import pathlib
import subprocess
def recursively_lint_files():
"""Recursively lint all python files in chosen subdirectories of megatron-lm"""
try:
import autopep8
except ModuleNotFoundError:
print("Please first install autopep8 via `pip install autopep8`")
return
# get all python file paths from top level directory
file_dir = str(pathlib.Path(__file__).parent.absolute())
working_dir = osp.join(file_dir, os.pardir)
    all_py_paths = set(os.path.join(working_dir, fname)
                       for fname in os.listdir(working_dir)
                       if fname.endswith(".py"))
# get all python file paths from chosen subdirectories
check_dirs = ['docker', 'megatron', 'openwebtext', 'scripts', 'tasks']
for sub_dir in check_dirs:
for path, _, fnames in os.walk(osp.join(working_dir, sub_dir)):
            all_py_paths.update(set(osp.join(path, fname)
                                    for fname in fnames if fname.endswith(".py")))
print("Linting the following: ")
for py_path in all_py_paths:
print(py_path)
        # Pass the command as an argument list; a plain command string would
        # need shell=True and otherwise fails with FileNotFoundError.
        command = ['autopep8', '--max-line-length', '100',
                   '--aggressive', '--in-place', py_path]
        subprocess.check_call(command)
if __name__ == "__main__":
recursively_lint_files()
|
Megatron-LM-master
|
tools/linter.py
|
# coding=utf-8
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Processing text modality data for MultiModal pretraining."""
import argparse
import json
import multiprocessing
import os
import sys
import numpy as np
from torchvision.transforms import ToTensor
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import time
import torch
try:
import nltk
nltk_available = True
except ImportError:
nltk_available = False
from megatron.tokenizer import build_tokenizer
from megatron.data import indexed_dataset
from megatron.data.indexed_dataset import MMapIndexedDatasetBuilder
# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer
if nltk_available:
    class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars):
        # Guarded by nltk_available so importing this module does not fail
        # with a NameError when NLTK is missing.
        _period_context_fmt = r"""
            \S*                          # some word material
            %(SentEndChars)s             # a potential sentence ending
            \s*                          # <-- THIS is what I changed
            (?=(?P<after_tok>
                %(NonWord)s              # either other punctuation
                |
                (?P<next_tok>\S+)        # <-- Normally you would have \s+ here
            ))"""
class IdentitySplitter(object):
def tokenize(self, *text):
return text
class Encoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
# Use Encoder class as a container for global data
Encoder.tokenizer = build_tokenizer(self.args)
def encode(self, input_pair):
json_line, img_file = input_pair
data = json.loads(json_line)
key = "text"
text = data[key]
sentence_ids = Encoder.tokenizer.tokenize(text)
pad_len = self.args.pad_length
if len(sentence_ids) > 0 and self.args.append_eod:
sentence_ids = sentence_ids[:pad_len]
current_length = len(sentence_ids)
sentence_ids.extend([Encoder.tokenizer.eod for _ in range(max(0,pad_len-current_length))])
        # Each line of the image-path file keeps its trailing newline;
        # strip it before opening the image.
        with open(img_file.rstrip('\n'), "rb") as tf:
xs = bytearray(tf.read())
img_pad = (4 - len(xs) % 4) % 4
xs.extend([0 for _ in range(img_pad)])
img_raw = np.frombuffer(xs, dtype=np.int32)
img_raw = np.insert(img_raw, 0, img_pad)
return sentence_ids, img_raw, len(json_line)
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, required=True,
help='Path to input JSON')
group.add_argument('--input-image', type=str, required=True,
help='Path to input image folder')
group.add_argument('--pad-length', type=int, required=True,
help='Pad length of preprocessed text')
group.add_argument('--split-sentences', action='store_true',
help='Split documents into sentences.')
group.add_argument('--keep-newlines', action='store_true',
help='Keep newlines between sentences when splitting.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True,
choices=['BertWordPieceLowerCase','BertWordPieceCase',
'GPT2BPETokenizer', 'SentencePieceTokenizer', 'GPTSentencePieceTokenizer'],
help='What type of tokenizer to use.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file (if necessary).')
group.add_argument('--append-eod', action='store_true',
help='Append an <eod> token to the end of a document.')
group.add_argument('--lang', type=str, default='english',
help='Language to use for NLTK-powered sentence splitting.')
    group.add_argument('--tokenizer-model', type=str, default=None,
                       help='SentencePiece tokenizer model.')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True,
help='Path to binary output file without suffix')
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, default=1,
help='Number of worker processes to launch')
group.add_argument('--log-interval', type=int, default=100,
help='Interval between progress updates')
args = parser.parse_args()
args.keep_empty = False
# some default/dummy values for the tokenizer
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
return args
def main():
args = get_args()
startup_start = time.time()
encoder = Encoder(args)
tokenizer = build_tokenizer(args)
pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer)
fin = open(args.input + ".json", 'r', encoding='utf-8')
img_files = open(args.input_image)
encoded_docs = pool.imap(encoder.encode, zip(fin, img_files), 25)
print(f"Vocab size: {tokenizer.vocab_size}")
print(f"Output prefix: {args.output_prefix}")
output_bin_files = "{}_mmdata.bin".format(args.output_prefix)
output_idx_files = "{}_mmdata.idx".format(args.output_prefix)
builders = MMapIndexedDatasetBuilder(output_bin_files, dtype=np.int32, multimodal=True)
startup_end = time.time()
proc_start = time.time()
total_bytes_processed = 0
print("Time to startup:", startup_end - startup_start)
for i, (sentence, img_raw, bytes_processed) in enumerate(encoded_docs, start=1):
total_bytes_processed += bytes_processed
builders.add_item(torch.IntTensor(sentence))
builders.add_item(torch.from_numpy(img_raw), 1)
builders.end_document()
if i % args.log_interval == 0:
current = time.time()
elapsed = current - proc_start
mbs = total_bytes_processed/elapsed/1024/1024
print(f"Processed {i} documents",
f"({i/elapsed} docs/s, {mbs} MB/s).",
file=sys.stderr)
builders.finalize(output_idx_files)
if __name__ == '__main__':
main()
|
Megatron-LM-master
|
tools/preprocess_mmdata.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import json
import os
import sys
import types
import torch
def add_arguments(parser):
group = parser.add_argument_group(title='Megatron loader')
    group.add_argument('--true-vocab-size', type=int, default=None,
                       help='Original size of the vocab; if specified, padding will be '
                       'trimmed from the embedding table.')
    group.add_argument('--vocab-file', type=str, default=None,
                       help='Path to the vocab file. If specified, it will be used to get '
                       'the vocab size and trim padding from the embedding table.')
    group.add_argument('--megatron-path', type=str, default=None,
                       help='Base directory of the Megatron repository.')
def _load_checkpoint(queue, args):
# Search in directory above this
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.path.pardir)))
if args.megatron_path is not None:
sys.path.insert(0, args.megatron_path)
try:
from megatron.arguments import parse_args, validate_args
from megatron.global_vars import set_args, set_global_variables
from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint
from megatron.model import module
from megatron.core import mpu
from megatron.core.enums import ModelType
from megatron import fused_kernels
except ModuleNotFoundError:
print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.")
queue.put("exit")
exit(1)
# We want all arguments to come from us
sys.argv = ['script.py',
'--no-masked-softmax-fusion',
'--no-bias-gelu-fusion',
'--no-bias-dropout-fusion',
'--no-async-tensor-model-parallel-allreduce',
'--use-cpu-initialization',
'--micro-batch-size', '1',
'--no-load-optim',
'--no-load-rng',
'--no-save-optim',
'--no-save-rng',
'--no-initialization',
'--load', args.load_dir
]
margs = parse_args()
margs, checkpoint_args = load_args_from_checkpoint(margs)
# Arguments do sanity checks on the world size, but we don't care,
    # so trick it into thinking there are plenty of processes
margs.world_size = margs.tensor_model_parallel_size * margs.pipeline_model_parallel_size
margs = validate_args(margs)
def check_for_arg(arg_name, default=None):
if getattr(margs, arg_name, None) is None:
if default is not None:
setattr(margs, arg_name, default)
else:
print(f"Checkpoint does not specify the argument {arg_name}. Exiting.")
print(f"Arguments: {margs}")
queue.put("exit")
exit(1)
check_for_arg('tensor_model_parallel_size')
check_for_arg('pipeline_model_parallel_size')
check_for_arg('num_layers')
check_for_arg('hidden_size')
check_for_arg('seq_length')
check_for_arg('num_attention_heads')
check_for_arg('max_position_embeddings')
check_for_arg('position_embedding_type')
check_for_arg('tokenizer_type')
check_for_arg('iteration')
check_for_arg('bert_binary_head')
check_for_arg('disable_bias_linear', False)
check_for_arg('params_dtype')
check_for_arg('swiglu', False)
# Determine how to make our models
if args.model_type == 'GPT':
from pretrain_gpt import model_provider
margs.model_type = ModelType.encoder_or_decoder
elif args.model_type == 'BERT':
from pretrain_bert import model_provider
margs.model_type = ModelType.encoder_or_decoder
else:
raise Exception(f'unrecognized model type: {args.model_type}')
    # Suppress warning about torch.distributed not being initialized.
module.MegatronModule.embedding_warning_printed = True
consumed_train_samples = None
consumed_valid_samples = None
def get_models(count, dtype):
nonlocal consumed_train_samples
nonlocal consumed_valid_samples
model_array_len = margs.virtual_pipeline_model_parallel_size
if model_array_len is None:
model_array_len = 1
models = [[] for _ in range(model_array_len)]
pre_process = mpu.is_pipeline_first_stage()
post_process = mpu.is_pipeline_last_stage()
for rank in range(count):
mpu.set_tensor_model_parallel_rank(rank)
if margs.virtual_pipeline_model_parallel_size is not None:
model_ = []
for i in range(margs.virtual_pipeline_model_parallel_size):
mpu.set_virtual_pipeline_model_parallel_rank(i)
# Set pre_process and post_process only after virtual rank is set.
pre_process = mpu.is_pipeline_first_stage()
post_process = mpu.is_pipeline_last_stage()
this_model = model_provider(
pre_process=pre_process,
post_process=post_process
).to(dtype)
model_.append(this_model)
else:
pre_process = mpu.is_pipeline_first_stage()
post_process = mpu.is_pipeline_last_stage()
model_rank = 0
model_ = [model_provider(pre_process, post_process).to(dtype)]
margs.consumed_train_samples = 0
margs.consumed_valid_samples = 0
load_checkpoint(model_, None, None)
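            # Every rank must agree on the consumed-sample counters: record
            # the first value seen and assert that all later ranks match it.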
if consumed_train_samples is not None:
assert(margs.consumed_train_samples == consumed_train_samples)
else:
consumed_train_samples = margs.consumed_train_samples
if consumed_valid_samples is not None:
assert(margs.consumed_valid_samples == consumed_valid_samples)
else:
consumed_valid_samples = margs.consumed_valid_samples
for vp_rank in range(model_array_len):
models[vp_rank].append(model_[vp_rank])
return models
set_global_variables(margs, build_tokenizer=False)
mpu.set_tensor_model_parallel_world_size(margs.tensor_model_parallel_size)
mpu.set_pipeline_model_parallel_world_size(margs.pipeline_model_parallel_size)
mpu.set_virtual_pipeline_model_parallel_world_size(margs.virtual_pipeline_model_parallel_size)
fused_kernels.load(margs)
# Get true (non-padded) vocab size
    if args.vocab_file is not None:
        vocab = json.load(open(args.vocab_file))
        true_vocab_size = len(vocab)
        if args.true_vocab_size is not None and true_vocab_size != args.true_vocab_size:
            print("Both --true-vocab-size and --vocab-file specified and the vocab size does not match, aborting.")
            queue.put("exit")
            exit(1)
    elif args.true_vocab_size is not None:
        true_vocab_size = args.true_vocab_size
    else:
        true_vocab_size = None
# short aliases
tp_size = margs.tensor_model_parallel_size
pp_size = margs.pipeline_model_parallel_size
vp_size = margs.virtual_pipeline_model_parallel_size
if vp_size is None:
vp_size = 1
# metadata
md = types.SimpleNamespace()
md.model_type = args.model_type
md.num_layers = margs.num_layers
md.hidden_size = margs.hidden_size
md.seq_length = margs.seq_length
md.num_attention_heads = margs.num_attention_heads
md.max_position_embeddings = margs.max_position_embeddings
md.tokenizer_type = margs.tokenizer_type
md.iteration = margs.iteration
md.params_dtype = margs.params_dtype
md.bert_binary_head = margs.bert_binary_head
md.output_layer = margs.untie_embeddings_and_output_weights
md.position_embedding_type = margs.position_embedding_type
md.linear_bias = margs.add_bias_linear
md.swiglu = margs.swiglu
md.previous_tensor_parallel_size = margs.tensor_model_parallel_size
md.previous_pipeline_parallel_size = margs.pipeline_model_parallel_size
md.true_vocab_size = true_vocab_size
md.make_vocab_size_divisible_by = margs.make_vocab_size_divisible_by
md.checkpoint_args = checkpoint_args
# Get first pipe stage
mpu.set_pipeline_model_parallel_rank(0)
all_models = [get_models(tp_size, md.params_dtype)]
models = all_models[0][0]
md.consumed_train_samples = consumed_train_samples
md.consumed_valid_samples = consumed_valid_samples
queue.put(md)
def queue_put(name, msg):
print(f"sending {name}")
msg["name"] = name
queue.put(msg)
# Send embeddings
message = {
"word embeddings": torch.cat(
[models[tp_rank].language_model.embedding.word_embeddings.weight.data for tp_rank in range(tp_size)],
dim = 0)
}
if md.position_embedding_type == 'learned_absolute':
message["position embeddings"] = models[0].language_model.embedding.position_embeddings.weight.data
else:
assert not hasattr(models[0].language_model.embedding, 'position_embeddings')
queue_put("embeddings", message)
# Layernorm has bias; RMSNorm does not.
norm_has_bias = md.checkpoint_args.normalization == "LayerNorm"
total_layer_num = 0
for vp_rank in range(vp_size):
mpu.set_virtual_pipeline_model_parallel_rank(vp_rank)
for pp_rank in range(pp_size):
if pp_rank > 0:
mpu.set_pipeline_model_parallel_rank(pp_rank)
if vp_rank == 0:
all_models.append(get_models(tp_size, md.params_dtype))
models = all_models[pp_rank][vp_rank]
for layer_num in range(len(models[0].language_model.encoder.layers)):
message = {}
# Get non-parallel tensors from tp_rank 0
layer = models[0].language_model.encoder.layers[layer_num]
message["input norm weight"] = layer.input_norm.weight.data
if norm_has_bias:
message["input norm bias"] = layer.input_norm.bias.data
message["post norm weight"] = layer.post_attention_norm.weight.data
if norm_has_bias:
message["post norm bias"] = layer.post_attention_norm.bias.data
if md.linear_bias:
message["dense bias"] = layer.self_attention.dense.bias.data
message["mlp l1 bias"] = layer.mlp.dense_4h_to_h.bias.data
# Grab all parallel tensors for this layer
qkv_weight = []
qkv_bias = []
dense_weight = []
mlp_l0_weight = []
mlp_l0_bias = []
mlp_l1_weight = []
for tp_rank, model in enumerate(models):
layer = model.language_model.encoder.layers[layer_num]
qkv_weight.append(layer.self_attention.query_key_value.weight.data)
dense_weight.append(layer.self_attention.dense.weight.data)
mlp_l0_weight.append(layer.mlp.dense_h_to_4h.weight.data)
mlp_l1_weight.append(layer.mlp.dense_4h_to_h.weight.data)
if md.linear_bias:
qkv_bias.append(layer.self_attention.query_key_value.bias.data)
mlp_l0_bias.append(layer.mlp.dense_h_to_4h.bias.data)
# Handle gated linear units
if md.swiglu:
# concat all the first halves ('W's) and all the second halves ('V's)
for tp_rank in range(tp_size):
mlp_l0_weight[tp_rank] = torch.chunk(mlp_l0_weight[tp_rank], 2, dim=0)
message["mlp l0 weight W"] = torch.cat([w[0] for w in mlp_l0_weight], dim=0)
message["mlp l0 weight V"] = torch.cat([w[1] for w in mlp_l0_weight], dim=0)
else:
message["mlp l0 weight"] = torch.cat(mlp_l0_weight, dim=0)
# simple concat of the rest
message["qkv weight"] = torch.cat(qkv_weight, dim=0)
message["dense weight"] = torch.cat(dense_weight, dim=1)
message["mlp l1 weight"] = torch.cat(mlp_l1_weight, dim=1)
if md.linear_bias:
message["qkv bias"] = torch.cat(qkv_bias, dim=0)
if md.swiglu:
for tp_rank in range(tp_size):
mlp_l0_bias[tp_rank] = torch.chunk(mlp_l0_bias[tp_rank], 2, dim=0)
message["mlp l0 bias W"] = torch.cat([b[0] for b in mlp_l0_bias],dim=0)
message["mlp l0 bias V"] = torch.cat([b[1] for b in mlp_l0_bias],dim=0)
else:
message["mlp l0 bias"] = torch.cat(mlp_l0_bias, dim=0)
queue_put(f"transformer layer {total_layer_num}", message)
total_layer_num = total_layer_num + 1
# Send final norm from tp_rank 0
message = {
"weight": models[0].language_model.encoder.final_norm.weight.data,
}
if norm_has_bias:
message["bias"] = models[0].language_model.encoder.final_norm.bias.data
queue_put("final norm", message)
if md.output_layer:
message = {
"weight": torch.cat(
[models[tp_rank].language_model.output_layer.weight.data for tp_rank in range(tp_size)],
dim = 0)
}
queue_put("output layer", message)
# Send BERT lm head and binary head if it exists
if md.model_type == 'BERT':
message = {
"weight": models[0].language_model.pooler.dense.weight.data,
"bias": models[0].language_model.pooler.dense.bias.data
}
queue_put("pooler", message)
message = {
"dense weight": models[0].lm_head.dense.weight.data,
"dense bias": models[0].lm_head.dense.bias.data,
"norm weight": models[0].lm_head.norm.weight.data,
}
if norm_has_bias:
message["norm bias"] = models[0].lm_head.norm.bias.data
queue_put("lm head", message)
if md.bert_binary_head:
message = {
"weight": models[0].binary_head.weight.data,
"bias": models[0].binary_head.bias.data
}
queue_put("binary head", message)
queue.put("done")
def load_checkpoint(queue, args):
try:
_load_checkpoint(queue, args)
except:
queue.put("exit")
raise
|
Megatron-LM-master
|
tools/checkpoint/loader_megatron.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import argparse
import importlib
import torch.multiprocessing as mp
import os
import sys
# A loader is a python file with at least two functions
# - add_arguments - takes in a parser and adds any arguments needed
# - load_checkpoint - takes in the queue and parsed arguments
# A saver is similar but has save_checkpoint instead of
# load_checkpoint
# The loader and saver processes are each given a queue. The loader
# should load the checkpoint and send the weights in messages in the
# following order; the saver should receive them in this order and
# save the checkpoints. A message consists of a python dictionary with
# a "name" for error checking and an entry for each tensor as
# indicated below. Note that the weights sent over the queue are the
# full model weights, nothing split.
# If the loader ever sends "exit" to the queue, that means something
# went wrong and it is exiting.
# - Metadata Namespace with the following attributes:
# model_type - GPT, BERT, T5, etc. (Part of protocol to allow this to be deduced later instead of given on command line)
# num_layers - Number of transformer layers
# hidden_size
# seq_length
# num_attention_heads
# max_position_embeddings
# tokenizer_type
# iteration
# params_dtype
# bert_binary_head - Used only if model_type is BERT
# previous_tensor_parallel_size - Optional
# previous_pipeline_parallel_size - Optional
# true_vocab_size
# make_vocab_size_divisible_by
# consumed_train_samples
# consumed_valid_samples
# messages
# {
# "name": "embeddings"
# "position embeddings"
# "word embeddings"
# }
# (for each transformer layer):
# {
# "name": "transformer layer N"
# "input layernorm weight"
# "input layernorm bias"
# "qkv weight"
# "qkv bias"
# "dense weight"
# "dense bias"
# "post layernorm weight"
# "post layernorm bias"
# "mlp l0 weight"
# "mlp l0 bias"
# "mlp l1 weight"
# "mlp l1 bias"
# }
# {
# "name": "final layer norm"
# "weight"
# "bias"
# }
# if present (i.e. for BERT):
# {
# "name": "pooler"
# "weight"
# "bias"
# }
# {
# "name": "lm head"
# "dense weight"
# "dense bias"
# "layernorm weight"
# "layernorm bias"
# }
# {
# "name": "binary head"
# "weight"
# "bias"
# }
# - "done"
def load_plugin(plugin_type, name):
module_name = f"{plugin_type}_{name}"
try:
plugin = importlib.import_module(module_name)
except ModuleNotFoundError:
module_name = name
try:
plugin = importlib.import_module(module_name)
except ModuleNotFoundError:
sys.exit(f"Unable to load {plugin_type} plugin {name}. Exiting.")
if not hasattr(plugin, 'add_arguments'):
sys.exit(f"{module_name} module is not a plugin. Exiting.")
print(f"Loaded {module_name} as the {plugin_type}.")
return plugin
def main():
import argparse
parser = argparse.ArgumentParser(description="Megatron Checkpoint Utility Arguments",
allow_abbrev=False, conflict_handler='resolve')
parser.add_argument('--model-type', type=str, required=True,
choices=['GPT', 'BERT'],
help='Type of the model')
parser.add_argument('--loader', type=str, default='megatron',
help='Module name to load checkpoint, should be on python path')
parser.add_argument('--saver', type=str, default='megatron',
                        help='Module name to save checkpoint, should be on python path')
parser.add_argument('--load-dir', type=str, required=True,
help='Directory to load model checkpoint from')
parser.add_argument('--save-dir', type=str, required=True,
help='Directory to save model checkpoint to')
parser.add_argument('--max-queue-size', type=int, default=50,
help='Maximum number of tensors in the queue')
parser.add_argument('--no-checking', action='store_false',
help='Do not perform checking on the name and ordering of weights',
dest='checking')
known_args, _ = parser.parse_known_args()
loader = load_plugin('loader', known_args.loader)
saver = load_plugin('saver', known_args.saver)
loader.add_arguments(parser)
saver.add_arguments(parser)
args = parser.parse_args()
queue = mp.Queue(maxsize=args.max_queue_size)
print("Starting saver...")
saver_proc = mp.Process(target=saver.save_checkpoint, args=(queue, args))
saver_proc.start()
print("Starting loader...")
loader.load_checkpoint(queue, args)
print("Waiting for saver to complete...")
saver_proc.join()
if __name__ == '__main__':
main()
|
Megatron-LM-master
|
tools/checkpoint/util.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import argparse
from collections.abc import Mapping
import concurrent.futures
import os
import sys
import torch
def add_arguments(parser):
group = parser.add_argument_group(title='Megatron saver')
group.add_argument('--megatron-path', type=str, default=None,
help='Base directory of Megatron repository')
group.add_argument('--target-tensor-parallel-size', type=int,
help='Target tensor model parallel size, defaults to the tensor parallel size '
'in the input checkpoint if provided by the loader, otherwise to 1')
    group.add_argument('--target-pipeline-parallel-size', type=int,
                       help='Target pipeline model parallel size, defaults to the pipeline parallel size '
                       'in the input checkpoint if provided by the loader, otherwise to 1')
def save_checkpoint(queue, args):
# Search in directory above this
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.path.pardir,
os.path.pardir)))
if args.megatron_path is not None:
sys.path.insert(0, args.megatron_path)
try:
from megatron.arguments import (parse_args, validate_args)
from megatron.checkpointing import save_checkpoint
from megatron.global_vars import set_global_variables, get_args
from megatron.core.enums import ModelType
from megatron.tokenizer.tokenizer import _vocab_size_with_padding
from megatron import fused_kernels
from megatron.core import mpu
except ModuleNotFoundError:
print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.")
exit(1)
def queue_get(name=None):
val = queue.get()
if val == "exit":
print("Loader exited, exiting saver")
exit(1)
if name is not None and args.checking and val["name"] != name:
val_name = val["name"]
print(f'Unexpected message. Expecting "{name}" but got "{val_name}". Exiting saver.')
exit(1)
if name is not None:
print(f"received {name}")
return val
def check_message(msg):
if not args.checking:
return
msg_name = msg.pop("name")
if len(msg.keys()) > 0:
print(f"Unexpected values in {msg_name}:")
for key in msg.keys():
print(f" {key}")
print(f"Exiting. If you want to ignore this, use the argument --no-checking.")
exit(1)
md = queue_get()
if args.target_tensor_parallel_size is None:
if hasattr(md, 'previous_tensor_parallel_size'):
args.target_tensor_parallel_size = md.previous_tensor_parallel_size
else:
print("loader did not provide a tensor parallel size and --target-tensor-parallel-size not provided on command line. "
"Default to 1.")
args.target_tensor_parallel_size = 1
if args.target_pipeline_parallel_size is None:
if hasattr(md, 'previous_pipeline_parallel_size'):
args.target_pipeline_parallel_size = md.previous_pipeline_parallel_size
else:
print("loader did not provide a pipeline parallel size and --target-pipeline-parallel-size not provided on command line. "
"Default to 1.")
args.target_pipeline_parallel_size = 1
# Arguments do sanity checks on the world size, but we don't care,
    # so trick it into thinking there are plenty of processes
if args.target_tensor_parallel_size is not None and args.target_pipeline_parallel_size is not None:
os.environ["WORLD_SIZE"] = f'{args.target_tensor_parallel_size * args.target_pipeline_parallel_size}'
# We want all arguments to come from us
sys.argv = ['script.py',
'--num-layers', str(md.num_layers),
'--hidden-size', str(md.hidden_size),
'--seq-length', str(md.seq_length),
'--num-attention-heads', str(md.num_attention_heads),
'--max-position-embeddings', str(md.max_position_embeddings),
'--position-embedding-type', str(md.position_embedding_type),
'--tokenizer-type', str(md.tokenizer_type),
'--tensor-model-parallel-size', str(args.target_tensor_parallel_size),
'--pipeline-model-parallel-size', str(args.target_pipeline_parallel_size),
'--no-masked-softmax-fusion',
'--no-bias-gelu-fusion',
'--no-bias-dropout-fusion',
'--no-async-tensor-model-parallel-allreduce',
'--use-cpu-initialization',
'--micro-batch-size', '1',
'--no-load-optim',
'--no-load-rng',
'--no-save-optim',
'--no-save-rng',
'--no-initialization',
'--save-interval', '1',
'--save', args.save_dir
]
if md.make_vocab_size_divisible_by is not None:
sys.argv.extend(['--make-vocab-size-divisible-by', str(md.make_vocab_size_divisible_by)])
if md.params_dtype == torch.float16:
sys.argv.append('--fp16')
elif md.params_dtype == torch.bfloat16:
sys.argv.append('--bf16')
if md.output_layer:
sys.argv.append('--untie-embeddings-and-output-weights')
if not md.linear_bias:
sys.argv.append('--disable-bias-linear')
if md.model_type == 'BERT' and not md.bert_binary_head:
sys.argv.append('--bert-no-binary-head')
margs = parse_args()
    if hasattr(md, 'checkpoint_args'):
# These are arguments that we are either changing, or cause problems for validation if they are set
# Note that some of these deal with T5 so will need to be changed if we support T5.
args_to_keep = ['tensor_model_parallel_size', 'pipeline_model_parallel_size', 'world_size', 'params_dtype',
'num_layers_per_virtual_pipeline_stage', 'virtual_pipeline_model_parallel_size',
'masked_softmax_fusion', 'bias_gelu_fusion', 'bias_dropout_fusion',
'sequence_parallel', 'async_tensor_model_parallel_allreduce',
'no_load_optim', 'no_load_rng', 'no_save_optim', 'no_save_rng',
'vocab_file', 'tokenizer_model',
'save_interval', 'save',
'perform_initialization', 'use_cpu_initialization',
'encoder_num_layers', 'encoder_seq_length',
'distribute_saved_activations',
'train_iters', 'lr_decay_iters', 'lr_warmup_iters', 'lr_warmup_fraction',
'start_weight_decay', 'end_weight_decay']
for arg, value in vars(md.checkpoint_args).items():
if arg in args_to_keep:
continue
if not hasattr(margs, arg):
print(f"Checkpoint had argument {arg} but new arguments does not have this.")
continue
if getattr(margs, arg) != value:
print(f"Overwriting default {arg} value {getattr(margs, arg)} with value from checkpoint {value}.")
setattr(margs, arg, value)
validate_args(margs)
set_global_variables(margs, build_tokenizer=False)
# margs = megatron args
margs = get_args()
if hasattr(md, 'consumed_train_samples'):
margs.consumed_train_samples = md.consumed_train_samples
margs.consumed_valid_samples = md.consumed_valid_samples
print(f"Setting consumed_train_samples to {margs.consumed_train_samples}"
f" and consumed_valid_samples to {margs.consumed_valid_samples}")
else:
print("consumed_train_samples not provided.")
# Determine how to make our models
if md.model_type == 'GPT':
from pretrain_gpt import model_provider
margs.model_type = ModelType.encoder_or_decoder
elif md.model_type == 'BERT':
from pretrain_bert import model_provider
margs.model_type = ModelType.encoder_or_decoder
else:
        raise Exception(f'unrecognized model type: {md.model_type}')
def get_models(count, dtype, pre_process, post_process):
models = [model_provider(pre_process, post_process).to(dtype) for _ in range(count)]
return models
# fake initializing distributed
mpu.set_tensor_model_parallel_world_size(args.target_tensor_parallel_size)
mpu.set_pipeline_model_parallel_world_size(args.target_pipeline_parallel_size)
mpu.set_tensor_model_parallel_rank(0)
mpu.set_pipeline_model_parallel_rank(0)
fused_kernels.load(margs)
# Embeddings
#-----------
embeddings_msg = queue_get("embeddings")
pos_embed = None
if md.position_embedding_type == 'learned_absolute':
pos_embed = embeddings_msg.pop("position embeddings")
orig_word_embed = embeddings_msg.pop("word embeddings")
check_message(embeddings_msg)
# Deal with padding
if md.true_vocab_size is not None:
# figure out what our padded vocab size is
orig_vocab_size = orig_word_embed.shape[0]
margs.padded_vocab_size = _vocab_size_with_padding(md.true_vocab_size, margs)
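        # e.g. a true vocab of 32000 with --make-vocab-size-divisible-by 128
        # and a target tp of 8 pads up to the next multiple of 128*8=1024,
        # i.e. 32768 (illustrative numbers).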
# Cut out extra padding we don't need
if orig_vocab_size > margs.padded_vocab_size:
full_word_embed = orig_word_embed[0:margs.padded_vocab_size,:]
# Expanding embedding to larger size by replicating final entry
elif orig_vocab_size < margs.padded_vocab_size:
padding_size = margs.padded_vocab_size - orig_vocab_size
full_word_embed = torch.cat((
orig_word_embed,
orig_word_embed[-1].unsqueeze(0).expand(padding_size, -1)))
# Same size!
else:
full_word_embed = orig_word_embed
else:
print("Original vocab size not specified, leaving embedding table as-is. "
"If you've changed the tensor parallel size this could cause problems.")
margs.padded_vocab_size = orig_word_embed.shape[0]
full_word_embed = orig_word_embed
# Split into new tensor model parallel sizes
out_word_embed = torch.chunk(full_word_embed, args.target_tensor_parallel_size, dim=0)
# Make models for first pipeline stage and fill in embeddings
mpu.set_pipeline_model_parallel_rank(0)
post_process = args.target_pipeline_parallel_size == 1
models = get_models(args.target_tensor_parallel_size, md.params_dtype, True, post_process)
for tp_rank, model in enumerate(models):
model.language_model.embedding.word_embeddings.weight.data.copy_(out_word_embed[tp_rank])
if pos_embed is not None:
model.language_model.embedding.position_embeddings.weight.data.copy_(pos_embed)
else:
assert not hasattr(model.language_model.embedding, "position_embeddings")
# Layernorm has bias; RMSNorm does not.
norm_has_bias = md.checkpoint_args.normalization == "LayerNorm"
# Transformer layers
#-------------------
total_layer_num = 0
for pp_rank in range(args.target_pipeline_parallel_size):
# For later pipeline parallel ranks, make the new models
if pp_rank > 0:
mpu.set_pipeline_model_parallel_rank(pp_rank)
post_process = pp_rank == args.target_pipeline_parallel_size - 1
models = get_models(args.target_tensor_parallel_size, md.params_dtype, False, post_process)
for layer in range(len(models[0].language_model.encoder.layers)):
msg = queue_get(f"transformer layer {total_layer_num}")
# duplicated tensors
input_norm_weight = msg.pop("input norm weight")
if norm_has_bias:
input_norm_bias = msg.pop("input norm bias")
post_norm_weight = msg.pop("post norm weight")
if norm_has_bias:
post_norm_bias = msg.pop("post norm bias")
if md.linear_bias:
dense_bias = msg.pop("dense bias")
mlp_l1_bias = msg.pop("mlp l1 bias")
# Split up the parallel tensors
qkv_weight = torch.chunk(msg.pop("qkv weight"), args.target_tensor_parallel_size, dim=0)
dense_weight = torch.chunk(msg.pop("dense weight"), args.target_tensor_parallel_size, dim=1)
mlp_l1_weight = torch.chunk(msg.pop("mlp l1 weight"), args.target_tensor_parallel_size, dim=1)
# Special handling for swiglu
if md.swiglu:
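                # The loader sent the gate ('W') and up ('V') halves each
                # concatenated across source TP ranks; re-chunk both halves to
                # the target TP size, then re-join W_i with V_i per rank.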
mlp_l0_weight_W = torch.chunk(msg.pop("mlp l0 weight W"), args.target_tensor_parallel_size, dim=0)
mlp_l0_weight_V = torch.chunk(msg.pop("mlp l0 weight V"), args.target_tensor_parallel_size, dim=0)
mlp_l0_weight = [torch.cat(weights, dim=0) for weights in zip(mlp_l0_weight_W, mlp_l0_weight_V)]
else:
mlp_l0_weight = torch.chunk(msg.pop("mlp l0 weight"), args.target_tensor_parallel_size, dim=0)
if md.linear_bias:
qkv_bias = torch.chunk(msg.pop("qkv bias"), args.target_tensor_parallel_size, dim=0)
if md.swiglu:
mlp_l0_bias_W = torch.chunk(msg.pop("mlp l0 bias W"), args.target_tensor_parallel_size, dim=0)
mlp_l0_bias_V = torch.chunk(msg.pop("mlp l0 bias V"), args.target_tensor_parallel_size, dim=0)
mlp_l0_bias = [torch.cat(bias, dim=0) for bias in zip(mlp_l0_bias_W, mlp_l0_bias_V)]
else:
mlp_l0_bias = torch.chunk(msg.pop("mlp l0 bias"), args.target_tensor_parallel_size, dim=0)
# Save them to the model
for tp_rank in range(args.target_tensor_parallel_size):
l = models[tp_rank].language_model.encoder.layers[layer]
l.input_norm.weight.data.copy_(input_norm_weight)
if norm_has_bias:
l.input_norm.bias.data.copy_(input_norm_bias)
l.self_attention.query_key_value.weight.data.copy_(qkv_weight[tp_rank])
l.self_attention.dense.weight.data.copy_(dense_weight[tp_rank])
l.post_attention_norm.weight.data.copy_(post_norm_weight)
if norm_has_bias:
l.post_attention_norm.bias.data.copy_(post_norm_bias)
l.mlp.dense_h_to_4h.weight.data.copy_(mlp_l0_weight[tp_rank])
l.mlp.dense_4h_to_h.weight.data.copy_(mlp_l1_weight[tp_rank])
if md.linear_bias:
l.self_attention.query_key_value.bias.data.copy_(qkv_bias[tp_rank])
l.self_attention.dense.bias.data.copy_(dense_bias)
l.mlp.dense_h_to_4h.bias.data.copy_(mlp_l0_bias[tp_rank])
l.mlp.dense_4h_to_h.bias.data.copy_(mlp_l1_bias)
total_layer_num = total_layer_num + 1
check_message(msg)
if post_process:
msg = queue_get("final norm")
final_norm_weight = msg.pop("weight")
if norm_has_bias:
final_norm_bias = msg.pop("bias")
for tp_rank in range(args.target_tensor_parallel_size):
models[tp_rank].language_model.encoder.final_norm.weight.data.copy_(final_norm_weight)
if norm_has_bias:
models[tp_rank].language_model.encoder.final_norm.bias.data.copy_(final_norm_bias)
if pp_rank != 0 and not md.output_layer:
# Copy word embeddings to final pipeline rank
models[tp_rank].word_embeddings.weight.data.copy_(out_word_embed[tp_rank])
del final_norm_weight
if norm_has_bias:
del final_norm_bias
check_message(msg)
if md.output_layer:
msg = queue_get("output layer")
if not hasattr(models[0].language_model, 'output_layer'):
print("ERROR: got an output layer, but model does not have one")
exit(1)
output_layer_weight = torch.chunk(msg.pop("weight"), args.target_tensor_parallel_size, dim=0)
for tp_rank in range(args.target_tensor_parallel_size):
models[tp_rank].language_model.output_layer.weight.data.copy_(output_layer_weight[tp_rank])
del output_layer_weight
check_message(msg)
msg = queue_get()
if msg != "done" and msg["name"] == "pooler":
if not hasattr(models[0].language_model, 'pooler'):
print("ERROR: got a pooler, but model does not have one")
exit(1)
print("received pooler")
pooler_weight = msg.pop("weight")
pooler_bias = msg.pop("bias")
for tp_rank in range(args.target_tensor_parallel_size):
models[tp_rank].language_model.pooler.dense.weight.data.copy_(pooler_weight)
models[tp_rank].language_model.pooler.dense.bias.data.copy_(pooler_bias)
del pooler_weight
del pooler_bias
check_message(msg)
msg = queue_get()
if msg != "done" and msg["name"] == "lm head":
if not hasattr(models[0], 'lm_head'):
print("ERROR: got an lm head, but model does not have one")
exit(1)
print("received lm head")
lm_head_dense_weight = msg.pop("dense weight")
lm_head_dense_bias = msg.pop("dense bias")
lm_head_norm_weight = msg.pop("norm weight")
if norm_has_bias:
lm_head_norm_bias = msg.pop("norm bias")
for tp_rank in range(args.target_tensor_parallel_size):
models[tp_rank].lm_head.dense.weight.data.copy_(lm_head_dense_weight)
models[tp_rank].lm_head.dense.bias.data.copy_(lm_head_dense_bias)
models[tp_rank].lm_head.norm.weight.data.copy_(lm_head_norm_weight)
if norm_has_bias:
models[tp_rank].lm_head.norm.bias.data.copy_(lm_head_norm_bias)
check_message(msg)
msg = queue_get()
if msg != "done" and msg["name"] == "binary head":
if not hasattr(models[0], 'binary_head'):
print("ERROR: got a binary head, but model does not have one")
exit(1)
print("received binary head")
binary_head_weight = msg.pop("weight")
binary_head_bias = msg.pop("bias")
for tp_rank in range(args.target_tensor_parallel_size):
models[tp_rank].binary_head.weight.data.copy_(binary_head_weight)
models[tp_rank].binary_head.bias.data.copy_(binary_head_bias)
check_message(msg)
msg = queue_get()
if msg != "done":
print("ERROR: got some more data but was expecting to be done")
for tp_rank in range(args.target_tensor_parallel_size):
mpu.set_tensor_model_parallel_rank(tp_rank)
save_checkpoint(md.iteration, [models[tp_rank]], None, None)
print("Done!")
|
Megatron-LM-master
|
tools/checkpoint/saver_megatron.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import json
import os
import sys
import torch
import transformers
from tqdm import tqdm
import types
def add_arguments(parser):
group = parser.add_argument_group(title='Llama-2 HF loader.')
group.add_argument('--true-vocab-size', type=int, default=None,
help='original size of vocab, if specified will trim padding from embedding table.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file. If specified will use this to get vocab size and '
'trim padding from the embedding table.')
group.add_argument('--tokenizer-model', required=True,
help='Sentencepiece tokenizer model.')
group.add_argument('--megatron-path', type=str, default=None,
                       help='Base directory of Megatron repository')
def verify_transformers_version():
    # Tolerate suffixes like '4.31.0.dev0' and compare (major, minor) as a tuple.
    major, minor = map(int, transformers.__version__.split('.')[:2])
    assert (major, minor) >= (4, 31), 'Llama-2 conversion requires transformers >= 4.31.0'
def load_args_from_checkpoint(args):
# Read Llama args.
llama_args_path = os.path.join(args.load, "config.json")
with open(llama_args_path) as f:
llama_args = json.load(f)
# Update Megatron args.
args.seq_length = 4096
args.max_position_embeddings = 4096
args.hidden_size = llama_args["hidden_size"]
args.num_attention_heads = llama_args["num_attention_heads"]
args.num_layers = llama_args["num_hidden_layers"]
args.global_batch_size = 1024
args.norm_epsilon = llama_args["rms_norm_eps"]
args.iteration = 1 # '0', 'release' don't work
args.add_position_embedding = False
args.use_rotary_position_embeddings = True
args.swiglu = True
args.tokenizer_type = "Llama2Tokenizer"
args.fp16 = True
args.normalization = "RMSNorm"
args.add_bias_linear = False
args.apply_query_key_layer_scaling = False
args.untie_embeddings_and_output_weights = True
args.vocab_size = llama_args["vocab_size"]
args.padded_vocab_size = llama_args["vocab_size"]
args.llama = llama_args
args.ffn_hidden_size = llama_args["intermediate_size"]
if "num_key_value_heads" in llama_args:
args.group_query_attention = True
args.num_query_groups = llama_args["num_key_value_heads"]
def set_preprocess_state(args, model, hf_model):
'''Set embedding params.'''
model.language_model.embedding.word_embeddings.weight.data.copy_(
hf_model.model.embed_tokens.weight)
def set_postprocess_state(args, model, hf_model):
'''Set output layer & norm params.'''
model.language_model.encoder.final_norm.weight.data.copy_(hf_model.model.norm.weight)
model.language_model.output_layer.weight.data.copy_(hf_model.lm_head.weight)
def set_attn_state(args, layer, hf_layer):
'''Set self-attention params.'''
# Get attention layer & state.
attn = layer.self_attention
hf_attn = hf_layer.self_attn
# Reshape loaded weights.
tp = args.tensor_model_parallel_size
nh = args.num_attention_heads // tp
ng = (args.num_query_groups if args.group_query_attention \
else args.num_attention_heads) // tp
dim = args.kv_channels
assert nh % ng == 0
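    # Megatron's fused QKV layout packs, per query group, nh//ng query heads
    # followed by one K and one V head; the reshape/cat below reorders the
    # separate HF q/k/v projections into that row order.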
# Copy weights (re-order dimensions for Megatron).
attn.query_key_value.weight.data.copy_(torch.cat([
hf_attn.q_proj.weight.reshape((ng, dim*nh//ng, -1)),
hf_attn.k_proj.weight.reshape((ng, dim, -1)),
hf_attn.v_proj.weight.reshape((ng, dim, -1)),
], dim=1).reshape((-1, args.hidden_size)))
attn.dense.weight.data.copy_(hf_attn.o_proj.weight)
def set_mlp_state(args, layer, hf_layer):
'''Set MLP params.'''
mlp = layer.mlp
hf_mlp = hf_layer.mlp
mlp.dense_h_to_4h.weight.data.copy_(torch.cat([
hf_mlp.gate_proj.weight,
hf_mlp.up_proj.weight,
], dim=0))
mlp.dense_4h_to_h.weight.data.copy_(hf_mlp.down_proj.weight)
def set_layer_state(args, model, hf_model, layer_idx):
'''Set transformer layer params.'''
layer = model.language_model.encoder.layers[layer_idx]
hf_layer = hf_model.model.layers[layer_idx]
set_attn_state(args, layer, hf_layer)
set_mlp_state(args, layer, hf_layer)
layer.input_norm.weight.data.copy_(hf_layer.input_layernorm.weight)
layer.post_attention_norm.weight.data.copy_(hf_layer.post_attention_layernorm.weight)
def load_checkpoint_to_model(args):
'''Set model params.'''
from pretrain_gpt import model_provider
from transformers import LlamaForCausalLM
# Load Huggingface model.
hf_model = LlamaForCausalLM.from_pretrained(args.load, device_map="cpu")
# Init Megatron model.
model = model_provider(True, True).to(args.params_dtype)
# Set model state.
set_preprocess_state(args, model, hf_model)
set_postprocess_state(args, model, hf_model)
for layer_idx in tqdm(range(args.num_layers), "set layer states"):
set_layer_state(args, model, hf_model, layer_idx)
return model
def _load_checkpoint(queue, args):
# Llama-2 requires HF transformers >=4.31.0.
verify_transformers_version()
# Search in directory above this.
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.path.pardir,
os.path.pardir)))
if args.megatron_path is not None:
sys.path.insert(0, args.megatron_path)
try:
from megatron.arguments import parse_args, validate_args
from megatron.global_vars import set_args, set_global_variables
from megatron.model import module
from megatron.core import mpu
from megatron.core.enums import ModelType
from megatron import fused_kernels
except ModuleNotFoundError:
print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.")
queue.put("exit")
exit(1)
# We want all arguments to come from us.
sys.argv = ['script.py',
'--no-masked-softmax-fusion',
'--no-bias-gelu-fusion',
'--no-bias-dropout-fusion',
'--no-async-tensor-model-parallel-allreduce',
'--use-cpu-initialization',
'--micro-batch-size', '1',
'--no-load-optim',
'--no-load-rng',
'--no-save-optim',
'--no-save-rng',
'--no-initialization',
'--load', args.load_dir
]
margs = parse_args()
margs.tokenizer_model = args.tokenizer_model
load_args_from_checkpoint(margs)
# Arguments do sanity checks on the world size, but we don't care,
    # so trick it into thinking there are plenty of processes.
margs.world_size = margs.tensor_model_parallel_size * margs.pipeline_model_parallel_size
margs = validate_args(margs)
def check_for_arg(arg_name, default=None):
if getattr(margs, arg_name, None) is None:
if default is not None:
setattr(margs, arg_name, default)
else:
print(f"Checkpoint does not specify the argument {arg_name}. Exiting.")
print(f"Arguments: {margs}")
queue.put("exit")
exit(1)
check_for_arg('tensor_model_parallel_size')
check_for_arg('pipeline_model_parallel_size')
check_for_arg('num_layers')
check_for_arg('hidden_size')
check_for_arg('seq_length')
check_for_arg('num_attention_heads')
check_for_arg('max_position_embeddings')
check_for_arg('position_embedding_type')
check_for_arg('tokenizer_type')
check_for_arg('iteration')
check_for_arg('bert_binary_head')
check_for_arg('disable_bias_linear', False)
check_for_arg('params_dtype')
check_for_arg('swiglu', False)
# Determine how to make our models.
assert args.model_type == 'GPT', 'Llama-2 is a GPT model.'
margs.model_type = ModelType.encoder_or_decoder
# Suppress warning about torch.distributed not being initialized.
module.MegatronModule.embedding_warning_printed = True
set_global_variables(margs, build_tokenizer=False)
mpu.set_tensor_model_parallel_world_size(margs.tensor_model_parallel_size)
mpu.set_pipeline_model_parallel_world_size(margs.pipeline_model_parallel_size)
mpu.set_virtual_pipeline_model_parallel_world_size(margs.virtual_pipeline_model_parallel_size)
fused_kernels.load(margs)
# Short aliases.
tp_size = margs.tensor_model_parallel_size
pp_size = margs.pipeline_model_parallel_size
vp_size = margs.virtual_pipeline_model_parallel_size
if vp_size is None:
vp_size = 1
# Metadata.
md = types.SimpleNamespace()
md.model_type = args.model_type
md.num_layers = margs.num_layers
md.hidden_size = margs.hidden_size
md.seq_length = margs.seq_length
md.num_attention_heads = margs.num_attention_heads
md.max_position_embeddings = margs.max_position_embeddings
md.tokenizer_type = margs.tokenizer_type
md.iteration = margs.iteration
md.params_dtype = margs.params_dtype
md.bert_binary_head = margs.bert_binary_head
md.output_layer = margs.untie_embeddings_and_output_weights
md.position_embedding_type = margs.position_embedding_type
md.linear_bias = margs.add_bias_linear
md.swiglu = margs.swiglu
md.previous_tensor_parallel_size = margs.tensor_model_parallel_size
md.previous_pipeline_parallel_size = margs.pipeline_model_parallel_size
md.true_vocab_size = None # skips padding in saver
md.make_vocab_size_divisible_by = None
md.checkpoint_args = margs
md.consumed_train_samples = 0
md.consumed_valid_samples = 0
# Get first pipe stage.
mpu.set_tensor_model_parallel_rank(0)
mpu.set_pipeline_model_parallel_rank(0)
model = load_checkpoint_to_model(margs)
queue.put(md)
def queue_put(name, msg):
print(f"sending {name}")
msg["name"] = name
queue.put(msg)
# Send embeddings.
message = {
"word embeddings": model.language_model.embedding.word_embeddings.weight.data
}
if md.position_embedding_type == 'learned_absolute':
message["position embeddings"] = model.language_model.embedding.position_embeddings.weight.data
else:
assert not hasattr(model.language_model.embedding, 'position_embeddings')
queue_put("embeddings", message)
for layer_num in range(margs.num_layers):
message = {}
# Get non-parallel tensors from tp_rank 0.
layer = model.language_model.encoder.layers[layer_num]
message["input norm weight"] = layer.input_norm.weight.data
message["post norm weight"] = layer.post_attention_norm.weight.data
if md.linear_bias:
message["dense bias"] = layer.self_attention.dense.bias.data
message["mlp l1 bias"] = layer.mlp.dense_4h_to_h.bias.data
# Grab all parallel tensors for this layer.
qkv_weight = []
qkv_bias = []
dense_weight = []
mlp_l0_weight = []
mlp_l0_bias = []
mlp_l1_weight = []
layer = model.language_model.encoder.layers[layer_num]
qkv_weight.append(layer.self_attention.query_key_value.weight.data)
dense_weight.append(layer.self_attention.dense.weight.data)
mlp_l0_weight.append(layer.mlp.dense_h_to_4h.weight.data)
mlp_l1_weight.append(layer.mlp.dense_4h_to_h.weight.data)
if md.linear_bias:
qkv_bias.append(layer.self_attention.query_key_value.bias.data)
mlp_l0_bias.append(layer.mlp.dense_h_to_4h.bias.data)
# Handle gated linear units.
if md.swiglu:
# Concat all the first halves ('W's) and all the second halves ('V's).
for tp_rank in range(tp_size):
mlp_l0_weight[tp_rank] = torch.chunk(mlp_l0_weight[tp_rank], 2, dim=0)
message["mlp l0 weight W"] = torch.cat([w[0] for w in mlp_l0_weight], dim=0)
message["mlp l0 weight V"] = torch.cat([w[1] for w in mlp_l0_weight], dim=0)
else:
message["mlp l0 weight"] = torch.cat(mlp_l0_weight, dim=0)
# Simple concat of the rest.
message["qkv weight"] = torch.cat(qkv_weight, dim=0)
message["dense weight"] = torch.cat(dense_weight, dim=1)
message["mlp l1 weight"] = torch.cat(mlp_l1_weight, dim=1)
if md.linear_bias:
message["qkv bias"] = torch.cat(qkv_bias, dim=0)
if md.swiglu:
for tp_rank in range(tp_size):
mlp_l0_bias[tp_rank] = torch.chunk(mlp_l0_bias[tp_rank], 2, dim=0)
message["mlp l0 bias W"] = torch.cat([b[0] for b in mlp_l0_bias],dim=0)
message["mlp l0 bias V"] = torch.cat([b[1] for b in mlp_l0_bias],dim=0)
else:
message["mlp l0 bias"] = torch.cat(mlp_l0_bias, dim=0)
queue_put(f"transformer layer {layer_num}", message)
# Send final norm from tp_rank 0.
message = {
"weight": model.language_model.encoder.final_norm.weight.data,
}
queue_put("final norm", message)
if md.output_layer:
message = {
"weight": model.language_model.output_layer.weight.data
}
queue_put("output layer", message)
queue.put("done")
def load_checkpoint(queue, args):
try:
_load_checkpoint(queue, args)
except:
queue.put("exit")
raise
|
Megatron-LM-master
|
tools/checkpoint/loader_llama2_hf.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import json
import time
import sys
if __name__ == '__main__':
url_filename = sys.argv[1]
data_filename = sys.argv[2]
output_filename = sys.argv[3]
urls = set()
with open(url_filename, 'r') as f:
for line in f:
myjson = json.loads(line)
for key in myjson:
this_urls = myjson[key]
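                # Keep this_urls[0] as the group's representative; only the
                # remaining urls in the group are marked for removal.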
for i in range(1, len(this_urls)):
urls.add(this_urls[i])
print('will be removing {} urls'.format(len(urls)), flush=True)
written_docs = 0
removed_docs = 0
removed_chars = 0
start_time = time.time()
with open(output_filename, 'wb') as fout:
with open(data_filename, 'r') as fin:
for line in fin:
try:
myjson = json.loads(line)
url = myjson['url']
if url in urls:
print('removing', myjson)
removed_docs += 1
removed_chars += len(myjson['text'])
continue
myjson = json.dumps(myjson, ensure_ascii=False)
fout.write(myjson.encode('utf-8'))
fout.write('\n'.encode('utf-8'))
written_docs += 1
if written_docs % 10000 == 0:
print(' [PROCESSED] time (s): {:.2f} | written: {} '
'| removed: {} (char: {})'.format(
time.time() - start_time,
written_docs, removed_docs, removed_chars))
except Exception as e:
print('[SKIPPING]', line, e)
print(' [PROCESSED] time (s): {:.2f} | written: {} '
'| removed: {} (char: {})'.format(
time.time() - start_time,
written_docs, removed_docs, removed_chars))
print('done :-)')
|
Megatron-LM-master
|
tools/openwebtext/remove_group_duplicates.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import argparse
import json
import os
import time
"""
This code adds id to each json object in a json file. User can add prefix
to the ids.
"""
if __name__ == '__main__':
print('parsing the arguments ...')
parser = argparse.ArgumentParser()
parser.add_argument('--input-file', type=str, default=None, help='Input'\
' json file where id needs to be added')
parser.add_argument('--output-file', type=str, default=None, help=\
'Output file name with id')
parser.add_argument('--id-prefix', type=str, default=None, help=\
'Id prefix')
parser.add_argument('--log-interval', type=int, default=100,
help='Log interval')
args = parser.parse_args()
print('Adding ids to dataset ...')
f_input = open(args.input_file, 'r', encoding='utf-8')
f_output = open(args.output_file, 'wb')
unique_ids = 1
start_time = time.time()
for row in f_input:
each_row = json.loads(row)
adlr_id_string = args.id_prefix + '-{:010d}'.format(int(unique_ids))
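        # e.g. --id-prefix=corpus yields ids like 'corpus-0000000001'.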
each_row['adlr_id'] = adlr_id_string
myjson = json.dumps(each_row, ensure_ascii=False)
f_output.write(myjson.encode('utf-8'))
f_output.write('\n'.encode('utf-8'))
if unique_ids % args.log_interval == 0:
print(' processed {:9d} documents in {:.2f} seconds ...'.format( \
unique_ids, time.time() - start_time), flush=True)
unique_ids += 1
# Close the file.
f_input.close()
f_output.close()
print('done :-)', flush=True)
|
Megatron-LM-master
|
tools/openwebtext/add_id.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import glob
import sys
import json
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--json_path", type=str, default=".",
help="path where all the json files are located")
parser.add_argument("--output_file", type=str, default="merged_output.json",
help="filename where the merged json should go")
args = parser.parse_args()
json_path = args.json_path
out_file = args.output_file
json_files = glob.glob(json_path + '/*.json')
counter = 0
with open(out_file, 'w') as outfile:
for fname in json_files:
counter += 1
if counter % 1024 == 0:
print("Merging at ", counter, flush=True)
with open(fname, 'r') as infile:
for row in infile:
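                    # json.loads will raise on a malformed row; the raw line
                    # itself is what gets written through unchanged.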
each_row = json.loads(row)
outfile.write(row)
print("Merged file", out_file, flush=True)
|
Megatron-LM-master
|
tools/openwebtext/merge_jsons.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# WARNING! This file contains a blacklist of known malicious sites and thus contains some NSFW language.
import glob
import re
import time
import tldextract
import sys
# List of the domains to blacklist.
domain_blacklist = set([
'500px',
'aapks',
'akamaihd',
'amazon',
'apple',
'artifactfire',
'artstation',
'awwni',
'bandcamp',
'battleforthenet',
'coinscalendar',
'dailymotion',
'deviantart',
'discord',
'discordapp',
'dlapkandroid',
'dropbox',
'e621',
'ebay',
'edealinfo',
'erome',
'eroshare',
'explosm',
'facebook',
'fbcdn',
'flickr',
'furaffinity',
'futhead',
'gatopardo',
'gfycat',
'gifsound',
'gifsoup',
'giphy',
'github',
'google',
'gunprime',
'gyazo',
'horsefucker',
'hotdealstar',
'imagefap',
'imageshack',
'imgflip',
'imgur',
'instagram',
'karmadecay',
'kryptocal',
'kym-cdn',
'liveleak',
'livememe',
'lmgtfy',
'magaimg',
'memegenerator',
'minorplanetcenter',
'minus',
'mobafire',
'morejpeg',
'nocookie',
'pcpartpicker',
'photobucket',
'pinimg',
'pinterest',
'pixiv',
'pornhub',
'prntscr',
'puu',
'qkme',
'quickmeme',
'radd',
'redd',
'reddit',
'reddit-stream',
'redditlog',
'redditmedia',
'reddituploads',
'redtube',
'reupp',
'reverb',
'roanoke',
'rollingstone',
'sli',
'soundcloud',
'soundgasm',
'spankbang',
'spotify',
'strawpoll',
'streamable',
'timeanddate',
'tinypic',
'touhouradio',
'tumblr',
'twimg',
'twitch',
'twitter',
'vid',
'vimeo',
'vine',
'vkaao',
'vocaroo',
'voyagefusion',
'walmart',
'wciu',
'wikimedia',
'wikipedia',
'xhamster',
'xkcd',
'xvideos',
'youtu',
'youtube',
'youtubedoubler',
'ytimg',
'zillexplorer',
])
def domain_is_in_blacklist(url):
domain = tldextract.extract(url).domain
return domain in domain_blacklist
# List of extensions to blacklist.
extentions_blacklist = (
'.3gp',
    '.7z',
'.ai',
'.aif',
'.apk',
'.app',
'.avi',
'.bin',
'.bmp',
'.bz2',
'.css',
'.csv',
'.dat',
'.deb',
'.dmg',
'.doc',
'.docx',
'.exe',
'.gif',
'.gifv',
'.gz',
'.iso',
'.jar',
'.jpeg',
'.jpg',
'.js',
'.log',
'.mid',
'.midi',
'.mkv',
'.mov',
'.mp3',
'.mp4',
'.mpeg',
'.mpg',
'.ogg',
'.ogv',
'.otf',
'.pdf',
'.pkg',
'.png',
'.pps',
'.ppt',
'.pptx',
'.psd',
'.py',
'.qt',
'.ram',
'.rar',
'.sql',
'.svg',
'.swf',
'.tar.gz',
'.tar',
'.tgz',
'.tiff',
'.ttf',
'.txt',
'.wav',
'.webm',
'.wma',
'.wmv',
'.xls',
'.xlsx',
'.xml',
'.xz',
'.zip',
)
def extention_is_in_blacklist(url):
if url.split('?')[0].lower().endswith(extentions_blacklist):
return True
return False
# Malformed urls.
# This function is adapted from:
# https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not
url_regex = re.compile(
r'^(?:http)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def url_is_malformed(url):
return re.match(url_regex, url) is None
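# e.g. url_is_malformed('https://example.com/page?id=3') -> False, while
# url_is_malformed('htp:/broken') -> True.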
def print_progress(prefix, start_time, urls_counter,
domain_blacklist_counter,
extention_blacklist_counter,
short_url_counter, malformed_url_counter,
duplicate_url_counter):
string = prefix + ' | '
string += 'time elapsed (s): {:.2f} | '.format(time.time() - start_time)
string += 'number of urls: {} | '.format(urls_counter)
string += 'domain blacklisted: {} | '.format(domain_blacklist_counter)
    string += 'extension blacklisted: {} | '.format(extention_blacklist_counter)
string += 'short urls (<=8): {} | '.format(short_url_counter)
string += 'malformed urls: {} | '.format(malformed_url_counter)
string += 'duplicate urls: {}'.format(duplicate_url_counter)
print(string, flush=True)
if __name__ == '__main__':
print('remove blacklisted urls ..')
# Path to the url files.
path = sys.argv[1]
# Output url file.
output = sys.argv[2]
# Get the list of url files.
files = glob.glob(path + '/*.txt')
print('> found {} files'.format(len(files)))
urls = set()
urls_counter = 0
domain_blacklist_counter = 0
extention_blacklist_counter = 0
short_url_counter = 0
malformed_url_counter = 0
duplicate_url_counter = 0
start_time = time.time()
for filename in files:
with open(filename, 'r') as f:
for line in f:
url = line.strip()
urls_counter += 1
if domain_is_in_blacklist(url):
print('[DOMAIN BLACKLIST]: {}'.format(url), flush=True)
domain_blacklist_counter += 1
elif extention_is_in_blacklist(url):
                    print('[EXTENSION BLACKLIST]: {}'.format(url), flush=True)
extention_blacklist_counter += 1
elif len(url) <= 8:
print('[SHORT URL]: {}'.format(url), flush=True)
short_url_counter += 1
elif url_is_malformed(url):
print('[MALFORMED URL]: {}'.format(url), flush=True)
malformed_url_counter += 1
elif url in urls:
print('[DUPLICATE URL]: {}'.format(url), flush=True)
duplicate_url_counter += 1
else:
urls.add(url)
if urls_counter % 100000 == 0:
print_progress('PROGRESS', start_time, urls_counter,
domain_blacklist_counter,
extention_blacklist_counter,
short_url_counter, malformed_url_counter,
duplicate_url_counter)
print_progress('FINAL', start_time, urls_counter,
domain_blacklist_counter,
extention_blacklist_counter,
short_url_counter, malformed_url_counter,
duplicate_url_counter)
# Write the final set of urls.
print('> writing cleaned up url list to {}'.format(output))
with open(output, 'w') as f:
for url in urls:
f.write(url + '\n')
print('done :-)')
|
Megatron-LM-master
|
tools/openwebtext/blacklist_urls.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""
Filter and clean documents:
Removes docs with fewer than 512 characters, docs with fewer than 256
characters that contain javascript, fixes text with ftfy, and applies
dataset-specific cleaning (e.g. for the stories and realnews datasets).
Program arguments have the details.
"""
import argparse
from functools import partial
import glob
import ftfy
import json
from langdetect import detect
import multiprocessing
import os
from pathlib import Path
import re
import time
def process_doc(json_line, args):
# Read the line.
document = json.loads(json_line)
text = document['text']
output = {'remove_512': False, 'remove_256_javascript': False, \
'remove_512_non_english': False, 'ftfy_fix_text': False, \
'general_cleaning': False}
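    # Each task branch returns (flags, text, document, to_filter); to_filter
    # True routes the doc to the *_filtered file, False to the *_cleaned file.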
try:
        # Remove all docs with fewer than 512 characters
if "remove_512" in args.tasks:
if len(text) < 512:
output['remove_512'] = True
return output, text, document, True
# Remove docs if less than 256 character length and contains Javascript
if "remove_256_javascript" in args.tasks:
if len(text) < 256 and 'javascript' in text.lower():
output['remove_256_javascript'] = True
return output, text, document, True
# Remove docs < 512 and nonenglish
if "remove_512_non_english" in args.tasks:
if len(text) < 512 and detect(text) != 'en':
output['remove_512_non_english'] = True
return output, text, document, True
# Fix the text using ftfy, don't remove the text, hence return False
if "ftfy_fix_text" in args.tasks:
fixed_text = ftfy.fix_text(text)
output['ftfy_fix_text'] = True
return output, fixed_text, document, False
# Cleaning extra spaces and newlines
if "general_cleaning" in args.tasks:
cleaned_text = re.sub(r" +|\b\n+ |\b\n+", " ", text)
#cleaned_text = re.sub(r"\n\n+", "\n\n", text) # used this for Gutenberg dataset
#cleaned_text = re.sub(r"\n", "\n\n", text) # Used this for realnews
# stories datasets
#cleaned_text = re.sub(r" \'", "'", text)
#cleaned_text = re.sub(r" \!", "!", cleaned_text)
#cleaned_text = re.sub(r" \.", ".", cleaned_text)
#cleaned_text = re.sub(r" \?", "?", cleaned_text)
#cleaned_text = re.sub(r" - ", "-", cleaned_text)
##cleaned_text = re.sub(r"\" ", "\"", cleaned_text)
#cleaned_text = re.sub(r" @ ", "@", cleaned_text)
output['general_cleaning'] = True
return output, cleaned_text, document, False
except Exception as e:
print('Error: *************************\n{}\ntext: {}'.format(e, \
text), flush=True)
return output, text, document, True
# don't remove
return output, text, document, False
def process_set(args, input_file, output_f_cleaned, output_f_filtered):
print(' > working on {} ...'.format(input_file), flush=True)
num_docs = num_remove_512 = num_remove_java = num_remove_512_non_english \
= num_ftfy_fix_text = num_general_cleaning = 0
# Output file and counters.
output_cleaned = open(output_f_cleaned, 'wb')
output_filtered = open(output_f_filtered, 'wb')
start_time = time.time()
# Setup multi-processing.
num_workers = 40
fin = open(input_file, 'r', encoding='utf-8')
pool = multiprocessing.Pool(num_workers)
process_doc_partial = partial(process_doc, args=args)
processed_docs = pool.imap(process_doc_partial, fin, 500)
# Process documents.
for output, text, document, to_filter in processed_docs:
num_docs += 1
num_remove_512 += 1 if output['remove_512'] else 0
num_remove_java += 1 if output['remove_256_javascript'] else 0
num_remove_512_non_english += 1 if output['remove_512_non_english'] \
else 0
num_ftfy_fix_text += 1 if output['ftfy_fix_text'] else 0
num_general_cleaning += 1 if output['general_cleaning'] else 0
document['text'] = text
myjson = json.dumps(document, ensure_ascii=False)
if to_filter:
output_filtered.write(myjson.encode('utf-8'))
output_filtered.write('\n'.encode('utf-8'))
else:
output_cleaned.write(myjson.encode('utf-8'))
output_cleaned.write('\n'.encode('utf-8'))
if num_docs % args.log_interval == 0:
print(' processed {:9d} documents in {:.2f} seconds ...'.format(
num_docs, time.time() - start_time), flush=True)
# Close the file.
output_cleaned.close()
output_filtered.close()
fin.close()
# Print stats.
print(' >> total docs: {} remove_512 {} remove_256_javascript {} '\
'remove_512_non_english {} ftfy_fix_text {} general_cleaning {}'.\
format(num_docs, num_remove_512, num_remove_java,\
num_remove_512_non_english, num_ftfy_fix_text, \
num_general_cleaning), flush=True)
if __name__ == '__main__':
print('parsing the arguments ...')
parser = argparse.ArgumentParser()
parser.add_argument('--input-files', nargs = '*', required=True, default=\
None, help = 'Input json files that needs to be'\
' cleaned')
parser.add_argument('--tasks', nargs = '*', required=True, default=None,\
help = 'Tasks to perform on the input files, ' \
'such as remove_512, remove_256_javascript, ' \
'remove_512_non_english, ftfy_fix_text, and ' \
'general_cleaning. 256 or 512 means the number' \
' of characters.')
parser.add_argument('--output-path', type=str, default=None,
help='Directory where the output should go')
parser.add_argument('--log-interval', type=int, default=100,
help='Log interval')
args = parser.parse_args()
print('cleanup dataset ...')
for input_file in args.input_files:
input_filename, input_filename_ext = os.path.splitext(Path(input_file)\
.name)
output_f_cleaned = os.path.join(args.output_path, input_filename + \
"_cleaned" + input_filename_ext)
output_f_filtered = os.path.join(args.output_path, input_filename + \
"_filtered" + input_filename_ext)
process_set(args, input_file, output_f_cleaned, output_f_filtered)
print('done :-)', flush=True)
|
Megatron-LM-master
|
tools/openwebtext/cleanup_fix_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import json
import time
import sys
if __name__ == '__main__':
print('grouping duplicate urls ...')
input = sys.argv[1]
output = sys.argv[2]
if len(sys.argv) > 3:
jaccard_similarity_threshold = float(sys.argv[3])
else:
jaccard_similarity_threshold = 0.7
url_to_index = {}
index_to_urls = []
counter = 0
start_time = time.time()
with open(input, 'r') as f:
for line in f:
counter += 1
myjson = json.loads(line)
urls = []
for main_url in myjson.keys():
urls.append(main_url)
for value in myjson[main_url]:
for other_url, js in value.items():
if js >= jaccard_similarity_threshold:
urls.append(other_url)
current_index = -1
other_indices = set()
for url in urls:
if url in url_to_index:
if current_index == -1:
current_index = url_to_index[url]
elif current_index != url_to_index[url]:
other_indices.add(url_to_index[url])
if current_index == -1:
current_index = len(index_to_urls)
index_to_urls.append(set())
for url in urls:
url_to_index[url] = current_index
index_to_urls[current_index].add(url)
for index in other_indices:
for url in index_to_urls[index]:
index_to_urls[current_index].add(url)
url_to_index[url] = current_index
index_to_urls[index] = None
if counter % 100000 == 0:
print(' > processed {} lines in {} seconds ...'.format(
counter, time.time() - start_time))
total_remove = 0
total_remain = 0
for urls in index_to_urls:
if urls is not None:
if len(urls) > 1:
total_remove += (len(urls) - 1)
total_remain += 1
print('out of {} urls, only {} are unique and {} should be removed'.format(
total_remove+total_remain, total_remain, total_remove))
with open(output, 'wb') as f:
for i, urls in enumerate(index_to_urls):
if urls is not None:
if len(urls) > 1:
myjson = json.dumps({str(i): list(urls)},
ensure_ascii=False)
f.write(myjson.encode('utf-8'))
f.write('\n'.encode('utf-8'))
|
Megatron-LM-master
|
tools/openwebtext/group_duplicate_url.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import argparse
from functools import partial
import itertools
import json
from lsh import cache, minhash
import multiprocessing
import numpy as np
import time
import pickle
import sys
import os
# This function is adapted from:
# https://github.com/mattilyra/LSH/blob/master/examples/Introduction.ipynb
def shingles(text, char_ngram=5):
    return set(text[head:head + char_ngram]
               for head in range(0, len(text) - char_ngram + 1))
# This function is adapted from:
# https://github.com/mattilyra/LSH/blob/master/examples/Introduction.ipynb
def jaccard(set_a, set_b, args):
if len(set_a) < 1 or len(set_b) < 1:
return 0.0
intersection = set_a & set_b
union = set_a | set_b
if args.jaccard == 'min':
return len(intersection) / min(len(set_a), len(set_b))
elif args.jaccard == 'max':
return len(intersection) / max(len(set_a), len(set_b))
else:
return len(intersection) / len(union)
def compute_fingerprint(line, key):
try:
myjson = json.loads(line)
url = myjson[key]
text = myjson['text']
fingerprint = hasher.fingerprint(text)
except Exception as e:
print('Error:', e)
return None, None, None, False
return url, text, fingerprint, True
def url_pairs_to_remove(args, bucket_urls, url_doc):
remove_urls_list = []
deduped_local, counter_local = 0, 0
iteration = 0
while len(bucket_urls) > 1:
if args.heuristic_iter != -1 and \
iteration == args.heuristic_iter:
break
items = list(bucket_urls)
remove_urls = []
main_url = items[np.random.randint(0, len(items))]
        main_shingles = shingles(url_doc[main_url])
for i in range(0, len(items)):
counter_local += 1
other_url = items[i]
if other_url == main_url:
continue
other_shingles = shingles(url_doc[other_url])
try:
                jaccard_sim = jaccard(main_shingles, other_shingles, args)
except Exception as e:
print('Error:', e)
jaccard_sim = 0.0
if jaccard_sim > 0.5:
remove_urls.append({other_url: jaccard_sim})
deduped_local += 1
bucket_urls.remove(other_url)
bucket_urls.remove(main_url)
if len(remove_urls) > 0:
remove_urls_list.append({main_url: remove_urls})
iteration += 1
return remove_urls_list, deduped_local, counter_local
def write_remove_urls_list(remove_urls_list, f_out):
if len(remove_urls_list) > 0:
for each_url_remove in remove_urls_list:
myjson = json.dumps(each_url_remove, ensure_ascii=False)
f_out.write(myjson.encode('utf-8'))
f_out.write('\n'.encode('utf-8'))
def compute_jaccard(each_bin, num_bins, start_time_local):
remove_urls_list = []
deduped_local, counter_local, bucket_local = 0, 0, 0
for bucket_id in each_bin:
bucket_local += 1
if os.getpid() % num_bins == 0 and bucket_local % 100000 == 0:
print("Counter {}, progress {:.2f} time {:.2f}".\
format(bucket_local, float(bucket_local)/float(len(each_bin)),\
time.time() - start_time_local), flush=True)
if len(each_bin[bucket_id]) <= 1:
continue
bucket_urls = each_bin[bucket_id].copy()
remove_urls_list_sub, deduped_local_sub, counter_local_sub = \
url_pairs_to_remove(args, bucket_urls, url_doc)
deduped_local += deduped_local_sub
counter_local += counter_local_sub
if len(remove_urls_list_sub) > 0:
remove_urls_list.extend(remove_urls_list_sub)
return remove_urls_list, deduped_local, counter_local
def find_pair_urls_parallel(args, lshcache, url_doc):
start_time = time.time()
f_out = open(args.output, 'wb')
deduped, counter = 0, 0
# compute jaccards of buckets in bin in parallel (parallelism
# limited to # of bins)
num_bins = len(lshcache.bins)
pool = multiprocessing.Pool(num_bins)
compute_jaccard_partial = partial(compute_jaccard, num_bins=num_bins, \
start_time_local=start_time)
# don't need to pass args and url_doc as they are already shared
compute_jaccard_iter = pool.imap(compute_jaccard_partial, lshcache.bins)
print("multiprocessing init took {:.2f}".format(time.time() - start_time),\
flush=True)
for remove_urls_list, deduped_local, counter_local in compute_jaccard_iter:
deduped += deduped_local
counter += counter_local
write_remove_urls_list(remove_urls_list, f_out)
    print(' [write]> processed {} documents in {:.2f} '
          'seconds and deduped {} documents ...'.format(
              counter, time.time() - start_time, deduped), flush=True)
pool.close()
pool.join()
f_out.close()
    print(' Time taken for jaccard similarities {:.2f} seconds'.format(
        time.time() - start_time), flush=True)
def find_pair_urls_sequential(args, lshcache, url_doc):
start_time = time.time()
f_out = open(args.output, 'wb')
deduped, counter = 0, 0
for b in lshcache.bins:
for bucket_id in b:
if len(b[bucket_id]) <= 1:
continue
bucket_urls = b[bucket_id].copy()
remove_urls_list_sub, deduped_local_sub, counter_local_sub = \
url_pairs_to_remove(args, bucket_urls, url_doc)
deduped += deduped_local_sub
counter += counter_local_sub
write_remove_urls_list(remove_urls_list_sub, f_out)
if counter % 10000 == 0:
                print(' [write]> processed {} documents in {:.2f} '
                      'seconds and deduped {} documents ...'.
                      format(counter, time.time() - start_time,
                             deduped), flush=True)
f_out.close()
    print(' [write]> processed {} documents in {:.2f} '
          'seconds and deduped {} documents ...'.
          format(counter, time.time() - start_time,
                 deduped), flush=True)
if __name__ == '__main__':
print('parsing the arguments ...')
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1234,
help='Random seed used for python, numpy')
parser.add_argument('--inputs', nargs = '*', default=None, help = \
'Pairwise list of the input files and keys, '
'e.g. --inputs cc.json cc_id news.json news_id')
parser.add_argument('--load-fingerprints', nargs = '*', default=None,
help='Load fingerprints from a list of pickle files,'
' e.g. cc.pkl news.pkl')
parser.add_argument('--save-fingerprints', type=str, default=None,
help='Save the fingerprints of the inputs.')
parser.add_argument('--output', type=str, default=None,
help='Output file name that consists of all ids'
' with matching similarities')
parser.add_argument('--jaccard', type=str, default='union',
choices=['union', 'min', 'max'], help='Jaccard'\
' similarity computation')
parser.add_argument('--heuristic-iter', type=int, default=1,
help='Number of iterations to run the heuristics'
': use -1 for exact')
parser.add_argument('--num-bands', type=int, default=10,
help='Number of bands to use in cache')
parser.add_argument('--num-seeds', type=int, default=100,
help='Number of seeds to use for minhash. Note that'
' this value should be divisible by num-bands')
parser.add_argument('--jaccard-parallel', action='store_true',
help='Use this to process large number of documents.')
args = parser.parse_args()
print('finding possible duplicate content ...')
    # set seed and get an array of num_seeds random integers
np.random.seed(args.seed)
seeds = np.random.randint(0, 1e6, size=args.num_seeds)
# initialize minhash and lsh cache
hasher = minhash.MinHasher(seeds=seeds, char_ngram=5, hashbytes=4)
lshcache = cache.Cache(num_bands=args.num_bands, hasher=hasher)
url_doc = {}
# load fingerprints from pickle file if needed
if args.load_fingerprints is not None:
for count_fp, fp_file_name in enumerate(args.load_fingerprints):
print("Loading fingerprints from pickle file {}".format(
fp_file_name), flush=True)
fp = open(fp_file_name, "rb")
if count_fp == 0:
# assign directory for the first pkl
lshcache = pickle.load(fp)
url_doc = pickle.load(fp)
else:
# append these to lshcache and url_doc
local_lshcache = pickle.load(fp)
local_url_doc = pickle.load(fp)
for url in local_lshcache.fingerprints.keys():
url_doc[url] = local_url_doc[url]
lshcache.add_fingerprint(local_lshcache.fingerprints[url], url)
fp.close()
counter = 0
start_time = time.time()
    # compute fingerprints of the inputs, if any; each input is given as an
    # input file plus the json key to use as its id
if args.inputs is not None:
print("Computing fingerprints", flush=True)
assert len(args.inputs) % 2 == 0
for input_file, key in zip(args.inputs[::2], args.inputs[1::2]):
print(' document processing {} with key {}'.format(input_file, key),
flush=True)
# compute fingerprints in parallel
num_workers = 40
pool = multiprocessing.Pool(num_workers)
fin = open(input_file, 'r', encoding='utf-8')
compute_fingerprint_partial = partial(compute_fingerprint, key=key)
compute_fingerprint_iter = pool.imap(compute_fingerprint_partial,
fin, 512)
# traverse all the texts and add fingerprints
for url, text, fingerprint, flag in compute_fingerprint_iter:
counter += 1
if flag:
url_doc[url] = text
lshcache.add_fingerprint(fingerprint, url)
if counter % 10000 == 0:
print(' [read]> processed {} documents in {:.2f} '
'seconds ...'.format(counter, time.time() - \
start_time), flush=True)
fin.close()
pool.close()
pool.join()
# Save the fingerprints if needed
if args.save_fingerprints is not None:
print("Saving fingerprints to pickle file {}".format(
args.save_fingerprints), flush=True)
with open(args.save_fingerprints, 'wb') as f_save:
pickle.dump(lshcache, f_save)
pickle.dump(url_doc, f_save)
# compute jaccard index of the input texts and write to file if needed
if args.output is not None:
print("Compute jaccard similarity", flush=True)
if args.jaccard_parallel:
find_pair_urls_parallel(args, lshcache, url_doc)
else:
find_pair_urls_sequential(args, lshcache, url_doc)
print('done :-)')
|
Megatron-LM-master
|
tools/openwebtext/find_duplicates.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import ftfy
import json
from langdetect import detect
import numpy as np
import time
import os
import sys
from tokenizer import Tokenizer
MIN_DOCUMENT_LENGTH = 128
def print_progress(prefix, start_time, num_docs, num_fixed_text,
num_non_english_docs, chars_non_english_docs,
num_small_docs, chars_small_docs):
string = prefix + ' | '
string += 'elapsed time: {:.2f} | '.format(time.time() - start_time)
string += 'documents: {} | '.format(num_docs)
string += 'fixed text: {} | '.format(num_fixed_text)
string += 'non-english: {} | '.format(num_non_english_docs)
string += 'non-english chars: {} | '.format(chars_non_english_docs)
string += 'small docs: {} | '.format(num_small_docs)
string += 'small docs chars: {}'.format(chars_small_docs)
print(string, flush=True)
def filter_corpus(filename, out_filename, print_interval=10000):
print(' > filtering {}'.format(filename))
tokenizer = Tokenizer(cache_dir='./cache')
num_docs = 0
num_written_docs = 0
num_small_docs = 0
num_fixed_text = 0
num_non_english_docs = 0
chars_non_english_docs = 0
chars_small_docs = 0
start_time = time.time()
with open(out_filename, 'wb') as f:
with open(filename, 'r') as fin:
for line in fin:
try:
num_docs += 1
myjson = json.loads(line)
# Fix text
text = ftfy.fix_text(myjson['text'])
if text != myjson['text']:
num_fixed_text += 1
myjson['text'] = text
# Detect language.
if detect(text) != 'en':
print('[non-english text]', myjson)
num_non_english_docs += 1
chars_non_english_docs += len(text)
continue
# On average each token is 5 characters so 8 is an
# upper bound.
                    if len(text) < (8 * MIN_DOCUMENT_LENGTH):
                        tokens = tokenizer.tokenize_document(text)
                        if len(tokens) < MIN_DOCUMENT_LENGTH:
print('[small document, skipping]:', myjson)
num_small_docs += 1
chars_small_docs += len(text)
continue
myjson = json.dumps(myjson, ensure_ascii=False)
f.write(myjson.encode('utf-8'))
f.write('\n'.encode('utf-8'))
num_written_docs += 1
if num_docs % print_interval == 0:
print_progress('[PROGRESS]', start_time, num_docs,
num_fixed_text, num_non_english_docs,
chars_non_english_docs,
num_small_docs, chars_small_docs)
except Exception as e:
print(' skipping ', line, e)
print_progress('[FINAL]', start_time, num_docs,
num_fixed_text, num_non_english_docs,
chars_non_english_docs,
num_small_docs, chars_small_docs)
if __name__ == '__main__':
print('building gpt2 dataset ...')
input_filename = sys.argv[1]
output_filename = sys.argv[2]
print('will be reading {}'.format(input_filename))
print('and will write the results to {}'.format(output_filename))
filter_corpus(input_filename, output_filename)
|
Megatron-LM-master
|
tools/openwebtext/cleanup_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""
Deduplicate downstream tasks from the training dataset. 13-grams are used.
Split documents with fewer than 200 characters are filtered out, as is any
document that ends up with more than 10 splits.
"""
import argparse
from functools import partial
import json
import multiprocessing
import nltk
import pickle
import re
import string
import sys
import time
def get_words(text):
# get all the lowercase words from text
words, positions = [], []
for match in re.finditer(r'\w+', text.lower()):
words.append(match.group(0))
positions.append(match.start())
return words, positions
# splits the text
def split_text(text, start_position, remove_char_each_side, seq):
# first part of the text
punctuations = ".!?"
pos = start_position - remove_char_each_side
text_first = ""
    while pos > 0 and text[pos] not in punctuations:
pos -= 1
if pos > 0:
text_first = text[0:pos+1]
# add length of seq and remove_char_each_side
pos = start_position + len(seq) + remove_char_each_side
# last part of the text
text_second = ""
    while pos < len(text) and text[pos] not in punctuations:
pos += 1
if pos + 1 < len(text):
text_second = text[pos+1:len(text)]
return text_first, text_second
def check_and_clean_text(args, words, ngrams, text, start_position, \
text_buf_ngram_free, text_buf, local_ngram):
seq = " ".join(words)
if seq in ngrams:
print(" [matched]: {}".format(seq), flush=True)
if args.get_ngram_freq_only:
# increase freq of this seq and then only consider the later part
# of the text for further processing
if seq in local_ngram:
local_ngram[seq] += 1
else:
local_ngram[seq] = 1
#print(" [increased]: {} {}".format(seq, ngrams[seq]), flush=True)
if (start_position + len(seq) + 1) < len(text):
text_buf.append(text[start_position + len(seq) + 1:len(text)])
return False
# split the text
text_first, text_second = split_text(text, start_position, \
args.remove_char_each_side, seq)
# first part of ngrams free
if len(text_first) > args.filter_text_char_len:
text_buf_ngram_free.append(text_first)
# add second part for further processing
if len(text_second) > args.filter_text_char_len:
text_buf.append(text_second)
return False # not ngram free
# ngram free
return True
def free_ngram(line, args, key, ngrams, ngrams_freq_sorted):
# remove all the ngrams
try:
myjson = json.loads(line)
text_buf = [myjson[key]]
except Exception as e:
print("Error: {}".format(e), flush=True)
text_buf = []
text_buf_ngram_free = []
local_ngram = {}
while len(text_buf) > 0:
# get the first one from the buffer
text = text_buf.pop(0)
words, positions = get_words(text)
ngram_free = True
# find each max n-grams and check dictionary
for i in range(len(words) - args.max_ngram_size + 1):
check_ngram_free = check_and_clean_text(args, words[i:\
i+args.max_ngram_size], ngrams, text, positions[i], \
text_buf_ngram_free, text_buf, local_ngram)
            # if the seq was not ngram free, stop checking this text
if not check_ngram_free:
ngram_free = False
break
            # if the max ngram doesn't match, check whether any lower-order
            # n-gram within the max ngram matches
for ngram_len, _ in ngrams_freq_sorted:
check_ngram_free = check_and_clean_text(args, words[i:\
i+ngram_len], ngrams, text, positions[i], \
text_buf_ngram_free, text_buf, local_ngram)
# same check as above
if not check_ngram_free:
ngram_free = False
break
# check break from lower than max ngram loop above
if not ngram_free:
break
# for the last max n-gram, check all the lower ngrams in it
if ngram_free and len(words) - args.max_ngram_size > 0:
        # get the words of the last max-ngram window
last_seq_words = words[(len(words)-args.max_ngram_size):len(words)]
last_seq_start_position = len(words) - args.max_ngram_size
# check all n-grams lower than the max
for pos, (ngram_len, _) in enumerate(ngrams_freq_sorted):
            # ignore the max ngram as it has been considered already
if ngram_len == args.max_ngram_size:
continue
# find each ngram of ngram_len in max n-grams and check
for i in range(len(last_seq_words) - ngram_len + 1):
check_ngram_free = check_and_clean_text(args, \
last_seq_words[i:i+ngram_len], ngrams, text,\
positions[last_seq_start_position+i], \
text_buf_ngram_free, text_buf, local_ngram)
if not check_ngram_free:
ngram_free = False
break
if not ngram_free:
break
# texts are ngram free
if ngram_free and not args.get_ngram_freq_only:
text_buf_ngram_free.append(text)
# check if the text has only been trimmed
trimmed = 0
if not args.get_ngram_freq_only and len(text_buf_ngram_free) == 1 and \
len(text_buf_ngram_free[0]) < len(myjson[key]):
trimmed = 1
return text_buf_ngram_free, trimmed, myjson, local_ngram
# insert word sequence into dictionary
def insert_dict(words, ngrams, pos):
seq = " ".join(words)
if seq not in ngrams:
ngrams[seq] = 0
#ngrams[seq] = pos
# insert each ngram from text into the ngrams dictionary
def compute_ngrams_insert_dict(args, text, ngrams):
words, positions = get_words(text)
if len(words) < args.min_ngram_size:
return
if len(words) < args.max_ngram_size:
insert_dict(words, ngrams, positions[0])
for i in range(len(words) - args.max_ngram_size+1):
insert_dict(words[i:i+args.max_ngram_size], ngrams, positions[i])
# Build ngrams for the lambada dataset
def process_task_lambda(args, task_file, ngrams):
print(' reading from {} and computing ngrams'.format(task_file))
with open(task_file, 'r') as f:
for line in f:
try:
myjson = json.loads(line)
text = myjson['text']
compute_ngrams_insert_dict(args, text, ngrams)
except Exception as e:
print('Error:', e)
print(" Entities in ngrams {}".format(len(ngrams)), flush=True)
# Build ngrams for the dataset of the given task
def process_task(args, task_name, ngrams):
    print(' reading from huggingface datasets and computing ngrams')
print(" Current entities in ngrams {}".format(len(ngrams)), flush=True)
# using validation/test data from datasets
from datasets import load_dataset
entities_in_ngrams = len(ngrams)
# load the dataset
if task_name == 'squad':
dataset = load_dataset('squad_v2', split='validation')
elif task_name == 'natural_questions':
dataset = load_dataset('natural_questions', split='validation')
elif task_name == 'triviaqa':
dataset = load_dataset('trivia_qa', 'unfiltered', split='test')
elif task_name == 'webqa':
dataset = load_dataset('web_questions', split='test')
elif task_name == 'race':
dataset = load_dataset('race', 'all', split='test')
elif task_name == 'drop':
dataset = load_dataset('drop', split='validation')
elif task_name == 'coqa':
dataset = load_dataset('coqa', split='validation')
elif task_name == 'piqa':
dataset = load_dataset('piqa', split='test')
else:
print("Invalid task name: {}".format(task_name), flush=True)
return
# read the dataset and add to ngrams
for line in dataset:
try:
if task_name in ['squad', 'triviaqa', 'webqa', 'race', 'drop']:
text = line['question']
compute_ngrams_insert_dict(args, text, ngrams)
elif task_name == 'natural_questions':
text = line['question']['text']
compute_ngrams_insert_dict(args, text, ngrams)
elif task_name == 'coqa':
all_questions = line['questions']
for question in all_questions:
compute_ngrams_insert_dict(args, question, ngrams)
elif task_name == 'piqa':
text = line['goal']
compute_ngrams_insert_dict(args, text, ngrams)
except Exception as e:
print('Error:', e)
print(" After task {} entities in ngrams {}, added {}".format(task_name, \
len(ngrams), len(ngrams) - entities_in_ngrams), flush=True)
def compute_tasks_ngrams(args, ngrams):
start_time = time.time()
for _, task_name in enumerate(args.tasks):
print('Task: {}'.format(task_name), flush=True)
if task_name == 'lambada':
assert args.lambada_path is not None
process_task_lambda(args, args.lambada_path, ngrams)
else:
process_task(args, task_name, ngrams)
print(" Taken time to compute ngrams {:.2f}".format(time.time() - \
start_time), flush=True)
def compute_ngram_freq_sorted(args, ngrams):
ngrams_freq = {}
for ngram_key in ngrams.keys():
length = len(ngram_key.split())
ngrams_freq[length] = ngrams_freq[length] + 1 if length in \
ngrams_freq else 1
ngrams_freq_sorted = sorted(ngrams_freq.items(), key=lambda item: item[0])
print(" Ngram frequencies: {}".format(ngrams_freq_sorted), flush=True)
print(" Entities in ngrams {} min_ngram_size {} max_ngram_size {}".format(\
len(ngrams), ngrams_freq_sorted[0][0], ngrams_freq_sorted[len(\
ngrams_freq_sorted) -1 ][0]), flush=True)
return ngrams_freq_sorted
def get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \
dedup_file, dedup_key, ngrams_freq_sorted):
start_time = time.time()
# get the ngrams frequency
args.get_ngram_freq_only = True
# Open the large file to process in parallel
num_workers = args.num_threads
pool = multiprocessing.Pool(num_workers)
fin = open(dedup_file, 'r', encoding='utf-8')
free_ngram_abt_partial=partial(free_ngram, args=args, key=dedup_key, \
ngrams=ngrams, ngrams_freq_sorted=ngrams_freq_sorted)
free_ngrams_abt = pool.imap(free_ngram_abt_partial, fin, 500)
counter = 0
for _, _, _, local_ngram in free_ngrams_abt:
counter += 1
if counter % 1000 == 0:
print(' [compute_stat]> processed {} documents in {:.2f} seconds ...'.
format(counter, time.time() - start_time), flush=True)
for local_key in local_ngram:
if local_key in ngrams:
ngrams[local_key] += 1
local_ngram = {}
print(' Time taken to compute statistics {:.2f} seconds'.format(time.time() - \
start_time), flush=True)
pool.close()
pool.join()
start_time = time.time()
counter_threshold = 0
    # Get ngrams below threshold
for local_key, local_val in ngrams.items():
if ngrams[local_key] < args.key_threshold:
print(" [threshold] {} {}".format(local_key, local_val), flush=True)
counter_threshold += 1
ngrams_below_threshold[local_key] = 1
print(' Ngrams below threshold {}'.format(counter_threshold), flush=True)
fin.close()
def clean_ngrams_below_threshold(args, ngrams_below_threshold, dedup_file, \
dedup_key):
start_time = time.time()
# Now actually filter the dataset
args.get_ngram_freq_only = False
#id_prefix = '-'.join(args.tasks[::2])
id_prefix = '-'.join(args.tasks[::1])
# get the range of the size of the ngrams
ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams_below_threshold)
# Open the large file to process in parallel
counter = splitted = ignored = split_mt_thld = trimmed_count = 0
num_workers = args.num_threads
pool = multiprocessing.Pool(num_workers)
fin = open(dedup_file, 'r', encoding='utf-8')
free_ngram_clean_partial=partial(free_ngram, args=args, key=dedup_key, \
ngrams=ngrams_below_threshold, ngrams_freq_sorted=ngrams_freq_sorted)
free_ngrams_clean = pool.imap(free_ngram_clean_partial, fin, 500)
out_f = open(args.output, 'wb')
for text_buf_ngram_free, trimmed, myjson, _ in free_ngrams_clean:
counter += 1
try:
trimmed_count += trimmed
if len(text_buf_ngram_free) > 1:
splitted += 1
if len(text_buf_ngram_free) == 0:
ignored += 1
# more than 10 splits ignored
if len(text_buf_ngram_free) > args.splits_count:
text_buf_ngram_free = []
split_mt_thld += 1
if args.output is not None:
if "split_id" in myjson:
use_prefix = myjson["split_id"] + "-"
else:
use_prefix = ""
for i in range(len(text_buf_ngram_free)):
split_id_string = id_prefix + '-{:010d}'.format(int(\
counter)) + '-{:04d}'.format(int(i))
myjson[dedup_key] = text_buf_ngram_free[i]
myjson["split_id"] = use_prefix + split_id_string
outjson = json.dumps(myjson, ensure_ascii=False)
#outjson = json.dumps({"text":text_buf_ngram_free[i],
# id_prefix+"_split_id":split_id_string},
# ensure_ascii=False)
out_f.write(outjson.encode('utf-8'))
out_f.write('\n'.encode('utf-8'))
if counter % 1000 == 0:
print(' [final]> processed {} documents in {:.2f} seconds ...'.
format(counter, time.time() - start_time), flush=True)
except Exception as e:
print('Error:', e)
print(' [final]> processed {} documents in {:.2f} seconds ...'.
format(counter, time.time() - start_time), flush=True)
    print(' Total docs {} split {} ignored {} splits > threshold {} trimmed'
          ' {}'.format(counter, splitted, ignored, split_mt_thld,
                       trimmed_count), flush=True)
pool.close()
pool.join()
out_f.close()
fin.close()
if __name__ == '__main__':
    # we use 13-grams; any text shorter than 200 characters is removed,
    # as is any document split into more than 10 pieces
print('parsing the arguments ...')
parser = argparse.ArgumentParser()
    parser.add_argument('--tasks', nargs='*', required=True, default=None,
                        help='Tasks to use for deduplication: currently '
                        'supports [lambada, squad, natural_questions, '
                        'triviaqa, webqa, race, drop, coqa, and piqa]')
    parser.add_argument('--lambada-path', type=str, default=None,
                        help='Path to the lambada dataset; only the lambada '
                        'task needs it')
parser.add_argument('--dedup-dataset', nargs = '*', default=None,
help='Dataset to deduplicate with the key to use'
' e.g. cc.json text')
parser.add_argument('--output', type=str, default=None,
help='Output file name to save dedup dataset')
parser.add_argument('--num-threads', type=int, default=40,
help='Number of threads to use')
# Default dedup values
parser.add_argument('--max-ngram-size', type=int, default=13,
help='Maximum size of ngram to use.')
parser.add_argument('--min-ngram-size', type=int, default=8,
help='Minimum size of ngram to use.')
parser.add_argument('--filter-text-char-len', type=int, default=200,
help='Remove any text below this length.')
    parser.add_argument('--key-threshold', type=int, default=10,
                        help='Frequency threshold; only ngrams occurring '
                        'fewer than this many times in the dedup dataset '
                        'are used for filtering')
parser.add_argument('--save-dictionary', type=str, default=None,
help='Save the dictionary')
parser.add_argument('--load-dictionary', type=str, default=None,
help='Load the dictionary')
    parser.add_argument('--splits-count', type=int, default=10,
                        help='Remove any document split into more than this '
                        'many pieces')
    parser.add_argument('--remove-char-each-side', type=int, default=200,
                        help='Number of characters to remove on each side '
                        'of a matched ngram when splitting a document')
args = parser.parse_args()
assert len(args.dedup_dataset) == 2
dedup_file = args.dedup_dataset[0]
dedup_key = args.dedup_dataset[1]
# Setup multi-processing
num_workers = args.num_threads
if args.load_dictionary is None:
# Build ngrams
ngrams = {}
compute_tasks_ngrams(args, ngrams)
# get the range of the size of the ngrams
ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams)
# get ngram freq from large file in parallel
# get ngrams below threshold
ngrams_below_threshold = {}
get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \
dedup_file, dedup_key, ngrams_freq_sorted)
# save the dictionary if needed
if args.save_dictionary is not None:
with open(args.save_dictionary, 'wb') as save_dict_handle:
pickle.dump(ngrams_below_threshold, save_dict_handle)
else:
with open(args.load_dictionary, 'rb') as load_dict_handle:
ngrams_below_threshold = pickle.load(load_dict_handle)
# filter the large file
if args.output is not None:
clean_ngrams_below_threshold(args, ngrams_below_threshold, \
dedup_file, dedup_key)
print('done :-)')
|
Megatron-LM-master
|
tools/openwebtext/filter_ngrams.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import importlib
required_libs = [
"faiss",
"h5py",
"transformers", # for huggingface bert
]
for lib in required_libs:
try:
globals()[lib] = importlib.import_module(lib)
except ImportError as e:
raise Exception(f"Missing one or more packages required for Retro preprocessing: {required_libs}. Tried importing '{lib}'.")
|
Megatron-LM-master
|
tools/retro/external_libs.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import os
import torch
import types
from megatron import get_retro_args
from megatron.tokenizer.tokenizer import (
_BertWordPieceTokenizer,
_GPT2BPETokenizer,
_GPTSentencePieceTokenizer,
)
def get_args_path(workdir):
'''Argument copy stored within retro workdir.'''
return os.path.join(workdir, "args.json")
def get_num_chunks_per_sample():
'''Compute seq_length // chunk_length.'''
args = get_retro_args()
sample_length = args.retro_gpt_seq_length
chunk_length = args.retro_gpt_chunk_length
assert sample_length % chunk_length == 0
return sample_length // chunk_length
def get_gpt_tokenizer():
'''GPT (BPE) tokenizer.'''
args = get_retro_args()
tokenizer_type = args.retro_gpt_tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
assert args.retro_gpt_vocab_file and args.retro_gpt_merge_file
return _GPT2BPETokenizer(
vocab_file=args.retro_gpt_vocab_file,
merge_file=args.retro_gpt_merge_file,
)
elif tokenizer_type == 'GPTSentencePieceTokenizer':
assert args.retro_gpt_tokenizer_model is not None
return _GPTSentencePieceTokenizer(args.retro_gpt_tokenizer_model)
else:
raise Exception("unrecognized gpt tokenizer, '%s'." % tokenizer_type)
def get_bert_tokenizer():
'''Bert (Wordpiece) tokenizer.'''
args = get_retro_args()
lower_case = {
"BertWordPieceLowerCase" : True,
"BertWordPieceCase" : False,
}[args.retro_bert_tokenizer_type]
return _BertWordPieceTokenizer(
vocab_file=args.retro_bert_vocab_file,
lower_case=lower_case,
)
class GPTToTextDataset(torch.utils.data.Dataset):
'''Dataset to convert GPT tokens to text.'''
def __init__(self, gpt_dataset):
super().__init__()
self.gpt_dataset = gpt_dataset
self.gpt_tokenizer = get_gpt_tokenizer()
def __len__(self):
return len(self.gpt_dataset)
def __getitem__(self, idx):
gpt_token_ids = self.gpt_dataset[idx]["text"].tolist()
text = self.gpt_tokenizer.detokenize(gpt_token_ids)
return {"text": text}
|
Megatron-LM-master
|
tools/retro/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Preprocess data for Retro.
Stages (see argument '--retro-tasks'):
- Build chunk database (DB).
- Build index (train, add).
- Query pretraining neighbors.
"""
import json
import os
import torch
from megatron import get_args, initialize_megatron, print_rank_0
from megatron.global_vars import set_retro_args
from tools.retro.db import build_db
from tools.retro.index import add_to_index, build_index, train_index
from tools.retro.query import query_pretraining_neighbors
from tools.retro.utils import get_args_path
def add_retro_args(parser):
"""Retro preprocesing arguments.
*Note* : Arguments prefixed with '--retro-gpt-*' or '--retro-bert-*' are
    included and named as such to make it easier to manage both models
    running at the same time. Megatron is not optimized to run two models
    at once, so this naming convention keeps their settings distinct.
"""
group = parser.add_argument_group(title="Retro preprocessing.")
# Basic args.
group.add_argument("--retro-tasks", default="build",
help="Comma-separated list of tasks to run. Run entire "
"preprocesing pipeline by using '--retro-tasks build'. "
"Alternatively, run individual stages with tasks (in "
"this order) 'db-build', 'index-build', or "
"'query-pretraining-neighbors'. For example, "
"'--retro-tasks db-build,index-build,"
"query-pretraining-neighbors' is equivalent to "
"'--retro-tasks build'; or the argument can contain "
"a subset of these tasks. Stages must always be run "
"in the correct order (listed above).")
group.add_argument("--retro-block-size", type=int, default=100000,
help="Number of chunks to process at a time when "
"generating Bert embeddings and querying the search "
"index. Partial results for each block are generally "
"saved to disk in separate files.")
group.add_argument("--retro-doc-block-size", type=int, default=100000,
help="Number of documents to processe at time when "
"processing token datasets into chunk databases. The "
"partial chunk database for each block is saved into "
"a separate file.")
# GPT args.
group.add_argument('--retro-gpt-seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--retro-gpt-data-path', nargs='*', required=True,
help='Path to the training dataset. Accepted format:'
'1) a single data path, 2) multiple datasets in the'
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ... It is used with --split when a '
'single dataset used for all three: train, valid '
'and test. It is exclusive to the other '
'--*-data-path args')
group.add_argument('--retro-gpt-split', type=str, default='969,30,1',
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
group.add_argument('--retro-gpt-mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument("--retro-gpt-eval-interval", type=int, required=True,
help="GPT evaluation interval.")
group.add_argument("--retro-gpt-eval-iters", type=int, required=True,
help="GPT evaluation iterations.")
group.add_argument("--retro-gpt-tokenizer-type", required=True,
help="GPT tokenizer type.")
group.add_argument("--retro-gpt-vocab-file", help="GPT vocab file.")
group.add_argument("--retro-gpt-merge-file", help="GPT merge file.")
group.add_argument("--retro-gpt-tokenizer-model",
help="GPT tokenizer model file.")
group.add_argument("--retro-gpt-seq-length", type=int, required=True,
help="GPT sequence length.")
group.add_argument("--retro-gpt-global-batch-size", type=int, required=True,
help="GPT global batch size.")
group.add_argument("--retro-gpt-chunk-length", type=int, default=64,
help="GPT chunk length.")
# Bert args.
group.add_argument("--retro-bert-vocab-file", required=True,
help="Bert vocab file.")
group.add_argument("--retro-bert-tokenizer-type", required=True,
help="Bert tokenizer type (for when using "
"'--bert-embedder-type megatron').")
group.add_argument("--retro-bert-batch-size", type=int, default=128,
help="Micro-batch size for processing Bert embeddings.")
group.add_argument("--retro-bert-max-chunk-length", type=int, default=256,
help="Maximum sequence length for Bert embeddings. "
"(Named 'chunk' here in reference to these Bert "
"sequences being converted from GPT chunks.)")
# Index args.
group.add_argument("--retro-index-nfeats", "-f", type=int, default=1024,
help="Dimension of Bert embeddings. Bert-large is "
"commonly used, so this value defaults to 1024.")
group.add_argument("--retro-index-type", default="faiss-par-add",
choices=["faiss-base", "faiss-par-add"],
help="A 'faiss-base' index is a simple, un-optimized "
"wrapper around a Faiss index. A 'faiss-par-add' index "
"optimizes the 'add()' method by making it multi-node "
"and multi-process, but with bit-wise equivalent "
"results.")
group.add_argument("--retro-index-str", required=True,
help="Index string used for calling "
"faiss.index_factory(). For example, "
"'IVF262144_HNSW32,Flat' or "
"'OPQ32_256,IVF4194304_HNSW32,PQ32'.")
group.add_argument("--retro-index-ntrain", type=int, required=True,
help="Number of database chunks to use for training "
"the index. This value must be less or equal to the "
"total number of chunks in the database.")
group.add_argument("--retro-index-train-load-fraction",
type=float, default=1.,
help="Fraction of sampled chunks to use for training "
"the index. Useful when our total sampled embeddings "
"use too much memory; lowering the load fraction is "
"less costly than re-embedding a new sampled dataset "
"from scratch.")
group.add_argument("--retro-index-add-load-fraction",
type=float, default=1.,
help="Fraction of database chunks to use for adding to "
"the index. Useful when our total index size would "
"use too much memory; lowering the load fraction is "
"less costly than re-designing our token datasets.")
group.add_argument("--retro-index-no-delete-training-embeddings",
action='store_false',
dest="retro_index_delete_training_embeddings",
help="Skip deleting training embeddings for the search "
"index. Useful for debugging.")
group.add_argument("--retro-index-no-delete-added-codes",
action='store_false',
dest="retro_index_delete_added_codes",
help="Skip deleting added codes for the search "
"index. Useful for debugging.")
# Query args.
group.add_argument("--retro-query-ef-search", type=int, default=256,
help="Index ef-search parameter for HNSW during querying.")
group.add_argument("--retro-query-nprobe", type=int, default=65536,
help="Index nprobe parameter for IVF during querying.")
group.add_argument("--retro-query-num-neighbors-query", type=int, default=200,
help="Number of neighbors to retrieve when calling "
"index.search().")
group.add_argument("--retro-query-num-neighbors-save", type=int, default=20,
help="Number of neighbors to save to disk after "
"the index's returned neighbors. If longer than target "
"value, neighbors truncated; and if shorter than target "
"value, neighbors are padded with -1's.")
# Enforce argument naming convention.
for action in group._group_actions:
prefix = action.dest.split("_")[0]
assert prefix == "retro", \
"Retro args must be prefixed with '--retro-*', for consistent " \
"styling. Please fix '%s'." % ", ".join(action.option_strings)
return parser
def save_args(args):
'''Save copy of args within retro workdir.'''
def default_dump(obj):
if isinstance(obj, torch.dtype):
return str(obj)
else:
raise Exception("specialize for <%s>." % type(obj).__name__)
if torch.distributed.get_rank() == 0:
args_path = get_args_path(args.retro_workdir)
with open(args_path, "w") as f:
json.dump(vars(args), f, indent=4, default=default_dump)
torch.distributed.barrier()
if __name__ == "__main__":
    # Initialize Megatron.
initialize_megatron(extra_args_provider=add_retro_args)
# Split retro tasks.
args = get_args()
args.retro_tasks = args.retro_tasks.split(",")
# Save/set retro args.
os.makedirs(args.retro_workdir, exist_ok=True)
save_args(args)
set_retro_args(args)
# Select task to run.
for task in args.retro_tasks:
print_rank_0("start '%s'." % task)
# Run all stages.
if task == "build":
build_db()
torch.distributed.barrier()
build_index()
torch.distributed.barrier()
query_pretraining_neighbors()
# DB (i.e., chunk db).
elif task == "db-build":
build_db()
# Index.
elif task == "index-build":
build_index() # calls both train + add.
elif task == "index-train":
train_index() # train only
elif task == "index-add":
add_to_index() # add only
# Pretraining.
elif task == "query-pretraining-neighbors":
query_pretraining_neighbors()
else:
raise Exception("specialize for task '%s'." % task)
torch.distributed.barrier()
print_rank_0("end '%s'." % task)
|
Megatron-LM-master
|
tools/retro/main.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .cli import retro
|
Megatron-LM-master
|
tools/retro/cli/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import json
import numpy as np
import os
import torch
import types
from megatron.global_vars import set_global_variables, set_retro_args
from megatron.initialize import (
initialize_megatron,
_initialize_distributed,
_set_random_seed,
_compile_dependencies,
)
from tools.retro.db.utils import (
get_indexed_dataset_infos as get_db_indexed_dataset_infos,
get_merged_train_dataset as get_db_dataset,
)
from tools.retro.main import add_retro_args
from tools.retro.query.retro_dataset import get_retro_datasets
from tools.retro.utils import get_args_path, get_bert_tokenizer, get_gpt_tokenizer
def shorten_str(s, n):
s = "\\n".join(s.splitlines())
return s if len(s) <= n else "%s ... %s" % (s[:n//2], s[-n//2:])
class retro:
args = None
##############################################
# initialize.
##############################################
@classmethod
def parse_dtype_str(cls, dtype_str):
return {
"torch.float16" : torch.float16,
"torch.float32" : torch.float32,
"torch.bfloat16" : torch.bfloat16,
}[dtype_str]
@classmethod
def init_megatron(cls, workdir):
'''Custom initialization of Megatron.'''
# Load args.
args_path = get_args_path(workdir)
assert os.path.exists(args_path), "args.json not found in workdir."
with open(args_path) as f:
cls.args = types.SimpleNamespace(**json.load(f))
cls.args.retro_workdir = workdir # just in case workdir moved
cls.args.rank = 0 # override env
cls.args.world_size = 1 # override env
cls.args.params_dtype = cls.parse_dtype_str(cls.args.params_dtype)
set_global_variables(cls.args)
set_retro_args(cls.args)
_initialize_distributed()
_set_random_seed(cls.args.seed, cls.args.data_parallel_random_init)
_compile_dependencies()
@classmethod
def init(cls, workdir):
'''Initialize Megatron, tokenizers, and datasets.'''
# Load args.
cls.init_megatron(workdir)
cls.tokenizers = types.SimpleNamespace(
gpt=get_gpt_tokenizer(),
bert=get_bert_tokenizer(),
)
# Load data.
cls.db_indexed_dataset_infos = get_db_indexed_dataset_infos()
cls.db_dataset = get_db_dataset()
pt_train_ds, pt_valid_ds, _ = get_retro_datasets(verify_sizes=False)
cls.pt_datasets = types.SimpleNamespace(
train=pt_train_ds,
valid=pt_valid_ds,
)
# Retrieve max saved neighbors.
for key in vars(cls.pt_datasets):
getattr(cls.pt_datasets, key).num_neighbors = \
cls.args.retro_query_num_neighbors_save
# Print usage.
cls.print_usage()
##############################################
# utils.
##############################################
@classmethod
def gpt_to_text(cls, token_ids):
'''GPT tokens to text.'''
return cls.tokenizers.gpt.detokenize(token_ids.tolist()
if isinstance(token_ids, np.ndarray)
else token_ids)
@classmethod
def text_to_bert(cls, text):
'''Text to Bert tokens.'''
return cls.tokenizers.bert.tokenize(text)
##############################################
# chunk db.
##############################################
@classmethod
def get_db_num_indexed_datasets(cls):
'''Number of indexed datasets within blendable dataset.'''
return len(cls.db_indexed_dataset_infos)
@classmethod
def get_db_indexed_dataset_infos(cls):
'''Dataset infos, including number of training & sampled sets.'''
return [(info["ratio"], info["name"])
for info in cls.db_indexed_dataset_infos]
@classmethod
def get_db_dataset(cls):
return cls.db_dataset
@classmethod
def get_db_num_chunks(cls):
'''Number of DB chunks.'''
return len(cls.get_db_dataset())
@classmethod
def get_db_chunk_gpt(cls, idx):
'''Get DB chunk as GPT token ids.'''
return cls.get_db_dataset()[idx]["text"].tolist()
@classmethod
def get_db_chunk_bert(cls, idx):
'''Get DB chunk as Bert token ids.'''
return cls.text_to_bert(cls.get_db_chunk_text(idx))
@classmethod
def get_db_chunk_text(cls, idx):
'''Get DB chunk as text.'''
return cls.gpt_to_text(cls.get_db_chunk_gpt(idx))
@classmethod
def get_db_chunk_and_continuation_text(cls, idx):
'''Get DB chunk along with continuation, as text.'''
# Modulus used here to match original implementation (i.e., last
# chunks continuation wraps around to first chunk).
return [
cls.get_db_chunk_text(idx),
cls.get_db_chunk_text((idx + 1) % len(cls.get_db_dataset())),
]
##############################################
# pretraining corpus.
##############################################
@classmethod
def get_pt_num_samples_and_chunks(cls, data_key):
'''Number of samples & chunks (e.g., 32*n_samples) in corpus.'''
assert hasattr(cls.pt_datasets, data_key), \
"pretraining set '%s' not found (choices: %s)." % (
data_key, ", ".join(vars(cls.pt_datasets).keys()))
chunk_dataset = getattr(cls.pt_datasets, data_key).chunk_dataset
return (
len(chunk_dataset.sample_dataset),
len(chunk_dataset),
)
@classmethod
def get_pt_num_samples(cls, data_key):
'''Number of pretraining samples.'''
return cls.get_pt_num_samples_and_chunks(data_key)[0]
@classmethod
def get_pt_num_chunks(cls, data_key):
'''Number of pretraining chunks (e.g., 32*n_samples).'''
return cls.get_pt_num_samples_and_chunks(data_key)[1]
@classmethod
def get_pt_dataset(cls, data_key):
return getattr(cls.pt_datasets, data_key)
@classmethod
def get_pt_sample(cls, data_key, idx):
return getattr(cls.pt_datasets, data_key)[idx]
@classmethod
def get_neighbor_tokens(cls, sample_id, chunk_id, data_key="train"):
try:
sample = cls.get_pt_sample(data_key, sample_id)
sample_token_ids = sample["text"]
chunk_length = cls.args.retro_gpt_chunk_length
chunk_start_idx = chunk_id * chunk_length
chunk_end_idx = min(sample_token_ids.shape[0],
chunk_start_idx + chunk_length)
chunk_token_ids = sample_token_ids[chunk_start_idx:chunk_end_idx]
neighbor_token_ids = sample["neighbor_tokens"][chunk_id]
return {
"chunk_tokens" : chunk_token_ids,
"neighbor_tokens" : neighbor_token_ids,
}
        except Exception:
            return None
@classmethod
def print_neighbor_texts(cls, sample_id, chunk_id, data_key="train"):
tokens = cls.get_neighbor_tokens(sample_id, chunk_id, data_key)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
try:
print("PRETRAINING CHUNK:")
print(" - %s" % shorten_str(cls.gpt_to_text(tokens["chunk_tokens"]), 150))
print("NEIGHBOR_CHUNKS:")
for token_ids in tokens["neighbor_tokens"]:
print(" - %s" % shorten_str(cls.gpt_to_text(token_ids), 150))
        except Exception:
print("<no neighbors for sample %d>" % sample_id)
##############################################
# usage.
##############################################
@classmethod
def print_usage(cls):
'''Print usage.'''
print()
print("+++++++++++++++++++++++++++++++++++++++++++++++++++")
print("examples ... [ *note*: 'db' = chunk db; 'pt' = pretraining corpus. ]")
print("+++++++++++++++++++++++++++++++++++++++++++++++++++")
print()
print("~~~~ indexed datasets ~~~~")
print("retro.get_db_num_indexed_datasets() : %s" %
cls.get_db_num_indexed_datasets())
print("retro.get_db_indexed_dataset_infos() :")
for i, (ratio,prefix) in enumerate(cls.get_db_indexed_dataset_infos()):
print(" %s(%f, %s)%s" % (
"[" if i == 0 else " ",
ratio,
prefix,
"]" if i == len(cls.db_indexed_dataset_infos) - 1 else ",",
))
print()
print("~~~~ counts ~~~~")
print("retro.get_db_num_chunks : %d." % cls.get_db_num_chunks())
print()
for sq_key in ("sample", "chunk"):
for data_key in ("train", "valid"): # test?
print("retro.get_pt_num_%ss('%s') : %d." % (
sq_key, data_key,
getattr(cls, f"get_pt_num_{sq_key}s")(data_key)))
print()
print("~~~~ tokens, text ~~~~")
print("retro.get_db_chunk_gpt(chunk_id) : %s" %
shorten_str(str(retro.get_db_chunk_gpt(0)), 50))
print("retro.get_db_chunk_bert(chunk_id) : %s" %
shorten_str(str(retro.get_db_chunk_bert(0)), 50))
print("retro.get_db_chunk_text(chunk_id) : %s" %
shorten_str(retro.get_db_chunk_text(0).strip(), 50))
print("retro.get_db_chunk_and_continuation_text(chunk_id) :")
for i, t in enumerate(retro.get_db_chunk_and_continuation_text(0)):
print(" %s'%s'%s" % (
"[" if i == 0 else " ",
shorten_str(t.strip().replace("\n", " "), 50),
"]" if i == 1 else ",",
))
sample = cls.get_pt_sample("train", 0)
sample_chunk_id = sample["neighbor_tokens"].shape[0] // 2
sample_neighbor_id = 0
print()
print("retro.get_pt_sample('train', sample_id) :")
print(" {")
for k, v in sample.items():
print(" '%s' : %s" % (k, shorten_str(str(v), 50)))
print(" }")
print()
print("(e.g., sample = retro.get_pt_sample(...))")
print()
print(" sample['text'].shape : %s" % str(sample["text"].shape))
print(" sample['neighbor_tokens'].shape : %s" % str(sample["neighbor_tokens"].shape))
print(" sample['text'] : %s" % shorten_str(str(sample["text"]), 50))
print(" sample['neighbor_tokens'][17][1] : %s" % shorten_str(str(sample["neighbor_tokens"][sample_chunk_id][sample_neighbor_id]), 50))
print(" retro.gpt_to_text(sample['text']) : %s" % shorten_str(cls.gpt_to_text(sample["text"]), 50))
print(" retro.gpt_to_text(sample['neighbor_tokens']) : %s" % shorten_str(cls.gpt_to_text(sample["neighbor_tokens"][sample_chunk_id][sample_neighbor_id]), 50))
print("+++++++++++++++++++++++++++++++++++++++++++++++++++")
|
Megatron-LM-master
|
tools/retro/cli/cli.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import os
from . import retro
if __name__ == "__main__":
retro.init(os.environ["RETRO_WORKDIR"])
|
Megatron-LM-master
|
tools/retro/cli/__main__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from collections import defaultdict
from concurrent.futures import as_completed, ProcessPoolExecutor
from functools import reduce
import glob
import json
import numpy as np
import os
from pathlib import Path
import threading
import torch
from tqdm import tqdm
import types
from megatron import get_retro_args, print_rank_0
from megatron.data.indexed_dataset import MMapIndexedDataset
from megatron.tokenizer.tokenizer import (
_BertWordPieceTokenizer,
_GPT2BPETokenizer,
)
from tools.bert_embedding.utils import get_missing_blocks_by_rank
from tools.retro.external_libs import h5py
from tools.retro.utils import get_gpt_tokenizer, get_bert_tokenizer
from .utils import (
get_indexed_dataset_infos,
get_indexed_dataset_infos_path,
get_individual_db_dir,
get_individual_chunk_db,
get_individual_doc_offsets,
get_merged_dataset,
get_merged_db_path_map,
save_indexed_dataset_infos,
)
def init_indexed_dataset_infos():
'''Gather meta-info about each indexed dataset.
The returned info array allows for easy access to the configuration, and
helps remove ambiguity.
'''
args = get_retro_args()
assert len(args.data_path) % 2 == 0, \
"currently, only blendable dataset is supported."
# Dataset infos.
infos = []
for i in range(0, len(args.data_path), 2):
ratio = float(args.data_path[i])
prefix = args.data_path[i + 1]
path = prefix + ".bin"
name = os.path.basename(prefix)
assert os.path.exists(path), "couldn't find '%s'." % path
infos.append({
"ratio" : ratio,
"prefix" : prefix,
"path" : path,
"name" : name,
"db_dir" : get_individual_db_dir(name),
"dataset" : MMapIndexedDataset(prefix, skip_warmup=True),
})
return infos
def build_partial_db(
dataset_idx,
n_datasets,
indexed_dataset,
block_id,
n_blocks,
block,
proc_id,
n_procs,
tokenizers,
):
'''Process a document index range of the indexed dataset.
The chunk database is built in parallel blocks, since de-tokenizing &
re-tokenizing for Bert-length computation is expensive. This method
iterates each document and extracts sequential 'chunk-length' sequences
from each document.
'''
args = get_retro_args()
# Document start/end indexes.
doc_range = block["range"]
n_docs = doc_range[1] - doc_range[0]
n_docs_per_proc = int(np.ceil(n_docs / n_procs))
doc_start_id = doc_range[0] + proc_id * n_docs_per_proc
doc_end_id = min(doc_range[1], doc_start_id + n_docs_per_proc)
# Print progress.
progress_proc_ids = set(range(n_procs)) \
if torch.distributed.get_rank() == 0 else set()
if proc_id in progress_proc_ids:
print(" > building partial chunk db, proc %d / %d, docs %d:%d / %d."%(
proc_id,
n_procs,
doc_start_id,
doc_end_id,
n_docs,
))
# Progress bars (snapshot of overall progress).
doc_id_iter = range(doc_start_id, doc_end_id)
pbar = tqdm(doc_id_iter) \
if proc_id in progress_proc_ids else \
doc_id_iter
# Iterate documents & parse chunks.
chunk_db_valid = []
chunk_db_invalid = []
doc_size_map = {}
for doc_id in pbar:
# Progress description.
try:
pbar.set_description("ds %d / %d, block %d / %d, proc %d / %d." % (
dataset_idx,
n_datasets,
block_id,
n_blocks,
proc_id,
n_procs))
        except AttributeError:
            # plain range iterator (non-progress procs) has no set_description
            pass
# Remove EOD token.
doc = indexed_dataset.get(doc_id)
if doc[-1].item() == tokenizers.gpt.eod:
doc = doc[:-1]
doc_len = len(doc)
# Chunk start/end indexes.
chunk_start_idxs = list(range(0, doc_len, args.retro_gpt_chunk_length))
chunk_end_idxs = [min(doc_len, s + args.retro_gpt_chunk_length)
for s in chunk_start_idxs]
# Re-tokenize each chunk to Bert/Wordpiece (empty bert -> 'invalid').
doc_size_map[doc_id] = 0
for i, chunk_start_idx in enumerate(chunk_start_idxs):
# Re-tokenize.
chunk_end_idx = chunk_end_idxs[i]
gpt_token_ids = indexed_dataset.get(
idx=doc_id,
offset=chunk_start_idx,
length=chunk_end_idx - chunk_start_idx,
)
text = tokenizers.gpt.detokenize(gpt_token_ids.tolist())
bert_token_ids = tokenizers.bert.tokenize(text)
# 'Valid' for non-empty Bert chunks; 'invalid' otherwise.
if len(bert_token_ids) == 0:
_chunk_db = chunk_db_invalid
else:
_chunk_db = chunk_db_valid
doc_size_map[doc_id] += 1
_chunk_db.append((
doc_id,
chunk_start_idx,
chunk_end_idx,
len(bert_token_ids),
))
return proc_id, chunk_db_valid, chunk_db_invalid, doc_size_map
def build_individual_db(dataset_idx, n_datasets, dataset_info, tokenizers):
'''Process a single indexed dataset & extract chunks.'''
args = get_retro_args()
# Make directory.
db_dir = dataset_info["db_dir"]
os.makedirs(db_dir, exist_ok=True)
# Indexed dataset.
indexed_dataset = dataset_info["dataset"]
# Missing db blocks.
n_missing_world, missing_db_blocks = get_missing_blocks_by_rank(
db_dir,
len(indexed_dataset),
args.retro_doc_block_size,
validate=lambda f : f["chunks_valid"].shape == (0,) \
or f["chunks_valid"].shape[1] == 4)
# Prevent missing-path-write race condition.
torch.distributed.barrier()
if not missing_db_blocks:
return
# Num processes.
if n_missing_world == 1:
n_procs = 128
elif n_missing_world <= 2:
n_procs = 64
elif n_missing_world <= 4:
n_procs = 32
elif n_missing_world <= 8:
n_procs = 16
else:
n_procs = 8
# Process documents in parallel.
with ProcessPoolExecutor(max_workers=n_procs) as executor:
for block_idx, block in enumerate(missing_db_blocks):
if block is not None:
db_path = block["path"]
# Build partial dbs.
print_rank_0(' > build partial dbs.')
futures = []
for proc_id in range(n_procs): # not true process id
futures.append(executor.submit(
build_partial_db,
dataset_idx,
n_datasets,
indexed_dataset,
block_idx,
len(missing_db_blocks),
block,
proc_id,
n_procs,
tokenizers,
))
partial_chunk_dbs = []
for future in as_completed(futures):
partial_chunk_dbs.append(future.result())
# Concatenate chunks.
partial_chunk_dbs.sort(key=lambda item:item[0]) # sort by proc_id
chunk_db_valid = [item
for partial_chunk_db in partial_chunk_dbs
for item in partial_chunk_db[1]]
chunk_db_invalid = [item
for partial_chunk_db in partial_chunk_dbs
for item in partial_chunk_db[2]]
# Convert to numpy.
print_rank_0(' > converting chunk db to numpy.')
chunk_db_valid = np.array(chunk_db_valid, dtype="uint32")
chunk_db_invalid = np.array(chunk_db_invalid, dtype="uint32")
# Document offsets.
doc_sizes = [(d, s)
for partial_chunk_db in partial_chunk_dbs
for d, s in partial_chunk_db[3].items()]
doc_sizes.sort(key = lambda item : item[0])
doc_offsets = np.cumsum([item[1] for item in doc_sizes]) \
.astype("uint64")
doc_offsets = np.stack((
np.array([item[0] for item in doc_sizes], dtype="uint64"),
doc_offsets), axis=1)
# Save DB.
print_rank_0(" > saving individual db.")
with h5py.File(db_path, "w") as f:
dset = f.create_dataset("chunks_valid", data=chunk_db_valid)
dset = f.create_dataset("chunks_invalid",
data=chunk_db_invalid)
dset = f.create_dataset("doc_offsets", data=doc_offsets)
# Wait for all ranks to finish block.
print_rank_0(" > waiting for all ranks to finish block.")
torch.distributed.barrier()
print_rank_0(" > finished saving individual db.")
def build_individual_dbs(indexed_dataset_infos):
'''Iterate each indexed dataset & process its chunks.'''
args = get_retro_args()
# Tokenizers.
tokenizers = types.SimpleNamespace(
gpt=get_gpt_tokenizer(),
bert=get_bert_tokenizer(),
)
# Build individual DBs.
print_rank_0(" > build individual chunk dbs.")
for ds_idx, ds_info in enumerate(indexed_dataset_infos):
# Progress.
print_rank_0(" > building individual db, dataset %d / %d ... '%s'." % (
ds_idx,
len(indexed_dataset_infos),
ds_info["name"],
))
# Process single dataset.
build_individual_db(ds_idx, len(indexed_dataset_infos),
ds_info, tokenizers)
def update_chunk_counts(indexed_dataset_infos):
'''Set n_chunks_train & n_chunks sampled for each individual DB.'''
args = get_retro_args()
if torch.distributed.get_rank() != 0:
return
# Data ratio sum (for setting index training chunks).
data_ratio_sum = sum([ d["ratio"] for d in indexed_dataset_infos ])
# Training split size (split at document level).
train_fraction = float(args.split.split(",")[0]) / 100
assert train_fraction > 0 and train_fraction <= 1
# Set n_chunks (including n_chunks_sampled for unambiguity).
print_rank_0(" > compute n_chunks.")
for ds_index, ds_info in enumerate(indexed_dataset_infos):
db_dir = ds_info["db_dir"]
db_paths = sorted(glob.glob(db_dir + "/*.hdf5"))
# Update counts.
ds_info["n_docs"] = len(ds_info["dataset"].doc_idx) - 1
ds_info["n_docs_train"] = int(train_fraction * ds_info["n_docs"])
ds_info["n_chunks"] = 0 # previously, 'n_chunks_valid'
ds_info["n_chunks_train"] = 0
ds_info["n_chunks_invalid"] = 0
for db_path in tqdm(db_paths, "%d/%d, %s" % (
ds_index, len(indexed_dataset_infos), ds_info["name"])):
with h5py.File(db_path, "r") as f:
ds_info["n_chunks"] += len(f["chunks_valid"])
ds_info["n_chunks_invalid"] += len(f["chunks_invalid"])
ds_info["n_chunks_train"] += \
(np.copy(f["chunks_valid"][:, 0]) < ds_info["n_docs_train"]) \
.sum().item()
ds_info["n_chunks_sampled"] = int(args.retro_index_ntrain *
ds_info["ratio"] / data_ratio_sum)
# Verify counts.
assert ds_info["n_chunks_train"] <= ds_info["n_chunks"], \
"n_train (%d) > n_total (%d)." % (
ds_info["n_chunks_train"], ds_info["n_chunks"])
assert ds_info["n_chunks_sampled"] <= ds_info["n_chunks_train"], \
"n_sampled (%d) > n_train (%d)." % (
ds_info["n_chunks_sampled"], ds_info["n_chunks_train"])
def merge_dbs(indexed_dataset_infos, db_type):
'''Merge individual DBs into single DB.'''
if torch.distributed.get_rank() != 0:
return
print(" > build %s chunk db." % db_type)
# Count chunks.
if db_type == "sampled":
n_chunks_key = "n_chunks_sampled"
n_docs_key = None
elif db_type == "train":
n_chunks_key = "n_chunks_train"
n_docs_key = "n_docs_train"
elif db_type == "valid":
n_docs_key = None
else:
raise Exception("handle db_type '%s'." % db_type)
if db_type == "valid":
n_chunks = sum(m["n_chunks"] - m["n_chunks_train"]
for m in indexed_dataset_infos)
else:
n_chunks = sum(m[n_chunks_key] for m in indexed_dataset_infos)
n_docs = None if n_docs_key is None else \
sum(m[n_docs_key] for m in indexed_dataset_infos)
# DB path.
db_path = get_merged_db_path_map()[db_type]
# Delete existing chunk db if incorrect size.
if os.path.exists(db_path):
        try:
            f = h5py.File(db_path, "r")
            n_alloc = len(f["chunks"])            # total allocated
            n_written = f["n_written"][0].item()  # total written
            f.close()
            if n_chunks != n_alloc or n_chunks != n_written:
                os.remove(db_path)
        except OSError:
            # Unreadable file; rebuild it below.
            os.remove(db_path)
        except KeyError:
            # Missing dataset within file; rebuild it below.
            f.close()
            os.remove(db_path)
# Build merged chunk db.
if not os.path.exists(db_path):
os.makedirs(os.path.dirname(db_path), exist_ok=True)
f = h5py.File(db_path, "w")
# Initialize output arrays.
merged_chunk_db = \
f.create_dataset("chunks", (n_chunks, 5), dtype="uint32")
merged_doc_offsets = None if n_docs_key is None else \
f.create_dataset("doc_offsets", (n_docs, 3), dtype="uint64")
n_written = f.create_dataset("n_written", (1,), dtype="uint64")
n_written[0] = 0
# Iterate indexed datasets & collect chunks.
chunk_start_index = 0
doc_start_index = 0
doc_start_offset = 0
for ds_idx, ds_info in enumerate(indexed_dataset_infos):
print(" > merging dbs; '%s', dataset %d / %d ... '%s'." %
(db_type, ds_idx, len(indexed_dataset_infos), ds_info["name"]))
individual_chunk_db = get_individual_chunk_db(ds_idx, ds_info)
individual_doc_offsets = None if n_docs_key is None else \
get_individual_doc_offsets(ds_idx, ds_info)
if db_type == "valid":
individual_chunk_db = \
individual_chunk_db[ds_info["n_chunks_train"]:]
if n_docs_key is None:
individual_doc_offsets = None
else:
train_doc_offset = \
individual_doc_offsets[ds_info["n_docs_train"] - 1, 2]
individual_doc_offsets = \
np.copy(individual_doc_offsets[ds_info["n_docs_train"]:])
individual_doc_offsets[:, 2] -= train_doc_offset
print("~~~")
print(individual_doc_offsets)
print(train_doc_offset)
raise Exception("test me.")
else:
individual_chunk_db = \
individual_chunk_db[:ds_info[n_chunks_key]]
individual_doc_offsets = None if n_docs_key is None else \
np.copy(individual_doc_offsets[:ds_info[n_docs_key]])
merged_chunk_db[chunk_start_index:chunk_start_index+len(individual_chunk_db)] = individual_chunk_db
chunk_start_index += len(individual_chunk_db)
n_written[0] = chunk_start_index
if n_docs_key is not None:
individual_doc_offsets[:, 2] += doc_start_offset
doc_end_index = doc_start_index + individual_doc_offsets.shape[0]
merged_doc_offsets[doc_start_index:doc_end_index] = \
individual_doc_offsets
doc_start_index = doc_end_index
doc_start_offset = individual_doc_offsets[-1, 2].item()
f.close()
def build_db():
'''Extract token chunks from each indexed dataset.
Iterate each document of each indexed dataset, extract that document's
chunks, and save to a 'DB' (hdf5 file).
'''
# Indexed dataset info.
indexed_dataset_infos = init_indexed_dataset_infos()
# Build dbs.
build_individual_dbs(indexed_dataset_infos)
# Single-process going forward.
if torch.distributed.get_rank() != 0:
return
# Update n_chunks & save indexed dataset infos.
if not os.path.exists(get_indexed_dataset_infos_path()):
update_chunk_counts(indexed_dataset_infos)
save_indexed_dataset_infos(indexed_dataset_infos)
indexed_dataset_infos = get_indexed_dataset_infos()
# Merge dbs.
merge_dbs(indexed_dataset_infos, "sampled")
merge_dbs(indexed_dataset_infos, "train")
merge_dbs(indexed_dataset_infos, "valid")
|
Megatron-LM-master
|
tools/retro/db/build.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .build import build_db
|
Megatron-LM-master
|
tools/retro/db/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import json
import numpy as np
import torch
from tqdm import tqdm
from megatron import get_args, print_rank_0
from tools.retro.external_libs import h5py
from tools.retro.utils import get_gpt_tokenizer
class DBDataset(torch.utils.data.Dataset):
'''Dataset for iterating chunks.
Requires:
- List of indexed datasets
- Chunk index array, with format:
        [dataset_idx, doc_id, start_idx, end_idx, bert_length]
'''
def __init__(self, db_path, indexed_datasets, chunks, max_chunk_length):
assert chunks.shape[1] == 5, "expected 5 columns (dataset_idx, " \
"doc_idx, token_start_idx, token_end_idx, bert_chunk_length); " \
"found %d columns." % chunks.shape[1]
self.db_path = db_path
self.indexed_datasets = indexed_datasets
self.chunks = chunks
self.doc_chunk_map = None
self.max_chunk_length = max_chunk_length
self.eod_token_id = get_gpt_tokenizer().eod
def __len__(self):
return self.chunks.shape[0]
def __getitem__(self, chunk_id):
# Chunk start/end indexes.
indexed_dataset_id, doc_id, token_start_idx, token_end_idx, _ = \
[ value.item() for value in self.chunks[chunk_id] ]
chunk_length = token_end_idx - token_start_idx
indexed_dataset = self.indexed_datasets[indexed_dataset_id]
# Chunk token ids.
token_ids = indexed_dataset.get(doc_id,
offset=token_start_idx,
length=chunk_length)
# Extend chunks to max_chunk_length by padding with EOD tokens.
if chunk_length != self.max_chunk_length:
assert chunk_length < self.max_chunk_length, "invalid chunk len."
token_ids = token_ids.tolist()
token_ids += [self.eod_token_id] * \
(self.max_chunk_length - chunk_length)
return {
"doc_id" : doc_id,
"text" : np.array(token_ids, dtype=np.int64),
}
def load_doc_tuples(self):
'''Load the dataset & document ids.
Load the dataset id & document id of each chunk in the database, to
be used for causality filtering during querying.
'''
self.doc_tuples = np.zeros(shape=(len(self), 2), dtype="uint32")
block_size = int(1e6)
for start_idx in tqdm(range(0, len(self), block_size)):
end_idx = min(len(self), start_idx + block_size)
            self.doc_tuples[start_idx:end_idx] = \
                self.chunks[start_idx:end_idx, :2]
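# Minimal sketch of the padding rule in __getitem__ above: chunks shorter
# than max_chunk_length are right-padded with the GPT EOD token id
# (token values below are hypothetical).
def _example_pad_chunk(token_ids, max_chunk_length, eod_token_id):
    import numpy as np
    token_ids = list(token_ids) + \
        [eod_token_id] * (max_chunk_length - len(token_ids))
    return np.array(token_ids, dtype=np.int64)
# e.g., _example_pad_chunk([5, 7], 4, 0) -> array([5, 7, 0, 0])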
|
Megatron-LM-master
|
tools/retro/db/dataset.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from collections import defaultdict
import glob
import json
import numpy as np
import os
from tqdm import tqdm
from megatron import get_retro_args, print_rank_0
from megatron.data.indexed_dataset import MMapIndexedDataset
from tools.retro.external_libs import h5py
from .dataset import DBDataset
def get_base_db_workdir():
'''Sub-directory for DB data.'''
args = get_retro_args()
return os.path.join(args.retro_workdir, "db")
def get_indexed_dataset_infos_path():
'''Path to indexed dataset meta-infos.'''
return os.path.join(get_base_db_workdir(), "indexed_dataset_infos.json")
def save_indexed_dataset_infos(indexed_dataset_infos):
'''Save dataset order & meta-info.'''
# Remove 'dataset' field.
clean_infos = []
for info in indexed_dataset_infos:
info = dict(info)
del info["dataset"]
clean_infos.append(info)
# Save.
with open(get_indexed_dataset_infos_path(), "w") as f:
json.dump(clean_infos, f, indent=4)
def get_indexed_dataset_infos():
'''Load indexed dataset meta-infos.'''
# Load json.
path = get_indexed_dataset_infos_path()
with open(path) as f:
infos = json.load(f)
# Add indexed datasets.
for info in infos:
info["dataset"] = MMapIndexedDataset(info["prefix"], skip_warmup=True)
return infos
def get_individual_db_dir(name):
'''Individual DB's directory.'''
return os.path.join(get_base_db_workdir(), "individual", name)
def get_individual_chunk_db(ds_id, ds_info):
'''Load individual dataset's chunk DB.'''
db_paths = sorted(glob.glob(ds_info["db_dir"] + "/*hdf5"))
# *Note*: convert to dataset, rather than copying to memory.
db = np.zeros((ds_info["n_chunks"], 5), dtype="uint32")
db[:, 0] = ds_id
start_idx = 0
for db_path in db_paths:
f = h5py.File(db_path, "r")
n_chunks_current = f["chunks_valid"].shape[0]
db[start_idx:(start_idx+n_chunks_current), 1:] = f["chunks_valid"]
start_idx += n_chunks_current
f.close()
assert start_idx == ds_info["n_chunks"]
return db
def get_individual_doc_offsets(ds_id, ds_info):
    '''Load individual dataset's document offsets.'''
paths = sorted(glob.glob(ds_info["db_dir"] + "/*hdf5"))
# *Note*: convert to dataset, rather than copying to memory.
doc_offsets = np.zeros((ds_info["n_docs"], 3), dtype="uint64")
doc_offsets[:, 0] = ds_id
start_idx = 0
start_offset = 0
for path in paths:
with h5py.File(path) as f:
current_doc_offsets = np.copy(f["doc_offsets"])
current_doc_offsets[:, 1] += start_offset
current_ndocs = current_doc_offsets.shape[0]
doc_offsets[start_idx:(start_idx+current_ndocs), 1:] = \
current_doc_offsets
start_idx += current_ndocs
start_offset = current_doc_offsets[-1, 1].item()
return doc_offsets
def get_merged_db_path_map():
'''Paths to merged datasets.'''
base_dir = get_base_db_workdir()
return {
"sampled" : os.path.join(base_dir, "merged", "sampled.hdf5"),
"train" : os.path.join(base_dir, "merged", "train.hdf5"),
"valid" : os.path.join(base_dir, "merged", "valid.hdf5"),
}
def get_merged_dataset(db_type, indexed_dataset_infos=None):
'''Get merged dataset.'''
args = get_retro_args()
if not indexed_dataset_infos:
indexed_dataset_infos = get_indexed_dataset_infos()
# Load chunks.
db_path = get_merged_db_path_map()[db_type]
f = h5py.File(db_path, "r")
chunks = f["chunks"]
# DB dataset.
indexed_datasets = [ info["dataset"] for info in indexed_dataset_infos ]
dataset = DBDataset(db_path, indexed_datasets, chunks,
args.retro_gpt_chunk_length)
return dataset
def get_merged_sampled_dataset(indexed_dataset_infos=None):
return get_merged_dataset("sampled", indexed_dataset_infos)
def get_merged_train_dataset(indexed_dataset_infos=None):
return get_merged_dataset("train", indexed_dataset_infos)
def get_merged_valid_dataset(indexed_dataset_infos=None):
return get_merged_dataset("valid", indexed_dataset_infos)
|
Megatron-LM-master
|
tools/retro/db/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import numpy as np
import os
import shutil
import torch
from tqdm import tqdm
from megatron import get_retro_args, print_rank_0
from tools.bert_embedding import DiskDataParallelBertEmbedder
from tools.retro.db.utils import (
get_indexed_dataset_infos,
get_merged_sampled_dataset,
get_merged_train_dataset,
)
from tools.retro.external_libs import h5py
from tools.retro.index.factory import IndexFactory
from tools.retro.utils import GPTToTextDataset
from .utils import (
get_training_data_block_dir,
get_training_data_block_paths,
get_training_data_merged_path,
get_training_data_root_dir,
)
##################################################
# Train index.
##################################################
def get_empty_index_path():
'''Path of empty index.'''
args = get_retro_args()
index = IndexFactory.get_index(args.retro_index_type)
empty_index_path = index.get_empty_index_path()
return empty_index_path
def get_block_nload(block_path, load_fraction):
with h5py.File(block_path) as fi:
return int(load_fraction * fi["data"].shape[0])
def merge_embedding_blocks():
if torch.distributed.get_rank() != 0:
return
args = get_retro_args()
# Get block, merged paths.
load_fraction = args.retro_index_train_load_fraction
block_paths = get_training_data_block_paths()
bin_path = get_training_data_merged_path()
# Skip, if already built.
if os.path.exists(bin_path):
return
# Merge blocks.
with open(bin_path, "wb") as fo:
byte_offset = 0
for block_idx, block_path in \
enumerate(tqdm(block_paths, "merge train embeddings")):
with h5py.File(block_path) as fi:
nload = get_block_nload(block_path, load_fraction)
block = np.array(fi["data"][:nload], copy = False)
fo.write(block.tobytes())
byte_offset += block.size * block.itemsize
fo.seek(byte_offset)
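# Minimal sketch of reading the merged '.bin' back; this mirrors how
# FaissBaseIndex._train() consumes it (float32, row-major). The hidden
# size of 1024 is an assumption matching the Bert embedder's output.
def _example_read_merged_embeddings(bin_path, hidden_size=1024):
    import numpy as np
    return np.memmap(bin_path, dtype="f4", mode="r") \
        .reshape((-1, hidden_size))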
def embed_db():
'''Embed DB chunks.
Store chunks in blocks on disk. These blocks will later be merged into
a single dataset for training the index.
'''
args = get_retro_args()
merged_train_data_path = get_training_data_merged_path()
if os.path.exists(merged_train_data_path):
return
# Get db dataset.
gpt_dataset = get_merged_sampled_dataset()
text_dataset = GPTToTextDataset(gpt_dataset)
# Embed dataset.
embedder = DiskDataParallelBertEmbedder(args.retro_bert_batch_size,
args.retro_bert_max_chunk_length,
args.retro_block_size,
args.bert_embedder_type)
embedder.embed_text_dataset("index",
get_training_data_block_dir(),
text_dataset)
# Merge embeddings.
merge_embedding_blocks()
def train_on_embeddings():
'''Train index on embedded DB chunks.'''
args = get_retro_args()
index = IndexFactory.get_index(args.retro_index_type)
index.train()
def remove_embeddings():
'''Remove embeddings after training.'''
torch.distributed.barrier()
if torch.distributed.get_rank() != 0:
return
empty_index_path = get_empty_index_path()
assert os.path.isfile(empty_index_path)
shutil.rmtree(get_training_data_root_dir(), ignore_errors=True)
def train_index():
'''Train index on DB chunks.'''
args = get_retro_args()
# Check if trained index already exists.
if not os.path.isfile(get_empty_index_path()):
# Embed training chunks.
embed_db()
# Train index on embeddings.
train_on_embeddings()
# Wait for (single-process) training to complete.
torch.distributed.barrier()
# Remove embeddings.
if args.retro_index_delete_training_embeddings:
remove_embeddings()
##################################################
# Add to index.
##################################################
def add_to_index():
'''Add DB chunks to index.'''
args = get_retro_args()
# Get index.
index = IndexFactory.get_index(args.retro_index_type)
# Get text dataset.
gpt_dataset = get_merged_train_dataset()
text_dataset = GPTToTextDataset(gpt_dataset)
# Add to index.
output_index_path = index.add(text_dataset)
return output_index_path
##################################################
# Build index (train + add).
##################################################
def build_index():
'''Build index.
Building index involves sequentially running stages above:
- Train index (on sampled training chunks).
- Add to index (on all training chunks).
'''
# Train index.
train_index()
# Add to index.
add_to_index()
|
Megatron-LM-master
|
tools/retro/index/build.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import abc
import numpy as np
import os
import torch
from megatron import get_retro_args
from tools.retro.external_libs import faiss
from .utils import get_index_dir
class Index(abc.ABC):
'''Abstract base class for indexes.
*Note* : While currently only Faiss-based classes are implemented, in the
future, this class will be extended with other types of indexes that have
different performance-accuracy trade-offs.
The primary methods to override are:
- train() : Train index on the sampled training chunks.
- add() : Add all training chunks to index.
'''
@classmethod
def c_verbose(cls, index, v):
'''Make index object verbose.'''
assert isinstance(v, bool)
faiss.ParameterSpace().set_index_parameter(index, "verbose", v)
def get_empty_index_path(self):
args = get_retro_args()
return os.path.join(
get_index_dir(),
"empty_%.3f.faissindex" % args.retro_index_train_load_fraction,
)
def get_empty_index(self):
return faiss.read_index(self.get_empty_index_path())
def get_added_index_path(self):
args = get_retro_args()
return os.path.join(
get_index_dir(),
"added_%.3f_%.3f.faissindex" % (
args.retro_index_train_load_fraction,
args.retro_index_add_load_fraction,
),
)
def get_added_index(self):
return faiss.read_index(self.get_added_index_path())
@abc.abstractmethod
def train(self, *args):
pass
@abc.abstractmethod
def add(self, *args):
pass
def embed_text_dataset_block(self, embedder, text_dataset, _range):
'''Embed a range of a text dataset.'''
sub_dataset = torch.utils.data.Subset(text_dataset, range(*_range))
return embedder.embed_text_dataset(sub_dataset)
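# Minimal sketch of a custom index type (hypothetical class; a real
# implementation would also be registered in IndexFactory, see factory.py):
class _ExampleFlatIndex(Index):
    '''Illustrative no-op subclass showing the required interface.'''
    def train(self, *args):
        pass  # flat indexes require no training
    def add(self, *args):
        raise NotImplementedError("illustrative sketch only")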
|
Megatron-LM-master
|
tools/retro/index/index.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .build import add_to_index, build_index, train_index
# from .index import Index
|
Megatron-LM-master
|
tools/retro/index/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .indexes import FaissBaseIndex, FaissParallelAddIndex
class IndexFactory:
'''Get index.
    Index type generally read from argument '--retro-index-type'.
'''
@classmethod
def get_index_class(cls, index_type):
return {
"faiss-base" : FaissBaseIndex,
"faiss-par-add" : FaissParallelAddIndex,
}[index_type]
@classmethod
def get_index(cls, index_type):
index_class = cls.get_index_class(index_type)
index = index_class()
return index
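# Usage sketch (illustrative):
#
#   index = IndexFactory.get_index("faiss-par-add")
#   # -> FaissParallelAddIndex instance; "faiss-base" -> FaissBaseIndex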
|
Megatron-LM-master
|
tools/retro/index/factory.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import concurrent
import gc
import glob
import numpy as np
import os
import psutil
import time
import torch
from tqdm import tqdm
from megatron import get_retro_args, print_rank_0
from tools.retro.db.utils import get_indexed_dataset_infos
from tools.retro.external_libs import h5py
def get_index_dir():
"""Create sub-directory for this index."""
args = get_retro_args()
# Directory path.
index_dir_path = os.path.join(
args.retro_workdir,
"index",
args.retro_index_type,
args.retro_index_str,
)
# Make directory.
os.makedirs(index_dir_path, exist_ok=True)
return index_dir_path
def num_samples_to_block_ranges(num_samples):
'''Split a range (length num_samples) into sequence of block ranges
of size block_size.'''
args = get_retro_args()
block_size = args.retro_block_size
start_idxs = list(range(0, num_samples, block_size))
end_idxs = [min(num_samples, s + block_size) for s in start_idxs]
ranges = list(zip(start_idxs, end_idxs))
return ranges
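# Illustrative block-splitting arithmetic (a block size of 4 is hypothetical;
# the real value comes from args.retro_block_size):
def _example_block_ranges(num_samples=10, block_size=4):
    starts = list(range(0, num_samples, block_size))
    return [(s, min(num_samples, s + block_size)) for s in starts]
# -> [(0, 4), (4, 8), (8, 10)]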
def get_training_data_root_dir():
args = get_retro_args()
return os.path.join(args.retro_workdir, "index", "train_emb")
def get_training_data_block_dir():
return os.path.join(get_training_data_root_dir(), "blocks")
def get_training_data_block_paths():
return sorted(glob.glob(get_training_data_block_dir() + "/*.hdf5"))
def get_training_data_merged_path():
args = get_retro_args()
return os.path.join(get_training_data_root_dir(),
"train_%.3f.bin" % args.retro_index_train_load_fraction)
def get_added_codes_dir():
return os.path.join(get_index_dir(), "add_codes")
def get_added_code_paths():
return sorted(glob.glob(get_added_codes_dir() + "/*.hdf5"))
|
Megatron-LM-master
|
tools/retro/index/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Multi-process & multi-node version of Faiss's index.add().
This class inherits from FaissBaseIndex, and optimizes the 'add()' method by
making it multi-node and multi-process, with bit-wise equivalence to
FaissBaseIndex. This allows 'add()' to scale out to very large datasets, since
the vast majority of the computational effort is embarrassingly parallel.
"""
import numpy as np
import os
import psutil
import shutil
import torch
from tqdm import tqdm
from megatron import get_retro_args, print_rank_0
from tools.bert_embedding import BertEmbedder
from tools.bert_embedding.utils import get_missing_blocks_by_rank
from tools.retro.external_libs import faiss, h5py
from tools.retro.index.utils import get_added_codes_dir, get_added_code_paths
from .faiss_base import FaissBaseIndex
class FaissParallelAddIndex(FaissBaseIndex):
def encode_block(self, index, embedder, text_dataset, block):
'''Encode sub-dataset block, to be later added to index.
Encode the data subset, generally in blocks of 1M vectors each. For
each block, the empty/trained index is loaded, codes are computed
via index.sa_encode(), and the resulting codes are saved to disk.
'''
args = get_retro_args()
# Embed block.
embeddings = self.embed_text_dataset_block(
embedder,
text_dataset,
block["range"],
)
# Encode block.
print_rank_0("encode.")
codes = index.sa_encode(embeddings)
        # Save codes.
print_rank_0("save codes.")
os.makedirs(os.path.dirname(block["path"]), exist_ok=True)
with h5py.File(block["path"], "w") as f:
f.create_dataset("data", data=codes)
def encode(self, text_dataset):
'''Encode text dataset, to be later added to index.'''
args = get_retro_args()
codes_dir = get_added_codes_dir()
# Index.
index = self.get_empty_index()
# Bert embedder.
embedder = BertEmbedder(args.retro_bert_batch_size,
args.retro_bert_max_chunk_length,
args.bert_embedder_type)
# Missing code blocks.
def validate(f):
assert len(f["data"].shape) == 2
n_missing_blocks, missing_code_blocks = get_missing_blocks_by_rank(
codes_dir,
len(text_dataset),
args.retro_block_size,
validate=validate,
)
# Encode each block.
for block_index, block in enumerate(missing_code_blocks):
if block is not None:
# Progress.
print_rank_0("encode block %d / %d ... %s." % (
block_index,
len(missing_code_blocks),
block["path"],
))
# Query block neighbors.
self.encode_block(index, embedder, text_dataset, block)
# Synchronize progress across all ranks. (for easier observation)
print_rank_0(" > waiting for other ranks to finish block.")
torch.distributed.barrier()
def add_codes(self):
if torch.distributed.get_rank() != 0:
return
added_index_path = self.get_added_index_path()
if os.path.exists(added_index_path):
return
args = get_retro_args()
# Index.
print_rank_0("read empty index.")
index = self.get_empty_index()
index_ivf = faiss.extract_index_ivf(index)
# Add codes.
print_rank_0("add codes.")
code_paths = get_added_code_paths()
pbar = tqdm(code_paths)
for code_path in pbar:
pbar.set_description("add codes, mem %.3f gb, %.1f%%" % (
psutil.virtual_memory()[3] / 1024**3,
psutil.virtual_memory()[2],
))
with h5py.File(code_path) as f:
nload = int(args.retro_index_add_load_fraction*f["data"].shape[0])
offset = int(os.path.basename(code_path).split("-")[0])
xids = np.arange(offset, offset + nload)
codes = np.copy(f["data"][:nload])
index_ivf.add_sa_codes(codes, xids)
# Update index's ntotal.
index.ntotal = index_ivf.ntotal
# Write index.
print_rank_0("write added index.")
faiss.write_index(index, added_index_path)
def remove_codes(self):
'''Remove added codes after adding to index.'''
if torch.distributed.get_rank() != 0:
return
assert os.path.isfile(self.get_added_index_path())
args = get_retro_args()
if args.retro_index_delete_added_codes:
raise Exception("remove?")
shutil.rmtree(get_added_codes_dir(), ignore_errors=True)
def add(self, text_dataset):
# Encode chunks.
self.encode(text_dataset)
# Add codes to index.
self.add_codes()
# Wait for (single-process) adding to complete.
torch.distributed.barrier()
# Remove codes.
self.remove_codes()
|
Megatron-LM-master
|
tools/retro/index/indexes/faiss_par_add.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
This class implements a simple, un-optimized wrapper around a Faiss index that
implements the Index interface (see tools/retro/index/index.py). While this
class is instantiable, it is meant to be extended with optimizations in
classes that inherit from this class (see FaissParallelAddIndex for an
example).
"""
from datetime import timedelta
import numpy as np
import os
import torch
from tqdm import tqdm
from megatron import get_retro_args, print_rank_0
from tools.bert_embedding import BertEmbedder
from tools.retro.external_libs import faiss
from tools.retro.index.index import Index
from tools.retro.index.utils import (
get_training_data_merged_path,
num_samples_to_block_ranges,
)
class FaissBaseIndex(Index):
def _train(self):
'''Train index (rank 0's method).'''
args = get_retro_args()
assert torch.distributed.get_rank() == 0
        # Set num threads (torch.distributed resets it to 1).
        faiss.omp_set_num_threads(64)
empty_index_path = self.get_empty_index_path()
# Index already exists? -> return.
if os.path.isfile(empty_index_path):
return
# Load data.
merged_path = get_training_data_merged_path()
inp = np.memmap(
merged_path,
dtype = "f4",
mode = "r",
).reshape((-1, args.hidden_size))
# Init index.
index = faiss.index_factory(args.retro_index_nfeats,
args.retro_index_str)
# Move to GPU.
print("> move faiss index to gpu.")
index_ivf = faiss.extract_index_ivf(index)
clustering_index = \
faiss.index_cpu_to_all_gpus(faiss.IndexFlatL2(index_ivf.d))
index_ivf.clustering_index = clustering_index
print("> finished moving to gpu.")
self.c_verbose(index, True)
self.c_verbose(index_ivf, True)
self.c_verbose(index_ivf.quantizer, True)
self.c_verbose(index_ivf.clustering_index, True)
# Train index.
index.train(inp)
# Save index.
faiss.write_index(index, empty_index_path)
def train(self):
'''Train index.'''
# Single process only.
if torch.distributed.get_rank() == 0:
self._train()
torch.distributed.barrier()
def _add(self, text_dataset):
'''Add to index (rank 0's method).'''
assert torch.distributed.get_rank() == 0
args = get_retro_args()
dataset_sample_ranges = num_samples_to_block_ranges(len(text_dataset))
# Set num threads (torch.distributed reset it to 1).
faiss.omp_set_num_threads(64)
# Bert embedder.
embedder = BertEmbedder(args.retro_bert_batch_size,
args.retro_bert_max_chunk_length,
args.bert_embedder_type)
# Empty/added index paths.
empty_index_path = self.get_empty_index_path()
added_index_path = self.get_added_index_path()
# Skip adding, if index exists.
if os.path.isfile(added_index_path):
return
# Read trained index.
index = faiss.read_index(empty_index_path)
# Iterate data blocks & add.
for sample_range in tqdm(dataset_sample_ranges, "faiss_base.add"):
# Embed text.
embeds = self.embed_text_dataset_block(
embedder, text_dataset, sample_range)
# Add to index.
index.add(embeds)
# Write index.
faiss.write_index(index, added_index_path)
def add(self, text_dataset):
'''Add to index.'''
# Single process only.
if torch.distributed.get_rank() == 0:
self._add(text_dataset)
# Wait for rank 0.
torch.distributed.barrier()
# Get output index path, for return.
return self.get_added_index_path()
|
Megatron-LM-master
|
tools/retro/index/indexes/faiss_base.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .faiss_base import FaissBaseIndex
from .faiss_par_add import FaissParallelAddIndex
|
Megatron-LM-master
|
tools/retro/index/indexes/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import os
import torch
from megatron import get_retro_args, print_rank_0
from megatron.data.gpt_dataset import build_train_valid_test_datasets \
as build_gpt_train_valid_test_datasets
from megatron.training import (
build_train_valid_test_datasets as build_pretraining_train_valid_test_datasets,
update_train_iters,
)
from tools.retro.db.utils import get_indexed_dataset_infos
from tools.retro.utils import get_num_chunks_per_sample
from .utils import get_neighbor_dirname, get_query_workdir
class ChunkDataset(torch.utils.data.Dataset):
'''Pretraining chunk dataset wraps a standard GPT dataset.
This dataset conceptually divides each sample (e.g., length 2048)
into chunks (e.g., length 64) and restructures them into a list of
chunks (e.g., length num_samples * num_chunks_per_sample).
'''
def __init__(self, sample_dataset, chunk_length):
super().__init__()
self.sample_dataset = sample_dataset
self.chunk_length = chunk_length
self.n_chunks_per_sample = get_num_chunks_per_sample()
self.n_samples = len(sample_dataset)
self.n_chunks = self.n_samples * self.n_chunks_per_sample
def __len__(self):
return self.n_chunks
def __getitem__(self, idx):
# Convert global chunk index to global sample index & local chunk index.
sample_idx = idx // self.n_chunks_per_sample
chunk_idx = idx % self.n_chunks_per_sample
# Extract sample data.
sample = self.sample_dataset[sample_idx]
sample_token_ids = sample["text"]
sample_doc_ids = sample["doc_ids"]
# Chunk start/end token idxs.
token_start_idx = chunk_idx * self.chunk_length
token_end_idx = token_start_idx + self.chunk_length
chunk_token_ids = sample_token_ids[token_start_idx:token_end_idx]
# Sample.
return {
"doc_ids" : sample_doc_ids,
"text" : chunk_token_ids,
}
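# Illustrative index arithmetic for __getitem__ above, assuming a sample
# length of 2048 and chunk length of 64 (so 32 chunks per sample): global
# chunk idx 70 -> sample_idx 70 // 32 == 2, chunk_idx 70 % 32 == 6, i.e.
# tokens [384:448) of sample 2.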
def verify_indexed_dataset_order():
'''Verify pretraining order same as DB order.'''
args = get_retro_args()
# DB dataset prefixes.
db_indexed_dataset_infos = get_indexed_dataset_infos()
db_prefixes = [ info["prefix"] for info in db_indexed_dataset_infos ]
# Verify order & prefixes.
    assert len(args.data_path) >= 2, "only blendable datasets are supported."
pretraining_prefixes = args.data_path[1:None:2]
if len(db_prefixes) != len(pretraining_prefixes):
raise Exception("inconsistent dataset count between db & pretraining.")
if db_prefixes != pretraining_prefixes:
raise Exception("inconsistent dataset order between db & pretraining.")
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_retro_args()
print_rank_0('> building train, validation, and test datasets '
'for GPT ...')
train_ds, valid_ds, test_ds = build_gpt_train_valid_test_datasets(
data_prefix=args.retro_gpt_data_path,
splits_string=args.retro_gpt_split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.retro_gpt_seq_length,
seed=args.retro_gpt_seed,
skip_warmup=(not args.retro_gpt_mmap_warmup),
return_doc_ids=args.retro_return_doc_ids)
print_rank_0("> finished creating pretrained GPT datasets ...")
return train_ds, valid_ds, test_ds
def get_chunk_dataset_map():
'''Get train, valid, test chunk datasets.'''
args = get_retro_args()
# Update train iters.
update_train_iters(args)
args.iteration = 0
args.consumed_train_samples = 0
# Verify indexed dataset order.
verify_indexed_dataset_order()
# Datasets.
print_rank_0(" > datasets.")
train_ds, valid_ds, test_ds = build_pretraining_train_valid_test_datasets(
train_valid_test_datasets_provider)
sample_dataset_map = {
"train" : train_ds,
"valid" : valid_ds,
"test" : test_ds,
}
# Info dict.
chunk_dataset_map = {
key : {
"neighbor_dir" : get_neighbor_dirname(key, sample_ds),
"data" : ChunkDataset(sample_ds, args.retro_gpt_chunk_length),
}
for key, sample_ds in sample_dataset_map.items() if sample_ds
}
return chunk_dataset_map
|
Megatron-LM-master
|
tools/retro/query/chunk_dataset.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import numpy as np
import os
import torch
from megatron import get_args, get_retro_args
from tools.bert_embedding.utils import BlockPathMap
from tools.retro.db.utils import get_merged_train_dataset as get_db_dataset
from tools.retro.external_libs import h5py
from .chunk_dataset import get_chunk_dataset_map
from .utils import get_neighbor_dirname
class RetroDataset(torch.utils.data.Dataset):
'''Dataset of retro samples.
Each sample contains the original GPT sample, along with the token IDs
of each neighbor of each chunk within the sequence. Neighbor array has
shape (num_chunks_per_sample, num_neighbors, num_retrieved_tokens).
'''
def __init__(self,
num_neighbors,
num_retrieved_chunks,
block_size,
db_dataset,
chunk_dataset,
neighbor_path_map):
'''Note: chunk dataset wraps original GPT dataset (see
chunk_dataset.py).'''
super().__init__()
self.num_neighbors = num_neighbors
self.num_retrieved_chunks = num_retrieved_chunks
self.block_size = block_size
self.db_dataset = db_dataset
self.chunk_dataset = chunk_dataset
self.neighbor_path_map = neighbor_path_map
def __len__(self):
return len(self.chunk_dataset.sample_dataset)
def __getitem__(self, sample_idx):
n_chunks_per_sample = self.chunk_dataset.n_chunks_per_sample
# Get standard sample.
sample = self.chunk_dataset.sample_dataset[sample_idx]
# Sample idx to chunk idxs.
chunk_idxs = list(range(
sample_idx * n_chunks_per_sample,
(sample_idx + 1) * n_chunks_per_sample,
))
# Collect retrieved tokens.
all_retrieved_chunk_ids = []
all_retrieved_token_ids = []
for chunk_idx in chunk_idxs:
# Neighbor chunk ids.
neighbor_path = self.neighbor_path_map[chunk_idx]
with h5py.File(neighbor_path, "r") as f:
neighbor_chunk_ids = f["neighbors"] \
[chunk_idx % self.block_size, :self.num_neighbors].tolist()
# Retrieved (neighbor + continuation) token ids.
retrieved_chunk_ids = []
retrieved_token_ids = []
for neighbor_chunk_id in neighbor_chunk_ids:
current_chunk_ids = [
i % len(self.db_dataset)
for i in range(
neighbor_chunk_id,
neighbor_chunk_id + self.num_retrieved_chunks)]
current_token_ids = [self.db_dataset[ci]["text"]
for ci in current_chunk_ids]
retrieved_chunk_ids.append(current_chunk_ids)
retrieved_token_ids.append(current_token_ids)
# Collect retrieved tokens.
all_retrieved_chunk_ids.append(retrieved_chunk_ids)
all_retrieved_token_ids.append(retrieved_token_ids)
# Reshape retrieved tokens.
all_retrieved_chunk_ids = np.array(all_retrieved_chunk_ids) \
.reshape((n_chunks_per_sample, self.num_neighbors, -1))
all_retrieved_token_ids = np.array(all_retrieved_token_ids) \
.reshape((n_chunks_per_sample, self.num_neighbors, -1))
# Sample.
sample = {
**sample,
"neighbor_chunks" : all_retrieved_chunk_ids,
"neighbor_tokens" : all_retrieved_token_ids,
}
return sample
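# Illustrative sample layout (follows the reshapes above):
#
#   sample["neighbor_chunks"].shape
#       == (n_chunks_per_sample, num_neighbors, num_retrieved_chunks)
#   sample["neighbor_tokens"].shape
#       == (n_chunks_per_sample, num_neighbors,
#           num_retrieved_chunks * retro_gpt_chunk_length)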
def get_retro_datasets(verify_sizes=True):
'''Get train, valid, test retro datasets.'''
args = get_args()
retro_args = get_retro_args()
# DB dataset.
db_dataset = get_db_dataset()
# Retro datasets.
chunk_ds_info_map = get_chunk_dataset_map()
retro_dataset_map = {}
for data_key, chunk_ds_info in chunk_ds_info_map.items():
chunk_dataset = chunk_ds_info["data"]
neighbor_dir = chunk_ds_info["neighbor_dir"]
neighbor_path_map = BlockPathMap.from_dir(neighbor_dir,
retro_args.retro_block_size)
# Verify dataset prefixes.
expected_dir = get_neighbor_dirname(data_key, chunk_dataset.sample_dataset)
assert expected_dir == neighbor_dir, \
"inconsistent dataset source; '%s' vs. '%s'." % \
(expected_dir, neighbor_dir)
# Verify num chunks.
n_sample_chunks = len(chunk_dataset)
n_neighbor_chunks = neighbor_path_map.max_idx
if not os.path.isdir(neighbor_dir):
if torch.distributed.get_rank() == 0:
raise Exception("neighbor directory '%s' not found; please "
"compare --train-samples, --seq-length, --seed, "
"--eval-iters, and --eval-interval, with "
"retro preprocessing args." %
neighbor_dir)
torch.distributed.barrier()
exit()
if verify_sizes and n_sample_chunks != n_neighbor_chunks:
if torch.distributed.get_rank() == 0:
print("neighbor_dir : %s" % neighbor_dir)
print("neighbor_path_map : %s" % neighbor_path_map)
raise Exception("num sampled chunks (%d) != num neighbor chunks "
"(%d); did you complete querying the entire "
"pretraining dataset?"
% (n_sample_chunks, n_neighbor_chunks))
torch.distributed.barrier()
exit()
# Retro dataset.
retro_dataset_map[data_key] = RetroDataset(
num_neighbors=args.retro_num_neighbors,
num_retrieved_chunks=args.retro_num_retrieved_chunks,
block_size=retro_args.retro_block_size,
db_dataset=db_dataset,
chunk_dataset=chunk_dataset,
neighbor_path_map=neighbor_path_map,
)
# Extract datasets.
train_ds = retro_dataset_map.get("train", None)
valid_ds = retro_dataset_map.get("valid", None)
test_ds = retro_dataset_map.get("test", None)
return train_ds, valid_ds, test_ds
|
Megatron-LM-master
|
tools/retro/query/retro_dataset.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import numpy as np
import os
import psutil
import time
import torch
from tqdm import tqdm
from megatron import get_retro_args, print_rank_0
from tools.bert_embedding import BertEmbedder
from tools.bert_embedding.utils import get_missing_blocks_by_rank
from tools.retro.db.utils import \
get_merged_train_dataset as get_db_merged_train_dataset
from tools.retro.external_libs import faiss, h5py
from tools.retro.index.factory import IndexFactory
from tools.retro.index.utils import get_index_dir
from tools.retro.utils import GPTToTextDataset
from .chunk_dataset import get_chunk_dataset_map as get_query_dataset_map
def get_index(ondisk=False):
'''Read index from disk.'''
args = get_retro_args()
# Load index.
index_wrapper = IndexFactory.get_index(args.retro_index_type)
index_dir = get_index_dir()
added_index_path = index_wrapper.get_added_index_path()
if ondisk:
index = faiss.read_index(added_index_path, faiss.IO_FLAG_MMAP)
else:
index = faiss.read_index(added_index_path)
# Search parameters.
faiss.ParameterSpace().set_index_parameter(index, "efSearch",
args.retro_query_ef_search)
faiss.ParameterSpace().set_index_parameter(index, "nprobe",
args.retro_query_nprobe)
return index
def embed_block(gpt_dataset, block, embedder):
'''Embed block of chunks.'''
text_block_dataset = torch.utils.data.Subset(
GPTToTextDataset(gpt_dataset),
range(*block["range"]),
)
return embedder.embed_text_dataset(text_block_dataset)
def query_embeddings(db_dataset, index,
embeddings, chunk_id_range,
sample_map, n_chunks_per_sample,
verbose=True):
'''Query neighbors of a block of embeddings.'''
args = get_retro_args()
# Query neighbor ids.
if verbose: print_rank_0("search.")
t = time.time()
assert index.ntotal > 0, "check we don't accidentally have an empty index."
_, query_neighbor_ids = \
index.search(embeddings, args.retro_query_num_neighbors_query)
if verbose: print_rank_0(" time : %.3f sec." % (time.time() - t))
# Filter banned neighbor ids.
if verbose: print_rank_0("filter banned neighbor ids.")
filtered_neighbor_ids = np.full(
shape=(len(query_neighbor_ids), args.retro_query_num_neighbors_save),
fill_value=-1,
dtype="int64",
)
min_chunk_id, max_chunk_id = chunk_id_range
for chunk_id in range(min_chunk_id, max_chunk_id):
sample_id = chunk_id // n_chunks_per_sample
sample = sample_map[sample_id]
sample_dataset_idx = sample["dataset_idx"].item()
sample_doc_ids = sample["doc_ids"].tolist()
sample_doc_tuples = [(sample_dataset_idx, d) for d in sample_doc_ids]
# Get valid neighbors (!= -1).
query_row = [ i for i in query_neighbor_ids[chunk_id-min_chunk_id]
if i >= 0 ]
# Filter row.
filtered_row = [ i for i in query_row
if tuple(db_dataset.doc_tuples[i].tolist())
not in sample_doc_tuples ]
filtered_row = filtered_row[:args.retro_query_num_neighbors_save]
filtered_row += \
[-1] * (args.retro_query_num_neighbors_save - len(filtered_row))
filtered_neighbor_ids[chunk_id-min_chunk_id] = filtered_row
return query_neighbor_ids, filtered_neighbor_ids
def query_embedding_block(db_dataset, index,
embeddings, chunk_id_range,
sample_map, n_chunks_per_sample):
query_neighbor_ids = []
filtered_neighbor_ids = []
# Query in sub-blocks.
partial_block_size = 1000
for partial_start_idx in tqdm(
range(0, len(embeddings), partial_block_size),
"search",
):
partial_end_idx = min(len(embeddings),
partial_start_idx + partial_block_size)
partial_embeddings = embeddings[partial_start_idx:partial_end_idx]
partial_chunk_id_range = (
chunk_id_range[0] + partial_start_idx,
chunk_id_range[0] + partial_end_idx,
)
partial_query_neighbor_ids, partial_filtered_neighbor_ids = \
query_embeddings(db_dataset, index,
partial_embeddings, partial_chunk_id_range,
sample_map, n_chunks_per_sample,
verbose=False)
query_neighbor_ids.append(partial_query_neighbor_ids)
filtered_neighbor_ids.append(partial_filtered_neighbor_ids)
# Concatenate.
query_neighbor_ids = np.concatenate(query_neighbor_ids, axis=0)
filtered_neighbor_ids = np.concatenate(filtered_neighbor_ids, axis=0)
return query_neighbor_ids, filtered_neighbor_ids
def query_block_neighbors(db_dataset, query_dataset,
index, embedder,
block):
'''Query neighbors of a dataset block (i.e., range).'''
args = get_retro_args()
n_chunks_per_sample = query_dataset.n_chunks_per_sample
# Sample map.
sample_ids = sorted(list(set(chunk_id // n_chunks_per_sample
for chunk_id in range(*block["range"]))))
sample_map = {}
for i in sample_ids:
sample = query_dataset.sample_dataset[i]
sample_map[i] = {
"dataset_idx" : sample["dataset_idx"],
"doc_ids" : sample["doc_ids"],
}
# Embed block.
embeddings = embed_block(query_dataset, block, embedder)
# Query embeddings.
_, filtered_neighbor_ids = query_embedding_block(
db_dataset, index,
embeddings, block["range"],
sample_map, n_chunks_per_sample)
# Save neighbors.
print_rank_0("save neighbors.")
os.makedirs(os.path.dirname(block["path"]), exist_ok=True)
f = h5py.File(block["path"], "w")
f.create_dataset("neighbors", data=filtered_neighbor_ids)
f.close()
def query_dataset_neighbors(db_dataset, query_dataset,
prefix, neighbor_dir,
index, embedder):
'''Query neighbors of each chunk within a dataset.'''
args = get_retro_args()
def validate(f):
assert f["neighbors"].shape[1] == args.retro_query_num_neighbors_save, \
"neighbors.shape == %s; num_neighbors_target == %d." % (
str(f["neighbors"].shape),
args.retro_num_neighbors_target,
)
n_missing_blocks, missing_neighbor_blocks = get_missing_blocks_by_rank(
neighbor_dir,
len(query_dataset),
args.retro_block_size,
validate=validate,
)
# Query each block.
for block_index, block in enumerate(missing_neighbor_blocks):
if block is not None:
# Progress.
print_rank_0("query '%s' block %d / %d ... %s ... mem %.3f gb, %.1f%%." % (
prefix,
block_index,
len(missing_neighbor_blocks),
os.path.basename(block["path"]),
psutil.virtual_memory()[3] / 1024**3,
psutil.virtual_memory()[2],
))
# Query block neighbors.
query_block_neighbors(db_dataset, query_dataset,
index, embedder,
block)
# Synchronize progress across all ranks. (for easier observation)
print_rank_0(" > waiting for other ranks to finish block.")
torch.distributed.barrier()
def query_pretraining_neighbors():
'''Query pretraining datasets (train & valid).'''
args = get_retro_args()
# Num threads.
faiss.omp_set_num_threads(64)
# Load chunk db dataset.
print_rank_0("load chunk db dataset.")
db_dataset = get_db_merged_train_dataset()
db_dataset.load_doc_tuples()
# Load index.
print_rank_0(" > get index.")
index = get_index()
# Load datasets.
print_rank_0(" > get dataset map.")
query_dataset_map = get_query_dataset_map()
# Bert embedder.
embedder = BertEmbedder(args.retro_bert_batch_size,
args.retro_bert_max_chunk_length,
args.bert_embedder_type)
# Query each (i.e., train, valid, test) dataset.
print_rank_0(" > query.")
for prefix, info in query_dataset_map.items():
print_rank_0(" > query '%s' dataset ... %d samples." %
(prefix, len(info["data"])))
query_dataset_neighbors(db_dataset, info["data"],
prefix, info["neighbor_dir"],
index, embedder)
|
Megatron-LM-master
|
tools/retro/query/query.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .query import query_pretraining_neighbors
|
Megatron-LM-master
|
tools/retro/query/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import hashlib
import os
from megatron import get_retro_args
def get_query_workdir():
args = get_retro_args()
return os.path.join(args.retro_workdir, "query")
def get_neighbor_dirname(key, dataset):
hashes = ",".join([ d.desc_hash for d in dataset.datasets ])
    digest = hashlib.md5(hashes.encode()).hexdigest()
    return os.path.join(get_query_workdir(), os.path.basename(f"{key}_{digest}"))
|
Megatron-LM-master
|
tools/retro/query/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import importlib
required_libs = [
"h5py",
"transformers", # for huggingface bert
]
for lib in required_libs:
try:
globals()[lib] = importlib.import_module(lib)
except ImportError as e:
raise Exception(f"Missing one or more packages required for Bert embedding: {required_libs}. Tried importing '{lib}'.")
|
Megatron-LM-master
|
tools/bert_embedding/external_libs.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from functools import partial
import numpy as np
import os
import time
import torch
from torch.utils.data import BatchSampler, DataLoader, SequentialSampler, Subset
from torch.utils.data._utils.collate import default_collate
from tqdm import tqdm
from megatron import get_args, get_tokenizer, print_rank_0
from megatron import core
from megatron.arguments import core_transformer_config_from_args
from megatron.core.enums import ModelType
from megatron.core.pipeline_parallel import get_forward_backward_func
from megatron.model import BertModel
from megatron.training import setup_model_and_optimizer
from .dataset import BertEmbeddingDataset
from .external_libs import h5py
from .huggingface import HuggingfaceEmbedder
from .utils import get_missing_blocks_by_rank
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
print_rank_0(" > build Bert model.")
args = get_args()
config = core_transformer_config_from_args(args)
num_tokentypes = 2 if args.bert_binary_head else 0
model = BertModel(
config=config,
num_tokentypes=num_tokentypes,
add_binary_head=args.bert_binary_head,
parallel_output=True,
pre_process=pre_process,
post_process=post_process)
return model
def get_batch(data_iterator):
"""Build the batch."""
# Items and their type.
keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask',
'seq_length']
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = core.tensor_parallel.broadcast_data(keys, data, datatype)
# Unpack.
tokens = data_b['text'].long()
types = data_b['types'].long()
sentence_order = data_b['is_random'].long()
loss_mask = data_b['loss_mask'].float()
lm_labels = data_b['labels'].long()
padding_mask = data_b['padding_mask'].long()
seq_lengths = data_b['seq_length'].long()
return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask, \
seq_lengths
def loss_func(loss_mask, sentence_order, seq_lengths,
output_tensor, non_loss_data):
"""Loss function. Sequence lengths returned here for progress print-outs."""
assert non_loss_data
return seq_lengths, output_tensor
def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
# Get the batch.
tokens, types, sentence_order, loss_mask, lm_labels, padding_mask, \
seq_lengths = get_batch(data_iterator)
if not args.bert_binary_head:
types = None
# Forward pass through the model.
output_tensor = model(tokens, padding_mask, tokentype_ids=types,
lm_labels=lm_labels)
return output_tensor, partial(loss_func, loss_mask, sentence_order,
seq_lengths)
def collate_batch(samples):
"""Collate samples of various lengths.
This collate function handles samples with various sequence lengths, by
padding 'text' arrays with pad_id, and other arrays with 0.
"""
n_samples = len(samples)
keys = list(samples[0].keys())
tokenizer = get_tokenizer()
# Max sample length across all samples.
max_length_map = { key:0 for key in keys }
for sample in samples:
for key in keys:
value_length = \
len(sample[key]) if isinstance(sample[key], np.ndarray) else None
max_length_map[key] = None \
if value_length is None else \
max(max_length_map[key], value_length)
# Pad samples.
padded_samples = []
for sample in samples:
padded_sample = {}
for key in keys:
padded_sample[key] = \
np.pad(
sample[key],
(0, max_length_map[key] - len(sample[key])),
mode="constant",
constant_values=tokenizer.pad_id if key == "text" else 0,
) \
if isinstance(sample[key], np.ndarray) else \
sample[key]
padded_samples.append(padded_sample)
# Build batch with padded samples.
batch = default_collate(padded_samples)
return batch
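# Minimal sketch of the padding rule above (values hypothetical; in
# collate_batch, 'text' arrays are padded with pad_id, all others with 0):
def _example_pad_to_max(arrays, pad_value=0):
    import numpy as np
    max_len = max(len(a) for a in arrays)
    return np.stack([
        np.pad(a, (0, max_len - len(a)), mode="constant",
               constant_values=pad_value)
        for a in arrays
    ])
# e.g., _example_pad_to_max([np.array([1, 2, 3]), np.array([4, 5])])
#       -> [[1, 2, 3], [4, 5, 0]]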
def get_data_loader(dataset, batch_size):
"""Build data loader over data subset.
Get a subset of the dataset (from start_idx -> end_idx), and wrap it in
a sequential sampler and data loader.
"""
args = get_args()
# Sequential & batch samplers.
batch_sampler = BatchSampler(
sampler=SequentialSampler(dataset),
batch_size=batch_size,
drop_last=False,
)
# Data loader.
data_loader = DataLoader(dataset,
batch_sampler=batch_sampler,
num_workers=args.num_workers,
pin_memory=True,
collate_fn=collate_batch)
return data_loader
def embed_data_loader(models, data_loader):
'''Iterate data loader and compute embeddings.'''
# Verify no model parallelism.
args = get_args()
assert args.tensor_model_parallel_size == 1 and \
args.pipeline_model_parallel_size == 1, \
"since we call forward_step directly, only tp == pp == 1 allowed."
# Data iterator.
data_iterator = iter(data_loader)
# Eval mode.
for m in models:
m.eval()
# Embed.
embeddings = []
for _ in tqdm(range(len(data_loader)), "mt embed"):
with torch.no_grad():
result = forward_step(data_iterator, models[0])
embeddings.append(result[0].detach().cpu().numpy())
# Concatenate embeddings.
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
class BertEmbedder:
'''Compute Bert embeddings, from a text dataset.'''
def __init__(self, batch_size, max_bert_seq_length, embedder_type):
args = get_args()
assert args.output_bert_embeddings
self.models, optimizer, opt_param_scheduler = \
setup_model_and_optimizer(model_provider,
ModelType.encoder_or_decoder)
self.batch_size = batch_size
self.max_bert_seq_length = max_bert_seq_length
# Init Huggingface, if in use.
if embedder_type == "megatron":
self.huggingface_embedder = None
elif embedder_type == "huggingface":
self.huggingface_embedder = HuggingfaceEmbedder(batch_size,
max_bert_seq_length)
else:
raise Exception("specialize for embedder type '%s'." % embedder_type)
def embed_text_dataset(self, text_dataset):
'''Embed a text dataset.'''
# Huggingface.
if self.huggingface_embedder:
return self.huggingface_embedder.embed_text_dataset(text_dataset)
# Wrap in a BertEmbeddingDataset to tokenize samples.
bert_dataset = BertEmbeddingDataset(text_dataset,
self.max_bert_seq_length)
# Embed.
data_loader = get_data_loader(bert_dataset, self.batch_size)
embeddings = embed_data_loader(self.models, data_loader)
return embeddings
def embed_text(self, text):
'''Embed a single text string.
Primarily used for on-the-fly embeddings, particularly during
analysis or debugging. For large scale, use 'embed_text_dataset()'.
'''
class SingleTextDataset(torch.utils.data.Dataset):
'''Dataset that holds single string.'''
def __init__(self, text):
assert isinstance(text, str)
self.text = text
def __len__(self):
return 1
def __getitem__(self, i):
return {"text": self.text}
# Embed text.
text_ds = SingleTextDataset(text)
embed = self.embed_text_dataset(text_ds)[0]
return embed
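# Usage sketch (illustrative; requires an initialized Megatron + distributed
# environment, so shown as comments only; sizes are hypothetical):
#
#   embedder = BertEmbedder(batch_size=128, max_bert_seq_length=256,
#                           embedder_type="megatron")
#   vec = embedder.embed_text("retrieval-augmented language models")
#   # vec is a single embedding vector, e.g. shape (1024,)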
class DiskDataParallelBertEmbedder:
'''Process embeddings in blocks & save to disk.'''
def __init__(self, batch_size, max_bert_seq_length, block_size,
embedder_type):
self.embedder = BertEmbedder(batch_size, max_bert_seq_length,
embedder_type)
self.block_size = block_size
def embed_text_blocks(self, name, workdir, text_dataset,
missing_embedding_blocks):
'''Process a text dataset in blocks.'''
# Iterate blocks.
for block_index, block_info in enumerate(missing_embedding_blocks):
# Missing block lists are extended with None to have equal-length
# lists. Skip the Nones.
if block_info is not None:
                # Progress.
print_rank_0("embed '%s' block %d / %d ... %s." % (
name,
block_index,
len(missing_embedding_blocks),
block_info["path"],
))
# Embed block.
sub_dataset = Subset(text_dataset, range(*block_info["range"]))
embeddings = self.embedder.embed_text_dataset(sub_dataset)
# Save embeddings.
f = h5py.File(block_info["path"], "w")
f.create_dataset("data", data=embeddings)
f.close()
# Synchronize progress across all ranks. (for easier observation)
print_rank_0(" > waiting for other ranks to finish block.")
torch.distributed.barrier()
def embed_text_dataset(self, name, workdir, text_dataset):
'''Embed a text dataset.'''
# Dataset workdir.
os.makedirs(workdir, exist_ok=True)
# Missing embedding blocks (stored on disk).
def validate(f):
assert f["data"].shape[1] == 1024
n_missing_world, missing_embedding_blocks = get_missing_blocks_by_rank(
workdir,
len(text_dataset),
self.block_size,
validate=validate)
# Prevent missing file race condition.
torch.distributed.barrier()
# Embed batches.
self.embed_text_blocks(name, workdir, text_dataset,
missing_embedding_blocks)
|
Megatron-LM-master
|
tools/bert_embedding/embed.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from .embed import BertEmbedder, DiskDataParallelBertEmbedder
|
Megatron-LM-master
|
tools/bert_embedding/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import numpy as np
import torch
from megatron import get_args, get_tokenizer
from megatron.data.bert_dataset import build_training_sample
class BertEmbeddingDataset(torch.utils.data.Dataset):
'''Dataset to convert a text dataset to Bert tokens.'''
def __init__(self, text_dataset, max_seq_length):
super().__init__()
args = get_args()
# Dataset, tokenizer.
self.text_dataset = text_dataset
self.bert_tokenizer = get_tokenizer()
# Params to store.
self.max_seq_length = max_seq_length
self.seed = args.seed
self.masked_lm_prob = args.mask_prob
# Vocab stuff.
self.vocab_id_list = list(self.bert_tokenizer.inv_vocab.keys())
self.vocab_id_to_token_dict = self.bert_tokenizer.inv_vocab
self.cls_id = self.bert_tokenizer.cls
self.sep_id = self.bert_tokenizer.sep
self.mask_id = self.bert_tokenizer.mask
self.pad_id = self.bert_tokenizer.pad
def __len__(self):
return len(self.text_dataset)
def __getitem__(self, idx):
# Text.
text_sample = self.text_dataset[idx]
text = text_sample["text"]
text = text.replace("<|endoftext|>", "")
# Bert/Wordpiece tokens (+truncate).
bert_token_ids = self.bert_tokenizer.tokenize(text)
bert_token_ids = bert_token_ids[:self.max_seq_length - 2] # cls+sep.
if not bert_token_ids:
            bert_token_ids = [ self.pad_id ] # fallback for an empty sequence
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1.
np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32))
# Build sample.
sample = build_training_sample([bert_token_ids],
len(bert_token_ids),
len(bert_token_ids) + 2, # for cls+sep
self.vocab_id_list,
self.vocab_id_to_token_dict,
self.cls_id, self.sep_id,
self.mask_id, self.pad_id,
self.masked_lm_prob, np_rng,
binary_head=False)
sample["seq_length"] = len(sample["text"])
return sample
|
Megatron-LM-master
|
tools/bert_embedding/dataset.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
from collections import defaultdict
import glob
import numpy as np
import os
import torch
from tqdm import tqdm
from megatron import print_rank_0
from megatron.core import parallel_state
from .external_libs import h5py
def save_data(data_map, *args):
'''Save map of numpy arrays to hdf5 file.'''
# Parse args.
if len(args) == 1:
path = args[0]
elif len(args) == 2:
dir_path, file_name = args
path = os.path.join(dir_path, file_name)
else:
raise Exception("specialize for len(args) == %d." % len(args))
# Save data.
if not os.path.isfile(path):
f = h5py.File(path, "w")
for k, v in data_map.items():
f.create_dataset(k, data=v)
f.close()
return path
def load_data(paths):
'''Load multiple hdf5 files to single numpy array.'''
# Read data shapes.
shape_map = defaultdict(lambda : (0, None))
for p in paths:
f = h5py.File(p, "r")
for k in f.keys():
shape = tuple(f[k].shape)
shape_map[k] = (shape_map[k][0] + shape[0], shape[1])
f.close()
# Allocate output array.
data_map = { k : np.empty(s, dtype="f4") for k, s in shape_map.items() }
start_map = { k : 0 for k in shape_map }
# Load files.
for pi, p in enumerate(tqdm(paths, "load data")):
f = h5py.File(p, "r")
for k in f.keys():
i0 = start_map[k]
i1 = i0 + len(f[k])
data_map[k][i0:i1] = f[k]
start_map[k] += len(f[k])
f.close()
return data_map
def get_missing_blocks(workdir, n_samples, block_size,
validate=lambda f : None):
'''Divide range [0, num_samples) to sequence of block ranges.
This is a core method within the concept of block processing. The idea
is to divide a range (size n_samples) into a sequence of blocks. Each
block corresponds to a file within 'workdir' with name
'{start_idx}-{end_idx}.hdf5'. This method checks for the existence of
these files, and returns a list of the ones that are missing.
'''
# Block ranges.
block_start_idxs = list(range(0, n_samples, block_size))
block_end_idxs = [ min(n_samples, i + block_size) for i in block_start_idxs ]
block_ranges = list(zip(block_start_idxs, block_end_idxs))
# All block files (existing + missing).
n_digits = int(np.ceil(np.log(n_samples) / np.log(10)) + 1)
all_blocks = [{
"range" : r,
"path" : os.path.join(
workdir,
"%s-%s.hdf5" % tuple([ str(i).zfill(n_digits) for i in r ]),
)
} for r in block_ranges]
all_block_path_set = set(block["path"] for block in all_blocks)
# Delete corrupt files.
if torch.distributed.get_rank() == 0:
existing_block_paths = [block["path"]
for block in all_blocks
if os.path.exists(block["path"])]
for index, path in enumerate(
tqdm(existing_block_paths, "validating block.")):
assert path in all_block_path_set, "unexpected filename, '%s'." % path
            try:
                f = h5py.File(path, "r")
            except Exception:
                # Unable to open the file; delete it so it gets re-processed.
                os.remove(path)
                continue
            try:
                validate(f)
            except Exception:
                # Failed validation; delete it so it gets re-processed.
                os.remove(path)
            finally:
                f.close()
# Wait for files to be deleted.
torch.distributed.barrier()
# Filter missing files.
missing_blocks = [block
for block in all_blocks
if not os.path.exists(block["path"])]
return missing_blocks
def get_missing_blocks_by_rank(workdir, n_samples, block_size,
validate=lambda f : None):
'''Divide missing blocks evenly across all ranks.
See 'get_missing_blocks()' above for description. The returned list of
missing blocks is split evenly across ranks via interleaving. This way,
each rank has a roughly equal number of blocks to process for a
downstream operation.
'''
missing_blocks = get_missing_blocks(workdir, n_samples, block_size,
validate)
# This rank's missing files.
data_parallel_rank = parallel_state.get_data_parallel_rank()
data_parallel_world_size = parallel_state.get_data_parallel_world_size()
rank_missing_blocks = missing_blocks[data_parallel_rank:len(missing_blocks):data_parallel_world_size]
# Extend rank's missing blocks (with None) such that all ranks have equal
# length lists. This allows for easier tracking of global progress.
n_missing_tensor = torch.cuda.LongTensor([len(rank_missing_blocks)])
torch.distributed.all_reduce(n_missing_tensor,
op=torch.distributed.ReduceOp.MAX)
max_n_missing = n_missing_tensor.item()
rank_missing_blocks += [None] * (max_n_missing - len(rank_missing_blocks))
return len(missing_blocks), rank_missing_blocks
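
# Illustration (hypothetical numbers) of the interleaved split above: with 10
# missing blocks and a data-parallel world size of 4, rank 1 is assigned
# blocks 1, 5, and 9.
def _example_interleaved_split():
    missing = list(range(10))
    rank, world_size = 1, 4
    assert missing[rank:len(missing):world_size] == [1, 5, 9]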
class BlockPathMap:
'''Map an index to its containing block path.
The common use for this class is to have a directory of files containing
blocks of processed data, of uniform block size (e.g., 100k samples per
file). Each file must follow a naming convention of 'startIdx-endIdx.[ext]',
where 'endIdx' minus 'startIdx' must equal the block size, with the possible
exception of the final block. Given an input index, this class maps the
index to the containing block file.
'''
@classmethod
def from_dir(cls, _dir, block_size, ext="hdf5"):
'''Get list of block files, and create map.'''
assert os.path.isdir(_dir), f"directory not found, '{_dir}'."
return cls(sorted(glob.glob(_dir + f"/*.{ext}")), block_size)
def __init__(self, block_paths, block_size):
self.max_idx = 0
self.block_path_map = {}
for block_path in block_paths:
name = os.path.splitext(os.path.basename(block_path))[0]
start_idx, end_idx = [ int(i) for i in name.split("-") ]
self.block_path_map[start_idx] = block_path
self.max_idx = max(self.max_idx, end_idx)
self.block_size = block_size
def __str__(self):
return "%d paths" % len(self.block_path_map)
def __getitem__(self, idx):
'''Get block path from index.'''
block_start_idx = self.block_size * (idx // self.block_size)
block_path = self.block_path_map[block_start_idx]
return block_path
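
# Hypothetical usage of BlockPathMap: two 100-sample block files, where an
# index maps to the file whose [start, end) range contains it.
def _example_block_path_map():
    paths = ["blocks/000000-000100.hdf5", "blocks/000100-000200.hdf5"]
    block_map = BlockPathMap(paths, block_size=100)
    assert block_map[42] == "blocks/000000-000100.hdf5"
    assert block_map[150] == "blocks/000100-000200.hdf5"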
|
Megatron-LM-master
|
tools/bert_embedding/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import numpy as np
import torch
from tqdm import tqdm
from .external_libs import transformers
class IterableTextDataset(torch.utils.data.IterableDataset):
'''Iterable over a text dataset.'''
def __init__(self, text_dataset):
self.text_dataset = text_dataset
def __iter__(self):
'''Remove 'endoftext' string.'''
for sample_idx in range(len(self.text_dataset)):
sample = self.text_dataset[sample_idx]
text = sample["text"].replace("<|endoftext|>", "")
yield text
class MyFeatureExtractionPipeline(transformers.FeatureExtractionPipeline):
def _forward(self, model_inputs):
# Embed inputs.
model_outputs = self.model(**model_inputs)
# Attention mask.
embeddings = model_outputs[0]
masks = torch.sum(model_inputs['attention_mask'], dim=1)
# Collect embeddings & check for nan.
outputs = []
for embedding, mask in zip(embeddings, masks):
output = torch.mean(embedding[1: mask - 1], dim=0)
# Nans due to empty input sequences; so only check first element.
if torch.isnan(output.view(-1)[0]).any():
output.zero_()
outputs.append(output)
# Sample.
data = {
"input" : model_inputs["input_ids"],
"output" : outputs,
}
return data
def postprocess(self, model_outputs):
# Return input for analysis.
return {
"input" : model_outputs["input"].numpy(),
"output" : model_outputs["output"].numpy(),
}
class HuggingfaceEmbedder:
def __init__(self, batch_size, max_seq_length):
# Model, tokenizer.
self.model = transformers.BertModel.from_pretrained("bert-large-cased")
self.tokenizer = transformers.AutoTokenizer.from_pretrained(
"bert-large-cased", model_max_length=max_seq_length)
# Feature extraction pipeline.
self.pipe = MyFeatureExtractionPipeline(
model=self.model,
tokenizer=self.tokenizer,
device=torch.cuda.current_device(),
truncation=True,
max_length=max_seq_length,
)
self.batch_size = batch_size
def embed_text_dataset(self, text_dataset, verbose=True):
# Wrap dataset in iterable.
dataset = IterableTextDataset(text_dataset)
# Allocate output array.
n_samples = len(text_dataset)
embeddings = np.zeros((n_samples, 1024), dtype="f4")
start_idx = 0
# Wrap iterator in tqdm for verbose output.
_iter = self.pipe(dataset, batch_size=self.batch_size)
if verbose:
_iter = tqdm(_iter, "hf embed", total=n_samples)
# Embed dataset.
for idx, out_dict in enumerate(_iter):
inp = out_dict["input"]
out = out_dict["output"]
embeddings[start_idx] = out
start_idx += 1
return embeddings
def embed_text(self, text):
'''Embed a single text string.
Primarily used for on-the-fly embeddings, particularly during
analysis or debugging. For large scale, use 'embed_text_dataset()'.
'''
class SingleTextDataset(torch.utils.data.Dataset):
'''Dataset that holds single string.'''
def __init__(self, text):
assert isinstance(text, str)
self.text = text
def __len__(self):
return 1
def __getitem__(self, i):
return {"text": self.text}
# Embed text.
text_ds = SingleTextDataset(text)
embed = self.embed_text_dataset(text_ds, verbose=False)[0]
return embed
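
# A usage sketch (assumes a CUDA device and that the 'bert-large-cased'
# weights can be downloaded; the input string is illustrative):
def _example_embed_text():
    embedder = HuggingfaceEmbedder(batch_size=8, max_seq_length=256)
    embedding = embedder.embed_text("retrieval-augmented language models")
    assert embedding.shape == (1024,)  # bert-large hidden size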
|
Megatron-LM-master
|
tools/bert_embedding/huggingface.py
|
Megatron-LM-master
|
tests/__init__.py
|
|
def test_import():
import megatron
|
Megatron-LM-master
|
tests/unit_tests/test_basic.py
|
import pytest
import torch
import megatron.core.utils as util
import numpy as np
def test_divide_properly():
assert util.divide(4,2) == 2
def test_divide_improperly():
with pytest.raises(AssertionError):
util.divide(4,5)
def test_global_memory_buffer():
global_memory_buffer = util.GlobalMemoryBuffer()
obtained_tensor = global_memory_buffer.get_tensor((3,2), torch.float32, "test_tensor")
expected_tensor = torch.empty((3,2), dtype=torch.float32, device=torch.cuda.current_device())
assert torch.equal(obtained_tensor, expected_tensor)
def test_make_viewless_tensor():
inp = torch.rand((3,4))
assert(torch.equal(inp, util.make_viewless_tensor(inp, True, True)))
assert(torch.equal(inp, util.make_viewless_tensor(inp, True, False)))
def test_safely_set_viewless_tensor_data():
tensor = torch.zeros((3,4))
new_data_tensor = torch.tensor(np.random.rand(3,4))
util.safely_set_viewless_tensor_data(tensor, new_data_tensor)
assert(torch.equal(tensor, new_data_tensor))
def test_assert_viewless_tensor():
tensor = torch.rand((3,4))
assert(torch.equal(util.assert_viewless_tensor(tensor), tensor))
input_tensor_list=[tensor,tensor,tensor]
output_tensor_list = util.assert_viewless_tensor(input_tensor_list)
for inp,out in zip(input_tensor_list, output_tensor_list):
assert(torch.equal(inp,out))
|
Megatron-LM-master
|
tests/unit_tests/test_utils.py
|
import os
import torch
import megatron.core.parallel_state as ps
class Utils:
world_size = torch.cuda.device_count()
rank = int(os.environ['LOCAL_RANK'])
@staticmethod
def initialize_distributed():
print(f'Initializing torch.distributed with rank: {Utils.rank}, world_size: {Utils.world_size}')
torch.cuda.set_device(Utils.rank % torch.cuda.device_count())
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6000')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(backend='nccl', world_size=Utils.world_size, rank=Utils.rank, init_method=init_method)
@staticmethod
def destroy_model_parallel():
ps.destroy_model_parallel()
torch.distributed.barrier()
@staticmethod
def initialize_model_parallel(tensor_model_parallel_size = 1, pipeline_model_parallel_size = 1, virtual_pipeline_model_parallel_size = None, pipeline_model_parallel_split_rank = None):
ps.destroy_model_parallel()
if not torch.distributed.is_initialized():
Utils.initialize_distributed()
ps.initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank)
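
# Sketch of the pattern the unit tests below follow: initialize a parallel
# state, assert against megatron.core.parallel_state, then tear it down.
def _example_test_pattern():
    Utils.initialize_model_parallel(tensor_model_parallel_size=1,
                                    pipeline_model_parallel_size=1)
    assert ps.model_parallel_is_initialized()
    Utils.destroy_model_parallel()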
|
Megatron-LM-master
|
tests/unit_tests/test_utilities.py
|
Megatron-LM-master
|
tests/unit_tests/__init__.py
|
|
import torch
import megatron.core.parallel_state as ps
import pytest
from tests.unit_tests.test_utilities import Utils
import os
rank = Utils.rank
world_size = Utils.world_size
def test_initialize_and_destroy_model_parallel():
with pytest.raises(AssertionError):
assert(ps.initialize_model_parallel())
Utils.initialize_distributed()
with pytest.raises(RuntimeError):
assert(ps.initialize_model_parallel(tensor_model_parallel_size=2*world_size))
with pytest.raises(RuntimeError):
assert(ps.initialize_model_parallel(pipeline_model_parallel_size=2*world_size))
with pytest.raises(RuntimeError):
assert(ps.initialize_model_parallel(pipeline_model_parallel_size=world_size, tensor_model_parallel_size=world_size))
with pytest.raises(RuntimeError):
assert(ps.initialize_model_parallel(virtual_pipeline_model_parallel_size=2))
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
assert(ps.model_parallel_is_initialized())
assert(ps.get_model_parallel_group() is not None)
assert(ps.get_tensor_model_parallel_group() is not None)
assert(ps.get_pipeline_model_parallel_group() is not None)
assert(ps.get_data_parallel_group() is not None)
Utils.destroy_model_parallel()
assert(ps._MODEL_PARALLEL_GROUP is None)
def test_pipeline_parallel_initializations():
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
assert(ps.get_pipeline_model_parallel_first_rank() == rank % 2 )
assert(ps.get_data_parallel_src_rank() == rank)
assert(ps.get_pipeline_model_parallel_next_rank() == ((rank + 2) % world_size))
assert(ps.get_pipeline_model_parallel_prev_rank() == ((rank - 2) % world_size))
Utils.destroy_model_parallel()
def test_data_parallel_initializations():
Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
assert(ps.get_data_parallel_src_rank() == rank)
assert(ps.get_data_parallel_world_size() == 1)
assert(ps.get_data_parallel_rank() == 0)
Utils.destroy_model_parallel()
def test_tensor_model_parallel_world_size():
Utils.initialize_model_parallel(tensor_model_parallel_size=world_size)
assert(ps.get_tensor_model_parallel_world_size() == world_size)
ps.set_tensor_model_parallel_world_size(None)
assert(ps.get_tensor_model_parallel_world_size() == world_size)
Utils.destroy_model_parallel()
def test_pipeline_model_parallel_world_size():
Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
assert(ps.get_pipeline_model_parallel_world_size() == world_size)
ps.set_pipeline_model_parallel_world_size(None)
assert(ps.get_pipeline_model_parallel_world_size() == world_size)
Utils.destroy_model_parallel()
def test_tensor_model_parallel_rank():
Utils.initialize_model_parallel(tensor_model_parallel_size=world_size)
assert(ps.get_tensor_model_parallel_rank() == rank)
ps.set_tensor_model_parallel_rank(None)
assert(ps.get_tensor_model_parallel_rank() == rank)
Utils.destroy_model_parallel()
def test_pipeline_model_parallel_rank():
Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
assert(ps.get_pipeline_model_parallel_rank() == rank)
ps.set_pipeline_model_parallel_rank(None)
assert(ps.get_pipeline_model_parallel_rank() == rank)
Utils.destroy_model_parallel()
def test_is_pipeline_first_stage():
Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
assert(ps.is_pipeline_first_stage(ignore_virtual=True) == (rank == 0))
assert(ps.is_pipeline_first_stage() == (rank == 0))
Utils.destroy_model_parallel()
def test_is_pipeline_last_stage():
Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
assert(ps.is_pipeline_last_stage(ignore_virtual=True) == (rank == world_size-1))
assert(ps.is_pipeline_last_stage() == (rank == world_size-1))
Utils.destroy_model_parallel()
def test_virtual_pipeline_model_parallel_rank():
Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size)
ps.set_virtual_pipeline_model_parallel_rank(rank)
assert(ps.get_virtual_pipeline_model_parallel_rank() == rank)
Utils.destroy_model_parallel()
def test_get_tensor_model_parallel_src_rank():
Utils.initialize_model_parallel(tensor_model_parallel_size=world_size)
assert(ps.get_tensor_model_parallel_src_rank() == ((rank // world_size) * world_size))
Utils.destroy_model_parallel()
|
Megatron-LM-master
|
tests/unit_tests/test_parallel_state.py
|
from megatron.core.tensor_parallel.cross_entropy import vocab_parallel_cross_entropy
import torch
from tests.unit_tests.test_utilities import Utils
import numpy as np
def test_vocab_parallel_cross_entropy():
Utils.initialize_model_parallel(4,2)
    vocab_parallel_logits = torch.arange(0., 8.).repeat(16, 4).cuda()  # torch.range is deprecated; arange(0., 8.) matches it
target = torch.arange(0,32,2).cuda()
output = vocab_parallel_cross_entropy(vocab_parallel_logits, target)
expected_output = torch.tensor([10.2309, 8.2309, 6.2309, 4.2309, 10.2309, 8.2309, 6.2309, 4.2309,
10.2309, 8.2309, 6.2309, 4.2309, 10.2309, 8.2309, 6.2309, 4.2309]).cuda()
assert(torch.equal(torch.round(expected_output), torch.round(output)))
Utils.destroy_model_parallel()
|
Megatron-LM-master
|
tests/unit_tests/tensor_parallel/test_cross_entropy.py
|
import torch
import megatron.core.tensor_parallel.utils as util
import megatron.core.parallel_state as ps
from tests.unit_tests.test_utilities import Utils
rank = Utils.rank
def test_split_tensor_along_last_dim():
    input_tensor = torch.rand((3,4))
    # Chunks are split along the last dimension, so compare against [:, 0:2] and [:, 2:].
    assert torch.equal(input_tensor[:, 0:2], util.split_tensor_along_last_dim(input_tensor, 2)[0])
    assert torch.equal(input_tensor[:, 2:], util.split_tensor_along_last_dim(input_tensor, 2)[1])
def test_split_tensor_into_1d_equal_chunks():
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
input_tensor = torch.rand((3,4))
output_tensor = util.split_tensor_into_1d_equal_chunks(input_tensor)
if rank % 2 == 0 :
start = 0
end = int(input_tensor.numel()/2)
else :
start = int(input_tensor.numel()/2)
end = input_tensor.numel()
assert torch.equal(output_tensor, input_tensor.flatten()[start:end])
Utils.destroy_model_parallel()
def test_gather_split_1d_tensor():
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
input_tensor = torch.ones((2,4)).cuda() * rank
actual_output_tensor = util.gather_split_1d_tensor(input_tensor)
if rank %2 == 0:
expected_output_tensor = torch.concat((input_tensor.flatten(), input_tensor.flatten() + 1))
else :
expected_output_tensor = torch.concat((input_tensor.flatten() - 1, input_tensor.flatten()))
assert(torch.equal(actual_output_tensor, expected_output_tensor))
Utils.destroy_model_parallel()
def test_vocab():
global_vocab_size = 1600
    per_partition_vocab_size = 1600 // Utils.world_size
assert((rank * per_partition_vocab_size, (rank + 1)* per_partition_vocab_size) == (util.VocabUtility.vocab_range_from_per_partition_vocab_size(global_vocab_size // Utils.world_size, rank, Utils.world_size)))
assert((rank * per_partition_vocab_size, (rank + 1)* per_partition_vocab_size) == (util.VocabUtility.vocab_range_from_global_vocab_size(global_vocab_size, rank, Utils.world_size)))
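
# Worked example (hypothetical numbers) of the vocab ranges checked above: a
# 1600-entry vocab sharded over 8 ranks gives 200-entry partitions, so rank 3
# owns indices [600, 800).
def _example_vocab_range():
    first, last = util.VocabUtility.vocab_range_from_global_vocab_size(1600, 3, 8)
    assert (first, last) == (600, 800)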
|
Megatron-LM-master
|
tests/unit_tests/tensor_parallel/test_tensor_parallel_utils.py
|
from megatron.core.tensor_parallel import mappings
from tests.unit_tests.test_utilities import Utils
import torch
def test_CopyToModelParallelRegion():
Utils.initialize_model_parallel(4,2)
input_data = torch.ones((1)).cuda()*Utils.rank
output_data = mappings._CopyToModelParallelRegion.backward(None, input_data)
result = torch.ones(1).cuda()
result = result * 22 if Utils.rank >= 4 else result * 6
assert(torch.equal(output_data, result))
assert(torch.equal(input_data, mappings.copy_to_tensor_model_parallel_region(input_data)))
assert(torch.equal(input_data, mappings._CopyToModelParallelRegion.symbolic(None, input_data)))
Utils.destroy_model_parallel()
def test_ReduceFromModelParallelRegion():
Utils.initialize_model_parallel(4,2)
input_data = torch.ones((1)).cuda()*Utils.rank
output_data = mappings._ReduceFromModelParallelRegion.symbolic(None, input_data)
result = torch.ones(1).cuda()
result = result * 22 if Utils.rank >= 4 else result * 6
assert(torch.equal(output_data, result))
input_data = torch.ones((1)).cuda()*Utils.rank
assert(torch.equal(mappings.reduce_from_tensor_model_parallel_region(input_data), result))
assert(torch.equal(input_data, mappings._ReduceFromModelParallelRegion.backward(None, input_data)))
Utils.destroy_model_parallel()
def test_ScatterToModelParallelRegion():
Utils.initialize_model_parallel(4,2)
input_data = torch.rand((8,4)).cuda()
output_data = mappings.scatter_to_tensor_model_parallel_region(input_data)
req_dim = int(Utils.rank%(Utils.world_size/2))
assert(torch.equal(output_data, input_data[:,req_dim].reshape((8,1))))
output_data = mappings._ScatterToModelParallelRegion.symbolic(None, input_data)
assert(torch.equal(output_data, input_data[:, req_dim].reshape((8,1))))
input_data = torch.ones(8).cuda() * Utils.rank
actual_output_data = mappings._ScatterToModelParallelRegion.backward(None, input_data)
expected_output = torch.cat((
torch.ones(8)*0,
torch.ones(8)*1,
torch.ones(8)*2,
torch.ones(8)*3)).cuda()
if (Utils.rank >= 4):
expected_output = expected_output + 4
assert(torch.equal(actual_output_data, expected_output))
Utils.destroy_model_parallel()
def test_GatherFromModelParallelRegion():
Utils.initialize_model_parallel(4,2)
input_data = torch.rand((8,4)).cuda()
req_dim = int(Utils.rank%(Utils.world_size/2))
output_data = mappings._GatherFromModelParallelRegion.backward(None, input_data)
assert(torch.equal(output_data, input_data[:, req_dim].reshape((8,1))))
input_data = torch.ones(8).cuda() * Utils.rank
actual_output_data = mappings.gather_from_tensor_model_parallel_region(input_data)
expected_output = torch.cat((
torch.ones(8)*0,
torch.ones(8)*1,
torch.ones(8)*2,
torch.ones(8)*3)).cuda()
if (Utils.rank >= 4):
expected_output = expected_output + 4
assert(torch.equal(actual_output_data, expected_output))
assert(torch.equal(mappings._GatherFromModelParallelRegion.symbolic(None, input_data), expected_output))
Utils.destroy_model_parallel()
def test_ScatterToSequenceParallelRegion():
Utils.initialize_model_parallel(4,2)
input_data = torch.rand((8,4)).cuda()
req_dim = int(Utils.rank%(Utils.world_size/2))*2
output_data = mappings._ScatterToSequenceParallelRegion.symbolic(None, input_data)
assert(torch.equal(output_data, input_data[req_dim:req_dim+2, :]))
output_data = mappings.scatter_to_sequence_parallel_region(input_data)
assert(torch.equal(output_data, input_data[req_dim:req_dim+2, :]))
input_data = torch.ones(4).cuda() * Utils.rank
output_data = mappings._ScatterToModelParallelRegion.backward(None, input_data)
expected_output = torch.concat((
torch.ones(4)*0,
torch.ones(4)*1,
torch.ones(4)*2,
torch.ones(4)*3)).cuda()
if (Utils.rank >= 4):
expected_output = expected_output + 4
assert(torch.equal(output_data, expected_output))
Utils.destroy_model_parallel()
def test_GatherFromSequenceParallelRegion():
Utils.initialize_model_parallel(4,2)
input_data = torch.ones(4).cuda() * Utils.rank
output_data = mappings.gather_from_sequence_parallel_region(input_data)
expected_output = torch.concat((
torch.ones(4)*0,
torch.ones(4)*1,
torch.ones(4)*2,
torch.ones(4)*3)).cuda()
if (Utils.rank >= 4):
expected_output = expected_output + 4
assert(torch.equal(output_data, expected_output))
assert(torch.equal(mappings._GatherFromSequenceParallelRegion.symbolic(None, input_data), expected_output))
input_data = torch.vstack((
torch.ones(4)*0,
torch.ones(4)*1,
torch.ones(4)*2,
torch.ones(4)*3)).cuda()
class Ctx:
tensor_parallel_output_grad = True
output_data = mappings._GatherFromSequenceParallelRegion.backward(Ctx(), input_data)
expected_output = torch.ones((1,4)).cuda() * 4 * int(Utils.rank % 4)
assert(torch.equal(output_data[0], expected_output))
Utils.destroy_model_parallel()
def test_ReduceScatterToSequenceParallelRegion():
Utils.initialize_model_parallel(4,2)
input_data = torch.vstack((
torch.ones(4)*0,
torch.ones(4)*1,
torch.ones(4)*2,
torch.ones(4)*3)).cuda()
output_data = mappings.reduce_scatter_to_sequence_parallel_region(input_data)
expected_output = torch.ones(4).cuda() * 4 * int(Utils.rank % 4)
assert(torch.equal(output_data[0], expected_output))
assert(torch.equal(mappings._ReduceScatterToSequenceParallelRegion.symbolic(None, input_data) , expected_output.reshape((1,4))))
input_data = torch.ones(4).cuda() * Utils.rank
output_data = mappings._ReduceScatterToSequenceParallelRegion.backward(None,input_data)
expected_output = torch.concat((
torch.ones(4)*0,
torch.ones(4)*1,
torch.ones(4)*2,
torch.ones(4)*3)).cuda()
if (Utils.rank >= 4):
expected_output = expected_output + 4
assert(torch.equal(output_data, expected_output))
Utils.destroy_model_parallel()
|
Megatron-LM-master
|
tests/unit_tests/tensor_parallel/test_mappings.py
|
from megatron.core.tensor_parallel.data import broadcast_data
import torch
from tests.unit_tests.test_utilities import Utils
def test_broadcast_data():
Utils.initialize_model_parallel(2,4)
input_data = {
0 : torch.ones((8,8)).cuda() * 0.0,
1 : torch.ones((8,8)).cuda() * 1.0,
2 : torch.ones((8,8)).cuda() * 2.0,
3 : torch.ones((8,8)).cuda() * 3.0,
4 : torch.ones((8,8)).cuda() * 4.0,
5 : torch.ones((8,8)).cuda() * 5.0,
6 : torch.ones((8,8)).cuda() * 6.0,
7 : torch.ones((8,8)).cuda() * 7.0
}
dtype = torch.float32
actual_output = broadcast_data([0,1],input_data, dtype)
assert(torch.equal(actual_output[0], input_data[0]))
assert(torch.equal(actual_output[1], input_data[1]))
Utils.destroy_model_parallel()
|
Megatron-LM-master
|
tests/unit_tests/tensor_parallel/test_data.py
|
from megatron.core.tensor_parallel.random import CudaRNGStatesTracker
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.tensor_parallel.random import _CUDA_RNG_STATE_TRACKER
from megatron.core.tensor_parallel.random import checkpoint
from tests.unit_tests.test_utilities import Utils
import pytest
import torch
def test_cuda_rng_states_tracker():
rng_tracker = CudaRNGStatesTracker()
rng_tracker.set_states({"state1":1234})
assert(rng_tracker.get_states()["state1"] == 1234)
rng_tracker.reset()
assert(rng_tracker.get_states() == {})
seed = 1111
rng_tracker.add("state2",seed)
with pytest.raises(Exception):
assert(rng_tracker.add("state3",seed))
with pytest.raises(Exception):
assert(rng_tracker.add("state2",111))
assert(rng_tracker.get_states()['state2'] is not None)
    # Forking an untracked state should raise ("state_that_does_not_exist" is a hypothetical name).
    with pytest.raises(Exception):
        with rng_tracker.fork("state_that_does_not_exist"):
            pass
    with rng_tracker.fork("state2"):
        pass
torch.cuda.manual_seed(seed)
rng_state = torch.cuda.get_rng_state()
assert torch.equal(rng_tracker.get_states()['state2'], rng_state)
def test_model_parallel_cuda_manual_seed():
Utils.initialize_model_parallel(4,2)
model_parallel_cuda_manual_seed(0)
assert(_CUDA_RNG_STATE_TRACKER.get_states()['model-parallel-rng'] is not None)
Utils.destroy_model_parallel()
def test_checkpoint():
def test_forward(*input):
return input[0]+input[1]
assert(torch.equal(torch.ones(16)*3,checkpoint(test_forward, None, torch.ones(16), torch.ones(16)*2)))
Utils.initialize_model_parallel()
input1 = torch.ones((4,4))
checkpoint(test_forward, True, input1, torch.ones((4,4))*2)
assert(torch.equal(torch.ones(input1.numel()).cuda(), input1))
Utils.destroy_model_parallel()
|
Megatron-LM-master
|
tests/unit_tests/tensor_parallel/test_random.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import pytest
import torch
from megatron.core.transformer.mlp import MLP
from tests.unit_tests.test_utilities import Utils
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.transformer_config import TransformerConfig
class TestParallelMLP:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
model_parallel_cuda_manual_seed(123)
transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.mlp = MLP(transformer_config)
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_constructor(self):
assert isinstance(self.mlp, MLP)
num_weights = sum([p.numel() for p in self.mlp.parameters()])
assert num_weights == 1236
"""
def test_cpu_forward(self, mlp):
# [sequence length, micro batch size, hidden size]
hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
output, output_bias = mlp(hidden_states)
assert output.shape[0] == 32
assert output.shape[1] == 2
assert output.shape[2] == mlp.config.hidden_size
assert output_bias.shape[0] == mlp.config.hidden_size
assert output.dtype == torch.float32
"""
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_gpu_forward(self):
mlp = self.mlp
mlp.cuda()
# [sequence length, batch size, hidden size]
hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
hidden_states = hidden_states.cuda()
output, output_bias = mlp(hidden_states)
assert output.shape[0] == 32
assert output.shape[1] == 2
assert output.shape[2] == mlp.config.hidden_size
assert output_bias.shape[0] == mlp.config.hidden_size
assert output.dtype == torch.float32
assert output.device.type == 'cuda'
assert output_bias.device.type == 'cuda'
|
Megatron-LM-master
|
tests/unit_tests/transformer/test_mlp.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import pytest
import torch
from megatron.core.transformer.module import Float16Module, MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
from tests.unit_tests.test_utilities import Utils
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
DEVICE_CAPABILITY = None
if torch.cuda.is_available():
DEVICE_CAPABILITY = torch.cuda.get_device_capability()
class DummyModule(MegatronModule):
# def __init__(self, config: TransformerConfig, share_embeddings_and_output_weights=True):
def __init__(self, config: TransformerConfig):
super().__init__(config)
self.linear = torch.nn.modules.Linear(in_features=2, out_features=1)
def forward(self, x):
return self.linear(x)
class TestMegatronModule:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
model_parallel_cuda_manual_seed(123)
transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.megatron_module = DummyModule(config=transformer_config).cuda()
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_megatron_module(self):
megatron_module = self.megatron_module
assert megatron_module
assert megatron_module.config.hidden_size == 12
assert megatron_module.config.ffn_hidden_size == 48
assert megatron_module.linear.weight.dtype == torch.float32
x = torch.ones((2, 2)).cuda()
assert megatron_module(x).dtype == torch.float32
# TODO: test bad configs actually fail
# failed_module = megatron_module
# failed_module.fp16 = True
# failed_module.bf16 = True
class TestFloat16Module:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
model_parallel_cuda_manual_seed(123)
self.transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.megatron_module = DummyModule(config=self.transformer_config).cuda()
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_fp16_module(self):
transformer_config = self.transformer_config
megatron_module = self.megatron_module
transformer_config.fp16 = True
fp16_module = Float16Module(config=transformer_config, module=megatron_module)
assert fp16_module
assert fp16_module.config.hidden_size == 12
assert fp16_module.config.ffn_hidden_size == 48
assert fp16_module.module.linear.weight.dtype == torch.float16
x = torch.ones((2, 2)).cuda()
# inputs are converted to fp16 then outputs are converted to fp32
assert fp16_module(x).dtype == torch.float32
    @pytest.mark.skipif(
        not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8, reason='bfloat16 is not supported on this device'
    )
def test_bf16_module(self):
transformer_config = self.transformer_config
megatron_module = self.megatron_module
transformer_config.bf16 = True
bf16_module = Float16Module(config=transformer_config, module=megatron_module)
assert bf16_module
assert bf16_module.config.hidden_size == 12
assert bf16_module.config.ffn_hidden_size == 48
assert bf16_module.module.linear.weight.dtype == torch.bfloat16
x = torch.ones((2, 2)).cuda()
# inputs are converted to bf16 then outputs are converted to fp32
assert bf16_module(x).dtype == torch.float32
|
Megatron-LM-master
|
tests/unit_tests/transformer/test_module.py
|
Megatron-LM-master
|
tests/unit_tests/transformer/__init__.py
|
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import pytest
import torch
from megatron.core.transformer.attention import CrossAttention
"""
@pytest.fixture
def core_attention(transformer_config):
return CrossAttention(transformer_config)
class TestCoreAttention:
def test_constructor(self, core_attention):
assert isinstance(core_attention, CrossAttention)
assert core_attention.layer_number == 1
num_weights = sum([p.numel() for p in core_attention.parameters()])
assert num_weights == 0
def test_cpu_forward(self, core_attention):
# we can't currently do this because the global memory buffer is on GPU
pass
def test_gpu_forward(self, core_attention):
# destroy_global_memory_buffer()
# _set_global_memory_buffer()
# model_parallel_cuda_manual_seed(123)
core_attention.cuda()
config = core_attention.config
sequence_length = 32
micro_batch_size = 2
# query_layer (float): [sequence_length, micro_batch_size, num_attention_heads, hidden_size / num_attention_heads]
query_layer = torch.ones(
(
sequence_length,
micro_batch_size,
config.num_attention_heads,
config.hidden_size // config.num_attention_heads,
)
).cuda()
key_layer = torch.ones_like(query_layer).cuda()
value_layer = torch.ones_like(query_layer).cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
context_layer = core_attention(
query_layer=query_layer, key_layer=key_layer, value_layer=value_layer, attention_mask=attention_mask
)
assert context_layer.shape[0] == sequence_length
assert context_layer.shape[1] == micro_batch_size
assert context_layer.shape[2] == config.hidden_size
assert context_layer.device.type == 'cuda'
assert context_layer.dtype == torch.float32
"""
|
Megatron-LM-master
|
tests/unit_tests/transformer/test_core_attention.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import os
import pytest
import torch
from megatron.core import dist_checkpointing
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import TransformerLayer
from megatron.core.transformer.transformer_block import TransformerBlock
from tests.unit_tests.test_utilities import Utils
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
class TestParallelTransformerBlock:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
model_parallel_cuda_manual_seed(123)
self.transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.parallel_transformer_block = TransformerBlock(self.transformer_config)
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_constructor(self):
parallel_transformer_block = self.parallel_transformer_block
assert isinstance(parallel_transformer_block, TransformerBlock)
num_weights = sum([p.numel() for p in parallel_transformer_block.parameters()])
assert num_weights == 3792
assert parallel_transformer_block.num_layers_per_pipeline_rank == 2
assert len(parallel_transformer_block.layers) == 2
layer_0: TransformerLayer = parallel_transformer_block._get_layer(0)
assert layer_0.layer_number == 1
layer_1: TransformerLayer = parallel_transformer_block._get_layer(1)
assert layer_1.layer_number == 2
def test_gpu_forward(self):
parallel_transformer_block = self.parallel_transformer_block
config: TransformerConfig = parallel_transformer_block.config
sequence_length = 32
micro_batch_size = 2
parallel_transformer_block.cuda()
# [sequence length, batch size, hidden size]
hidden_states = torch.ones((sequence_length, micro_batch_size, config.hidden_size))
hidden_states = hidden_states.cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
hidden_states = parallel_transformer_block(hidden_states=hidden_states, attention_mask=attention_mask)
assert hidden_states.shape[0] == sequence_length
assert hidden_states.shape[1] == micro_batch_size
assert hidden_states.shape[2] == config.hidden_size
def test_gpu_forward_full_checkpoint(self):
transformer_config = self.transformer_config
config = transformer_config
config.recompute_granularity = 'full'
config.recompute_method = 'block'
config.recompute_num_layers = config.num_layers
full_transformer_block = TransformerBlock(config)
assert full_transformer_block.config.recompute_granularity == 'full'
assert full_transformer_block.config.recompute_method == 'block'
sequence_length = 32
micro_batch_size = 2
full_transformer_block.cuda()
# [sequence length, batch size, hidden size]
hidden_states = torch.ones((sequence_length, micro_batch_size, config.hidden_size))
hidden_states = hidden_states.cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
hidden_states = full_transformer_block(hidden_states=hidden_states, attention_mask=attention_mask)
assert hidden_states.shape[0] == sequence_length
assert hidden_states.shape[1] == micro_batch_size
assert hidden_states.shape[2] == config.hidden_size
def test_gpu_forward_selective_checkpoint(self):
transformer_config = self.transformer_config
config = transformer_config
config.recompute_granularity = 'selective'
selective_transformer_block = TransformerBlock(config)
assert selective_transformer_block.config.recompute_granularity == 'selective'
assert selective_transformer_block.checkpoint_core_attention
sequence_length = 32
micro_batch_size = 2
selective_transformer_block.cuda()
# [sequence length, batch size, hidden size]
hidden_states = torch.ones((sequence_length, micro_batch_size, config.hidden_size))
hidden_states = hidden_states.cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
hidden_states = selective_transformer_block(hidden_states=hidden_states, attention_mask=attention_mask)
assert hidden_states.shape[0] == sequence_length
assert hidden_states.shape[1] == micro_batch_size
assert hidden_states.shape[2] == config.hidden_size
|
Megatron-LM-master
|
tests/unit_tests/transformer/test_transformer_block.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import pytest
import torch
from megatron.core.transformer.attention import SelfAttention
from tests.unit_tests.test_utilities import Utils
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.transformer_config import TransformerConfig
class TestParallelAttention:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
model_parallel_cuda_manual_seed(123)
self.transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.parallel_attention = SelfAttention(self.transformer_config)
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_constructor(self):
assert isinstance(self.parallel_attention, SelfAttention)
assert self.parallel_attention.layer_number == 1
num_weights = sum([p.numel() for p in self.parallel_attention.parameters()])
assert num_weights == 648
def test_cpu_forward(self):
# we can't currently do this because the global memory buffer is on GPU
pass
def test_gpu_forward(self):
config = self.parallel_attention.config
sequence_length = 32
micro_batch_size = 2
self.parallel_attention.cuda()
# [sequence length, batch size, hidden size]
hidden_states = torch.ones((sequence_length, micro_batch_size, self.parallel_attention.config.hidden_size))
hidden_states = hidden_states.cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
output, bias = self.parallel_attention(hidden_states, attention_mask)
assert config.recompute_granularity is None
assert output.shape[0] == sequence_length
assert output.shape[1] == micro_batch_size
assert output.shape[2] == config.hidden_size
assert bias.shape[0] == config.hidden_size
def test_checkpointed_gpu_forward(self):
transformer_config = self.transformer_config
transformer_config.recompute_granularity='selective'
checkpointed_parallel_attention = SelfAttention(transformer_config)
config = checkpointed_parallel_attention.config
sequence_length = 32
micro_batch_size = 2
checkpointed_parallel_attention.cuda()
# [sequence length, batch size, hidden size]
hidden_states = torch.ones(
(sequence_length, micro_batch_size, checkpointed_parallel_attention.config.hidden_size)
)
hidden_states = hidden_states.cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
output, bias = checkpointed_parallel_attention(hidden_states, attention_mask)
assert config.recompute_granularity == 'selective'
assert output.shape[0] == sequence_length
assert output.shape[1] == micro_batch_size
assert output.shape[2] == config.hidden_size
assert bias.shape[0] == config.hidden_size
|
Megatron-LM-master
|
tests/unit_tests/transformer/test_attention.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import pytest
import torch
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import TransformerLayer
from tests.unit_tests.test_utilities import Utils
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.transformer_config import TransformerConfig
class TestParallelTransformerLayer:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
model_parallel_cuda_manual_seed(123)
transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.parallel_transformer_layer = TransformerLayer(transformer_config)
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_constructor(self):
parallel_transformer_layer = self.parallel_transformer_layer
assert isinstance(parallel_transformer_layer, TransformerLayer)
assert parallel_transformer_layer.layer_number == 1
num_weights = sum([p.numel() for p in parallel_transformer_layer.parameters()])
assert num_weights == 1884
def test_gpu_forward(self):
parallel_transformer_layer = self.parallel_transformer_layer
config: TransformerConfig = parallel_transformer_layer.config
sequence_length = 32
micro_batch_size = 2
parallel_transformer_layer.cuda()
# [sequence length, batch size, hidden size]
hidden_states = torch.ones((sequence_length, micro_batch_size, config.hidden_size))
hidden_states = hidden_states.cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
hidden_states = parallel_transformer_layer(hidden_states=hidden_states, attention_mask=attention_mask)
assert hidden_states.shape[0] == sequence_length
assert hidden_states.shape[1] == micro_batch_size
assert hidden_states.shape[2] == config.hidden_size
|
Megatron-LM-master
|
tests/unit_tests/transformer/test_transformer_layer.py
|
import torch
from tests.unit_tests.test_utilities import Utils
from megatron.core import ModelParallelConfig
import megatron.core.pipeline_parallel.schedules as schedule
from pytest_mock import mocker
import pytest
rank = Utils.rank
def test_get_forward_backward_func():
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=1)
assert(schedule.get_forward_backward_func() == schedule.forward_backward_no_pipelining)
Utils.destroy_model_parallel()
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4)
assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_without_interleaving)
Utils.destroy_model_parallel()
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=4, virtual_pipeline_model_parallel_size=2)
assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_with_interleaving)
Utils.destroy_model_parallel()
def test_deallocate_output_tensor():
out = torch.tensor([[1, 2, 3], [4, 5, 6]])
schedule.deallocate_output_tensor(out)
assert(out.nelement() == 6)
"""
def test_forward_backward_func_without_pipeline_parallel(mocker):
from megatron.core.pipeline_parallel import get_forward_backward_func
Utils.initialize_model_parallel(tensor_model_parallel_size=2, pipeline_model_parallel_size=1)
def forward_step_func(data_iterator, model):
import os
rank = int(os.environ['LOCAL_RANK'])
dummy_data = torch.ones(1,4)
def loss_func(output_tensor):
return rank, {'loss_reduced':rank}
return model(dummy_data), loss_func
model = torch.nn.Linear(4,1)
model.model_type = 'unit-test'
def set_input_tensor(input_tensor):
return None
model.set_input_tensor = set_input_tensor
forward_backward_func = get_forward_backward_func()
assert(schedule.get_forward_backward_func() == schedule.forward_backward_no_pipelining)
mocker.patch("megatron.core.pipeline_parallel.schedules.custom_backward", return_value=2)
config = ModelParallelConfig(
pipeline_model_parallel_size = 1
)
model.config = config
losses_reduced = forward_backward_func(
forward_step_func=forward_step_func,
data_iterator=None,
model=[model],
num_microbatches=4,
seq_length=None,
micro_batch_size=None,
forward_only=False)
loss_reduced_expected = [{'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}]
for i,j in zip(losses_reduced, loss_reduced_expected):
print(losses_reduced)
assert(i['loss_reduced'] == j['loss_reduced'])
Utils.destroy_model_parallel()
def test_forward_backward_func_with_pipeline_parallel(mocker):
from megatron.core.pipeline_parallel import get_forward_backward_func
Utils.initialize_model_parallel(tensor_model_parallel_size=1, pipeline_model_parallel_size=4)
def forward_step_func(data_iterator, model):
import os
rank = int(os.environ['LOCAL_RANK'])
def loss_func(output_tensor):
return rank, {'loss_reduced':rank}
return torch.rand(512,8,256).cuda(), loss_func
model = torch.nn.Linear(4,1)
model.model_type = 'unit-test'
def set_input_tensor(input_tensor):
return None
model.set_input_tensor = set_input_tensor
forward_backward_func = get_forward_backward_func()
assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_without_interleaving)
sequence_length = 512
micro_batch_size = 8
hidden_size = 256
config = ModelParallelConfig(
pipeline_model_parallel_size = 4,
sequence_parallel = False
)
model.config = config
losses_reduced = forward_backward_func(
forward_step_func=forward_step_func,
data_iterator=None,
dtype=torch.float32,
model=[model],
num_microbatches= micro_batch_size,
seq_length=sequence_length,
micro_batch_size=micro_batch_size,
forward_only=True)
loss_reduced_expected = [{'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}]
for i,j in zip(losses_reduced, loss_reduced_expected):
print(losses_reduced)
assert(i['loss_reduced'] == j['loss_reduced'])
Utils.destroy_model_parallel()
def test_forward_backward_func_with_interleaving(mocker):
from megatron.core.pipeline_parallel import get_forward_backward_func
from megatron.core.enums import ModelType
Utils.initialize_model_parallel(tensor_model_parallel_size=1, pipeline_model_parallel_size=4, virtual_pipeline_model_parallel_size=2)
def forward_step_func(data_iterator, model):
import os
rank = int(os.environ['LOCAL_RANK'])
def loss_func(output_tensor):
return rank, {'loss_reduced':rank}
return torch.rand(512,8,256).cuda(), loss_func
model = torch.nn.Linear(4,1)
def set_input_tensor(input_tensor):
return None
model.set_input_tensor = set_input_tensor
forward_backward_func = get_forward_backward_func()
assert(schedule.get_forward_backward_func() == schedule.forward_backward_pipelining_with_interleaving)
sequence_length = 512
micro_batch_size = 8
hidden_size = 256
mocker.patch("megatron.core.pipeline_parallel.schedules.custom_backward", return_value=2)
with pytest.raises(RuntimeError):
model.model_type = ModelType.encoder_and_decoder
forward_backward_func(
forward_step_func=forward_step_func,
data_iterator=range(0,100),
dtype=torch.float32,
model=[model, model],
num_microbatches= micro_batch_size,
tensor_shape=[sequence_length, micro_batch_size, hidden_size],
decoder_seq_length=sequence_length,
sequence_parallel=False,
forward_only=True)
with pytest.raises(RuntimeError):
model.model_type = ModelType.encoder_or_decoder
forward_backward_func(
forward_step_func=forward_step_func,
data_iterator=range(0,100),
dtype=torch.float32,
model=[model, model],
num_microbatches= micro_batch_size,
tensor_shape=[sequence_length, micro_batch_size, hidden_size],
decoder_seq_length=256,
sequence_parallel=False,
forward_only=True)
with pytest.raises(RuntimeError):
model.model_type = ModelType.encoder_or_decoder
forward_backward_func(
forward_step_func=forward_step_func,
data_iterator=range(0,100),
dtype=torch.float32,
model=[model, model],
num_microbatches= 7,
tensor_shape=[sequence_length, micro_batch_size, hidden_size],
decoder_seq_length=512,
sequence_parallel=False,
forward_only=True)
model.model_type = ModelType.encoder_or_decoder
losses_reduced = forward_backward_func(
forward_step_func=forward_step_func,
data_iterator=range(0,100),
dtype=torch.float32,
model=[model, model],
num_microbatches= micro_batch_size,
tensor_shape=[sequence_length, micro_batch_size, hidden_size],
decoder_seq_length=sequence_length,
sequence_parallel=True,
forward_only=True)
loss_reduced_expected = [{'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}, {'loss_reduced': rank}]
for i,j in zip(losses_reduced, loss_reduced_expected):
print(losses_reduced)
assert(i['loss_reduced'] == j['loss_reduced'])
Utils.destroy_model_parallel()
"""
|
Megatron-LM-master
|
tests/unit_tests/pipeline_parallel/test_schedules.py
|
Megatron-LM-master
|
tests/unit_tests/pipeline_parallel/__init__.py
|
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import pytest
import torch
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.models.gpt.gpt_model import GPTModel
from tests.unit_tests.test_utilities import Utils
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
class TestGPTModel:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
model_parallel_cuda_manual_seed(123)
transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.gpt_model = GPTModel(config=transformer_config, vocab_size=100, max_sequence_length=4)
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_constructor(self):
assert isinstance(self.gpt_model, GPTModel)
assert self.gpt_model.max_sequence_length == 4
num_weights = sum([p.numel() for p in self.gpt_model.parameters()])
assert num_weights == 6240
def test_set_input_tensor(self):
config: TransformerConfig = self.gpt_model.config
sequence_length = self.gpt_model.max_sequence_length
micro_batch_size = 2
# [sequence length, batch size, hidden size]
input_tensor = torch.ones((sequence_length, micro_batch_size, config.hidden_size))
self.gpt_model.set_input_tensor(input_tensor)
assert self.gpt_model.decoder.input_tensor.shape[0] == sequence_length
assert self.gpt_model.decoder.input_tensor.shape[1] == micro_batch_size
assert self.gpt_model.decoder.input_tensor.shape[2] == config.hidden_size
def test_post_process_forward(self):
config: TransformerConfig = self.gpt_model.config
sequence_length = self.gpt_model.max_sequence_length
micro_batch_size = 2
self.gpt_model.cuda()
data = list(range(sequence_length))
input_ids = torch.tensor(data, dtype=torch.int64).repeat((micro_batch_size, 1)).cuda()
position_ids = torch.tensor(data, dtype=torch.int64).repeat((micro_batch_size, 1)).cuda()
attention_mask = torch.ones((1, 1, sequence_length, sequence_length), dtype=bool).cuda()
logits = self.gpt_model.forward(input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask)
assert logits.shape[0] == micro_batch_size
assert logits.shape[1] == sequence_length
assert logits.shape[2] == self.gpt_model.vocab_size
def test_no_post_process_forward(self):
pass
def test_no_preprocess_forward(self):
pass
def test_state_dict_for_save_checkpoint(self):
pass
def test_load_state_dict(self):
pass
|
Megatron-LM-master
|
tests/unit_tests/models/test_gpt_model.py
|
Megatron-LM-master
|
tests/unit_tests/models/__init__.py
|
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import pytest
import torch
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.models.gpt.gpt_embedding import GPTEmbedding
from tests.unit_tests.test_utilities import Utils
class TestGPTEmbedding:
def setup_method(self, method):
Utils.initialize_model_parallel(1,1)
transformer_config = TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True)
self.gpt_embedding = GPTEmbedding(config=transformer_config, vocab_size=100, max_sequence_length=4, add_position_embedding=True)
def teardown_method(self, method):
Utils.destroy_model_parallel()
def test_constructor(self):
assert isinstance(self.gpt_embedding, GPTEmbedding)
num_weights = sum([p.numel() for p in self.gpt_embedding.parameters()])
assert num_weights == 1248
def test_zero_parameters(self):
sum_weights = sum([p.sum() for p in self.gpt_embedding.parameters()])
assert sum_weights != 0
self.gpt_embedding.zero_parameters()
sum_weights = sum([p.sum() for p in self.gpt_embedding.parameters()])
assert sum_weights == 0
def test_cpu_forward(self):
input_ids = torch.tensor([0, 1, 2, 3], dtype=torch.int64).repeat((2, 1))
position_ids = torch.tensor([0, 1, 2, 3], dtype=torch.int64).repeat((2, 1))
embeddings = self.gpt_embedding(input_ids, position_ids)
assert embeddings.device.type == 'cpu'
assert embeddings.shape[0] == self.gpt_embedding.max_sequence_length
assert embeddings.shape[1] == input_ids.shape[0]
assert embeddings.shape[2] == self.gpt_embedding.config.hidden_size
def test_gpu_forward(self):
self.gpt_embedding.cuda()
input_ids = torch.tensor([0, 1, 2, 3], dtype=torch.int64).repeat((2, 1)).cuda()
position_ids = torch.tensor([0, 1, 2, 3], dtype=torch.int64).repeat((2, 1)).cuda()
embeddings = self.gpt_embedding(input_ids, position_ids)
assert embeddings.device.type == 'cuda'
assert embeddings.shape[0] == self.gpt_embedding.max_sequence_length
assert embeddings.shape[1] == input_ids.shape[0]
assert embeddings.shape[2] == self.gpt_embedding.config.hidden_size
|
Megatron-LM-master
|
tests/unit_tests/models/test_gpt_embedding.py
|