Dataset columns:
- python_code: string, 0 to 4.04M characters
- repo_name: string, 7 to 58 characters
- file_path: string, 5 to 147 characters
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from operator import attrgetter

import torch.distributed as dist
import torch.nn as nn

from ..pq.utils import attrsetter, get_layers
from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear

MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}


def quantize_model_(model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False):
    """
    Replaces all modules with their scalar quantized counterpart and
    registers hooks to quantize the post-activations of those modules.

    Args:
        - model: a nn.Module
        - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
        - bits: number of bits
        - update_step: update quantization parameters every update_step steps
    """
    # quantize all layers
    # remove_weights indicates whether the "weight" extension should be removed from
    # parameter names, in addition to the "weight_orig" extension (added e.g. by pruning)
    quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights)

    for layer in quantized_layers:

        # book-keeping
        is_master_process = (not dist.is_initialized()) or (
            dist.is_initialized() and dist.get_rank() == 0
        )

        # recover module
        module = attrgetter(layer)(model)
        if is_master_process:
            logging.info(
                f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}"
            )

        # quantization params
        q_params = {
            "p": p,
            "update_step": update_step,
            "bits": bits,
            "method": method,
            "counter": 0,
        }

        # instantiate the quantized counterpart
        if isinstance(module, tuple(MAPPING.keys())):
            QuantizedModule = MAPPING[module.__class__]
            quantized_module = QuantizedModule.__new__(QuantizedModule)
            params = module.__dict__
            params.update(q_params)
            quantized_module.__dict__.update(params)
        else:
            if is_master_process:
                logging.info(f"Module {module} not yet supported for quantization")
            continue

        # activation quantization
        a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method)

        # replace layer by its quantized counterpart
        attrsetter(layer)(model, quantized_module)

    # return name of quantized layers
    return quantized_layers
bart_ls-main
fairseq-py/fairseq/modules/quantization/scalar/utils.py
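A minimal usage sketch of quantize_model_ (illustrative only; the toy model and the import path are assumptions, not part of the repo):

import torch.nn as nn
from fairseq.modules.quantization.scalar.utils import quantize_model_  # path assumed

model = nn.Sequential(nn.Embedding(100, 16), nn.Linear(16, 4))
# swaps supported submodules for their Int* counterparts in place, registers
# activation-quantization hooks, and returns the names of the matched layers
layers = quantize_model_(model, p=0.1, bits=8, update_step=1000)
print(layers)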
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..ops import emulate_int


class IntLinear(nn.Module):
    """
    Quantized counterpart of the nn.Linear module that applies QuantNoise during training.

    Args:
        - in_features: input features
        - out_features: output features
        - bias: bias or not
        - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
        - bits: number of bits
        - method: choose among {"tensor", "histogram", "channel"}
        - update_step: recompute scale and zero_point every update_step iterations

    Remarks:
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network, this is implemented with
          the detach() trick
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - At test time, the weights are fully quantized
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        p=0,
        update_step=3000,
        bits=8,
        method="histogram",
    ):
        super(IntLinear, self).__init__()
        self.in_features = int(in_features)
        self.out_features = int(out_features)
        self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.chosen_bias = bias
        if self.chosen_bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()

        # quantization parameters
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        self.counter = 0

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight)
        if self.chosen_bias:
            nn.init.constant_(self.bias, 0.0)
        return

    def forward(self, input):
        # train with QuantNoise and evaluate the fully quantized network
        p = self.p if self.training else 1

        # update parameters every update_step iterations
        if self.counter % self.update_step == 0:
            self.scale = None
            self.zero_point = None
        self.counter += 1

        # quantize weight
        weight_quantized, self.scale, self.zero_point = emulate_int(
            self.weight.detach(),
            bits=self.bits,
            method=self.method,
            scale=self.scale,
            zero_point=self.zero_point,
        )

        # mask to apply noise
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_(1 - p)
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)

        # using straight-through estimator (STE)
        clamp_low = -self.scale * self.zero_point
        clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
        weight = (
            torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
            + noise.detach()
        )

        # return output
        output = F.linear(input, weight, self.bias)
        return output

    def extra_repr(self):
        return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format(
            self.in_features,
            self.out_features,
            self.bias is not None,
            self.p,
            self.bits,
            self.method,
        )
bart_ls-main
fairseq-py/fairseq/modules/quantization/scalar/modules/qlinear.py
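The noise-mask plus straight-through-estimator pattern at the heart of IntLinear.forward, shown standalone with a stand-in round-to-grid quantizer in place of emulate_int (a sketch, not the repo's quantizer):

import torch

w = torch.randn(4, 4, requires_grad=True)
p = 0.5                              # fraction of weights receiving quantization noise
w_q = (w.detach() * 4).round() / 4   # stand-in for emulate_int's fake quantization

mask = torch.zeros_like(w).bernoulli_(1 - p)            # 1 = keep full precision
noise = (w_q - w.detach()).masked_fill(mask.bool(), 0)  # constant w.r.t. autograd

w_noisy = w + noise        # forward sees partially quantized weights
w_noisy.sum().backward()   # STE: the detached noise is invisible to autograd
print(torch.allclose(w.grad, torch.ones_like(w)))  # True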
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair

from ..ops import emulate_int


class IntConv2d(_ConvNd):
    """
    Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training.

    Args:
        - standard nn.Conv2d parameters
        - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
        - bits: number of bits
        - method: choose among {"tensor", "histogram", "channel"}
        - update_step: recompute scale and zero_point every update_step iterations

    Remarks:
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network, this is implemented with
          the detach() trick
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - At test time, the weights are fully quantized
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
        p=0,
        bits=8,
        method="histogram",
        update_step=1000,
    ):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(IntConv2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            False,
            _pair(0),
            groups,
            bias,
            padding_mode,
        )

        # quantization parameters
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        self.counter = 0

    def _conv_forward(self, input, weight):
        if self.padding_mode != "zeros":
            return F.conv2d(
                F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
                weight,
                self.bias,
                self.stride,
                _pair(0),
                self.dilation,
                self.groups,
            )
        return F.conv2d(
            input,
            weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )

    def forward(self, input):
        # train with QuantNoise and evaluate the fully quantized network
        p = self.p if self.training else 1

        # update parameters every update_step iterations
        if self.counter % self.update_step == 0:
            self.scale = None
            self.zero_point = None
        self.counter += 1

        # quantize weight
        weight_quantized, self.scale, self.zero_point = emulate_int(
            self.weight.detach(),
            bits=self.bits,
            method=self.method,
            scale=self.scale,
            zero_point=self.zero_point,
        )

        # mask to apply noise
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_(1 - p)
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)

        # using straight-through estimator (STE)
        clamp_low = -self.scale * self.zero_point
        clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
        weight = (
            torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
            + noise.detach()
        )

        # return output
        output = self._conv_forward(input, weight)
        return output

    def extra_repr(self):
        return (
            "in_channels={}, out_channels={}, kernel_size={}, stride={}, "
            "padding={}, dilation={}, groups={}, bias={}, quant_noise={}, "
            "bits={}, method={}".format(
                self.in_channels,
                self.out_channels,
                self.kernel_size,
                self.stride,
                self.padding,
                self.dilation,
                self.groups,
                self.bias is not None,
                self.p,
                self.bits,
                self.method,
            )
        )
bart_ls-main
fairseq-py/fairseq/modules/quantization/scalar/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .qact import ActivationQuantizer  # NOQA
from .qconv import IntConv2d  # NOQA
from .qemb import IntEmbedding  # NOQA
from .qlinear import IntLinear  # NOQA
bart_ls-main
fairseq-py/fairseq/modules/quantization/scalar/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..ops import emulate_int


class IntEmbedding(nn.Module):
    """
    Quantized counterpart of the nn.Embedding module that applies QuantNoise during training.

    Args:
        - num_embeddings: number of tokens
        - embedding_dim: embedding dimension
        - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
        - bits: number of bits
        - method: choose among {"tensor", "histogram", "channel"}
        - update_step: recompute scale and zero_point every update_step iterations

    Remarks:
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network, this is implemented with
          the detach() trick
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - At test time, the weights are fully quantized
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
        _weight=None,
        p=0,
        update_step=1000,
        bits=8,
        method="histogram",
    ):
        super(IntEmbedding, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if _weight is None:
            self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
            self.reset_parameters()
        else:
            assert list(_weight.shape) == [
                num_embeddings,
                embedding_dim,
            ], "Shape of weight does not match num_embeddings and embedding_dim"
            self.weight = nn.Parameter(_weight)
        self.sparse = sparse

        # quantization parameters
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        self.counter = 0

    def reset_parameters(self):
        nn.init.normal_(self.weight)
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input):
        # train with QuantNoise and evaluate the fully quantized network
        p = self.p if self.training else 1

        # update parameters every update_step iterations
        if self.counter % self.update_step == 0:
            self.scale = None
            self.zero_point = None
        self.counter += 1

        # quantize weight
        weight_quantized, self.scale, self.zero_point = emulate_int(
            self.weight.detach(),
            bits=self.bits,
            method=self.method,
            scale=self.scale,
            zero_point=self.zero_point,
        )

        # mask to apply noise
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_(1 - p)
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)

        # using straight-through estimator (STE)
        clamp_low = -self.scale * self.zero_point
        clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
        weight = (
            torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
            + noise.detach()
        )

        # return output
        output = F.embedding(
            input,
            weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        return output

    def extra_repr(self):
        s = "{num_embeddings}, {embedding_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        if self.max_norm is not None:
            s += ", max_norm={max_norm}"
        if self.norm_type != 2:
            s += ", norm_type={norm_type}"
        if self.scale_grad_by_freq is not False:
            s += ", scale_grad_by_freq={scale_grad_by_freq}"
        if self.sparse is not False:
            s += ", sparse=True"
        s += ", quant_noise={p}, bits={bits}, method={method}"
        return s.format(**self.__dict__)
bart_ls-main
fairseq-py/fairseq/modules/quantization/scalar/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from ..ops import emulate_int


class ActivationQuantizer:
    """
    Fake scalar quantization of the activations using a forward hook.

    Args:
        - module: a nn.Module for which we quantize the *post-activations*
        - p: proportion of activations to quantize, set by default to 1
        - update_step: to recompute quantization parameters
        - bits: number of bits for quantization
        - method: choose among {"tensor", "histogram", "channel"}
        - clamp_threshold: to prevent gradients overflow

    Remarks:
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - For the list of quantization methods and number of bits, see ops.py
        - To remove the hook from the module, simply call self.handle.remove()
        - At test time, the activations are fully quantized
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network, this is implemented with
          the detach() trick
        - The activations are hard-clamped in [-clamp_threshold, clamp_threshold]
          to prevent overflow during the backward pass
    """

    def __init__(
        self,
        module,
        p=1,
        update_step=1000,
        bits=8,
        method="histogram",
        clamp_threshold=5,
    ):
        self.module = module
        self.p = p
        self.update_step = update_step
        self.counter = 0
        self.bits = bits
        self.method = method
        self.clamp_threshold = clamp_threshold
        self.handle = None
        self.register_hook()

    def register_hook(self):
        # forward hook
        def quantize_hook(module, x, y):

            # update parameters every update_step iterations
            if self.counter % self.update_step == 0:
                self.scale = None
                self.zero_point = None
            self.counter += 1

            # train with QuantNoise and evaluate the fully quantized network
            p = self.p if self.module.training else 1

            # quantize activations
            y_q, self.scale, self.zero_point = emulate_int(
                y.detach(),
                bits=self.bits,
                method=self.method,
                scale=self.scale,
                zero_point=self.zero_point,
            )

            # mask to apply noise
            mask = torch.zeros_like(y)
            mask.bernoulli_(1 - p)
            noise = (y_q - y).masked_fill(mask.bool(), 0)

            # using straight-through estimator (STE)
            clamp_low = -self.scale * self.zero_point
            clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
            return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach()

        # register hook
        self.handle = self.module.register_forward_hook(quantize_hook)
bart_ls-main
fairseq-py/fairseq/modules/quantization/scalar/modules/qact.py
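A short sketch of attaching and detaching an ActivationQuantizer (assumes the scalar-quantization modules are importable from fairseq):

import torch
import torch.nn as nn
from fairseq.modules.quantization.scalar.modules import ActivationQuantizer

layer = nn.Linear(8, 8)
quantizer = ActivationQuantizer(layer, p=0.5, bits=8, method="histogram")
_ = layer(torch.randn(2, 8))   # outputs now pass through the quantization hook
quantizer.handle.remove()      # unregister the forward hook when done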
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from einops import rearrange

from cauchy_mult import cauchy_mult_fwd, cauchy_mult_bwd, cauchy_mult_sym_fwd, cauchy_mult_sym_bwd


def cauchy_mult_torch(v: torch.Tensor, z: torch.Tensor, w: torch.Tensor,
                      symmetric=True) -> torch.Tensor:
    """
    v: (B, N)
    z: (L)
    w: (B, N)
    symmetric: whether to assume that v and w contain complex conjugate pairs, of the form
    [v_half, v_half.conj()] and [w_half, w_half.conj()]
    """
    if not symmetric:
        return (rearrange(v, 'b n -> b 1 n')
                / (rearrange(z, 'l -> l 1') - rearrange(w, 'b n -> b 1 n'))).sum(dim=-1)
    else:
        N = v.shape[-1]
        assert N % 2 == 0
        vv = rearrange(v[:, :N // 2], 'b n -> b 1 n')
        zz = rearrange(z, 'l -> l 1')
        ww = rearrange(w[:, :N // 2], 'b n -> b 1 n')
        return 2 * ((zz * vv.real - vv.real * ww.real - vv.imag * ww.imag)
                    / (zz * zz - 2 * zz * ww.real + ww.abs().square())).sum(dim=-1)


def cauchy_mult_keops(v, z, w):
    from pykeops.torch import LazyTensor
    v_l = LazyTensor(rearrange(v, 'b N -> b 1 N 1'))
    z_l = LazyTensor(rearrange(z, 'L -> 1 L 1 1'))
    w_l = LazyTensor(rearrange(w, 'b N -> b 1 N 1'))
    sub = z_l - w_l  # (b N L 1), for some reason it doesn't display the last dimension
    div = v_l / sub
    s = div.sum(dim=2, backend='GPU')
    return s.squeeze(-1)


def _cauchy_mult(v, z, w, symmetric=True):
    if not symmetric:
        return CauchyMultiply.apply(v, z, w)
    else:
        return CauchyMultiplySymmetric.apply(v, z, w)


def cauchy_mult(v, z, w, symmetric=True):
    """Wrap the cuda method to deal with shapes"""
    v, w = torch.broadcast_tensors(v, w)
    shape = v.shape
    # z_shape = z.shape
    z = z.squeeze()
    assert len(z.shape) == 1

    v = v.contiguous()
    w = w.contiguous()
    z = z.contiguous()

    N = v.size(-1)
    assert w.size(-1) == N
    y = _cauchy_mult(v.view(-1, N), z, w.view(-1, N), symmetric=symmetric)
    y = y.view(*shape[:-1], z.size(-1))
    # y = z.new_zeros(*shape[:-1], z.size(-1))
    return y


class CauchyMultiply(torch.autograd.Function):

    @staticmethod
    def forward(ctx, v, z, w):
        batch, N = v.shape
        # supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        supported_N_values = [1 << log_n for log_n in [6]]
        L = z.shape[-1]
        if N not in supported_N_values:
            raise NotImplementedError(f'Only support N values in {supported_N_values}')
        if L % 32 != 0:
            raise NotImplementedError('Only support L values that are multiples of 32')
        if not (v.is_cuda and z.is_cuda and w.is_cuda):
            raise NotImplementedError('Only support CUDA tensors')
        ctx.save_for_backward(v, z, w)
        return cauchy_mult_fwd(v, z, w)

    @staticmethod
    def backward(ctx, dout):
        v, z, w = ctx.saved_tensors
        dv, dw = cauchy_mult_bwd(v, z, w, dout)
        return dv, None, dw


class CauchyMultiplySymmetric(torch.autograd.Function):

    @staticmethod
    def forward(ctx, v, z, w):
        batch, N = v.shape
        supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        L = z.shape[-1]
        if N not in supported_N_values:
            raise NotImplementedError(f'Only support N values in {supported_N_values}')
        max_L_value = 32 * 1024 * 64 * 1024
        if L > max_L_value:
            raise NotImplementedError(f'Only support L values <= {max_L_value}')
        if not (v.is_cuda and z.is_cuda and w.is_cuda):
            raise NotImplementedError('Only support CUDA tensors')
        ctx.save_for_backward(v, z, w)
        return cauchy_mult_sym_fwd(v, z, w)

    @staticmethod
    def backward(ctx, dout):
        v, z, w = ctx.saved_tensors
        dv, dw = cauchy_mult_sym_bwd(v, z, w, dout)
        return dv, None, dw
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/cauchy.py
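A CPU sanity check that the symmetric closed form in cauchy_mult_torch matches the naive evaluation over full conjugate pairs (a sketch; cauchy.py imports the compiled cauchy_mult extension at module scope, so this assumes the extension has been built, or that the reference function is copied out first):

import torch
from cauchy import cauchy_mult_torch  # import path assumed

B, N, L = 2, 8, 16
v_half = torch.randn(B, N // 2, dtype=torch.complex64)
w_half = torch.randn(B, N // 2, dtype=torch.complex64)
v = torch.cat([v_half, v_half.conj()], dim=-1)
w = torch.cat([w_half, w_half.conj()], dim=-1)
z = torch.exp(1j * torch.randn(L))

out_sym = cauchy_mult_torch(v, z, w, symmetric=True)
out_naive = cauchy_mult_torch(v, z, w, symmetric=False)
print(torch.allclose(out_sym, out_naive, atol=1e-4))  # True up to float error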
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from setuptools import setup

import torch.cuda
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from torch.utils.cpp_extension import CUDA_HOME

ext_modules = []
if torch.cuda.is_available() and CUDA_HOME is not None:
    extension = CUDAExtension(
        'cauchy_mult',
        [
            'cauchy.cpp',
            'cauchy_cuda.cu',
        ],
        extra_compile_args={
            'cxx': ['-g', '-march=native', '-funroll-loops'],
            # 'nvcc': ['-O2', '-lineinfo']
            'nvcc': ['-O2', '-lineinfo', '--use_fast_math'],
        },
    )
    ext_modules.append(extension)

setup(
    name='cauchy_mult',
    ext_modules=ext_modules,
    # cmdclass={'build_ext': BuildExtension.with_options(use_ninja=False)})
    cmdclass={'build_ext': BuildExtension})
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/setup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from functools import partial

import torch

from einops import rearrange

from .cauchy import cauchy_mult_torch, cauchy_mult_keops, cauchy_mult
from benchmark.utils import benchmark_all, benchmark_combined, benchmark_forward, benchmark_backward


def generate_data(batch_size, N, L, symmetric=True, device='cuda'):
    if not symmetric:
        v = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
        w = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
        z = torch.randn(L, dtype=torch.complex64, device=device)
    else:
        assert N % 2 == 0
        v_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
        v = torch.cat([v_half, v_half.conj()], dim=-1).requires_grad_(True)
        w_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
        w = torch.cat([w_half, w_half.conj()], dim=-1).requires_grad_(True)
        z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device=device))
    return v, z, w


if __name__ == '__main__':
    device = 'cuda'
    bs = 1024
    N = 64
    L = 16384
    v, z, w = generate_data(bs, N, L, symmetric=True)
    v_half = v[:, :N // 2].clone().detach().requires_grad_(True)
    w_half = w[:, :N // 2].clone().detach().requires_grad_(True)

    repeat = 30
    benchmark_all(repeat, cauchy_mult_keops, v, z, w, desc='Cauchy mult keops')
    fn = partial(cauchy_mult, symmetric=False)
    benchmark_all(repeat, fn, v, z, w, desc='Cauchy mult')
    fn = partial(cauchy_mult, symmetric=True)
    benchmark_all(repeat, fn, v_half, z, w_half, desc='Cauchy mult symmetric')
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/benchmark_cauchy.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import json
import argparse

import torch

from benchmark.utils import benchmark_forward


def generate_data(batch_size, N, L, symmetric=True, device='cuda'):
    if not symmetric:
        v = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
        w = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
        z = torch.randn(L, dtype=torch.complex64, device=device)
    else:
        assert N % 2 == 0
        v_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
        v = torch.cat([v_half, v_half.conj()], dim=-1).requires_grad_(True)
        w_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
        w = torch.cat([w_half, w_half.conj()], dim=-1).requires_grad_(True)
        z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device=device))
    return v, z, w


parser = argparse.ArgumentParser(description='Tuning Cauchy multiply')
parser.add_argument('--name', default='cauchy_mult')
parser.add_argument('--mode', default='forward', choices=['forward', 'backward'])
parser.add_argument('-bs', '--batch-size', default=1024, type=int)
parser.add_argument('-N', default=64, type=int)
parser.add_argument('-L', default=2 ** 14, type=int)


if __name__ == '__main__':
    args = parser.parse_args()
    device = 'cuda'
    bs = args.batch_size
    N = args.N
    L = args.L
    repeat = 30

    v, z, w = generate_data(bs, N, L, symmetric=True)
    v_half = v[:, :N // 2].clone().detach().requires_grad_(True)
    w_half = w[:, :N // 2].clone().detach().requires_grad_(True)

    tuning_extension_name = args.name
    # print('Extension name:', tuning_extension_name)
    module = importlib.import_module(tuning_extension_name)

    if args.mode == 'forward':
        _, m = benchmark_forward(repeat, module.cauchy_mult_sym_fwd, v_half, z, w_half,
                                 verbose=False, desc='Cauchy mult symmetric fwd')
    else:
        out = module.cauchy_mult_sym_fwd(v_half, z, w_half)
        dout = torch.randn_like(out)
        _, m = benchmark_forward(repeat, module.cauchy_mult_sym_bwd, v_half, z, w_half, dout,
                                 verbose=False, desc='Cauchy mult symmetric bwd')
    result_dict = dict(time_mean=m.mean, time_iqr=m.iqr)
    print(json.dumps(result_dict))
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/benchmark_cauchy_tune.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import shutil
import subprocess
import sys
# import tempfile
# import importlib
import random
import string
import json
from functools import partial
from multiprocessing import Pipe, Pool, Process
from pathlib import Path

from tqdm import tqdm
import numpy as np


def read_file(filename):
    """return the contents of the file named filename or None if file not found"""
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            return f.read()


def write_file(filename, string):
    """dump the contents of string to a file called filename"""
    with open(filename, 'w', encoding="utf-8") as f:
        f.write(string)


def prepare_kernel_string(kernel_string, params):
    for k, v in params.items():
        kernel_string = "#define " + k + " " + str(v) + "\n" + kernel_string
    return kernel_string


def compile_extension(temp_dir, install=False, verbose=True):
    # Need to copy this process's environments, otherwise it can't find the compilers
    env = {**os.environ,
           'TUNING_SOURCE_DIR': str(temp_dir),
           'TUNING_EXTENSION_NAME': str(temp_dir.stem)}
    # https://stackoverflow.com/questions/53173314/how-to-change-distutils-output-directory
    # Need separate build directories for parallel compilation
    output = subprocess.run(
        # [sys.executable, "tuning_setup.py", 'build', f'--build-base={str(temp_dir)}',
        #  f'--build-lib={str(temp_dir)}'],
        [sys.executable, "tuning_setup.py", 'build' if not install else 'develop'],
        cwd=temp_dir,
        env=env,
        capture_output=True,
        # check=True
    )
    if verbose:
        print(output)
        print('Done compiling' if not install else 'Done installing')


def uninstall_extensions(tuning_extension_names, verbose=True):
    # Need to copy this process's environments, otherwise it can't find the compilers
    env = {**os.environ}
    output = subprocess.run(
        [sys.executable, '-m', 'pip', 'uninstall', '-y', *tuning_extension_names],
        env=env,
        capture_output=True,
        # check=True
    )
    if verbose:
        print(output)
        print('Done uninstalling')


def benchmark_extension(benchmark_script, *benchmark_args, verbose=True):
    # Need to copy this process's environments, otherwise it can't find the compilers
    env = os.environ
    # https://stackoverflow.com/questions/53173314/how-to-change-distutils-output-directory
    # Need separate build directories for parallel compilation
    process = subprocess.run(
        [sys.executable, benchmark_script, *benchmark_args],
        env=os.environ,
        capture_output=True,
        # check=True
    )
    if verbose:
        print(process)
        print('Done benchmarking')
    return json.loads(process.stdout.decode(sys.stdout.encoding))


# def benchmark(connection, temp_dir):
#     import torch
#     # module = importlib.import_module(tuning_extension_name)
#     torch.ops.load_library(temp_dir / 'torch_butterfly_tuning.so')
#     batch_size = 1024
#     n = 32
#     twiddle = torch.randn(1, 1, 5, n // 2, 2, 2, device='cuda')
#     input = torch.randn(batch_size, 1, n, device=twiddle.device)
#     output = torch.ops.torch_butterfly.butterfly_multiply_fw(twiddle, input, True)
#     # https://medium.com/@auro_227/timing-your-pytorch-code-fragments-e1a556e81f2
#     res = []
#     for _ in range(32):
#         start = torch.cuda.Event(enable_timing=True)
#         end = torch.cuda.Event(enable_timing=True)
#         start.record()
#         output = torch.ops.torch_butterfly.butterfly_multiply_fw(twiddle, input, True)
#         end.record()
#         torch.cuda.synchronize()
#         res.append(start.elapsed_time(end))
#     print(output.shape)
#     res = np.array(res)
#     connection.send((np.mean(res), np.std(res)))


def set_up_tuning_temp_dir(params: dict, source_files, extension_dir, verbose=True):
    if verbose:
        print('params: ', params)
    # TD [2021-10-22]: tempfile.mkdtemp sometimes creates a dir name with '_' in it,
    # thus messing up the extension name.
    # temp_dir = Path(tempfile.mkdtemp(prefix="temp_", dir=Path.cwd().parent)).absolute()
    tuning_extension_name = 'temp_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
    temp_dir = (Path.cwd().parent / tuning_extension_name).absolute()
    if temp_dir.exists():
        shutil.rmtree(temp_dir)  # shutil.copytree doesn't want a directory that already exists
    shutil.copytree(extension_dir, temp_dir)
    sources = [temp_dir / name for name in source_files]
    for kernel_source in sources:
        ks = read_file(kernel_source)
        ks = prepare_kernel_string(ks, params)
        write_file(kernel_source, ks)
    return temp_dir


class KernelTuner:

    def __init__(self, extension_dir, source_files, params_list, benchmark_script,
                 benchmark_args, npool=8, verbose=True):
        self.extension_dir = extension_dir
        self.source_files = source_files
        self.params_list = params_list
        self.benchmark_script = benchmark_script
        self.benchmark_args = benchmark_args
        self.npool = npool
        self.verbose = verbose

    def tune(self):
        temp_dirs = [set_up_tuning_temp_dir(params, self.source_files, self.extension_dir,
                                            verbose=self.verbose)
                     for params in self.params_list]
        # Compile in parallel (for speed), then install sequentially to ensure correctness
        with Pool(self.npool) as p:
            p.map(compile_extension, temp_dirs)
        # with Pool(1) as p:
        #     p.map(partial(compile_extension, install=True), [temp_dirs])
        for temp_dir in tqdm(temp_dirs):
            try:
                compile_extension(temp_dir, install=True)
            except Exception:
                pass
        # # We benchmark on a separate process so that they can import the extension that just got compiled.
        # for params, temp_dir in params_tempdir:
        #     print('Benchmarking: ', params)
        #     recv_conn, send_conn = Pipe(duplex=False)
        #     benchmark_process = Process(target=benchmark_fwd, args=(send_conn, str(temp_dir.stem)))
        #     benchmark_process.start()
        #     result = recv_conn.recv()
        #     benchmark_process.join()
        #     print('result', result)
        results = []
        for params, temp_dir in tqdm(list(zip(self.params_list, temp_dirs))):
            try:
                results.append((params,
                                benchmark_extension(self.benchmark_script,
                                                    *['--name', temp_dir.stem] + self.benchmark_args)))
            except Exception:
                pass
        print(results)
        uninstall_extensions([temp_dir.stem for temp_dir in temp_dirs])
        for temp_dir in temp_dirs:
            shutil.rmtree(temp_dir)
        return results
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/tuner.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
from setuptools import setup
from pathlib import Path

import torch.cuda
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from torch.utils.cpp_extension import CUDA_HOME

extensions_dir = Path(os.getenv('TUNING_SOURCE_DIR')).absolute()
assert extensions_dir.exists()

source_files = [
    'cauchy.cpp',
    'cauchy_cuda.cu',
]
sources = [str(extensions_dir / name) for name in source_files]

extension_name = os.getenv('TUNING_EXTENSION_NAME', default='cauchy_mult_tuning')

ext_modules = []
if torch.cuda.is_available() and CUDA_HOME is not None:
    extension = CUDAExtension(
        extension_name,
        sources,
        include_dirs=[extensions_dir],
        extra_compile_args={
            'cxx': ['-g', '-march=native', '-funroll-loops'],
            # 'nvcc': ['-O2', '-lineinfo']
            'nvcc': ['-O2', '-lineinfo', '--use_fast_math'],
        },
    )
    ext_modules.append(extension)

setup(
    name=extension_name,
    ext_modules=ext_modules,
    # cmdclass={'build_ext': BuildExtension.with_options(use_ninja=False)})
    cmdclass={'build_ext': BuildExtension})
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/tuning_setup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
import json
import argparse
import itertools
from pathlib import Path

from tuner import KernelTuner


def forward_params_list(N):
    blocksize_params = ('MAX_BLOCK_SIZE_VALUE', [64, 128, 256, 512, 1024])
    thread_value_default = [2, 4, 8, 16, 32, 32, 32, 32, 32, 32]
    thread_values_supported = [2, 4, 8, 16, 32, 64, 128]
    log_N_half = int(math.log2(N)) - 1
    thread_values = []
    for val in thread_values_supported:
        if val <= N // 2:
            array = list(thread_value_default)
            array[log_N_half - 1] = val
            thread_values.append('{' + ', '.join(str(v) for v in array) + '}')
    thread_params = ('ITEMS_PER_THREAD_SYM_FWD_VALUES', thread_values)
    value_prod = itertools.product(thread_params[1], blocksize_params[1])
    params_list = [{thread_params[0]: value[0], blocksize_params[0]: value[1]}
                   for value in value_prod]
    return params_list


def backward_params_list(L):
    thread_value_supported = [8, 16, 32, 64, 128]
    thread_params = ('ITEMS_PER_THREAD_SYM_BWD_VALUE',
                     [v for v in thread_value_supported if (L + v - 1) // v <= 1024])
    params_list = [{thread_params[0]: value} for value in thread_params[1]]
    return params_list


parser = argparse.ArgumentParser(description='Tuning Cauchy multiply')
parser.add_argument('--mode', default='forward', choices=['forward', 'backward'])
parser.add_argument('-N', default=64, type=int)
parser.add_argument('-L', default=2 ** 14, type=int)
parser.add_argument('--filename', default='tuning_result.json')


if __name__ == '__main__':
    args = parser.parse_args()
    extension_dir = Path(__file__).absolute().parent
    source_files = ['cauchy_cuda.cu']
    if args.mode == 'forward':
        params_list = forward_params_list(args.N)
        tuner = KernelTuner(extension_dir, source_files, params_list,
                            benchmark_script='benchmark_cauchy_tune.py',
                            benchmark_args=['--mode', 'forward', '-N', str(args.N), '-L', '16384'],
                            npool=16)
    else:
        params_list = backward_params_list(args.L)
        tuner = KernelTuner(extension_dir, source_files, params_list,
                            benchmark_script='benchmark_cauchy_tune.py',
                            benchmark_args=['--mode', 'backward', '-N', '64', '-L', str(args.L)],
                            npool=16)
    result = tuner.tune()
    with open(args.filename, 'w') as f:
        json.dump(result, f)
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/tune_cauchy.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch

import pytest

from einops import rearrange

from cauchy import cauchy_mult_torch, cauchy_mult_keops, cauchy_mult


def generate_data(batch_size, N, L, symmetric=True, device='cuda'):
    if not symmetric:
        v = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
        w = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
        z = torch.randn(L, dtype=torch.complex64, device=device)
    else:
        assert N % 2 == 0
        v_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
        v = torch.cat([v_half, v_half.conj()], dim=-1).requires_grad_(True)
        w_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
        w = torch.cat([w_half, w_half.conj()], dim=-1).requires_grad_(True)
        z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device=device))
    return v, z, w


def grad_to_half_grad(dx):
    dx_half, dx_half_conj = dx.chunk(2, dim=-1)
    return dx_half + dx_half_conj.conj()


# @pytest.mark.parametrize('L', [1024])
# @pytest.mark.parametrize('N', [64])
# def test_cauchy_mult_nonsymmetric(N, L):
#     device = 'cuda'
#     batch_size = 4
#     torch.random.manual_seed(2357)
#     v, z, w = generate_data(batch_size, N, L, symmetric=False, device=device)
#     out_torch = cauchy_mult_torch(v, z, w, symmetric=False)
#     out_keops = cauchy_mult_keops(v, z, w)
#     out = cauchy_mult(v, z, w, symmetric=False)
#     assert torch.allclose(out, out_torch, rtol=1e-4, atol=1e-4)
#     assert torch.allclose(out, out_keops, rtol=1e-4, atol=1e-4)
#     dout = torch.randn_like(out)
#     dv_torch, dw_torch = torch.autograd.grad(out_torch, (v, w), dout, retain_graph=True)
#     dv_keops, dw_keops = torch.autograd.grad(out_keops, (v, w), dout, retain_graph=True)
#     dv, dw = torch.autograd.grad(out, (v, w), dout, retain_graph=True)
#     assert torch.allclose(dv, dv_torch, rtol=1e-4, atol=1e-4)
#     assert torch.allclose(dv, dv_keops, rtol=1e-4, atol=1e-4)
#     assert torch.allclose(dw, dw_torch, rtol=1e-4, atol=1e-4)
#     assert torch.allclose(dw, dw_keops, rtol=1e-4, atol=1e-4)


@pytest.mark.parametrize('L', [3, 17, 489, 2**10, 1047, 2**11, 2**12, 2**13, 2**14, 2**18])
@pytest.mark.parametrize('N', [4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048])
def test_cauchy_mult_symmetric(N, L):
    # rtol, atol = (1e-4, 1e-4) if N <= 64 and L <= 1024 else (1e-3, 1e-3)
    atol = 1e-4
    tol_factor = 10.0  # Our error shouldn't be this much higher than Keops' error
    device = 'cuda'
    batch_size = 4
    torch.random.manual_seed(2357)
    v, z, w = generate_data(batch_size, N, L, symmetric=True, device=device)
    v_half = v[:, :N // 2].clone().detach().requires_grad_(True)
    w_half = w[:, :N // 2].clone().detach().requires_grad_(True)
    # out_torch = cauchy_mult_torch(v, z, w, symmetric=True)
    out_torch = cauchy_mult_torch(v.cdouble(), z.cdouble(), w.cdouble(), symmetric=True).cfloat()
    out_keops = cauchy_mult_keops(v, z, w)
    out = cauchy_mult(v_half, z, w_half, symmetric=True)
    relerr_out_keops = (out_keops - out_torch).abs() / out_torch.abs()
    relerr_out = (out - out_torch).abs() / out_torch.abs()
    print(f'Keops out relative error: max {relerr_out_keops.amax().item():.6f}, '
          f'mean {relerr_out_keops.mean().item():.6f}')
    print(f'out relative error: max {relerr_out.amax().item():.6f}, '
          f'mean {relerr_out.mean().item():.6f}')
    assert (relerr_out.amax() <= relerr_out_keops.amax() * tol_factor + atol)
    assert (relerr_out.mean() <= relerr_out_keops.mean() * tol_factor + atol)
    # assert torch.allclose(out, out_torch, rtol=rtol, atol=atol)
    # assert torch.allclose(out, out_keops, rtol=rtol, atol=atol)
    dout = torch.randn_like(out)
    dv_torch, dw_torch = torch.autograd.grad(out_torch, (v, w), dout, retain_graph=True)
    dv_torch, dw_torch = dv_torch[:, :N // 2], dw_torch[:, :N // 2]
    dv_keops, dw_keops = torch.autograd.grad(out_keops, (v, w), dout, retain_graph=True)
    dv_keops, dw_keops = grad_to_half_grad(dv_keops), grad_to_half_grad(dw_keops)
    dv, dw = torch.autograd.grad(out, (v_half, w_half), dout, retain_graph=True)
    relerr_dv_keops = (dv_keops - dv_torch).abs() / dv_torch.abs()
    relerr_dv = (dv - dv_torch).abs() / dv_torch.abs()
    relerr_dw_keops = (dw_keops - dw_torch).abs() / dw_torch.abs()
    relerr_dw = (dw - dw_torch).abs() / dw_torch.abs()
    print(f'Keops dv relative error: max {relerr_dv_keops.amax().item():.6f}, '
          f'mean {relerr_dv_keops.mean().item():.6f}')
    print(f'dv relative error: max {relerr_dv.amax().item():.6f}, '
          f'mean {relerr_dv.mean().item():.6f}')
    print(f'Keops dw relative error: max {relerr_dw_keops.amax().item():.6f}, '
          f'mean {relerr_dw_keops.mean().item():.6f}')
    print(f'dw relative error: max {relerr_dw.amax().item():.6f}, '
          f'mean {relerr_dw.mean().item():.6f}')
    assert (relerr_dv.amax() <= relerr_dv_keops.amax() * tol_factor + atol)
    assert (relerr_dv.mean() <= relerr_dv_keops.mean() * tol_factor + atol)
    assert (relerr_dw.amax() <= relerr_dw_keops.amax() * tol_factor + atol)
    assert (relerr_dw.mean() <= relerr_dw_keops.mean() * tol_factor + atol)
    # assert torch.allclose(dv, dv_torch, rtol=1e-4, atol=1e-4)
    # assert torch.allclose(dv, dv_keops, rtol=1e-4, atol=1e-4)
    # assert torch.allclose(dw, dw_torch, rtol=1e-4, atol=1e-4)
    # assert torch.allclose(dw, dw_keops, rtol=1e-4, atol=1e-4)
bart_ls-main
fairseq-py/fairseq/modules/extensions/cauchy/test_cauchy.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


def gen_forward():

    kernels = [3, 5, 7, 15, 31, 63, 127, 255]
    seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]

    head = """
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "lightconv_cuda.cuh"

std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) {

    at::DeviceGuard g(input.device());
    const auto minibatch = input.size(0);
    const auto numFeatures = input.size(1);
    const auto sequenceLength = input.size(2);

    const auto numHeads = filters.size(0);
    const auto filterSize = filters.size(1);

    const auto numFiltersInBlock = numFeatures / numHeads;

    const dim3 blocks(minibatch, numFeatures);

    auto output = at::zeros_like(input);
    auto stream = at::cuda::getCurrentCUDAStream();
"""

    sequence_if = """
    if (sequenceLength <= {seq}) {{
        switch(filterSize) {{
"""

    case_k = """
            case {k}:
"""

    main_block = """
                if (padding_l == {pad}) {{
                    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{
                        lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
                        <<<blocks, {b_size}, 0, stream>>>(
                            input.data<scalar_t>(),
                            filters.data<scalar_t>(),
                            minibatch,
                            sequenceLength,
                            numFeatures,
                            numFiltersInBlock,
                            output.data<scalar_t>());
                    }}));
                }} else
"""

    bad_padding = """
                {
                    std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
                }
                break;
"""

    bad_filter = """
            default:
                std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
        }
"""

    con_else = """
    } else
"""

    final_else = """
    {
        switch(filterSize) {
"""

    final_return = """
    }

    return {output};
}
"""

    with open("lightconv_cuda_forward.cu", "w") as forward:
        forward.write(head)
        for seq in seqs:
            forward.write(sequence_if.format(seq=seq))
            for k in kernels:
                forward.write(case_k.format(k=k))
                for pad in [k // 2, k - 1]:
                    forward.write(main_block.format(k=k, b_size=seq, pad=pad))
                forward.write(bad_padding)
            forward.write(bad_filter)
            forward.write(con_else)
        forward.write(final_else)
        for k in kernels:
            forward.write(case_k.format(k=k))
            for pad in [k // 2, k - 1]:
                forward.write(main_block.format(k=k, b_size=seq, pad=pad))
            forward.write(bad_padding)
        forward.write(bad_filter)
        forward.write(final_return)


def gen_backward():

    head = """
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "lightconv_cuda.cuh"

std::vector<at::Tensor> lightconv_cuda_backward(
        at::Tensor gradOutput,
        int padding_l,
        at::Tensor input,
        at::Tensor filters) {

    // gradWrtInput
    const int minibatch = input.size(0);
    const int numFeatures = input.size(1);
    const int sequenceLength = input.size(2);

    const int numHeads = filters.size(0);
    const int filterSize = filters.size(1);

    const dim3 gradBlocks(minibatch, numFeatures);
    const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);
    const dim3 weightGradSecondpassBlocks(numHeads, filterSize);

    const int numFiltersInBlock = numFeatures / numHeads;

    auto gradInput = at::zeros_like(input);
    auto gradFilters = at::zeros_like(filters);

    at::DeviceGuard g(input.device());
    auto stream = at::cuda::getCurrentCUDAStream();

    switch(filterSize) {
"""

    sequence_if = """
    if (sequenceLength <= {seq}) {{
"""

    case_k = """
        case {k}:
"""

    main_block = """
            if (padding_l == {p}) {{
                AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{
                    lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>
                    <<<gradBlocks, {b_size}, 0, stream>>>(
                        gradOutput.data<scalar_t>(),
                        filters.data<scalar_t>(),
                        minibatch,
                        sequenceLength,
                        numFeatures,
                        numFiltersInBlock,
                        gradInput.data<scalar_t>());
"""

    weight_grad_short = """
                    at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));
                    lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>
                    <<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(
                        input.data<scalar_t>(),
                        gradOutput.data<scalar_t>(),
                        minibatch,
                        sequenceLength,
                        numFeatures,
                        numFiltersInBlock,
                        numHeads,
                        tempSumGradFilters.data<float>()
                    );

                    lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>
                    <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
                        tempSumGradFilters.data<float>(),
                        minibatch,
                        numFiltersInBlock,
                        gradFilters.data<scalar_t>()
                    );
                }}));
            }} else
"""

    weight_grad = """
                    at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));
                    lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>
                    <<<gradBlocks, {b_size}, 0, stream>>>(
                        input.data<scalar_t>(),
                        gradOutput.data<scalar_t>(),
                        minibatch,
                        sequenceLength,
                        numFeatures,
                        numFiltersInBlock,
                        tempSumGradFilters.data<float>()
                    );

                    lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>
                    <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
                        tempSumGradFilters.data<float>(),
                        minibatch,
                        numFiltersInBlock,
                        gradFilters.data<scalar_t>()
                    );
                }}));
            }} else
"""

    bad_padding = """
            {
                std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
            }
"""

    breakout = """
            break;
"""

    bad_filter = """
        default:
            std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
"""

    con_else = """
    } else
"""

    final_else = """
    {
        switch(filterSize) {
"""

    last_return = """
    }

    return {gradInput, gradFilters};
}
"""

    kernels = [3, 5, 7, 15, 31, 63, 127, 255]
    seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
    thresh = [32, 32, 64, 128, 256, -1, -1, -1]
    max_mem = [-1, -1, -1, -1, -1, 192, 96, 64]

    with open("lightconv_cuda_backward.cu", "w") as backward:
        backward.write(head)
        for (k, t, mem) in zip(kernels, thresh, max_mem):
            backward.write(case_k.format(k=k))
            for seq in seqs:
                if (t == -1 or seq <= t) and (mem == -1 or seq < mem):
                    backward.write(sequence_if.format(seq=seq))
                    for p in [k // 2, k - 1]:
                        backward.write(main_block.format(k=k, b_size=seq, p=p))
                        backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
                    backward.write(bad_padding)
                else:
                    for p in [k // 2, k - 1]:
                        backward.write(main_block.format(k=k, b_size=32, p=p))
                        backward.write(weight_grad.format(k=k, b_size=32, p=p))
                    backward.write(bad_padding)
                    backward.write(breakout)
                    break
                backward.write(con_else)
            backward.write(bad_filter)
        backward.write(last_return)


if __name__ == "__main__":
    gen_forward()
    gen_backward()
bart_ls-main
fairseq-py/fairseq/modules/lightconv_layer/cuda_function_gen.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .lightconv_layer import LightconvLayer  # noqa
bart_ls-main
fairseq-py/fairseq/modules/lightconv_layer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import lightconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from torch import nn
from torch.autograd import Function


class lightconvFunction(Function):
    @staticmethod
    def forward(ctx, x, weights, padding_l):
        ctx.padding_l = padding_l
        outputs = lightconv_cuda.forward(x, weights, padding_l)
        variables = [x, weights]
        ctx.save_for_backward(*variables)
        return outputs[0]

    @staticmethod
    def backward(ctx, grad_output):
        outputs = lightconv_cuda.backward(
            grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors
        )
        grad_input, grad_weights = outputs
        return grad_input, grad_weights, None


@with_incremental_state
class LightconvLayer(nn.Module):
    def __init__(
        self,
        input_size,
        kernel_size=1,
        padding_l=None,
        weight_softmax=False,
        num_heads=1,
        weight_dropout=0.0,
        bias=False,
    ):
        super(LightconvLayer, self).__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.padding_l = padding_l
        self.num_heads = num_heads
        self.weight_softmax = weight_softmax
        self.weight_dropout_module = FairseqDropout(
            weight_dropout, module_name=self.__class__.__name__
        )

        self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.bias = None
        self.reset_parameters()

    def upgrade_state_dict_named(self, state_dict, name):
        prefix = name + "." if name != "" else ""
        for k, v in state_dict.items():
            if k.endswith(prefix + "weight"):
                if v.dim() == 3 and v.size(1) == 1:
                    state_dict[k] = v.squeeze(1)

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.0)

    def forward(self, x, incremental_state=None):

        # during inference time, incremental BMM is faster
        if incremental_state is not None:
            T, B, C = x.size()
            K, H = self.kernel_size, self.num_heads
            R = C // H
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                input_buffer = x.new()
            x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
            if self.kernel_size > 1:
                self._set_input_buffer(
                    incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
                )
            x_unfold = x_unfold.view(T * B * H, R, -1)

            weight = self.weight
            if self.weight_softmax:
                weight = F.softmax(weight.float(), dim=1).type_as(weight)

            weight = weight[:, -x_unfold.size(2) :]

            K = weight.size(1)

            weight = (
                weight.view(1, H, K)
                .expand(T * B, H, K)
                .contiguous()
                .view(T * B * H, K, 1)
            )

            weight = self.weight_dropout_module(weight)
            output = torch.bmm(x_unfold, weight)  # T*B*H x R x 1
            output = output.view(T, B, C)
            return output

        # during training time, use CUDA kernel
        else:
            x = x.permute(1, 2, 0).contiguous()
            weight = self.weight
            if self.weight_softmax:
                weight = F.softmax(self.weight, -1)
            if self.weight_dropout_module.p:
                weight = self.weight_dropout_module(weight)
            return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1)

    def reorder_incremental_state(self, incremental_state, new_order):
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, "input_buffer")

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(
            self, incremental_state, "input_buffer", new_buffer
        )

    def half(self):
        return self._apply(lambda t: t.half() if t.is_floating_point() else t)
bart_ls-main
fairseq-py/fairseq/modules/lightconv_layer/lightconv_layer.py
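A sketch of step-by-step incremental decoding with LightconvLayer, which exercises the buffered BMM path rather than the CUDA kernel (assumes the lightconv_cuda extension is built, since the module imports it at the top):

import torch
from fairseq.modules.lightconv_layer import LightconvLayer

layer = LightconvLayer(input_size=16, kernel_size=3, padding_l=2, num_heads=4)
state = {}
for t in range(5):
    x_t = torch.randn(1, 2, 16)                # (T=1, B=2, C=16): one step at a time
    y_t = layer(x_t, incremental_state=state)  # caches the last K-1 steps in `state`
    print(y_t.shape)                           # torch.Size([1, 2, 16])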
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name="lightconv_layer",
    ext_modules=[
        CUDAExtension(
            "lightconv_cuda",
            [
                "lightconv_cuda.cpp",
                "lightconv_cuda_kernel.cu",
            ],
        ),
    ],
    cmdclass={"build_ext": BuildExtension},
)
bart_ls-main
fairseq-py/fairseq/modules/lightconv_layer/setup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


def gen_forward():

    kernels = [3, 5, 7, 15, 31, 63, 127, 255]
    blocks = [32, 64, 128, 256]

    head = """
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "dynamicconv_cuda.cuh"

std::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) {

    at::DeviceGuard g(input.device());
    const auto minibatch = input.size(0);
    const auto numFeatures = input.size(1);
    const auto sequenceLength = input.size(2);

    const auto numHeads = weight.size(1);
    const auto filterSize = weight.size(2);

    const auto numFiltersInBlock = numFeatures / numHeads;
    const dim3 blocks(minibatch, numFeatures);

    auto output = at::zeros_like(input);
    auto stream = at::cuda::getCurrentCUDAStream();
"""

    switch = """
    switch(filterSize) {
"""

    case_k = """
        case {k}:
"""

    main_block = """
            if (padding_l == {pad}) {{
                AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{
                    dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
                    <<<blocks, {b_size}, 0, stream>>>(
                        input.data<scalar_t>(),
                        weight.data<scalar_t>(),
                        minibatch,
                        sequenceLength,
                        numFeatures,
                        numFiltersInBlock,
                        numHeads,
                        output.data<scalar_t>());
                }}));
            }} else
"""

    bad_padding = """
            {
                std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
            }
            break;\n
"""

    end = """
        default:
            std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
    }

    return {output};
}
"""

    with open("dynamicconv_cuda_forward.cu", "w") as forward:
        forward.write(head)
        forward.write(switch)
        for k in kernels:
            b_size = 32
            for b in blocks:
                if b > k:
                    b_size = b
                    break
            forward.write(case_k.format(k=k))
            for pad in [k // 2, k - 1]:
                forward.write(main_block.format(k=k, b_size=b_size, pad=pad))
            forward.write(bad_padding)
        forward.write(end)


def gen_backward():

    kernels = [3, 5, 7, 15, 31, 63, 127, 255]
    thresh = [512, 512, 512, 512, 512, 380, 256, 256]
    min_block = [64, 64, 64, 64, 64, 64, 128, 256]
    seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]

    head = """
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "dynamicconv_cuda.cuh"

std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {

    at::DeviceGuard g(input.device());
    const auto minibatch = input.size(0);
    const auto numFeatures = input.size(1);
    const auto sequenceLength = input.size(2);

    const auto numHeads = weight.size(1);
    const auto filterSize = weight.size(2);

    const auto numFiltersInBlock = numFeatures / numHeads;
    auto numChunks = 1;

    auto gradInput = at::zeros_like(input);
    auto gradWeight = at::zeros_like(weight);
    auto stream = at::cuda::getCurrentCUDAStream();

    dim3 blocks(minibatch, numHeads, numChunks);
"""

    sequence_if = """
    if (sequenceLength < {seq}) {{
        switch(filterSize) {{
"""

    case_k = """
            case {k}:
"""

    chunks_reset = """
                numChunks = int(ceilf(sequenceLength/float({b_size})));
                blocks = dim3(minibatch, numHeads, numChunks);
"""

    main_block = """
                if (padding_l == {p}) {{
                    AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{
                        dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t>
                        <<<blocks, {b_size}, 0, stream>>>(
                            gradOutput.data<scalar_t>(),
                            input.data<scalar_t>(),
                            weight.data<scalar_t>(),
                            minibatch,
                            sequenceLength,
                            numFeatures,
                            numFiltersInBlock,
                            numHeads,
                            gradWeight.data<scalar_t>(),
                            gradInput.data<scalar_t>());
                    }}));
                }} else
"""

    bad_padding = """
                {
                    std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
                }
                break;\n
"""

    bad_filter = """
            default:
                std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
        }
"""

    con_else = """
    } else
"""

    final_else = """
    {
        switch(filterSize) {
"""

    last_return = """
    }

    return {gradInput, gradWeight};
}
"""

    with open("dynamicconv_cuda_backward.cu", "w") as backward:
        backward.write(head)
        for seq in seqs:
            backward.write(sequence_if.format(seq=seq))
            for k, t, m in zip(kernels, thresh, min_block):
                backward.write(case_k.format(k=k))
                if seq <= t:
                    b_size = seq
                else:
                    b_size = m
                backward.write(chunks_reset.format(b_size=b_size))
                for p in [k // 2, k - 1]:
                    backward.write(main_block.format(k=k, b_size=b_size, p=p))
                backward.write(bad_padding)
            backward.write(bad_filter)
            backward.write(con_else)
        backward.write(final_else)
        for k, m in zip(kernels, min_block):
            backward.write(case_k.format(k=k))
            backward.write(chunks_reset.format(b_size=m))
            for p in [k // 2, k - 1]:
                backward.write(main_block.format(k=k, b_size=m, p=p))
            backward.write(bad_padding)
        backward.write(bad_filter)
        backward.write(last_return)


if __name__ == "__main__":
    gen_forward()
    gen_backward()
bart_ls-main
fairseq-py/fairseq/modules/dynamicconv_layer/cuda_function_gen.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .dynamicconv_layer import DynamicconvLayer  # noqa
bart_ls-main
fairseq-py/fairseq/modules/dynamicconv_layer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import dynamicconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d
from torch import nn
from torch.autograd import Function


class dynamicconvFunction(Function):
    @staticmethod
    def forward(ctx, x, weights, padding_l):
        ctx.padding_l = padding_l
        outputs = dynamicconv_cuda.forward(x, weights, padding_l)
        variables = [x, weights]
        ctx.save_for_backward(*variables)
        return outputs[0]

    @staticmethod
    def backward(ctx, grad_output):
        outputs = dynamicconv_cuda.backward(
            grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors
        )
        grad_input, grad_weights = outputs
        return grad_input, grad_weights, None


@with_incremental_state
class DynamicconvLayer(nn.Module):
    def __init__(
        self,
        input_size,
        kernel_size=1,
        padding_l=None,
        weight_softmax=False,
        num_heads=1,
        weight_dropout=0.0,
        bias=False,
        renorm_padding=False,
        conv_bias=False,
        query_size=None,
    ):
        super(DynamicconvLayer, self).__init__()
        self.input_size = input_size
        self.query_size = input_size if query_size is None else query_size
        self.kernel_size = kernel_size
        self.padding_l = padding_l
        self.num_heads = num_heads
        self.weight_softmax = weight_softmax
        self.weight_dropout_module = FairseqDropout(
            weight_dropout, module_name=self.__class__.__name__
        )
        self.renorm_padding = renorm_padding
        self.bias = bias

        self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias)
        if conv_bias:
            self.conv_bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.conv_bias = None
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight_linear.weight)
        if self.conv_bias is not None:
            nn.init.constant_(self.conv_bias, 0.0)
            nn.init.constant_(self.weight_linear.bias, 0.0)

    def forward(self, x, incremental_state=None, query=None, unfold=None):

        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        # R = C // H

        # during inference time, incremental BMM is faster
        if incremental_state is not None:
            unfold = (
                x.size(0) > 512 if unfold is None else unfold
            )  # use unfold mode as default for long sequence to save memory
            unfold = unfold or (incremental_state is not None)
            assert query is None

            if query is None:
                query = x
            if unfold:
                output = self._forward_unfolded(x, incremental_state, query)
            else:
                output = self._forward_expanded(x, incremental_state, query)

            if self.conv_bias is not None:
                output = output + self.conv_bias.view(1, 1, -1)

            return output

        # during training time, use CUDA kernel
        else:
            weight = self.weight_linear(x).view(T, B, H, K)
            if self.weight_softmax:
                weight = F.softmax(weight, dim=-1)
            if self.weight_dropout_module.p:
                weight = self.weight_dropout_module(weight)

            weight = weight.permute(1, 2, 3, 0).contiguous()
            self.filters = weight
            x = x.permute(1, 2, 0).contiguous()
            output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(
                2, 0, 1
            )
            if self.conv_bias is not None:
                output = output + self.conv_bias.view(1, 1, -1)
            return output

    def reorder_incremental_state(self, incremental_state, new_order):
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, "input_buffer")

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(
            self, incremental_state, "input_buffer", new_buffer
        )

    def _forward_unfolded(self, x, incremental_state, query):
        """The conventional implementation of convolutions: unfold the input
        with a window shifting to the right."""
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size

        weight = self.weight_linear(query).view(T * B * H, -1)

        # renorm_padding is only implemented in _forward_expanded
        assert not self.renorm_padding or incremental_state is not None

        if incremental_state is not None:
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                input_buffer = x.new()
            x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
            if self.kernel_size > 1:
                self._set_input_buffer(
                    incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
                )
            x_unfold = x_unfold.view(T * B * H, R, -1)
        else:
            padding_l = self.padding_l
            if K > T and padding_l == K - 1:
                weight = weight.narrow(1, K - T, T)
                K, padding_l = T, T - 1
            # unfold the input: T x B x C --> T' x B x C x K
            x_unfold = unfold1d(x, K, padding_l, 0)
            x_unfold = x_unfold.view(T * B * H, R, K)

        if self.weight_softmax and not self.renorm_padding:
            weight = F.softmax(weight, dim=1)
        weight = weight.narrow(1, 0, K)

        if incremental_state is not None:
            weight = weight[:, -x_unfold.size(2) :]
            K = weight.size(1)

        if self.weight_softmax and self.renorm_padding:
            weight = F.softmax(weight, dim=1)

        weight = self.weight_dropout_module(weight, inplace=False)

        output = torch.bmm(x_unfold, weight.unsqueeze(2))  # T*B*H x R x 1
        output = output.view(T, B, C)
        return output

    def _forward_expanded(self, x, incremental_state, query):
        """Turn the convolution filters into band matrices and do matrix multiplication.
        This is faster when the sequence is short, but less memory efficient.
        This is not used in the decoder during inference.
        """
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size
        weight = self.weight_linear(query).view(T * B * H, -1)

        if not self.renorm_padding:
            if self.weight_softmax:
                weight = F.softmax(weight, dim=1)
            weight = self.weight_dropout_module(weight, inplace=False)
        weight = weight.narrow(1, 0, K).contiguous()
        weight = weight.view(T, B * H, K).transpose(0, 1)

        x = x.view(T, B * H, R).transpose(0, 1)
        if self.weight_softmax and self.renorm_padding:
            # turn the convolution filters into band matrices
            weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf"))
            weight_expanded.as_strided(
                (B * H, T, K), (T * (T + K - 1), T + K, 1)
            ).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
            # normalize the weight over valid positions like self-attention
            weight_expanded = F.softmax(weight_expanded, dim=2)
            weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
        else:
            P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
            if K > T and P == K - 1:
                weight = weight.narrow(2, K - T, T)
                K, P = T, T - 1
            # turn the convolution filters into band matrices
            weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
            weight_expanded.as_strided(
                (B * H, T, K), (T * (T + K - 1), T + K, 1)
            ).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, P, T)  # B*H x T x T
        output = torch.bmm(weight_expanded, x)
        output = output.transpose(0, 1).contiguous().view(T, B, C)
        return output
bart_ls-main
fairseq-py/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py
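A minimal usage sketch for the DynamicconvLayer above, assuming the dynamicconv_cuda extension has been built (see the setup.py that follows) and a CUDA device is available; the hyperparameters here are illustrative, not prescribed. Inputs are time-first (T, B, C) tensors, and input_size must be divisible by num_heads.

import torch
from fairseq.modules.dynamicconv_layer.dynamicconv_layer import DynamicconvLayer

# kernel_size=3 with padding_l=2 gives a left-padded (causal) convolution
layer = DynamicconvLayer(
    input_size=64, kernel_size=3, padding_l=2, num_heads=4, weight_softmax=True
).cuda()
x = torch.randn(10, 2, 64, device="cuda")  # (T, B, C)
out = layer(x)  # no incremental_state -> training path via the CUDA kernel
assert out.shape == x.shape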
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension


setup(
    name="dynamicconv_layer",
    ext_modules=[
        CUDAExtension(
            name="dynamicconv_cuda",
            sources=[
                "dynamicconv_cuda.cpp",
                "dynamicconv_cuda_kernel.cu",
            ],
        ),
    ],
    cmdclass={"build_ext": BuildExtension},
)
bart_ls-main
fairseq-py/fairseq/modules/dynamicconv_layer/setup.py
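The extension above is typically built in place so that the `import dynamicconv_cuda` in dynamicconv_layer.py resolves. A sketch of one way to drive the build from Python; the working-directory path is an assumption about the checkout layout, and a CUDA toolchain must be installed.

import subprocess

subprocess.run(
    ["python", "setup.py", "build_ext", "--inplace"],
    cwd="fairseq/modules/dynamicconv_layer",  # assumed location of setup.py
    check=True,
)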
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import BaseWrapperDataset


class OffsetTokensDataset(BaseWrapperDataset):
    def __init__(self, dataset, offset):
        super().__init__(dataset)
        self.offset = offset

    def __getitem__(self, idx):
        return self.dataset[idx] + self.offset
bart_ls-main
fairseq-py/fairseq/data/offset_tokens_dataset.py
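A small sketch of OffsetTokensDataset above: every item is shifted by a constant, typically to map label ids past a dictionary's special symbols. ToyDataset is a hypothetical stand-in for a real token dataset.

import torch
from fairseq.data.offset_tokens_dataset import OffsetTokensDataset

class ToyDataset(torch.utils.data.Dataset):
    def __init__(self, items):
        self.items = items

    def __getitem__(self, idx):
        return self.items[idx]

    def __len__(self):
        return len(self.items)

labels = ToyDataset([torch.tensor([4]), torch.tensor([5])])
shifted = OffsetTokensDataset(labels, offset=-4)  # e.g. undo dictionary.nspecial
print(shifted[1])  # tensor([1])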
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from collections import OrderedDict

import torch
from torch.utils.data.dataloader import default_collate

from . import FairseqDataset


def _flatten(dico, prefix=None):
    """Flatten a nested dictionary."""
    new_dico = OrderedDict()
    if isinstance(dico, dict):
        prefix = prefix + "." if prefix is not None else ""
        for k, v in dico.items():
            if v is None:
                continue
            new_dico.update(_flatten(v, prefix + k))
    elif isinstance(dico, list):
        for i, v in enumerate(dico):
            new_dico.update(_flatten(v, prefix + ".[" + str(i) + "]"))
    else:
        new_dico = OrderedDict({prefix: dico})
    return new_dico


def _unflatten(dico):
    """Unflatten a flattened dictionary into a nested dictionary."""
    new_dico = OrderedDict()
    for full_k, v in dico.items():
        full_k = full_k.split(".")
        node = new_dico
        for k in full_k[:-1]:
            if k.startswith("[") and k.endswith("]"):
                k = int(k[1:-1])
            if k not in node:
                node[k] = OrderedDict()
            node = node[k]
        node[full_k[-1]] = v
    return new_dico


class NestedDictionaryDataset(FairseqDataset):
    def __init__(self, defn, sizes=None):
        super().__init__()
        self.defn = _flatten(defn)
        self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes

        first = None
        for v in self.defn.values():
            if not isinstance(
                v,
                (
                    FairseqDataset,
                    torch.utils.data.Dataset,
                ),
            ):
                raise ValueError("Expected Dataset but found: {}".format(v.__class__))
            first = first or v
            if len(v) > 0:
                assert len(v) == len(first), "dataset lengths must match"

        self._len = len(first)

    def __getitem__(self, index):
        return OrderedDict((k, ds[index]) for k, ds in self.defn.items())

    def __len__(self):
        return self._len

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        if len(samples) == 0:
            return {}
        sample = OrderedDict()
        for k, ds in self.defn.items():
            try:
                sample[k] = ds.collater([s[k] for s in samples])
            except NotImplementedError:
                sample[k] = default_collate([s[k] for s in samples])
        return _unflatten(sample)

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return max(s[index] for s in self.sizes)

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        if len(self.sizes) == 1:
            return self.sizes[0][index]
        else:
            return (s[index] for s in self.sizes)

    @property
    def supports_prefetch(self):
        """Whether this dataset supports prefetching."""
        return any(ds.supports_prefetch for ds in self.defn.values())

    def prefetch(self, indices):
        """Prefetch the data required for this epoch."""
        for ds in self.defn.values():
            if getattr(ds, "supports_prefetch", False):
                ds.prefetch(indices)

    @property
    def can_reuse_epoch_itr_across_epochs(self):
        return all(ds.can_reuse_epoch_itr_across_epochs for ds in self.defn.values())

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        for ds in self.defn.values():
            ds.set_epoch(epoch)
bart_ls-main
fairseq-py/fairseq/data/nested_dictionary_dataset.py
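A sketch of the key-flattening convention NestedDictionaryDataset above relies on: nested dict keys are dot-joined and list positions become ".[i]" segments, with _unflatten inverting the mapping.

from fairseq.data.nested_dictionary_dataset import _flatten, _unflatten

flat = _flatten({"net_input": {"src_tokens": 1, "src_lengths": 2}, "target": 3})
print(list(flat.keys()))  # ['net_input.src_tokens', 'net_input.src_lengths', 'target']
print(_unflatten(flat)["net_input"]["src_tokens"])  # 1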
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import time from collections import OrderedDict from typing import Dict, List import numpy as np from fairseq.data import data_utils from . import FairseqDataset logger = logging.getLogger(__name__) class MultiCorpusDataset(FairseqDataset): """ Stores multiple instances of FairseqDataset together. Requires each instance to be the same dataset, as the collate method needs to work on batches with samples from each dataset. Allows specifying a distribution over the datasets to use. Note that unlike MultiCorpusSampledDataset, this distribution allows sampling for each item, rather than on a batch level. Each time ordered_indices() is called, a new sample is generated with the specified distribution. Args: datasets: a OrderedDict of FairseqDataset instances. distribution: a List containing the probability of getting an utterance from corresponding dataset seed: random seed for sampling the datsets sort_indices: if true, will sort the ordered indices by size batch_sample: if true, will ensure each batch is from a single dataset """ def __init__( self, datasets: Dict[str, FairseqDataset], distribution: List[float], seed: int, sort_indices: bool = False, batch_sample: bool = False, distributed_rank=None, ): super().__init__() assert isinstance(datasets, OrderedDict) assert len(datasets) == len(distribution) assert sum(distribution) == 1 self.datasets = datasets self.distribution = distribution self.seed = seed self.sort_indices = sort_indices self.batch_sample = batch_sample self.distributed_rank = distributed_rank # Avoid repeated conversions to list later self.dataset_list = list(datasets.values()) self.total_num_instances = 0 first_dataset = list(self.datasets.values())[0] self.dataset_offsets = [] for dataset in datasets.values(): assert isinstance(dataset, FairseqDataset) assert type(dataset) is type(first_dataset) self.dataset_offsets.append(self.total_num_instances) self.total_num_instances += len(dataset) def ordered_indices(self): start = time.time() with data_utils.numpy_seed(self.seed, self.epoch): logger.info(f"sampling new dataset with seed {self.seed} epoch {self.epoch}") sampled_indices = [] num_selected_instances = 0 # For each dataset i, sample self.distribution[i] * self.total_num_instances for i, key in enumerate(self.datasets): if i < len(self.datasets) - 1: num_instances = int(self.distribution[i] * self.total_num_instances) high = self.dataset_offsets[i + 1] else: num_instances = self.total_num_instances - num_selected_instances high = self.total_num_instances logger.info(f"sampling {num_instances} from {key} dataset") num_selected_instances += num_instances # First, add k copies of the dataset where k = num_instances // len(dataset). # This ensures an equal distribution of the data points as much as possible. 
# For the remaining entries randomly sample them dataset_size = len(self.datasets[key]) num_copies = num_instances // dataset_size dataset_indices = ( np.random.permutation(high - self.dataset_offsets[i]) + self.dataset_offsets[i] )[: num_instances - num_copies * dataset_size] if num_copies > 0: sampled_indices += list( np.concatenate( ( np.repeat( np.arange(self.dataset_offsets[i], high), num_copies ), dataset_indices, ) ) ) else: sampled_indices += list(dataset_indices) assert ( len(sampled_indices) == self.total_num_instances ), f"{len(sampled_indices)} vs {self.total_num_instances}" np.random.shuffle(sampled_indices) if self.sort_indices: sampled_indices.sort(key=lambda i: self.num_tokens(i)) logger.info( "multi_corpus_dataset ordered_indices took {}s".format( time.time() - start ) ) return np.array(sampled_indices, dtype=np.int64) def _map_index(self, index: int): """ If dataset A has length N and dataset B has length M then index 1 maps to index 1 of dataset A, and index N + 1 maps to index 1 of B. """ counter = 0 for key, dataset in self.datasets.items(): if index < counter + len(dataset): return index - counter, key counter += len(dataset) raise ValueError( "Invalid index: {}, max: {}".format(index, self.total_num_instances) ) def __len__(self): """ Length of this dataset is the sum of individual datasets """ return self.total_num_instances def __getitem__(self, index): new_index, key = self._map_index(index) try: item = self.datasets[key][new_index] item["full_id"] = index return item except Exception as e: e.args = (f"Error from {key} dataset", *e.args) raise def collater(self, samples): """ If we are doing batch sampling, then pick the right collater to use. Otherwise we assume all collaters are the same. """ if len(samples) == 0: return None if "full_id" in samples[0]: _, key = self._map_index(samples[0]["full_id"]) try: batch = self.datasets[key].collater(samples) except Exception: print(f"Collating failed for key {key}", flush=True) raise return batch else: # Subclasses may override __getitem__ to not specify full_id return list(self.datasets.values())[0].collater(samples) def num_tokens(self, index: int): index, key = self._map_index(index) return self.datasets[key].num_tokens(index) def size(self, index: int): index, key = self._map_index(index) return self.datasets[key].size(index) @property def can_reuse_epoch_itr_across_epochs(self): return False def set_epoch(self, epoch, **unused): super().set_epoch(epoch) logger.info(f"setting epoch of multi_corpus_dataset to {epoch}") self.epoch = epoch @property def supports_prefetch(self): return False @property def supports_fetch_outside_dataloader(self): return all( self.datasets[key].supports_fetch_outside_dataloader for key in self.datasets ) def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): if not self.batch_sample: return super().batch_by_size( indices, max_tokens, max_sentences, required_batch_size_multiple ) dataset_indices = {key: [] for key in self.datasets} for i in indices: _, key = self._map_index(i) dataset_indices[key].append(i) batches = [] for key in dataset_indices: cur_batches = super().batch_by_size( np.array(dataset_indices[key], dtype=np.int64), max_tokens, max_sentences, required_batch_size_multiple, ) logger.info(f"Created {len(cur_batches)} batches for dataset {key}") batches += cur_batches # If this dataset is used in a distributed training setup, # then shuffle such that the order is seeded by the distributed rank # as well if self.distributed_rank 
is not None: with data_utils.numpy_seed(self.seed, self.epoch, self.distributed_rank): np.random.shuffle(batches) return batches
bart_ls-main
fairseq-py/fairseq/data/multi_corpus_dataset.py
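A numpy sketch of the per-dataset sampling rule inside ordered_indices() above: take k = n // len(dataset) full copies, then draw the remainder without replacement, so examples are covered as evenly as possible. The numbers are illustrative.

import numpy as np

dataset_len, num_instances, offset = 5, 12, 100
num_copies = num_instances // dataset_len  # two full passes over the dataset
remainder = (np.random.permutation(dataset_len) + offset)[
    : num_instances - num_copies * dataset_len
]  # two extra, randomly chosen indices
indices = np.concatenate(
    [np.repeat(np.arange(offset, offset + dataset_len), num_copies), remainder]
)
assert len(indices) == num_instances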
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import bisect

import numpy as np
from torch.utils.data.dataloader import default_collate

from . import FairseqDataset


class ConcatDataset(FairseqDataset):
    @staticmethod
    def cumsum(sequence, sample_ratios):
        r, s = [], 0
        for e, ratio in zip(sequence, sample_ratios):
            curr_len = int(ratio * len(e))
            r.append(curr_len + s)
            s += curr_len
        return r

    def __init__(self, datasets, sample_ratios=1):
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, "datasets should not be an empty iterable"
        self.datasets = list(datasets)
        if isinstance(sample_ratios, int):
            sample_ratios = [sample_ratios] * len(self.datasets)
        self.sample_ratios = sample_ratios
        self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
        self.real_sizes = [len(d) for d in self.datasets]

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx][sample_idx]

    def _get_dataset_and_sample_index(self, idx: int):
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        sample_idx = sample_idx % self.real_sizes[dataset_idx]
        return dataset_idx, sample_idx

    def collater(self, samples, **extra_args):
        # For now only supports datasets with same underlying collater implementations
        if hasattr(self.datasets[0], "collater"):
            return self.datasets[0].collater(samples, **extra_args)
        else:
            return default_collate(samples, **extra_args)

    def size(self, idx: int):
        """
        Return an example's size as a float or tuple.
        """
        dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx].size(sample_idx)

    def num_tokens(self, index: int):
        return np.max(self.size(index))

    def attr(self, attr: str, index: int):
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
        return getattr(self.datasets[dataset_idx], attr, None)

    @property
    def sizes(self):
        _dataset_sizes = []
        for ds, sr in zip(self.datasets, self.sample_ratios):
            if isinstance(ds.sizes, np.ndarray):
                _dataset_sizes.append(np.tile(ds.sizes, sr))
            else:
                # Only support underlying dataset with single size array.
                assert isinstance(ds.sizes, list)
                _dataset_sizes.append(np.tile(ds.sizes[0], sr))
        return np.concatenate(_dataset_sizes)

    @property
    def supports_prefetch(self):
        return all(d.supports_prefetch for d in self.datasets)

    def ordered_indices(self):
        """
        Returns indices sorted by length. So less padding is needed.
        """
        if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1:
            # special handling for concatenating lang_pair_datasets
            indices = np.arange(len(self))
            sizes = self.sizes
            tgt_sizes = (
                sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
            )
            src_sizes = (
                sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
            )
            # sort by target length, then source length
            if tgt_sizes is not None:
                indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
            return indices[np.argsort(src_sizes[indices], kind="mergesort")]
        else:
            return np.argsort(self.sizes)

    def prefetch(self, indices):
        frm = 0
        for to, ds in zip(self.cumulative_sizes, self.datasets):
            real_size = len(ds)
            if getattr(ds, "supports_prefetch", False):
                ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
            frm = to

    @property
    def can_reuse_epoch_itr_across_epochs(self):
        return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets)

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        for ds in self.datasets:
            if hasattr(ds, "set_epoch"):
                ds.set_epoch(epoch)
bart_ls-main
fairseq-py/fairseq/data/concat_dataset.py
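A sketch of ConcatDataset's index arithmetic above: bisect over the (possibly upsampled) cumulative sizes picks the dataset, and a modulo over the real size handles sample_ratios greater than one.

import bisect

cumulative_sizes = [4, 10]  # two datasets contributing 4 and 6 indices
real_sizes = [4, 6]

def map_index(idx):
    dataset_idx = bisect.bisect_right(cumulative_sizes, idx)
    sample_idx = idx if dataset_idx == 0 else idx - cumulative_sizes[dataset_idx - 1]
    return dataset_idx, sample_idx % real_sizes[dataset_idx]

print(map_index(3))  # (0, 3)
print(map_index(4))  # (1, 0)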
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import BaseWrapperDataset


class ReplaceDataset(BaseWrapperDataset):
    """Replaces tokens found in the dataset by a specified replacement token

    Args:
        dataset (~torch.utils.data.Dataset): dataset to replace tokens in
        replace_map(Dictionary[int,int]): map of token to replace -> replacement token
        offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be
            as many as the number of objects returned by the underlying dataset __getitem__ method.
    """

    def __init__(self, dataset, replace_map, offsets):
        super().__init__(dataset)
        assert len(replace_map) > 0
        self.replace_map = replace_map
        self.offsets = offsets

    def __getitem__(self, index):
        item = self.dataset[index]
        is_tuple = isinstance(item, tuple)
        srcs = item if is_tuple else [item]

        for offset, src in zip(self.offsets, srcs):
            for k, v in self.replace_map.items():
                src_off = src[offset:] if offset >= 0 else src[:offset]
                src_off.masked_fill_(src_off == k, v)

        item = srcs if is_tuple else srcs[0]
        return item
bart_ls-main
fairseq-py/fairseq/data/replace_dataset.py
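A toy sketch of ReplaceDataset above: token ids in replace_map are rewritten in place, skipping the first offset positions (or the trailing ones when the offset is negative). ToyDataset is a hypothetical stand-in.

import torch
from fairseq.data.replace_dataset import ReplaceDataset

class ToyDataset(torch.utils.data.Dataset):
    def __getitem__(self, idx):
        return torch.tensor([0, 7, 7, 2])

    def __len__(self):
        return 1

replaced = ReplaceDataset(ToyDataset(), replace_map={7: 3}, offsets=[1])
print(replaced[0])  # tensor([0, 3, 3, 2]) -- position 0 is left untouched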
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from . import FairseqDataset def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True): """Backtranslate a list of samples. Given an input (*samples*) of the form: [{'id': 1, 'source': 'hallo welt'}] this will return: [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}] Args: samples (List[dict]): samples to backtranslate. Individual samples are expected to have a 'source' key, which will become the 'target' after backtranslation. collate_fn (callable): function to collate samples into a mini-batch generate_fn (callable): function to generate backtranslations cuda (bool): use GPU for generation (default: ``True``) Returns: List[dict]: an updated list of samples with a backtranslated source """ collated_samples = collate_fn(samples) s = utils.move_to_cuda(collated_samples) if cuda else collated_samples generated_sources = generate_fn(s) id_to_src = {sample["id"]: sample["source"] for sample in samples} # Go through each tgt sentence in batch and its corresponding best # generated hypothesis and create a backtranslation data pair # {id: id, source: generated backtranslation, target: original tgt} return [ { "id": id.item(), "target": id_to_src[id.item()], "source": hypos[0]["tokens"].cpu(), } for id, hypos in zip(collated_samples["id"], generated_sources) ] class BacktranslationDataset(FairseqDataset): """ Sets up a backtranslation dataset which takes a tgt batch, generates a src using a tgt-src backtranslation function (*backtranslation_fn*), and returns the corresponding `{generated src, input tgt}` batch. Args: tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be backtranslated. Only the source side of this dataset will be used. After backtranslation, the source sentences in this dataset will be returned as the targets. src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated sentences. tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of sentences to be backtranslated. backtranslation_fn (callable, optional): function to call to generate backtranslations. This is typically the `generate` method of a :class:`~fairseq.sequence_generator.SequenceGenerator` object. Pass in None when it is not available at initialization time, and use set_backtranslation_fn function to set it when available. output_collater (callable, optional): function to call on the backtranslated samples to create the final batch (default: ``tgt_dataset.collater``). cuda: use GPU for generation """ def __init__( self, tgt_dataset, src_dict, tgt_dict=None, backtranslation_fn=None, output_collater=None, cuda=True, **kwargs ): self.tgt_dataset = tgt_dataset self.backtranslation_fn = backtranslation_fn self.output_collater = ( output_collater if output_collater is not None else tgt_dataset.collater ) self.cuda = cuda if torch.cuda.is_available() else False self.src_dict = src_dict self.tgt_dict = tgt_dict def __getitem__(self, index): """ Returns a single sample from *tgt_dataset*. Note that backtranslation is not applied in this step; use :func:`collater` instead to backtranslate a batch of samples. 
""" return self.tgt_dataset[index] def __len__(self): return len(self.tgt_dataset) def set_backtranslation_fn(self, backtranslation_fn): self.backtranslation_fn = backtranslation_fn def collater(self, samples): """Merge and backtranslate a list of samples to form a mini-batch. Using the samples from *tgt_dataset*, load a collated target sample to feed to the backtranslation model. Then take the backtranslation with the best score as the source and the original input as the target. Note: we expect *tgt_dataset* to provide a function `collater()` that will collate samples into the format expected by *backtranslation_fn*. After backtranslation, we will feed the new list of samples (i.e., the `(backtranslated source, original source)` pairs) to *output_collater* and return the result. Args: samples (List[dict]): samples to backtranslate and collate Returns: dict: a mini-batch with keys coming from *output_collater* """ if samples[0].get("is_dummy", False): return samples samples = backtranslate_samples( samples=samples, collate_fn=self.tgt_dataset.collater, generate_fn=(lambda net_input: self.backtranslation_fn(net_input)), cuda=self.cuda, ) return self.output_collater(samples) def num_tokens(self, index): """Just use the tgt dataset num_tokens""" return self.tgt_dataset.num_tokens(index) def ordered_indices(self): """Just use the tgt dataset ordered_indices""" return self.tgt_dataset.ordered_indices() def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``. Note: we use *tgt_dataset* to approximate the length of the source sentence, since we do not know the actual length until after backtranslation. """ tgt_size = self.tgt_dataset.size(index)[0] return (tgt_size, tgt_size) @property def supports_prefetch(self): return getattr(self.tgt_dataset, "supports_prefetch", False) def prefetch(self, indices): return self.tgt_dataset.prefetch(indices)
bart_ls-main
fairseq-py/fairseq/data/backtranslation_dataset.py
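A sketch of the backtranslate_samples() contract above, with stubbed collate and generate functions (no real model; token ids and shapes are made up). The best generated hypothesis becomes the new source and the original source becomes the target.

import torch
from fairseq.data.backtranslation_dataset import backtranslate_samples

samples = [{"id": 1, "source": torch.tensor([5, 6, 2])}]

def collate_fn(batch):
    return {"id": torch.LongTensor([s["id"] for s in batch])}

def generate_fn(batch):
    # one list of hypotheses per input, best first
    return [[{"tokens": torch.tensor([9, 8, 2])}]]

out = backtranslate_samples(samples, collate_fn, generate_fn, cuda=False)
print(out[0]["source"], out[0]["target"])  # tensor([9, 8, 2]) tensor([5, 6, 2])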
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import logging # from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig from fairseq.data.denoising_dataset import collate import math from collections import Counter from . import FairseqDataset, data_utils logger = logging.getLogger(__name__) def _score_ngrams(target_ngrams, prediction_ngrams): """Compute n-gram overlap scores each ngram is counted once as in Pegasus paper """ target_ngrams = set(target_ngrams.keys()) prediction_ngrams = set(prediction_ngrams.keys()) intersection_ngrams_count = len(target_ngrams.intersection(prediction_ngrams)) target_ngrams_count = len(target_ngrams) prediction_ngrams_count = len(prediction_ngrams) precision = intersection_ngrams_count / max(prediction_ngrams_count, 1) recall = intersection_ngrams_count / max(target_ngrams_count, 1) if precision + recall > 0: return 2 * precision * recall / (precision + recall) else: return 0.0 class PegasusDataset(FairseqDataset): """ A wrapper around TokenBlockDataset for BART dataset. Args: dataset (TokenBlockDataset): dataset to wrap sizes (List[int]): sentence lengths vocab (~fairseq.data.Dictionary): vocabulary mask_idx (int): dictionary index used for masked token mask_whole_words: only mask whole words. This should be a byte mask over vocab indices, indicating whether it is the beginning of a word. We will extend any mask to encompass the whole word. shuffle (bool, optional): shuffle the elements before batching. Default: ``True`` seed: Seed for random number generator for reproducibility. args: argparse arguments. """ def __init__( self, dataset, sizes, vocab, shuffle, seed, max_target_length=1024, min_source_length=None, eos=None, truncate_target=False, mask_ratio=0.15, pad_to_multiple=1, ): self.dataset = dataset self.sizes = sizes self.vocab = vocab self.shuffle = shuffle self.seed = seed self.min_source_length = min_source_length self.truncate_target = truncate_target self.max_target_length = max_target_length self.mask_ratio = mask_ratio self.eos = eos if eos is not None else vocab.eos() self.full_stop_index = self.vocab.index("13") self.sent_mask_idx = self.vocab.index("<sent_mask>") # bpe_cfg = GPT2BPEConfig # self.bpe = GPT2BPE(bpe_cfg) # breakpoint() # partial_stops = ';!,' # TODO other punctuations? 
# partial_stops_bpe = [self.bpe.encode(c) for c in partial_stops] # breakpoint() self.partial_stop_indices = [self.vocab.index(c) for c in ['26', '11', '0']] self.epoch = 0 self.pad_to_multiple = pad_to_multiple @property def can_reuse_epoch_itr_across_epochs(self): return True # only the noise changes, not item sizes def set_epoch(self, epoch, **unused): self.epoch = epoch def __getitem__(self, index): with data_utils.numpy_seed(self.seed, self.epoch, index): tokens = self.dataset[index] assert tokens[-1] == self.eos source, target = self.search_primaries(tokens) assert (source >= 0).all() assert (source[1:-1] >= 1).all() assert (source <= len(self.vocab)).all() assert source[0] == self.vocab.bos() assert source[-1] == self.eos return { "id": index, "source": source, "target": target, } def __len__(self): return len(self.dataset) def search_primaries(self, source): tokens = source[1:-1] full_stops = tokens == self.full_stop_index full_stops[-1] = 1 sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2 num_sentences = sentence_ends.size(0) if num_sentences < 2: # backoff to more punctuations full_stops = torch.zeros_like(tokens) for idx in range(len(tokens)): full_stops[idx] = tokens[idx] in self.partial_stop_indices full_stops[-2] = 1 sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2 num_sentences = sentence_ends.size(0) if num_sentences < 2: # backoff to simple denoising return self.random_delete(source) sentence_scores = [] all_gram_counter = Counter(self.vocab.string(tokens).split()) for i in range(num_sentences): start = sentence_ends[i - 1] if i > 0 else 1 end = sentence_ends[i] sentence = tokens[start: end] score = self.score_sentence(sentence, all_gram_counter) sentence_scores.append( (score, (start, end)) ) sentence_scores.sort(reverse=True, key=lambda x:x[0]) top_m = math.ceil(len(sentence_scores)*self.mask_ratio) tgt_len = 0 tgt_spans = [] for idx in range(top_m): s, e = sentence_scores[idx][1] tgt_len += e - s + 1 tgt_spans.append((s, e)) if tgt_len >= self.max_target_length - 2: break tgt_spans.sort(key=lambda x:x[0]) last_end = 0 src_tokens, tgt_tokens = [], [] for span in tgt_spans: src_tokens.append(tokens[last_end:span[0]]) # TODO add mask src_tokens.append(torch.tensor([self.sent_mask_idx])) assert len(tokens[span[0]:span[1]]) > 0 tgt_tokens.append(tokens[span[0]:span[1]]) last_end = span[1] src_seq = torch.cat(src_tokens) tgt_seq = torch.cat(tgt_tokens) input = torch.cat([source[:1], src_seq, source[-1:]], dim=-1) tgt_seq = tgt_seq[:self.max_target_length - 2] target = torch.cat([source[:1], tgt_seq, source[-1:]], dim=-1) return input, target def score_sentence(self, sent, all_gram_counter): # str_pred = self.bpe.decode(self.vocab.string(pred)) # str_ref = self.bpe.decode(self.vocab.string(ref)) str_sent = self.vocab.string(sent) sent_counter = Counter(str_sent.split()) rest_counter = all_gram_counter - sent_counter return _score_ngrams(rest_counter, sent_counter) def random_delete(self, source): tokens = source[1:-1] input, output = self.random_span(tokens) input = torch.cat([source[:1],input, source[-1:]]) target = torch.cat([source[:1], output, source[-1:]]) return input, target def random_span(self, tokens): length = len(tokens) num_noise_tokens = int(np.round(length * self.mask_ratio)) # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens. 
num_noise_tokens = min(max(num_noise_tokens, 1), length - 1) num_remain_tokens = length - num_noise_tokens separate = torch.randint(0, num_remain_tokens + 1, (1,)) output = tokens[separate:separate+num_noise_tokens] input = torch.cat([tokens[:separate], torch.tensor([self.sent_mask_idx]), tokens[separate+num_noise_tokens:]]) return input, output def collater(self, samples, pad_to_length=None): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch of data """ return collate( samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length, pad_to_multiple=self.pad_to_multiple ) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" return self.sizes[index] def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return self.compute_lengths(self.sizes[index]) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: indices = np.random.permutation(len(self)) else: indices = np.arange(len(self)) if self.min_source_length: ignored = indices[self.sizes[indices] < self.min_source_length].tolist() indices = indices[self.sizes[indices] >= self.min_source_length] if len(ignored) > 0: logger.warning( ( "{:,} samples have invalid sizes and will be skipped, " "min_positions={}, first few sample ids={}" ).format(len(ignored), self.min_source_length, ignored[:10]) ) return indices[np.argsort(self.sizes[indices], kind="mergesort")] def filter_indices_by_size(self, indices, max_sizes): """ customized hacky funcion to reduce the time for building data iterator """ if isinstance(max_sizes, float) or isinstance(max_sizes, int) or self.truncate_target: # if truncating elsewhere, then ignore the target limit if isinstance(max_sizes, tuple): max_sizes = max_sizes[0] if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray): ignored = indices[self.sizes[indices] > max_sizes].tolist() indices = indices[self.sizes[indices] <= max_sizes] elif ( hasattr(self, "sizes") and isinstance(self.sizes, list) and len(self.sizes) == 1 ): ignored = indices[self.sizes[0][indices] > max_sizes].tolist() indices = indices[self.sizes[0][indices] <= max_sizes] else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored def prefetch(self, indices): self.src.prefetch(indices) self.tgt.prefetch(indices) @property def supports_prefetch(self): return ( hasattr(self.src, "supports_prefetch") and self.src.supports_prefetch and hasattr(self.tgt, "supports_prefetch") and self.tgt.supports_prefetch )
bart_ls-main
fairseq-py/fairseq/data/pegasus_dataset.py
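A sketch of the scoring primitive PegasusDataset above is built on: _score_ngrams computes an F-measure over distinct n-grams (each counted once), shown here on toy unigram counters.

from collections import Counter
from fairseq.data.pegasus_dataset import _score_ngrams

sentence = Counter("the cat sat".split())
rest_of_doc = Counter("the dog sat down".split())
print(_score_ngrams(rest_of_doc, sentence))  # 2*P*R/(P+R) = 4/7 ~ 0.571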
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import FairseqDataset


class IdDataset(FairseqDataset):
    def __getitem__(self, index):
        return index

    def __len__(self):
        return 0

    def collater(self, samples):
        return torch.tensor(samples)
bart_ls-main
fairseq-py/fairseq/data/id_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class PrependDataset(BaseWrapperDataset):
    def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
        super().__init__(dataset)
        self.prepend_getter = prepend_getter
        self.ensure_first_token = ensure_first_token_is

    def __getitem__(self, idx):
        item = self.dataset[idx]
        is_tuple = isinstance(item, tuple)
        src = item[0] if is_tuple else item

        assert self.ensure_first_token is None or src[0] == self.ensure_first_token
        prepend_idx = self.prepend_getter(self.dataset, idx)
        assert isinstance(prepend_idx, int)
        src[0] = prepend_idx
        item = tuple((src,) + item[1:]) if is_tuple else src
        return item
bart_ls-main
fairseq-py/fairseq/data/prepend_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict from typing import Callable, Dict, List import numpy as np from . import FairseqDataset def uniform_sampler(x): # Sample from uniform distribution return np.random.choice(x, 1).item() class MultiCorpusSampledDataset(FairseqDataset): """ Stores multiple instances of FairseqDataset together and in every iteration creates a batch by first sampling a dataset according to a specified probability distribution and then getting instances from that dataset. Args: datasets: an OrderedDict of FairseqDataset instances. sampling_func: A function for sampling over list of dataset keys. The default strategy is to sample uniformly. """ def __init__( self, datasets: Dict[str, FairseqDataset], sampling_func: Callable[[List], int] = None, ): super().__init__() assert isinstance(datasets, OrderedDict) self.datasets = datasets if sampling_func is None: sampling_func = uniform_sampler self.sampling_func = sampling_func self.total_num_instances = 0 for _, dataset in datasets.items(): assert isinstance(dataset, FairseqDataset) self.total_num_instances += len(dataset) self._ordered_indices = None def __len__(self): """ Length of this dataset is the sum of individual datasets """ return self.total_num_instances def ordered_indices(self): """ Ordered indices for batching. Here we call the underlying dataset's ordered_indices() so that we get the same random ordering as we would have from using the underlying dataset directly. """ if self._ordered_indices is None: self._ordered_indices = OrderedDict( [ (key, dataset.ordered_indices()) for key, dataset in self.datasets.items() ] ) return np.arange(len(self)) def _map_index_to_dataset(self, key: int, index: int): """ Different underlying datasets have different lengths. In order to ensure we are not accessing an index outside the range of the current dataset size, we wrap around. This function should be called after we have created an ordering for this and all underlying datasets. """ assert ( self._ordered_indices is not None ), "Must call MultiCorpusSampledDataset.ordered_indices() first" mapped_index = index % len(self.datasets[key]) return self._ordered_indices[key][mapped_index] def __getitem__(self, index: int): """ Get the item associated with index from each underlying dataset. Since index is in the range of [0, TotalNumInstances], we need to map the index to the dataset before retrieving the item. """ return OrderedDict( [ (key, dataset[self._map_index_to_dataset(key, index)]) for key, dataset in self.datasets.items() ] ) def collater(self, samples: List[Dict]): """ Generate a mini-batch for this dataset. To convert this into a regular mini-batch we use the following logic: 1. Select a dataset using the specified probability distribution. 2. Call the collater function of the selected dataset. """ if len(samples) == 0: return None selected_key = self.sampling_func(list(self.datasets.keys())) selected_samples = [sample[selected_key] for sample in samples] return self.datasets[selected_key].collater(selected_samples) def num_tokens(self, index: int): """ Return an example's length (number of tokens), used for batching. Here we return the max across all examples at index across all underlying datasets. 
""" return max( dataset.num_tokens(self._map_index_to_dataset(key, index)) for key, dataset in self.datasets.items() ) def size(self, index: int): """ Return an example's size as a float or tuple. Here we return the max across all underlying datasets. This value is used when filtering a dataset with max-positions. """ return max( dataset.size(self._map_index_to_dataset(key, index)) for key, dataset in self.datasets.items() ) @property def supports_prefetch(self): return all( getattr(dataset, "supports_prefetch", False) for dataset in self.datasets.values() ) def prefetch(self, indices): for key, dataset in self.datasets.items(): dataset.prefetch( [self._map_index_to_dataset(key, index) for index in indices] ) @property def supports_fetch_outside_dataloader(self): return all( self.datasets[key].supports_fetch_outside_dataloader for key in self.datasets )
bart_ls-main
fairseq-py/fairseq/data/multi_corpus_sampled_dataset.py
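A sketch of the wrap-around in _map_index_to_dataset() above: a global index in [0, total) is reduced modulo each dataset's length, so shorter corpora are revisited instead of exhausted.

lengths = {"corpus_a": 10, "corpus_b": 4}
index = 7  # a global index
for key, n in lengths.items():
    print(key, index % n)  # corpus_a -> 7, corpus_b -> 3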
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import FairseqDataset


class NumSamplesDataset(FairseqDataset):
    def __getitem__(self, index):
        return 1

    def __len__(self):
        return 0

    def collater(self, samples):
        return sum(samples)
bart_ls-main
fairseq-py/fairseq/data/num_samples_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch
from fairseq.data import data_utils


class WordNoising(object):
    """Generate a noisy version of a sentence, without changing words themselves."""

    def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None):
        self.dictionary = dictionary
        self.bpe_end = None
        if bpe_cont_marker:
            self.bpe_end = np.array(
                [
                    not self.dictionary[i].endswith(bpe_cont_marker)
                    for i in range(len(self.dictionary))
                ]
            )
        elif bpe_end_marker:
            self.bpe_end = np.array(
                [
                    self.dictionary[i].endswith(bpe_end_marker)
                    for i in range(len(self.dictionary))
                ]
            )

        self.get_word_idx = (
            self._get_bpe_word_idx if self.bpe_end is not None else self._get_token_idx
        )

    def noising(self, x, lengths, noising_prob=0.0):
        raise NotImplementedError()

    def _get_bpe_word_idx(self, x):
        """
        Given a list of BPE tokens, for every index in the tokens list,
        return the index of the word grouping that it belongs to.
        For example, for input x corresponding to ["how", "are", "y@@", "ou"],
        return [[0], [1], [2], [2]].
        """
        # x: (T x B)
        bpe_end = self.bpe_end[x]

        if x.size(0) == 1 and x.size(1) == 1:
            # Special case when we only have one word in x. If x = [[N]],
            # bpe_end is a scalar (bool) instead of a 2-dim array of bools,
            # which makes the sum operation below fail.
            return np.array([[0]])

        # do a reduce front sum to generate word ids
        word_idx = bpe_end[::-1].cumsum(0)[::-1]
        word_idx = word_idx.max(0)[None, :] - word_idx
        return word_idx

    def _get_token_idx(self, x):
        """
        This is to extend noising functions to be able to apply to non-bpe
        tokens, e.g. word or characters.
        """
        x = torch.t(x)
        word_idx = np.array([range(len(x_i)) for x_i in x])
        return np.transpose(word_idx)


class WordDropout(WordNoising):
    """Randomly drop input words. If not passing blank_idx (default is None),
    then dropped words will be removed. Otherwise, it will be replaced by the
    blank_idx."""

    def __init__(
        self,
        dictionary,
        default_dropout_prob=0.1,
        bpe_cont_marker="@@",
        bpe_end_marker=None,
    ):
        super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
        self.default_dropout_prob = default_dropout_prob

    def noising(self, x, lengths, dropout_prob=None, blank_idx=None):
        if dropout_prob is None:
            dropout_prob = self.default_dropout_prob
        # x: (T x B), lengths: B
        if dropout_prob == 0:
            return x, lengths

        assert 0 < dropout_prob < 1

        # be sure to drop entire words
        word_idx = self.get_word_idx(x)
        sentences = []
        modified_lengths = []
        for i in range(lengths.size(0)):
            # Since dropout probabilities need to apply over non-pad tokens,
            # it is not trivial to generate the keep mask without considering
            # input lengths; otherwise, this could be done outside the loop

            # We want to drop whole words based on word_idx grouping
            num_words = max(word_idx[:, i]) + 1

            # ith example: [x0, x1, ..., eos, pad, ..., pad]
            # We should only generate keep probs for non-EOS tokens. Thus if the
            # input sentence ends in EOS, the last word idx is not included in
            # the dropout mask generation and we append True to always keep EOS.
            # Otherwise, just generate the dropout mask for all word idx
            # positions.
            has_eos = x[lengths[i] - 1, i] == self.dictionary.eos()
            if has_eos:
                keep = np.random.rand(num_words - 1) >= dropout_prob
                keep = np.append(keep, [True])  # keep EOS symbol
            else:
                keep = np.random.rand(num_words) >= dropout_prob

            words = x[: lengths[i], i].tolist()

            # TODO: speed up the following loop
            # drop words from the input according to keep
            new_s = [
                w if keep[word_idx[j, i]] else blank_idx for j, w in enumerate(words)
            ]
            new_s = [w for w in new_s if w is not None]
            # we need to have at least one word in the sentence (more than the
            # start / end sentence symbols)
            if len(new_s) <= 1:
                # insert at beginning in case the only token left is EOS
                # EOS should be at end of list.
                new_s.insert(0, words[np.random.randint(0, len(words))])
            assert len(new_s) >= 1 and (
                not has_eos  # Either don't have EOS at end or last token is EOS
                or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos())
            ), "New sentence is invalid."
            sentences.append(new_s)
            modified_lengths.append(len(new_s))
        # re-construct input
        modified_lengths = torch.LongTensor(modified_lengths)
        modified_x = torch.LongTensor(
            modified_lengths.max(), modified_lengths.size(0)
        ).fill_(self.dictionary.pad())
        for i in range(modified_lengths.size(0)):
            modified_x[: modified_lengths[i], i].copy_(torch.LongTensor(sentences[i]))

        return modified_x, modified_lengths


class WordShuffle(WordNoising):
    """Shuffle words by no more than k positions."""

    def __init__(
        self,
        dictionary,
        default_max_shuffle_distance=3,
        bpe_cont_marker="@@",
        bpe_end_marker=None,
    ):
        super().__init__(dictionary, bpe_cont_marker, bpe_end_marker)
        self.default_max_shuffle_distance = default_max_shuffle_distance

    def noising(self, x, lengths, max_shuffle_distance=None):
        if max_shuffle_distance is None:
            max_shuffle_distance = self.default_max_shuffle_distance
        # x: (T x B), lengths: B
        if max_shuffle_distance == 0:
            return x, lengths

        # max_shuffle_distance < 1 will return the same sequence
        assert max_shuffle_distance > 1

        # define noise word scores
        noise = np.random.uniform(
            0,
            max_shuffle_distance,
            size=(x.size(0), x.size(1)),
        )
        noise[0] = -1  # do not move start sentence symbol
        # be sure to shuffle entire words
        word_idx = self.get_word_idx(x)
        x2 = x.clone()
        for i in range(lengths.size(0)):
            length_no_eos = lengths[i]
            if x[lengths[i] - 1, i] == self.dictionary.eos():
                length_no_eos = lengths[i] - 1
            # generate a random permutation
            scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i]
            # ensure no reordering inside a word
            scores += 1e-6 * np.arange(length_no_eos.item())
            permutation = scores.argsort()
            # shuffle words
            x2[:length_no_eos, i].copy_(
                x2[:length_no_eos, i][torch.from_numpy(permutation)]
            )
        return x2, lengths


class UnsupervisedMTNoising(WordNoising):
    """
    Implements the default configuration for noising in UnsupervisedMT
    (github.com/facebookresearch/UnsupervisedMT)
    """

    def __init__(
        self,
        dictionary,
        max_word_shuffle_distance,
        word_dropout_prob,
        word_blanking_prob,
        bpe_cont_marker="@@",
        bpe_end_marker=None,
    ):
        super().__init__(dictionary)
        self.max_word_shuffle_distance = max_word_shuffle_distance
        self.word_dropout_prob = word_dropout_prob
        self.word_blanking_prob = word_blanking_prob

        self.word_dropout = WordDropout(
            dictionary=dictionary,
            bpe_cont_marker=bpe_cont_marker,
            bpe_end_marker=bpe_end_marker,
        )
        self.word_shuffle = WordShuffle(
            dictionary=dictionary,
            bpe_cont_marker=bpe_cont_marker,
            bpe_end_marker=bpe_end_marker,
        )

    def noising(self, x, lengths):
        # 1. Word Shuffle
        noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising(
            x=x,
            lengths=lengths,
            max_shuffle_distance=self.max_word_shuffle_distance,
        )
        # 2. Word Dropout
        noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
            x=noisy_src_tokens,
            lengths=noisy_src_lengths,
            dropout_prob=self.word_dropout_prob,
        )
        # 3. Word Blanking
        noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising(
            x=noisy_src_tokens,
            lengths=noisy_src_lengths,
            dropout_prob=self.word_blanking_prob,
            blank_idx=self.dictionary.unk(),
        )

        return noisy_src_tokens


class NoisingDataset(torch.utils.data.Dataset):
    def __init__(
        self,
        src_dataset,
        src_dict,
        seed,
        noiser=None,
        noising_class=UnsupervisedMTNoising,
        **kwargs
    ):
        """
        Wrap a :class:`~torch.utils.data.Dataset` and apply noise to the
        samples based on the supplied noising configuration.

        Args:
            src_dataset (~torch.utils.data.Dataset): dataset to wrap.
                to build self.src_dataset --
                a LanguagePairDataset with src dataset as the source dataset and
                None as the target dataset. Should NOT have padding so that
                src_lengths are accurately calculated by language_pair_dataset
                collate function.
                We use language_pair_dataset here to encapsulate the tgt_dataset
                so we can re-use the LanguagePairDataset collater to format the
                batches in the structure that SequenceGenerator expects.
            src_dict (~fairseq.data.Dictionary): source dictionary
            seed (int): seed to use when generating random noise
            noiser (WordNoising): a pre-initialized :class:`WordNoising`
                instance. If this is None, a new instance will be created using
                *noising_class* and *kwargs*.
            noising_class (class, optional): class to use to initialize a
                default :class:`WordNoising` instance.
            kwargs (dict, optional): arguments to initialize the default
                :class:`WordNoising` instance given by *noiser*.
        """
        self.src_dataset = src_dataset
        self.src_dict = src_dict
        self.seed = seed
        self.noiser = (
            noiser
            if noiser is not None
            else noising_class(
                dictionary=src_dict,
                **kwargs,
            )
        )
        self.sizes = src_dataset.sizes

    def __getitem__(self, index):
        """
        Returns a single noisy sample. Multiple samples are fed to the collater
        to create a noising dataset batch.
        """
        src_tokens = self.src_dataset[index]
        src_lengths = torch.LongTensor([len(src_tokens)])
        src_tokens = src_tokens.unsqueeze(0)

        # Transpose src tokens to fit expected shape of x in noising function
        # (batch size, sequence length) -> (sequence length, batch size)
        src_tokens_t = torch.t(src_tokens)

        with data_utils.numpy_seed(self.seed + index):
            noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths)

        # Transpose back to expected src_tokens format
        # (sequence length, 1) -> (1, sequence length)
        noisy_src_tokens = torch.t(noisy_src_tokens)
        return noisy_src_tokens[0]

    def __len__(self):
        """
        The length of the noising dataset is the length of src.
        """
        return len(self.src_dataset)

    @property
    def supports_prefetch(self):
        return self.src_dataset.supports_prefetch

    def prefetch(self, indices):
        if self.src_dataset.supports_prefetch:
            self.src_dataset.prefetch(indices)
bart_ls-main
fairseq-py/fairseq/data/noising.py
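A numpy sketch of the shuffle rule in WordShuffle.noising() above: each position gets a score of its index plus uniform noise in [0, k), and argsort of the scores yields a permutation in which nothing moves more than k - 1 positions (the BOS pinning and BPE word grouping are omitted here).

import numpy as np

k = 3
positions = np.arange(8)
scores = positions + np.random.uniform(0, k, size=8)
permutation = scores.argsort()
assert np.abs(permutation - positions).max() < k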
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
from fairseq.data import data_utils

from . import BaseWrapperDataset


class TruncateDataset(BaseWrapperDataset):
    """Truncate a sequence by returning the first truncation_length tokens"""

    def __init__(self, dataset, truncation_length):
        super().__init__(dataset)
        assert truncation_length is not None
        self.truncation_length = truncation_length
        self.dataset = dataset

    def __getitem__(self, index):
        item = self.dataset[index]
        item_len = item.size(0)
        if item_len > self.truncation_length:
            item = item[: self.truncation_length]
        return item

    @property
    def sizes(self):
        return np.minimum(self.dataset.sizes, self.truncation_length)

    def __len__(self):
        return len(self.dataset)


class RandomCropDataset(TruncateDataset):
    """Truncate a sequence by returning a random crop of truncation_length tokens"""

    def __init__(self, dataset, truncation_length, seed=1):
        super().__init__(dataset, truncation_length)
        self.seed = seed
        self.epoch = 0

    @property
    def can_reuse_epoch_itr_across_epochs(self):
        return True  # only the crop changes, not item sizes

    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        self.epoch = epoch

    def __getitem__(self, index):
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            item = self.dataset[index]
            item_len = item.size(0)
            excess = item_len - self.truncation_length
            if excess > 0:
                start_idx = np.random.randint(0, excess)
                item = item[start_idx : start_idx + self.truncation_length]
            return item


def maybe_shorten_dataset(
    dataset,
    split,
    shorten_data_split_list,
    shorten_method,
    tokens_per_sample,
    seed,
):
    truncate_split = (
        split in shorten_data_split_list.split(",") or len(shorten_data_split_list) == 0
    )
    if shorten_method == "truncate" and truncate_split:
        dataset = TruncateDataset(dataset, tokens_per_sample)
    elif shorten_method == "random_crop" and truncate_split:
        dataset = RandomCropDataset(dataset, tokens_per_sample, seed)
    return dataset
bart_ls-main
fairseq-py/fairseq/data/shorten_dataset.py
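A toy sketch contrasting the two shortening strategies above: TruncateDataset keeps the prefix, while RandomCropDataset keeps a contiguous window whose start is redrawn per (seed, epoch, index). ToyDataset is a hypothetical stand-in.

import numpy as np
import torch
from fairseq.data.shorten_dataset import RandomCropDataset, TruncateDataset

class ToyDataset(torch.utils.data.Dataset):
    sizes = np.array([6])

    def __getitem__(self, idx):
        return torch.arange(6)

    def __len__(self):
        return 1

print(TruncateDataset(ToyDataset(), 4)[0])  # tensor([0, 1, 2, 3])
print(RandomCropDataset(ToyDataset(), 4, seed=1)[0])  # e.g. tensor([1, 2, 3, 4])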
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import numpy as np

from . import BaseWrapperDataset


logger = logging.getLogger(__name__)


class SubsampleDataset(BaseWrapperDataset):
    """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples

    Args:
        dataset (~torch.utils.data.Dataset): dataset to subsample
        size_ratio(float): the ratio to subsample to. must be between 0 and 1
            (exclusive)
    """

    def __init__(self, dataset, size_ratio, shuffle=False):
        super().__init__(dataset)
        assert size_ratio < 1
        self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
        self.indices = np.random.choice(
            list(range(len(self.dataset))), self.actual_size, replace=False
        )
        self.shuffle = shuffle
        logger.info(
            "subsampled dataset from {} to {} (ratio={})".format(
                len(self.dataset), self.actual_size, size_ratio
            )
        )

    def __getitem__(self, index):
        return self.dataset[self.indices[index]]

    def __len__(self):
        return self.actual_size

    def collater(self, samples):
        return self.dataset.collater(samples)

    @property
    def sizes(self):
        return self.dataset.sizes[self.indices]

    @property
    def name(self):
        return self.dataset.name

    def num_tokens(self, index):
        return self.dataset.num_tokens(self.indices[index])

    def size(self, index):
        return self.dataset.size(self.indices[index])

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        return np.lexsort(order)

    def prefetch(self, indices):
        self.dataset.prefetch(self.indices[indices])
bart_ls-main
fairseq-py/fairseq/data/subsample_dataset.py
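A numpy sketch of the subsampling rule above: ceil(len * size_ratio) indices are drawn once, without replacement, when the wrapper is constructed.

import numpy as np

n, size_ratio = 10, 0.3
actual_size = np.ceil(n * size_ratio).astype(int)  # -> 3
indices = np.random.choice(np.arange(n), actual_size, replace=False)
print(actual_size, sorted(indices))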
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from . import BaseWrapperDataset


class SortDataset(BaseWrapperDataset):
    def __init__(self, dataset, sort_order):
        super().__init__(dataset)
        if not isinstance(sort_order, (list, tuple)):
            sort_order = [sort_order]
        self.sort_order = sort_order

        assert all(len(so) == len(dataset) for so in sort_order)

    def ordered_indices(self):
        return np.lexsort(self.sort_order)
bart_ls-main
fairseq-py/fairseq/data/sort_dataset.py
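A sketch of the np.lexsort convention SortDataset above depends on: the last key is the primary one, so passing (shuffle_order, sizes) gives a size-major order with random tie-breaking.

import numpy as np

sizes = np.array([5, 2, 5, 1])
shuffle = np.random.permutation(4)
order = np.lexsort([shuffle, sizes])  # sizes is the primary key
print(sizes[order])  # [1 2 5 5]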
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from enum import Enum


class TextCompressionLevel(Enum):
    none = 0
    low = 1
    high = 2


class TextCompressor(object):
    def __init__(
        self, level: TextCompressionLevel, max_input_byte_length: int = 2 ** 16
    ):
        self.level = level
        self.max_input_length = max_input_byte_length

    def compress(self, text: str) -> bytes:
        if self.level == TextCompressionLevel.low:
            import zlib  # zlib: built-in, fast

            return zlib.compress(text.encode(), level=0)
        elif self.level == TextCompressionLevel.high:
            try:
                import unishox2  # unishox2: optimized for short text but slower
            except ImportError:
                raise ImportError(
                    "Please install unishox2 for the text compression feature: "
                    "pip install unishox2-py3"
                )
            assert len(text.encode()) <= self.max_input_length
            return unishox2.compress(text)[0]
        else:
            return text.encode()

    def decompress(self, compressed: bytes) -> str:
        if self.level == TextCompressionLevel.low:
            import zlib

            return zlib.decompress(compressed).decode()
        elif self.level == TextCompressionLevel.high:
            try:
                import unishox2
            except ImportError:
                raise ImportError(
                    "Please install unishox2 for the text compression feature: "
                    "pip install unishox2-py3"
                )
            return unishox2.decompress(compressed, self.max_input_length)
        else:
            return compressed.decode()
bart_ls-main
fairseq-py/fairseq/data/text_compressor.py
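A round-trip sketch for TextCompressor above: the low level needs only the standard library's zlib, while high requires the optional unishox2-py3 package.

from fairseq.data.text_compressor import TextCompressionLevel, TextCompressor

tc = TextCompressor(level=TextCompressionLevel.low)
blob = tc.compress("hello world")
assert isinstance(blob, bytes)
assert tc.decompress(blob) == "hello world"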
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import FairseqDataset, data_utils def collate(samples, pad_idx, eos_idx, fixed_pad_length=None, pad_to_bsz=None): if len(samples) == 0: return {} def merge(key, is_list=False): if is_list: res = [] for i in range(len(samples[0][key])): res.append( data_utils.collate_tokens( [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False, pad_to_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, ) ) return res else: return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad=False, pad_to_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, ) src_tokens = merge("source") if samples[0]["target"] is not None: is_target_list = isinstance(samples[0]["target"], list) target = merge("target", is_target_list) else: target = src_tokens return { "id": torch.LongTensor([s["id"] for s in samples]), "nsentences": len(samples), "ntokens": sum(len(s["source"]) for s in samples), "net_input": { "src_tokens": src_tokens, "src_lengths": torch.LongTensor([s["source"].numel() for s in samples]), }, "target": target, } class MonolingualDataset(FairseqDataset): """ A wrapper around torch.utils.data.Dataset for monolingual data. Args: dataset (torch.utils.data.Dataset): dataset to wrap sizes (List[int]): sentence lengths vocab (~fairseq.data.Dictionary): vocabulary shuffle (bool, optional): shuffle the elements before batching (default: True). """ def __init__( self, dataset, sizes, src_vocab, tgt_vocab=None, add_eos_for_other_targets=False, shuffle=False, targets=None, add_bos_token=False, fixed_pad_length=None, pad_to_bsz=None, src_lang_idx=None, tgt_lang_idx=None, ): self.dataset = dataset self.sizes = np.array(sizes) self.vocab = src_vocab self.tgt_vocab = tgt_vocab or src_vocab self.add_eos_for_other_targets = add_eos_for_other_targets self.shuffle = shuffle self.add_bos_token = add_bos_token self.fixed_pad_length = fixed_pad_length self.pad_to_bsz = pad_to_bsz self.src_lang_idx = src_lang_idx self.tgt_lang_idx = tgt_lang_idx assert targets is None or all( t in {"self", "future", "past"} for t in targets ), "targets must be none or one of 'self', 'future', 'past'" if targets is not None and len(targets) == 0: targets = None self.targets = targets def __getitem__(self, index): if self.targets is not None: # *future_target* is the original sentence # *source* is shifted right by 1 (maybe left-padded with eos) # *past_target* is shifted right by 2 (left-padded as needed) # # Left-to-right language models should condition on *source* and # predict *future_target*. # Right-to-left language models should condition on *source* and # predict *past_target*. 
source, future_target, past_target = self.dataset[index] source, target = self._make_source_target( source, future_target, past_target ) else: source = self.dataset[index] target = None source, target = self._maybe_add_bos(source, target) return {"id": index, "source": source, "target": target} def __len__(self): return len(self.dataset) def _make_source_target(self, source, future_target, past_target): if self.targets is not None: target = [] if ( self.add_eos_for_other_targets and (("self" in self.targets) or ("past" in self.targets)) and source[-1] != self.vocab.eos() ): # append eos at the end of source source = torch.cat([source, source.new([self.vocab.eos()])]) if "future" in self.targets: future_target = torch.cat( [future_target, future_target.new([self.vocab.pad()])] ) if "past" in self.targets: # first token is before the start of sentence which is only used in "none" break mode when # add_eos_for_other_targets is False past_target = torch.cat( [ past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None], ] ) for t in self.targets: if t == "self": target.append(source) elif t == "future": target.append(future_target) elif t == "past": target.append(past_target) else: raise Exception("invalid target " + t) if len(target) == 1: target = target[0] else: target = future_target return source, self._filter_vocab(target) def _maybe_add_bos(self, source, target): if self.add_bos_token: source = torch.cat([source.new([self.vocab.bos()]), source]) if target is not None: target = torch.cat([target.new([self.tgt_vocab.bos()]), target]) return source, target def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" return self.sizes[indices] def _filter_vocab(self, target): if len(self.tgt_vocab) != len(self.vocab): def _filter(target): mask = target.ge(len(self.tgt_vocab)) if mask.any(): target[mask] = self.tgt_vocab.unk() return target if isinstance(target, list): return [_filter(t) for t in target] return _filter(target) return target def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch with the following keys: - `id` (LongTensor): example IDs in the original input order - `ntokens` (int): total number of tokens in the batch - `net_input` (dict): the input to the Model, containing keys: - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in the source sentence of shape `(bsz, src_len)`. Padding will appear on the right. - `target` (LongTensor): a padded 2D Tensor of tokens in the target sentence of shape `(bsz, tgt_len)`. Padding will appear on the right. """ return collate( samples, self.vocab.pad(), self.vocab.eos(), self.fixed_pad_length, self.pad_to_bsz, ) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" return self.sizes[index] def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return self.sizes[index] def ordered_indices(self): """Return an ordered list of indices. 
Batches will be constructed based on this order.""" if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch(indices)
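if __name__ == "__main__":
    # A minimal sketch, not part of the original module, of the shifting
    # convention documented in __getitem__, on toy ids. Assumes eos id 2 (the
    # default Dictionary layout); in practice the wrapped TokenBlockDataset
    # is what produces the (source, future_target, past_target) streams.
    toks = torch.LongTensor([5, 6, 7, 2])           # w1 w2 w3 <eos>
    future_target = toks                            # original sentence
    source = torch.cat([toks.new([2]), toks[:-1]])  # shifted right by 1
    print(source.tolist(), "->", future_target.tolist())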
bart_ls-main
fairseq-py/fairseq/data/monolingual_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import subprocess import threading from pathlib import Path import numpy as np import torch def fasta_file_path(prefix_path): return prefix_path + ".fasta" class FastaDataset(torch.utils.data.Dataset): """ For loading protein sequence datasets in the common FASTA data format """ def __init__(self, path: str, cache_indices=False): self.fn = fasta_file_path(path) self.threadlocal = threading.local() self.cache = Path(f"{path}.fasta.idx.npy") if cache_indices: if self.cache.exists(): self.offsets, self.sizes = np.load(self.cache) else: self.offsets, self.sizes = self._build_index(path) np.save(self.cache, np.stack([self.offsets, self.sizes])) else: self.offsets, self.sizes = self._build_index(path) def _get_file(self): if not hasattr(self.threadlocal, "f"): self.threadlocal.f = open(self.fn, "r") return self.threadlocal.f def __getitem__(self, idx): f = self._get_file() f.seek(self.offsets[idx]) desc = f.readline().strip() line = f.readline() seq = "" while line != "" and line[0] != ">": seq += line.strip() line = f.readline() return desc, seq def __len__(self): return self.offsets.size def _build_index(self, path: str): # Use grep and awk to get 100M/s on local SSD. # Should process your enormous 100G fasta in ~10 min single core... path = fasta_file_path(path) bytes_offsets = subprocess.check_output( f"cat {path} | tqdm --bytes --total $(wc -c < {path})" "| grep --byte-offset '^>' -o | cut -d: -f1", shell=True, ) fasta_lengths = subprocess.check_output( f"cat {path} | tqdm --bytes --total $(wc -c < {path})" "| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'", shell=True, ) bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ") sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ") return bytes_np, sizes_np def __setstate__(self, state): self.__dict__ = state self.threadlocal = threading.local() def __getstate__(self): d = {} for i, v in self.__dict__.items(): if i != "threadlocal": d[i] = v return d def __del__(self): if hasattr(self.threadlocal, "f"): self.threadlocal.f.close() del self.threadlocal.f @staticmethod def exists(path): return os.path.exists(fasta_file_path(path)) class EncodedFastaDataset(FastaDataset): """ The FastaDataset returns raw sequences - this allows us to return indices with a dictionary instead. """ def __init__(self, path, dictionary): super().__init__(path, cache_indices=True) self.dictionary = dictionary def __getitem__(self, idx): desc, seq = super().__getitem__(idx) return self.dictionary.encode_line(seq, line_tokenizer=list).long()
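if __name__ == "__main__":
    # A minimal demo, not part of the original module: index and read back a
    # toy FASTA file. _build_index shells out to grep/awk and the tqdm CLI,
    # so this sketch assumes a Unix machine with tqdm installed.
    import tempfile
    prefix = os.path.join(tempfile.mkdtemp(), "toy")
    with open(fasta_file_path(prefix), "w") as f:
        f.write(">seq1\nMKT\nLLV\n>seq2\nGGG\n")
    ds = FastaDataset(prefix)
    print(len(ds), ds[0])  # expect: 2 ('>seq1', 'MKTLLV')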
bart_ls-main
fairseq-py/fairseq/data/fasta_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch.utils.data.dataloader import default_collate from . import FairseqDataset class BaseWrapperDataset(FairseqDataset): def __init__(self, dataset): super().__init__() self.dataset = dataset def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples): if hasattr(self.dataset, "collater"): return self.dataset.collater(samples) else: return default_collate(samples) @property def sizes(self): return self.dataset.sizes def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def attr(self, attr: str, index: int): return self.dataset.attr(attr, index) def prefetch(self, indices): self.dataset.prefetch(indices) def get_batch_shapes(self): return self.dataset.get_batch_shapes() def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): return self.dataset.batch_by_size( indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) def filter_indices_by_size(self, indices, max_sizes): return self.dataset.filter_indices_by_size(indices, max_sizes) @property def can_reuse_epoch_itr_across_epochs(self): return self.dataset.can_reuse_epoch_itr_across_epochs def set_epoch(self, epoch): super().set_epoch(epoch) if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch)
bart_ls-main
fairseq-py/fairseq/data/base_wrapper_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import BaseWrapperDataset class NumelDataset(BaseWrapperDataset): def __init__(self, dataset, reduce=False): super().__init__(dataset) self.reduce = reduce def __getitem__(self, index): item = self.dataset[index] if torch.is_tensor(item): return torch.numel(item) else: return np.size(item) def __len__(self): return len(self.dataset) def collater(self, samples): if self.reduce: return sum(samples) else: return torch.tensor(samples)
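if __name__ == "__main__":
    # A minimal demo, not part of the original module: per-item element
    # counts, and the reduce=True collater summing them, on a plain list of
    # toy tensors (a list is enough for __getitem__/__len__).
    ds = NumelDataset([torch.ones(3), torch.ones(5)], reduce=True)
    print(ds[0], ds[1], ds.collater([ds[0], ds[1]]))  # 3 5 8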
bart_ls-main
fairseq-py/fairseq/data/numel_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" from .dictionary import Dictionary, TruncatedDictionary from .fairseq_dataset import FairseqDataset, FairseqIterableDataset from .base_wrapper_dataset import BaseWrapperDataset from .add_target_dataset import AddTargetDataset from .append_token_dataset import AppendTokenDataset from .audio.raw_audio_dataset import BinarizedAudioDataset, FileAudioDataset from .audio.hubert_dataset import HubertDataset from .backtranslation_dataset import BacktranslationDataset from .bucket_pad_length_dataset import BucketPadLengthDataset from .colorize_dataset import ColorizeDataset from .concat_dataset import ConcatDataset from .concat_sentences_dataset import ConcatSentencesDataset from .denoising_dataset import DenoisingDataset from .long_denoising_dataset import LongDenoisingDataset from .pegasus_dataset import PegasusDataset from .id_dataset import IdDataset from .indexed_dataset import ( IndexedCachedDataset, IndexedDataset, IndexedRawTextDataset, MMapIndexedDataset, ) from .language_pair_dataset import LanguagePairDataset from .list_dataset import ListDataset from .lm_context_window_dataset import LMContextWindowDataset from .lru_cache_dataset import LRUCacheDataset from .mask_tokens_dataset import MaskTokensDataset from .monolingual_dataset import MonolingualDataset from .multi_corpus_sampled_dataset import MultiCorpusSampledDataset from .nested_dictionary_dataset import NestedDictionaryDataset from .noising import NoisingDataset from .numel_dataset import NumelDataset from .num_samples_dataset import NumSamplesDataset from .offset_tokens_dataset import OffsetTokensDataset from .pad_dataset import LeftPadDataset, PadDataset, RightPadDataset from .prepend_dataset import PrependDataset from .prepend_token_dataset import PrependTokenDataset from .raw_label_dataset import RawLabelDataset from .replace_dataset import ReplaceDataset from .resampling_dataset import ResamplingDataset from .roll_dataset import RollDataset from .round_robin_zip_datasets import RoundRobinZipDatasets from .sort_dataset import SortDataset from .strip_token_dataset import StripTokenDataset from .subsample_dataset import SubsampleDataset from .token_block_dataset import TokenBlockDataset from .transform_eos_dataset import TransformEosDataset from .transform_eos_lang_pair_dataset import TransformEosLangPairDataset from .shorten_dataset import TruncateDataset, RandomCropDataset from .multilingual.sampled_multi_dataset import SampledMultiDataset from .multilingual.sampled_multi_epoch_dataset import SampledMultiEpochDataset from .fasta_dataset import FastaDataset, EncodedFastaDataset from .iterators import ( CountingIterator, EpochBatchIterator, GroupedIterator, ShardedIterator, ) __all__ = [ "AddTargetDataset", "AppendTokenDataset", "BacktranslationDataset", "BaseWrapperDataset", "BinarizedAudioDataset", "BucketPadLengthDataset", "ColorizeDataset", "ConcatDataset", "ConcatSentencesDataset", "CountingIterator", "DenoisingDataset", "LongDenoisingDataset", "PegasusDataset", "Dictionary", "EncodedFastaDataset", "EpochBatchIterator", "FairseqDataset", "FairseqIterableDataset", "FastaDataset", "FileAudioDataset", "GroupedIterator", "HubertDataset", "IdDataset", "IndexedCachedDataset", "IndexedDataset", "IndexedRawTextDataset", "LanguagePairDataset", "LeftPadDataset", "ListDataset", "LMContextWindowDataset", "LRUCacheDataset", 
"MaskTokensDataset", "MMapIndexedDataset", "MonolingualDataset", "MultiCorpusSampledDataset", "NestedDictionaryDataset", "NoisingDataset", "NumelDataset", "NumSamplesDataset", "OffsetTokensDataset", "PadDataset", "PrependDataset", "PrependTokenDataset", "RandomCropDataset", "RawLabelDataset", "ResamplingDataset", "ReplaceDataset", "RightPadDataset", "RollDataset", "RoundRobinZipDatasets", "SampledMultiDataset", "SampledMultiEpochDataset", "ShardedIterator", "SortDataset", "StripTokenDataset", "SubsampleDataset", "TokenBlockDataset", "TransformEosDataset", "TransformEosLangPairDataset", "TruncateDataset", "TruncatedDictionary", ]
bart_ls-main
fairseq-py/fairseq/data/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import FairseqDataset class ConcatSentencesDataset(FairseqDataset): def __init__(self, *datasets): super().__init__() self.datasets = datasets assert all( len(ds) == len(datasets[0]) for ds in datasets ), "datasets must have the same length" def __getitem__(self, index): return torch.cat([ds[index] for ds in self.datasets]) def __len__(self): return len(self.datasets[0]) def collater(self, samples): return self.datasets[0].collater(samples) @property def sizes(self): return sum(ds.sizes for ds in self.datasets) def num_tokens(self, index): return sum(ds.num_tokens(index) for ds in self.datasets) def size(self, index): return sum(ds.size(index) for ds in self.datasets) def ordered_indices(self): return self.datasets[0].ordered_indices() @property def supports_prefetch(self): return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets) def prefetch(self, indices): for ds in self.datasets: if getattr(ds, "supports_prefetch", False): ds.prefetch(indices) def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.datasets: if hasattr(ds, "set_epoch"): ds.set_epoch(epoch)
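if __name__ == "__main__":
    # A minimal demo, not part of the original module: per-index
    # concatenation across two toy datasets of equal length; plain lists
    # suffice for __getitem__.
    a = [torch.LongTensor([1, 2]), torch.LongTensor([3])]
    b = [torch.LongTensor([9]), torch.LongTensor([8, 7])]
    ds = ConcatSentencesDataset(a, b)
    print(ds[0].tolist(), ds[1].tolist())  # [1, 2, 9] [3, 8, 7]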
bart_ls-main
fairseq-py/fairseq/data/concat_sentences_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from functools import lru_cache import numpy as np import torch from fairseq.data import Dictionary, data_utils from . import BaseWrapperDataset, LRUCacheDataset class MaskTokensDataset(BaseWrapperDataset): """ A wrapper Dataset for masked language modeling. Input items are masked according to the specified masking probability. Args: dataset: Dataset to wrap. sizes: Sentence lengths vocab: Dictionary with the vocabulary and special tokens. pad_idx: Id of pad token in vocab mask_idx: Id of mask token in vocab return_masked_tokens: controls whether to return the non-masked tokens (the default) or to return a tensor with the original masked token IDs (and *pad_idx* elsewhere). The latter is useful as targets for masked LM training. seed: Seed for random number generator for reproducibility. mask_prob: probability of replacing a token with *mask_idx*. leave_unmasked_prob: probability that a masked token is unmasked. random_token_prob: probability of replacing a masked token with a random token from the vocabulary. freq_weighted_replacement: sample random replacement words based on word frequencies in the vocab. mask_whole_words: only mask whole words. This should be a byte mask over vocab indices, indicating whether it is the beginning of a word. We will extend any mask to encompass the whole word. bpe: BPE to use for whole-word masking. mask_multiple_length : repeat each mask index multiple times. Default value is 1. mask_stdev : standard deviation of masks distribution in case of multiple masking. Default value is 0. """ @classmethod def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs): """Return the source and target datasets for masked LM training.""" dataset = LRUCacheDataset(dataset) return ( LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)), LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)), ) def __init__( self, dataset: torch.utils.data.Dataset, vocab: Dictionary, pad_idx: int, mask_idx: int, return_masked_tokens: bool = False, seed: int = 1, mask_prob: float = 0.15, leave_unmasked_prob: float = 0.1, random_token_prob: float = 0.1, freq_weighted_replacement: bool = False, mask_whole_words: torch.Tensor = None, mask_multiple_length: int = 1, mask_stdev: float = 0.0, ): assert 0.0 < mask_prob < 1.0 assert 0.0 <= random_token_prob <= 1.0 assert 0.0 <= leave_unmasked_prob <= 1.0 assert random_token_prob + leave_unmasked_prob <= 1.0 assert mask_multiple_length >= 1 assert mask_stdev >= 0.0 self.dataset = dataset self.vocab = vocab self.pad_idx = pad_idx self.mask_idx = mask_idx self.return_masked_tokens = return_masked_tokens self.seed = seed self.mask_prob = mask_prob self.leave_unmasked_prob = leave_unmasked_prob self.random_token_prob = random_token_prob self.mask_whole_words = mask_whole_words self.mask_multiple_length = mask_multiple_length self.mask_stdev = mask_stdev if random_token_prob > 0.0: if freq_weighted_replacement: weights = np.array(self.vocab.count) else: weights = np.ones(len(self.vocab)) weights[: self.vocab.nspecial] = 0 self.weights = weights / weights.sum() self.epoch = 0 @property def can_reuse_epoch_itr_across_epochs(self): return True # only the noise changes, not item sizes def set_epoch(self, epoch, **unused): super().set_epoch(epoch) self.epoch = epoch def __getitem__(self, index: int): return 
self.__getitem_cached__(self.seed, self.epoch, index) @lru_cache(maxsize=8) def __getitem_cached__(self, seed: int, epoch: int, index: int): with data_utils.numpy_seed(self.seed, self.epoch, index): item = self.dataset[index] sz = len(item) assert ( self.mask_idx not in item ), "Dataset contains mask_idx (={}), this is not expected!".format( self.mask_idx, ) if self.mask_whole_words is not None: word_begins_mask = self.mask_whole_words.gather(0, item) word_begins_idx = word_begins_mask.nonzero().view(-1) sz = len(word_begins_idx) words = np.split(word_begins_mask, word_begins_idx)[1:] assert len(words) == sz word_lens = list(map(len, words)) # decide elements to mask mask = np.full(sz, False) num_mask = int( # add a random number for probabilistic rounding self.mask_prob * sz / float(self.mask_multiple_length) + np.random.rand() ) # multiple masking as described in the vq-wav2vec paper (https://arxiv.org/abs/1910.05453) mask_idc = np.random.choice(sz, num_mask, replace=False) if self.mask_stdev > 0.0: lengths = np.random.normal( self.mask_multiple_length, self.mask_stdev, size=num_mask ) lengths = [max(0, int(round(x))) for x in lengths] mask_idc = np.asarray( [ mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j]) ], dtype=np.int64, ) else: mask_idc = np.concatenate( [mask_idc + i for i in range(self.mask_multiple_length)] ) mask_idc = mask_idc[mask_idc < len(mask)] try: mask[mask_idc] = True except: # something wrong print( "Assigning mask indexes {} to mask {} failed!".format( mask_idc, mask ) ) raise if self.return_masked_tokens: # exit early if we're just returning the masked tokens # (i.e., the targets for masked LM training) if self.mask_whole_words is not None: mask = np.repeat(mask, word_lens) new_item = np.full(len(mask), self.pad_idx) new_item[mask] = item[torch.from_numpy(mask.astype(np.uint8)) == 1] return torch.from_numpy(new_item) # decide unmasking and random replacement rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob if rand_or_unmask_prob > 0.0: rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob) if self.random_token_prob == 0.0: unmask = rand_or_unmask rand_mask = None elif self.leave_unmasked_prob == 0.0: unmask = None rand_mask = rand_or_unmask else: unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob decision = np.random.rand(sz) < unmask_prob unmask = rand_or_unmask & decision rand_mask = rand_or_unmask & (~decision) else: unmask = rand_mask = None if unmask is not None: mask = mask ^ unmask if self.mask_whole_words is not None: mask = np.repeat(mask, word_lens) new_item = np.copy(item) new_item[mask] = self.mask_idx if rand_mask is not None: num_rand = rand_mask.sum() if num_rand > 0: if self.mask_whole_words is not None: rand_mask = np.repeat(rand_mask, word_lens) num_rand = rand_mask.sum() new_item[rand_mask] = np.random.choice( len(self.vocab), num_rand, p=self.weights, ) return torch.from_numpy(new_item)
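if __name__ == "__main__":
    # A minimal demo, not part of the original module: build a tiny
    # vocabulary, add a mask symbol, and mask a toy sentence. apply_mask
    # returns the (masked source, masked-LM target) dataset pair.
    d = Dictionary()
    mask_idx = d.add_symbol("<mask>")
    ids = [d.add_symbol(w) for w in "the cat sat down".split()]
    item = torch.LongTensor(ids + [d.eos()])
    src, tgt = MaskTokensDataset.apply_mask(
        [item], d, pad_idx=d.pad(), mask_idx=mask_idx, seed=7
    )
    print(src[0].tolist(), tgt[0].tolist())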
bart_ls-main
fairseq-py/fairseq/data/mask_tokens_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from functools import lru_cache from . import BaseWrapperDataset class LRUCacheDataset(BaseWrapperDataset): def __init__(self, dataset, token=None): super().__init__(dataset) @lru_cache(maxsize=8) def __getitem__(self, index): return self.dataset[index] @lru_cache(maxsize=8) def collater(self, samples): return self.dataset.collater(samples)
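if __name__ == "__main__":
    # A minimal demo, not part of the original module: repeated lookups of
    # the same index are served from functools.lru_cache rather than hitting
    # the wrapped dataset again.
    class Counting(list):
        reads = 0
        def __getitem__(self, i):
            Counting.reads += 1
            return super().__getitem__(i)
    ds = LRUCacheDataset(Counting([10, 20, 30]))
    ds[0]; ds[0]; ds[0]
    print(Counting.reads)  # 1: later hits come from the cache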
bart_ls-main
fairseq-py/fairseq/data/lru_cache_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import numpy as np
import torch

from . import FairseqDataset, data_utils


def collate(
    samples,
    pad_idx,
    eos_idx,
    vocab,
    left_pad_source=False,
    left_pad_target=False,
    input_feeding=True,
    pad_to_length=None,
    pad_to_multiple=1
):
    assert input_feeding
    if len(samples) == 0:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None, pad_to_multiple=1):
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx,
            eos_idx=None,  # use eos_idx of each sample instead of vocab.eos()
            left_pad=left_pad,
            move_eos_to_beginning=move_eos_to_beginning,
            pad_to_length=pad_to_length,
            pad_to_multiple=pad_to_multiple,
        )

    id = torch.LongTensor([s["id"] for s in samples])
    src_tokens = merge(
        "source",
        left_pad=left_pad_source,
        pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
        pad_to_multiple=pad_to_multiple
    )
    # sort by descending source length
    src_lengths = torch.LongTensor([s["source"].numel() for s in samples])
    src_lengths, sort_order = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)

    prev_output_tokens = None
    target = None
    if samples[0].get("target", None) is not None:
        target = merge(
            "target",
            left_pad=left_pad_target,
            pad_to_length=pad_to_length["target"] if pad_to_length is not None else None,
        )
        target = target.index_select(0, sort_order)
        ntokens = sum(len(s["target"]) for s in samples)

        if input_feeding:
            # we create a shifted version of targets for feeding the
            # previous output token(s) into the next decoder step
            prev_output_tokens = merge(
                "target",
                left_pad=left_pad_target,
                move_eos_to_beginning=True,
                pad_to_length=pad_to_length["target"] if pad_to_length is not None else None,
            )
            prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
    else:
        ntokens = sum(len(s["source"]) for s in samples)

    batch = {
        "id": id,
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
        },
        "target": target,
        "nsentences": len(samples),
        "sort_order": sort_order,
    }
    if prev_output_tokens is not None:
        batch["net_input"]["prev_output_tokens"] = prev_output_tokens

    return batch


class DenoisingDataset(FairseqDataset):
    """
    A wrapper around TokenBlockDataset for BART dataset.

    Args:
        dataset (TokenBlockDataset): dataset to wrap
        sizes (List[int]): sentence lengths
        vocab (~fairseq.data.Dictionary): vocabulary
        mask_idx (int): dictionary index used for masked token
        mask_whole_words: only mask whole words. This should be a byte mask
            over vocab indices, indicating whether it is the beginning of a
            word. We will extend any mask to encompass the whole word.
        shuffle (bool, optional): shuffle the elements before batching.
            Default: ``True``
        seed: Seed for random number generator for reproducibility.
        args: argparse arguments.
""" def __init__( self, dataset, sizes, vocab, mask_idx, mask_whole_words, shuffle, seed, args, eos=None, item_transform_func=None, ): self.dataset = dataset self.sizes = sizes self.vocab = vocab self.shuffle = shuffle self.seed = seed self.mask_idx = mask_idx self.mask_whole_word = mask_whole_words self.mask_ratio = args.mask self.random_ratio = args.mask_random self.insert_ratio = args.insert self.rotate_ratio = args.rotate self.permute_sentence_ratio = args.permute_sentences self.eos = eos if eos is not None else vocab.eos() self.item_transform_func = item_transform_func if args.bpe != "gpt2": self.full_stop_index = self.vocab.eos() else: assert args.bpe == "gpt2" self.full_stop_index = self.vocab.index("13") self.replace_length = args.replace_length if self.replace_length not in [-1, 0, 1]: raise ValueError(f"invalid arg: replace_length={self.replace_length}") if args.mask_length not in ["subword", "word", "span-poisson"]: raise ValueError(f"invalid arg: mask-length={args.mask_length}") if args.mask_length == "subword" and args.replace_length not in [0, 1]: raise ValueError(f"if using subwords, use replace-length=1 or 0") self.mask_span_distribution = None if args.mask_length == "span-poisson": _lambda = args.poisson_lambda lambda_to_the_k = 1 e_to_the_minus_lambda = math.exp(-_lambda) k_factorial = 1 ps = [] for k in range(0, 128): ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial) lambda_to_the_k *= _lambda k_factorial *= k + 1 if ps[-1] < 0.0000001: break ps = torch.FloatTensor(ps) self.mask_span_distribution = torch.distributions.Categorical(ps) self.epoch = 0 @property def can_reuse_epoch_itr_across_epochs(self): return True # only the noise changes, not item sizes def set_epoch(self, epoch, **unused): self.epoch = epoch def __getitem__(self, index): with data_utils.numpy_seed(self.seed, self.epoch, index): tokens = self.dataset[index] assert tokens[-1] == self.eos source, target = tokens, tokens.clone() if self.permute_sentence_ratio > 0.0: source = self.permute_sentences(source, self.permute_sentence_ratio) if self.mask_ratio > 0: source = self.add_whole_word_mask(source, self.mask_ratio) if self.insert_ratio > 0: source = self.add_insertion_noise(source, self.insert_ratio) if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio: source = self.add_rolling_noise(source) # there can additional changes to make: if self.item_transform_func is not None: source, target = self.item_transform_func(source, target) assert (source >= 0).all() assert (source[1:-1] >= 1).all() assert (source <= len(self.vocab)).all() assert source[0] == self.vocab.bos() assert source[-1] == self.eos return { "id": index, "source": source, "target": target, } def __len__(self): return len(self.dataset) def permute_sentences(self, source, p=1.0): full_stops = source == self.full_stop_index # Pretend it ends with a full stop so last span is a sentence full_stops[-2] = 1 # Tokens that are full stops, where the previous token is not sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2 result = source.clone() num_sentences = sentence_ends.size(0) num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0) substitutions = torch.randperm(num_sentences)[:num_to_permute] ordering = torch.arange(0, num_sentences) ordering[substitutions] = substitutions[torch.randperm(num_to_permute)] # Ignore <bos> at start index = 1 for i in ordering: sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]] result[index : index + sentence.size(0)] = sentence 
index += sentence.size(0) return result def word_starts(self, source): if self.mask_whole_word is not None: is_word_start = self.mask_whole_word.gather(0, source) else: is_word_start = torch.ones(source.size()) is_word_start[0] = 0 is_word_start[-1] = 0 return is_word_start def add_whole_word_mask(self, source, p): is_word_start = self.word_starts(source) num_to_mask = int(math.ceil(is_word_start.float().sum() * p)) num_inserts = 0 if num_to_mask == 0: return source if self.mask_span_distribution is not None: lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,)) # Make sure we have enough to mask cum_length = torch.cumsum(lengths, 0) while cum_length[-1] < num_to_mask: lengths = torch.cat( [ lengths, self.mask_span_distribution.sample(sample_shape=(num_to_mask,)), ], dim=0, ) cum_length = torch.cumsum(lengths, 0) # Trim to masking budget i = 0 while cum_length[i] < num_to_mask: i += 1 lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1]) num_to_mask = i + 1 lengths = lengths[:num_to_mask] # Handle 0-length mask (inserts) separately lengths = lengths[lengths > 0] num_inserts = num_to_mask - lengths.size(0) num_to_mask -= num_inserts if num_to_mask == 0: return self.add_insertion_noise(source, num_inserts / source.size(0)) assert (lengths > 0).all() else: lengths = torch.ones((num_to_mask,)).long() assert is_word_start[-1] == 0 word_starts = is_word_start.nonzero(as_tuple=False) indices = word_starts[ torch.randperm(word_starts.size(0))[:num_to_mask] ].squeeze(1) mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio source_length = source.size(0) assert source_length - 1 not in indices to_keep = torch.ones(source_length, dtype=torch.bool) is_word_start[ -1 ] = 255 # acts as a long length, so spans don't go over the end of doc if self.replace_length == 0: to_keep[indices] = 0 else: # keep index, but replace it with [MASK] source[indices] = self.mask_idx source[indices[mask_random]] = torch.randint( 1, len(self.vocab), size=(mask_random.sum(),) ) if self.mask_span_distribution is not None: assert len(lengths.size()) == 1 assert lengths.size() == indices.size() lengths -= 1 while indices.size(0) > 0: assert lengths.size() == indices.size() lengths -= is_word_start[indices + 1].long() uncompleted = lengths >= 0 indices = indices[uncompleted] + 1 mask_random = mask_random[uncompleted] lengths = lengths[uncompleted] if self.replace_length != -1: # delete token to_keep[indices] = 0 else: # keep index, but replace it with [MASK] source[indices] = self.mask_idx source[indices[mask_random]] = torch.randint( 1, len(self.vocab), size=(mask_random.sum(),) ) else: # A bit faster when all lengths are 1 while indices.size(0) > 0: uncompleted = is_word_start[indices + 1] == 0 indices = indices[uncompleted] + 1 mask_random = mask_random[uncompleted] if self.replace_length != -1: # delete token to_keep[indices] = 0 else: # keep index, but replace it with [MASK] source[indices] = self.mask_idx source[indices[mask_random]] = torch.randint( 1, len(self.vocab), size=(mask_random.sum(),) ) assert source_length - 1 not in indices source = source[to_keep] if num_inserts > 0: source = self.add_insertion_noise(source, num_inserts / source.size(0)) return source def add_permuted_noise(self, tokens, p): num_words = len(tokens) num_to_permute = math.ceil(((num_words * 2) * p) / 2.0) substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1 tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]] return tokens def add_rolling_noise(self, 
tokens):
        offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
        tokens = torch.cat(
            (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
            dim=0,
        )
        return tokens

    def add_insertion_noise(self, tokens, p):
        if p == 0.0:
            return tokens

        num_tokens = len(tokens)
        n = int(math.ceil(num_tokens * p))

        noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
        noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
        noise_mask[noise_indices] = 1
        result = torch.LongTensor(n + len(tokens)).fill_(-1)

        num_random = int(math.ceil(n * self.random_ratio))
        result[noise_indices[num_random:]] = self.mask_idx
        result[noise_indices[:num_random]] = torch.randint(
            low=1, high=len(self.vocab), size=(num_random,)
        )

        result[~noise_mask] = tokens

        assert (result >= 0).all()
        return result

    def collater(self, samples, pad_to_length=None):
        """Merge a list of samples to form a mini-batch.
        Args:
            samples (List[dict]): samples to collate
        Returns:
            dict: a mini-batch of data
        """
        return collate(
            samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length
        )

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return self.sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used
        when filtering a dataset with ``--max-positions``."""
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        return indices[np.argsort(self.sizes[indices], kind="mergesort")]

    def prefetch(self, indices):
        # prefetch on the wrapped dataset, which is the only dataset this
        # class holds
        if getattr(self.dataset, "supports_prefetch", False):
            self.dataset.prefetch(indices)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, "supports_prefetch", False)
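if __name__ == "__main__":
    # A minimal sketch, not part of the original module: the truncated
    # Poisson distribution over span lengths used when mask_length is
    # "span-poisson", rebuilt standalone so it can be inspected in isolation.
    _lambda = 3.0
    ps, lambda_k, k_fact = [], 1.0, 1.0
    for k in range(128):
        ps.append(math.exp(-_lambda) * lambda_k / k_fact)
        lambda_k *= _lambda
        k_fact *= k + 1
        if ps[-1] < 0.0000001:
            break
    dist = torch.distributions.Categorical(torch.FloatTensor(ps))
    print(dist.sample(sample_shape=(10,)))  # ten sampled span lengths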
bart_ls-main
fairseq-py/fairseq/data/denoising_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import BaseWrapperDataset import numpy as np class StripTokenDataset(BaseWrapperDataset): def __init__(self, dataset, id_to_strip): super().__init__(dataset) self.id_to_strip = id_to_strip def __getitem__(self, index): item = self.dataset[index] while len(item) > 0 and item[-1] == self.id_to_strip: item = item[:-1] while len(item) > 0 and item[0] == self.id_to_strip: item = item[1:] return item
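if __name__ == "__main__":
    # A minimal demo, not part of the original module: strip a marker id
    # (here 2, a typical eos index) from both ends of a toy item; interior
    # occurrences are kept.
    import torch
    ds = StripTokenDataset([torch.LongTensor([2, 5, 2, 6, 2])], id_to_strip=2)
    print(ds[0].tolist())  # [5, 2, 6]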
bart_ls-main
fairseq-py/fairseq/data/strip_token_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import random
from datetime import date, datetime
from typing import List, Optional, Tuple

from fairseq.data import FairseqDataset, FairseqIterableDataset

KOSKI_THREADS = 16

logger = logging.getLogger(__name__)


def _set_up_dataframe(
    table,
    namespace,
    column_projections: List[str] = None,
    where_clause: str = None,
    limit: int = None,
    shuffle: bool = False,
):
    import koski.dataframes as kd

    # Silencing Koski logs
    kd.set_min_log_level(2)
    ctx = kd.create_ctx(
        use_case=kd.UseCase.PROD,
        description="streaming data into fairseq models",
        oncall="fairseq",
    )
    dataframe = kd.data_warehouse(
        namespace=namespace,
        table=table,
        session_ctx=ctx,
    )
    if column_projections:
        dataframe = dataframe.map(column_projections)
    if where_clause:
        dataframe = dataframe.filter(where_clause)
    if limit is not None:
        dataframe = dataframe.limit(limit)
    if shuffle:
        dataframe = dataframe.shuffle(memory_limit=int(12 * 1024 * 1024))  # 12GB
    return dataframe


def _date_from_string(date_string: str) -> date:
    return datetime.strptime(date_string, "%Y-%m-%d").date()


def _date_where_clause(date_ranges) -> Optional[str]:
    if not date_ranges:
        return None
    clauses = []
    for ds_range in date_ranges:
        # sanitize
        old_date = _date_from_string(ds_range[0]).isoformat()
        new_date = _date_from_string(ds_range[1]).isoformat()
        clauses.append(f"(ds >= '{old_date}' AND ds <= '{new_date}')")
    return f"({' OR '.join(clauses)})" if clauses else None


class HiveDataset(FairseqDataset):
    """
    Used to read data from a Hive table. Loads all data into memory on
    instantiation.

    Given a query, this will return tuples, like:
    [('col1 val1', 'col2 val1'), ('col1 val2', 'col2 val2'), ...]

    Args:
        table: Data warehouse table to query from.
        namespace: Data warehouse namespace in which that table lives.
        date_ranges: List of tuples of date ranges from which to fetch data,
            each in yyyy-mm-dd format. Example: [(2019-12-31, 2020-01-01)]
        limit: Limit on the total number of rows to fetch.
        filter_fn: A function that takes in a row and outputs a bool. Can be
            used to filter data at query time to save memory.
    """

    def __init__(
        self,
        table: str,
        namespace: str,
        date_ranges: List[Tuple[str, str]],
        limit=None,
        filter_fn=None,
    ) -> None:
        super().__init__()
        dataframe = _set_up_dataframe(
            table=table,
            namespace=namespace,
            where_clause=_date_where_clause(date_ranges),
            limit=limit,
        )
        logger.info("Loading Hive data...")
        self.data = []
        for c in dataframe.rows(num_worker_threads=KOSKI_THREADS):
            if filter_fn is not None and not filter_fn(c):
                continue
            self.data.append(c)
        logger.info(f"Finished loading {len(self.data)} rows")

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        for c in self.data:
            yield c

    def __getitem__(self, index):
        return self.data[index]


class StreamingHiveDataset(FairseqIterableDataset):
    """Used to stream data from a Hive table.

    Given a query, this will return tuples, like:
    [('col1 val1', 'col2 val1'), ('col1 val2', 'col2 val2'), ...]

    Args:
        table: Hive table to query from.
        namespace: Data warehouse namespace in which that table lives.
        limit: Limit on the total number of rows to fetch.
        where_clause: SQL filter to be appended (via 'AND') to the query.
            Note that only Koski functions are supported.
        date_ranges: List of tuples of date ranges from which to fetch data,
            each in yyyy-mm-dd format.
Example: [(2019-12-31, 2020-01-01)] shuffle: Performs a total shuffle across data taken from date_ranges. Note that fresh_date_ranges is not shuffled. fresh_date_ranges: Date ranges considered 'fresh' will be sampled at a constant ratio with the rest of the data. To ensure the ratio is accurate, these dates should not overlap with date_ranges. fresh_ratio: Ratio of date_ranges to fresh_date_ranges. Must be a positive integer. For example, if 1/4 of the data should come from fresh_date_ranges, fresh_ratio should be 4. """ def __init__( self, table: str, namespace: str, limit: int, columns: Optional[List[str]] = None, where_clause: Optional[str] = None, date_ranges: Optional[List[Tuple[str, str]]] = None, shuffle=False, shuffle_col: str = "thread_key", fresh_date_ranges: Optional[List[Tuple[str, str]]] = None, fresh_ratio: int = 4, ) -> None: super().__init__() self.table = table self.namespace = namespace self.limit = limit self.columns = columns self.given_filter = where_clause self.date_ranges = date_ranges self.fresh_date_ranges = fresh_date_ranges self.fresh_ratio = fresh_ratio self.shuffle = shuffle self.shuffle_col = shuffle_col def __len__(self): return self.limit def __iter__(self): iterable = None if self.shuffle: iterable = self._shuffled_iterable() else: iterable = self._ordered_iterable() fresh_iterable = None if self.fresh_date_ranges: fresh_iterable = self._fresh_iterable() row_count = 0 while row_count < self.limit: if fresh_iterable is not None and row_count % self.fresh_ratio == 0: yield next(fresh_iterable) else: yield next(iterable) row_count += 1 def _fresh_iterable(self): dataframe = _set_up_dataframe( table=self.table, namespace=self.namespace, column_projections=self.columns, where_clause=self._build_where_clause( date_clause=_date_where_clause(self.fresh_date_ranges), ), limit=self.limit, ) for c in dataframe.rows(num_worker_threads=KOSKI_THREADS): yield c def _shuffled_iterable(self): # Run through training examples in random slices to shuffle num_slices = 100 slices = [i for i in range(num_slices)] random.shuffle(slices) for i in slices: dataframe = _set_up_dataframe( table=self.table, namespace=self.namespace, column_projections=self.columns, where_clause=self._build_where_clause( date_clause=_date_where_clause(self.date_ranges), shuffle_clause=f"abs(hash({self.shuffle_col}) % {num_slices}) = {i}", ), limit=self.limit, ) for c in dataframe.rows(num_worker_threads=KOSKI_THREADS): yield c def _ordered_iterable(self): dataframe = _set_up_dataframe( table=self.table, namespace=self.namespace, column_projections=self.columns, where_clause=self._build_where_clause( date_clause=_date_where_clause(self.date_ranges) ), limit=self.limit, ) for c in dataframe.rows(num_worker_threads=KOSKI_THREADS): yield c def _build_where_clause(self, date_clause=None, shuffle_clause=None) -> str: clauses = [ self.given_filter, date_clause, shuffle_clause, ] return " AND ".join([f"({x})" for x in clauses if x])
bart_ls-main
fairseq-py/fairseq/data/fb_hive_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. try: from collections.abc import Iterable except ImportError: from collections import Iterable import contextlib import itertools import logging import re import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager from fairseq import utils import os logger = logging.getLogger(__name__) def infer_language_pair(path): """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx""" src, dst = None, None for filename in PathManager.ls(path): parts = filename.split(".") if len(parts) >= 3 and len(parts[1].split("-")) == 2: return parts[1].split("-") return src, dst def collate_tokens( values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False, pad_to_length=None, pad_to_multiple=1, pad_to_bsz=None, ): """Convert a list of 1d tensors into a padded 2d tensor.""" size = max(v.size(0) for v in values) size = size if pad_to_length is None else max(size, pad_to_length) if pad_to_multiple != 1 and size % pad_to_multiple != 0: size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple) batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz) res = values[0].new(batch_size, size).fill_(pad_idx) def copy_tensor(src, dst): assert dst.numel() == src.numel() if move_eos_to_beginning: if eos_idx is None: # if no eos_idx is specified, then use the last token in src dst[0] = src[-1] else: dst[0] = eos_idx dst[1:] = src[:-1] else: dst.copy_(src) for i, v in enumerate(values): copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)]) return res def load_indexed_dataset( path, dictionary=None, dataset_impl=None, combine=False, default="cached" ): """A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use the 'cached' implementation by default. combine (bool, optional): automatically load and combine multiple datasets. For example, if *path* is 'data-bin/train', then we will combine 'data-bin/train', 'data-bin/train1', ... and return a single ConcatDataset instance. 
""" import fairseq.data.indexed_dataset as indexed_dataset from fairseq.data.concat_dataset import ConcatDataset datasets = [] for k in itertools.count(): path_k = path + (str(k) if k > 0 else "") try: path_k = indexed_dataset.get_indexed_dataset_to_local(path_k) except Exception as e: if "StorageException: [404] Path not found" in str(e): logger.warning(f"path_k: {e} not found") else: raise e dataset_impl_k = dataset_impl if dataset_impl_k is None: dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k) dataset = indexed_dataset.make_dataset( path_k, impl=dataset_impl_k or default, fix_lua_indexing=True, dictionary=dictionary, ) if dataset is None: break logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k)) datasets.append(dataset) if not combine: break if len(datasets) == 0: return None elif len(datasets) == 1: return datasets[0] else: return ConcatDataset(datasets) @contextlib.contextmanager def numpy_seed(seed, *addl_seeds): """Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward""" if seed is None: yield return if len(addl_seeds) > 0: seed = int(hash((seed, *addl_seeds)) % 1e6) state = np.random.get_state() np.random.seed(seed) try: yield finally: np.random.set_state(state) def collect_filtered(function, iterable, filtered): """ Similar to :func:`filter` but collects filtered elements in ``filtered``. Args: function (callable): function that returns ``False`` for elements that should be filtered iterable (iterable): iterable to filter filtered (list): list to store filtered elements """ for el in iterable: if function(el): yield el else: filtered.append(el) def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False): def compare_leq(a, b): return a <= b if not isinstance(a, tuple) else max(a) <= b def check_size(idx): if isinstance(max_positions, float) or isinstance(max_positions, int): return size_fn(idx) <= max_positions elif isinstance(max_positions, dict): idx_size = size_fn(idx) assert isinstance(idx_size, dict) intersect_keys = set(max_positions.keys()) & set(idx_size.keys()) return all( all( a is None or b is None or a <= b for a, b in zip(idx_size[key], max_positions[key]) ) for key in intersect_keys ) else: # For MultiCorpusSampledDataset, will generalize it later if not isinstance(size_fn(idx), Iterable): return all(size_fn(idx) <= b for b in max_positions) return all( a is None or b is None or a <= b for a, b in zip(size_fn(idx), max_positions) ) ignored = [] itr = collect_filtered(check_size, indices, ignored) indices = np.fromiter(itr, dtype=np.int64, count=-1) return indices, ignored def filter_by_size(indices, dataset, max_positions, raise_exception=False): """ [deprecated] Filter indices based on their size. Use `FairseqDataset::filter_indices_by_size` instead. Args: indices (List[int]): ordered list of dataset indices dataset (FairseqDataset): fairseq dataset instance max_positions (tuple): filter elements larger than this size. Comparisons are done component-wise. raise_exception (bool, optional): if ``True``, raise an exception if any elements are filtered (default: False). """ warnings.warn( "data_utils.filter_by_size is deprecated. 
" "Use `FairseqDataset::filter_indices_by_size` instead.", stacklevel=2, ) if isinstance(max_positions, float) or isinstance(max_positions, int): if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray): ignored = indices[dataset.sizes[indices] > max_positions].tolist() indices = indices[dataset.sizes[indices] <= max_positions] elif ( hasattr(dataset, "sizes") and isinstance(dataset.sizes, list) and len(dataset.sizes) == 1 ): ignored = indices[dataset.sizes[0][indices] > max_positions].tolist() indices = indices[dataset.sizes[0][indices] <= max_positions] else: indices, ignored = _filter_by_size_dynamic( indices, dataset.size, max_positions ) else: indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions) if len(ignored) > 0 and raise_exception: raise Exception( ( "Size of sample #{} is invalid (={}) since max_positions={}, " "skip this example with --skip-invalid-size-inputs-valid-test" ).format(ignored[0], dataset.size(ignored[0]), max_positions) ) if len(ignored) > 0: logger.warning( ( "{} samples have invalid sizes and will be skipped, " "max_positions={}, first few sample ids={}" ).format(len(ignored), max_positions, ignored[:10]) ) return indices def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ if max_sizes is None: return indices, [] if type(max_sizes) in (int, float): max_src_size, max_tgt_size = max_sizes, max_sizes else: max_src_size, max_tgt_size = max_sizes if tgt_sizes is None: ignored = indices[src_sizes[indices] > max_src_size] else: ignored = indices[ (src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size) ] if len(ignored) > 0: if tgt_sizes is None: indices = indices[src_sizes[indices] <= max_src_size] else: indices = indices[ (src_sizes[indices] <= max_src_size) & (tgt_sizes[indices] <= max_tgt_size) ] return indices, ignored.tolist() def batch_by_size( indices, num_tokens_fn, num_tokens_vec=None, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, fixed_shapes=None, ): """ Yield mini-batches of indices bucketed by size. Batches may contain sequences of different lengths. Args: indices (List[int]): ordered list of dataset indices num_tokens_fn (callable): function that returns the number of tokens at a given index num_tokens_vec (List[int], optional): precomputed vector of the number of tokens for each index in indices (to enable faster batch generation) max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). required_batch_size_multiple (int, optional): require batch size to be less than N or a multiple of N (default: 1). fixed_shapes (List[Tuple[int, int]], optional): if given, batches will only be created with the given shapes. *max_sentences* and *required_batch_size_multiple* will be ignored (default: None). 
""" try: from fairseq.data.data_utils_fast import ( batch_by_size_fn, batch_by_size_vec, batch_fixed_shapes_fast, ) except ImportError: raise ImportError( "Please build Cython components with: " "`python setup.py build_ext --inplace`" ) except ValueError: raise ValueError( "Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`." ) # added int() to avoid TypeError: an integer is required max_tokens = ( int(max_tokens) if max_tokens is not None else -1 ) max_sentences = max_sentences if max_sentences is not None else -1 bsz_mult = required_batch_size_multiple if not isinstance(indices, np.ndarray): indices = np.fromiter(indices, dtype=np.int64, count=-1) if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray): num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1) if fixed_shapes is None: if num_tokens_vec is None: return batch_by_size_fn( indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult, ) else: return batch_by_size_vec( indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ) else: fixed_shapes = np.array(fixed_shapes, dtype=np.int64) sort_order = np.lexsort( [ fixed_shapes[:, 1].argsort(), # length fixed_shapes[:, 0].argsort(), # bsz ] ) fixed_shapes_sorted = fixed_shapes[sort_order] return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted) def post_process(sentence: str, symbol: str): if symbol == "sentencepiece": sentence = sentence.replace(" ", "").replace("\u2581", " ").strip() elif symbol == "wordpiece": sentence = sentence.replace(" ", "").replace("_", " ").strip() elif symbol == "letter": sentence = sentence.replace(" ", "").replace("|", " ").strip() elif symbol == "silence": import re sentence = sentence.replace("<SIL>", "") sentence = re.sub(' +', ' ', sentence).strip() elif symbol == "_EOW": sentence = sentence.replace(" ", "").replace("_EOW", " ").strip() elif symbol in {"subword_nmt", "@@ ", "@@"}: if symbol == "subword_nmt": symbol = "@@ " sentence = (sentence + " ").replace(symbol, "").rstrip() elif symbol == "none": pass elif symbol is not None: raise NotImplementedError(f"Unknown post_process option: {symbol}") return sentence def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. 
mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans """ bsz, all_sz = shape mask = np.full((bsz, all_sz), False) all_num_mask = int( # add a random number for probabilistic rounding mask_prob * all_sz / float(mask_length) + np.random.rand() ) all_num_mask = max(min_masks, all_num_mask) mask_idcs = [] for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() num_mask = int( # add a random number for probabilistic rounding mask_prob * sz / float(mask_length) + np.random.rand() ) num_mask = max(min_masks, num_mask) else: sz = all_sz num_mask = all_num_mask if mask_type == "static": lengths = np.full(num_mask, mask_length) elif mask_type == "uniform": lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask) elif mask_type == "normal": lengths = np.random.normal(mask_length, mask_other, size=num_mask) lengths = [max(1, int(round(x))) for x in lengths] elif mask_type == "poisson": lengths = np.random.poisson(mask_length, size=num_mask) lengths = [int(round(x)) for x in lengths] else: raise Exception("unknown mask selection " + mask_type) if sum(lengths) == 0: lengths[0] = min(mask_length, sz - 1) if no_overlap: mask_idc = [] def arrange(s, e, length, keep_length): span_start = np.random.randint(s, e - length) mask_idc.extend(span_start + i for i in range(length)) new_parts = [] if span_start - s - min_space >= keep_length: new_parts.append((s, span_start - min_space + 1)) if e - span_start - keep_length - min_space > keep_length: new_parts.append((span_start + length + min_space, e)) return new_parts parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): lens = np.fromiter( (e - s if e - s >= length + min_space else 0 for s, e in parts), np.int, ) l_sum = np.sum(lens) if l_sum == 0: break probs = lens / np.sum(lens) c = np.random.choice(len(parts), p=probs) s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = np.asarray(mask_idc) else: min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 mask_idc = np.random.choice(sz - min_len, num_mask, replace=False) mask_idc = np.asarray( [ mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j]) ] ) mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): if len(mask_idc) > min_len: mask_idc = np.random.choice(mask_idc, min_len, replace=False) mask[i, mask_idc] = True return mask def get_mem_usage(): try: import psutil mb = 1024 * 1024 return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb" except ImportError: return "N/A" # lens: torch.LongTensor # returns: torch.BoolTensor def lengths_to_padding_mask(lens): bsz, max_lens = lens.size(0), torch.max(lens).item() mask = torch.arange(max_lens).to(lens.device).view(1, max_lens) mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens) return mask # lens: torch.LongTensor # returns: torch.BoolTensor def lengths_to_mask(lens): return ~lengths_to_padding_mask(lens) def get_buckets(sizes, num_buckets): buckets = np.unique( np.percentile( sizes, np.linspace(0, 100, num_buckets + 1), interpolation='lower', )[1:] ) return 
buckets def get_bucketed_sizes(orig_sizes, buckets): sizes = np.copy(orig_sizes) assert np.min(sizes) >= 0 start_val = -1 for end_val in buckets: mask = (sizes > start_val) & (sizes <= end_val) sizes[mask] = end_val start_val = end_val return sizes def _find_extra_valid_paths(dataset_path: str) -> set: paths = utils.split_paths(dataset_path) all_valid_paths = set() for sub_dir in paths: contents = PathManager.ls(sub_dir) valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None] all_valid_paths |= {os.path.basename(p) for p in valid_paths} # Remove .bin, .idx etc roots = {os.path.splitext(p)[0] for p in all_valid_paths} return roots def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None: """Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored.""" if ( train_cfg.dataset.ignore_unused_valid_subsets or train_cfg.dataset.combine_valid_subsets or train_cfg.dataset.disable_validation or not hasattr(train_cfg.task, "data") ): return other_paths = _find_extra_valid_paths(train_cfg.task.data) specified_subsets = train_cfg.dataset.valid_subset.split(",") ignored_paths = [p for p in other_paths if p not in specified_subsets] if ignored_paths: advice = "Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them." msg = f"Valid paths {ignored_paths} will be ignored. {advice}" raise ValueError(msg)
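if __name__ == "__main__":
    # A minimal demo, not part of the original module: right- vs left-padding
    # with collate_tokens, and the state-restoring behaviour of numpy_seed,
    # on toy inputs.
    a, b = torch.LongTensor([1, 2, 3]), torch.LongTensor([4])
    print(collate_tokens([a, b], pad_idx=0).tolist())                 # [[1, 2, 3], [4, 0, 0]]
    print(collate_tokens([a, b], pad_idx=0, left_pad=True).tolist())  # [[1, 2, 3], [0, 0, 4]]
    with numpy_seed(13):
        first = np.random.rand()
    with numpy_seed(13):
        assert np.random.rand() == first  # same seed, same draw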
bart_ls-main
fairseq-py/fairseq/data/data_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import BaseWrapperDataset class PrependTokenDataset(BaseWrapperDataset): def __init__(self, dataset, token=None): super().__init__(dataset) self.token = token if token is not None: self._sizes = np.array(dataset.sizes) + 1 else: self._sizes = dataset.sizes def __getitem__(self, idx): item = self.dataset[idx] if self.token is not None: item = torch.cat([item.new([self.token]), item]) return item @property def sizes(self): return self._sizes def num_tokens(self, index): n = self.dataset.num_tokens(index) if self.token is not None: n += 1 return n def size(self, index): n = self.dataset.size(index) if self.token is not None: n += 1 return n
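

# Illustrative sketch with a toy wrapped dataset; the token id 0 and the
# sizes are arbitrary values, not real vocabulary indices.
if __name__ == "__main__":
    class _ToyDataset:
        sizes = np.array([2, 3])

        def __getitem__(self, idx):
            return torch.arange(1, int(self.sizes[idx]) + 1)

        def __len__(self):
            return len(self.sizes)

    ds = PrependTokenDataset(_ToyDataset(), token=0)
    print(ds[1])      # tensor([0, 1, 2, 3])
    print(ds.sizes)   # [3 4] -- every size grows by one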
bart_ls-main
fairseq-py/fairseq/data/prepend_token_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import FairseqDataset class TransformEosDataset(FairseqDataset): """A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS. Note that the transformation is applied in :func:`collater`. Args: dataset (~fairseq.data.FairseqDataset): dataset to wrap eos (int): index of the end-of-sentence symbol append_eos_to_src (bool, optional): append EOS to the end of src remove_eos_from_src (bool, optional): remove EOS from the end of src append_eos_to_tgt (bool, optional): append EOS to the end of tgt remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt """ def __init__( self, dataset, eos, append_eos_to_src=False, remove_eos_from_src=False, append_eos_to_tgt=False, remove_eos_from_tgt=False, has_target=True, ): if not isinstance(dataset, FairseqDataset): raise ValueError("dataset must be an instance of FairseqDataset") if append_eos_to_src and remove_eos_from_src: raise ValueError("cannot combine append_eos_to_src and remove_eos_from_src") if append_eos_to_tgt and remove_eos_from_tgt: raise ValueError("cannot combine append_eos_to_tgt and remove_eos_from_tgt") self.dataset = dataset self.eos = torch.LongTensor([eos]) self.append_eos_to_src = append_eos_to_src self.remove_eos_from_src = remove_eos_from_src self.append_eos_to_tgt = append_eos_to_tgt self.remove_eos_from_tgt = remove_eos_from_tgt self.has_target = has_target # precompute how we should adjust the reported sizes self._src_delta = 0 self._src_delta += 1 if append_eos_to_src else 0 self._src_delta -= 1 if remove_eos_from_src else 0 self._tgt_delta = 0 self._tgt_delta += 1 if append_eos_to_tgt else 0 self._tgt_delta -= 1 if remove_eos_from_tgt else 0 self._checked_src = False self._checked_tgt = False def _check_src(self, src, expect_eos): if not self._checked_src: assert (src[-1] == self.eos[0]) == expect_eos self._checked_src = True def _check_tgt(self, tgt, expect_eos): if self.has_target and not self._checked_tgt: assert (tgt[-1] == self.eos[0]) == expect_eos self._checked_tgt = True def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples): def transform(item): if self.append_eos_to_src: self.eos = self.eos.to(device=item["source"].device) self._check_src(item["source"], expect_eos=False) item["source"] = torch.cat([item["source"], self.eos]) if self.remove_eos_from_src: self.eos = self.eos.to(device=item["source"].device) self._check_src(item["source"], expect_eos=True) item["source"] = item["source"][:-1] if self.append_eos_to_tgt: self.eos = self.eos.to(device=item["target"].device) self._check_tgt(item["target"], expect_eos=False) item["target"] = torch.cat([item["target"], self.eos]) if self.remove_eos_from_tgt: self.eos = self.eos.to(device=item["target"].device) self._check_tgt(item["target"], expect_eos=True) item["target"] = item["target"][:-1] return item samples = list(map(transform, samples)) return self.dataset.collater(samples) def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): if self.has_target: src_len, tgt_len = self.dataset.size(index) return (src_len + self._src_delta, tgt_len + self._tgt_delta) else: return self.dataset.size(index) def ordered_indices(self): # NOTE: we assume that the ordering does not change based on the # addition or removal of eos return 
self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
bart_ls-main
fairseq-py/fairseq/data/transform_eos_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import BaseWrapperDataset class ColorizeDataset(BaseWrapperDataset): """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ def __init__(self, dataset, color_getter): super().__init__(dataset) self.color_getter = color_getter def collater(self, samples): base_collate = super().collater(samples) if len(base_collate) > 0: base_collate["net_input"]["colors"] = torch.tensor( list(self.color_getter(self.dataset, s["id"]) for s in samples), dtype=torch.long, ) return base_collate
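

# Illustrative sketch; the stub dataset and color getter are made up. The
# getter receives (dataset, sample_id) and its value lands in net_input.
if __name__ == "__main__":
    class _StubDataset:
        def __getitem__(self, index):
            return {"id": index}

        def __len__(self):
            return 4

        def collater(self, samples):
            return {"net_input": {}}

    ds = ColorizeDataset(_StubDataset(), color_getter=lambda dataset, sample_id: sample_id % 2)
    batch = ds.collater([ds[0], ds[3]])
    print(batch["net_input"]["colors"])  # tensor([0, 1])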
bart_ls-main
fairseq-py/fairseq/data/colorize_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import FairseqDataset class RawLabelDataset(FairseqDataset): def __init__(self, labels): super().__init__() self.labels = labels def __getitem__(self, index): return self.labels[index] def __len__(self): return len(self.labels) def collater(self, samples): return torch.tensor(samples)
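

# Illustrative sketch: RawLabelDataset is just an indexable list whose
# collater stacks the samples into a tensor.
if __name__ == "__main__":
    ds = RawLabelDataset([0, 1, 1, 0])
    print(len(ds), ds[2])               # 4 1
    print(ds.collater([ds[0], ds[1]]))  # tensor([0, 1])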
bart_ls-main
fairseq-py/fairseq/data/raw_label_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import BaseWrapperDataset class ListDataset(BaseWrapperDataset): def __init__(self, dataset, sizes=None): super().__init__(dataset) self._sizes = sizes def __iter__(self): for x in self.dataset: yield x def collater(self, samples): return samples @property def sizes(self): return self._sizes def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] def set_epoch(self, epoch): pass
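

# Illustrative sketch: ListDataset wraps a plain list and collates by
# returning the samples unchanged; sizes must be supplied by the caller.
if __name__ == "__main__":
    ds = ListDataset(["a", "bb", "ccc"], sizes=[1, 2, 3])
    print(ds[0], ds.num_tokens(2))      # a 3
    print(ds.collater([ds[0], ds[2]]))  # ['a', 'ccc']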
bart_ls-main
fairseq-py/fairseq/data/list_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from collections import OrderedDict from typing import Dict, Sequence import numpy as np from . import FairseqDataset, LanguagePairDataset logger = logging.getLogger(__name__) class RoundRobinZipDatasets(FairseqDataset): """Zip multiple :class:`~fairseq.data.FairseqDataset` instances together. Shorter datasets are repeated in a round-robin fashion to match the length of the longest one. Args: datasets (Dict[~fairseq.data.FairseqDataset]): a dictionary of :class:`~fairseq.data.FairseqDataset` instances. eval_key (str, optional): a key used at evaluation time that causes this instance to pass-through batches from *datasets[eval_key]*. """ def __init__(self, datasets, eval_key=None): super().__init__() if isinstance(datasets, dict): datasets = OrderedDict(datasets) assert isinstance(datasets, OrderedDict) assert datasets, "Can't make a RoundRobinZipDatasets out of nothing" for dataset in datasets.values(): assert isinstance(dataset, FairseqDataset) self.datasets = datasets self.eval_key = eval_key self.longest_dataset_key = max(datasets, key=lambda k: len(datasets[k])) self.longest_dataset = datasets[self.longest_dataset_key] self._ordered_indices: Dict[str, Sequence[int]] = None def _map_index(self, key, index): assert ( self._ordered_indices is not None ), "Must call RoundRobinZipDatasets.ordered_indices() first" o = self._ordered_indices[key] return o[index % len(o)] def __getitem__(self, index): if self.eval_key is None: return OrderedDict( [ (key, dataset[self._map_index(key, index)]) for key, dataset in self.datasets.items() ] ) else: # at evaluation time it's useful to pass-through batches from a single key return self.datasets[self.eval_key][self._map_index(self.eval_key, index)] def __len__(self): if self._ordered_indices is not None: return len(self._ordered_indices[self.longest_dataset_key]) return len(self.longest_dataset) def collater(self, samples): """Merge a list of samples to form a mini-batch.""" if len(samples) == 0: return None if self.eval_key is None: return OrderedDict( [ (key, dataset.collater([sample[key] for sample in samples])) for key, dataset in self.datasets.items() ] ) else: # at evaluation time it's useful to pass-through batches from a single key return self.datasets[self.eval_key].collater(samples) def num_tokens(self, index): """Return an example's length (number of tokens), used for batching.""" # TODO make it configurable whether to use max() or sum() here return max( dataset.num_tokens(self._map_index(key, index)) for key, dataset in self.datasets.items() ) def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return { key: dataset.size(self._map_index(key, index)) for key, dataset in self.datasets.items() } def ordered_indices(self): """Ordered indices for batching.""" if self._ordered_indices is None: # Call the underlying dataset's ordered_indices() here, so that we # get the same random ordering as we would have from using the # underlying sub-datasets directly. self._ordered_indices = OrderedDict( [ (key, dataset.ordered_indices()) for key, dataset in self.datasets.items() ] ) return np.arange(len(self)) def filter_indices_by_size(self, indices, max_positions=None): """ Filter each sub-dataset independently, then update the round robin to work on the filtered sub-datasets. 
""" def _deep_until_language_pair(dataset): if isinstance(dataset, LanguagePairDataset): return dataset if hasattr(dataset, "tgt_dataset"): return _deep_until_language_pair(dataset.tgt_dataset) if hasattr(dataset, "dataset"): return _deep_until_language_pair(dataset.dataset) raise Exception(f"Don't know how to unwrap this dataset: {dataset}") if not isinstance(max_positions, dict): max_positions = {k: max_positions for k in self.datasets.keys()} ignored_some = False for key, dataset in self.datasets.items(): dataset = _deep_until_language_pair(dataset) self._ordered_indices[key], ignored = dataset.filter_indices_by_size( self._ordered_indices[key], max_positions[key] ) if len(ignored) > 0: ignored_some = True logger.warning( f"{len(ignored)} samples from {key} have invalid sizes and will be skipped, " f"max_positions={max_positions[key]}, first few sample ids={ignored[:10]}" ) # Since we are modifying in place the _ordered_indices, # it's not possible anymore to return valid ignored indices. # Hopefully the extra debug information print above should be enough to debug. # Ideally we would receive ignore_invalid_inputs so that we could have # a proper error message. return (np.arange(len(self)), [0] if ignored_some else []) @property def supports_prefetch(self): return all( getattr(dataset, "supports_prefetch", False) for dataset in self.datasets.values() ) def prefetch(self, indices): for key, dataset in self.datasets.items(): dataset.prefetch([self._map_index(key, index) for index in indices])
bart_ls-main
fairseq-py/fairseq/data/round_robin_zip_datasets.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread

import numpy as np
import torch
from fairseq.data import data_utils


logger = logging.getLogger(__name__)

# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()


class CountingIterator(object):
    """Wrapper around an iterable that maintains the iteration count.

    Args:
        iterable (iterable): iterable to wrap
        start (int): starting iteration count. Note that this doesn't
            actually advance the iterator.
        total (int): override the iterator length returned by ``__len__``.
            This can be used to truncate *iterator*.

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable, start=None, total=None):
        self._itr = iter(iterable)
        self.n = start or getattr(iterable, "n", 0)
        self.total = total or self.n + len(iterable)

    def __len__(self):
        return self.total

    def __iter__(self):
        return self

    def __next__(self):
        if not self.has_next():
            raise StopIteration
        try:
            x = next(self._itr)
        except StopIteration:
            raise IndexError(
                f"Iterator expected to have length {self.total}, "
                f"but exhausted at position {self.n}."
            )
        self.n += 1
        return x

    def has_next(self):
        """Whether the iterator has more elements to yield."""
        return self.n < self.total

    def skip(self, n):
        """Fast-forward the iterator by skipping n elements."""
        for _ in range(n):
            next(self)
        return self

    def take(self, n):
        """Truncate the iterator to n elements at most."""
        self.total = min(self.total, n)
        # Propagate this change to the underlying iterator
        if hasattr(self._itr, "take"):
            self._itr.take(max(n - self.n, 0))
        return self


class EpochBatchIterating(object):
    def __len__(self) -> int:
        raise NotImplementedError

    @property
    def next_epoch_idx(self):
        raise NotImplementedError

    def next_epoch_itr(
        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
    ):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator (default: True).
            fix_batches_to_gpus (bool, optional): ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching (default: False).
            set_dataset_epoch (bool, optional): update the wrapped Dataset with
                the new epoch number (default: True).
        """
        raise NotImplementedError

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        raise NotImplementedError

    @property
    def iterations_in_epoch(self) -> int:
        """The number of consumed batches in the current epoch."""
        raise NotImplementedError

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        raise NotImplementedError

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        raise NotImplementedError

    @property
    def first_batch(self):
        return "DUMMY"


class StreamingEpochBatchIterator(EpochBatchIterating):
    """A streaming-style iterator over a :class:`torch.utils.data.IterableDataset`.

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        max_sentences: batch size
        collate_fn (callable): merges a list of samples to form a mini-batch
        num_workers (int, optional): how many subprocesses to use for data loading.
0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). buffer_size (int, optional): the number of batches to keep ready in the queue. Helps speeding up dataloading. When buffer_size is zero, the default torch.utils.data.DataLoader preloading is used. timeout (int, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative (default: ``0``). """ def __init__( self, dataset, max_sentences=1, collate_fn=None, epoch=1, num_workers=0, buffer_size=0, timeout=0, ): assert isinstance(dataset, torch.utils.data.IterableDataset) self.dataset = dataset self.max_sentences = max_sentences self.collate_fn = collate_fn self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self.num_workers = num_workers # This upper limit here is to prevent people from abusing this feature # in a shared computing environment. self.buffer_size = min(buffer_size, 20) self.timeout = timeout self._current_epoch_iterator = None @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._current_epoch_iterator is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): self.epoch = self.next_epoch_idx if set_dataset_epoch and hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(self.epoch) self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle) return self._current_epoch_iterator def end_of_epoch(self) -> bool: return not self._current_epoch_iterator.has_next() @property def iterations_in_epoch(self) -> int: if self._current_epoch_iterator is not None: return self._current_epoch_iterator.n return 0 def state_dict(self): return { "epoch": self.epoch, } def load_state_dict(self, state_dict): self.epoch = state_dict["epoch"] def _get_iterator_for_epoch(self, epoch, shuffle, offset=0): if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" # Create data loader worker_init_fn = getattr(self.dataset, "worker_init_fn", None) itr = torch.utils.data.DataLoader( self.dataset, batch_size=self.max_sentences, collate_fn=self.collate_fn, num_workers=self.num_workers, timeout=self.timeout, worker_init_fn=worker_init_fn, pin_memory=True, ) # Wrap with a BufferedIterator if needed if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) # Wrap with CountingIterator itr = CountingIterator(itr, start=offset) return itr class EpochBatchIterator(EpochBatchIterating): """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`. Compared to :class:`torch.utils.data.DataLoader`, this iterator: - can be reused across multiple epochs with the :func:`next_epoch_itr` method (optionally shuffled between epochs) - can be serialized/deserialized with the :func:`state_dict` and :func:`load_state_dict` methods - supports sharding with the *num_shards* and *shard_id* arguments Args: dataset (~torch.utils.data.Dataset): dataset from which to load the data collate_fn (callable): merges a list of samples to form a mini-batch batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of indices, or a callable to create such an iterator (~torch.utils.data.Sampler). A callable batch_sampler will be called for each epoch to enable per epoch dynamic batch iterators defined by this callable batch_sampler. 
seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). buffer_size (int, optional): the number of batches to keep ready in the queue. Helps speeding up dataloading. When buffer_size is zero, the default torch.utils.data.DataLoader preloading is used. timeout (int, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative (default: ``0``). disable_shuffling (bool, optional): force disable shuffling (default: ``False``). """ def __init__( self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, buffer_size=0, timeout=0, disable_shuffling=False, ): assert isinstance(dataset, torch.utils.data.Dataset) self.dataset = dataset self.collate_fn = collate_fn self.batch_sampler = batch_sampler self._frozen_batches = ( tuple(batch_sampler) if not callable(batch_sampler) else None ) self.seed = seed self.num_shards = num_shards self.shard_id = shard_id self.num_workers = num_workers # This upper limit here is to prevent people from abusing this feature # in a shared computing environment. self.buffer_size = min(buffer_size, 20) self.timeout = timeout self.disable_shuffling = disable_shuffling self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self.shuffle = not disable_shuffling self._cur_epoch_itr = None self._next_epoch_itr = None self._supports_prefetch = getattr(dataset, "supports_prefetch", False) @property def frozen_batches(self): if self._frozen_batches is None: self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch)) return self._frozen_batches @property def first_batch(self): if len(self.frozen_batches) == 0: raise Exception( "The dataset is empty. This could indicate " "that all elements in the dataset have been skipped. " "Try increasing the max number of allowed tokens or using " "a larger dataset." ) if getattr(self.dataset, "supports_fetch_outside_dataloader", True): return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]]) else: return "DUMMY" def __len__(self): return int(math.ceil(len(self.frozen_batches) / float(self.num_shards))) @property def n(self): return self.iterations_in_epoch @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._next_epoch_itr is not None: return self.epoch elif self._cur_epoch_itr is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): """Return a new iterator over the dataset. Args: shuffle (bool, optional): shuffle batches before returning the iterator (default: True). fix_batches_to_gpus (bool, optional): ensure that batches are always allocated to the same shards across epochs. Requires that :attr:`dataset` supports prefetching (default: False). set_dataset_epoch (bool, optional): update the wrapped Dataset with the new epoch number (default: True). 
""" if self.disable_shuffling: shuffle = False prev_epoch = self.epoch self.epoch = self.next_epoch_idx if set_dataset_epoch and hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(self.epoch) if self._next_epoch_itr is not None: self._cur_epoch_itr = self._next_epoch_itr self._next_epoch_itr = None else: if callable(self.batch_sampler) and prev_epoch != self.epoch: # reset _frozen_batches to refresh the next epoch self._frozen_batches = None self._cur_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus, ) self.shuffle = shuffle return self._cur_epoch_itr def end_of_epoch(self) -> bool: """Returns whether the most recent epoch iterator has been exhausted""" return not self._cur_epoch_itr.has_next() @property def iterations_in_epoch(self): """The number of consumed batches in the current epoch.""" if self._cur_epoch_itr is not None: return self._cur_epoch_itr.n elif self._next_epoch_itr is not None: return self._next_epoch_itr.n return 0 def state_dict(self): """Returns a dictionary containing a whole state of the iterator.""" if self.end_of_epoch(): epoch = self.epoch + 1 iter_in_epoch = 0 else: epoch = self.epoch iter_in_epoch = self.iterations_in_epoch return { "version": 2, "epoch": epoch, "iterations_in_epoch": iter_in_epoch, "shuffle": self.shuffle, } def load_state_dict(self, state_dict): """Copies the state of the iterator from the given *state_dict*.""" self.epoch = state_dict["epoch"] itr_pos = state_dict.get("iterations_in_epoch", 0) version = state_dict.get("version", 1) if itr_pos > 0: # fast-forward epoch iterator self._next_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle=state_dict.get("shuffle", True), offset=itr_pos, ) if self._next_epoch_itr is None: if version == 1: # legacy behavior: we finished the epoch, increment epoch counter self.epoch += 1 else: raise RuntimeError( "Cannot resume training due to dataloader mismatch, please " "report this to the fairseq developers. You can relaunch " "training with `--reset-dataloader` and it should work." 
) else: self._next_epoch_itr = None def _get_iterator_for_epoch( self, epoch, shuffle, fix_batches_to_gpus=False, offset=0 ): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): np.random.shuffle(batches) return batches if self._supports_prefetch: batches = self.frozen_batches if shuffle and not fix_batches_to_gpus: batches = shuffle_batches(list(batches), self.seed + epoch) batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) self.dataset.prefetch([i for s in batches for i in s]) if shuffle and fix_batches_to_gpus: batches = shuffle_batches(batches, self.seed + epoch + self.shard_id) else: if shuffle: batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch) else: batches = self.frozen_batches batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" # Create data loader itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, timeout=self.timeout, pin_memory=True, ) # Wrap with a BufferedIterator if needed if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) # Wrap with CountingIterator itr = CountingIterator(itr, start=offset) return itr class GroupedIterator(CountingIterator): """Wrapper around an iterable that returns groups (chunks) of items. Args: iterable (iterable): iterable to wrap chunk_size (int): size of each chunk Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, chunk_size): itr = _chunk_iterator(iterable, chunk_size) super().__init__( itr, start=int(math.ceil(getattr(iterable, "n", 0) / float(chunk_size))), total=int(math.ceil(len(iterable) / float(chunk_size))), ) self.chunk_size = chunk_size def _chunk_iterator(itr, chunk_size): chunk = [] for x in itr: chunk.append(x) if len(chunk) == chunk_size: yield chunk chunk = [] if len(chunk) > 0: yield chunk class ShardedIterator(CountingIterator): """A sharded wrapper around an iterable, padded to length. Args: iterable (iterable): iterable to wrap num_shards (int): number of shards to split the iterable into shard_id (int): which shard to iterator over fill_value (Any, optional): padding value when the iterable doesn't evenly divide *num_shards* (default: None). 
Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, num_shards, shard_id, fill_value=None): if shard_id < 0 or shard_id >= num_shards: raise ValueError("shard_id must be between 0 and num_shards") sharded_len = int(math.ceil(len(iterable) / float(num_shards))) itr = map( operator.itemgetter(1), itertools.zip_longest( range(sharded_len), itertools.islice(iterable, shard_id, len(iterable), num_shards), fillvalue=fill_value, ), ) super().__init__( itr, start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))), total=sharded_len, ) class BackgroundConsumer(Thread): def __init__(self, queue, source, max_len, cuda_device): Thread.__init__(self) self._queue = queue self._source = source self._max_len = max_len self.count = 0 self.cuda_device = cuda_device def run(self): # set_device to avoid creation of GPU0 context when using pin_memory if self.cuda_device is not None: torch.cuda.set_device(self.cuda_device) try: for item in self._source: self._queue.put(item) # Stop if we reached the maximum length self.count += 1 if self._max_len is not None and self.count >= self._max_len: break # Signal the consumer we are done. self._queue.put(_sentinel) except Exception as e: self._queue.put(e) class BufferedIterator(object): def __init__(self, size, iterable): self._queue = queue.Queue(size) self._iterable = iterable self._consumer = None self.start_time = time.time() self.warning_time = None self.total = len(iterable) def _create_consumer(self): self._consumer = BackgroundConsumer( self._queue, self._iterable, self.total, torch.cuda.current_device() if torch.cuda.is_available() else None ) self._consumer.daemon = True self._consumer.start() def __iter__(self): return self def __len__(self): return self.total def take(self, n): self.total = min(self.total, n) # Propagate this change to the underlying iterator if hasattr(self._iterable, "take"): self._iterable.take(n) return self def __next__(self): # Create consumer if not created yet if self._consumer is None: self._create_consumer() # Notify the user if there is a data loading bottleneck if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): if time.time() - self.start_time > 5 * 60: if ( self.warning_time is None or time.time() - self.warning_time > 15 * 60 ): logger.debug( "Data loading buffer is empty or nearly empty. This may " "indicate a data loading bottleneck, and increasing the " "number of workers (--num-workers) may help." ) self.warning_time = time.time() # Get next example item = self._queue.get(True) if isinstance(item, Exception): raise item if item is _sentinel: raise StopIteration() return item class GroupedEpochBatchIterator(EpochBatchIterator): """Grouped version of EpochBatchIterator It takes several samplers from different datasets. Each epoch shuffle the dataset wise sampler individually with different random seed. The those sub samplers are combined with into one big samplers with deterministic permutation to mix batches from different datasets. 
It will act like EpochBatchIterator but make sure 1) data from one data set each time 2) for different workers, they use the same order to fetch the data so they will use data from the same dataset everytime mult_rate is used for update_freq > 1 case where we want to make sure update_freq mini-batches come from same source """ def __init__( self, dataset, collate_fn, batch_samplers, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, mult_rate=1, buffer_size=0, ): super().__init__( dataset, collate_fn, batch_samplers, seed, num_shards, shard_id, num_workers, epoch, buffer_size, ) # level 0: sub-samplers 1: batch_idx 2: batches self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers]) self.step_size = mult_rate * num_shards self.lengths = [ (len(x) // self.step_size) * self.step_size for x in self.frozen_batches ] def __len__(self): return sum(self.lengths) @property def first_batch(self): if len(self.frozen_batches) == 0: raise Exception( "The dataset is empty. This could indicate " "that all elements in the dataset have been skipped. " "Try increasing the max number of allowed tokens or using " "a larger dataset." ) if self.dataset.supports_fetch_outside_dataloader: return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]]) else: return "DUMMY" def _get_iterator_for_epoch( self, epoch, shuffle, fix_batches_to_gpus=False, offset=0 ): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): np.random.shuffle(batches) return batches def return_full_batches(batch_sets, seed, shuffle): if shuffle: batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets] batch_sets = [ batch_sets[i][: self.lengths[i]] for i in range(len(batch_sets)) ] batches = list(itertools.chain.from_iterable(batch_sets)) if shuffle: with data_utils.numpy_seed(seed): idx = np.random.permutation(len(batches) // self.step_size) if len(idx) * self.step_size != len(batches): raise ValueError( "ERROR: %d %d %d %d" % (len(idx), self.step_size, len(batches), self.shard_id), ":".join(["%d" % x for x in self.lengths]), ) mini_shards = [ batches[i * self.step_size : (i + 1) * self.step_size] for i in idx ] batches = list(itertools.chain.from_iterable(mini_shards)) return batches if self._supports_prefetch: raise NotImplementedError("To be implemented") else: batches = return_full_batches( self.frozen_batches, self.seed + epoch, shuffle ) batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, ) if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) return CountingIterator(itr, start=offset)
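

# Illustrative sketch of the two simplest iterator wrappers above; the ranges
# stand in for real batch iterators.
if __name__ == "__main__":
    it = CountingIterator(range(5))
    next(it), next(it)
    print(it.n, it.has_next())   # 2 True

    grouped = GroupedIterator(range(7), chunk_size=3)
    print(list(grouped))         # [[0, 1, 2], [3, 4, 5], [6]]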
bart_ls-main
fairseq-py/fairseq/data/iterators.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import subprocess
import json
import tempfile
import hashlib

from typing import Hashable

try:
    import pyarrow.plasma as plasma

    PYARROW_AVAILABLE = True
except ImportError:
    plasma = None
    PYARROW_AVAILABLE = False


class PlasmaArray:
    """
    Wrapper around numpy arrays that automatically moves the data to shared
    memory upon serialization. This is particularly helpful when passing numpy
    arrays through multiprocessing, so that data is not unnecessarily
    duplicated or pickled.
    """

    def __init__(self, array):
        super().__init__()
        self.array = array
        self.disable = array.nbytes < 134217728  # disable for arrays <128MB
        self.object_id = None
        self.path = None

        # variables with underscores shouldn't be pickled
        self._client = None
        self._server = None
        self._server_tmp = None
        self._plasma = None

    @property
    def plasma(self):
        if self._plasma is None and not self.disable:
            self._plasma = plasma
        return self._plasma

    def start_server(self):
        if self.plasma is None or self._server is not None:
            return

        assert self.object_id is None
        assert self.path is None
        self._server_tmp = tempfile.NamedTemporaryFile()
        self.path = self._server_tmp.name
        self._server = subprocess.Popen(
            ["plasma_store", "-m", str(int(1.05 * self.array.nbytes)), "-s", self.path]
        )

    @property
    def client(self):
        if self._client is None:
            assert self.path is not None
            self._client = self.plasma.connect(self.path, num_retries=200)
        return self._client

    def __getstate__(self):
        """Called on pickle save"""
        if self.plasma is None:
            return self.__dict__
        if self.object_id is None:
            self.start_server()
            self.object_id = self.client.put(self.array)
        state = self.__dict__.copy()
        del state["array"]
        state["_client"] = None
        state["_server"] = None
        state["_server_tmp"] = None
        state["_plasma"] = None
        return state

    def __setstate__(self, state):
        """Called on pickle load"""
        self.__dict__.update(state)
        if self.plasma is None:
            return
        self.array = self.client.get(self.object_id)

    def __del__(self):
        if self._server is not None:
            self._server.kill()
            self._server = None
            self._server_tmp.close()
            self._server_tmp = None


DEFAULT_PLASMA_PATH = "/tmp/plasma"


class PlasmaView:
    """Interface to write and read from shared memory. Whereas PlasmaArray writes to plasma on serialization,
    PlasmaView writes to shared memory on instantiation.
    """

    def __init__(self, array, split_path: str, hash_data: Hashable, plasma_path=None):
        """
        Args:
            array: numpy array to store. This can be read with ``PlasmaView().array``
            split_path: the path whence the data was read, used for hashing
            hash_data: other metadata about the array that can be used to create a unique key.
                as of writing, the 3 callers in ``TokenBlockDataset`` use::

                    hash_data = ((block_size, document_sep_len, str(break_mode), len(dataset)), 0|1|2)
        """
        assert PYARROW_AVAILABLE
        assert split_path is not None
        if plasma_path is None:
            plasma_path = DEFAULT_PLASMA_PATH

        self.path = plasma_path
        self.split_path = split_path
        self._client = None  # Initialize lazily for pickle. plasma clients should not be deep copied or serialized.
self._n = None self.object_id = self.get_object_id(self.split_path, hash_data) try: self.client.put(array, object_id=self.object_id) except plasma.PlasmaObjectExists: pass @property def client(self): if self._client is None: self._client = plasma.connect(self.path, num_retries=200) return self._client @property def array(self): """Fetch a read only view of an np.array, stored in plasma.""" ret = self.client.get(self.object_id) return ret @staticmethod def get_object_id(split_path: str, hash_data: Hashable): """Returns plasma.ObjectID from hashing split_path and object_num.""" hash = hashlib.blake2b(bytes(split_path, "utf-8"), digest_size=20) harg = json.dumps(hash_data).encode("utf-8") hash.update(harg) return plasma.ObjectID(hash.digest()) def __getstate__(self): """Called on pickle save""" self.disconnect() state = self.__dict__.copy() assert state["_client"] is None assert "object_id" in state return state def __setstate__(self, state): """Called on pickle load""" self.__dict__.update(state) def __del__(self): self.disconnect() def disconnect(self): if self._client is not None: self._client.disconnect() self._client = None def __len__(self): """Save reads by caching len""" if self._n is None: self._n = len(self.array) return self._n GB100 = (1024 ** 3) * 100 class PlasmaStore: def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int = GB100): self.server = self.start(path, nbytes) def __del__(self): self.server.kill() @staticmethod def start(path=DEFAULT_PLASMA_PATH, nbytes: int = GB100) -> subprocess.Popen: if not PYARROW_AVAILABLE: raise ImportError("please run pip install pyarrow to use --use_plasma_view") # best practice is to allocate more space than we need. The limitation seems to be the size of /dev/shm _server = subprocess.Popen(["plasma_store", "-m", str(nbytes), "-s", path]) plasma.connect(path, num_retries=200) # If we can't connect we fail immediately return _server
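

# Illustrative sketch. Arrays smaller than 128MB disable plasma, so this
# round-trips through ordinary pickle and needs no plasma_store server.
if __name__ == "__main__":
    import pickle

    import numpy as np

    wrapped = PlasmaArray(np.arange(10))
    restored = pickle.loads(pickle.dumps(wrapped))
    print(restored.array)  # [0 1 2 3 4 5 6 7 8 9]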
bart_ls-main
fairseq-py/fairseq/data/plasma_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np from fairseq.data import BaseWrapperDataset, plasma_utils logger = logging.getLogger(__name__) class ResamplingDataset(BaseWrapperDataset): """Randomly samples from a given dataset at each epoch. Sampling is done with or without replacement, depending on the "replace" parameter. Optionally, the epoch size can be rescaled. This is potentially desirable to increase per-epoch coverage of the base dataset (since sampling with replacement means that many items in the dataset will be left out). In the case of sampling without replacement, size_ratio should be strictly less than 1. Args: dataset (~torch.utils.data.Dataset): dataset on which to sample. weights (List[float]): list of probability weights (default: None, which corresponds to uniform sampling). replace (bool): sampling mode; True for "with replacement", or False for "without replacement" (default: True) size_ratio (float): the ratio to subsample to; must be positive (default: 1.0). batch_by_size (bool): whether or not to batch by sequence length (default: True). seed (int): RNG seed to use (default: 0). epoch (int): starting epoch number (default: 1). """ def __init__( self, dataset, weights=None, replace=True, size_ratio=1.0, batch_by_size=True, seed=0, epoch=1, ): super().__init__(dataset) if weights is None: self.weights = None else: assert len(weights) == len(dataset) weights_arr = np.array(weights, dtype=np.float64) weights_arr /= weights_arr.sum() self.weights = plasma_utils.PlasmaArray(weights_arr) self.replace = replace assert size_ratio > 0.0 if not self.replace: assert size_ratio < 1.0 self.size_ratio = float(size_ratio) self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int) self.batch_by_size = batch_by_size self.seed = seed self._cur_epoch = None self._cur_indices = None self.set_epoch(epoch) def __getitem__(self, index): return self.dataset[self._cur_indices.array[index]] def __len__(self): return self.actual_size @property def sizes(self): if isinstance(self.dataset.sizes, list): return [s[self._cur_indices.array] for s in self.dataset.sizes] return self.dataset.sizes[self._cur_indices.array] def num_tokens(self, index): return self.dataset.num_tokens(self._cur_indices.array[index]) def size(self, index): return self.dataset.size(self._cur_indices.array[index]) def ordered_indices(self): if self.batch_by_size: order = [ np.arange(len(self)), self.sizes, ] # No need to handle `self.shuffle == True` return np.lexsort(order) else: return np.arange(len(self)) def prefetch(self, indices): self.dataset.prefetch(self._cur_indices.array[indices]) @property def can_reuse_epoch_itr_across_epochs(self): return False def set_epoch(self, epoch): logger.debug("ResamplingDataset.set_epoch: {}".format(epoch)) super().set_epoch(epoch) if epoch == self._cur_epoch: return self._cur_epoch = epoch # Generate a weighted sample of indices as a function of the # random seed and the current epoch. rng = np.random.RandomState( [ 42, # magic number self.seed % (2 ** 32), # global seed self._cur_epoch, # epoch index ] ) self._cur_indices = plasma_utils.PlasmaArray( rng.choice( len(self.dataset), self.actual_size, replace=self.replace, p=(None if self.weights is None else self.weights.array), ) )
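

# Illustrative sketch using a plain list in place of a real FairseqDataset;
# with replace=False and size_ratio=0.5 each epoch keeps a different half.
if __name__ == "__main__":
    base = list(range(10))
    ds = ResamplingDataset(base, replace=False, size_ratio=0.5, batch_by_size=False, seed=3)
    print([ds[i] for i in range(len(ds))])
    ds.set_epoch(2)  # re-samples as a function of (seed, epoch)
    print([ds[i] for i in range(len(ds))])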
bart_ls-main
fairseq-py/fairseq/data/resampling_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import FairseqDataset, data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, input_feeding=True, pad_to_length=None, pad_to_multiple=1, ): if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning, pad_to_length=pad_to_length, pad_to_multiple=pad_to_multiple, ) def check_alignment(alignment, src_len, tgt_len): if alignment is None or len(alignment) == 0: return False if ( alignment[:, 0].max().item() >= src_len - 1 or alignment[:, 1].max().item() >= tgt_len - 1 ): logger.warning("alignment size mismatch found, skipping alignment!") return False return True def compute_alignment_weights(alignments): """ Given a tensor of shape [:, 2] containing the source-target indices corresponding to the alignments, a weight vector containing the inverse frequency of each target index is computed. For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then a tensor containing [1., 0.5, 0.5, 1] should be returned (since target index 3 is repeated twice) """ align_tgt = alignments[:, 1] _, align_tgt_i, align_tgt_c = torch.unique( align_tgt, return_inverse=True, return_counts=True ) align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]] return 1.0 / align_weights.float() id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, ) # sort by descending source length src_lengths = torch.LongTensor( [s["source"].ne(pad_idx).long().sum() for s in samples] ) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) tgt_lengths = torch.LongTensor( [s["target"].ne(pad_idx).long().sum() for s in samples] ).index_select(0, sort_order) ntokens = tgt_lengths.sum().item() if samples[0].get("prev_output_tokens", None) is not None: prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target) elif input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) else: ntokens = src_lengths.sum().item() batch = { "id": id, "nsentences": len(samples), "ntokens": ntokens, "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths,}, "target": target, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select( 0, sort_order ) if samples[0].get("alignment", None) is not None: bsz, tgt_sz = batch["target"].shape src_sz = batch["net_input"]["src_tokens"].shape[1] offsets = torch.zeros((len(sort_order), 2), dtype=torch.long) offsets[:, 1] += torch.arange(len(sort_order), 
dtype=torch.long) * tgt_sz if left_pad_source: offsets[:, 0] += src_sz - src_lengths if left_pad_target: offsets[:, 1] += tgt_sz - tgt_lengths alignments = [ alignment + offset for align_idx, offset, src_len, tgt_len in zip( sort_order, offsets, src_lengths, tgt_lengths ) for alignment in [samples[align_idx]["alignment"].view(-1, 2)] if check_alignment(alignment, src_len, tgt_len) ] if len(alignments) > 0: alignments = torch.cat(alignments, dim=0) align_weights = compute_alignment_weights(alignments) batch["alignments"] = alignments batch["align_weights"] = align_weights if samples[0].get("constraints", None) is not None: # Collate the packed constraints across the samples, padding to # the length of the longest sample. lens = [sample.get("constraints").size(0) for sample in samples] max_len = max(lens) constraints = torch.zeros((len(samples), max(lens))).long() for i, sample in enumerate(samples): constraints[i, 0 : lens[i]] = samples[i].get("constraints") batch["constraints"] = constraints.index_select(0, sort_order) return batch class LanguagePairDataset(FairseqDataset): """ A pair of torch.utils.data.Datasets. Args: src (torch.utils.data.Dataset): source dataset to wrap src_sizes (List[int]): source sentence lengths src_dict (~fairseq.data.Dictionary): source vocabulary tgt (torch.utils.data.Dataset, optional): target dataset to wrap tgt_sizes (List[int], optional): target sentence lengths tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary left_pad_source (bool, optional): pad source tensors on the left side (default: True). left_pad_target (bool, optional): pad target tensors on the left side (default: False). shuffle (bool, optional): shuffle dataset elements before batching (default: True). input_feeding (bool, optional): create a shifted version of the targets to be passed into the model for teacher forcing (default: True). remove_eos_from_source (bool, optional): if set, removes eos from end of source if it's present (default: False). append_eos_to_target (bool, optional): if set, appends eos to end of target if it's absent (default: False). align_dataset (torch.utils.data.Dataset, optional): dataset containing alignments. constraints (Tensor, optional): 2d tensor with a concatenated, zero- delimited list of constraints for each sentence. append_bos (bool, optional): if set, appends bos to the beginning of source/target sentence. num_buckets (int, optional): if set to a value greater than 0, then batches will be bucketed into the given number of batch shapes. src_lang_id (int, optional): source language ID, if set, the collated batch will contain a field 'src_lang_id' in 'net_input' which indicates the source language of the samples. tgt_lang_id (int, optional): target language ID, if set, the collated batch will contain a field 'tgt_lang_id' which indicates the target language of the samples. 
""" def __init__( self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None, left_pad_source=True, left_pad_target=False, shuffle=True, input_feeding=True, remove_eos_from_source=False, append_eos_to_target=False, align_dataset=None, constraints=None, append_bos=False, eos=None, num_buckets=0, src_lang_id=None, tgt_lang_id=None, pad_to_multiple=1, ): if tgt_dict is not None: assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() if tgt is not None: assert len(src) == len( tgt ), "Source and target must contain the same number of examples" self.src = src self.tgt = tgt self.src_sizes = np.array(src_sizes) self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None self.sizes = ( np.vstack((self.src_sizes, self.tgt_sizes)).T if self.tgt_sizes is not None else self.src_sizes ) self.src_dict = src_dict self.tgt_dict = tgt_dict self.left_pad_source = left_pad_source self.left_pad_target = left_pad_target self.shuffle = shuffle self.input_feeding = input_feeding self.remove_eos_from_source = remove_eos_from_source self.append_eos_to_target = append_eos_to_target self.align_dataset = align_dataset if self.align_dataset is not None: assert ( self.tgt_sizes is not None ), "Both source and target needed when alignments are provided" self.constraints = constraints self.append_bos = append_bos self.eos = eos if eos is not None else src_dict.eos() self.src_lang_id = src_lang_id self.tgt_lang_id = tgt_lang_id if num_buckets > 0: from fairseq.data import BucketPadLengthDataset self.src = BucketPadLengthDataset( self.src, sizes=self.src_sizes, num_buckets=num_buckets, pad_idx=self.src_dict.pad(), left_pad=self.left_pad_source, ) self.src_sizes = self.src.sizes logger.info("bucketing source lengths: {}".format(list(self.src.buckets))) if self.tgt is not None: self.tgt = BucketPadLengthDataset( self.tgt, sizes=self.tgt_sizes, num_buckets=num_buckets, pad_idx=self.tgt_dict.pad(), left_pad=self.left_pad_target, ) self.tgt_sizes = self.tgt.sizes logger.info( "bucketing target lengths: {}".format(list(self.tgt.buckets)) ) # determine bucket sizes using self.num_tokens, which will return # the padded lengths (thanks to BucketPadLengthDataset) num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long]) self.bucketed_num_tokens = num_tokens(np.arange(len(self.src))) self.buckets = [ (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens) ] else: self.buckets = None self.pad_to_multiple = pad_to_multiple def get_batch_shapes(self): return self.buckets def __getitem__(self, index): tgt_item = self.tgt[index] if self.tgt is not None else None src_item = self.src[index] # Append EOS to end of tgt sentence if it does not have an EOS and remove # EOS from end of src sentence if it exists. 
This is useful when we want
        # to use existing datasets for opposite directions, i.e., when we want
        # to use tgt_dataset as src_dataset and vice versa.
        if self.append_eos_to_target:
            eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
            if self.tgt and self.tgt[index][-1] != eos:
                tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])

        if self.append_bos:
            bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
            if self.tgt and self.tgt[index][0] != bos:
                tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])

            bos = self.src_dict.bos()
            if self.src[index][0] != bos:
                src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])

        if self.remove_eos_from_source:
            eos = self.src_dict.eos()
            if self.src[index][-1] == eos:
                src_item = self.src[index][:-1]

        example = {
            "id": index,
            "source": src_item,
            "target": tgt_item,
        }
        if self.align_dataset is not None:
            example["alignment"] = self.align_dataset[index]
        if self.constraints is not None:
            example["constraints"] = self.constraints[index]
        return example

    def __len__(self):
        return len(self.src)

    def collater(self, samples, pad_to_length=None):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate
            pad_to_length (dict, optional): a dictionary of
                {'source': source_pad_to_length, 'target': target_pad_to_length}
                to indicate the max length to pad to in source and target respectively.

        Returns:
            dict: a mini-batch with the following keys:

                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:

                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the left if *left_pad_source* is ``True``.
                  - `src_lengths` (LongTensor): 1D Tensor of the unpadded
                    lengths of each source sentence of shape `(bsz)`
                  - `prev_output_tokens` (LongTensor): a padded 2D Tensor of
                    tokens in the target sentence, shifted right by one
                    position for teacher forcing, of shape `(bsz, tgt_len)`.
                    This key will not be present if *input_feeding* is
                    ``False``. Padding will appear on the left if
                    *left_pad_target* is ``True``.
                  - `src_lang_id` (LongTensor): a long Tensor which contains source
                    language IDs of each sample in the batch

                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will appear
                  on the left if *left_pad_target* is ``True``.
                - `tgt_lang_id` (LongTensor): a long Tensor which contains target
                  language IDs of each sample in the batch
        """
        res = collate(
            samples,
            pad_idx=self.src_dict.pad(),
            eos_idx=self.eos,
            left_pad_source=self.left_pad_source,
            left_pad_target=self.left_pad_target,
            input_feeding=self.input_feeding,
            pad_to_length=pad_to_length,
            pad_to_multiple=self.pad_to_multiple,
        )
        if self.src_lang_id is not None or self.tgt_lang_id is not None:
            src_tokens = res["net_input"]["src_tokens"]
            bsz = src_tokens.size(0)
            if self.src_lang_id is not None:
                res["net_input"]["src_lang_id"] = (
                    torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
                )
            if self.tgt_lang_id is not None:
                res["tgt_lang_id"] = (
                    torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
                )
        return res

    def num_tokens(self, index):
        """Return the number of tokens in a sample.
This value is used to enforce ``--max-tokens`` during batching.""" return max( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" sizes = self.src_sizes[indices] if self.tgt_sizes is not None: sizes = np.maximum(sizes, self.tgt_sizes[indices]) return sizes def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return ( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: indices = np.random.permutation(len(self)).astype(np.int64) else: indices = np.arange(len(self), dtype=np.int64) if self.buckets is None: # sort by target length, then source length if self.tgt_sizes is not None: indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")] return indices[np.argsort(self.src_sizes[indices], kind="mergesort")] else: # sort by bucketed_num_tokens, which is: # max(padded_src_len, padded_tgt_len) return indices[ np.argsort(self.bucketed_num_tokens[indices], kind="mergesort") ] @property def supports_prefetch(self): return getattr(self.src, "supports_prefetch", False) and ( getattr(self.tgt, "supports_prefetch", False) or self.tgt is None ) def prefetch(self, indices): self.src.prefetch(indices) if self.tgt is not None: self.tgt.prefetch(indices) if self.align_dataset is not None: self.align_dataset.prefetch(indices) def filter_indices_by_size(self, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ return data_utils.filter_paired_dataset_indices_by_size( self.src_sizes, self.tgt_sizes, indices, max_sizes, )
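

# Minimal collation sketch with a stub dictionary; the token ids (pad=1,
# eos=2, unk=3, bos=0) and the sentences are arbitrary illustrative values.
if __name__ == "__main__":
    class _StubDict:
        def pad(self):
            return 1

        def eos(self):
            return 2

        def unk(self):
            return 3

        def bos(self):
            return 0

    d = _StubDict()
    src = [torch.LongTensor([4, 5, 2]), torch.LongTensor([6, 2])]
    tgt = [torch.LongTensor([7, 2]), torch.LongTensor([8, 9, 2])]
    ds = LanguagePairDataset(src, [3, 2], d, tgt=tgt, tgt_sizes=[2, 3], tgt_dict=d)
    batch = ds.collater([ds[0], ds[1]])
    print(batch["net_input"]["src_tokens"])          # sources, left-padded with pad=1
    print(batch["net_input"]["prev_output_tokens"])  # targets shifted right for teacher forcing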
bart_ls-main
fairseq-py/fairseq/data/language_pair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import BaseWrapperDataset class AppendTokenDataset(BaseWrapperDataset): def __init__(self, dataset, token=None): super().__init__(dataset) self.token = token if token is not None: self._sizes = np.array(dataset.sizes) + 1 else: self._sizes = dataset.sizes def __getitem__(self, idx): item = self.dataset[idx] if self.token is not None: item = torch.cat([item, item.new([self.token])]) return item @property def sizes(self): return self._sizes def num_tokens(self, index): n = self.dataset.num_tokens(index) if self.token is not None: n += 1 return n def size(self, index): n = self.dataset.size(index) if self.token is not None: n += 1 return n
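

# Illustrative sketch mirroring PrependTokenDataset: a toy dataset where the
# appended token id 2 stands in for an EOS symbol.
if __name__ == "__main__":
    class _ToyDataset:
        sizes = np.array([2, 3])

        def __getitem__(self, idx):
            return torch.arange(1, int(self.sizes[idx]) + 1)

        def __len__(self):
            return len(self.sizes)

    ds = AppendTokenDataset(_ToyDataset(), token=2)
    print(ds[0])      # tensor([1, 2, 2])
    print(ds.sizes)   # [3 4]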
bart_ls-main
fairseq-py/fairseq/data/append_token_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import logging import random from . import FairseqDataset, data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, vocab, left_pad_source=False, left_pad_target=False, input_feeding=True, pad_to_length=None, pad_to_multiple=1 ): assert input_feeding if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None, pad_to_multiple=1): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx=None, # use eos_idx of each sample instead of vocab.eos() left_pad=left_pad, move_eos_to_beginning=move_eos_to_beginning, pad_to_length=pad_to_length, pad_to_multiple=pad_to_multiple, ) id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, pad_to_multiple=pad_to_multiple ) # sort by descending source length src_lengths = torch.LongTensor([s["source"].numel() for s in samples]) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None ### for model-based denoising ### masked_unfiltered = None if samples[0].get("masked_unfiltered", None) is not None: masked_unfiltered = merge( "masked_unfiltered", left_pad=left_pad_target, pad_to_length=pad_to_length["masked_unfiltered"] if pad_to_length is not None else None, pad_to_multiple=pad_to_multiple ) masked_unfiltered = masked_unfiltered.index_select(0, sort_order) ### for model-based denoising ### if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) ntokens = sum(len(s["target"]) for s in samples) if input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) prev_output_tokens = prev_output_tokens.index_select(0, sort_order) else: ntokens = sum(len(s["source"]) for s in samples) batch = { "id": id, "ntokens": ntokens, "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, 'masked_unfiltered': masked_unfiltered, }, "target": target, "nsentences": samples[0]["source"].size(0), "sort_order": sort_order, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens return batch class LongDenoisingDataset(FairseqDataset): """ A wrapper around TokenBlockDataset for BART dataset. Args: dataset (TokenBlockDataset): dataset to wrap sizes (List[int]): sentence lengths vocab (~fairseq.data.Dictionary): vocabulary mask_idx (int): dictionary index used for masked token mask_whole_words: only mask whole words. This should be a byte mask over vocab indices, indicating whether it is the beginning of a word. We will extend any mask to encompass the whole word. shuffle (bool, optional): shuffle the elements before batching. Default: ``True`` seed: Seed for random number generator for reproducibility. args: argparse arguments. 
""" def __init__( self, dataset, sizes, vocab, shuffle, seed, noise_density, mean_noise_span_length, sample_ratio=1, model_based=False, min_source_length=None, eos=None, truncate_target=False, pad_to_multiple=1, dynamic_span=False ): self.dataset = dataset self.sizes = sizes self.vocab = vocab self.shuffle = shuffle self.seed = seed self.min_source_length = min_source_length self.model_based = model_based self.truncate_target = truncate_target self.noise_density = noise_density # this is the initial masking ratio self.mean_noise_span_length = mean_noise_span_length self.sample_ratio = sample_ratio self.eos = eos if eos is not None else vocab.eos() self.sentinel_start = vocab.index("<sentinel_0>") self.epoch = 0 self.pad_to_multiple = pad_to_multiple self.dynamic_span = dynamic_span if self.dynamic_span: avg_span_lens = [4, 8, 12] self.noisy_span_lens = [random.choice(avg_span_lens) for _ in range(len(sizes))] else: self.noisy_span_lens = None @property def can_reuse_epoch_itr_across_epochs(self): return True # only the noise changes, not item sizes def set_epoch(self, epoch, **unused): self.epoch = epoch def compute_lengths(self, orig_length, noisy_span_len): """ calculate the source/target length # TODO the lengths calculation here is not exact """ raw_length = orig_length - 2 num_noise_tokens = int(round(raw_length * self.noise_density)) num_noise_tokens *= self.sample_ratio num_nonnoise_tokens = raw_length - num_noise_tokens num_noise_spans = int(self.sample_ratio * round(num_noise_tokens / noisy_span_len)) source_len = num_nonnoise_tokens + num_noise_spans + 2 target_len = num_noise_tokens + num_noise_spans + 2 # HACK the target lengths are handled within the model or via truncating if self.model_based or self.truncate_target: target_len = 1024 return (int(source_len), int(target_len)) def __getitem__(self, index): if self.dynamic_span: noisy_span_len = self.noisy_span_lens[index] else: noisy_span_len = self.mean_noise_span_length with data_utils.numpy_seed(self.seed, self.epoch, index): tokens = self.dataset[index] assert tokens[-1] == self.eos if tokens.size(0) <= 2: from random import randrange random_index = randrange(len(self.dataset)) tokens = self.dataset[random_index] # @xwhan some incorrect processed samples? 
if self.model_based: source, masked_unfiltered = self.add_noise(tokens, noisy_span_len) else: source, target = self.add_noise(tokens, noisy_span_len) assert (source >= 0).all() assert (source[1:-1] >= 1).all() assert (source <= len(self.vocab)).all() assert source[0] == self.vocab.bos() assert source[-1] == self.eos return { "id": index, "source": source, "masked_unfiltered": None if not self.model_based else masked_unfiltered, "target": None if self.model_based else target, } def __len__(self): return len(self.dataset) def add_noise(self, source, noisy_span_len): length = source.size(0) - 2 mask_indices = self.random_spans_noise_mask(length, noisy_span_len) labels_mask = ~mask_indices tokens = source[1:-1] if self.model_based: input_ids_sentinel = self.create_sentinel_ids(mask_indices.astype(np.int8)) labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8)) # masked inputs with <mask> only # MLM targets with masked positions as pad_idx masked = self.fill_input_ids(tokens, input_ids_sentinel, self.vocab.index("<mask>")) masked = torch.cat([source[:1], masked, source[-1:]]) masked_target = torch.full(source.size(), self.vocab.pad()) masked_target[1:-1] = self.fill_input_ids(tokens, labels_sentinel, self.vocab.pad()) return masked, masked_target else: input_ids_sentinel = self.create_sentinel_ids(mask_indices.astype(np.int8), offset=self.sentinel_start - 1) labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8), offset=self.sentinel_start - 1) input_ids = self.filter_input_ids(tokens, input_ids_sentinel) label_ids = self.filter_input_ids(tokens, labels_sentinel) if self.truncate_target: label_ids = label_ids[:1024-2] source = torch.cat([source[:1], input_ids, source[-1:]]) target = torch.cat([source[:1], label_ids, source[-1:]]) return source, target def filter_input_ids(self, input_ids, sentinel_ids): """ Puts sentinel mask on `input_ids` and squeeze consecutive mask tokens into a single mask token by deleting. This will reduce the sequence length from `expanded_inputs_length` to `input_length`. """ # batch_size = input_ids.shape[0] sentinel_ids = torch.tensor(sentinel_ids) input_ids_full = torch.where(sentinel_ids != 0, sentinel_ids, input_ids) input_ids = input_ids_full[input_ids_full > 0] return input_ids def fill_input_ids(self, input_ids, sentinel_ids, fill_idx): """ set masked spans as <mask> """ sentinel_ids = torch.tensor(sentinel_ids) masked_ids = torch.where(sentinel_ids != 0, fill_idx, input_ids) return masked_ids def create_sentinel_ids(self, mask_indices, offset=0): """ Sentinel ids creation given the indices that should be masked. The start indices of each mask are replaced by the sentinel ids in increasing order. Consecutive mask indices to be deleted are replaced with `-1`. """ start_indices = mask_indices - np.roll(mask_indices, 1, axis=-1) * mask_indices start_indices[0] = mask_indices[0] sentinel_ids = np.where(start_indices != 0, np.cumsum(start_indices, axis=-1), start_indices) sentinel_ids = np.where(sentinel_ids != 0, sentinel_ids + offset, 0) sentinel_ids -= mask_indices - start_indices return sentinel_ids def random_spans_noise_mask(self, length, noisy_span_len): orig_length = length num_noise_tokens = int(np.round(length * self.noise_density)) # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens. 
num_noise_tokens = min(max(num_noise_tokens, 1), length - 1) num_noise_spans = int(np.round(num_noise_tokens / noisy_span_len)) # avoid degeneracy by ensuring positive number of noise spans num_noise_spans = max(num_noise_spans, 1) num_nonnoise_tokens = length - num_noise_tokens # pick the lengths of the noise spans and the non-noise spans def _random_segmentation(num_items, num_segments): """Partition a sequence of items randomly into non-empty segments. Args: num_items: an integer scalar > 0 num_segments: an integer scalar in [1, num_items] Returns: a Tensor with shape [num_segments] containing positive integers that add up to num_items """ mask_indices = np.arange(num_items - 1) < (num_segments - 1) np.random.shuffle(mask_indices) first_in_segment = np.pad(mask_indices, [[1, 0]]) segment_id = np.cumsum(first_in_segment) # count length of sub segments assuming that list is sorted _, segment_length = np.unique(segment_id, return_counts=True) return segment_length noise_span_lengths = _random_segmentation(num_noise_tokens, num_noise_spans) nonnoise_span_lengths = _random_segmentation(num_nonnoise_tokens, num_noise_spans) interleaved_span_lengths = np.reshape( np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1), [num_noise_spans * 2] ) span_starts = np.cumsum(interleaved_span_lengths)[:-1] span_start_indicator = np.zeros((length,), dtype=np.int8) span_start_indicator[span_starts] = True span_num = np.cumsum(span_start_indicator) is_noise = np.equal(span_num % 2, 1) return is_noise[:orig_length] def collater(self, samples, pad_to_length=None): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch of data """ return collate( samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length, pad_to_multiple=self.pad_to_multiple ) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" if self.dynamic_span: noisy_span_len = self.noisy_span_lens[index] else: noisy_span_len = self.mean_noise_span_length return self.compute_lengths(self.sizes[index], noisy_span_len)[0] def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" if self.dynamic_span: noisy_span_len = self.noisy_span_lens[index] else: noisy_span_len = self.mean_noise_span_length return self.compute_lengths(self.sizes[index], noisy_span_len) def ordered_indices(self): """Return an ordered list of indices. 
Batches will be constructed based on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))

        if self.min_source_length:
            ignored = indices[self.sizes[indices] < self.min_source_length].tolist()
            indices = indices[self.sizes[indices] >= self.min_source_length]
            if len(ignored) > 0:
                logger.warning(
                    (
                        "{:,} samples have invalid sizes and will be skipped, "
                        "min_positions={}, first few sample ids={}"
                    ).format(len(ignored), self.min_source_length, ignored[:10])
                )
        return indices[np.argsort(self.sizes[indices], kind="mergesort")]

    def filter_indices_by_size(self, indices, max_sizes):
        """
        Customized (admittedly hacky) function to reduce the time spent
        building the data iterator.
        """
        if isinstance(max_sizes, float) or isinstance(max_sizes, int) or self.truncate_target:
            # if truncating elsewhere, then ignore the target limit
            if isinstance(max_sizes, tuple):
                max_sizes = max_sizes[0]
            if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
                ignored = indices[self.sizes[indices] > max_sizes].tolist()
                indices = indices[self.sizes[indices] <= max_sizes]
            elif (
                hasattr(self, "sizes")
                and isinstance(self.sizes, list)
                and len(self.sizes) == 1
            ):
                ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
                indices = indices[self.sizes[0][indices] <= max_sizes]
            else:
                indices, ignored = data_utils._filter_by_size_dynamic(
                    indices, self.size, max_sizes
                )
        else:
            indices, ignored = data_utils._filter_by_size_dynamic(
                indices, self.size, max_sizes
            )
        return indices, ignored

    def prefetch(self, indices):
        # delegate to the wrapped dataset; this class has no src/tgt split
        self.dataset.prefetch(indices)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, "supports_prefetch", False)
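
# --- Hedged usage sketch (illustration only; not part of the original file).
# What the T5-style span corruption produces on a toy sequence: masked spans
# in `source` collapse to single sentinel tokens, and `target` pairs each
# sentinel with the original span tokens. The tiny vocab below is made up.
if __name__ == "__main__":
    from fairseq.data import Dictionary

    vocab = Dictionary()
    for i in range(10):
        vocab.add_symbol(f"<sentinel_{i}>")
    for i in range(20):
        vocab.add_symbol(f"w{i}")

    tokens = torch.LongTensor(
        [vocab.bos()] + [vocab.index(f"w{i}") for i in range(20)] + [vocab.eos()]
    )
    ds = LongDenoisingDataset(
        dataset=[tokens], sizes=[len(tokens)], vocab=vocab, shuffle=False,
        seed=1, noise_density=0.15, mean_noise_span_length=3,
    )
    source, target = ds.add_noise(tokens, noisy_span_len=3)
    print(vocab.string(source), "=>", vocab.string(target))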
bart_ls-main
fairseq-py/fairseq/data/long_denoising_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.data import data_utils from . import BaseWrapperDataset class PadDataset(BaseWrapperDataset): def __init__(self, dataset, pad_idx, left_pad): super().__init__(dataset) self.pad_idx = pad_idx self.left_pad = left_pad def collater(self, samples): return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) class LeftPadDataset(PadDataset): def __init__(self, dataset, pad_idx): super().__init__(dataset, pad_idx, left_pad=True) class RightPadDataset(PadDataset): def __init__(self, dataset, pad_idx): super().__init__(dataset, pad_idx, left_pad=False)
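
# --- Hedged usage sketch (illustration only; not part of the original file).
# RightPadDataset.collater pads variable-length token tensors into one batch.
if __name__ == "__main__":
    import torch
    from fairseq.data import ListDataset

    base = ListDataset([torch.LongTensor([4, 5]), torch.LongTensor([4, 5, 6])])
    ds = RightPadDataset(base, pad_idx=1)
    batch = ds.collater([ds[0], ds[1]])
    assert batch.shape == (2, 3) and batch[0, -1] == 1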
bart_ls-main
fairseq-py/fairseq/data/pad_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import shutil import struct from functools import lru_cache import numpy as np import torch from fairseq.dataclass.constants import DATASET_IMPL_CHOICES from fairseq.data.fasta_dataset import FastaDataset from fairseq.file_io import PathManager from fairseq.data.huffman import HuffmanMMapIndexedDataset, HuffmanMMapIndex from . import FairseqDataset from typing import Union def best_fitting_int_dtype( max_int_to_represent, ) -> Union[np.uint16, np.uint32, np.int64]: if max_int_to_represent is None: return np.uint32 # Safe guess elif max_int_to_represent < 65500: return np.uint16 elif max_int_to_represent < 4294967295: return np.uint32 else: return np.int64 # we avoid np.uint64 because it doesn't save space and its type promotion behaves unexpectedly # https://github.com/numpy/numpy/issues/5745 def get_available_dataset_impl(): return list(map(str, DATASET_IMPL_CHOICES)) def infer_dataset_impl(path): if IndexedRawTextDataset.exists(path): return "raw" elif IndexedDataset.exists(path): with open(index_file_path(path), "rb") as f: magic = f.read(8) if magic == IndexedDataset._HDR_MAGIC: return "cached" elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]: return "mmap" elif magic == HuffmanMMapIndex._HDR_MAGIC[:8]: return "huffman" else: return None elif FastaDataset.exists(path): return "fasta" else: return None def make_builder(out_file, impl, vocab_size=None): if impl == "mmap": return MMapIndexedDatasetBuilder( out_file, dtype=best_fitting_int_dtype(vocab_size) ) elif impl == "fasta": raise NotImplementedError elif impl == "huffman": raise ValueError("Use HuffmanCodeBuilder directly as it has a different interface.") else: return IndexedDatasetBuilder(out_file) def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None): if impl == "raw" and IndexedRawTextDataset.exists(path): assert dictionary is not None return IndexedRawTextDataset(path, dictionary) elif impl == "lazy" and IndexedDataset.exists(path): return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing) elif impl == "cached" and IndexedDataset.exists(path): return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing) elif impl == "mmap" and MMapIndexedDataset.exists(path): return MMapIndexedDataset(path) elif impl == "fasta" and FastaDataset.exists(path): from fairseq.data.fasta_dataset import EncodedFastaDataset return EncodedFastaDataset(path, dictionary) elif impl == "huffman" and HuffmanMMapIndexedDataset.exists(path): return HuffmanMMapIndexedDataset(path) return None def dataset_exists(path, impl): if impl == "raw": return IndexedRawTextDataset.exists(path) elif impl == "mmap": return MMapIndexedDataset.exists(path) elif impl == "huffman": return HuffmanMMapIndexedDataset.exists(path) else: return IndexedDataset.exists(path) def read_longs(f, n): a = np.empty(n, dtype=np.int64) f.readinto(a) return a def write_longs(f, a): f.write(np.array(a, dtype=np.int64)) _code_to_dtype = { 1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float, 7: np.double, 8: np.uint16, 9: np.uint32, 10: np.uint64, } def _dtype_header_code(dtype) -> int: for k in _code_to_dtype.keys(): if _code_to_dtype[k] == dtype: return k raise ValueError(dtype) def index_file_path(prefix_path): return prefix_path + ".idx" def data_file_path(prefix_path): return prefix_path + ".bin" class IndexedDataset(FairseqDataset): """Loader for TorchNet 
IndexedDataset""" _HDR_MAGIC = b"TNTIDX\x00\x00" def __init__(self, path, fix_lua_indexing=False): super().__init__() self.path = path self.fix_lua_indexing = fix_lua_indexing self.data_file = None self.read_index(path) def read_index(self, path): with open(index_file_path(path), "rb") as f: magic = f.read(8) assert magic == self._HDR_MAGIC, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) version = f.read(8) assert struct.unpack("<Q", version) == (1,) code, self.element_size = struct.unpack("<QQ", f.read(16)) self.dtype = _code_to_dtype[code] self._len, self.s = struct.unpack("<QQ", f.read(16)) self.dim_offsets = read_longs(f, self._len + 1) self.data_offsets = read_longs(f, self._len + 1) self.sizes = read_longs(f, self.s) def read_data(self, path): self.data_file = open(data_file_path(path), "rb", buffering=0) def check_index(self, i): if i < 0 or i >= self._len: raise IndexError("index out of range") def __del__(self): if self.data_file: self.data_file.close() @lru_cache(maxsize=8) def __getitem__(self, i) -> torch.Tensor: if not self.data_file: self.read_data(self.path) self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) item = torch.from_numpy(a).long() if self.fix_lua_indexing: item -= 1 # subtract 1 for 0-based indexing return item def __len__(self): return self._len def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return PathManager.exists(index_file_path(path)) and PathManager.exists( data_file_path(path) ) @property def supports_prefetch(self): return False # avoid prefetching to save memory class IndexedCachedDataset(IndexedDataset): def __init__(self, path, fix_lua_indexing=False): super().__init__(path, fix_lua_indexing=fix_lua_indexing) self.cache = None self.cache_index = {} @property def supports_prefetch(self): return True def prefetch(self, indices): if all(i in self.cache_index for i in indices): return if not self.data_file: self.read_data(self.path) indices = sorted(set(indices)) total_size = 0 for i in indices: total_size += self.data_offsets[i + 1] - self.data_offsets[i] self.cache = np.empty(total_size, dtype=self.dtype) ptx = 0 self.cache_index.clear() for i in indices: self.cache_index[i] = ptx size = self.data_offsets[i + 1] - self.data_offsets[i] a = self.cache[ptx : ptx + size] self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) ptx += size if self.data_file: # close and delete data file after prefetch so we can pickle self.data_file.close() self.data_file = None @lru_cache(maxsize=8) def __getitem__(self, i): self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) ptx = self.cache_index[i] np.copyto(a, self.cache[ptx : ptx + a.size]) item = torch.from_numpy(a).long() if self.fix_lua_indexing: item -= 1 # subtract 1 for 0-based indexing return item class IndexedRawTextDataset(FairseqDataset): """Takes a text file as input and binarizes it in memory at instantiation. 
Original lines are also kept in memory""" def __init__(self, path, dictionary, append_eos=True, reverse_order=False): self.tokens_list = [] self.lines = [] self.sizes = [] self.append_eos = append_eos self.reverse_order = reverse_order self.read_data(path, dictionary) self.size = len(self.tokens_list) def read_data(self, path, dictionary): with open(path, "r", encoding="utf-8") as f: for line in f: self.lines.append(line.strip("\n")) tokens = dictionary.encode_line( line, add_if_not_exist=False, append_eos=self.append_eos, reverse_order=self.reverse_order, ).long() self.tokens_list.append(tokens) self.sizes.append(len(tokens)) self.sizes = np.array(self.sizes) def check_index(self, i): if i < 0 or i >= self.size: raise IndexError("index out of range") @lru_cache(maxsize=8) def __getitem__(self, i): self.check_index(i) return self.tokens_list[i] def get_original_text(self, i): self.check_index(i) return self.lines[i] def __del__(self): pass def __len__(self): return self.size def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return PathManager.exists(path) class IndexedDatasetBuilder: element_sizes = { np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float: 4, np.double: 8, } def __init__(self, out_file, dtype=np.int32): self.out_file = open(out_file, "wb") self.dtype = dtype self.data_offsets = [0] self.dim_offsets = [0] self.sizes = [] self.element_size = self.element_sizes[self.dtype] def add_item(self, tensor): # +1 for Lua compatibility bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype)) self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size) for s in tensor.size(): self.sizes.append(s) self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size())) def merge_file_(self, another_file): index = IndexedDataset(another_file) assert index.dtype == self.dtype begin = self.data_offsets[-1] for offset in index.data_offsets[1:]: self.data_offsets.append(begin + offset) self.sizes.extend(index.sizes) begin = self.dim_offsets[-1] for dim_offset in index.dim_offsets[1:]: self.dim_offsets.append(begin + dim_offset) with open(data_file_path(another_file), "rb") as f: while True: data = f.read(1024) if data: self.out_file.write(data) else: break def finalize(self, index_file): self.out_file.close() index = open(index_file, "wb") index.write(b"TNTIDX\x00\x00") index.write(struct.pack("<Q", 1)) index.write( struct.pack("<QQ", _dtype_header_code(self.dtype), self.element_size) ) index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes))) write_longs(index, self.dim_offsets) write_longs(index, self.data_offsets) write_longs(index, self.sizes) index.close() def _warmup_mmap_file(path): with open(path, "rb") as stream: while stream.read(100 * 1024 * 1024): pass class MMapIndexedDataset(torch.utils.data.Dataset): class Index: _HDR_MAGIC = b"MMIDIDX\x00\x00" @classmethod def writer(cls, path, dtype): class _Writer: def __enter__(self): self._file = open(path, "wb") self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack("<Q", 1)) self._file.write(struct.pack("<B", _dtype_header_code(dtype))) return self @staticmethod def _get_pointers(sizes): dtype_size = dtype().itemsize address = 0 pointers = [] for size in sizes: pointers.append(address) address += size * dtype_size return pointers def write(self, sizes): pointers = self._get_pointers(sizes) self._file.write(struct.pack("<Q", len(sizes))) sizes = np.array(sizes, dtype=np.int32) 
self._file.write(sizes.tobytes(order="C")) del sizes pointers = np.array(pointers, dtype=np.int64) self._file.write(pointers.tobytes(order="C")) del pointers def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path): with open(path, "rb") as stream: magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) version = struct.unpack("<Q", stream.read(8)) assert (1,) == version (dtype_code,) = struct.unpack("<B", stream.read(1)) self._dtype = _code_to_dtype[dtype_code] self._dtype_size = self._dtype().itemsize self._len = struct.unpack("<Q", stream.read(8))[0] offset = stream.tell() _warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode="r", order="C") self._bin_buffer = memoryview(self._bin_buffer_mmap) self._sizes = np.frombuffer( self._bin_buffer, dtype=np.int32, count=self._len, offset=offset ) self._pointers = np.frombuffer( self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes, ) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap @property def dtype(self): return self._dtype @property def sizes(self): return self._sizes @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def __init__(self, path): super().__init__() self._path = None self._index = None self._bin_buffer = None self._do_init(path) def __getstate__(self): return self._path def __setstate__(self, state): self._do_init(state) def _do_init(self, path): self._path = path self._index = self.Index(index_file_path(self._path)) _warmup_mmap_file(data_file_path(self._path)) self._bin_buffer_mmap = np.memmap( data_file_path(self._path), mode="r", order="C" ) self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap del self._index def __len__(self): return len(self._index) @lru_cache(maxsize=8) def __getitem__(self, i): ptr, size = self._index[i] np_array = np.frombuffer( self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr ) if self._index.dtype != np.int64: np_array = np_array.astype(np.int64) return torch.from_numpy(np_array) @property def sizes(self): return self._index.sizes @property def supports_prefetch(self): return False @staticmethod def exists(path): return PathManager.exists(index_file_path(path)) and PathManager.exists( data_file_path(path) ) def get_indexed_dataset_to_local(path) -> str: local_index_path = PathManager.get_local_path(index_file_path(path)) local_data_path = PathManager.get_local_path(data_file_path(path)) assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), ( "PathManager.get_local_path does not return files with expected patterns: " f"{local_index_path} and {local_data_path}" ) local_path = local_data_path[:-4] # stripping surfix ".bin" assert local_path == local_index_path[:-4] # stripping surfix ".idx" return local_path class MMapIndexedDatasetBuilder: def __init__(self, out_file, dtype=np.int64): self._data_file = open(out_file, "wb") self._dtype = dtype self._sizes = [] def add_item(self, tensor): np_array = np.array(tensor.numpy(), dtype=self._dtype) self._data_file.write(np_array.tobytes(order="C")) self._sizes.append(np_array.size) def merge_file_(self, another_file): # Concatenate index index = MMapIndexedDataset.Index(index_file_path(another_file)) assert index.dtype == 
self._dtype for size in index.sizes: self._sizes.append(size) # Concatenate data with open(data_file_path(another_file), "rb") as f: shutil.copyfileobj(f, self._data_file) def finalize(self, index_file): self._data_file.close() with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index: index.write(self._sizes)
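
# --- Hedged usage sketch (illustration only; not part of the original file).
# Round trip: write two toy sequences with the mmap builder, then read them
# back through MMapIndexedDataset. The temp-file prefix is a dummy.
if __name__ == "__main__":
    import os
    import tempfile

    prefix = os.path.join(tempfile.mkdtemp(), "toy")
    builder = MMapIndexedDatasetBuilder(data_file_path(prefix), dtype=np.int32)
    builder.add_item(torch.LongTensor([10, 11, 12]))
    builder.add_item(torch.LongTensor([13, 14]))
    builder.finalize(index_file_path(prefix))

    ds = MMapIndexedDataset(prefix)
    assert len(ds) == 2 and ds.sizes.tolist() == [3, 2]
    assert ds[0].tolist() == [10, 11, 12]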
bart_ls-main
fairseq-py/fairseq/data/indexed_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import BaseWrapperDataset


class RollDataset(BaseWrapperDataset):
    """Wrapper that circularly shifts each item by *shifts* positions via torch.roll."""

    def __init__(self, dataset, shifts):
        super().__init__(dataset)
        self.shifts = shifts

    def __getitem__(self, index):
        item = self.dataset[index]
        return torch.roll(item, self.shifts)
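
# --- Hedged usage sketch (illustration only; not part of the original file).
if __name__ == "__main__":
    from fairseq.data import ListDataset

    base = ListDataset([torch.LongTensor([1, 2, 3])])
    assert RollDataset(base, shifts=1)[0].tolist() == [3, 1, 2]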
bart_ls-main
fairseq-py/fairseq/data/roll_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from collections import Counter from multiprocessing import Pool import torch from fairseq import utils from fairseq.data import data_utils from fairseq.file_chunker_utils import Chunker, find_offsets from fairseq.file_io import PathManager from fairseq.tokenizer import tokenize_line class Dictionary: """A mapping from symbols to consecutive integers""" def __init__( self, *, # begin keyword-only arguments bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None, ): self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos self.symbols = [] self.count = [] self.indices = {} self.bos_index = self.add_symbol(bos) self.pad_index = self.add_symbol(pad) self.eos_index = self.add_symbol(eos) self.unk_index = self.add_symbol(unk) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(s) self.nspecial = len(self.symbols) def __eq__(self, other): return self.indices == other.indices def __getitem__(self, idx): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def get_count(self, idx): return self.count[idx] def __len__(self): """Returns the number of symbols in the dictionary""" return len(self.symbols) def __contains__(self, sym): return sym in self.indices def index(self, sym): """Returns the index of the specified symbol""" assert isinstance(sym, str) if sym in self.indices: return self.indices[sym] return self.unk_index def string( self, tensor, bpe_symbol=None, escape_unk=False, extra_symbols_to_ignore=None, unk_string=None, include_eos=False, separator=" ", ): """Helper for converting a tensor of token indices to a string. Can optionally remove BPE symbols or escape <unk> words. """ if torch.is_tensor(tensor) and tensor.dim() == 2: return "\n".join( self.string( t, bpe_symbol, escape_unk, extra_symbols_to_ignore, include_eos=include_eos, ) for t in tensor ) extra_symbols_to_ignore = set(extra_symbols_to_ignore or []) if not include_eos: extra_symbols_to_ignore.add(self.eos()) def token_string(i): if i == self.unk(): if unk_string is not None: return unk_string else: return self.unk_string(escape_unk) else: return self[i] if hasattr(self, "bos_index"): extra_symbols_to_ignore.add(self.bos()) sent = separator.join( token_string(i) for i in tensor if utils.item(i) not in extra_symbols_to_ignore ) return data_utils.post_process(sent, bpe_symbol) def unk_string(self, escape=False): """Return unknown string, optionally escaped as: <<unk>>""" if escape: return "<{}>".format(self.unk_word) else: return self.unk_word def add_symbol(self, word, n=1, overwrite=False): """Adds a word to the dictionary""" if word in self.indices and not overwrite: idx = self.indices[word] self.count[idx] = self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def update(self, new_dict): """Updates counts from new dictionary.""" for word in new_dict.symbols: idx2 = new_dict.indices[word] if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + new_dict.count[idx2] else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(new_dict.count[idx2]) def finalize(self, threshold=-1, nwords=-1, padding_factor=8): """Sort symbols by frequency in descending order, ignoring special ones. 
Args: - threshold defines the minimum word count - nwords defines the total number of words in the final dictionary, including special symbols - padding_factor can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). """ if nwords <= 0: nwords = len(self) new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial))) new_symbols = self.symbols[: self.nspecial] new_count = self.count[: self.nspecial] c = Counter( dict( sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :])) ) ) for symbol, count in c.most_common(nwords - self.nspecial): if count >= threshold: new_indices[symbol] = len(new_symbols) new_symbols.append(symbol) new_count.append(count) else: break assert len(new_symbols) == len(new_indices) self.count = list(new_count) self.symbols = list(new_symbols) self.indices = new_indices self.pad_to_multiple_(padding_factor) def pad_to_multiple_(self, padding_factor): """Pad Dictionary size to be a multiple of *padding_factor*.""" if padding_factor > 1: i = 0 while len(self) % padding_factor != 0: symbol = "madeupword{:04d}".format(i) self.add_symbol(symbol, n=0) i += 1 def bos(self): """Helper to get index of beginning-of-sentence symbol""" return self.bos_index def pad(self): """Helper to get index of pad symbol""" return self.pad_index def eos(self): """Helper to get index of end-of-sentence symbol""" return self.eos_index def unk(self): """Helper to get index of unk symbol""" return self.unk_index @classmethod def load(cls, f): """Loads the dictionary from a text file with the format: ``` <symbol0> <count0> <symbol1> <count1> ... ``` """ d = cls() d.add_from_file(f) return d def add_from_file(self, f): """ Loads a pre-existing dictionary from a text file and adds its symbols to this instance. """ if isinstance(f, str): try: with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception( "Incorrect encoding detected in {}, please " "rebuild the dataset".format(f) ) return lines = f.readlines() indices_start_line = self._load_meta(lines) for line in lines[indices_start_line:]: try: line, field = line.rstrip().rsplit(" ", 1) if field == "#fairseq:overwrite": overwrite = True line, field = line.rsplit(" ", 1) else: overwrite = False count = int(field) word = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(word) ) self.add_symbol(word, n=count, overwrite=overwrite) except ValueError: raise ValueError( f"Incorrect dictionary format, expected '<token> <cnt> [flags]': \"{line}\"" ) def _save(self, f, kv_iterator): if isinstance(f, str): PathManager.mkdirs(os.path.dirname(f)) with PathManager.open(f, "w", encoding="utf-8") as fd: return self.save(fd) for k, v in kv_iterator: print("{} {}".format(k, v), file=f) def _get_meta(self): return [], [] def _load_meta(self, lines): return 0 def save(self, f): """Stores dictionary into a text file""" ex_keys, ex_vals = self._get_meta() self._save( f, zip( ex_keys + self.symbols[self.nspecial :], ex_vals + self.count[self.nspecial :], ), ) def dummy_sentence(self, length): t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long() t[-1] = self.eos() return t def encode_line( self, line, line_tokenizer=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False, ) -> torch.IntTensor: words = line_tokenizer(line) if reverse_order: words = list(reversed(words)) nwords = len(words) ids = torch.IntTensor(nwords + 1 if append_eos else nwords) for i, word in enumerate(words): if add_if_not_exist: idx = self.add_symbol(word) else: idx = self.index(word) if consumer is not None: consumer(word, idx) ids[i] = idx if append_eos: ids[nwords] = self.eos_index return ids @staticmethod def _add_file_to_dictionary_single_worker( filename, tokenize, eos_word, start_offset, end_offset, ): counter = Counter() with Chunker(filename, start_offset, end_offset) as line_iterator: for line in line_iterator: for word in tokenize(line): counter.update([word]) counter.update([eos_word]) return counter @staticmethod def add_file_to_dictionary(filename, dict, tokenize, num_workers): def merge_result(counter): for w, c in sorted(counter.items()): dict.add_symbol(w, c) local_file = PathManager.get_local_path(filename) offsets = find_offsets(local_file, num_workers) if num_workers > 1: chunks = zip(offsets, offsets[1:]) pool = Pool(processes=num_workers) results = [] for (start_offset, end_offset) in chunks: results.append( pool.apply_async( Dictionary._add_file_to_dictionary_single_worker, ( local_file, tokenize, dict.eos_word, start_offset, end_offset, ), ) ) pool.close() pool.join() for r in results: merge_result(r.get()) else: merge_result( Dictionary._add_file_to_dictionary_single_worker( local_file, tokenize, dict.eos_word, offsets[0], offsets[1] ) ) class TruncatedDictionary(object): def __init__(self, wrapped_dict, length): self.__class__ = type( wrapped_dict.__class__.__name__, (self.__class__, wrapped_dict.__class__), {}, ) self.__dict__ = wrapped_dict.__dict__ self.wrapped_dict = wrapped_dict self.length = min(len(self.wrapped_dict), length) def __len__(self): return self.length def __getitem__(self, i): if i < self.length: return self.wrapped_dict[i] return self.wrapped_dict.unk()
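
# --- Hedged usage sketch (illustration only; not part of the original file).
# Round-tripping a line through encode_line/string with an on-the-fly vocab.
if __name__ == "__main__":
    d = Dictionary()
    ids = d.encode_line("the quick fox")  # builds the vocab as it goes
    assert len(ids) == 4 and ids[-1] == d.eos()  # 3 words + appended EOS
    assert d.string(ids) == "the quick fox"  # EOS is stripped on decode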
bart_ls-main
fairseq-py/fairseq/data/dictionary.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch.utils.data from fairseq.data import data_utils logger = logging.getLogger(__name__) class EpochListening: """Mixin for receiving updates whenever the epoch increments.""" @property def can_reuse_epoch_itr_across_epochs(self): """ Whether we can reuse the :class:`fairseq.data.EpochBatchIterator` for this dataset across epochs. This needs to return ``False`` if the sample sizes can change across epochs, in which case we may need to regenerate batches at each epoch. If your dataset relies in ``set_epoch`` then you should consider setting this to ``False``. """ return True def set_epoch(self, epoch): """Will receive the updated epoch number at the beginning of the epoch.""" pass class FairseqDataset(torch.utils.data.Dataset, EpochListening): """A dataset that provides helpers for batching.""" def __getitem__(self, index): raise NotImplementedError def __len__(self): raise NotImplementedError def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ raise NotImplementedError def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" raise NotImplementedError def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" raise NotImplementedError def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" raise NotImplementedError def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" return np.arange(len(self), dtype=np.int64) @property def supports_prefetch(self): """Whether this dataset supports prefetching.""" return False def attr(self, attr: str, index: int): return getattr(self, attr, None) def prefetch(self, indices): """Prefetch the data required for this epoch.""" raise NotImplementedError def get_batch_shapes(self): """ Return a list of valid batch shapes, for example:: [(8, 512), (16, 256), (32, 128)] The first dimension of each tuple is the batch size and can be ``None`` to automatically infer the max batch size based on ``--max-tokens``. The second dimension of each tuple is the max supported length as given by :func:`fairseq.data.FairseqDataset.num_tokens`. This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size` to restrict batch shapes. This is useful on TPUs to avoid too many dynamic shapes (and recompilations). """ return None def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): """ Given an ordered set of indices, return batches according to *max_tokens*, *max_sentences* and *required_batch_size_multiple*. 
""" from fairseq.data import data_utils fixed_shapes = self.get_batch_shapes() if fixed_shapes is not None: def adjust_bsz(bsz, num_tokens): if bsz is None: assert max_tokens is not None, "Must specify --max-tokens" bsz = max_tokens // num_tokens if max_sentences is not None: bsz = min(bsz, max_sentences) elif ( bsz >= required_batch_size_multiple and bsz % required_batch_size_multiple != 0 ): bsz -= bsz % required_batch_size_multiple return bsz fixed_shapes = np.array( [ [adjust_bsz(bsz, num_tokens), num_tokens] for (bsz, num_tokens) in fixed_shapes ] ) try: num_tokens_vec = self.num_tokens_vec(indices).astype('int64') except NotImplementedError: num_tokens_vec = None return data_utils.batch_by_size( indices, num_tokens_fn=self.num_tokens, num_tokens_vec=num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, fixed_shapes=fixed_shapes, ) def filter_indices_by_size(self, indices, max_sizes): """ Filter a list of sample indices. Remove those that are longer than specified in *max_sizes*. WARNING: don't update, override method in child classes Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ if isinstance(max_sizes, float) or isinstance(max_sizes, int): if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray): ignored = indices[self.sizes[indices] > max_sizes].tolist() indices = indices[self.sizes[indices] <= max_sizes] elif ( hasattr(self, "sizes") and isinstance(self.sizes, list) and len(self.sizes) == 1 ): ignored = indices[self.sizes[0][indices] > max_sizes].tolist() indices = indices[self.sizes[0][indices] <= max_sizes] else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored @property def supports_fetch_outside_dataloader(self): """Whether this dataset supports fetching outside the workers of the dataloader.""" return True class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening): """ For datasets that need to be read sequentially, usually because the data is being streamed or otherwise can't be manipulated on a single machine. """ def __iter__(self): raise NotImplementedError
bart_ls-main
fairseq-py/fairseq/data/fairseq_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from typing import Dict from fairseq.data.monolingual_dataset import MonolingualDataset from . import FairseqDataset class LMContextWindowDataset(FairseqDataset): """ Wraps a MonolingualDataset and provides more context for evaluation. Each item in the new dataset will have a maximum size of ``tokens_per_sample + context_window``. Args: dataset: dataset to wrap tokens_per_sample (int): the max number of tokens in each dataset item context_window (int): the number of accumulated tokens to add to each dataset item pad_idx (int): padding symbol """ def __init__( self, dataset: MonolingualDataset, tokens_per_sample: int, context_window: int, pad_idx: int, ): assert context_window > 0 self.dataset = dataset self.tokens_per_sample = tokens_per_sample self.context_window = context_window self.pad_idx = pad_idx self.prev_tokens = np.empty([0]) def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples) -> Dict: sample = self.dataset.collater(samples) pad = self.pad_idx max_sample_len = self.tokens_per_sample + self.context_window bsz, tsz = sample["net_input"]["src_tokens"].shape start_idxs = [0] * bsz toks = sample["net_input"]["src_tokens"] lengths = sample["net_input"]["src_lengths"] tgt = sample["target"] new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64) new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64) sample_lens = toks.ne(pad).long().sum(dim=1).cpu() for i in range(bsz): sample_len = sample_lens[i] extra = len(self.prev_tokens) + sample_len - max_sample_len if extra > 0: self.prev_tokens = self.prev_tokens[extra:] pads = np.full(self.context_window - len(self.prev_tokens), pad) new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads]) new_tgt[ i, len(self.prev_tokens) : len(self.prev_tokens) + len(tgt[i]) ] = tgt[i] start_idxs[i] = len(self.prev_tokens) lengths[i] += len(self.prev_tokens) self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window :] sample["net_input"]["src_tokens"] = torch.from_numpy(new_toks) sample["target"] = torch.from_numpy(new_tgt) sample["start_indices"] = start_idxs return sample def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): # NOTE we don't shuffle the data to retain access to the previous dataset elements return np.arange(len(self.dataset)) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
bart_ls-main
fairseq-py/fairseq/data/lm_context_window_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch.nn.functional as F from fairseq.data import BaseWrapperDataset from fairseq.data.data_utils import get_buckets, get_bucketed_sizes class BucketPadLengthDataset(BaseWrapperDataset): """ Bucket and pad item lengths to the nearest bucket size. This can be used to reduce the number of unique batch shapes, which is important on TPUs since each new batch shape requires a recompilation. Args: dataset (FairseqDatset): dataset to bucket sizes (List[int]): all item sizes num_buckets (int): number of buckets to create pad_idx (int): padding symbol left_pad (bool): if True, pad on the left; otherwise right pad """ def __init__( self, dataset, sizes, num_buckets, pad_idx, left_pad, tensor_key=None, ): super().__init__(dataset) self.pad_idx = pad_idx self.left_pad = left_pad assert num_buckets > 0 self.buckets = get_buckets(sizes, num_buckets) self._bucketed_sizes = get_bucketed_sizes(sizes, self.buckets) self._tensor_key = tensor_key def _set_tensor(self, item, val): if self._tensor_key is None: return val item[self._tensor_key] = val return item def _get_tensor(self, item): if self._tensor_key is None: return item return item[self._tensor_key] def _pad(self, tensor, bucket_size, dim=-1): num_pad = bucket_size - tensor.size(dim) return F.pad( tensor, (num_pad if self.left_pad else 0, 0 if self.left_pad else num_pad), value=self.pad_idx, ) def __getitem__(self, index): item = self.dataset[index] bucket_size = self._bucketed_sizes[index] tensor = self._get_tensor(item) padded = self._pad(tensor, bucket_size) return self._set_tensor(item, padded) @property def sizes(self): return self._bucketed_sizes def num_tokens(self, index): return self._bucketed_sizes[index] def size(self, index): return self._bucketed_sizes[index]
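
# --- Hedged usage sketch (illustration only; not part of the original file).
# Bucketing pads each item up to the nearest bucket boundary so batches reuse
# a small set of shapes; the sizes and pad index below are made up.
if __name__ == "__main__":
    import torch
    from fairseq.data import ListDataset

    sizes = [2, 3, 7, 8]
    base = ListDataset(
        [torch.full((n,), 5, dtype=torch.long) for n in sizes], sizes=sizes
    )
    ds = BucketPadLengthDataset(base, sizes, num_buckets=2, pad_idx=1, left_pad=False)
    # every item now has one of at most `num_buckets` lengths
    assert len(set(ds.sizes.tolist())) <= 2
    print(ds[0])  # the length-2 item, right-padded up to its bucket size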
bart_ls-main
fairseq-py/fairseq/data/bucket_pad_length_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from fairseq.data import FairseqDataset, plasma_utils from fairseq.data.indexed_dataset import best_fitting_int_dtype from typing import Tuple class TokenBlockDataset(FairseqDataset): """Break a Dataset of tokens into blocks. Args: dataset (~torch.utils.data.Dataset): dataset to break into blocks sizes (List[int]): sentence lengths (required for 'complete' and 'eos') block_size (int): maximum block size (ignored in 'eos' break mode) break_mode (str, optional): Mode used for breaking tokens. Values can be one of: - 'none': break tokens into equally sized blocks (up to block_size) - 'complete': break tokens into blocks (up to block_size) such that blocks contains complete sentences, although block_size may be exceeded if some sentences exceed block_size - 'complete_doc': similar to 'complete' mode, but do not cross document boundaries - 'eos': each block contains one sentence (block_size is ignored) include_targets (bool, optional): return next tokens as targets (default: False). document_sep_len (int, optional): document separator size (required for 'complete_doc' break mode). Typically 1 if the sentences have eos and 0 otherwise. """ def __init__( self, dataset, sizes, block_size, pad, eos, break_mode=None, include_targets=False, document_sep_len=1, use_plasma_view=False, split_path=None, plasma_path=None, ): super().__init__() self.dataset = dataset self.pad = pad self.eos = eos self.include_targets = include_targets assert len(dataset) > 0 assert len(dataset) == len(sizes) _sizes, block_to_dataset_index, slice_indices = self._build_slice_indices( sizes, break_mode, document_sep_len, block_size ) if use_plasma_view: plasma_id = (block_size, document_sep_len, str(break_mode), len(dataset)) self._slice_indices = plasma_utils.PlasmaView( slice_indices, split_path, (plasma_id, 0), plasma_path=plasma_path ) self._sizes = plasma_utils.PlasmaView( _sizes, split_path, (plasma_id, 1), plasma_path=plasma_path ) self._block_to_dataset_index = plasma_utils.PlasmaView( block_to_dataset_index, split_path, (plasma_id, 2), plasma_path=plasma_path, ) else: self._slice_indices = plasma_utils.PlasmaArray(slice_indices) self._sizes = plasma_utils.PlasmaArray(_sizes) self._block_to_dataset_index = plasma_utils.PlasmaArray( block_to_dataset_index ) @staticmethod def _build_slice_indices( sizes, break_mode, document_sep_len, block_size ) -> Tuple[np.ndarray]: """Use token_block_utils_fast to build arrays for indexing into self.dataset""" try: from fairseq.data.token_block_utils_fast import ( _get_slice_indices_fast, _get_block_to_dataset_index_fast, ) except ImportError: raise ImportError( "Please build Cython components with: `pip install --editable .` " "or `python setup.py build_ext --inplace`" ) if isinstance(sizes, list): sizes = np.array(sizes, dtype=np.int64) else: if torch.is_tensor(sizes): sizes = sizes.numpy() sizes = sizes.astype(np.int64) break_mode = break_mode if break_mode is not None else "none" # For "eos" break-mode, block_size is not required parameters. 
if break_mode == "eos" and block_size is None: block_size = 0 slice_indices = _get_slice_indices_fast( sizes, str(break_mode), block_size, document_sep_len ) _sizes = slice_indices[:, 1] - slice_indices[:, 0] # build index mapping block indices to the underlying dataset indices if break_mode == "eos": # much faster version for eos break mode block_to_dataset_index = np.stack( [ np.arange(len(sizes)), # starting index in dataset np.zeros( len(sizes), dtype=np.compat.long ), # starting offset within starting index np.arange(len(sizes)), # ending index in dataset ], 1, ) else: block_to_dataset_index = _get_block_to_dataset_index_fast( sizes, slice_indices, ) size_dtype = np.uint16 if block_size < 65535 else np.uint32 num_tokens = slice_indices[-1].max() slice_indices_dtype = best_fitting_int_dtype(num_tokens) slice_indices = slice_indices.astype(slice_indices_dtype) _sizes = _sizes.astype(size_dtype) block_to_dataset_index = block_to_dataset_index.astype(slice_indices_dtype) return _sizes, block_to_dataset_index, slice_indices @property def slice_indices(self): return self._slice_indices.array @property def sizes(self): return self._sizes.array @property def block_to_dataset_index(self): return self._block_to_dataset_index.array def attr(self, attr: str, index: int): start_ds_idx, _, _ = self.block_to_dataset_index[index] return self.dataset.attr(attr, start_ds_idx) def __getitem__(self, index): start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index] buffer = torch.cat( [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)] ) slice_s, slice_e = self.slice_indices[index] length = slice_e - slice_s s, e = start_offset, start_offset + length item = buffer[s:e] if self.include_targets: # *target* is the original sentence (=item) # *source* is shifted right by 1 (maybe left-padded with eos) # *past_target* is shifted right by 2 (left-padded as needed) if s == 0: source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]]) past_target = torch.cat( [item.new([self.pad, self.eos]), buffer[0 : e - 2]] ) else: source = buffer[s - 1 : e - 1] if s == 1: past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]]) else: past_target = buffer[s - 2 : e - 2] return source, item, past_target return item def __len__(self): return len(self.slice_indices) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch( { ds_idx for index in indices for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]] for ds_idx in range(start_ds_idx, end_ds_idx + 1) } )
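
# --- Hedged usage sketch (illustration only; not part of the original file).
# Breaking a 9-token stream into blocks of up to 4 tokens with
# break_mode="none". Requires the Cython helpers built via
# `pip install --editable .`; token ids are made up.
if __name__ == "__main__":
    sents = [
        torch.LongTensor([4, 5, 2]),
        torch.LongTensor([6, 2]),
        torch.LongTensor([7, 8, 9, 2]),
    ]
    ds = TokenBlockDataset(
        sents, sizes=[3, 2, 4], block_size=4, pad=1, eos=2, break_mode="none"
    )
    assert ds.sizes.tolist() == [4, 4, 1]
    assert ds[0].tolist() == [4, 5, 2, 6]  # blocks may cross sentence ends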
bart_ls-main
fairseq-py/fairseq/data/token_block_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional import torch from . import FairseqDataset class TransformEosLangPairDataset(FairseqDataset): """A :class:`~fairseq.data.FairseqDataset` wrapper that transform bos on collated samples of language pair dataset. Note that the transformation is applied in :func:`collater`. Args: dataset (~fairseq.data.FairseqDataset): dataset that collates sample into LanguagePairDataset schema src_eos (int): original source end-of-sentence symbol index to be replaced new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the beginning of 'prev_output_tokens' """ def __init__( self, dataset: FairseqDataset, src_eos: int, new_src_eos: Optional[int] = None, tgt_bos: Optional[int] = None, new_tgt_bos: Optional[int] = None, ): self.dataset = dataset self.src_eos = src_eos self.new_src_eos = new_src_eos self.tgt_bos = tgt_bos self.new_tgt_bos = new_tgt_bos def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples, **extra_args): samples = self.dataset.collater(samples, **extra_args) if len(samples) == 0: return samples if 'net_input' not in samples: return samples if self.new_src_eos is not None: if self.dataset.left_pad_source: assert ( samples["net_input"]["src_tokens"][:, -1] != self.src_eos ).sum() == 0 samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos else: eos_idx = samples["net_input"]["src_lengths"] - 1 assert ( samples["net_input"]["src_tokens"][ torch.arange(eos_idx.size(0)), eos_idx ] != self.src_eos ).sum() == 0 eos_idx = eos_idx.resize_(len(samples["net_input"]["src_lengths"]), 1) samples["net_input"]["src_tokens"].scatter_( 1, eos_idx, self.new_src_eos ) if ( self.new_tgt_bos is not None and "prev_output_tokens" in samples["net_input"] ): if self.dataset.left_pad_target: # TODO: support different padding direction on target side raise NotImplementedError( "TransformEosLangPairDataset does not implement --left-pad-target True option" ) else: assert ( samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos ).sum() == 0 samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos return samples def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) @property def sizes(self): # dataset.sizes can be a dynamically computed sizes: return self.dataset.sizes def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
bart_ls-main
fairseq-py/fairseq/data/transform_eos_lang_pair_dataset.py
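A minimal sketch (hypothetical ids) of the right-padded branch in TransformEosLangPairDataset.collater above: each source sequence keeps its eos at position src_lengths - 1, and scatter_ swaps it for the new language token. unsqueeze is used here in place of the in-place resize_ in the file; the resulting index shape is the same.

import torch

src_eos, new_src_eos = 2, 250004              # 250004 is a made-up langtok id
src_tokens = torch.tensor([[4, 5, 2, 1],      # right-padded batch, pad=1
                           [6, 2, 1, 1]])
src_lengths = torch.tensor([3, 2])

eos_idx = (src_lengths - 1).unsqueeze(1)      # shape (batch, 1)
assert (src_tokens.gather(1, eos_idx) == src_eos).all()
src_tokens.scatter_(1, eos_idx, new_src_eos)

assert src_tokens.tolist() == [[4, 5, 250004, 1], [6, 250004, 1, 1]]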
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import BaseWrapperDataset, data_utils from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel class AddTargetDataset(BaseWrapperDataset): def __init__( self, dataset, labels, pad, eos, batch_targets, process_label=None, label_len_fn=None, add_to_input=False, text_compression_level=TextCompressionLevel.none ): super().__init__(dataset) self.labels = labels self.batch_targets = batch_targets self.pad = pad self.eos = eos self.process_label = process_label self.label_len_fn = label_len_fn self.add_to_input = add_to_input self.text_compressor = TextCompressor(level=text_compression_level) def get_label(self, index, process_fn=None): lbl = self.labels[index] lbl = self.text_compressor.decompress(lbl) return lbl if process_fn is None else process_fn(lbl) def __getitem__(self, index): item = self.dataset[index] item["label"] = self.get_label(index, process_fn=self.process_label) return item def size(self, index): sz = self.dataset.size(index) own_sz = self.label_len_fn(self.get_label(index)) return sz, own_sz def collater(self, samples): collated = self.dataset.collater(samples) if len(collated) == 0: return collated indices = set(collated["id"].tolist()) target = [s["label"] for s in samples if s["id"] in indices] if self.batch_targets: collated["target_lengths"] = torch.LongTensor([len(t) for t in target]) target = data_utils.collate_tokens(target, pad_idx=self.pad, left_pad=False) collated["ntokens"] = collated["target_lengths"].sum().item() else: collated["ntokens"] = sum([len(t) for t in target]) collated["target"] = target if self.add_to_input: eos = target.new_full((target.size(0), 1), self.eos) collated["target"] = torch.cat([target, eos], dim=-1).long() collated["net_input"]["prev_output_tokens"] = torch.cat( [eos, target], dim=-1 ).long() collated["ntokens"] += target.size(0) return collated def filter_indices_by_size(self, indices, max_sizes): indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored
bart_ls-main
fairseq-py/fairseq/data/add_target_dataset.py
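A minimal sketch (hypothetical tensors) of the add_to_input branch in AddTargetDataset.collater above: eos is appended to the collated targets, and prev_output_tokens is the eos-prefixed copy used for teacher forcing.

import torch

eos = 2
target = torch.tensor([[7, 8, 9],
                       [5, 6, 1]])            # already collated, pad=1
eos_col = target.new_full((target.size(0), 1), eos)

target_in = torch.cat([target, eos_col], dim=-1).long()
prev_output_tokens = torch.cat([eos_col, target], dim=-1).long()

assert target_in.tolist() == [[7, 8, 9, 2], [5, 6, 1, 2]]
assert prev_output_tokens.tolist() == [[2, 7, 8, 9], [2, 5, 6, 1]]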
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.data import Dictionary class MaskedLMDictionary(Dictionary): """ Dictionary for Masked Language Modelling tasks. This extends Dictionary by adding the mask symbol. """ def __init__( self, pad="<pad>", eos="</s>", unk="<unk>", mask="<mask>", ): super().__init__(pad=pad, eos=eos, unk=unk) self.mask_word = mask self.mask_index = self.add_symbol(mask) self.nspecial = len(self.symbols) def mask(self): """Helper to get index of mask symbol""" return self.mask_index class BertDictionary(MaskedLMDictionary): """ Dictionary for BERT task. This extends MaskedLMDictionary by adding support for cls and sep symbols. """ def __init__( self, pad="<pad>", eos="</s>", unk="<unk>", mask="<mask>", cls="<cls>", sep="<sep>", ): super().__init__(pad=pad, eos=eos, unk=unk, mask=mask) self.cls_word = cls self.sep_word = sep self.cls_index = self.add_symbol(cls) self.sep_index = self.add_symbol(sep) self.nspecial = len(self.symbols) def cls(self): """Helper to get index of cls symbol""" return self.cls_index def sep(self): """Helper to get index of sep symbol""" return self.sep_index
bart_ls-main
fairseq-py/fairseq/data/legacy/masked_lm_dictionary.py
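A short usage sketch for the dictionaries above. The concrete index values depend on which symbols the base fairseq Dictionary registers first, so they are printed rather than asserted here.

from fairseq.data.legacy.masked_lm_dictionary import BertDictionary

d = BertDictionary()
print(d.pad(), d.eos(), d.unk())     # base special symbols
print(d.mask(), d.cls(), d.sep())    # symbols added by the subclasses above
assert d.nspecial == len(d.symbols)  # every current symbol counts as special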
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import numpy as np
import torch
from fairseq.data import FairseqDataset


class BlockPairDataset(FairseqDataset):
    """Break a Dataset of tokens into sentence pair blocks for next sentence
    prediction as well as masked language model.

    High-level logic:
    1. break input tensor into tensor blocks
    2. pair the blocks with 50% next sentence and 50% random sentence
    3. return paired blocks as well as related segment labels

    Args:
        dataset (~torch.utils.data.Dataset): dataset to break into blocks
        sizes: array of sentence lengths
        dictionary: dictionary for the task
        block_size: maximum block size
        break_mode: mode for breaking the corpus into block pairs. currently we support
            2 modes
            doc: respect document boundaries and each part of the pair should belong to one document
            none: don't respect any boundary and cut tokens evenly
        short_seq_prob: probability for generating shorter block pairs
        doc_break_size: Size for empty line separating documents. Typically 1 if
            the sentences have eos, 0 otherwise.
    """

    def __init__(
        self,
        dataset,
        dictionary,
        sizes,
        block_size,
        break_mode="doc",
        short_seq_prob=0.1,
        doc_break_size=1,
    ):
        super().__init__()
        self.dataset = dataset
        self.pad = dictionary.pad()
        self.eos = dictionary.eos()
        self.cls = dictionary.cls()
        self.mask = dictionary.mask()
        self.sep = dictionary.sep()
        self.break_mode = break_mode
        self.dictionary = dictionary
        self.short_seq_prob = short_seq_prob
        self.block_indices = []

        assert len(dataset) == len(sizes)

        if break_mode == "doc":
            cur_doc = []
            for sent_id, sz in enumerate(sizes):
                assert doc_break_size == 0 or sz != 0, (
                    "when doc_break_size is non-zero, we expect documents to be"
                    "separated by a blank line with a single eos."
                )
                # empty line as document separator
                if sz == doc_break_size:
                    if len(cur_doc) == 0:
                        continue
                    self.block_indices.append(cur_doc)
                    cur_doc = []
                else:
                    cur_doc.append(sent_id)
            max_num_tokens = block_size - 3  # Account for [CLS], [SEP], [SEP]
            self.sent_pairs = []
            self.sizes = []
            for doc_id, doc in enumerate(self.block_indices):
                self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
        elif break_mode is None or break_mode == "none":
            # each block should have half of the block size since we are constructing block pair
            sent_length = (block_size - 3) // 2
            total_len = sum(dataset.sizes)
            length = math.ceil(total_len / sent_length)

            def block_at(i):
                start = i * sent_length
                end = min(start + sent_length, total_len)
                return (start, end)

            sent_indices = np.array([block_at(i) for i in range(length)])
            sent_sizes = np.array([e - s for s, e in sent_indices])
            dataset_index = self._sent_to_dataset_index(sent_sizes)

            # pair sentences
            self._pair_sentences(dataset_index)
        else:
            raise ValueError("Invalid break_mode: " + break_mode)

    def _pair_sentences(self, dataset_index):
        """
        Given a list of evenly cut blocks/sentences, pair these sentences with 50%
        consecutive sentences and 50% random sentences.
This is used for none break mode
        """
        # pair sentences
        for sent_id, sent in enumerate(dataset_index):
            next_sent_label = (
                1 if np.random.rand() > 0.5 and sent_id != len(dataset_index) - 1 else 0
            )
            if next_sent_label:
                next_sent = dataset_index[sent_id + 1]
            else:
                next_sent = dataset_index[
                    self._skip_sampling(len(dataset_index), [sent_id, sent_id + 1])
                ]
            self.sent_pairs.append((sent, next_sent, next_sent_label))

            # The current blocks don't include the special tokens but the
            # sizes already account for this
            self.sizes.append(3 + sent[3] + next_sent[3])

    def _sent_to_dataset_index(self, sent_sizes):
        """
        Build index mapping block indices to the underlying dataset indices
        """
        dataset_index = []
        ds_idx, ds_remaining = -1, 0
        for to_consume in sent_sizes:
            sent_size = to_consume
            if ds_remaining == 0:
                ds_idx += 1
                ds_remaining = sent_sizes[ds_idx]
            start_ds_idx = ds_idx
            start_offset = sent_sizes[ds_idx] - ds_remaining
            while to_consume > ds_remaining:
                to_consume -= ds_remaining
                ds_idx += 1
                ds_remaining = sent_sizes[ds_idx]
            ds_remaining -= to_consume
            dataset_index.append(
                (
                    start_ds_idx,  # starting index in dataset
                    start_offset,  # starting offset within starting index
                    ds_idx,  # ending index in dataset
                    sent_size,  # sentence length
                )
            )
        assert ds_remaining == 0
        assert ds_idx == len(self.dataset) - 1
        return dataset_index

    def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
        """
        Go through a single document and generate sentence pairs from it
        """
        current_chunk = []
        current_length = 0
        curr = 0
        # To provide more randomness, we decrease target seq length for parts of
        # samples (10% by default). Note that max_num_tokens is the hard threshold
        # for batching and will never be changed.
        target_seq_length = max_num_tokens
        if np.random.random() < self.short_seq_prob:
            target_seq_length = np.random.randint(2, max_num_tokens)
        # loop through all sentences in document
        while curr < len(doc):
            sent_id = doc[curr]
            current_chunk.append(sent_id)
            current_length = sum(sizes[current_chunk])
            # split the chunk and generate a pair when we exceed target_seq_length
            # or finish the loop
            if curr == len(doc) - 1 or current_length >= target_seq_length:
                # split the chunk into 2 parts
                a_end = 1
                if len(current_chunk) > 2:
                    a_end = np.random.randint(1, len(current_chunk) - 1)
                sent_a = current_chunk[:a_end]
                len_a = sum(sizes[sent_a])
                # generate next sentence label, note that if there is only 1 sentence
                # in current chunk, label is always 0
                next_sent_label = (
                    1 if np.random.rand() > 0.5 and len(current_chunk) != 1 else 0
                )
                if not next_sent_label:
                    # if next sentence label is 0, sample sent_b from a random doc
                    target_b_length = target_seq_length - len_a
                    rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
                    random_doc = self.block_indices[rand_doc_id]
                    random_start = np.random.randint(0, len(random_doc))
                    sent_b = []
                    len_b = 0
                    for j in range(random_start, len(random_doc)):
                        sent_b.append(random_doc[j])
                        len_b = sum(sizes[sent_b])
                        if len_b >= target_b_length:
                            break
                    # put back the second part of the chunk since it's not used here
                    num_unused_segments = len(current_chunk) - a_end
                    curr -= num_unused_segments
                else:
                    # if next sentence label is 1, use the second part of chunk as sent_b
                    sent_b = current_chunk[a_end:]
                    len_b = sum(sizes[sent_b])
                # currently sent_a and sent_b may be longer than max_num_tokens,
                # truncate them and return block idx and offsets for them
                sent_a, sent_b = self._truncate_sentences(
                    sent_a, sent_b, max_num_tokens
                )
                self.sent_pairs.append((sent_a, sent_b, next_sent_label))
                self.sizes.append(3 + sent_a[3] + sent_b[3])
                current_chunk = []
            curr += 1

    def _skip_sampling(self, total, skip_ids):
        """
        Generate a random integer which is not in skip_ids. Sample range is [0, total)
        TODO: ids in skip_ids should be consecutive, we can extend it to a more generic
        version later
        """
        rand_id = np.random.randint(total - len(skip_ids))
        return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)

    def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
        """
        Truncate a pair of sentences to limit total length under max_num_tokens
        Logic:
        1. Truncate the longer sentence
        2. Tokens to be truncated could be at the beginning or the end of the sentence
        Returns:
            Truncated sentences represented by dataset idx
        """
        len_a, len_b = sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b])
        front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0

        while True:
            total_length = (
                len_a + len_b - front_cut_a - front_cut_b - end_cut_a - end_cut_b
            )
            if total_length <= max_num_tokens:
                break

            if len_a - front_cut_a - end_cut_a > len_b - front_cut_b - end_cut_b:
                if np.random.rand() < 0.5:
                    front_cut_a += 1
                else:
                    end_cut_a += 1
            else:
                if np.random.rand() < 0.5:
                    front_cut_b += 1
                else:
                    end_cut_b += 1

        # calculate ds indices as well as offsets and return
        truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
        truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
        return truncated_sent_a, truncated_sent_b

    def _cut_sentence(self, sent, front_cut, end_cut):
        """
        Cut a sentence based on the numbers of tokens to be cut from beginning and end
        Represent the sentence as dataset idx and return
        """
        start_ds_idx, end_ds_idx, offset = sent[0], sent[-1], 0
        target_len = sum(self.dataset.sizes[sent]) - front_cut - end_cut
        while front_cut > 0:
            if self.dataset.sizes[start_ds_idx] > front_cut:
                offset += front_cut
                break
            else:
                front_cut -= self.dataset.sizes[start_ds_idx]
                start_ds_idx += 1
        while end_cut > 0:
            if self.dataset.sizes[end_ds_idx] > end_cut:
                break
            else:
                end_cut -= self.dataset.sizes[end_ds_idx]
                end_ds_idx -= 1
        return start_ds_idx, offset, end_ds_idx, target_len

    def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
        """
        Fetch a block of tokens based on its dataset idx
        """
        buffer = torch.cat(
            [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
        )
        s, e = offset, offset + length
        return buffer[s:e]

    def __getitem__(self, index):
        block1, block2, next_sent_label = self.sent_pairs[index]
        block1 = self._fetch_block(*block1)
        block2 = self._fetch_block(*block2)
        return block1, block2, next_sent_label

    def __len__(self):
        return len(self.sizes)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, "supports_prefetch", False)

    def prefetch(self, indices):
        prefetch_idx = set()
        for index in indices:
            for block1, block2, _ in [self.sent_pairs[index]]:
                for ds_idx in range(block1[0], block1[2] + 1):
                    prefetch_idx.add(ds_idx)
                for ds_idx in range(block2[0], block2[2] + 1):
                    prefetch_idx.add(ds_idx)
        self.dataset.prefetch(prefetch_idx)
bart_ls-main
fairseq-py/fairseq/data/legacy/block_pair_dataset.py
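A self-contained sketch of the _skip_sampling trick above: draw from a range shrunk by len(skip_ids), then shift past the (consecutive) skipped block so the skipped ids can never be returned.

import numpy as np

def skip_sampling(total, skip_ids):
    rand_id = np.random.randint(total - len(skip_ids))
    return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)

np.random.seed(0)
draws = {skip_sampling(10, [3, 4]) for _ in range(200)}
assert 3 not in draws and 4 not in draws  # skipped ids never sampled
assert draws <= set(range(10))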
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .block_pair_dataset import BlockPairDataset from .masked_lm_dataset import MaskedLMDataset from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary __all__ = [ "BertDictionary", "BlockPairDataset", "MaskedLMDataset", "MaskedLMDictionary", ]
bart_ls-main
fairseq-py/fairseq/data/legacy/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data import Dictionary


class HFBertDictionary(Dictionary):
    """
    Dictionary for Huggingface BERT. This uses exactly the same dictionary as
    Google's released BERT. It doesn't have special tokens since they are
    included in the dictionary file
    """

    def __init__(
        self, pad="[PAD]", unk="[UNK]", cls="[CLS]", mask="[MASK]", sep="[SEP]"
    ):
        (
            self.pad_word,
            self.unk_word,
            self.cls_word,
            self.mask_word,
            self.sep_word,
            self.eos_word,
            self.bos_word,
        ) = (
            pad,
            unk,
            cls,
            mask,
            sep,
            sep,
            sep,
        )
        self.symbols = []
        self.count = []
        self.indices = {}
        self.nspecial = 0

    def bos(self):
        """Helper to get index of bos symbol"""
        idx = self.add_symbol(self.bos_word)
        return idx

    def pad(self):
        """Helper to get index of pad symbol"""
        idx = self.add_symbol(self.pad_word)
        return idx

    def eos(self):
        """Helper to get index of eos symbol"""
        idx = self.add_symbol(self.eos_word)
        return idx

    def unk(self):
        """Helper to get index of unk symbol"""
        idx = self.add_symbol(self.unk_word)
        return idx

    def cls(self):
        """Helper to get index of cls symbol"""
        idx = self.add_symbol(self.cls_word)
        return idx

    def sep(self):
        """Helper to get index of sep symbol"""
        idx = self.add_symbol(self.sep_word)
        return idx

    def mask(self):
        """Helper to get index of mask symbol"""
        idx = self.add_symbol(self.mask_word)
        return idx
bart_ls-main
fairseq-py/fairseq/data/legacy/fb_hf_bert_dictionary.py
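A short usage sketch: because eos_word, bos_word and sep_word are all [SEP] in HFBertDictionary above, the three helpers resolve to the same index; indices are assigned lazily by add_symbol on first access.

from fairseq.data.legacy.fb_hf_bert_dictionary import HFBertDictionary

d = HFBertDictionary()
assert d.sep() == d.eos() == d.bos()  # all three alias [SEP]
assert len({d.pad(), d.unk(), d.cls(), d.mask(), d.sep()}) == 5  # distinct tokens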
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Dict, List, Tuple

import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset, data_utils
from fairseq.data.concat_dataset import ConcatDataset
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.token_block_dataset import TokenBlockDataset


class MaskedLMDataset(FairseqDataset):
    """
    A wrapper Dataset for masked language modelling. The dataset
    wraps around TokenBlockDataset or BlockPairDataset and creates a batch
    where the input blocks are masked according to the specified masking
    probability. Additionally the batch can also contain sentence level targets
    if this is specified.

    Args:
        dataset: Dataset which generates blocks of data. Only BlockPairDataset
            and TokenBlockDataset are supported.
        sizes: Sentence lengths
        vocab: Dictionary with the vocabulary and special tokens.
        pad_idx: Id of padding token in dictionary
        mask_idx: Id of mask token in dictionary
        classif_token_idx: Id of classification token in dictionary. This is the
            token associated with the sentence embedding (Eg: CLS for BERT)
        sep_token_idx: Id of separator token in dictionary
            (Eg: SEP in BERT)
        seed: Seed for random number generator for reproducibility.
        shuffle: Shuffle the elements before batching.
        has_pairs: Specifies whether the underlying dataset
            generates a pair of blocks along with a sentence_target or not.
            Setting it to True assumes that the underlying dataset generates a
            label for the pair of sentences which is surfaced as
            sentence_target. The default value assumes a single block with no
            sentence target.
        segment_id: An optional segment id for filling in the segment labels
            when we are in the single block setting (Eg: XLM). Default is 0.
        masking_ratio: specifies what percentage of the blocks should be masked.
        masking_prob: specifies the probability of a given token being
            replaced with the "MASK" token.
        random_token_prob: specifies the probability of a given token being
            replaced by a random token from the vocabulary.
""" def __init__( self, dataset: FairseqDataset, sizes: np.ndarray, vocab: Dictionary, pad_idx: int, mask_idx: int, classif_token_idx: int, sep_token_idx: int, seed: int = 1, shuffle: bool = True, has_pairs: bool = True, segment_id: int = 0, masking_ratio: float = 0.15, masking_prob: float = 0.8, random_token_prob: float = 0.1, ): # Make sure the input datasets are the ones supported assert ( isinstance(dataset, TokenBlockDataset) or isinstance(dataset, BlockPairDataset) or isinstance(dataset, ConcatDataset) ), ( "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or " "ConcatDataset" ) self.dataset = dataset self.sizes = np.array(sizes) self.vocab = vocab self.pad_idx = pad_idx self.mask_idx = mask_idx self.classif_token_idx = classif_token_idx self.sep_token_idx = sep_token_idx self.shuffle = shuffle self.seed = seed self.has_pairs = has_pairs self.segment_id = segment_id self.masking_ratio = masking_ratio self.masking_prob = masking_prob self.random_token_prob = random_token_prob # If we have only one block then sizes needs to be updated to include # the classification token if not has_pairs: self.sizes = self.sizes + 1 def __getitem__(self, index: int): # if has_pairs, then expect 2 blocks and a sentence target if self.has_pairs: (block_one, block_two, sentence_target) = self.dataset[index] else: block_one = self.dataset[index] return { "id": index, "block_one": block_one, "block_two": block_two if self.has_pairs else None, "sentence_target": sentence_target if self.has_pairs else None, } def __len__(self): return len(self.dataset) def _mask_block( self, sentence: np.ndarray, mask_idx: int, pad_idx: int, dictionary_token_range: Tuple, ): """ Mask tokens for Masked Language Model training Samples mask_ratio tokens that will be predicted by LM. Note:This function may not be efficient enough since we had multiple conversions between np and torch, we can replace them with torch operators later. Args: sentence: 1d tensor to be masked mask_idx: index to use for masking the sentence pad_idx: index to use for masking the target for tokens we aren't predicting dictionary_token_range: range of indices in dictionary which can be used for random word replacement (e.g. without special characters) Return: masked_sent: masked sentence target: target with words which we are not predicting replaced by pad_idx """ masked_sent = np.copy(sentence) sent_length = len(sentence) mask_num = math.ceil(sent_length * self.masking_ratio) mask = np.random.choice(sent_length, mask_num, replace=False) target = np.copy(sentence) for i in range(sent_length): if i in mask: rand = np.random.random() # replace with mask if probability is less than masking_prob # (Eg: 0.8) if rand < self.masking_prob: masked_sent[i] = mask_idx # replace with random token if probability is less than # masking_prob + random_token_prob (Eg: 0.9) elif rand < (self.masking_prob + self.random_token_prob): # sample random token from dictionary masked_sent[i] = np.random.randint( dictionary_token_range[0], dictionary_token_range[1] ) else: target[i] = pad_idx return masked_sent, target def _collate(self, samples: List[Dict], pad_idx: int, eos_idx: int): """ Does the heavy lifting for creating a batch from the input list of examples. The logic is as follows: 1. Mask the input blocks. In case has_pair is True then we have 2 blocks to mask. 2. Prepend the first masked block tensor with the special token used as sentence embedding. Eg: CLS in BERT. This happens irrespective of the value of has_pair. 3. 
If has_pair is True, then append the first masked block with the special separator token (eg: SEP for BERT) and compute segment label accordingly. In this case, also append the second masked block with this special separator token and compute its segment label. 4. For the targets tensor, prepend and append with padding index accordingly. 5. Concatenate all tensors. """ if len(samples) == 0: return {} # To ensure determinism, we reset the state of the PRNG after every # batch based on the seed and the first id of the batch. This ensures # that across epochs we get the same mask for the same example. This # is needed for reproducibility and is how BERT does masking # TODO: Can we add deteminism without this constraint? with data_utils.numpy_seed(self.seed + samples[0]["id"]): for s in samples: # token range is needed for replacing with random token during # masking token_range = (self.vocab.nspecial, len(self.vocab)) # mask according to specified probabilities. masked_blk_one, masked_tgt_one = self._mask_block( s["block_one"], self.mask_idx, self.pad_idx, token_range, ) tokens = np.concatenate([[self.classif_token_idx], masked_blk_one]) targets = np.concatenate([[self.pad_idx], masked_tgt_one]) segments = np.ones(len(tokens)) * self.segment_id # if has_pairs is True then we need to add the SEP token to both # the blocks after masking and re-compute segments based on the new # lengths. if self.has_pairs: tokens_one = np.concatenate([tokens, [self.sep_token_idx]]) targets_one = np.concatenate([targets, [self.pad_idx]]) masked_blk_two, masked_tgt_two = self._mask_block( s["block_two"], self.mask_idx, self.pad_idx, token_range ) tokens_two = np.concatenate([masked_blk_two, [self.sep_token_idx]]) targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]]) # block + 1 sep + 1 special (CLS) segments_one = np.zeros(len(tokens_one)) # block + 1 sep segments_two = np.ones(len(tokens_two)) tokens = np.concatenate([tokens_one, tokens_two]) targets = np.concatenate([targets_one, targets_two]) segments = np.concatenate([segments_one, segments_two]) s["source"] = torch.LongTensor(tokens) s["segment_labels"] = torch.LongTensor(segments) s["lm_target"] = torch.LongTensor(targets) def merge(key): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad=False ) return { "id": torch.LongTensor([s["id"] for s in samples]), "ntokens": sum(len(s["source"]) for s in samples), "net_input": { "src_tokens": merge("source"), "segment_labels": merge("segment_labels"), }, "lm_target": merge("lm_target"), "sentence_target": torch.LongTensor([s["sentence_target"] for s in samples]) if self.has_pairs else None, "nsentences": len(samples), } def collater(self, samples: List[Dict]): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch of data """ return self._collate(samples, self.vocab.pad(), self.vocab.eos()) def num_tokens(self, index: int): """ Return the number of tokens in a sample. This value is used to enforce max-tokens during batching. """ return self.sizes[index] def size(self, index: int): """ Return an example's size as a float or tuple. This value is used when filtering a dataset with max-positions. """ return self.sizes[index] def ordered_indices(self): """ Return an ordered list of indices. Batches will be constructed based on this order. 
""" if self.shuffle: return np.random.permutation(len(self)) else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch(indices)
bart_ls-main
fairseq-py/fairseq/data/legacy/masked_lm_dataset.py
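A standalone sketch of the per-token decision inside _mask_block above, with the default 0.15/0.8/0.1 settings; mask_idx=4 and the vocab range [5, 100) are hypothetical values standing in for the dictionary's special-token layout.

import math
import numpy as np

masking_ratio, masking_prob, random_token_prob = 0.15, 0.8, 0.1
sentence = np.arange(5, 25)                            # 20 hypothetical token ids
mask_num = math.ceil(len(sentence) * masking_ratio)    # -> 3 positions
positions = np.random.choice(len(sentence), mask_num, replace=False)

masked = sentence.copy()
for i in positions:
    rand = np.random.random()
    if rand < masking_prob:                            # ~80%: replace with <mask>
        masked[i] = 4
    elif rand < masking_prob + random_token_prob:      # ~10%: random vocab token
        masked[i] = np.random.randint(5, 100)
    # remaining ~10%: the token is left unchanged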
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import hashlib
import logging
import math

import numpy as np
from fairseq.data import SampledMultiDataset

from .sampled_multi_dataset import CollateFormat, default_virtual_size_func

logger = logging.getLogger(__name__)


class SampledMultiEpochDataset(SampledMultiDataset):
    """Samples from multiple sub-datasets according to sampling ratios
       using virtual epoch sizes to speed up dataloading.
    Args:
        datasets (
            List[~torch.utils.data.Dataset]
            or OrderedDict[str, ~torch.utils.data.Dataset]
        ): datasets
        sampling_ratios (List[float]): list of probability of each dataset to be sampled
            (default: None, which corresponds to concatenating all datasets together).
        seed (int): RNG seed to use (default: 2).
        epoch (int): starting epoch number (default: 1).
        eval_key (str, optional): a key used at evaluation time that causes this instance to
            pass-through batches from *datasets[eval_key]*.
        collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
            CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
            the collater to output batches of data mixed from all sub-datasets,
            and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
            of sub-datasets.
            Note that not all sub-datasets will present in a single batch in both formats.
        virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
        split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        virtual_epoch_size (int): virtual epoch size, the dataset will go through the data by
            this virtual epoch size one by one to speed up data loading, e.g. indexing and
            filtering can be performed whenever a virtual epoch is loaded without waiting
            for the whole dataset to be loaded.
        shared_collater (bool): whether or not all sub-datasets have the same collater.
        shard_epoch (int): the real epoch number for shard selection.
        shuffle (bool): whether or not to shuffle data (default: True).
""" def __init__( self, datasets, sampling_ratios=None, seed=2, epoch=1, eval_key=None, collate_format=CollateFormat.single, virtual_size=default_virtual_size_func, split="", virtual_epoch_size=None, shared_collater=False, shard_epoch=1, shuffle=True, ): self.virtual_epoch_size = virtual_epoch_size self._current_epoch_start_index = None self._random_global_indices = None self.shard_epoch = shard_epoch if shard_epoch is not None else 1 self.load_next_shard = None self._epoch_sizes = None super().__init__( datasets=datasets, sampling_ratios=sampling_ratios, seed=seed, epoch=epoch, eval_key=eval_key, collate_format=collate_format, virtual_size=virtual_size, split=split, shared_collater=shared_collater, shuffle=shuffle, ) def _setup(self, epoch): self.virtual_epoch_size = ( self.virtual_epoch_size if self.virtual_epoch_size is not None else self.virtual_size ) if self.virtual_epoch_size > self.virtual_size: logger.warning( f"virtual epoch size {self.virtual_epoch_size} " f"is greater than virtual dataset size {self.virtual_size}" ) self.virtual_epoch_size = self.virtual_size self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size) self._current_epoch_start_index = self._get_epoch_start_index(epoch) logger.info( f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}" ) def _map_epoch_index_to_global(self, index): index = self._current_epoch_start_index + index # add randomness return self._random_global_indices[index] @property def sizes(self): if self._epoch_sizes is not None: return self._epoch_sizes _sizes = super().sizes indices = self._random_global_indices[ self._current_epoch_start_index : self._current_epoch_start_index + len(self) ] self._epoch_sizes = _sizes[indices] # del super()._sizes to save memory del self._sizes self._sizes = None return self._epoch_sizes def _get_dataset_and_index(self, index): i = self._map_epoch_index_to_global(index) return super()._get_dataset_and_index(i) def __len__(self): return ( self.virtual_epoch_size if self._current_epoch_start_index + self.virtual_epoch_size < self.virtual_size else self.virtual_size - self._current_epoch_start_index ) def set_epoch(self, epoch): if self._current_epoch_start_index is None: # initializing epoch idnices of a virtual dataset self._setup(epoch) self._next_virtual_epoch(epoch) else: # working on already intialized epoch indices if epoch == self._cur_epoch: # re-enter so return return self._next_virtual_epoch(epoch) def _get_epoch_start_index(self, epoch): assert epoch >= 1 # fairseq is using 1-based epoch everywhere return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size def _next_global_indices(self, epoch): rng = np.random.RandomState( [ int( hashlib.sha1( str(self.__class__.__name__).encode("utf-8") ).hexdigest(), 16, ) % (2 ** 32), self.seed % (2 ** 32), # global seed epoch, # epoch index, ] ) del self._random_global_indices self._random_global_indices = rng.choice( self.virtual_size, self.virtual_size, replace=False ) if self.load_next_shard is None: self.load_next_shard = False else: # increase shard epoch for next loading self.shard_epoch += 1 self.load_next_shard = True logger.info( "to load next epoch/shard in next load_dataset: " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) def _next_virtual_epoch(self, epoch): index = self._get_epoch_start_index(epoch) if index == 0 or self._random_global_indices is None: # need to start from the beginning, # so call super().set_epoch(epoch) to establish the global virtual indices logger.info( 
"establishing a new set of global virtual indices for " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) super().set_epoch(epoch) self._next_global_indices(epoch) else: self._cur_epoch = epoch # reset cache sizes and ordered_indices for the epoch after moving to a new epoch self._clean_if_not_none( [ self._epoch_sizes, ] ) self._epoch_sizes = None self._current_epoch_start_index = index
bart_ls-main
fairseq-py/fairseq/data/multilingual/sampled_multi_epoch_dataset.py
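A tiny worked example of the virtual-epoch bookkeeping above: fairseq epochs are 1-based, start indices wrap around num_virtual_epochs, and the last virtual epoch can be shorter than virtual_epoch_size (this mirrors _get_epoch_start_index and __len__).

import math

virtual_size, virtual_epoch_size = 10, 4
num_virtual_epochs = math.ceil(virtual_size / virtual_epoch_size)  # -> 3

def epoch_start(epoch):
    return ((epoch - 1) % num_virtual_epochs) * virtual_epoch_size

def epoch_len(epoch):
    return min(virtual_epoch_size, virtual_size - epoch_start(epoch))

assert [epoch_start(e) for e in (1, 2, 3, 4)] == [0, 4, 8, 0]  # epoch 4 wraps
assert [epoch_len(e) for e in (1, 2, 3)] == [4, 4, 2]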
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
bart_ls-main
fairseq-py/fairseq/data/multilingual/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import Enum from typing import Dict, List, Optional, Sequence import torch from fairseq.data import Dictionary class EncoderLangtok(Enum): """ Prepend to the beginning of source sentence either the source or target language token. (src/tgt). """ src = "src" tgt = "tgt" class LangTokSpec(Enum): main = "main" mono_dae = "mono_dae" class LangTokStyle(Enum): multilingual = "multilingual" mbart = "mbart" @torch.jit.export def get_lang_tok( lang: str, lang_tok_style: str, spec: str = LangTokSpec.main.value ) -> str: # TOKEN_STYLES can't be defined outside this fn since it needs to be # TorchScriptable. TOKEN_STYLES: Dict[str, str] = { LangTokStyle.mbart.value: "[{}]", LangTokStyle.multilingual.value: "__{}__", } if spec.endswith("dae"): lang = f"{lang}_dae" elif spec.endswith("mined"): lang = f"{lang}_mined" style = TOKEN_STYLES[lang_tok_style] return style.format(lang) def augment_dictionary( dictionary: Dictionary, language_list: List[str], lang_tok_style: str, langtoks_specs: Sequence[str] = (LangTokSpec.main.value,), extra_data: Optional[Dict[str, str]] = None, ) -> None: for spec in langtoks_specs: for language in language_list: dictionary.add_symbol( get_lang_tok(lang=language, lang_tok_style=lang_tok_style, spec=spec) ) if lang_tok_style == LangTokStyle.mbart.value or ( extra_data is not None and LangTokSpec.mono_dae.value in extra_data ): dictionary.add_symbol("<mask>")
bart_ls-main
fairseq-py/fairseq/data/multilingual/multilingual_utils.py
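A usage sketch for get_lang_tok above, showing both token styles and a dae spec.

from fairseq.data.multilingual.multilingual_utils import get_lang_tok

assert get_lang_tok("en_XX", "mbart") == "[en_XX]"
assert get_lang_tok("en_XX", "multilingual") == "__en_XX__"
assert get_lang_tok("en_XX", "multilingual", spec="mono_dae") == "__en_XX_dae__"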
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import List


logger = logging.getLogger(__name__)


def uniform(dataset_sizes: List[int]):
    return [1.0] * len(dataset_sizes)


def temperature_sampling(dataset_sizes, temp):
    total_size = sum(dataset_sizes)
    return [(size / total_size) ** (1.0 / temp) for size in dataset_sizes]


def make_temperature_sampling(temp=1.0):
    def sampling_func(dataset_sizes):
        return temperature_sampling(dataset_sizes, temp)

    return sampling_func


def make_ratio_sampling(ratios):
    def sampling_func(dataset_sizes):
        return ratios

    return sampling_func


class SamplingMethod:
    @staticmethod
    def add_arguments(parser):
        parser.add_argument(
            "--sampling-method",
            choices=[
                "uniform",
                "temperature",
                "concat",
                "RoundRobin",
            ],
            type=str,
            default="concat",
            help="The method to sample data per language pairs",
        )
        parser.add_argument(
            "--sampling-temperature",
            default=1.5,
            type=float,
            help="only works with --sampling-method temperature",
        )

    @staticmethod
    def build_sampler(args, task):
        return SamplingMethod(args, task)

    def __init__(self, args, task):
        self.args = args
        self.task = task

    def is_adaptive(self):
        return False

    def sampling_method_selector(self):
        args = self.args
        logger.info(f"selected sampler: {args.sampling_method}")
        if args.sampling_method == "uniform":
            return uniform
        elif args.sampling_method == "temperature" or self.is_adaptive():
            return make_temperature_sampling(float(args.sampling_temperature))
        else:
            # default to concatenating all datasets together
            return None
bart_ls-main
fairseq-py/fairseq/data/multilingual/sampling_method.py
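A quick numeric illustration of temperature_sampling above (reproduced here so the snippet is standalone): temp=1.0 keeps the raw size proportions, while larger temperatures flatten the weights toward uniform. Note the returned weights are unnormalized.

sizes = [900, 100]

def temperature_sampling(dataset_sizes, temp):
    total = sum(dataset_sizes)
    return [(size / total) ** (1.0 / temp) for size in dataset_sizes]

print(temperature_sampling(sizes, 1.0))  # [0.9, 0.1]     -- 9:1, proportional
print(temperature_sampling(sizes, 5.0))  # ~[0.979, 0.631] -- roughly 1.55:1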
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import datetime
import hashlib
import logging
import time
from bisect import bisect_right
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import List

import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
from fairseq.distributed import utils as distributed_utils


def get_time_gap(s, e):
    return (
        datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)
    ).__str__()


logger = logging.getLogger(__name__)


def default_virtual_size_func(datasets, ratios, max_scale_up=1.5):
    sizes = [len(d) for d in datasets]
    if ratios is None:
        return sum(sizes)
    largest_idx = np.argmax(sizes)
    largest_r = ratios[largest_idx]
    largest_s = sizes[largest_idx]
    # set virtual sizes relative to the largest dataset
    virtual_sizes = [(r / largest_r) * largest_s for r in ratios]
    vsize = sum(virtual_sizes)
    max_size = sum(sizes) * max_scale_up
    return int(vsize if vsize < max_size else max_size)


class CollateFormat(Enum):
    single = 1
    ordered_dict = 2


class SampledMultiDataset(FairseqDataset):
    """Samples from multiple sub-datasets according to given sampling ratios.
    Args:
        datasets (
            List[~torch.utils.data.Dataset]
            or OrderedDict[str, ~torch.utils.data.Dataset]
        ): datasets
        sampling_ratios (List[float]): list of probability of each dataset to be sampled
            (default: None, which corresponds to concatenating all datasets together).
        seed (int): RNG seed to use (default: 2).
        epoch (int): starting epoch number (default: 1).
        eval_key (str, optional): a key used at evaluation time that causes this instance to
            pass-through batches from *datasets[eval_key]*.
        collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
            CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
            the collater to output batches of data mixed from all sub-datasets,
            and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
            of sub-datasets.
            Note that not all sub-datasets will present in a single batch in both formats.
        virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
        split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        shared_collater (bool): whether or not all sub-datasets have the same collater.
        shuffle (bool): whether or not to shuffle data (default: True).
""" def __init__( self, datasets, sampling_ratios=None, seed=2, epoch=1, eval_key=None, collate_format=CollateFormat.single, virtual_size=default_virtual_size_func, split="", shared_collater=False, shuffle=True, ): super().__init__() self.shared_collater = shared_collater self.shuffle = shuffle if isinstance(datasets, OrderedDict): self.keys = list(datasets.keys()) datasets = list(datasets.values()) elif isinstance(datasets, List): self.keys = list(range(len(datasets))) else: raise AssertionError() self.datasets = datasets self.split = split self.eval_key = eval_key if self.eval_key is not None: self.collate_format = CollateFormat.single else: self.collate_format = collate_format self.seed = seed self._cur_epoch = None self.cumulated_sizes = None # self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset # namely, data item i is sampled from the kth sub-dataset self.datasets[k] # where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k] self._cur_indices = None self._sizes = None self.virtual_size_per_dataset = None # caching properties self._reset_cached_properties() self.setup_sampling(sampling_ratios, virtual_size) self.set_epoch(epoch) def _clean_if_not_none(self, var_list): for v in var_list: if v is not None: del v def _reset_cached_properties(self): self._clean_if_not_none([self._sizes, self._cur_indices]) self._sizes = None self._cur_indices = None def setup_sampling(self, sample_ratios, virtual_size): sizes = [len(d) for d in self.datasets] if sample_ratios is None: # default back to concating datasets self.sample_ratios = None self.virtual_size = sum(sizes) else: if not isinstance(sample_ratios, np.ndarray): sample_ratios = np.array(sample_ratios) self.sample_ratios = sample_ratios virtual_size = ( default_virtual_size_func if virtual_size is None else virtual_size ) self.virtual_size = ( virtual_size(self.datasets, self.sample_ratios) if callable(virtual_size) else virtual_size ) def adjust_sampling(self, epoch, sampling_ratios, virtual_size): if sampling_ratios is not None: sampling_ratios = self._sync_sample_ratios(sampling_ratios) self.setup_sampling(sampling_ratios, virtual_size) def _sync_sample_ratios(self, ratios): # in case the ratios are not precisely the same across processes # also to ensure every procresses update the ratios in the same pace ratios = torch.DoubleTensor(ratios) if torch.distributed.is_initialized(): if torch.cuda.is_available(): distributed_utils.all_reduce( ratios.cuda(), group=distributed_utils.get_data_parallel_group() ) else: distributed_utils.all_reduce( ratios, group=distributed_utils.get_data_parallel_group() ) ret = ratios.cpu() ret = ret.numpy() return ret def random_choice_in_dataset(self, rng, dataset, choice_size): if hasattr(dataset, "random_choice_in_dataset"): return dataset.random_choice_in_dataset(rng, choice_size) dataset_size = len(dataset) return rng.choice( dataset_size, choice_size, replace=(choice_size > dataset_size) ) def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size): def get_counts(sample_ratios): counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64) diff = virtual_size - counts.sum() assert diff >= 0 # due to round-offs, the size might not match the desired sizes if diff > 0: dataset_indices = rng.choice( len(sample_ratios), size=diff, p=sample_ratios ) for i in dataset_indices: counts[i] += 1 return counts def get_in_dataset_indices(datasets, sizes, sample_ratios): counts = get_counts(sample_ratios) # uniformally sample desired counts for each 
dataset # if the desired counts are large, sample with replacement: indices = [ self.random_choice_in_dataset(rng, d, c) for c, d in zip(counts, datasets) ] return indices sizes = [len(d) for d in datasets] if sample_ratios is None: # default back to concating datasets in_dataset_indices = [list(range(s)) for s in sizes] virtual_sizes_per_dataset = sizes else: ratios = sample_ratios / sample_ratios.sum() in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios) virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices] virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64) cumulative_sizes = np.cumsum(virtual_sizes_per_dataset) assert sum(virtual_sizes_per_dataset) == virtual_size assert cumulative_sizes[-1] == virtual_size if virtual_size < sum(sizes): logger.warning( f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})." " If virtual size << real data size, there could be data coverage issue." ) in_dataset_indices = np.hstack(in_dataset_indices) return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset def _get_dataset_and_index(self, index): i = bisect_right(self.cumulated_sizes, index) return i, self._cur_indices[index] def __getitem__(self, index): # self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]] # where k satisfies self.cumulated_sizes[k - 1] <= k < self.cumulated_sizes[k] ds_idx, ds_sample_idx = self._get_dataset_and_index(index) ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx]) return ret def num_tokens(self, index): return self.sizes[index].max() def num_tokens_vec(self, indices): sizes_vec = self.sizes[np.array(indices)] # max across all dimensions but first one return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape)))) def size(self, index): return self.sizes[index] def __len__(self): return self.virtual_size def collater(self, samples, **extra_args): """Merge a list of samples to form a mini-batch.""" if len(samples) == 0: return None if self.collate_format == "ordered_dict": collect_samples = [[] for _ in range(len(self.datasets))] for (i, sample) in samples: collect_samples[i].append(sample) batch = OrderedDict( [ (self.keys[i], dataset.collater(collect_samples[i])) for i, (key, dataset) in enumerate(zip(self.keys, self.datasets)) if len(collect_samples[i]) > 0 ] ) elif self.shared_collater: batch = self.datasets[0].collater([s for _, s in samples]) else: samples_dict = defaultdict(list) pad_to_length = ( defaultdict(int) if "pad_to_length" not in extra_args else extra_args["pad_to_length"] ) for ds_idx, s in samples: pad_to_length["source"] = max( pad_to_length["source"], s["source"].size(0) ) if s["target"] is not None: pad_to_length["target"] = max( pad_to_length["target"], s["target"].size(0) ) samples_dict[ds_idx].append(s) batches = [ self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length) for i in range(len(self.datasets)) if len(samples_dict[i]) > 0 ] def straight_data(tensors): batch = torch.cat(tensors, dim=0) return batch src_lengths = straight_data( [b["net_input"]["src_lengths"] for b in batches] ) src_lengths, sort_order = src_lengths.sort(descending=True) def straight_order(tensors): batch = straight_data(tensors) return batch.index_select(0, sort_order) batch = { "id": straight_order([b["id"] for b in batches]), "nsentences": sum(b["nsentences"] for b in batches), "ntokens": sum(b["ntokens"] for b in batches), "net_input": { "src_tokens": straight_order( [b["net_input"]["src_tokens"] for b in batches] ), "src_lengths": 
src_lengths, }, "target": straight_order([b["target"] for b in batches]) if batches[0]["target"] is not None else None, } if "prev_output_tokens" in batches[0]["net_input"]: batch["net_input"]["prev_output_tokens"] = straight_order( [b["net_input"]["prev_output_tokens"] for b in batches] ) if "src_lang_id" in batches[0]["net_input"]: batch["net_input"]["src_lang_id"] = straight_order( [b["net_input"]["src_lang_id"] for b in batches] ) if "tgt_lang_id" in batches[0]: batch["tgt_lang_id"] = straight_order( [b["tgt_lang_id"] for b in batches] ) return batch @property def sizes(self): if self._sizes is not None: return self._sizes start_time = time.time() in_sub_dataset_indices = [ self._cur_indices[ 0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i] ] for i in range(len(self.datasets)) ] sub_dataset_sizes = [ d.sizes[indices] for d, indices in zip(self.datasets, in_sub_dataset_indices) ] self._sizes = np.vstack(sub_dataset_sizes) logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}") return self._sizes def ordered_indices(self): if self.shuffle: indices = np.random.permutation(len(self)) else: indices = np.arange(len(self)) sizes = self.sizes tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) # sort by target length, then source length if tgt_sizes is not None: indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")] return sort_indices def prefetch(self, indices): prefetch_indices = [[] for _ in range(len(self.datasets))] for i in indices: ds_idx, ds_sample_idx = self._get_dataset_and_index(i) prefetch_indices[ds_idx].append(ds_sample_idx) for i in range(len(prefetch_indices)): self.datasets[i].prefetch(prefetch_indices[i]) @property def can_reuse_epoch_itr_across_epochs(self): return False def set_epoch(self, epoch): super().set_epoch(epoch) if epoch == self._cur_epoch: # re-enter so return return for d in self.datasets: if hasattr(d, "set_epoch"): d.set_epoch(epoch) self._cur_epoch = epoch self._establish_virtual_datasets() def _establish_virtual_datasets(self): if self.sample_ratios is None and self._cur_indices is not None: # not a samping dataset, no need to resample if indices are already established return self._reset_cached_properties() start_time = time.time() # Generate a weighted sample of indices as a function of the # random seed and the current epoch. 
rng = np.random.RandomState( [ int( hashlib.sha1( str(self.__class__.__name__).encode("utf-8") ).hexdigest(), 16, ) % (2 ** 32), self.seed % (2 ** 32), # global seed self._cur_epoch, # epoch index, ] ) self._clean_if_not_none( [self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes] ) self._sizes = None indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices( rng, self.datasets, self.sample_ratios, self.virtual_size ) self._cur_indices = indices self.cumulated_sizes = cumulated_sizes self.virtual_size_per_dataset = virtual_size_per_dataset raw_sizes = [len(d) for d in self.datasets] sampled_sizes = self.virtual_size_per_dataset logger.info( f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; " f"raw total size: {sum(raw_sizes)}" ) logger.info( f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; " f"resampled total size: {sum(sampled_sizes)}" ) if self.sample_ratios is not None: logger.info( f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}" ) else: logger.info(f"[{self.split}] A concat dataset") logger.info( f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}" ) def filter_indices_by_size(self, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ sizes = self.sizes tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) return data_utils.filter_paired_dataset_indices_by_size( src_sizes, tgt_sizes, indices, max_sizes )
bart_ls-main
fairseq-py/fairseq/data/multilingual/sampled_multi_dataset.py
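A worked example of default_virtual_size_func above: the largest sub-dataset keeps its real size, the others are scaled relative to it by their sampling ratios, and the sum is capped at max_scale_up times the total real size.

import numpy as np

sizes = [1000, 100]        # hypothetical sub-dataset lengths
ratios = [0.5, 0.5]

largest_idx = int(np.argmax(sizes))                    # dataset 0
virtual = [(r / ratios[largest_idx]) * sizes[largest_idx] for r in ratios]
vsize = sum(virtual)                                   # 1000 + 1000 = 2000
cap = sum(sizes) * 1.5                                 # 1100 * 1.5 = 1650
print(int(min(vsize, cap)))                            # -> 1650, the cap wins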
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import itertools
import json
import logging
import math
import os
from collections import OrderedDict, defaultdict
from argparse import ArgumentError

from fairseq import utils
from fairseq.data import (
    AppendTokenDataset,
    ConcatDataset,
    Dictionary,
    LanguagePairDataset,
    PrependTokenDataset,
    SampledMultiDataset,
    SampledMultiEpochDataset,
    StripTokenDataset,
    TransformEosLangPairDataset,
    TruncateDataset,
    data_utils,
    indexed_dataset,
)
from fairseq.data.multilingual.multilingual_utils import (
    EncoderLangtok,
    LangTokSpec,
    LangTokStyle,
    augment_dictionary,
    get_lang_tok,
)
from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat
from fairseq.file_io import PathManager
from fairseq.utils import FileContentsAction, csv_str_list, eval_str_dict


logger = logging.getLogger(__name__)

SRC_DICT_NAME = 'src'
TGT_DICT_NAME = 'tgt'


def _lang_id(dic: Dictionary, lang: str):
    """Return language ID index."""
    idx = dic.index(lang)
    assert idx != dic.unk_index, "cannot find language ID for lang {}".format(lang)
    return idx


def load_sampling_weights(from_file):
    with open(from_file) as f:
        weights = json.load(f)
    return weights


class MultilingualDatasetManager(object):
    def __init__(self, args, lang_pairs, langs, dicts, sampling_method):
        super().__init__()
        self.args = args
        self.seed = args.seed
        self.lang_pairs = lang_pairs
        self.extra_lang_pairs = (
            list(
                {p for _, v in args.extra_lang_pairs.items() for p in v.split(",")}
            )
            if args.extra_lang_pairs
            else []
        )
        self.src_langs = {p.split("-")[0] for p in args.lang_pairs + self.extra_lang_pairs}
        self.tgt_langs = {p.split("-")[1] for p in args.lang_pairs + self.extra_lang_pairs}
        self.langs = langs
        self.dicts = dicts
        self.lang_dict = self.create_lang_dictionary(self.langs)
        self.sampling_method = sampling_method
        self.sampling_scheduler = None
        self._has_sharded_data = False
        self._num_shards_dict = {}
        self._training_data_sizes = defaultdict(lambda: {})

    @classmethod
    def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method):
        return MultilingualDatasetManager(
            args, lang_pairs, langs, dicts, sampling_method
        )

    @staticmethod
    def add_args(parser):
        parser.add_argument(
            "data",
            help="colon separated path to data directories list, \
                            will be iterated upon during epochs in round-robin manner",
            action=FileContentsAction,
        )
        parser.add_argument(
            "--langs",
            default=None,
            type=csv_str_list,
            help="a comma separated list of languages which can appear in lang-pairs; "
            "note that the ordering determines language token IDs",
        )
        parser.add_argument(
            "--lang-dict",
            default=None,
            type=str,
            help="an external file which contains a list of "
            "languages which can appear in lang-pairs; "
            "note that the ordering determines language token IDs; "
            "--langs and --lang-dict are two mutually exclusive options",
        )
        parser.add_argument('--source-dict', default=None, type=str,
                            help='path to source dictionary; if specified it will override per language dictionary loading')
        parser.add_argument('--target-dict', default=None, type=str,
                            help='path to target dictionary; if specified it will override per language dictionary loading')
        parser.add_argument(
            "--lang-tok-style",
            default=LangTokStyle.multilingual.value,
            type=str,
            choices=[LangTokStyle.multilingual.value, LangTokStyle.mbart.value],
            help="language token styles",
        )

        parser.add_argument(
            "--load-alignments",
            action="store_true",
            help="load the binarized 
alignments", ) parser.add_argument( "--left-pad-source", default="True", type=str, metavar="BOOL", help="pad the source on the left", ) parser.add_argument( "--left-pad-target", default="False", type=str, metavar="BOOL", help="pad the target on the left", ) try: parser.add_argument( "--max-source-positions", default=1024, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) except ArgumentError: # this might have already been defined. Once we transition this to hydra it should be fine to add it here. pass parser.add_argument( "--upsample-primary", default=1, type=int, help="amount to upsample primary dataset", ) parser.add_argument( "--truncate-source", action="store_true", default=False, help="truncate source to max-source-positions", ) parser.add_argument( "--encoder-langtok", default=None, type=str, choices=[EncoderLangtok.src.value, EncoderLangtok.tgt.value], metavar="SRCTGT", help="prepend to the beginning of source sentence the source or target " "language token. (src/tgt)", ) parser.add_argument( "--decoder-langtok", action="store_true", help="prepend to the beginning of target sentence the target language token", ) parser.add_argument( "--lang-tok-replacing-bos-eos", action="store_true", default=False ) parser.add_argument( "--enable-lang-ids", default=False, action="store_true", help="whether to include language IDs in samples", ) parser.add_argument( "--enable-reservsed-directions-shared-datasets", default=False, action="store_true", help="whether to allow datasets be used in reversed directions", ) parser.add_argument( "--extra-data", help='a dictionary of data name to this path, \ e.g. {"mined", path_to_mined_data, "denoised": path_to_denoised_data}', type=lambda uf: eval_str_dict(uf, type=str), default=None, ) parser.add_argument( "--extra-lang-pairs", help='a dictionary of data name to the language pairs they serve, \ e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}', type=lambda uf: eval_str_dict(uf, type=str), default=None, ) parser.add_argument( "--fixed-dictionary", help="Fixed dictionary to use with model path", default=None, type=str, ) parser.add_argument( "--langtoks-specs", help='a list of comma separated data types that a set of language tokens to be specialized for, \ e.g. "main,dae,mined". There will be a set of language tokens added to the vocab to \ distinguish languages in different training data types. If not specified, default language \ tokens per languages will be added', default=LangTokSpec.main.value, type=csv_str_list, ) parser.add_argument( "--langtoks", help='a dictionary of how to add language tokens, \ e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": \ ("src", "tgt")}, or {"mined": ("src.mined", "tgt")}', default=None, type=lambda uf: eval_str_dict(uf, type=str), ) parser.add_argument( "--sampling-weights-from-file", help='a file contain a python dictionary of how to sample data sets, \ e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \ "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=str, ) parser.add_argument( "--sampling-weights", help='a dictionary of how to sample data sets, \ e.g. 
{ "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \ "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=lambda uf: eval_str_dict(uf, type=str), ) parser.add_argument( "--virtual-epoch-size", default=None, type=int, help="virtual epoch size to speed up data loading", ) parser.add_argument( "--virtual-data-size", default=None, type=int, help="virtual data size of the whole joint dataset to speed" "up data loading and have specific dynamic sampling strategy interval", ) @classmethod def load_langs(cls, args, **kwargs): if args.lang_dict and args.langs: raise ValueError("--langs and --lang-dict can not both be specified") if args.lang_dict is None and args.langs is None: logger.warning( "External language dictionary is not provided; " "use lang-pairs to infer the set of supported languages. " "The language ordering is not stable which might cause " "misalignment in pretraining and finetuning." ) # infer from lang_pairs as it is langs = list( {x for lang_pair in args.lang_pairs for x in lang_pair.split("-")} ) langs = sorted(langs) logger.info(f"inferred language list: {langs}") elif args.lang_dict: with open( PathManager.get_local_path(args.lang_dict), "r", encoding="utf-8" ) as f: langs = [lang.strip() for lang in f.readlines() if lang.strip()] logger.info( f"loaded language list from {args.lang_dict} as they are ordered in file" ) elif args.langs: langs = args.langs logger.info( f"parsed the language list as they are ordered in the option: {langs}" ) return langs def has_sharded_data(self, split): return self._has_sharded_data and split == getattr( self.args, "train_subset", None ) def _shared_collater(self): return not (self.args.extra_data and "mono_dae" in self.args.extra_data) and ( not self.args.lang_tok_replacing_bos_eos ) def estimate_global_pass_epoch(self, epoch): if self.args.virtual_epoch_size is None or self.args.virtual_data_size is None: return None # one epoch more for remaining data in each shard virtual_epochs_per_shard = math.ceil( self.args.virtual_data_size / self.args.virtual_epoch_size ) # note that fairseq epoch / shard_epoch starts from 1 shard_epoch = (epoch - 1) // virtual_epochs_per_shard + 1 return shard_epoch @classmethod def prepare(cls, load_dictionary, args, **kargs): args.left_pad_source = utils.eval_bool(args.left_pad_source) args.left_pad_target = utils.eval_bool(args.left_pad_target) if not hasattr(args, "shuffle_instance"): args.shuffle_instance = False if args.langtoks is None: args.langtoks = {} if "main" not in args.langtoks: src_langtok_spec = args.encoder_langtok if args.encoder_langtok else None tgt_langtok_spec = "tgt" if args.decoder_langtok else None args.langtoks["main"] = (src_langtok_spec, tgt_langtok_spec) def check_langs(langs, pairs): messages = [] for src, tgt in pairs: if src not in langs or tgt not in langs: messages.append( f"language pair {src}-{tgt} contains languages " "that are not in the language dictionary" ) if len(messages) > 0: raise ValueError(" ".join(messages) + f"; langs: {langs}") if args.lang_pairs is None: raise ValueError( "--lang-pairs is required. List all the language pairs in the training objective." 
) if isinstance(args.lang_pairs, str): args.lang_pairs = args.lang_pairs.split(",") if args.source_lang is not None or args.target_lang is not None: training = False else: training = True language_list = cls.load_langs(args, **kargs) check_langs( language_list, ( [p.split("-") for p in args.lang_pairs] if training else [(args.source_lang, args.target_lang)] ), ) def load_dictionary_and_postproc(path): d = load_dictionary(path) augment_dictionary( dictionary=d, language_list=language_list, lang_tok_style=args.lang_tok_style, langtoks_specs=args.langtoks_specs, extra_data=args.extra_data, ) return d dicts = cls.load_all_dictionaries(args, language_list, load_dictionary_and_postproc, training) return language_list, dicts, training @classmethod def load_all_dictionaries(cls, args, language_list, load_dictionary, training): dicts = OrderedDict() if args.source_dict is not None: dicts[SRC_DICT_NAME] = load_dictionary(args.source_dict) if args.target_dict is not None: dicts[TGT_DICT_NAME] = load_dictionary(args.target_dict) if training: extra_lang_pairs = ( list( {p for _, v in args.extra_lang_pairs.items() for p in v.split(",")} ) if args.extra_lang_pairs else [] ) src_langs_to_load_dicts = sorted( {p.split("-")[0] for p in (args.lang_pairs + extra_lang_pairs)} ) tgt_langs_to_load_dicts = sorted( {p.split("-")[1] for p in (args.lang_pairs + extra_lang_pairs)} ) else: src_langs_to_load_dicts = [args.source_lang] tgt_langs_to_load_dicts = [args.target_lang] paths = utils.split_paths(args.data) assert len(paths) > 0 def load_dicts(langs_to_load_dicts): for lang in langs_to_load_dicts: dicts[lang] = load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(lang)) ) if len(dicts) > 0: dict0 = next(iter(dicts.values())) assert dicts[lang].pad() == dict0.pad() assert dicts[lang].eos() == dict0.eos() assert dicts[lang].unk() == dict0.unk() logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang]))) if args.fixed_dictionary is not None: fixed_dict = load_dictionary(args.fixed_dictionary) dicts = {lang: fixed_dict for lang in src_langs_to_load_dicts + tgt_langs_to_load_dicts} else: if args.source_dict is None: load_dicts(src_langs_to_load_dicts) if args.target_dict is None: load_dicts(tgt_langs_to_load_dicts) return dicts def get_source_dictionary(self, lang): if self.args.source_dict is not None: return self.dicts[SRC_DICT_NAME] else: return self.dicts[lang] def get_target_dictionary(self, lang): if self.args.target_dict is not None: return self.dicts[TGT_DICT_NAME] else: return self.dicts[lang] @classmethod def create_lang_dictionary(cls, langs): unk = "<unk>" # hack to remove symbols other than unk as they are not needed by lang dict lang_dict = Dictionary(pad=unk, eos=unk, unk=unk, bos=unk) for lang in langs: lang_dict.add_symbol(lang) return lang_dict @classmethod def get_langtok_index(cls, lang_tok, dic): idx = dic.index(lang_tok) assert ( idx != dic.unk_index ), "cannot find language token {} in the dictionary".format(lang_tok) return idx def get_encoder_langtok(self, src_lang, tgt_lang, spec=None): if spec is None: return None if spec and spec.startswith("src"): if src_lang is None: return None langtok = get_lang_tok( lang=src_lang, lang_tok_style=self.args.lang_tok_style, spec=spec ) else: if tgt_lang is None: return None langtok = get_lang_tok( lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec ) return self.get_langtok_index( langtok, self.get_source_dictionary(src_lang) if src_lang else self.get_target_dictionary(tgt_lang) ) def get_decoder_langtok(self, tgt_lang, 
spec=None): if spec is None: return None langtok = get_lang_tok( lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec ) return self.get_langtok_index(langtok, self.get_target_dictionary(tgt_lang)) @classmethod def load_data(cls, path, vdict, impl): dataset = data_utils.load_indexed_dataset(path, vdict, impl) return dataset @classmethod def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl): filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=dataset_impl) def load_lang_dataset( self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions, prepend_bos=False, load_alignments=False, truncate_source=False, ): src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = split + (str(k) if k > 0 else "") # infer langcode if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt)) elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src)) else: if k > 0: break else: logger.error( f"Dataset not found: {data_path}, {split_k}, {src}, {tgt}" ) raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) src_dataset = self.load_data(prefix + src, src_dict, dataset_impl) if truncate_source: src_dataset = AppendTokenDataset( TruncateDataset( StripTokenDataset(src_dataset, src_dict.eos()), max_source_positions - 1, ), src_dict.eos(), ) src_datasets.append(src_dataset) tgt_datasets.append(self.load_data(prefix + tgt, tgt_dict, dataset_impl)) logger.info( "{} {} {}-{} {} examples".format( data_path, split_k, src, tgt, len(src_datasets[-1]) ) ) if not combine: break assert len(src_datasets) == len(tgt_datasets) if len(src_datasets) == 1: src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0] else: sample_ratios = [1] * len(src_datasets) sample_ratios[0] = upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) if prepend_bos: assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index") src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) align_dataset = None if load_alignments: align_path = os.path.join( data_path, "{}.align.{}-{}".format(split, src, tgt) ) if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): align_dataset = data_utils.load_indexed_dataset( align_path, None, dataset_impl ) return src_dataset, tgt_dataset, align_dataset def load_langpair_dataset( self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, src_dataset_transform_func=lambda dataset: dataset, tgt_dataset_transform_func=lambda dataset: dataset, src_lang_id=None, tgt_lang_id=None, langpairs_sharing_datasets=None, ): norm_direction = "-".join(sorted([src, tgt])) if langpairs_sharing_datasets is not None: src_dataset = langpairs_sharing_datasets.get( (data_path, split, norm_direction, src), "NotInCache" ) tgt_dataset = langpairs_sharing_datasets.get( (data_path, split, norm_direction, tgt), "NotInCache" ) align_dataset = langpairs_sharing_datasets.get( (data_path, split, norm_direction, src, tgt), "NotInCache" ) # a hack: 
if any one is not in cache, we need to reload them if ( langpairs_sharing_datasets is None or src_dataset == "NotInCache" or tgt_dataset == "NotInCache" or align_dataset == "NotInCache" or split != getattr(self.args, "train_subset", None) ): # source and target datasets can be reused in reversed directions to save memory # reversed directions of valid and test data will not share source and target datasets src_dataset, tgt_dataset, align_dataset = self.load_lang_dataset( data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions=max_source_positions, prepend_bos=prepend_bos, load_alignments=load_alignments, truncate_source=truncate_source, ) src_dataset = src_dataset_transform_func(src_dataset) tgt_dataset = tgt_dataset_transform_func(tgt_dataset) if langpairs_sharing_datasets is not None: langpairs_sharing_datasets[ (data_path, split, norm_direction, src) ] = src_dataset langpairs_sharing_datasets[ (data_path, split, norm_direction, tgt) ] = tgt_dataset langpairs_sharing_datasets[ (data_path, split, norm_direction, src, tgt) ] = align_dataset if align_dataset is None: # no align data, so flag the reverse direction as well in sharing langpairs_sharing_datasets[ (data_path, split, norm_direction, tgt, src) ] = align_dataset else: logger.info( f"Reusing source and target datasets of [{split}] {tgt}-{src} for reversed direction: " f"[{split}] {src}-{tgt}: src length={len(src_dataset)}; tgt length={len(tgt_dataset)}" ) return LanguagePairDataset( src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset.sizes if tgt_dataset is not None else None, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, src_lang_id=src_lang_id, tgt_lang_id=tgt_lang_id, ) def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None): if self.args.lang_tok_replacing_bos_eos: # it is handled by self.alter_dataset_langtok # TODO: Unify with alter_dataset_langtok return dataset if spec is None: return dataset tok = self.get_encoder_langtok(src_lang, tgt_lang, spec) if tok: return PrependTokenDataset(dataset, tok) return dataset def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None): if dataset is None: # note that target dataset can be None during inference time return None if self.args.lang_tok_replacing_bos_eos: # TODO: Unify with alter_dataset_langtok # It is handled by self.alter_dataset_langtok. # The complication in self.alter_dataset_langtok # makes a unified framework difficult.
return dataset # if not self.args.decoder_langtok: if not spec: return dataset tok = self.get_decoder_langtok(target_lang, spec) if tok: return PrependTokenDataset(dataset, tok) return dataset def alter_dataset_langtok( self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None, src_langtok_spec=None, tgt_langtok_spec=None, ): if src_langtok_spec is None and tgt_langtok_spec is None: return lang_pair_dataset new_src_eos = None if ( src_langtok_spec is not None and src_eos is not None and (src_lang is not None or tgt_lang is not None) ): new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec) else: src_eos = None new_tgt_bos = None if tgt_langtok_spec and tgt_eos is not None and tgt_lang is not None: new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec) else: tgt_eos = None return TransformEosLangPairDataset( lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos, ) def load_a_dataset( self, split, data_path, src, src_dict, tgt, tgt_dict, combine, prepend_bos=False, langpairs_sharing_datasets=None, data_category=None, **extra_kwargs, ): dataset_impl = self.args.dataset_impl upsample_primary = self.args.upsample_primary left_pad_source = self.args.left_pad_source left_pad_target = self.args.left_pad_target max_source_positions = self.args.max_source_positions max_target_positions = self.args.max_target_positions load_alignments = self.args.load_alignments truncate_source = self.args.truncate_source src_dataset_transform_func = self.src_dataset_tranform_func tgt_dataset_transform_func = self.tgt_dataset_tranform_func enable_lang_ids = self.args.enable_lang_ids lang_dictionary = self.lang_dict src_langtok_spec, tgt_langtok_spec = extra_kwargs["langtok_spec"] src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec) tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec) logger.info( f"{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}" ) langpair_ds = self.load_langpair_dataset( data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos, load_alignments, truncate_source, src_dataset_transform_func=lambda dataset: src_dataset_transform_func( src, tgt, dataset, src_langtok_spec ), tgt_dataset_transform_func=lambda dataset: tgt_dataset_transform_func( src, tgt, dataset, tgt_langtok_spec ), src_lang_id=_lang_id(lang_dictionary, src) if enable_lang_ids and lang_dictionary is not None else None, tgt_lang_id=_lang_id(lang_dictionary, tgt) if enable_lang_ids and lang_dictionary is not None else None, langpairs_sharing_datasets=langpairs_sharing_datasets, ) # TODO: handle modified lang toks for mined data and dae data if self.args.lang_tok_replacing_bos_eos: ds = self.alter_dataset_langtok( langpair_ds, src_eos=self.get_source_dictionary(src).eos() if src else self.get_target_dictionary(tgt).eos(), src_lang=src, tgt_eos=self.get_target_dictionary(tgt).eos(), tgt_lang=tgt, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec, ) else: ds = langpair_ds return ds def load_split_langpair_datasets(self, split, data_param_list): datasets = [] langpairs_sharing_datasets = ( {} if self.args.enable_reservsed_directions_shared_datasets else None ) for param in data_param_list: ds = self.load_a_dataset( split=split, langpairs_sharing_datasets=langpairs_sharing_datasets, **param, ) datasets.append(ds) return datasets def 
get_data_paths_and_lang_pairs(self, split): datapaths = {"main": self.args.data} lang_pairs = {"main": self.lang_pairs} if split == getattr(self.args, "train_subset", None): # only training data can have extra data and extra language pairs if self.args.extra_data: extra_datapaths = self.args.extra_data datapaths.update(extra_datapaths) if self.args.extra_lang_pairs: extra_lang_pairs = { k: v.split(",") for k, v in self.args.extra_lang_pairs.items() } lang_pairs.update(extra_lang_pairs) return datapaths, lang_pairs @classmethod def get_dataset_key(cls, data_category, src, tgt): return f"{data_category}:{src}-{tgt}" @classmethod def _get_shard_num_dict(cls, split, paths): shards = defaultdict(int) for path in paths: files = PathManager.ls(path) directions = set() for f in files: if f.startswith(split) and f.endswith(".idx"): # idx files of the form "{split}.{src}-{tgt}.{lang}.idx" direction = f.split(".")[-3] directions.add(direction) for direction in directions: shards[direction] += 1 return shards def get_split_num_data_shards(self, split): if split in self._num_shards_dict: return self._num_shards_dict[split] num_shards_dict = {} data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split) for data_category, paths in data_paths.items(): if data_category not in lang_pairs: continue paths = utils.split_paths(paths) shards_dict = self._get_shard_num_dict(split, paths) lang_dirs = [ lang_pair.split("-") for lang_pair in lang_pairs[data_category] ] lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs] for src, tgt in lang_dirs: key = self.get_dataset_key(data_category, src, tgt) if "mono_" in data_category: # monolingual data requires tgt only assert src is None or src == tgt, ( f"error: src={src}, " f"tgt={tgt} for data_category={data_category}" ) num_shards_dict[key] = shards_dict[tgt] else: if f"{src}-{tgt}" in shards_dict: num_shards_dict[key] = shards_dict[f"{src}-{tgt}"] elif f"{tgt}-{src}" in shards_dict: # follow the fairseq tradition to use reversed direction data if it is not available num_shards_dict[key] = shards_dict[f"{tgt}-{src}"] self._num_shards_dict[split] = num_shards_dict logger.info(f"[{split}] num of shards: {num_shards_dict}") return num_shards_dict @classmethod def get_shard_id(cls, num_shards, epoch, shard_epoch=None): shard = epoch if shard_epoch is None else shard_epoch shard = (shard - 1) % num_shards return shard def get_split_data_path(self, paths, epoch, shard_epoch, num_shards): path = paths[self.get_shard_id(num_shards, epoch, shard_epoch)] return path def get_split_data_param_list(self, split, epoch, shard_epoch=None): # TODO: to extend with extra datasets and keys and loop over different shard data paths param_list = [] data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split) logger.info(f"langtoks settings: {self.args.langtoks}") split_num_shards_dict = self.get_split_num_data_shards(split) for data_category, paths in data_paths.items(): if data_category not in lang_pairs: continue paths = utils.split_paths(paths) assert len(paths) > 0 if len(paths) > 1: self._has_sharded_data = True if split != getattr(self.args, "train_subset", None): # if not training data set, use the first shard for valid and test paths = paths[:1] if data_category in self.args.langtoks: lang_tok_spec = self.args.langtoks[data_category] else: # default to None lang_tok_spec = (None, None) # infer langcode lang_dirs = [ lang_pair.split("-") for lang_pair in lang_pairs[data_category] ] lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs] for src, tgt in lang_dirs: assert src is not None or data_category == "mono_dae", ( f"error: src={src}, " f"tgt={tgt} for data_category={data_category}" ) # logger.info(f"preparing param for {data_category}: {src} - {tgt}") key = self.get_dataset_key(data_category, src, tgt) data_path = self.get_split_data_path( paths, epoch, shard_epoch, split_num_shards_dict[key] ) param_list.append( { "key": key, "data_path": data_path, "split": split, "src": src, "src_dict": self.get_source_dictionary(src) if src and data_category != "mono_dae" else None, "tgt": tgt, "tgt_dict": self.get_target_dictionary(tgt), "data_category": data_category, "langtok_spec": lang_tok_spec, } ) return param_list def get_train_dataset_sizes( self, data_param_list, datasets, epoch, shard_epoch=None ): num_shards = [ self.get_split_num_data_shards(param["split"])[param["key"]] for param in data_param_list ] data_sizes = [] for (key, d), num_shard in zip(datasets, num_shards): my_data_sizes = self._training_data_sizes[key] shard_ind = self.get_shard_id(num_shard, epoch, shard_epoch) if shard_ind not in my_data_sizes: my_data_sizes[shard_ind] = len(d) known_size = max(my_data_sizes.values()) data_sizes.append( # If we don't know the data size of the shard yet, # use the max known data size to approximate. # Note that we preprocess shards by a designated shard size # and put any remaining data at the end into the last shard, so # the max shard size approximation is almost correct before loading # the last shard; after loading the last shard, we will have the # exact data size of the whole dataset. (key, sum(my_data_sizes.get(i, known_size) for i in range(num_shard))) ) logger.info( f"estimated total data sizes of all shards used in sampling ratios: {data_sizes}. " "Note that if the data of a shard has not been loaded yet, we use the max known data size to approximate" ) return [s for _, s in data_sizes] def get_train_sampling_ratios( self, data_param_list, datasets, epoch=1, shard_epoch=None ): data_sizes = self.get_train_dataset_sizes( data_param_list, datasets, epoch, shard_epoch ) sampling_func = self.sampling_method.sampling_method_selector() sample_ratios = sampling_func(data_sizes) if sampling_func is not None else None return sample_ratios def get_sampling_ratios(self, data_param_list, datasets, epoch, shard_epoch=None): if self.args.sampling_weights_from_file: weights = load_sampling_weights(self.args.sampling_weights_from_file) sample_ratios = [weights[k] for k, _ in datasets] logger.info( "| ignoring --sampling-weights when loading sampling weights " f"from file {self.args.sampling_weights_from_file}" ) elif self.args.sampling_weights: sample_ratios = [self.args.sampling_weights[k] for k, _ in datasets] else: sample_ratios = self.get_train_sampling_ratios( data_param_list, datasets, epoch, shard_epoch ) if sample_ratios is not None: logger.info( "| Upsample ratios: {}".format( list(zip(map(lambda x: x["key"], data_param_list), sample_ratios)) ) ) assert len(sample_ratios) == len(datasets) return sample_ratios def load_split_datasets( self, split, training, epoch=1, combine=False, shard_epoch=None, **kwargs ): data_param_list = self.get_split_data_param_list( split, epoch, shard_epoch=shard_epoch ) langpairs_sharing_datasets = ( {} if self.args.enable_reservsed_directions_shared_datasets else None ) datasets = [ ( param["key"], self.load_a_dataset( combine=combine, langpairs_sharing_datasets=langpairs_sharing_datasets, **param, ), ) for param in data_param_list ] return datasets, data_param_list def load_into_concat_dataset(self, split, datasets, data_param_list): if self.args.lang_tok_replacing_bos_eos: # TODO: to investigate why TransformEosLangPairDataset doesn't work with ConcatDataset return SampledMultiDataset( OrderedDict(datasets), sampling_ratios=None, eval_key=None, collate_format=CollateFormat.single, virtual_size=None, split=split, ) return ConcatDataset([d for _, d in datasets]) def load_sampled_multi_epoch_dataset( self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs ): datasets, data_param_list = self.load_split_datasets( split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs ) if training and split == getattr(self.args, "train_subset", None): sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch) return SampledMultiEpochDataset( OrderedDict(datasets), epoch=epoch, shard_epoch=shard_epoch, # valid and test datasets will degenerate to concatenated datasets: sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, virtual_epoch_size=self.args.virtual_epoch_size, # if not using lang_tok altering, simplified to use the same collater shared_collater=self._shared_collater(), ) else: return self.load_into_concat_dataset(split, datasets, data_param_list) def load_sampled_multi_dataset( self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs ): datasets, data_param_list = self.load_split_datasets( split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs ) if training and split == getattr(self.args, "train_subset", None): sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch) return SampledMultiDataset( OrderedDict(datasets), epoch=epoch, # valid and test datasets will degenerate to concatenated datasets: sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, # if not using lang_tok altering, simplified to use the same collater shared_collater=self._shared_collater(), ) else: return self.load_into_concat_dataset(split, datasets, data_param_list) def load_dataset( self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs ): if self.args.virtual_epoch_size is None: return self.load_sampled_multi_dataset( split, training, epoch, combine, shard_epoch, **kwargs ) else: return self.load_sampled_multi_epoch_dataset( split, training, epoch, combine, shard_epoch, **kwargs )
bart_ls-main
fairseq-py/fairseq/data/multilingual/multilingual_data_manager.py
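The shard rotation in MultilingualDatasetManager.get_shard_id and the virtual-epoch bookkeeping in estimate_global_pass_epoch above boil down to modular arithmetic over 1-indexed epochs. A standalone sketch of that arithmetic (the names mirror the methods, but nothing here depends on fairseq):

import math

def get_shard_id(num_shards, epoch, shard_epoch=None):
    # fairseq epochs / shard_epochs start from 1, so shift before the modulo
    shard = epoch if shard_epoch is None else shard_epoch
    return (shard - 1) % num_shards

def estimate_global_pass_epoch(epoch, virtual_data_size, virtual_epoch_size):
    # one extra virtual epoch per shard absorbs any remaining data
    virtual_epochs_per_shard = math.ceil(virtual_data_size / virtual_epoch_size)
    return (epoch - 1) // virtual_epochs_per_shard + 1

# three shards are visited round-robin as the (shard_)epoch grows
assert [get_shard_id(3, e) for e in range(1, 7)] == [0, 1, 2, 0, 1, 2]
# 100k virtual samples in 25k virtual epochs: epochs 1-4 are pass 1, epoch 5 starts pass 2
assert estimate_global_pass_epoch(4, 100_000, 25_000) == 1
assert estimate_global_pass_epoch(5, 100_000, 25_000) == 2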
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .huffman_coder import HuffmanCodeBuilder, HuffmanCoder from .huffman_mmap_indexed_dataset import ( HuffmanMMapIndex, HuffmanMMapIndexedDataset, HuffmanMMapIndexedDatasetBuilder, vocab_file_path, ) __all__ = [ "HuffmanCoder", "HuffmanCodeBuilder", "HuffmanMMapIndexedDatasetBuilder", "HuffmanMMapIndexedDataset", "HuffmanMMapIndex", "vocab_file_path", ]
bart_ls-main
fairseq-py/fairseq/data/huffman/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import mmap import os import shutil import struct import typing as tp from functools import lru_cache import numpy as np import torch from fairseq.data import indexed_dataset from fairseq.data.huffman import HuffmanCoder from fairseq.file_io import PathManager class HuffmanMMapIndex: """ keep an index of the offsets in the huffman binary file. First a header, then the list of sizes (num tokens) for each instance and finally the addresses of each instance. """ _HDR_MAGIC = b"HUFFIDX\x00\x00" _VERSION = 1 @classmethod def writer(cls, path: str, data_len: int): class _Writer: def __enter__(self): self._file = open(path, "wb") # write header (magic + version) self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack("<Q", cls._VERSION)) self._file.write(struct.pack("<Q", data_len)) return self def write(self, sizes, pointers): # add number of items in the index to the header self._file.write(struct.pack("<Q", len(sizes))) # write sizes sizes = np.array(sizes, dtype=np.int32) self._file.write(sizes.tobytes(order="C")) del sizes # write address pointers pointers = np.array(pointers, dtype=np.int64) self._file.write(pointers.tobytes(order="C")) del pointers def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path): with open(path, "rb") as stream: # read headers magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) (version,) = struct.unpack("<Q", stream.read(8)) assert ( self._VERSION == version ), f"Unexpected file version {version} != code version {self._VERSION}" # read length of data file (self._data_len,) = struct.unpack("<Q", stream.read(8)) # read number of items in data file/index (self._len,) = struct.unpack("<Q", stream.read(8)) offset = stream.tell() indexed_dataset._warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode="r", order="C") self._bin_buffer = memoryview(self._bin_buffer_mmap) self._sizes = np.frombuffer( self._bin_buffer, dtype=np.int32, count=self._len, offset=offset ) self._pointers = np.frombuffer( self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes, ) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap def __iter__(self): for i in range(self._len): yield self[i] @property def data_len(self): return self._data_len @property def sizes(self): return self._sizes @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def vocab_file_path(prefix_path): return prefix_path + ".vocab" class HuffmanMMapIndexedDataset(torch.utils.data.Dataset): """ an indexed dataset that uses mmap and memoryview to access data from disk that was compressed with a HuffmanCoder.
""" def __init__(self, prefix_path): super().__init__() self._prefix_path = None self._index = None self._bin_buffer = None self._coder = None self._file = None self._bin_buffer_mmap = None self._do_init(prefix_path) def __getstate__(self): return self._prefix_path def __setstate__(self, state): self._do_init(state) def _do_init(self, prefix_path): self._prefix_path = prefix_path self._index = HuffmanMMapIndex( indexed_dataset.index_file_path(self._prefix_path) ) self._coder = HuffmanCoder.from_file(vocab_file_path(self._prefix_path)) indexed_dataset._warmup_mmap_file( indexed_dataset.data_file_path(self._prefix_path) ) self._file = os.open( indexed_dataset.data_file_path(self._prefix_path), os.O_RDONLY ) self._bin_buffer_mmap = mmap.mmap( self._file, self._index.data_len, access=mmap.ACCESS_READ, ) self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): del self._bin_buffer if self._file: os.close(self._file) del self._index def __len__(self): return len(self._index) def _decode(self, i): ptr, _ = self._index[i] if i == 0: raw_bytes = self._bin_buffer[:ptr] else: (prev_ptr, _) = self._index[i - 1] raw_bytes = self._bin_buffer[prev_ptr:ptr] return self._coder.decode(raw_bytes.tobytes()) @lru_cache(maxsize=8) def __getitem__(self, i): nodes = self._decode(i) return torch.tensor([n.id for n in nodes], dtype=torch.int64) def __iter__(self): for idx in range(len(self)): yield self[idx] def get_symbols(self, i): nodes = self._decode(i) for n in nodes: yield n.symbol @property def sizes(self): return self._index.sizes @property def supports_prefetch(self): return False @property def coder(self): return self._coder @staticmethod def exists(prefix_path): return ( PathManager.exists(indexed_dataset.index_file_path(prefix_path)) and PathManager.exists(indexed_dataset.data_file_path(prefix_path)) and PathManager.exists(vocab_file_path(prefix_path)) ) class HuffmanMMapIndexedDatasetBuilder: """ Helper to build a memory mapped datasets with a huffman encoder. You can either open/close this manually or use it as a ContextManager. Provide your own coder, it will then be stored alongside the dataset. The builder will first write the vocab file, then open the binary file so you can stream into it, finally the index will be written when the builder is closed (your index should fit in memory). """ def __init__(self, path_prefix: str, coder: HuffmanCoder) -> None: self._path_prefix = path_prefix self._coder = coder self._sizes = [] self._ptrs = [] self._data_len = 0 def open(self): self._coder.to_file(vocab_file_path(self._path_prefix)) self._data_file = open(indexed_dataset.data_file_path(self._path_prefix), "wb") def __enter__(self) -> "HuffmanMMapIndexedDatasetBuilder": self.open() return self def add_item(self, tokens: tp.List[str]) -> None: """ add a list of tokens to the dataset, they will compressed with the provided coder before being written to file. """ encoded = self._coder.encode(tokens) code_len = len(encoded) last_ptr = 0 if len(self._ptrs) > 0: last_ptr = self._ptrs[-1] self._sizes.append(len(tokens)) self._ptrs.append(last_ptr + code_len) self._data_len += code_len self._data_file.write(encoded) def append(self, other_dataset_path_prefix: str) -> None: """ append an existing dataset. Beware, if it wasn't built with the same coder, you are in trouble. 
""" other_index = HuffmanMMapIndex( indexed_dataset.index_file_path(other_dataset_path_prefix) ) for (ptr, size) in other_index: self._ptrs.append(ptr + self._data_len) self._sizes.append(size) # Concatenate data with open(indexed_dataset.data_file_path(other_dataset_path_prefix), "rb") as f: shutil.copyfileobj(f, self._data_file) self._data_len += other_index.data_len def close(self): self._data_file.close() with HuffmanMMapIndex.writer( indexed_dataset.index_file_path(self._path_prefix), self._data_len ) as index: index.write(self._sizes, self._ptrs) def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.close()
bart_ls-main
fairseq-py/fairseq/data/huffman/huffman_mmap_indexed_dataset.py
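A hedged end-to-end sketch of the builder and reader above (not part of the repo; it assumes the bitarray dependency is installed and the working directory is writable — "./hufftest" is an illustrative prefix):

from fairseq.data.huffman import (
    HuffmanCodeBuilder,
    HuffmanMMapIndexedDataset,
    HuffmanMMapIndexedDatasetBuilder,
)

sentences = [["hello", "world"], ["hello", "huffman"]]

# build a coder from symbol counts
code_builder = HuffmanCodeBuilder()
for sent in sentences:
    code_builder.add_symbols(*sent)
coder = code_builder.build_code()

# writes the vocab and data files under the prefix; the index is written on close
with HuffmanMMapIndexedDatasetBuilder("./hufftest", coder) as ds_builder:
    for sent in sentences:
        ds_builder.add_item(sent)

# read it back through the mmap'ed dataset
dataset = HuffmanMMapIndexedDataset("./hufftest")
assert len(dataset) == 2
assert list(dataset.get_symbols(0)) == ["hello", "world"]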
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re import typing as tp from collections import Counter, deque from dataclasses import dataclass from bitarray import bitarray, util from fairseq.data import Dictionary # basically we have to write to addressable bytes for the memory mapped # dataset loader. Sentences that get encoded to a length that is not a # multiple of BLOCKSIZE (a byte) will be padded to fit. (see _pad in the coder) BLOCKSIZE = 8 class HuffmanCoder: def __init__( self, root: "HuffmanNode", bos="<s>", pad="<pad>", eos="</s>", unk="<unk>" ): self.root = root self.table = root.code_table() self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos def _pad(self, a: bitarray) -> bitarray: """ bitpadding, 1 then 0. If the array is already a multiple of blocksize, we add a full block. """ pad_len = BLOCKSIZE - (len(a) % BLOCKSIZE) - 1 padding = bitarray("1" + "0" * pad_len) return a + padding def _unpad(self, a: bitarray) -> bitarray: """ remove the bitpadding. There will be a set of 0s preceded by a 1 at the end of the bitarray, we remove that """ # count the 0 padding at the end until we find the first 1 # we want to remove the one too remove_cnt = util.rindex(a, 1) return a[:remove_cnt] def encode(self, iter: tp.List[str]) -> bytes: """ encode a list of tokens and return bytes. We use bitpadding to make sure the encoded bits fit in bytes. """ a = bitarray() for token in iter: code = self.get_code(token) if code is None: if self.unk_word is None: raise Exception(f"unknown token {token} cannot be encoded.") else: token = self.unk_word a = a + self.get_code(token) return self._pad(a).tobytes() def decode(self, bits: bytes) -> tp.Iterator["HuffmanNode"]: """ take bitpadded bytes and decode them to a sequence of leaves. You can then use each node to find the symbol/id """ a = bitarray() a.frombytes(bits) return self.root.decode(self._unpad(a)) def get_code(self, symbol: str) -> tp.Optional[bitarray]: node = self.get_node(symbol) return None if node is None else node.code def get_node(self, symbol: str) -> "HuffmanNode": return self.table.get(symbol) @classmethod def from_file( cls, filename: str, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", ) -> "HuffmanCoder": builder = HuffmanCodeBuilder.from_file(filename) return builder.build_code(bos=bos, pad=pad, eos=eos, unk=unk) def to_file(self, filename, sep="\t"): nodes = list(self.table.values()) nodes.sort(key=lambda n: n.id) with open(filename, "w", encoding="utf-8") as output: for n in nodes: output.write(f"{n.symbol}{sep}{n.count}\n") def __iter__(self): for n in self.table.values(): yield n def merge(self, other_coder: "HuffmanCoder") -> "HuffmanCoder": builder = HuffmanCodeBuilder() for n in self: builder.increment(n.symbol, n.count) for n in other_coder: builder.increment(n.symbol, n.count) return builder.build_code() def __eq__(self, other: "HuffmanCoder") -> bool: return self.table == other.table def __len__(self) -> int: return len(self.table) def __contains__(self, sym: str) -> bool: return sym in self.table def to_dictionary(self) -> Dictionary: dictionary = Dictionary(bos=self.bos_word, unk=self.unk_word, pad=self.pad_word, eos=self.eos_word) for n in self: dictionary.add_symbol(n.symbol, n=n.count) dictionary.finalize() return dictionary @dataclass class HuffmanNode: """ a node in a Huffman tree """ id: int count: int symbol: tp.Optional[str] = None left: tp.Optional["HuffmanNode"] = None right: tp.Optional["HuffmanNode"] = None code: tp.Optional[bitarray] = None def is_leaf(self) -> bool: return self.left is None and self.right is None def code_table(self, prefix: tp.Optional[bitarray] = None) -> tp.Dict[str, "HuffmanNode"]: defaulted_prefix = prefix if prefix is not None else bitarray() if self.is_leaf(): self.code = ( defaulted_prefix if len(defaulted_prefix) > 0 else bitarray("0") ) # leaf could be the root if there is only one symbol return {self.symbol: self} codes_right = self.right.code_table(defaulted_prefix + bitarray([0])) codes_left = self.left.code_table(defaulted_prefix + bitarray([1])) return {**codes_left, **codes_right} def decode(self, bits: bitarray) -> tp.Iterator["HuffmanNode"]: current_node = self for bit in bits: if bit == 0: # go right current_node = current_node.right else: # go left current_node = current_node.left if current_node is None: # we shouldn't be on a leaf here raise Exception("fell off a leaf") if current_node.is_leaf(): yield current_node current_node = self if current_node != self: raise Exception("couldn't decode all the bits") class HuffmanCodeBuilder: """ build a dictionary with occurrence counts and then build the Huffman code for it.
""" def __init__(self): self.symbols = Counter() def add_symbols(self, *syms) -> None: self.symbols.update(syms) def increment(self, symbol: str, cnt: int) -> None: self.symbols[symbol] += cnt @classmethod def from_file(cls, filename): c = cls() with open(filename, "r", encoding="utf-8") as input: for line in input: split = re.split(r"[\s]+", line) c.increment(split[0], int(split[1])) return c def to_file(self, filename, sep="\t"): with open(filename, "w", encoding="utf-8") as output: for (tok, cnt) in self.symbols.most_common(): output.write(f"{tok}{sep}{cnt}\n") def _smallest(self, q1: deque, q2: deque) -> HuffmanNode: if len(q1) == 0: return q2.pop() if len(q2) == 0: return q1.pop() if q1[-1].count < q2[-1].count: return q1.pop() return q2.pop() def __add__(self, c: "HuffmanCodeBuilder") -> "HuffmanCodeBuilder": new_c = self.symbols + c.symbols new_b = HuffmanCodeBuilder() new_b.symbols = new_c return new_b def build_code( self, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", ) -> HuffmanCoder: assert len(self.symbols) > 0, "cannot build code from empty list of symbols" if self.symbols[bos] == 0: self.add_symbols(bos) if self.symbols[pad] == 0: self.add_symbols(pad) if self.symbols[eos] == 0: self.add_symbols(eos) if self.symbols[unk] == 0: self.add_symbols(unk) node_id = 0 leaves_queue = deque( [ HuffmanNode(symbol=symbol, count=count, id=idx) for idx, (symbol, count) in enumerate(self.symbols.most_common()) ] ) # left are the most common, right are the least common if len(leaves_queue) == 1: root = leaves_queue.pop() root.id = 0 return HuffmanCoder(root) nodes_queue = deque() while len(leaves_queue) > 0 or len(nodes_queue) != 1: # get the lowest two nodes at the head of each queue node1 = self._smallest(leaves_queue, nodes_queue) node2 = self._smallest(leaves_queue, nodes_queue) # add new node nodes_queue.appendleft( HuffmanNode( count=node1.count + node2.count, left=node1, right=node2, id=node_id ) ) node_id += 1 # we are left with the root return HuffmanCoder(nodes_queue.pop(), bos=bos, pad=pad, eos=eos, unk=unk)
bart_ls-main
fairseq-py/fairseq/data/huffman/huffman_coder.py
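A hedged round-trip sketch for HuffmanCodeBuilder/HuffmanCoder above (requires the bitarray package): skewed counts give the frequent symbol a short code, and unknown symbols fall back to the <unk> leaf rather than raising.

from fairseq.data.huffman import HuffmanCodeBuilder

builder = HuffmanCodeBuilder()
builder.add_symbols("a", "a", "a", "b", "b", "c")

# build_code adds the special symbols (<s>, <pad>, </s>, <unk>) if they are missing
coder = builder.build_code()

encoded = coder.encode(["a", "b", "a", "c"])  # bytes, bit-padded with a "1" then "0"s
assert [node.symbol for node in coder.decode(encoded)] == ["a", "b", "a", "c"]

# out-of-vocabulary symbols are encoded as <unk>
assert [n.symbol for n in coder.decode(coder.encode(["zzz"]))] == ["<unk>"]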
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class SentencepieceConfig(FairseqDataclass): sentencepiece_model: str = field( default="???", metadata={"help": "path to sentencepiece model"} ) @register_bpe("sentencepiece", dataclass=SentencepieceConfig) class SentencepieceBPE(object): def __init__(self, cfg): sentencepiece_model = file_utils.cached_path(cfg.sentencepiece_model) try: import sentencepiece as spm self.sp = spm.SentencePieceProcessor() self.sp.Load(sentencepiece_model) except ImportError: raise ImportError( "Please install sentencepiece with: pip install sentencepiece" ) def encode(self, x: str) -> str: return " ".join(self.sp.EncodeAsPieces(x)) def decode(self, x: str) -> str: return x.replace(" ", "").replace("\u2581", " ").strip() def is_beginning_of_word(self, x: str) -> bool: if x in ["<unk>", "<s>", "</s>", "<pad>"]: # special elements are always considered beginnings # HACK: this logic is already present in fairseq/tasks/masked_lm.py # but these special tokens are also contained in the sentencepiece # vocabulary which causes duplicate special tokens. This hack makes # sure that they are all taken into account. return True return x.startswith("\u2581")
bart_ls-main
fairseq-py/fairseq/data/encoders/sentencepiece_bpe.py
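A hedged usage sketch for SentencepieceBPE above; "spm.model" is a placeholder for a real sentencepiece model file, and the decode round trip relies on sentencepiece's usual U+2581 whitespace convention:

from argparse import Namespace
from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE

bpe = SentencepieceBPE(Namespace(sentencepiece_model="spm.model"))
pieces = bpe.encode("Hello world")   # space-joined pieces, e.g. "▁Hello ▁world"
assert bpe.decode(pieces) == "Hello world"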
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class fastBPEConfig(FairseqDataclass): bpe_codes: str = field(default="???", metadata={"help": "path to fastBPE BPE"}) @register_bpe("fastbpe", dataclass=fastBPEConfig) class fastBPE(object): def __init__(self, cfg): if cfg.bpe_codes is None: raise ValueError("--bpe-codes is required for --bpe=fastbpe") codes = file_utils.cached_path(cfg.bpe_codes) try: import fastBPE self.bpe = fastBPE.fastBPE(codes) self.bpe_symbol = "@@ " except ImportError: raise ImportError("Please install fastBPE with: pip install fastBPE") def encode(self, x: str) -> str: return self.bpe.apply([x])[0] def decode(self, x: str) -> str: return (x + " ").replace(self.bpe_symbol, "").rstrip()
bart_ls-main
fairseq-py/fairseq/data/encoders/fastbpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.data.encoders import register_tokenizer from fairseq.dataclass import FairseqDataclass @register_tokenizer("nltk", dataclass=FairseqDataclass) class NLTKTokenizer(object): def __init__(self, *unused): try: from nltk.tokenize import word_tokenize self.word_tokenize = word_tokenize except ImportError: raise ImportError("Please install nltk with: pip install nltk") def encode(self, x: str) -> str: return " ".join(self.word_tokenize(x)) def decode(self, x: str) -> str: return x
bart_ls-main
fairseq-py/fairseq/data/encoders/nltk_tokenizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass from .gpt2_bpe_utils import get_encoder DEFAULT_ENCODER_JSON = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json" DEFAULT_VOCAB_BPE = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe" @dataclass class GPT2BPEConfig(FairseqDataclass): gpt2_encoder_json: str = field( default=DEFAULT_ENCODER_JSON, metadata={"help": "path to encoder.json"} ) gpt2_vocab_bpe: str = field( default=DEFAULT_VOCAB_BPE, metadata={"help": "path to vocab.bpe"} ) @register_bpe("gpt2", dataclass=GPT2BPEConfig) class GPT2BPE(object): def __init__(self, cfg): encoder_json = file_utils.cached_path(cfg.gpt2_encoder_json) vocab_bpe = file_utils.cached_path(cfg.gpt2_vocab_bpe) self.bpe = get_encoder(encoder_json, vocab_bpe) def encode(self, x: str) -> str: return " ".join(map(str, self.bpe.encode(x))) def decode(self, x: str) -> str: return self.bpe.decode( # [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()] [int(tok) if tok not in {"<unk>", "<mask>"} and not tok.startswith("<sentinel_") else tok for tok in x.split()] ) def is_beginning_of_word(self, x: str) -> bool: return self.decode(x).startswith(" ")
bart_ls-main
fairseq-py/fairseq/data/encoders/gpt2_bpe.py
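A hedged sketch of GPT2BPE above with its default vocab URLs (cached_path downloads encoder.json/vocab.bpe on first use, so this needs network access; the ids in the comment are an assumption about GPT-2's vocabulary):

from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig

bpe = GPT2BPE(GPT2BPEConfig())
ids = bpe.encode("Hello world")          # space-joined GPT-2 token ids, e.g. "15496 995"
assert bpe.decode(ids) == "Hello world"
# a token begins a word iff its decoded text starts with a space
print([bpe.is_beginning_of_word(tok) for tok in ids.split()])  # e.g. [False, True]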
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class SubwordNMTBPEConfig(FairseqDataclass): bpe_codes: str = field(default="???", metadata={"help": "path to subword NMT BPE"}) bpe_separator: str = field(default="@@", metadata={"help": "BPE separator"}) @register_bpe("subword_nmt", dataclass=SubwordNMTBPEConfig) class SubwordNMTBPE(object): def __init__(self, cfg): if cfg.bpe_codes is None: raise ValueError("--bpe-codes is required for --bpe=subword_nmt") codes = file_utils.cached_path(cfg.bpe_codes) try: from subword_nmt import apply_bpe bpe_parser = apply_bpe.create_parser() bpe_args = bpe_parser.parse_args( [ "--codes", codes, "--separator", cfg.bpe_separator, ] ) self.bpe = apply_bpe.BPE( bpe_args.codes, bpe_args.merges, bpe_args.separator, None, bpe_args.glossaries, ) self.bpe_symbol = bpe_args.separator + " " except ImportError: raise ImportError( "Please install subword_nmt with: pip install subword-nmt" ) def encode(self, x: str) -> str: return self.bpe.process_line(x) def decode(self, x: str) -> str: return (x + " ").replace(self.bpe_symbol, "").rstrip()
bart_ls-main
fairseq-py/fairseq/data/encoders/subword_nmt_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass from fairseq import file_utils @dataclass class HuggingFaceByteLevelBPEConfig(FairseqDataclass): bpe_merges: str = field(default="???", metadata={"help": "path to merges.txt"}) bpe_vocab: str = field(default="???", metadata={"help": "path to vocab.json"}) bpe_add_prefix_space: bool = field( default=False, metadata={"help": "add prefix space before encoding"} ) @register_bpe("hf_byte_bpe", dataclass=HuggingFaceByteLevelBPEConfig) class HuggingFaceByteLevelBPE(object): def __init__(self, cfg): try: from tokenizers import ByteLevelBPETokenizer except ImportError: raise ImportError( "Please install huggingface/tokenizers with: " "pip install tokenizers" ) bpe_vocab = file_utils.cached_path(cfg.bpe_vocab) bpe_merges = file_utils.cached_path(cfg.bpe_merges) self.bpe = ByteLevelBPETokenizer( bpe_vocab, bpe_merges, add_prefix_space=cfg.bpe_add_prefix_space, ) def encode(self, x: str) -> str: return " ".join(map(str, self.bpe.encode(x).ids)) def decode(self, x: str) -> str: return self.bpe.decode( [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()] ) def is_beginning_of_word(self, x: str) -> bool: return self.decode(x).startswith(" ")
bart_ls-main
fairseq-py/fairseq/data/encoders/hf_byte_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os from fairseq import registry build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry( "--tokenizer", default=None, ) build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry( "--bpe", default=None, ) # automatically import any Python files in the encoders/ directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): module = file[: file.find(".py")] importlib.import_module("fairseq.data.encoders." + module)
bart_ls-main
fairseq-py/fairseq/data/encoders/__init__.py
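A hedged sketch of the registry indirection above: encoders register themselves under a name, and build_tokenizer/build_bpe resolve the name stored on the config (the exact config plumbing can differ across fairseq versions):

from argparse import Namespace
from fairseq.data import encoders

# resolves the "space" entry in TOKENIZER_REGISTRY and instantiates it
tokenizer = encoders.build_tokenizer(Namespace(tokenizer="space"))
assert tokenizer.encode("a   b\tc") == "a b c"

# same mechanism for BPEs, via BPE_REGISTRY
bpe = encoders.build_bpe(Namespace(bpe="characters"))
assert bpe.decode(bpe.encode("ab c")) == "ab c"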
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class BertBPEConfig(FairseqDataclass): bpe_cased: bool = field(default=False, metadata={"help": "set for cased BPE"}) bpe_vocab_file: Optional[str] = field( default=None, metadata={"help": "bpe vocab file"} ) @register_bpe("bert", dataclass=BertBPEConfig) class BertBPE(object): def __init__(self, cfg): try: from transformers import BertTokenizer except ImportError: raise ImportError( "Please install transformers with: pip install transformers" ) if cfg.bpe_vocab_file: self.bert_tokenizer = BertTokenizer( cfg.bpe_vocab_file, do_lower_case=not cfg.bpe_cased ) else: vocab_file_name = ( "bert-base-cased" if cfg.bpe_cased else "bert-base-uncased" ) self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name) def encode(self, x: str) -> str: return " ".join(self.bert_tokenizer.tokenize(x)) def decode(self, x: str) -> str: return self.bert_tokenizer.clean_up_tokenization( self.bert_tokenizer.convert_tokens_to_string(x.split(" ")) ) def is_beginning_of_word(self, x: str) -> bool: return not x.startswith("##")
bart_ls-main
fairseq-py/fairseq/data/encoders/hf_bert_bpe.py
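A hedged sketch for BertBPE above (with no vocab file it downloads bert-base-uncased from HuggingFace; the exact pieces shown are an assumption): WordPiece continuation pieces carry the "##" prefix that is_beginning_of_word checks.

from argparse import Namespace
from fairseq.data.encoders.hf_bert_bpe import BertBPE

bpe = BertBPE(Namespace(bpe_cased=False, bpe_vocab_file=None))
pieces = bpe.encode("unbelievable").split()   # e.g. ["un", "##believable"]
assert bpe.is_beginning_of_word(pieces[0])
assert all(not bpe.is_beginning_of_word(p) for p in pieces[1:])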
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.data.encoders.byte_utils import ( SPACE, SPACE_ESCAPE, byte_encode, smart_byte_decode, ) from fairseq.dataclass import FairseqDataclass @dataclass class ByteBpeConfig(FairseqDataclass): sentencepiece_model_path: str = field( default="???", metadata={"help": "path to sentencepiece model"} ) @register_bpe("byte_bpe", dataclass=ByteBpeConfig) class ByteBPE(object): def __init__(self, cfg): vocab = file_utils.cached_path(cfg.sentencepiece_model_path) try: import sentencepiece as spm self.sp = spm.SentencePieceProcessor() self.sp.Load(vocab) except ImportError: raise ImportError( "Please install sentencepiece with: pip install sentencepiece" ) def encode(self, x: str) -> str: byte_encoded = byte_encode(x) return SPACE.join(self.sp.EncodeAsPieces(byte_encoded)) @staticmethod def decode(x: str) -> str: unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) return smart_byte_decode(unescaped)
bart_ls-main
fairseq-py/fairseq/data/encoders/byte_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.data import encoders def get_whole_word_mask(args, dictionary): bpe = encoders.build_bpe(args) if bpe is not None: def is_beginning_of_word(i): if i < dictionary.nspecial: # special elements are always considered beginnings return True tok = dictionary[i] if tok.startswith("madeupword"): return True try: return bpe.is_beginning_of_word(tok) except ValueError: return True mask_whole_words = torch.ByteTensor( list(map(is_beginning_of_word, range(len(dictionary)))) ) return mask_whole_words return None
bart_ls-main
fairseq-py/fairseq/data/encoders/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from fairseq.data.encoders import register_tokenizer from fairseq.dataclass import FairseqDataclass @register_tokenizer("space", dataclass=FairseqDataclass) class SpaceTokenizer(object): def __init__(self, *unused): self.space_tok = re.compile(r"\s+") def encode(self, x: str) -> str: return self.space_tok.sub(" ", x) def decode(self, x: str) -> str: return x
bart_ls-main
fairseq-py/fairseq/data/encoders/space_tokenizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Byte pair encoding utilities from GPT-2. Original source: https://github.com/openai/gpt-2/blob/master/src/encoder.py Original license: MIT """ import json from functools import lru_cache @lru_cache() def bytes_to_unicode(): """ Returns a dict mapping utf-8 bytes to corresponding unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. This also avoids mapping to whitespace/control characters the bpe code barfs on. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class Encoder: def __init__(self, encoder, bpe_merges, errors="replace"): self.encoder = encoder self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} try: import regex as re self.re = re except ImportError: raise ImportError("Please install regex with: pip install regex") # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = self.re.compile( r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except ValueError: # `first` does not occur again; keep the rest of the word as-is new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def encode(self, text): bpe_tokens = [] for token in self.re.findall(self.pat, text): token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) bpe_tokens.extend( self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") ) return bpe_tokens def decode(self, tokens): text = "".join([self.decoder.get(token, token) for token in tokens]) text = bytearray([self.byte_decoder[c] for c in text]).decode( "utf-8", errors=self.errors ) return text def get_encoder(encoder_json_path, vocab_bpe_path): with open(encoder_json_path, "r") as f: encoder = json.load(f) with open(vocab_bpe_path,
"r", encoding="utf-8") as f: bpe_data = f.read() bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]] return Encoder( encoder=encoder, bpe_merges=bpe_merges, )
bart_ls-main
fairseq-py/fairseq/data/encoders/gpt2_bpe_utils.py
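The byte-to-unicode table above is a bijection over all 256 byte values, which is what makes GPT-2's byte-level BPE lossless; a small sketch checking that property:

from fairseq.data.encoders.gpt2_bpe_utils import bytes_to_unicode

byte_encoder = bytes_to_unicode()
assert len(byte_encoder) == 256
assert len(set(byte_encoder.values())) == 256   # no collisions, so it is invertible

byte_decoder = {v: k for k, v in byte_encoder.items()}
raw = "café".encode("utf-8")
unicode_form = "".join(byte_encoder[b] for b in raw)
assert bytearray(byte_decoder[c] for c in unicode_form).decode("utf-8") == "café"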
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq.data.encoders import register_tokenizer from fairseq.dataclass import FairseqDataclass @dataclass class MosesTokenizerConfig(FairseqDataclass): source_lang: str = field(default="en", metadata={"help": "source language"}) target_lang: str = field(default="en", metadata={"help": "target language"}) moses_no_dash_splits: bool = field( default=False, metadata={"help": "don't apply dash split rules"} ) moses_no_escape: bool = field( default=False, metadata={"help": "don't perform HTML escaping on apostrophe, quotes, etc."}, ) @register_tokenizer("moses", dataclass=MosesTokenizerConfig) class MosesTokenizer(object): def __init__(self, cfg: MosesTokenizerConfig): self.cfg = cfg try: from sacremoses import MosesTokenizer, MosesDetokenizer self.tok = MosesTokenizer(cfg.source_lang) self.detok = MosesDetokenizer(cfg.target_lang) except ImportError: raise ImportError( "Please install Moses tokenizer with: pip install sacremoses" ) def encode(self, x: str) -> str: return self.tok.tokenize( x, aggressive_dash_splits=(not self.cfg.moses_no_dash_splits), return_str=True, escape=(not self.cfg.moses_no_escape), ) def decode(self, x: str) -> str: return self.detok.detokenize(x.split())
bart_ls-main
fairseq-py/fairseq/data/encoders/moses_tokenizer.py
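A hedged round-trip sketch for MosesTokenizer above (requires the sacremoses package; the tokenized form shown is an assumption about sacremoses' output):

from fairseq.data.encoders.moses_tokenizer import MosesTokenizer, MosesTokenizerConfig

tok = MosesTokenizer(MosesTokenizerConfig(source_lang="en", target_lang="en"))
encoded = tok.encode("Hello, world!")   # e.g. "Hello , world !"
assert tok.decode(encoded) == "Hello, world!"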
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.data.encoders import register_bpe SPACE = chr(32) SPACE_ESCAPE = chr(9601) @register_bpe("characters") class Characters(object): def __init__(self, *unused): pass @staticmethod def add_args(parser): pass @staticmethod def encode(x: str) -> str: escaped = x.replace(SPACE, SPACE_ESCAPE) return SPACE.join(list(escaped)) @staticmethod def decode(x: str) -> str: return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
bart_ls-main
fairseq-py/fairseq/data/encoders/characters.py
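A round-trip check for the character-level "BPE" above: spaces are escaped to U+2581 before splitting into characters, so decode can unambiguously restore them.

from fairseq.data.encoders.characters import Characters

assert Characters.encode("ab c") == "a b \u2581 c"
assert Characters.decode("a b \u2581 c") == "ab c"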