Columns: python_code (string, lengths 0–4.04M), repo_name (string, lengths 8–58), file_path (string, lengths 5–147)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import torch
from fairseq import optim
from omegaconf import DictConfig

logger = logging.getLogger(__name__)


class AMPOptimizer(optim.FairseqOptimizer):
    """
    Wrap an *optimizer* to support AMP (automatic mixed precision) training.
    """

    def __init__(self, cfg: DictConfig, params, fp32_optimizer, **kwargs):
        super().__init__(cfg.optimizer)
        self.fp32_optimizer = fp32_optimizer
        amp_kwargs = {"init_scale": cfg.common.fp16_init_scale}
        if getattr(cfg.common, "amp_scale_window", None) is not None:
            amp_kwargs["growth_interval"] = cfg.common.amp_scale_window
        self._grad_scaler = torch.cuda.amp.GradScaler(**amp_kwargs)
        self.min_loss_scale = cfg.common.min_loss_scale

    @classmethod
    def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
        """
        Args:
            cfg (omegaconf.DictConfig): fairseq args
            params (iterable): iterable of parameters to optimize
        """
        fp32_optimizer = optim.build_optimizer(cfg.optimizer, params)
        return cls(cfg, params, fp32_optimizer, **kwargs)

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves.

        Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
        function additionally dynamically scales the loss to avoid gradient
        underflow.
        """
        self._grad_scaler.scale(loss).backward()

    def step(self):
        self.scaler.step(self.fp32_optimizer)
        self.scaler.update()

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clips gradient norm."""
        self.scaler.unscale_(self.optimizer)
        grad_norm = self.fp32_optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
        if not torch.isfinite(grad_norm).all():
            new_loss_scale = self.next_loss_scale
            if new_loss_scale <= self.min_loss_scale:
                raise FloatingPointError(
                    (
                        "AMP: Minimum loss scale reached ({}). Your loss is probably exploding. "
                        "Try restarting training or use fp32. {}"
                    ).format(self.min_loss_scale, new_loss_scale)
                )
            else:
                logger.info(f"AMP: overflow detected, setting scale to {new_loss_scale}")
        return grad_norm

    @property
    def scaler(self):
        return self._grad_scaler

    @property
    def next_loss_scale(self):
        return self.scaler.get_scale() * self.scaler.get_backoff_factor()

    @property
    def optimizer(self):
        return self.fp32_optimizer.optimizer

    @optimizer.setter
    def optimizer(self, optimizer):
        self.fp32_optimizer.optimizer = optimizer

    @property
    def lr_scheduler(self):
        return getattr(self.fp32_optimizer, "lr_scheduler", None)

    @property
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)

    def all_reduce_grads(self, module):
        self.fp32_optimizer.all_reduce_grads(module)

    @property
    def supports_flat_params(self):
        return self.fp32_optimizer.supports_flat_params
bart_ls-main
fairseq-py/fairseq/optim/amp_optimizer.py
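AMPOptimizer above is a thin wrapper around torch.cuda.amp.GradScaler. The following is a minimal sketch of the equivalent bare-PyTorch loop it automates (scale the loss, unscale before clipping, step through the scaler), with no fairseq dependency; the toy model, data, and hyperparameters are illustrative assumptions, not part of the original code.

# Sketch of the GradScaler pattern that AMPOptimizer wraps (plain PyTorch, toy model).
import torch
import torch.nn as nn

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(16, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# enabled=False on CPU turns the scaler into a no-op, keeping the sketch runnable anywhere
scaler = torch.cuda.amp.GradScaler(init_scale=2 ** 7, enabled=(device == "cuda"))

for _ in range(10):
    x = torch.randn(8, 16, device=device)
    y = torch.randn(8, 1, device=device)
    optimizer.zero_grad()
    with torch.cuda.amp.autocast(enabled=(device == "cuda")):
        loss = ((model(x) - y) ** 2).mean()
    scaler.scale(loss).backward()                          # backward on the scaled loss
    scaler.unscale_(optimizer)                             # unscale before clipping, as clip_grad_norm() does above
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    scaler.step(optimizer)                                 # skips the update if inf/nan grads were found
    scaler.update()                                        # grows or backs off the scale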
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List

import torch
from fairseq.dataclass import FairseqDataclass
from omegaconf import II, DictConfig
from torch.optim.optimizer import Optimizer, required

from . import FairseqOptimizer, register_optimizer


@dataclass
class FairseqNAGConfig(FairseqDataclass):
    momentum: float = field(default=0.99, metadata={"help": "momentum factor"})
    weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
    # TODO common vars in parent class
    lr: List[float] = II("optimization.lr")


@register_optimizer("nag", dataclass=FairseqNAGConfig)
class FairseqNAG(FairseqOptimizer):
    def __init__(self, cfg: DictConfig, params):
        super().__init__(cfg)
        self._optimizer = NAG(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            "lr": self.cfg.lr[0]
            if isinstance(self.cfg.lr, Collection)
            else self.cfg.lr,
            "momentum": self.cfg.momentum,
            "weight_decay": self.cfg.weight_decay,
        }


class NAG(Optimizer):
    def __init__(self, params, lr=required, momentum=0, weight_decay=0):
        defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
        super(NAG, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        return True

    @property
    def supports_flat_params(self):
        return True

    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            lr = group["lr"]
            lr_old = group.get("lr_old", lr)
            lr_correct = lr / lr_old if lr_old > 0 else lr

            for p in group["params"]:
                if p.grad is None:
                    continue

                p_data_fp32 = p.data
                if p_data_fp32.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                d_p = p.grad.data.float()
                param_state = self.state[p]
                if "momentum_buffer" not in param_state:
                    param_state["momentum_buffer"] = torch.zeros_like(d_p)
                else:
                    param_state["momentum_buffer"] = param_state["momentum_buffer"].to(
                        d_p
                    )

                buf = param_state["momentum_buffer"]

                if weight_decay != 0:
                    p_data_fp32.mul_(1 - lr * weight_decay)
                p_data_fp32.add_(buf, alpha=momentum * momentum * lr_correct)
                p_data_fp32.add_(d_p, alpha=-(1 + momentum) * lr)

                buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr)

                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)

            group["lr_old"] = lr

        return loss
bart_ls-main
fairseq-py/fairseq/optim/nag.py
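The NAG class at the bottom of nag.py is a self-contained torch.optim.Optimizer, so it can be driven directly without the FairseqNAG wrapper. A minimal usage sketch, assuming fairseq is installed; the toy model and hyperparameters are illustrative.

# Sketch: using the standalone NAG optimizer on a toy regression problem.
import torch
import torch.nn as nn
from fairseq.optim.nag import NAG

model = nn.Linear(4, 1)
opt = NAG(model.parameters(), lr=0.05, momentum=0.99, weight_decay=1e-4)

x, y = torch.randn(32, 4), torch.randn(32, 1)
for _ in range(100):
    opt.zero_grad()
    loss = ((model(x) - y) ** 2).mean()
    loss.backward()
    opt.step()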
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import types import torch def get_fused_adam_class(): """ Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. """ try: # The "deprecated" interface in recent versions of apex is a bit # faster than the main interface, since we don't use the apex # optimizer. This can be installed by passing the # `--deprecated_fused_adam` option when building apex. global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") return FusedAdamV1 except ImportError: try: # fallback to the newer interface from apex.optimizers import FusedAdam as _FusedAdam # noqa from apex.multi_tensor_apply import multi_tensor_applier if multi_tensor_applier.available: return FusedAdamV2 except ImportError: pass return None class FusedAdamV1(torch.optim.Optimizer): """ Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0.0, max_grad_norm=0.0, amsgrad=False, use_fp16_stats=False, ): global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError("FusedAdam does not support the AMSGrad variant.") defaults = { "lr": lr, "bias_correction": bias_correction, "betas": betas, "eps": eps, "weight_decay": weight_decay, "max_grad_norm": max_grad_norm, } super().__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 self.use_fp16_stats = use_fp16_stats self.FLOAT16_MAX = 65504.0 @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True @property def supports_step_with_scale(self): return True def step(self, closure=None, grads=None, scale=1.0, grad_norms=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. (default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads if grad_norms is None: grad_norms = [None] * len(self.param_groups) for group, grads_this_group, grad_norm in zip( self.param_groups, grads_group, grad_norms ): if grads_this_group is None: grads_this_group = [None] * len(group["params"]) # compute combined scale factor for this group combined_scale = scale if group.get("max_grad_norm", 0) > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group.get("bias_correction", 1) else 0 for p, grad in zip(group["params"], grads_this_group): # note: p.grad should not ever be set for correct # operation of mixed precision optimizer that sometimes # sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) if p.device.type == "cpu": p_data_fp32 = p.data.cuda(non_blocking=True).float() out_p = torch.tensor([], dtype = torch.float) else: p_data_fp32 = p.data.float() out_p = p.data state = self.state[p] # State initialization dtype = torch.float16 if self.use_fp16_stats else p_data_fp32.dtype if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32, dtype=dtype) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32, dtype=dtype) if self.use_fp16_stats: state["exp_avg_scale"] = 1.0 state["exp_avg_sq_scale"] = 1.0 else: device = p_data_fp32.device state["exp_avg"] = state["exp_avg"].to(device, dtype) state["exp_avg_sq"] = state["exp_avg_sq"].to(device, dtype) exp_avg = state["exp_avg"] exp_avg_sq = state["exp_avg_sq"] if self.use_fp16_stats: assert exp_avg.dtype == torch.float16 exp_avg = exp_avg.float() * state["exp_avg_scale"] exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"] beta1, beta2 = group["betas"] state["step"] += 1 with torch.cuda.device(p_data_fp32.device): fused_adam_cuda.adam( p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group["lr"], beta1, beta2, group["eps"], combined_scale, state["step"], self.eps_mode, bias_correction, group["weight_decay"], ) if p.device.type == "cpu": p.data.copy_(p_data_fp32, non_blocking=True) if self.use_fp16_stats: def inf_norm(t): return torch.norm(t, float("inf")) # from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py state["exp_avg_scale"], state["exp_avg_sq_scale"] = ( 1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX, 1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX, ) state["exp_avg"], state["exp_avg_sq"] = ( (exp_avg / state["exp_avg_scale"]).half(), (exp_avg_sq 
/ state["exp_avg_sq_scale"]).half(), ) return loss try: from apex.optimizers import FusedAdam from apex.multi_tensor_apply import multi_tensor_applier class FusedAdamV2(FusedAdam): """ Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. """ def __init__(self, *args, use_fp16_stats=False, **kwargs): if use_fp16_stats: raise NotImplementedError("--fp16-adam-stats is only supported with FusedAdamV1") super().__init__(*args, **kwargs) if not hasattr(self, "multi_tensor_adam"): raise Exception( "Apex installation is outdated. Please install an updated version of apex." ) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step( self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, ): """Performs a single optimization step.""" loss = None if closure is not None: loss = closure() for group in self.param_groups: bias_correction = 1 if group["bias_correction"] else 0 beta1, beta2 = group["betas"] # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if "step" in group: group["step"] += 1 else: group["step"] = 1 # create lists for multi-tensor apply g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], [] g_32, p_32, m_32, v_32 = [], [], [], [] for p in group["params"]: if p.grad is None: continue if p.grad.data.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) state = self.state[p] # State initialization if len(state) == 0: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like( p.data, dtype=torch.float ) else: state["exp_avg"] = state["exp_avg"].to( device=p.data.device, dtype=torch.float ) state["exp_avg_sq"] = state["exp_avg_sq"].to( device=p.data.device, dtype=torch.float ) if p.dtype == torch.float16: g_16.append(p.grad.data.float()) p_16.append(p.data.float()) orig_p_16.append(p.data) m_16.append(state["exp_avg"]) v_16.append(state["exp_avg_sq"]) elif p.dtype == torch.float32: g_32.append(p.grad.data) p_32.append(p.data) m_32.append(state["exp_avg"]) v_32.append(state["exp_avg_sq"]) else: raise RuntimeError("FusedAdam only support fp16 and fp32.") with torch.cuda.device(p.device): if len(g_16) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) for orig_p, p in zip(orig_p_16, p_16): orig_p.copy_(p.data) if len(g_32) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) return loss except ImportError: pass
bart_ls-main
fairseq-py/fairseq/optim/fused_adam.py
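get_fused_adam_class() returns None when neither apex interface can be imported, so callers typically fall back to a stock Adam. A hedged sketch of that selection logic; it assumes fairseq is installed, treats apex as optional, and uses a toy model.

# Sketch: prefer the apex fused kernel when available, otherwise fall back to torch.optim.Adam.
import torch
import torch.nn as nn
from fairseq.optim.fused_adam import get_fused_adam_class

model = nn.Linear(8, 8)
fused_adam_cls = get_fused_adam_class()
if fused_adam_cls is not None and torch.cuda.is_available():
    optimizer = fused_adam_cls(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
else:
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8)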
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import torch import torch.distributed as dist from fairseq.dataclass.configs import FairseqBMUFConfig from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.optim.fairseq_optimizer import FairseqOptimizer class FairseqBMUF(FairseqOptimizer): """ Implements incremental block distributed data parallelism similar to https://ieeexplore.ieee.org/document/7472805 Paper title: Scalable training of deep learning machines by incremental block training with intra-block parallel optimization and blockwise model-update filtering """ def __init__(self, cfg: FairseqBMUFConfig, optimizer): super().__init__(cfg) self._optimizer = optimizer self._num_updates = 0 self.sync_iter = cfg.global_sync_iter self.block_momentum = cfg.block_momentum self.block_lr = cfg.block_lr self._reset_local_data() self.warmup_iteration = cfg.warmup_iterations self.use_nbm = cfg.use_nbm self.initial_state = self._optimizer.state_dict() self.average_sync = self.cfg.average_sync self.world_size = self.cfg.distributed_world_size @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" gen_parser_from_dataclass(parser, FairseqBMUFConfig()) @property def optimizer(self): return self._optimizer.optimizer @property def optimizer_config(self): return self._optimizer.optimizer_config def get_lr(self): return self._optimizer.get_lr() def set_lr(self, lr): self._optimizer.set_lr(lr) def state_dict(self): return self._optimizer.state_dict() def load_state_dict(self, state_dict, optimizer_overrides=None): self._optimizer.load_state_dict(state_dict, optimizer_overrides) self.initial_state = self._optimizer.state_dict() def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" self._optimizer.multiply_grads(c) def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm.""" return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn) def average_params(self): self._optimizer.average_params() def _block_sync(self): if self.world_size <= 1: return # Update the global model using local models from all GPUs # (Step-1) Calculate grad between previously synced model and # currrent local model if self.block_momentum != 0: self._calc_grad() # (Step-2) Average gradient from all GPUs self._avg_grad_from_all_gpus() # (Step-3) Calculate global momentum and update the global model if self.block_momentum != 0: self._update_global_model() # (Step-4) Average local optimizer params if self.average_sync: self.average_params() def _is_warmup_end(self): # Check whether train iterations is equal to warmup iter if self.get_num_updates() == self.warmup_iteration: return True return False def _is_bmuf_iter(self): # Check whether train iterations is equal to bmuf sync iter if (self.get_num_updates() > self.warmup_iteration) and ( self.get_num_updates() % self.sync_iter == 0 ): return True return False def _warmup_sync(self, root_rank=0): if self.world_size <= 1: return # Broadcast the local model to all gpus for param in self.params: dist.broadcast(param.data, src=root_rank) # Update local optimizer state if self.average_sync: self._optimizer.average_params() else: self._optimizer.load_state_dict(self.initial_state) self._reset_local_data() def step(self, closure=None): """Performs a single optimization step.""" self._optimizer.step(closure) 
self.set_num_updates(self.get_num_updates() + 1) if self._is_warmup_end(): self._warmup_sync() elif self._is_bmuf_iter(): self._block_sync() def zero_grad(self): """Clears the gradients of all optimized parameters.""" self._optimizer.zero_grad() def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates @torch.no_grad() def _reset_local_data(self): # (Step-0) Initialize global momentum parameters and store global copy on each gpu self.global_params = [torch.zeros_like(p.data) for p in self.params] self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params] self.grads = [p.data.new_zeros(p.data.size()) for p in self.params] # saving the global model locally for calculating gradient during bmuf sync for param, global_param in zip(self.params, self.global_params): global_param.copy_(param.data) @torch.no_grad() def _calc_grad(self): # global_params is basically the global copy from the previously finished # synchronisation. param.data is local parameter after block_sync_freq # for the local gpu. so grad is difference between previously synced # model and currrent local model. for index, (param, global_param) in enumerate( zip(self.params, self.global_params) ): self.grads[index] = global_param - param.data def _avg_grad_from_all_gpus(self): for index, param in enumerate(self.params): sync_para = param.data if self.block_momentum == 0 else self.grads[index] sync_para /= float(dist.get_world_size()) dist.all_reduce(sync_para, op=dist.ReduceOp.SUM) @torch.no_grad() def _update_global_model(self): for index, (param, global_param, smoothed_grad, grad) in enumerate( zip( self.params, self.global_params, self.smoothed_grads, # all gpus would share the same value of smoothed_grad, since it is # always computed on synchronized gradients. self.grads, ) ): # global_param is basically last syncrhornized parameter. though # smoothed_grad is local, all processes will have same value of # smoothed_grad and hence param is globally synchronized copy. # smoothed_grad(t) = BM * smoothed_grad(t-1) + BM_lr * grad(t) smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad param.data.copy_(global_param - smoothed_grad) # A Nesterov momentum here is to do a partial weight update before # calculating the gradient if self.use_nbm: param.data.copy_(param.data - self.block_momentum * smoothed_grad) # backup for the next synchronization. self.smoothed_grads[index] = smoothed_grad global_param.copy_(param.data)
bart_ls-main
fairseq-py/fairseq/optim/bmuf.py
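The block-momentum update inside _update_global_model reduces to two tensor equations. The sketch below works through that arithmetic on plain tensors in a single process, with made-up values and no torch.distributed setup, purely to make the update rule concrete.

# Sketch of the BMUF update rule on toy tensors:
#   smoothed_grad(t) = block_momentum * smoothed_grad(t-1) + block_lr * grad(t)
#   param            = global_param - smoothed_grad(t)
# with an optional Nesterov-style look-ahead when use_nbm is set.
import torch

block_momentum, block_lr, use_nbm = 0.9, 1.0, True

global_param = torch.tensor([1.0, 2.0, 3.0])   # last synchronized model
param = torch.tensor([0.8, 2.1, 2.7])          # current local model after local steps
smoothed_grad = torch.zeros(3)

grad = global_param - param                     # drift since the last sync (_calc_grad)
smoothed_grad = block_momentum * smoothed_grad + block_lr * grad
param = global_param - smoothed_grad
if use_nbm:
    param = param - block_momentum * smoothed_grad
print(param)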
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class DynamicLossScaler(object):
    def __init__(
        self,
        init_scale=2.0 ** 15,
        scale_factor=2.0,
        scale_window=2000,
        tolerance=0.0,
        threshold=None,
        min_loss_scale=1e-4,
    ):
        self.loss_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.tolerance = tolerance
        self.threshold = threshold
        self._iter = 0
        self._last_overflow_iter = -1
        self._last_rescale_iter = -1
        self._overflows_since_rescale = 0
        self.min_loss_scale = min_loss_scale

    def scale(self, outputs):
        return self.loss_scale * outputs

    def update(self):
        if (self._iter - self._last_overflow_iter) % self.scale_window == 0:
            self.loss_scale *= self.scale_factor
            self._last_rescale_iter = self._iter
        self._iter += 1

    def _decrease_loss_scale(self):
        self.loss_scale /= self.scale_factor
        if self.threshold is not None:
            self.loss_scale = max(self.loss_scale, self.threshold)

    def check_overflow(self, grad_norm):
        # detect inf and nan
        if grad_norm == float("inf") or grad_norm != grad_norm:
            # overflow has occurred
            prev_scale = self.loss_scale
            iter_since_rescale = self._iter - self._last_rescale_iter

            self._last_overflow_iter = self._iter
            self._overflows_since_rescale += 1
            pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
            if pct_overflow >= self.tolerance:
                self._decrease_loss_scale()
                self._last_rescale_iter = self._iter
                self._overflows_since_rescale = 0

            if self.loss_scale <= self.min_loss_scale:
                # Use FloatingPointError as an uncommon error that parent
                # functions can safely catch to stop training.
                self.loss_scale = prev_scale
                raise FloatingPointError(
                    (
                        "Minimum loss scale reached ({}). Your loss is probably exploding. "
                        "Try lowering the learning rate, using gradient clipping or "
                        "increasing the batch size."
                    ).format(self.min_loss_scale)
                )

            self._iter += 1
            raise OverflowError("setting loss scale to: " + str(self.loss_scale))
bart_ls-main
fairseq-py/fairseq/optim/dynamic_loss_scaler.py
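DynamicLossScaler is pure Python with no torch dependency, and check_overflow signals "skip this step and retry at the smaller scale" by raising OverflowError, which the caller is expected to catch. A small sketch exercising that control flow, assuming fairseq is installed; the gradient-norm values are made up.

# Sketch: exercising DynamicLossScaler's grow/backoff behaviour directly.
from fairseq.optim.dynamic_loss_scaler import DynamicLossScaler

scaler = DynamicLossScaler(init_scale=2.0 ** 7, scale_window=4)

for step, grad_norm in enumerate([1.0, 0.5, float("inf"), 0.7, 0.6]):
    try:
        scaler.check_overflow(grad_norm)   # raises OverflowError on inf/nan
    except OverflowError:
        print(f"step {step}: overflow, scale backed off to {scaler.loss_scale}")
        continue                           # the caller skips the optimizer step
    scaler.update()                        # grows the scale after scale_window clean steps
    print(f"step {step}: ok, scale is {scaler.loss_scale}")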
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adafactor") class FairseqAdafactor(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = Adafactor(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar="E", help='epsilons for Adafactor optimizer') parser.add_argument('--clip-threshold', type=float, default=1.0, metavar="C", help='threshold for clipping update root mean square') parser.add_argument('--decay-rate', type=float, default=-0.8, metavar="D", help='decay rate of the second moment estimator') parser.add_argument('--beta1', type=float, default=None, metavar="B", help='beta for first moment estimator. Optional') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter') parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep,' 'otherwise use external learning rate') parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. Note : Convergence issues empirically observed with fp16 on. Might require search for appropriate configuration. """ return { "lr": self.args.lr[0], "eps": eval(self.args.adafactor_eps), "clip_threshold": self.args.clip_threshold, "decay_rate": self.args.decay_rate, "beta1": self.args.beta1, "weight_decay": self.args.weight_decay, "scale_parameter": self.args.scale_parameter, # defaults to False "relative_step": self.args.relative_step, # defaults to False "warmup_init": self.args.warmup_init, } class Adafactor(torch.optim.Optimizer): """Implements Adafactor algorithm. This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` (see https://arxiv.org/abs/1804.04235) Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. 
Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): external learning rate (default: None) eps (tuple[float, float]): regularization constans for square gradient and parameter scale respectively (default: (1e-30, 1e-3)) clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) beta1 (float): coefficient used for computing running averages of gradient (default: None) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) relative_step (bool): if True, time-dependent learning rate is computed instead of external learning rate (default: True) warmup_init (bool): time-dependent learning rate computation depends on whether warm-up initialization is being used (default: False) """ def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): if lr is not None and relative_step: raise ValueError("Cannot combine manual lr and relative_step options") if warmup_init and not relative_step: raise ValueError("warmup_init requires relative_step=True") defaults = dict( lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init, ) super(Adafactor, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return False def _get_lr(self, param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = ( 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 ) rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return param_scale * rel_step_sz def _get_options(self, param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment def _rms(self, tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): r_factor = ( (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)) .rsqrt_() .unsqueeze(-1) ) c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() return torch.mul(r_factor, c_factor) def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros( grad_shape[:-2] + grad_shape[-1:] ).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) group["lr"] = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad ** 2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_( update.mean(dim=-1), alpha=1.0 - beta2t ) exp_avg_sq_col.mul_(beta2t).add_( update.mean(dim=-2), alpha=1.0 - beta2t ) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) update = exp_avg_sq.rsqrt().mul_(grad) update.div_( (self._rms(update) / group["clip_threshold"]).clamp_(min=1.0) ) update.mul_(group["lr"]) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"]) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.add_(-update) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
bart_ls-main
fairseq-py/fairseq/optim/adafactor.py
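Adafactor adjusts its own learning rate unless relative_step is turned off. The sketch below shows the two instantiation modes described in the docstring (internal time-dependent learning rate versus an external schedule); it assumes fairseq is installed and uses a placeholder model.

# Sketch: the two Adafactor configurations described in the docstring.
import torch.nn as nn
from fairseq.optim.adafactor import Adafactor

model = nn.Linear(32, 32)

# 1) default: internal, time-dependent learning rate (no external lr allowed)
opt_relative = Adafactor(model.parameters(), lr=None, relative_step=True, warmup_init=True)

# 2) external learning-rate schedule: disable parameter scaling and relative steps
opt_external = Adafactor(
    model.parameters(), lr=1e-3, scale_parameter=False, relative_step=False
)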
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.optim

from . import LegacyFairseqOptimizer, register_optimizer


@register_optimizer("sgd")
class SGD(LegacyFairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.SGD(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
                            help='momentum factor')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                            help='weight decay')
        # fmt: on

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            "lr": self.args.lr[0],
            "momentum": self.args.momentum,
            "weight_decay": self.args.weight_decay,
        }

    @property
    def supports_flat_params(self):
        return True
bart_ls-main
fairseq-py/fairseq/optim/sgd.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from fairseq.dataclass.utils import gen_parser_from_dataclass class FairseqOptimizer(object): def __init__(self, cfg): super().__init__() self.cfg = cfg @classmethod def add_args(cls, parser): """Add optimizer-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) @property def optimizer(self): """Return a torch.optim.optimizer.Optimizer instance.""" if not hasattr(self, "_optimizer"): raise NotImplementedError if not isinstance(self._optimizer, torch.optim.Optimizer): raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") return self._optimizer @optimizer.setter def optimizer(self, optimizer): """Reset optimizer instance.""" if not hasattr(self, "_optimizer"): raise NotImplementedError if not isinstance(self._optimizer, torch.optim.Optimizer): raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") self._optimizer = optimizer @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ raise NotImplementedError @property def params(self): """Return an iterable of the parameters held by the optimizer.""" for param_group in self.param_groups: for p in param_group["params"]: yield p @property def param_groups(self): return self.optimizer.param_groups def __getstate__(self): return self._optimizer.__getstate__() def get_lr(self): """Return the current learning rate.""" return self.param_groups[0]["lr"] def set_lr(self, lr): """Set the learning rate.""" for param_group in self.param_groups: param_group["lr"] = lr def state_dict(self): """Return the optimizer's state dict.""" return self.optimizer.state_dict() def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ self.optimizer.load_state_dict(state_dict) if optimizer_overrides is not None and len(optimizer_overrides) > 0: # override learning rate, momentum, etc. with latest values for group in self.param_groups: group.update(optimizer_overrides) def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. 
graph leaves.""" loss.backward() def all_reduce_grads(self, module): """Manually all-reduce gradients (if required).""" if hasattr(module, "all_reduce_grads"): module.all_reduce_grads() def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" for p in self.params: if p.grad is not None: if torch.is_tensor(c): c = c.to(p.grad.device) p.grad.data.mul_(c) def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm.""" return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn) def step(self, closure=None, scale=1.0, groups=None): """Performs a single optimization step.""" if self.supports_step_with_scale: if self.supports_groups: self.optimizer.step(closure, scale=scale, groups=groups) else: self.optimizer.step(closure, scale=scale) else: if scale != 1.0: self.multiply_grads(1.0 / scale) if self.supports_groups: self.optimizer.step(closure, groups=groups) else: self.optimizer.step(closure) def zero_grad(self): """Clears the gradients of all optimized parameters.""" for p in self.params: p.grad = None self.optimizer.zero_grad() @property def supports_memory_efficient_fp16(self): if hasattr(self.optimizer, "supports_memory_efficient_fp16"): return self.optimizer.supports_memory_efficient_fp16 return False @property def supports_step_with_scale(self): if hasattr(self.optimizer, "supports_step_with_scale"): return self.optimizer.supports_step_with_scale return False @property def supports_groups(self): if hasattr(self.optimizer, "supports_groups"): return self.optimizer.supports_groups return False @property def supports_flat_params(self): """ Whether the optimizer supports collapsing of the model parameters/gradients into a single contiguous Tensor. """ if hasattr(self.optimizer, "supports_flat_params"): return self.optimizer.supports_flat_params return False def average_params(self): pass def broadcast_global_state_dict(self, state_dict): """ Broadcasts a global state dict to all ranks. Useful for optimizers that shard state between ranks. """ if hasattr(self.optimizer, "broadcast_global_state_dict"): return self.optimizer.broadcast_global_state_dict(state_dict) else: return state_dict class LegacyFairseqOptimizer(FairseqOptimizer): def __init__(self, args): self.args = args
bart_ls-main
fairseq-py/fairseq/optim/fairseq_optimizer.py
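FairseqOptimizer is essentially a wrapper contract: a subclass only has to set self._optimizer to a torch.optim.Optimizer and expose optimizer_config. A minimal sketch of a LegacyFairseqOptimizer subclass modelled on sgd.py; the "rmsprop" registration name and its argparse flag are illustrative assumptions, not an existing fairseq optimizer.

# Sketch: the minimal wrapper contract, modelled on sgd.py (names are illustrative).
import torch.optim
from fairseq.optim import LegacyFairseqOptimizer, register_optimizer


@register_optimizer("rmsprop")
class FairseqRMSprop(LegacyFairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args)
        # the wrapped torch optimizer is all the base class needs
        self._optimizer = torch.optim.RMSprop(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float)

    @property
    def optimizer_config(self):
        # these values override whatever was stored in a checkpoint
        return {"lr": self.args.lr[0], "weight_decay": self.args.weight_decay}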
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

import importlib
import os

from fairseq import registry
from fairseq.optim.bmuf import FairseqBMUF  # noqa
from fairseq.optim.fairseq_optimizer import (  # noqa
    FairseqOptimizer,
    LegacyFairseqOptimizer,
)
from fairseq.optim.amp_optimizer import AMPOptimizer
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from fairseq.optim.shard import shard_
from omegaconf import DictConfig

__all__ = [
    "AMPOptimizer",
    "FairseqOptimizer",
    "FP16Optimizer",
    "MemoryEfficientFP16Optimizer",
    "shard_",
]

(
    _build_optimizer,
    register_optimizer,
    OPTIMIZER_REGISTRY,
    OPTIMIZER_DATACLASS_REGISTRY,
) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True)


def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs):
    if all(isinstance(p, dict) for p in params):
        params = [t for p in params for t in p.values()]
    params = list(filter(lambda p: p.requires_grad, params))
    return _build_optimizer(cfg, params, *extra_args, **extra_kwargs)


# automatically import any Python files in the optim/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        file_name = file[: file.find(".py")]
        importlib.import_module("fairseq.optim." + file_name)
bart_ls-main
fairseq-py/fairseq/optim/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adamax") class FairseqAdamax(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = Adamax(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer') parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--no-bias-correction', default=False, action='store_true', help='disable bias correction') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "betas": eval(self.args.adamax_betas), "eps": self.args.adamax_eps, "weight_decay": self.args.weight_decay, "bias_correction": not self.args.no_bias_correction, } class Adamax(torch.optim.Optimizer): """Implements Adamax algorithm (a variant of Adam based on infinity norm). It has been proposed in `Adam: A Method for Stochastic Optimization`__. Compared to the version in PyTorch, this version implements a fix for weight decay. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) bias_correction (bool, optional): enable bias correction (default: True) __ https://arxiv.org/abs/1412.6980 """ def __init__( self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, bias_correction=True, ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction, ) super(Adamax, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError("Adamax does not support sparse gradients") p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 state["exp_avg"] = torch.zeros_like(p_data_fp32) state["exp_inf"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_inf"] = state["exp_inf"].to(p_data_fp32) exp_avg, exp_inf = state["exp_avg"], state["exp_inf"] beta1, beta2 = group["betas"] eps = group["eps"] state["step"] += 1 # Update biased first moment estimate. exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # Update the exponentially weighted infinity norm. torch.max( exp_inf.mul_(beta2), grad.abs_(), out=exp_inf, ) step_size = group["lr"] if group["bias_correction"]: bias_correction = 1 - beta1 ** state["step"] step_size /= bias_correction if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
bart_ls-main
fairseq-py/fairseq/optim/adamax.py
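The Adamax class here is a drop-in torch.optim.Optimizer (with the weight-decay fix noted in its docstring), so it can also be used outside the fairseq wrapper. A small usage sketch, assuming fairseq is installed; model and hyperparameters are illustrative.

# Sketch: using the fairseq Adamax class directly.
import torch
import torch.nn as nn
from fairseq.optim.adamax import Adamax

model = nn.Linear(10, 2)
opt = Adamax(model.parameters(), lr=2e-3, betas=(0.9, 0.999), weight_decay=1e-4)

x, y = torch.randn(64, 10), torch.randn(64, 2)
for _ in range(50):
    opt.zero_grad()
    loss = ((model(x) - y) ** 2).mean()
    loss.backward()
    opt.step()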
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import defaultdict from itertools import chain import torch from fairseq import optim from omegaconf import DictConfig from .dynamic_loss_scaler import DynamicLossScaler class _FP16OptimizerMixin(object): def __init__(self, *args, **kwargs): # forward __init__ call to the next class in mro(method resolution order) super().__init__(*args, **kwargs) self._multiply_factor = 1.0 @property def has_flat_params(self): return torch.is_tensor(self.fp32_params) or ( isinstance(self.fp32_params, dict) and all(torch.is_tensor(t) for t in self.fp32_params.values()) ) @classmethod def build_fp32_params(cls, args, params, flatten=True): # create FP32 copy of parameters and grads if flatten: is_pipeline_parallel = getattr( args, "pipeline_model_parallel", False ) and getattr(args, "distributed_no_spawn", False) total_param_size = sum(p.data.numel() for p in params) devices = [torch.cuda.current_device()] if is_pipeline_parallel: devices = list(set(args.pipeline_devices)) fp32_params = {} for device in devices: if is_pipeline_parallel: device_param_size = sum( p.data.numel() for p in params if p.device.index == device ) device_params = [p for p in params if p.device.index == device] else: device_param_size = total_param_size device_params = params fp32_params[device] = ( device_params[0].new(0).float().new(device_param_size) ) offset = 0 for p in device_params: numel = p.data.numel() fp32_params[device][offset : offset + numel].copy_(p.data.view(-1)) offset += numel fp32_params[device] = torch.nn.Parameter(fp32_params[device]) fp32_params[device].grad = fp32_params[device].data.new( device_param_size ) return fp32_params else: fp32_params = [] for p in params: p32 = torch.nn.Parameter(p.data.float()) if hasattr(p, 'expert'): p32.expert = True elif hasattr(p, 'base_expert'): p32.base_expert = True p32.grad = torch.zeros_like(p32.data) if hasattr(p, "param_group"): p32.param_group = p.param_group fp32_params.append(p32) return fp32_params def state_dict(self): """Return the optimizer's state dict.""" state_dict = self.fp32_optimizer.state_dict() if self.scaler is not None: state_dict["loss_scale"] = self.scaler.loss_scale return state_dict def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ if "loss_scale" in state_dict and self.scaler is not None: self.scaler.loss_scale = state_dict["loss_scale"] self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides) def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. graph leaves. Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function additionally dynamically scales the loss to avoid gradient underflow. 
""" if self.scaler is not None: loss = self.scaler.scale(loss) loss.backward() self._needs_sync = True def _sync_fp16_grads_to_fp32(self): if self._needs_sync: # copy FP16 grads to FP32 if self.has_flat_params: devices = list(self.fp32_params.keys()) device_params_dict = defaultdict(list) for p in self.fp16_params: if p.requires_grad: device_params_dict[p.device.index].append(p) for device in devices: device_params = device_params_dict[device] offset = 0 for p in device_params: grad_data = ( p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape) ) numel = grad_data.numel() self.fp32_params[device].grad.data[ offset : offset + numel ].copy_(grad_data.view(-1)) offset += numel else: for p, p32 in zip(self.fp16_params, self.fp32_params): if not p.requires_grad: continue if p.grad is not None: if p32.grad is None: p32.grad = p.grad.data.float() else: p32.grad.data.copy_(p.grad.data) else: p32.grad = torch.zeros_like(p.data, dtype=torch.float) self._needs_sync = False def _sync_fp32_params_to_fp16(self): # copy FP32 params back into FP16 model if self.has_flat_params: devices = list(self.fp32_params.keys()) device_params_dict = defaultdict(list) for p in self.fp16_params: device_params_dict[p.device.index].append(p) for device in devices: device_params = device_params_dict[device] offset = 0 for p in device_params: numel = p.data.numel() p.data.copy_( self.fp32_params[device] .data[offset : offset + numel] .view_as(p.data) ) offset += numel else: for p, p32 in zip(self.fp16_params, self.fp32_params): if not p.requires_grad: continue p.data.copy_(p32.data) def _unscale_grads(self): self._sync_fp16_grads_to_fp32() if ( # Skip the multiplication if it's a no-op (i.e., if _multiply_factor # is 1.0). At the same time, we want to avoid the device-to-host # transfer by comparing it to 1.0. Since _multiply_factor starts as # a Python float, we roughly assume that if it's a tensor then it's # probably not =1.0 anymore and we do the multiplication. Otherwise # we can safely check the value without a D2H transfer. 
torch.is_tensor(self._multiply_factor) or self._multiply_factor != 1.0 ): self.fp32_optimizer.multiply_grads(self._multiply_factor) self._multiply_factor = 1.0 def multiply_grads(self, c): """Multiplies grads by a constant ``c``.""" self._multiply_factor *= c def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm and updates dynamic loss scaler.""" self._sync_fp16_grads_to_fp32() grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm( 0, aggregate_norm_fn ) if self.scaler is not None: if grad_norm > max_norm > 0.0: self._multiply_factor *= max_norm / grad_norm self.scaler.check_overflow(grad_norm) elif max_norm > 0.0: clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) self._multiply_factor *= clip_coef return grad_norm def step(self, closure=None, groups=None): """Performs a single optimization step.""" self._sync_fp16_grads_to_fp32() if getattr(self, "supports_step_with_scale", False): self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups) else: self._unscale_grads() self.fp32_optimizer.step(closure, groups=groups) if self.scaler is not None: self.scaler.update() self._sync_fp32_params_to_fp16() def zero_grad(self): """Clears the gradients of all optimized parameters.""" for p in self.fp16_params: p.grad = None if self.has_flat_params: if torch.is_tensor(self.fp32_params): self.fp32_params.grad.zero_() elif isinstance(self.fp32_params, dict): for fp32_params in self.fp32_params.values(): fp32_params.grad.zero_() else: raise RuntimeError("self.fp32_params must be a tensor or dict") else: for p32 in self.fp32_params: if p32.grad is not None: p32.grad.zero_() self._needs_sync = False if self.scaler is not None: self._multiply_factor = 1.0 / float(self.scaler.loss_scale) class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer): """ Wrap an *optimizer* to support FP16 (mixed precision) training. 
""" def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs): super().__init__(cfg.optimizer) self.fp16_params = params self.fp32_optimizer = fp32_optimizer self.fp32_params = fp32_params if getattr(cfg.common, "fp16_scale_window", None) is None: if len(cfg.optimization.update_freq) > 1: raise ValueError( "--fp16-scale-window must be given explicitly when using a " "custom --update-freq schedule" ) data_parallel_size = int( cfg.distributed_training.distributed_world_size / cfg.common.model_parallel_size ) scale_window = int( 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0] ) else: scale_window = cfg.common.fp16_scale_window if not getattr(cfg.common, "bf16", False): self.scaler = DynamicLossScaler( init_scale=cfg.common.fp16_init_scale, scale_window=scale_window, tolerance=cfg.common.fp16_scale_tolerance, threshold=cfg.common.threshold_loss_scale, min_loss_scale=cfg.common.min_loss_scale, ) else: # disable loss scaling for bfloat16 self.scaler = None @classmethod def build_optimizer(cls, cfg: DictConfig, params, **kwargs): """ Args: cfg (omegaconf.DictConfig): fairseq args params (iterable): iterable of parameters to optimize """ flatten = not getattr(cfg.common, "fp16_no_flatten_grads", False) if getattr(cfg.common, "bf16", False): flatten = False # mixed precision is faster on TPUs without flat grads fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten) if flatten: fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params]) else: fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params) if flatten and not fp32_optimizer.supports_flat_params: raise RuntimeError( f"chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads" ) return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs) @property def optimizer(self): return self.fp32_optimizer.optimizer @optimizer.setter def optimizer(self, optimizer): self.fp32_optimizer.optimizer = optimizer @property def lr_scheduler(self): return getattr(self.fp32_optimizer, "lr_scheduler", None) @property def optimizer_config(self): return self.fp32_optimizer.optimizer_config def get_lr(self): return self.fp32_optimizer.get_lr() def set_lr(self, lr): self.fp32_optimizer.set_lr(lr) def all_reduce_grads(self, module): self.fp32_optimizer.all_reduce_grads(module) @property def supports_flat_params(self): return self.fp32_optimizer.supports_flat_params class _MemoryEfficientFP16OptimizerMixin(object): def __init__(self, *args, **kwargs): # forward __init__ call to the next class in MRO (method resolution order) super().__init__(*args, **kwargs) self._multiply_factor = 1.0 @property def has_flat_params(self): return False def state_dict(self): """Return the optimizer's state dict.""" state_dict = self.wrapped_optimizer.state_dict() if self.scaler is not None: state_dict["loss_scale"] = self.scaler.loss_scale return state_dict def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. 
""" if "loss_scale" in state_dict and self.scaler is not None: self.scaler.loss_scale = state_dict["loss_scale"] self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides) # Hack: PyTorch automatically casts the optimizer state to match the # type of the current parameters. But with --memory-efficient-fp16 the # params are FP16 while the optimizer state is FP32 and we don't want # to cast. A workaround is to manually copy back the original state # after the optimizer has been loaded. if not getattr(self.optimizer, "disable_mem_eff_fp16_loading_hack", False): groups = self.optimizer.param_groups saved_groups = state_dict["param_groups"] id_map = { old_id: p for old_id, p in zip( chain(*(g["params"] for g in saved_groups)), chain(*(g["params"] for g in groups)), ) } for k, v in state_dict["state"].items(): if k in id_map: param = id_map[k] self.optimizer.state[param] = v def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. graph leaves. Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function additionally dynamically scales the loss to avoid gradient underflow. """ if self.scaler is not None: loss = self.scaler.scale(loss) loss.backward() def _unscale_grads(self): if ( # Skip the multiplication if it's a no-op (i.e., if _multiply_factor # is 1.0). At the same time, we want to avoid the device-to-host # transfer by comparing it to 1.0. Since _multiply_factor starts as # a Python float, we roughly assume that if it's a tensor then it's # probably not =1.0 anymore and we do the multiplication. Otherwise # we can safely check the value without a D2H transfer. torch.is_tensor(self._multiply_factor) or self._multiply_factor != 1.0 ): self.wrapped_optimizer.multiply_grads(self._multiply_factor) self._multiply_factor = 1.0 def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" self._multiply_factor *= c def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm and updates dynamic loss scaler.""" max_norm = float(max_norm) grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm( 0, aggregate_norm_fn ) if self.scaler is not None: grad_norm_cpu = float(grad_norm) if grad_norm_cpu > max_norm > 0.0: self._multiply_factor *= max_norm / grad_norm_cpu # detect overflow and adjust loss scale self.scaler.check_overflow(grad_norm_cpu) elif max_norm > 0.0: clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) self._multiply_factor *= clip_coef return grad_norm def step(self, closure=None, groups=None): """Performs a single optimization step.""" if getattr(self, "supports_step_with_scale", False): # NOTE(msb) optimizer divides by scale factor self.wrapped_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups) else: self._unscale_grads() self.wrapped_optimizer.step(closure, groups=groups) if self.scaler is not None: self.scaler.update() def zero_grad(self): """Clears the gradients of all optimized parameters.""" self.wrapped_optimizer.zero_grad() if self.scaler is not None: self._multiply_factor = 1.0 / float(self.scaler.loss_scale) else: self._multiply_factor = 1.0 @property def supports_flat_params(self): return self.wrapped_optimizer.supports_flat_params class MemoryEfficientFP16Optimizer( _MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer ): """ Wrap an *optimizer* to support FP16 (mixed precision) training. Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not maintain an FP32 copy of the model. 
We instead expect the optimizer to convert the gradients to FP32 internally and sync the results back to the FP16 model params. This significantly reduces memory usage but slightly increases the time spent in the optimizer. Since this wrapper depends on specific functionality in the wrapped optimizer (i.e., on-the-fly conversion of grads to FP32), only certain optimizers can be wrapped. This is determined by the *supports_memory_efficient_fp16* property. """ def __init__( self, cfg: DictConfig, params, optimizer, allow_unsupported=False, **kwargs ): if not allow_unsupported and not optimizer.supports_memory_efficient_fp16: raise ValueError( "Unsupported optimizer: {}".format(optimizer.__class__.__name__) ) super().__init__(getattr(cfg, "optimizer", None)) self.wrapped_optimizer = optimizer if getattr(cfg.common, "fp16_scale_window", None) is None: if len(cfg.optimization.update_freq) > 1: raise ValueError( "--fp16-scale-window must be given explicitly when using a " "custom --update-freq schedule" ) data_parallel_size = int( cfg.distributed_training.distributed_world_size / cfg.common.model_parallel_size ) scale_window = int( 2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0] ) else: scale_window = cfg.common.fp16_scale_window if not getattr(cfg.common, "bf16", False): self.scaler = DynamicLossScaler( init_scale=cfg.common.fp16_init_scale, scale_window=scale_window, tolerance=cfg.common.fp16_scale_tolerance, threshold=cfg.common.threshold_loss_scale, min_loss_scale=cfg.common.min_loss_scale, ) else: # disable loss scaling for bfloat16 self.scaler = None @classmethod def build_optimizer(cls, cfg: DictConfig, params, **kwargs): """ Args: args (argparse.Namespace): fairseq args params (iterable): iterable of parameters to optimize """ fp16_optimizer = optim.build_optimizer(cfg.optimizer, params) return cls(cfg, params, fp16_optimizer, **kwargs) @property def optimizer(self): return self.wrapped_optimizer.optimizer @optimizer.setter def optimizer(self, optimizer): self.wrapped_optimizer.optimizer = optimizer @property def optimizer_config(self): return self.wrapped_optimizer.optimizer_config @property def lr_scheduler(self): return getattr(self.wrapped_optimizer, "lr_scheduler", None) def get_lr(self): return self.wrapped_optimizer.get_lr() def set_lr(self, lr): self.wrapped_optimizer.set_lr(lr) def all_reduce_grads(self, module): self.wrapped_optimizer.all_reduce_grads(module)
bart_ls-main
fairseq-py/fairseq/optim/fp16_optimizer.py
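A minimal, self-contained sketch (not part of the fairseq sources above) of the dynamic loss-scaling loop that FP16Optimizer and MemoryEfficientFP16Optimizer implement: scale the loss before backward, fold the inverse scale into a multiplicative factor before the step, and back off the scale when gradients overflow. All values and the toy model are illustrative.

# Sketch only: generic dynamic loss scaling with plain PyTorch.
import torch

model = torch.nn.Linear(8, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss_scale, backoff, growth_interval = 2.0 ** 7, 0.5, 4
good_steps = 0

for step in range(20):
    opt.zero_grad()
    x = torch.randn(16, 8)
    loss = model(x).pow(2).mean()
    (loss * loss_scale).backward()            # scaled backward pass

    grads_finite = all(torch.isfinite(p.grad).all() for p in model.parameters())
    if not grads_finite:
        loss_scale *= backoff                 # overflow: shrink scale, skip step
        good_steps = 0
        continue

    for p in model.parameters():              # "unscale" before the update
        p.grad.mul_(1.0 / loss_scale)
    opt.step()

    good_steps += 1
    if good_steps % growth_interval == 0:     # grow scale after a stable window
        loss_scale *= 2.0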
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adagrad") class Adagrad(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return False
bart_ls-main
fairseq-py/fairseq/optim/adagrad.py
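Illustrative only: the wrapper above forwards its optimizer_config dict as keyword arguments to torch.optim.Adagrad. The `args` object here is a hypothetical stand-in for the parsed fairseq arguments.

import torch
from types import SimpleNamespace

args = SimpleNamespace(lr=[0.01], weight_decay=0.0)   # placeholder for parsed args
params = [torch.nn.Parameter(torch.zeros(4))]

optimizer_config = {"lr": args.lr[0], "weight_decay": args.weight_decay}
opt = torch.optim.Adagrad(params, **optimizer_config)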
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict from fairseq.distributed import utils try: from fairscale.optim import OSS _has_fairscale = True except ImportError: _has_fairscale = False def shard_(optimizer, group): if not _has_fairscale: raise ImportError( "\n\nPlease install the fairscale package:" "\n\n pip install fairscale" ) class FairseqOSS(OSS): @property def disable_mem_eff_fp16_loading_hack(self): return True def __getattr__(self, name): if name.startswith("supports") and hasattr(self.optim, name): return getattr(self.optim, name) raise AttributeError( "'FairseqOSS' object has no attribute {0!r}".format(name) ) def broadcast_global_state_dict( self, state_dict: Dict[str, Any] ) -> Dict[str, Any]: """ Broadcasts the entire state_dict to all other ranks each rank is responsible to load their own partition of data """ return utils.broadcast_object( state_dict, src_rank=0, group=self.group, ) torch_optimizer = optimizer.optimizer optim_cls = type(torch_optimizer) optimizer.optimizer = FairseqOSS( torch_optimizer.param_groups, optim_cls, group=group, **optimizer.optimizer_config )
bart_ls-main
fairseq-py/fairseq/optim/shard.py
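A conceptual, single-process sketch of what optimizer-state sharding (fairscale's OSS, wrapped above) buys you: each rank owns a subset of the parameters and only materializes Adam state for that subset. This is an illustration of the idea, not fairscale's implementation, and the round-robin assignment is an assumption for the example.

# Conceptual sharding sketch, not the real OSS.
import torch

params = [torch.nn.Parameter(torch.zeros(n)) for n in (10, 20, 30, 40)]
world_size = 2

# round-robin assignment of parameters to ranks
shards = {rank: [p for i, p in enumerate(params) if i % world_size == rank]
          for rank in range(world_size)}

# each "rank" builds an optimizer over its shard only, so exp_avg/exp_avg_sq
# memory is split roughly world_size ways
per_rank_optimizers = {rank: torch.optim.Adam(shard, lr=1e-3)
                       for rank, shard in shards.items()}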
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib from collections.abc import Collection from dataclasses import dataclass, field from typing import List import torch from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer from omegaconf import II, DictConfig try: import deepspeed has_deepspeed = True except ImportError as e: has_deepspeed = False def _get_cpu_adam(): try: from deepspeed.ops.op_builder import CPUAdamBuilder return CPUAdamBuilder().load() except ImportError: # fbcode from deepspeed.ops.adam import DeepSpeedCPUAdam as ds_opt_adam return ds_opt_adam @dataclass class FairseqCPUAdamConfig(FairseqDataclass): adam_betas: str = field( default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"} ) adam_eps: float = field( default=1e-8, metadata={"help": "epsilon for Adam optimizer"} ) weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) fp16_adam_stats: bool = field( default=False, metadata={"help": "use FP16 stats (with automatic scaling)"} ) # TODO common vars below in parent lr: List[float] = II("optimization.lr") @register_optimizer("cpu_adam", dataclass=FairseqCPUAdamConfig) class FairseqCPUAdam(FairseqOptimizer): """Adam optimizer for fairseq, optimized for CPU tensors. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, cfg: DictConfig, params): super().__init__(cfg) self._optimizer = CPUAdam(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. 
""" return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "betas": eval(self.cfg.adam_betas), "eps": self.cfg.adam_eps, "weight_decay": self.cfg.weight_decay, "use_fp16_stats": self.cfg.fp16_adam_stats, } class CPUAdam(torch.optim.Optimizer): optimizer_id = 0 def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, use_fp16_stats=False, ): defaults = { "lr": lr, "bias_correction": bias_correction, "betas": betas, "eps": eps, "weight_decay": weight_decay, } super().__init__(params, defaults) self.use_fp16_stats = use_fp16_stats self.FLOAT16_MAX = 65504.0 if not has_deepspeed: raise ImportError("Please install DeepSpeed: pip install deepspeed") self.opt_id = CPUAdam.optimizer_id CPUAdam.optimizer_id = CPUAdam.optimizer_id + 1 self.ds_opt_adam = _get_cpu_adam() adamw_mode = True self.ds_opt_adam.create_adam( self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode ) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() torch.cuda.synchronize() for group_id, group in enumerate(self.param_groups): for param_id, p in enumerate(group["params"]): if p.grad is None: continue state = self.state[p] if len(state) == 0: state["step"] = 0 dtype = torch.float16 if self.use_fp16_stats else p.data.dtype # gradient momentums state["exp_avg"] = torch.zeros_like( p.data, dtype=dtype, device="cpu" ) # gradient variances state["exp_avg_sq"] = torch.zeros_like( p.data, dtype=dtype, device="cpu" ) if self.use_fp16_stats: assert torch.is_floating_point(p.data) state["exp_avg_scale"] = 1.0 state["exp_avg_sq_scale"] = 1.0 exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] p_data_bak = p.data # backup of the original data pointer p.data = p.data.to(dtype=torch.float32, device="cpu") p.grad.data = p.grad.data.to(dtype=torch.float32, device="cpu") if self.use_fp16_stats: exp_avg = exp_avg.float() * state["exp_avg_scale"] exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"] state["step"] += 1 beta1, beta2 = group["betas"] self.ds_opt_adam.adam_update( self.opt_id, state["step"], group["lr"], beta1, beta2, group["eps"], group["weight_decay"], group["bias_correction"], p.data, p.grad.data, exp_avg, exp_avg_sq, ) if p_data_bak.data_ptr() != p.data.data_ptr(): p_data_bak.copy_(p.data) p.data = p_data_bak if self.use_fp16_stats: def inf_norm(t): return torch.norm(t, float("inf")) # from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py state["exp_avg_scale"], state["exp_avg_sq_scale"] = ( 1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX, 1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX, ) state["exp_avg"], state["exp_avg_sq"] = ( (exp_avg / state["exp_avg_scale"]).half(), (exp_avg_sq / state["exp_avg_sq_scale"]).half(), ) return loss
bart_ls-main
fairseq-py/fairseq/optim/cpu_adam.py
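A small sketch of the `use_fp16_stats` trick used by CPUAdam above: Adam's moments are stored in float16 together with a per-tensor scale chosen so the inf-norm of the stored tensor fits in the float16 range (65504). Values are illustrative and this is plain PyTorch, not the DeepSpeed kernel.

import torch

FLOAT16_MAX = 65504.0
exp_avg = torch.randn(1000) * 1e6             # fp32 moment with large magnitude

scale = 1e-8 + exp_avg.abs().max() / FLOAT16_MAX
stored = (exp_avg / scale).half()             # what would live in the optimizer state

recovered = stored.float() * scale            # de-quantize before the update
print((recovered - exp_avg).abs().max() / exp_avg.abs().max())  # small relative error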
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math from collections.abc import Collection from dataclasses import dataclass, field from typing import Any, List import torch import torch.distributed as dist import torch.optim from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer from fairseq.optim.fused_adam import get_fused_adam_class from omegaconf import II, OmegaConf logger = logging.getLogger(__name__) @dataclass class FairseqAdamConfig(FairseqDataclass): adam_betas: Any = field( default=(0.9, 0.999), metadata={"help": "betas for Adam optimizer"} ) adam_eps: float = field( default=1e-8, metadata={"help": "epsilon for Adam optimizer"} ) weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) use_old_adam: bool = field( default=False, metadata={"help": "Use fairseq.optim.adam.Adam"} ) fp16_adam_stats: bool = field( default=False, metadata={"help": "use FP16 stats (with automatic scaling)"} ) # TODO common vars below in parent tpu: bool = II("common.tpu") lr: List[float] = II("optimization.lr") @register_optimizer("adam", dataclass=FairseqAdamConfig) class FairseqAdam(FairseqOptimizer): """Adam optimizer for fairseq. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, cfg: FairseqAdamConfig, params): super().__init__(cfg) fused_adam_cls = get_fused_adam_class() use_fused_adam = ( not getattr(cfg, "use_old_adam", False) and fused_adam_cls is not None and torch.cuda.is_available() ) if getattr(cfg, "tpu", False): if self.cfg.fp16_adam_stats: raise NotImplementedError("--fp16-adam-stats is only supported on GPU") # on TPUs we use the Adam defined here, since it # automatically casts gradients to FP32 self._optimizer = Adam(params, **self.optimizer_config) elif use_fused_adam: logger.info("using FusedAdam") self._optimizer = fused_adam_cls( params, use_fp16_stats=self.cfg.fp16_adam_stats, **self.optimizer_config ) else: if self.cfg.fp16_adam_stats: raise NotImplementedError("--fp16-adam-stats is only supported with FusedAdamV1") self._optimizer = Adam(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "betas": eval(self.cfg.adam_betas) if isinstance(self.cfg.adam_betas, str) else OmegaConf.to_container(self.cfg.adam_betas), "eps": self.cfg.adam_eps, "weight_decay": self.cfg.weight_decay, } def average_params(self): """Reduce Params is only used during BMUF distributed training.""" state_dict = self.optimizer.state_dict() total_gpus = float(dist.get_world_size()) for _, value in state_dict["state"].items(): value["exp_avg"] /= total_gpus value["exp_avg_sq"] /= total_gpus dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) class Adam(torch.optim.Optimizer): r"""Implements Adam algorithm. 
This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, ): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad ) super(Adam, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError( "Adam does not support sparse gradients, please consider SparseAdam instead" ) amsgrad = group.get("amsgrad", False) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) if amsgrad: state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to( p_data_fp32 ) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] if amsgrad: max_exp_avg_sq = state["max_exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. 
of gradient denom = max_exp_avg_sq.sqrt().add_(group["eps"]) else: denom = exp_avg_sq.sqrt().add_(group["eps"]) bias_correction1 = 1 - beta1 ** state["step"] bias_correction2 = 1 - beta2 ** state["step"] step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1 if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
bart_ls-main
fairseq-py/fairseq/optim/adam.py
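One hand-rolled Adam step for a single tensor, following the update in the class above (bias-corrected step size, decoupled "AdamW"-style weight decay). This is an illustrative sketch with made-up values, not a drop-in replacement for the optimizer.

import math
import torch

p = torch.randn(5)
grad = torch.randn(5)
exp_avg = torch.zeros(5)
exp_avg_sq = torch.zeros(5)
lr, (beta1, beta2), eps, wd, step = 1e-3, (0.9, 0.999), 1e-8, 0.01, 1

exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(eps)

bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
step_size = lr * math.sqrt(bias_correction2) / bias_correction1

p.add_(p, alpha=-wd * lr)                     # decoupled ("AdamW") weight decay
p.addcdiv_(exp_avg, denom, value=-step_size)  # parameter update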
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from collections import defaultdict from dataclasses import dataclass, field from typing import Dict, Any, List, Optional import torch.optim from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer, _build_optimizer from fairseq.optim.lr_scheduler import FairseqLRScheduler, build_lr_scheduler from omegaconf import II, open_dict logger = logging.getLogger(__name__) @dataclass class OptimizerAndSchedulerConfig(FairseqDataclass): optimizer: Any = None lr_scheduler: Optional[Any] = None lr: List = II("optimization.lr") lr_float: Optional[float] = None # this makes it easier to sweep on learning rate with auto sweepers @dataclass class CompositeOptimizerConfig(FairseqDataclass): groups: Dict[str, Any] = field( default_factory=lambda: {}, metadata={ "help": "optimizer name -> optimizer OptimizerAndSchedulerConfig. " "Configures a different optimizer and (optionally) lr scheduler for each parameter group" }, ) @register_optimizer("composite", dataclass=CompositeOptimizerConfig) class FairseqCompositeOptimizer(FairseqOptimizer): optimizers: Dict[str, FairseqOptimizer] = {} lr_schedulers: Dict[str, FairseqLRScheduler] = {} lr_scheduler: FairseqLRScheduler = None _optimizer: torch.optim.Optimizer def __init__(self, cfg: CompositeOptimizerConfig, params): super().__init__(cfg) assert ( len(params) > 1 ), "Composite optimizer only works when there are multiple parameter groups (try fp16_no_flatten_grads: true)" groupped_params = defaultdict(list) for p in params: group = getattr(p, "param_group", "default") groupped_params[group].append(p) assert groupped_params.keys() == cfg.groups.keys(), ( f"Parameter groups {groupped_params.keys()} and optimizer groups {cfg.groups.keys()} are not the same! " "Try setting 'param_group' on your parameters in the model." ) for group, group_params in groupped_params.items(): group_cfg = cfg.groups[group] with open_dict(group_cfg): if group_cfg.lr_float is not None: group_cfg.optimizer.lr = [group_cfg.lr_float] group_cfg.lr_scheduler.lr = [group_cfg.lr_float] else: group_cfg.optimizer.lr = group_cfg.lr group_cfg.lr_scheduler.lr = group_cfg.lr self.optimizers[group] = _build_optimizer(group_cfg.optimizer, group_params) if group_cfg.lr_scheduler is not None: self.lr_schedulers[group] = build_lr_scheduler( group_cfg.lr_scheduler, self.optimizers[group] ) if len(self.lr_schedulers) > 0: assert len(self.lr_schedulers) == len(self.optimizers), ( f"Please provide an lr scheduler for each optimizer to use pass_through scheduler. 
" f"Optimizers: {self.optimizers}; Lr scheds: {self.lr_schedulers}" ) self.lr_scheduler = CompositeLRScheduler(self.lr_schedulers) self._optimizer = CompositeOptimizer(self.optimizers) @property def supports_groups(self): return True @property def param_groups(self): for opt in self.optimizers.values(): for group in opt.param_groups: yield group def get_lr(self): """Return the current learning rate.""" k = ( "default" if "default" in self.optimizers else next(iter(self.optimizers.keys())) ) return self.optimizers[k].param_groups[0]["lr"] def state_dict(self): """Return the LR scheduler state dict.""" return {k: s.state_dict() for k, s in self.optimizers.items()} def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an LR scheduler state dict.""" for k, state in state_dict.items(): if k not in self.optimizers: # skip extra keys like "loss_scale" added by fp16 optimizer continue overrides = ( optimizer_overrides[k] if isinstance(optimizer_overrides, dict) and k in optimizer_overrides else None ) self.optimizers[k].load_state_dict(state, optimizer_overrides=overrides) class CompositeOptimizer(torch.optim.Optimizer): def __init__(self, optimizers: Dict[str, FairseqOptimizer]): self.optimizers = optimizers @property def supports_memory_efficient_fp16(self): return all(o.supports_memory_efficient_fp16 for o in self.optimizers.values()) @property def supports_flat_params(self): return all(o.supports_flat_params for o in self.optimizers.values()) def step(self, closure=None, groups=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for k, opt in self.optimizers.items(): if groups is None or k in groups: opt.step() return loss def zero_grad(self): for opt in self.optimizers.values(): opt.zero_grad() class CompositeLRScheduler(FairseqLRScheduler): def __init__(self, lr_schedulers): super().__init__(None, None) self.lr_schedulers = lr_schedulers def state_dict(self): """Return the LR scheduler state dict.""" return {k: s.state_dict() for k, s in self.lr_schedulers.items()} def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" for k, state in state_dict.items(): self.lr_schedulers[k].load_state_dict(state) def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" for s in self.lr_schedulers.values(): s.step_begin_epoch(epoch) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" for s in self.lr_schedulers.values(): s.step(epoch) def step_update(self, num_updates): """Update the learning rate after each update.""" return {k: s.step_update(num_updates) for k, s in self.lr_schedulers.items()}
bart_ls-main
fairseq-py/fairseq/optim/composite.py
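The composite optimizer above groups parameters by a `param_group` attribute attached to each parameter. A minimal sketch of that tagging and grouping step; the group names are illustrative.

from collections import defaultdict
import torch

encoder_w = torch.nn.Parameter(torch.zeros(4))
decoder_w = torch.nn.Parameter(torch.zeros(4))
encoder_w.param_group = "encoder"             # parameters without the attribute
                                              # fall into the "default" group
grouped = defaultdict(list)
for p in (encoder_w, decoder_w):
    grouped[getattr(p, "param_group", "default")].append(p)

print(sorted(grouped.keys()))                 # ['default', 'encoder']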
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.optim import LegacyFairseqOptimizer, register_optimizer @register_optimizer("lamb") class FairseqLAMB(LegacyFairseqOptimizer): """LAMB optimizer.""" def __init__(self, args, params): super().__init__(args) try: from apex.optimizers import FusedLAMB self._optimizer = FusedLAMB(params, **self.optimizer_config) except ImportError: raise ImportError("Please install apex to use LAMB optimizer") @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B', help='betas for LAMB optimizer') parser.add_argument('--lamb-eps', type=float, default=1e-8, metavar='D', help='epsilon for LAMB optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "betas": eval(self.args.lamb_betas), "eps": self.args.lamb_eps, "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return False
bart_ls-main
fairseq-py/fairseq/optim/fused_lamb.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adadelta") class Adadelta(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients') parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', help='term added to the denominator to improve numerical stability') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "rho": self.args.adadelta_rho, "eps": self.args.adadelta_eps, "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return True
bart_ls-main
fairseq-py/fairseq/optim/adadelta.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class PassThroughScheduleConfig(FairseqDataclass): pass @register_lr_scheduler("pass_through", dataclass=PassThroughScheduleConfig) class PassThroughScheduleSchedule(FairseqLRScheduler): """Delegate lr scheduling to the optimizer.""" def __init__(self, cfg: PassThroughScheduleConfig, optimizer): super().__init__(cfg, optimizer) assert ( hasattr(optimizer, "lr_scheduler") and optimizer.lr_scheduler is not None ), "Pass-through schedule can only be used with optimizers with their own schedulers" def state_dict(self): return self.optimizer.lr_scheduler.state_dict() def load_state_dict(self, state_dict): self.optimizer.lr_scheduler.load_state_dict(state_dict) def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" return self.optimizer.lr_scheduler.step_begin_epoch(epoch) def step_update(self, num_updates): """Update the learning rate after each update.""" return self.optimizer.lr_scheduler.step_update(num_updates)
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/pass_through.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import LegacyFairseqLRScheduler, register_lr_scheduler
import logging
import ast

logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)


@register_lr_scheduler("manual")
class ManualSchedule(LegacyFairseqLRScheduler):
    """Decay the LR on a manual schedule."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)

        self.epoch2lr = self.parse_manuallr_args(args.epoch2lr)
        self.update2lr = self.parse_manuallr_args(args.update2lr)
        logger.info("@@@ ManualSchedule epoch2lr={}".format(self.epoch2lr))
        logger.info("@@@ ManualSchedule update2lr={}".format(self.update2lr))

        if 1 in self.epoch2lr:
            self.lr = self.epoch2lr[1]
        elif 1 in self.update2lr:
            self.lr = self.update2lr[1]
        else:
            self.lr = args.lr[0]
        self.optimizer.set_lr(self.lr)  # Set the beginning of the epoch.

    def parse_manuallr_args(self, lr_args_str):
        lr_dict = ast.literal_eval(lr_args_str.replace(' ', ''))
        if not isinstance(lr_dict, dict):
            raise ValueError("epoch2lr/update2lr must evaluate to a dict")

        lr_args = {}
        logger.info("@@@ after parsing input dictionary lr_dict = {}".format(lr_dict))
        for key, val in lr_dict.items():
            if "," in key:
                for k in key.split(","):
                    lr_args[int(k)] = float(val)
            elif "-" in key:
                s = int(key.split("-")[0])
                e = int(key.split("-")[1])
                for k in range(s, e + 1, 1):
                    lr_args[k] = float(val)
            else:
                lr_args[int(key)] = float(val)

        return lr_args

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument(
            "--epoch2lr",
            type=str,
            metavar="DICT",
            default="{}",
            help="a dictionary used to set lr for each epoch manually",
        )
        parser.add_argument(
            "--update2lr",
            type=str,
            metavar="DICT",
            default="{}",
            help="a dictionary used to set lr for each update manually",
        )
        # fmt: on

    def state_dict(self):
        return {"lr": self.lr}

    def load_state_dict(self, state_dict):
        if "lr" in state_dict:
            self.lr = state_dict["lr"]

    def get_next_lr(self, epoch):
        manual_keys = [k for k in self.epoch2lr if k <= epoch]
        if manual_keys:
            manual_lr = self.epoch2lr[max(manual_keys)]
        else:
            logger.warning("@@@ epoch={} does not exist in manual lr input. epoch2lr={}...".format(
                epoch, list(self.epoch2lr.items())[:min(10, len(self.epoch2lr.keys())-1)]
            ))
            manual_lr = self.optimizer.get_lr()
        return manual_lr

    def step_begin_epoch(self, epoch):
        """Update the learning rate at the beginning of the given epoch."""
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        manual_keys = [k for k in self.update2lr if k <= num_updates]
        if manual_keys:
            manual_lr = self.update2lr[max(manual_keys)]
        else:
            logger.warning("update={} does not exist in manual lr input update2lr={}...".format(
                num_updates, list(self.update2lr.items())[:min(10, len(self.update2lr.keys())-1)]))
            manual_lr = self.optimizer.get_lr()

        self.optimizer.set_lr(manual_lr)
        return self.optimizer.get_lr()
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/manual_lr_scheduler.py
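What parse_manuallr_args above produces for a typical --epoch2lr string: comma-separated keys and "start-end" ranges expand to one entry per epoch. A small standalone sketch with an illustrative spec string.

import ast

spec = "{'1,2': 0.01, '3-5': 0.005, '6': 0.001}"
lr_dict = ast.literal_eval(spec.replace(" ", ""))

epoch2lr = {}
for key, val in lr_dict.items():
    if "," in key:
        for k in key.split(","):
            epoch2lr[int(k)] = float(val)
    elif "-" in key:
        start, end = (int(x) for x in key.split("-"))
        for k in range(start, end + 1):
            epoch2lr[int(k)] = float(val)
    else:
        epoch2lr[int(key)] = float(val)

print(epoch2lr)   # {1: 0.01, 2: 0.01, 3: 0.005, 4: 0.005, 5: 0.005, 6: 0.001}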
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional, List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class FixedLRScheduleConfig(FairseqDataclass): force_anneal: Optional[int] = field( default=None, metadata={"help": "force annealing at specified epoch"}, ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing, lr_new = (lr * lr_shrink)"}, ) warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("fixed", dataclass=FixedLRScheduleConfig) class FixedLRSchedule(FairseqLRScheduler): """Decay the LR on a fixed schedule.""" def __init__(self, cfg: FixedLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) self.lr = cfg.lr[0] if cfg.warmup_updates > 0: self.warmup_factor = 1.0 / cfg.warmup_updates else: self.warmup_factor = 1 def state_dict(self): return {"lr": self.lr} def load_state_dict(self, state_dict): if "lr" in state_dict: self.lr = state_dict["lr"] def get_next_lr(self, epoch): lrs = self.cfg.lr if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal: # use fixed LR schedule next_lr = lrs[min(epoch - 1, len(lrs) - 1)] else: # annneal based on lr_shrink next_lr = lrs[-1] * self.cfg.lr_shrink ** ( epoch + 1 - self.cfg.force_anneal ) return next_lr def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" self.lr = self.get_next_lr(epoch) self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if self.cfg.warmup_updates > 0 and num_updates < self.cfg.warmup_updates: self.warmup_factor = (num_updates + 1) / float(self.cfg.warmup_updates) self.optimizer.set_lr(self.warmup_factor * self.lr) else: self.optimizer.set_lr(self.lr) return self.optimizer.get_lr()
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/fixed_schedule.py
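A numeric sketch of the "fixed" schedule above: take per-epoch values from the lr list until force_anneal, then shrink the last value geometrically. The constants are illustrative.

lrs, lr_shrink, force_anneal = [0.1, 0.05, 0.01], 0.1, 5

def fixed_lr(epoch):
    if epoch < force_anneal:
        return lrs[min(epoch - 1, len(lrs) - 1)]
    return lrs[-1] * lr_shrink ** (epoch + 1 - force_anneal)

print([round(fixed_lr(e), 6) for e in range(1, 8)])
# ≈ [0.1, 0.05, 0.01, 0.01, 0.001, 0.0001, 1e-05]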
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import List import torch.optim.lr_scheduler from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass): lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) lr_threshold: float = field( default=1e-4, metadata={ "help": ( "threshold for measuring the new optimum, to only focus on " "significant changes" ) }, ) lr_patience: int = field( default=0, metadata={ "help": ( "number of epochs with no improvement after which learning rate will " "be reduced" ) }, ) warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = II("optimization.lr") maximize_best_checkpoint_metric: bool = II( "checkpoint.maximize_best_checkpoint_metric" ) @register_lr_scheduler( "reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig ) class ReduceLROnPlateauLRSchedule(FairseqLRScheduler): """ Decay the LR by a factor every time the validation loss plateaus. Also comes with optional warmup phase, where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter the lr is adjusted according to original reduce_on_plateau scheme. During warmup:: lrs = torch.linspace( cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates ) lr = lrs[update_num] """ def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with reduce_lr_on_plateau." " Consider --lr-scheduler=fixed instead." 
) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer.optimizer, patience=cfg.lr_patience, factor=cfg.lr_shrink, mode="max" if cfg.maximize_best_checkpoint_metric else "min", threshold=cfg.lr_threshold, ) warmup_end_lr = cfg.lr[0] # if no warm up, sets initial lr to be cfg.lr[0] if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first cfg.warmup_updates if cfg.warmup_updates > 0: self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates # this flag is either set from arg when no warm up, or set by # step_update() when warmup finishes self.warmup_end = True if cfg.warmup_updates <= 0 else False # initial learning rate # this self.lr is used only during init and/or warm up period self.lr = warmup_end_lr if self.warmup_end else cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def state_dict(self): """Return the LR scheduler state dict.""" return { "best": self.lr_scheduler.best, "last_epoch": self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.lr_scheduler.best = state_dict["best"] if "last_epoch" in state_dict: self.lr_scheduler.last_epoch = state_dict["last_epoch"] def step(self, epoch, val_loss=None): """ Update the learning rate at the end of the given epoch if warmup finishes otherwise no update of lr on epoch boundaries """ if val_loss is not None and self.warmup_end is True: self.lr_scheduler.step(val_loss) else: self.lr_scheduler.last_epoch = epoch return self.optimizer.get_lr() def step_update(self, num_updates): """ Update the learning rate after each update.""" # if there is warmup if self.cfg.warmup_updates > 0: if num_updates <= self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step self.optimizer.set_lr(self.lr) else: if self.warmup_end is False: self.warmup_end = True # else do nothing return self.optimizer.get_lr()
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
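Once warmup is over, the scheduler above delegates entirely to torch.optim.lr_scheduler.ReduceLROnPlateau. A minimal plain-PyTorch sketch of that plateau behaviour, with illustrative hyperparameters.

import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
plateau = torch.optim.lr_scheduler.ReduceLROnPlateau(
    opt, mode="min", factor=0.1, patience=0, threshold=1e-4
)

for val_loss in [1.0, 0.9, 0.9, 0.9]:          # loss stops improving
    plateau.step(val_loss)
    print(opt.param_groups[0]["lr"])           # lr shrinks once the metric plateaus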
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import importlib import os from fairseq import registry from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( # noqa FairseqLRScheduler, LegacyFairseqLRScheduler, ) from omegaconf import DictConfig ( build_lr_scheduler_, register_lr_scheduler, LR_SCHEDULER_REGISTRY, LR_SCHEDULER_DATACLASS_REGISTRY, ) = registry.setup_registry( "--lr-scheduler", base_class=FairseqLRScheduler, default="fixed" ) def build_lr_scheduler(cfg: DictConfig, optimizer): return build_lr_scheduler_(cfg, optimizer) # automatically import any Python files in the optim/lr_scheduler/ directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): file_name = file[: file.find(".py")] importlib.import_module("fairseq.optim.lr_scheduler." + file_name)
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional, List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class PolynomialDecayLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) force_anneal: Optional[int] = field( default=None, metadata={"help": "force annealing at specified epoch"}, ) end_learning_rate: float = field( default=0.0, metadata={"help": "learning rate to decay to"}, ) power: float = field( default=1.0, metadata={"help": "decay exponent"}, ) total_num_update: float = field( default=II("optimization.max_update"), metadata={"help": "total number of updates over which to decay learning rate"}, ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("polynomial_decay", dataclass=PolynomialDecayLRScheduleConfig) class PolynomialDecayLRSchedule(FairseqLRScheduler): """Decay the LR on a fixed schedule.""" def __init__(self, cfg: PolynomialDecayLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) assert cfg.total_num_update > 0 self.lr = cfg.lr[0] if cfg.warmup_updates > 0: self.warmup_factor = 1.0 / cfg.warmup_updates else: self.warmup_factor = 1 self.end_learning_rate = cfg.end_learning_rate self.total_num_update = cfg.total_num_update self.power = cfg.power self.optimizer.set_lr(self.warmup_factor * self.lr) def get_next_lr(self, epoch): lrs = self.cfg.lr if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal: # use fixed LR schedule next_lr = lrs[min(epoch, len(lrs) - 1)] else: # annneal based on lr_shrink next_lr = self.optimizer.get_lr() return next_lr def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" self.lr = self.get_next_lr(epoch) self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if self.cfg.warmup_updates > 0 and num_updates <= self.cfg.warmup_updates: self.warmup_factor = num_updates / float(self.cfg.warmup_updates) lr = self.warmup_factor * self.lr elif num_updates >= self.total_num_update: lr = self.end_learning_rate else: warmup = self.cfg.warmup_updates lr_range = self.lr - self.end_learning_rate pct_remaining = 1 - (num_updates - warmup) / ( self.total_num_update - warmup ) lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate self.optimizer.set_lr(lr) return self.optimizer.get_lr()
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py
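A numeric sketch of the polynomial decay above: linear warmup, then decay the remaining range with exponent `power` down to end_learning_rate. The constants are illustrative.

lr, end_lr, power = 3e-5, 0.0, 1.0
warmup_updates, total_num_update = 500, 5000

def poly_lr(num_updates):
    if warmup_updates > 0 and num_updates <= warmup_updates:
        return lr * num_updates / warmup_updates
    if num_updates >= total_num_update:
        return end_lr
    pct_remaining = 1 - (num_updates - warmup_updates) / (total_num_update - warmup_updates)
    return (lr - end_lr) * pct_remaining ** power + end_lr

print([poly_lr(u) for u in (0, 250, 500, 2750, 5000)])
# ≈ [0.0, 1.5e-05, 3e-05, 1.5e-05, 0.0]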
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class InverseSquareRootLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=4000, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("inverse_sqrt", dataclass=InverseSquareRootLRScheduleConfig) class InverseSquareRootSchedule(FairseqLRScheduler): """Decay the LR based on the inverse square root of the update number. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter we decay proportional to the number of updates, with a decay factor set to align with the configured learning rate. During warmup:: lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates) lr = lrs[update_num] After warmup:: decay_factor = cfg.lr * sqrt(cfg.warmup_updates) lr = decay_factor / sqrt(update_num) """ def __init__(self, cfg: InverseSquareRootLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with inverse_sqrt." " Consider --lr-scheduler=fixed instead." ) warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first cfg.warmup_updates self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates # then, decay prop. to the inverse square root of the update number self.decay_factor = warmup_end_lr * cfg.warmup_updates ** 0.5 # initial learning rate self.lr = cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step else: self.lr = self.decay_factor * num_updates ** -0.5 self.optimizer.set_lr(self.lr) return self.lr
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
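A numeric sketch of inverse_sqrt above: linear warmup to the peak lr, then lr(t) = lr * sqrt(warmup_updates) / sqrt(t). The constants are illustrative.

lr, warmup_updates, warmup_init_lr = 5e-4, 4000, 0.0

lr_step = (lr - warmup_init_lr) / warmup_updates
decay_factor = lr * warmup_updates ** 0.5

def inv_sqrt_lr(num_updates):
    if num_updates < warmup_updates:
        return warmup_init_lr + num_updates * lr_step
    return decay_factor * num_updates ** -0.5

print(inv_sqrt_lr(2000))    # ≈ 0.00025 (half-way through warmup)
print(inv_sqrt_lr(4000))    # ≈ 0.0005  (peak)
print(inv_sqrt_lr(16000))   # ≈ 0.00025 (4x the warmup steps -> half the peak lr)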
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.optim import FairseqOptimizer class FairseqLRScheduler(object): def __init__(self, cfg, optimizer): super().__init__() if optimizer is not None and not isinstance(optimizer, FairseqOptimizer): raise ValueError("optimizer must be an instance of FairseqOptimizer") self.cfg = cfg self.optimizer = optimizer self.best = None @classmethod def add_args(cls, parser): """Add arguments to the parser for this LR scheduler.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) def state_dict(self): """Return the LR scheduler state dict.""" return {"best": self.best} def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.best = state_dict["best"] def step_begin_epoch(self, epoch): """Update the learning rate at the beginning of the given epoch.""" pass def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" if val_loss is not None: if self.best is None: self.best = val_loss else: self.best = min(self.best, val_loss) def step_update(self, num_updates): """Update the learning rate after each update.""" return self.optimizer.get_lr() class LegacyFairseqLRScheduler(FairseqLRScheduler): def __init__(self, args: Namespace, optimizer): if not isinstance(optimizer, FairseqOptimizer): raise ValueError("optimizer must be an instance of FairseqOptimizer") self.args = args self.optimizer = optimizer self.best = None
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from dataclasses import dataclass, field
from typing import Optional, List, Tuple
from omegaconf import II

from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler


@dataclass
class TriStageLRScheduleConfig(FairseqDataclass):
    warmup_steps: int = field(
        default=0,
        metadata={"help": "warmup the learning rate linearly for the first N updates"},
    )
    hold_steps: int = field(
        default=0,
        metadata={"help": "steps in hold stage"},
    )
    decay_steps: int = field(
        default=0,
        metadata={"help": "steps in decay stages"},
    )
    phase_ratio: Optional[Tuple[float, float, float]] = field(
        default=None,
        metadata={
            "help": (
                "if set, automatically sets warmup/hold/decay steps to the ratio "
                "specified here from max_updates. the ratios must add up to 1.0"
            )
        },
    )
    init_lr_scale: float = field(
        default=0.01,
        metadata={"help": "initial learning rate scale during warmup phase"},
    )
    final_lr_scale: float = field(
        default=0.01,
        metadata={"help": "final learning rate scale"},
    )
    max_update: float = II("optimization.max_update")
    lr: List[float] = II("optimization.lr")


@register_lr_scheduler("tri_stage", dataclass=TriStageLRScheduleConfig)
class TriStageLRSchedule(FairseqLRScheduler):
    """Tri-stage learning rate scheduler.

    Implements the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf

    Similar to the inverse_square_root scheduler, but the tri_stage learning rate
    employs three stages of LR scheduling:

        - warmup stage, starting from `lr` * `init_lr_scale`, linearly
          increased to `lr` in `warmup_steps` iterations

        - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps`
          iterations

        - decay stage, after hold stage, decay LR exponentially to
          `lr` * `final_lr_scale` in `decay_steps`;
          after that the LR is kept at `final_lr_scale` * `lr`

    During warmup::

      init_lr = cfg.init_lr_scale * cfg.lr
      lrs = torch.linspace(init_lr, cfg.lr, cfg.warmup_steps)
      lr = lrs[update_num]

    During hold::

      lr = cfg.lr

    During decay::

      decay_factor = - math.log(cfg.final_lr_scale) / cfg.decay_steps
      lr = cfg.lr * exp(- (update_num - warmup_steps - hold_steps) * decay_factor)

    After that::

      lr = cfg.lr * cfg.final_lr_scale
    """

    def __init__(self, cfg: TriStageLRScheduleConfig, optimizer):
        super().__init__(cfg, optimizer)
        if len(cfg.lr) > 1:
            raise ValueError(
                "Cannot use a fixed learning rate schedule with tri-stage lr."
                " Consider --lr-scheduler=fixed instead."
) # calculate LR at each point self.peak_lr = cfg.lr[0] self.init_lr = cfg.init_lr_scale * cfg.lr[0] self.final_lr = cfg.final_lr_scale * cfg.lr[0] if cfg.phase_ratio is not None: assert cfg.max_update > 0 assert sum(cfg.phase_ratio) == 1, "phase ratios must add up to 1" self.warmup_steps = int(cfg.max_update * cfg.phase_ratio[0]) self.hold_steps = int(cfg.max_update * cfg.phase_ratio[1]) self.decay_steps = int(cfg.max_update * cfg.phase_ratio[2]) else: self.warmup_steps = cfg.warmup_steps self.hold_steps = cfg.hold_steps self.decay_steps = cfg.decay_steps assert ( self.warmup_steps + self.hold_steps + self.decay_steps > 0 ), "please specify steps or phase_ratio" self.warmup_rate = ( (self.peak_lr - self.init_lr) / self.warmup_steps if self.warmup_steps != 0 else 0 ) self.decay_factor = -math.log(cfg.final_lr_scale) / self.decay_steps # initial learning rate self.lr = self.init_lr self.optimizer.set_lr(self.lr) def _decide_stage(self, update_step): """ return stage, and the corresponding steps within the current stage """ if update_step < self.warmup_steps: # warmup state return 0, update_step offset = self.warmup_steps if update_step < offset + self.hold_steps: # hold stage return 1, update_step - offset offset += self.hold_steps if update_step <= offset + self.decay_steps: # decay stage return 2, update_step - offset offset += self.decay_steps # still here ? constant lr stage return 3, update_step - offset def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" stage, steps_in_stage = self._decide_stage(num_updates) if stage == 0: self.lr = self.init_lr + self.warmup_rate * steps_in_stage elif stage == 1: self.lr = self.peak_lr elif stage == 2: self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage) elif stage == 3: self.lr = self.final_lr else: raise ValueError("Undefined stage") self.optimizer.set_lr(self.lr) return self.lr
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
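A numeric sketch of the tri-stage schedule above: linear warmup, hold at the peak, then exponential decay to final_lr_scale * lr. The step counts and rates are illustrative.

import math

peak_lr, init_lr_scale, final_lr_scale = 1e-3, 0.01, 0.01
warmup_steps, hold_steps, decay_steps = 100, 200, 300

init_lr = init_lr_scale * peak_lr
final_lr = final_lr_scale * peak_lr
warmup_rate = (peak_lr - init_lr) / warmup_steps
decay_factor = -math.log(final_lr_scale) / decay_steps

def tri_stage_lr(step):
    if step < warmup_steps:
        return init_lr + warmup_rate * step
    if step < warmup_steps + hold_steps:
        return peak_lr
    if step <= warmup_steps + hold_steps + decay_steps:
        return peak_lr * math.exp(-decay_factor * (step - warmup_steps - hold_steps))
    return final_lr

for step in (0, 50, 100, 250, 450, 700):
    print(step, round(tri_stage_lr(step), 8))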
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class CosineLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = field( default=II("optimization.lr"), metadata={"help": "max learning rate, must be more than cfg.min_lr"}, ) min_lr: float = field(default=0.0, metadata={"help": "min learning rate"}) t_mult: float = field( default=1.0, metadata={"help": "factor to grow the length of each period"} ) lr_period_updates: float = field( default=-1, metadata={"help": "initial number of updates per period"} ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) # This is not required, but is for convenience in inferring lr_period_updates max_update: int = II("optimization.max_update") @register_lr_scheduler("cosine", dataclass=CosineLRScheduleConfig) class CosineLRSchedule(FairseqLRScheduler): """Assign LR based on a cyclical schedule that follows the cosine function. See https://arxiv.org/pdf/1608.03983.pdf for details. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured max learning rate (``--lr``). During warmup:: lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates) lr = lrs[update_num] After warmup:: lr = cfg.min_lr + 0.5*(cfg.lr - cfg.min_lr)*(1 + cos(t_curr / t_i)) where ``t_curr`` is current percentage of updates within the current period range and ``t_i`` is the current period range, which is scaled by ``t_mul`` after every iteration. """ def __init__(self, cfg: CosineLRScheduleConfig, fairseq_optimizer): super().__init__(cfg, fairseq_optimizer) if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with cosine." f" Consider --lr-scheduler=fixed instead. 
({cfg.lr})" ) self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr assert ( self.max_lr > cfg.min_lr ), f"max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})" warmup_end_lr = self.max_lr if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = cfg.min_lr self.t_mult = cfg.t_mult self.period = cfg.lr_period_updates if self.period <= 0: assert ( cfg.max_update > 0 ), "Either --max_update or --lr-period-updates must be set" self.period = cfg.max_update - cfg.warmup_updates if cfg.warmup_updates > 0: # linearly warmup for the first cfg.warmup_updates self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates else: self.lr_step = 1 self.warmup_updates = cfg.warmup_updates self.lr_shrink = cfg.lr_shrink # initial learning rate self.lr = cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step else: curr_updates = num_updates - self.cfg.warmup_updates if self.t_mult != 1: i = math.floor( math.log( 1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult ) ) t_i = self.t_mult ** i * self.period t_curr = ( curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period ) else: i = math.floor(curr_updates / self.period) t_i = self.period t_curr = curr_updates - (self.period * i) lr_shrink = self.lr_shrink ** i min_lr = self.cfg.min_lr * lr_shrink max_lr = self.max_lr * lr_shrink self.lr = min_lr + 0.5 * (max_lr - min_lr) * ( 1 + math.cos(math.pi * t_curr / t_i) ) self.optimizer.set_lr(self.lr) return self.lr
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
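A numeric sketch of the cosine schedule above for the common t_mult == 1 case: warmup, then a cosine anneal from max_lr to min_lr within each period, with both bounds shrinking by lr_shrink per cycle. The constants are illustrative.

import math

max_lr, min_lr, warmup_updates, warmup_init_lr = 1e-3, 1e-5, 100, 0.0
period, lr_shrink = 400, 0.5
lr_step = (max_lr - warmup_init_lr) / warmup_updates

def cosine_lr(num_updates):
    if num_updates < warmup_updates:
        return warmup_init_lr + num_updates * lr_step
    curr = num_updates - warmup_updates
    i = curr // period                        # which cycle we are in
    t_curr = curr - i * period
    shrink = lr_shrink ** i
    lo, hi = min_lr * shrink, max_lr * shrink
    return lo + 0.5 * (hi - lo) * (1 + math.cos(math.pi * t_curr / period))

for u in (0, 100, 300, 500, 900):
    print(u, round(cosine_lr(u), 8))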
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class StepLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = field( default=II("optimization.lr"), metadata={"help": "max learning rate, must be more than cfg.min_lr"}, ) min_lr: float = field(default=0.0, metadata={"help": "min learning rate"}) lr_deacy_period: int = field(default=25000, metadata={"help": "decay period"}) lr_decay: float = field(default=0.5, metadata={"help": "decay factor"}) @register_lr_scheduler("step", dataclass=StepLRScheduleConfig) class StepLRSchedule(FairseqLRScheduler): """Decay learning rate every k updates by a fixed factor """ def __init__(self, cfg: StepLRScheduleConfig, fairseq_optimizer): super().__init__(cfg, fairseq_optimizer) self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr self.min_lr = cfg.min_lr self.lr_deacy_period = cfg.lr_deacy_period self.lr_decay = cfg.lr_decay self.warmup_updates = cfg.warmup_updates self.warmup_init_lr = ( cfg.warmup_init_lr if cfg.warmup_init_lr >= 0 else self.min_lr ) assert(self.lr_deacy_period > 0) assert(self.lr_decay <= 1) assert(self.min_lr >= 0) assert(self.max_lr > self.min_lr) if cfg.warmup_updates > 0: # linearly warmup for the first cfg.warmup_updates self.warmup_lr_step = ( (self.max_lr - self.warmup_init_lr) / self.warmup_updates ) else: self.warmup_lr_step = 1 # initial learning rate self.lr = self.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.warmup_init_lr + num_updates * self.warmup_lr_step else: curr_updates = num_updates - self.cfg.warmup_updates lr_mult = self.lr_decay ** (curr_updates // self.lr_deacy_period) self.lr = max(self.max_lr * lr_mult, self.min_lr) self.optimizer.set_lr(self.lr) return self.lr
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/step_lr_scheduler.py
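The step schedule above reduces to a short closed-form function; this is an illustrative standalone sketch (names and default values are stand-ins, not repo configuration).

# Illustration only: the warmup + step-decay rule from step_update above as a pure function.
def step_lr(num_updates, warmup_updates=1000, warmup_init_lr=0.0,
            max_lr=3e-4, min_lr=1e-5, decay_period=25000, decay=0.5):
    if num_updates < warmup_updates:
        return warmup_init_lr + num_updates * (max_lr - warmup_init_lr) / warmup_updates
    curr = num_updates - warmup_updates
    return max(max_lr * decay ** (curr // decay_period), min_lr)

assert step_lr(0) == 0.0
assert abs(step_lr(1000) - 3e-4) < 1e-12          # warmup finishes at max_lr
assert abs(step_lr(1000 + 25000) - 1.5e-4) < 1e-12  # one decay period later, lr is halved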
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from dataclasses import dataclass, field
from typing import List

from omegaconf import II

from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler


@dataclass
class TriangularLRScheduleConfig(FairseqDataclass):
    max_lr: float = field(
        default="???", metadata={"help": "max learning rate, must be more than cfg.lr"}
    )
    lr_period_updates: float = field(
        default=5000,
        metadata={"help": "initial number of updates per period (cycle length)"},
    )
    lr_shrink: float = field(
        default=0.1, metadata={"help": "shrink factor for annealing"}
    )
    shrink_min: bool = field(
        default=False, metadata={"help": "if set, also shrinks min lr"}
    )
    lr: List[float] = II("optimization.lr")


@register_lr_scheduler("triangular", dataclass=TriangularLRScheduleConfig)
class TriangularLRSchedule(FairseqLRScheduler):
    """Assign LR based on a triangular cyclical schedule.

    See https://arxiv.org/pdf/1506.01186.pdf for details.
    """

    def __init__(self, cfg: TriangularLRScheduleConfig, optimizer):
        super().__init__(cfg, optimizer)
        if len(cfg.lr) > 1:
            raise ValueError(
                "Cannot use a fixed learning rate schedule with triangular."
                " Consider --lr-scheduler=fixed instead."
            )

        lr = cfg.lr[0]

        assert cfg.max_lr > lr, "max_lr must be more than lr"
        self.min_lr = lr
        self.max_lr = cfg.max_lr
        self.stepsize = cfg.lr_period_updates // 2
        self.lr_shrink = cfg.lr_shrink
        self.shrink_min = cfg.shrink_min

        # initial learning rate
        self.lr = self.min_lr
        self.optimizer.set_lr(self.lr)

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        cycle = math.floor(num_updates / (2 * self.stepsize))

        lr_shrink = self.lr_shrink ** cycle
        max_lr = self.max_lr * lr_shrink
        if self.shrink_min:
            min_lr = self.min_lr * lr_shrink
        else:
            min_lr = self.min_lr

        x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1)
        self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x))

        self.optimizer.set_lr(self.lr)
        return self.lr
bart_ls-main
fairseq-py/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py
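For reference, a self-contained sketch of the triangular formula used in step_update above (the cyclical schedule of Smith, arXiv:1506.01186); parameter values are illustrative assumptions.

# Illustration only: the triangular (cyclical) learning-rate rule as a pure function.
import math

def triangular_lr(num_updates, min_lr=1e-5, max_lr=1e-3,
                  period=5000, lr_shrink=0.1, shrink_min=False):
    stepsize = period // 2
    cycle = math.floor(num_updates / (2 * stepsize))
    hi = max_lr * lr_shrink ** cycle
    lo = min_lr * lr_shrink ** cycle if shrink_min else min_lr
    x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)
    return lo + (hi - lo) * max(0.0, 1 - x)

# LR rises from min_lr to max_lr over the first half cycle, then falls back down.
assert abs(triangular_lr(0) - 1e-5) < 1e-12
assert abs(triangular_lr(2500) - 1e-3) < 1e-12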
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Optional

import torch
from torch import Tensor


@torch.jit.script
def script_skip_tensor_list(x: List[Tensor], mask):
    res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x]
    outputs = []
    for i, t in enumerate(res):
        if t.numel() != 0:
            outputs.append(t)
        else:
            outputs.append(x[i])
    return outputs


@torch.jit.script
def script_skip_tensor(x: Tensor, mask):
    # None case
    if x.size(0) == 0:
        return x
    res = x[mask] if x.size(0) == mask.size(0) else x[:, mask]
    if res.numel() == 0:
        return x
    else:
        return res


@torch.jit.script
def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int):
    """
    Expand 2D/3D tensor on dim=1
    """
    if x is None:
        return None

    assert x.dim() == 2 or x.dim() == 3
    assert trg_dim >= x.size(1), (trg_dim, x.size())
    if trg_dim == x.size(1):
        return x

    dims = [x.size(0), trg_dim - x.size(1)]
    if x.dim() == 3:
        dims.append(x.size(2))

    x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1)

    return x


@torch.jit.script
def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor:
    return x if x is not None else y


@torch.jit.script
def fill_tensors(
    x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int
) -> Optional[Tensor]:
    """
    Filling tensor x with y at masked positions (dim=0).
    """
    if x is None or x.size()[0] == 0 or y is None:
        return x
    assert x.dim() == y.dim() and mask.size(0) == x.size(0)
    assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))

    n_selected = mask.sum()
    if n_selected == 0:
        return x
    assert n_selected == y.size(0)
    if n_selected == x.size(0):
        return y

    if x.size(1) < y.size(1):
        x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx)
        x[mask] = y
    elif x.size(1) > y.size(1):
        x[mask] = torch.tensor(padding_idx).type_as(x)
        if x.dim() == 2:
            x[mask, : y.size(1)] = y
        else:
            x[mask, : y.size(1), :] = y
    else:
        x[mask] = y
    return x
bart_ls-main
fairseq-py/fairseq/models/model_utils.py
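A tiny standalone demonstration (assumed example values, plain torch only) of the behaviour that expand_2d_or_3d_tensor and fill_tensors above combine: widen x on dim=1 with padding, then overwrite the masked rows with y.

# Illustration only: not the repo's functions, just the same tensor operations inlined.
import torch

pad = 1
x = torch.tensor([[5, 6, pad],
                  [7, pad, pad]])            # two hypotheses of length 3
y = torch.tensor([[8, 9, 10, 11]])           # replacement row, longer than x
mask = torch.tensor([False, True])           # overwrite only the second row

# expand x on dim=1 to y's width, padding the new columns
x = torch.cat([x, x.new_full((x.size(0), y.size(1) - x.size(1)), pad)], dim=1)
x[mask] = y                                  # fill masked rows with y
print(x)
# tensor([[ 5,  6,  1,  1],
#         [ 8,  9, 10, 11]])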
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Base classes for various fairseq models. """ import logging from argparse import Namespace from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import Dictionary from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, gen_parser_from_dataclass, ) from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig from torch import Tensor logger = logging.getLogger(__name__) def check_type(module, expected_type): if hasattr(module, "unwrapped_module"): assert isinstance(module.unwrapped_module, expected_type), \ f"{type(module.unwrapped_module)} != {expected_type}" else: assert isinstance(module, expected_type), f"{type(module)} != {expected_type}" class BaseFairseqModel(nn.Module): """Base class for fairseq models.""" def __init__(self): super().__init__() self._is_generation_fast = False @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: # do not set defaults so that settings defaults from various architectures still works gen_parser_from_dataclass(parser, dc(), delete_default=True) @classmethod def build_model(cls, args, task): """Build a new model instance.""" raise NotImplementedError("Model must implement the build_model method") def get_targets(self, sample, net_output): """Get targets from either the sample or the net's output.""" return sample["target"] def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def get_normalized_probs_scriptable( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Scriptable helper function for get_normalized_probs in ~BaseFairseqModel""" if hasattr(self, "decoder"): return self.decoder.get_normalized_probs(net_output, log_probs, sample) elif torch.is_tensor(net_output): # syntactic sugar for simple models which don't have a decoder # (e.g., the classification tutorial) logits = net_output.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def extract_features(self, *args, **kwargs): """Similar to *forward* but only return features.""" return self(*args, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return None def load_state_dict( self, state_dict, strict=True, model_cfg: Optional[DictConfig] = None, args: Optional[Namespace] = None, ): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. 
""" if model_cfg is None and args is not None: logger.warn("using 'args' is deprecated, please update your code to use dataclass config") model_cfg = convert_namespace_to_omegaconf(args).model self.upgrade_state_dict(state_dict) from fairseq.checkpoint_utils import prune_state_dict new_state_dict = prune_state_dict(state_dict, model_cfg) return super().load_state_dict(new_state_dict, strict) def upgrade_state_dict(self, state_dict): """Upgrade old state dicts to work with newer code.""" self.upgrade_state_dict_named(state_dict, "") def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code. Args: state_dict (dict): state dictionary to upgrade, in place name (str): the state dict key corresponding to the current module """ assert state_dict is not None def do_upgrade(m, prefix): if len(prefix) > 0: prefix += "." for n, c in m.named_children(): name = prefix + n if hasattr(c, "upgrade_state_dict_named"): c.upgrade_state_dict_named(state_dict, name) elif hasattr(c, "upgrade_state_dict"): c.upgrade_state_dict(state_dict) do_upgrade(c, name) do_upgrade(self, name) def set_num_updates(self, num_updates): """State from trainer to pass along to model at every update.""" for m in self.modules(): if hasattr(m, "set_num_updates") and m != self: m.set_num_updates(num_updates) def prepare_for_inference_(self, cfg: DictConfig): """Prepare model for inference.""" kwargs = {} kwargs["beamable_mm_beam_size"] = ( None if getattr(cfg.generation, "no_beamable_mm", False) else getattr(cfg.generation, "beam", 5) ) kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False) if getattr(cfg.generation, "retain_dropout", False): kwargs["retain_dropout"] = cfg.generation.retain_dropout kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules self.make_generation_fast_(**kwargs) def make_generation_fast_(self, **kwargs): """ Legacy entry point to optimize model for faster generation. Prefer prepare_for_inference_. """ if self._is_generation_fast: return # only apply once self._is_generation_fast = True # remove weight norm from all modules in the network def apply_remove_weight_norm(module): try: nn.utils.remove_weight_norm(module) except (AttributeError, ValueError): # this module didn't have weight norm return self.apply(apply_remove_weight_norm) def apply_make_generation_fast_(module, prefix): if len(prefix) > 0: prefix += "." base_func = BaseFairseqModel.make_generation_fast_ for n, m in module.named_modules(): if ( m != self and hasattr(m, "make_generation_fast_") # don't call this implementation again, e.g., if # children modules also inherit from BaseFairseqModel and m.make_generation_fast_.__func__ is not base_func ): name = prefix + n m.make_generation_fast_(name=name, **kwargs) apply_make_generation_fast_(self, "") def train(mode=True): if mode: raise RuntimeError("cannot train after make_generation_fast") # this model should no longer be used for training self.eval() self.train = train def prepare_for_onnx_export_(self, **kwargs): """Make model exportable via ONNX trace.""" seen = set() def apply_prepare_for_onnx_export_(module): if ( module != self and hasattr(module, "prepare_for_onnx_export_") and module not in seen ): seen.add(module) module.prepare_for_onnx_export_(**kwargs) self.apply(apply_prepare_for_onnx_export_) @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", **kwargs, ): """ Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model file. 
Downloads and caches the pre-trained model file if needed. The base implementation returns a :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to generate translations or sample from language models. The underlying :class:`~fairseq.models.FairseqModel` can be accessed via the *generator.models* attribute. Other models may override this to implement custom hub interfaces. Args: model_name_or_path (str): either the name of a pre-trained model to load or a path/URL to a pre-trained model state dict checkpoint_file (str, optional): colon-separated list of checkpoint files in the model archive to ensemble (default: 'model.pt') data_name_or_path (str, optional): point args.data to the archive at the given path/URL. Can start with '.' or './' to reuse the model archive path. """ from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), **kwargs, ) logger.info(x["args"]) return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"]) @classmethod def hub_models(cls): return {} class FairseqEncoderDecoderModel(BaseFairseqModel): """Base class for encoder-decoder models. Args: encoder (FairseqEncoder): the encoder decoder (FairseqDecoder): the decoder """ def __init__(self, encoder, decoder): super().__init__() self.encoder = encoder self.decoder = decoder check_type(self.encoder, FairseqEncoder) check_type(self.decoder, FairseqDecoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. Then, feed the encoder output and previous decoder outputs (i.e., teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) features = self.decoder.extract_features( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return features def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return (self.encoder.max_positions(), self.decoder.max_positions()) def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() class FairseqModel(FairseqEncoderDecoderModel): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) utils.deprecation_warning( "FairseqModel is deprecated, please use FairseqEncoderDecoderModel " "or BaseFairseqModel instead", stacklevel=4, ) class FairseqMultiModel(BaseFairseqModel): """Base class for combining multiple encoder-decoder models.""" def __init__(self, encoders, decoders): super().__init__() assert encoders.keys() == decoders.keys() self.keys = list(encoders.keys()) for key in self.keys: check_type(encoders[key], FairseqEncoder) check_type(decoders[key], FairseqDecoder) self.models = nn.ModuleDict( { key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys } ) @staticmethod def build_shared_embeddings( dicts: Dict[str, Dictionary], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str] = None, ): """ Helper function to build shared embeddings for a set of languages after checking that all dicts corresponding to those languages are equivalent. Args: dicts: Dict of lang_id to its corresponding Dictionary langs: languages that we want to share embeddings for embed_dim: embedding dimension build_embedding: callable function to actually build the embedding pretrained_embed_path: Optional path to load pretrained embeddings """ shared_dict = dicts[langs[0]] if any(dicts[lang] != shared_dict for lang in langs): raise ValueError( "--share-*-embeddings requires a joined dictionary: " "--share-encoder-embeddings requires a joined source " "dictionary, --share-decoder-embeddings requires a joined " "target dictionary, and --share-all-embeddings requires a " "joint source + target dictionary." ) return build_embedding(shared_dict, embed_dim, pretrained_embed_path) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return { key: ( self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions(), ) for key in self.keys } def max_decoder_positions(self): """Maximum length supported by the decoder.""" return min(model.decoder.max_positions() for model in self.models.values()) @property def encoder(self): return self.models[self.keys[0]].encoder @property def decoder(self): return self.models[self.keys[0]].decoder def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def load_state_dict( self, state_dict, strict=True, model_cfg=None, args: Optional[Namespace] = None, ): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. 
Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. """ if model_cfg is None and args is not None: logger.warn("using 'args' is deprecated, please update your code to use dataclass config") model_cfg = convert_namespace_to_omegaconf(args).model self.upgrade_state_dict(state_dict) from fairseq.checkpoint_utils import prune_state_dict new_state_dict = prune_state_dict(state_dict, model_cfg) return super().load_state_dict(new_state_dict, strict) class FairseqLanguageModel(BaseFairseqModel): """Base class for decoder-only models. Args: decoder (FairseqDecoder): the decoder """ def __init__(self, decoder): super().__init__() self.decoder = decoder check_type(self.decoder, FairseqDecoder) def forward(self, src_tokens, **kwargs): """ Run the forward pass for a decoder-only model. Feeds a batch of tokens through the decoder to predict the next tokens. Args: src_tokens (LongTensor): tokens on which to condition the decoder, of shape `(batch, tgt_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: tuple: - the decoder's output of shape `(batch, seq_len, vocab)` - a dictionary with any model-specific outputs """ return self.decoder(src_tokens, **kwargs) def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, **kwargs): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, seq_len, embed_dim)` - a dictionary with any model-specific outputs """ return self.decoder.extract_features(src_tokens, **kwargs) def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return self.decoder.max_positions() def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() @property def supported_targets(self): return {"future"} class FairseqEncoderModel(BaseFairseqModel): """Base class for encoder-only models. Args: encoder (FairseqEncoder): the encoder """ def __init__(self, encoder): super().__init__() self.encoder = encoder check_type(self.encoder, FairseqEncoder) def forward(self, src_tokens, src_lengths, **kwargs): """ Run the forward pass for a encoder-only model. Feeds a batch of tokens through the encoder to generate features. Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: the encoder's output, typically of shape `(batch, src_len, features)` """ return self.encoder(src_tokens, src_lengths, **kwargs) def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" encoder_out = net_output["encoder_out"] if torch.is_tensor(encoder_out): logits = encoder_out.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return self.encoder.max_positions()
bart_ls-main
fairseq-py/fairseq/models/fairseq_model.py
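A hedged sketch of the encoder-decoder forward contract documented above (teacher forcing), written with toy torch modules rather than the fairseq base classes; ToyEncoder, ToyDecoder, and all sizes below are made-up stand-ins.

# Illustration only: encoder consumes (src_tokens, src_lengths), decoder consumes
# (prev_output_tokens, encoder_out) and returns (batch, tgt_len, vocab) scores.
import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    def __init__(self, vocab, dim):
        super().__init__()
        self.embed = nn.Embedding(vocab, dim)

    def forward(self, src_tokens, src_lengths=None):
        return {"encoder_out": self.embed(src_tokens)}       # (B, S, C)

class ToyDecoder(nn.Module):
    def __init__(self, vocab, dim):
        super().__init__()
        self.embed = nn.Embedding(vocab, dim)
        self.proj = nn.Linear(dim, vocab)

    def forward(self, prev_output_tokens, encoder_out):
        # trivially "attend": add mean-pooled encoder states to each target position
        ctx = encoder_out["encoder_out"].mean(dim=1, keepdim=True)
        return self.proj(self.embed(prev_output_tokens) + ctx)  # (B, T, V)

enc, dec = ToyEncoder(100, 16), ToyDecoder(100, 16)
src = torch.randint(0, 100, (2, 7))
prev = torch.randint(0, 100, (2, 5))          # shifted targets (teacher forcing)
logits = dec(prev, enc(src, src_lengths=torch.tensor([7, 7])))
assert logits.shape == (2, 5, 100)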
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.model_utils import ( coalesce, expand_2d_or_3d_tensor, script_skip_tensor, ) from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, ) from fairseq.modules import TransformerDecoderLayer from fairseq.modules.transformer_sentence_encoder import init_bert_params from torch import Tensor @torch.jit.script def _fill(x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int): """ Filling tensor x with y at masked positions (dim=0). """ if x is None or x.size()[0] == 0 or y is None: return torch.empty([0]) assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() if n_selected == 0: return x assert n_selected == y.size(0) if n_selected == x.size(0): return y y = y.to(x) if x.size(1) < y.size(1): x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx) x[mask] = y elif x.size(1) > y.size(1): x[mask] = torch.tensor(padding_idx).type_as(x) if x.dim() == 2: x[mask, : y.size(1)] = y else: x[mask, : y.size(1), :] = y else: x[mask] = y return x def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor(masked_tgt_masks, device=out_tokens.device).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets def _get_del_targets(in_tokens, out_tokens, padding_idx): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. 
run `pip install --editable .`\n") raise e out_seq_len = out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] # transform to tensor word_del_targets = torch.tensor(word_del_targets) return word_del_targets def _get_del_ins_targets(in_tokens, out_tokens, padding_idx): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor mask_ins_targets = torch.tensor(mask_ins_targets) word_del_targets = torch.tensor(word_del_targets) return word_del_targets, mask_ins_targets @register_model("fb_levenshtein_transformer") class LevenshteinTransformerModel(TransformerModel): def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.tgt_dict = decoder.dictionary self.bos = decoder.dictionary.bos() self.eos = decoder.dictionary.eos() self.pad = decoder.dictionary.pad() self.unk = decoder.dictionary.unk() @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) parser.add_argument( "--early-exit", default="6,6,6", type=str, help="number of decoder layers for del_word, ins_mask, ins_word", ) parser.add_argument( "--no-share-discriminator", action="store_true", help="additional decoder-layers to learn deletion", ) parser.add_argument( "--no-share-maskpredictor", action="store_true", help="additional decoder-layers to learn predicting masks", ) parser.add_argument( "--sampling-for-deletion", action="store_true", help="instead of argmax, use sampling to predict the tokens", ) # Added for compatibility parser.add_argument( "--decoder-out-embed-dim", default=None, type=int, metavar="N", help="decoder output embedding dimension (bottleneck layer before" "output layer if specified.)", ) @property def validate(self): return {"length-beam": False} @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder @classmethod def build_encoder(cls, args, src_dict, embed_tokens): encoder = TransformerEncoder(args, src_dict, embed_tokens) if getattr(args, "apply_bert_init", False): encoder.apply(init_bert_params) return encoder def forward( self, src_tokens, src_lengths, 
prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk ) mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) mask_ins_out, _ = self.decoder.forward_mask_ins( prev_output_tokens, encoder_out=encoder_out ) word_ins_out, _ = self.decoder.forward_word_ins( masked_tgt_tokens, encoder_out=encoder_out ) # make online prediction if self.decoder.sampling_for_deletion: word_predictions = torch.multinomial( F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1 ).view(word_ins_out.size(0), -1) else: word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1] word_predictions.masked_scatter_( ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks] ) # generate training labels for deletion word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad) word_del_out, _ = self.decoder.forward_word_del(word_predictions, encoder_out) return { "mask_ins": { "out": mask_ins_out, "tgt": mask_ins_targets, "mask": mask_ins_masks, "ls": 0.01, }, "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": masked_tgt_masks, "ls": self.args.label_smoothing, "nll_loss": True, }, "word_del": { "out": word_del_out, "tgt": word_del_targets, "mask": word_predictions.ne(self.pad), }, } def forward_encoder(self, encoder_inputs): return self.encoder(*encoder_inputs) def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn if max_ratio is not None and encoder_out["encoder_padding_mask"]: max_lengths = ( (~encoder_out["encoder_padding_mask"][0]).sum(1) * max_ratio ).clamp(min=10) else: max_lengths = output_tokens.new_full(output_tokens.size()[:1], 255) def skip_encoder_out(encoder_out, mask): if not mask.any(): return encoder_out else: return self.encoder.reorder_encoder_out( encoder_out, mask.nonzero(as_tuple=False).squeeze() ) @torch.jit.script def del_word( output_tokens, output_scores, attn: Optional[Tensor], word_del_attn: Optional[Tensor], word_del_out, can_del_word, pad_idx: int, bos_idx: int, eos_idx: int, ): # delete words # do not delete tokens if it is <s> </s> if can_del_word.sum() != 0: # we cannot delete, skip word_del_score = F.log_softmax(word_del_out, 2) word_del_pred = word_del_score.max(-1)[1].to(torch.bool) in_tokens = output_tokens[can_del_word] in_scores = output_scores[can_del_word] # apply deletion to a tensor in_masks = in_tokens.ne(pad_idx) bos_eos_masks = in_tokens.eq(bos_idx) + in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(torch.bitwise_not(in_masks), 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = ( torch.arange(max_len, device=in_tokens.device)[None, :] .expand_as(in_tokens) .contiguous() .masked_fill(word_del_pred, max_len) .sort(1)[1] ) _tokens = in_tokens.masked_fill(word_del_pred, pad_idx).gather( 1, reordering ) _scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) if word_del_attn is not None: _mask = word_del_pred[:, :, None].expand_as(word_del_attn) _reordering = reordering[:, :, None].expand_as(word_del_attn) _attn = word_del_attn.masked_fill(_mask, 0.0).gather(1, 
_reordering) attn = _fill(attn, can_del_word, _attn, 0) output_tokens = coalesce( _fill(output_tokens, can_del_word, _tokens, pad_idx), output_tokens ) output_scores = coalesce( _fill(output_scores, can_del_word, _scores, 0), output_scores ) return output_tokens, output_scores, attn @torch.jit.script def ins_placeholders( output_tokens, output_scores, mask_ins_out, can_ins_mask, pad_idx: int, unk_idx: int, eos_idx: int, eos_penalty: float, max_lengths: Optional[Tensor], ) -> Tuple[Tensor, Tensor]: # insert placeholders if can_ins_mask.sum() != 0: mask_ins_score = F.log_softmax(mask_ins_out, 2) if eos_penalty > 0.0: mask_ins_score[:, :, 0] -= eos_penalty mask_ins_pred = mask_ins_score.max(-1)[1] if max_lengths is not None: mask_ins_pred = torch.min( mask_ins_pred, max_lengths[can_ins_mask, None].expand_as(mask_ins_pred), ) in_tokens = output_tokens[can_ins_mask] in_scores = output_scores[can_ins_mask] in_masks = in_tokens.ne(pad_idx) in_lengths = in_masks.sum(1) # HACK: hacky way to shift all the paddings to eos first. in_tokens.masked_fill_(torch.bitwise_not(in_masks), eos_idx) mask_ins_pred.masked_fill_(torch.bitwise_not(in_masks[:, 1:]), 0) out_lengths = in_lengths + mask_ins_pred.sum(1) out_max_len = out_lengths.max() out_masks = ( torch.arange(out_max_len, device=out_lengths.device)[None, :].long() < out_lengths[:, None] ) reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) out_tokens = ( torch.zeros( in_tokens.size()[0], out_max_len, device=in_tokens.device, dtype=in_tokens.dtype, ) .fill_(pad_idx) .masked_fill_(out_masks, unk_idx) ) out_tokens = torch.cat([in_tokens[:, :1], out_tokens[:, 1:]], 1) out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) if in_scores is not None: in_scores.masked_fill_(torch.bitwise_not(in_masks), 0) out_scores = torch.zeros_like(out_tokens).to(in_scores) out_tokens = torch.cat([in_tokens[:, :1], out_tokens[:, 1:]], 1) out_scores.scatter_(1, reordering, in_scores[:, 1:]) else: out_scores = None output_tokens = coalesce( _fill(output_tokens, can_ins_mask, out_tokens, pad_idx), output_tokens, ) output_scores = coalesce( _fill(output_scores, can_ins_mask, out_scores, 0), output_scores ) return output_tokens, output_scores @torch.jit.script def ins_words( output_tokens, output_scores, attn: Optional[Tensor], word_ins_attn: Optional[Tensor], word_ins_out, can_ins_word, pad_idx: int, unk_idx: int, ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: # insert words if can_ins_word.sum() != 0: word_ins_scores = F.log_softmax(word_ins_out, 2) word_ins_pred = word_ins_scores.max(-1)[1] in_tokens = output_tokens[can_ins_word] in_scores = output_scores[can_ins_word] word_ins_masks = in_tokens.eq(unk_idx) out_tokens = in_tokens.masked_scatter( word_ins_masks, word_ins_pred[word_ins_masks] ) if in_scores is not None: out_scores = in_scores.masked_scatter( word_ins_masks, word_ins_scores[word_ins_masks] ) else: out_scores = None output_tokens = coalesce( _fill(output_tokens, can_ins_word, out_tokens, pad_idx), output_tokens, ) output_scores = coalesce( _fill(output_scores, can_ins_word, out_scores, 0), output_scores ) if attn is not None: attn = coalesce(_fill(attn, can_ins_word, word_ins_attn, 0), attn) return output_tokens, output_scores, attn can_del_word = output_tokens.ne(self.pad).sum(1) > 2 word_del_out, word_del_attn = self.decoder.forward_word_del( script_skip_tensor(output_tokens, can_del_word), skip_encoder_out(encoder_out, can_del_word), ) output_tokens, output_scores, attn = del_word( output_tokens, output_scores, attn, word_del_attn, word_del_out, 
can_del_word, self.pad, self.bos, self.eos, ) can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lengths mask_ins_out, _ = self.decoder.forward_mask_ins( script_skip_tensor(output_tokens, can_ins_mask), skip_encoder_out(encoder_out, can_ins_mask), ) output_tokens, output_scores = ins_placeholders( output_tokens, output_scores, mask_ins_out, can_ins_mask, self.pad, self.unk, self.eos, eos_penalty, max_lengths=( max_lengths if max_ratio is not None and encoder_out["encoder_padding_mask"] else None ), ) can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 word_ins_out, word_ins_attn = self.decoder.forward_word_ins( script_skip_tensor(output_tokens, can_ins_word), skip_encoder_out(encoder_out, can_ins_word), ) output_tokens, output_scores, attn = ins_words( output_tokens, output_scores, attn, word_ins_attn, word_ins_out, can_ins_word, self.pad, self.unk, ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() @torch.jit.script def slice_wrap(x, l): return x[:, :l] @torch.jit.script def slice_wrap_attn(x: Optional[Tensor], l): if x is None or x.size()[0] == 0: return torch.empty([0]) else: return x[:, :l, :] output_tokens = slice_wrap(output_tokens, cut_off) output_scores = slice_wrap(output_scores, cut_off) attn = slice_wrap_attn(attn, cut_off) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, step=0, max_step=0, ) def initialize_output_tokens(self, encoder_out, src_tokens): initial_output_tokens = torch.cat( [ src_tokens.new_zeros(src_tokens.size(0), 1).fill_(self.bos), src_tokens.new_zeros(src_tokens.size(0), 1).fill_(self.eos), ], 1, ) initial_output_scores = torch.zeros_like(initial_output_tokens).to( encoder_out["encoder_out"][0] ) initial_attn = torch.empty([0]) if getattr(self.decoder.layers[-1], "need_attn", True): initial_attn = torch.zeros([src_tokens.size(0), 2, src_tokens.size(1)]).to( initial_output_tokens ) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=initial_attn, step=0, max_step=0, history=None, ) class LevenshteinTransformerDecoder(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None) self.embed_word_del = Embedding(2, self.output_embed_dim, None) # del_word, ins_mask, ins_word self.early_exit = [int(i) for i in args.early_exit.split(",")] assert len(self.early_exit) == 3 # copy layers for mask-predict/deletion self.layers_msk = None if getattr(args, "no_share_maskpredictor", False): self.layers_msk = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[1]) ] ) self.layers_del = None if getattr(args, "no_share_discriminator", False): self.layers_del = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[0]) ] ) def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, layers=None, **unused ): """ Similar to *forward* but only return features. 
Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens.long()) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) layers = self.layers if layers is None else layers early_exit = len(layers) if early_exit is None else early_exit for _, layer in enumerate(layers[:early_exit]): x, attn, _ = layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, attn, inner_states def forward_mask_ins(self, prev_output_tokens, encoder_out=None, **unused): features, attn, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[1], layers=self.layers_msk, **unused ) features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) return F.linear(features_cat, self.embed_mask_ins.weight), attn def forward_word_ins(self, prev_output_tokens, encoder_out=None, **unused): features, attn, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[2], layers=self.layers, **unused ) return self.output_layer(features), attn def forward_word_del(self, prev_output_tokens, encoder_out=None, **unused): features, attn, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[0], layers=self.layers_del, **unused ) return F.linear(features, self.embed_word_del.weight), attn @register_model_architecture("fb_levenshtein_transformer", "fb_levenshtein_transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, 
"decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.early_exit = getattr(args, "early_exit", "6,6,6") args.no_share_discriminator = getattr(args, "no_share_discriminator", False) args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) @register_model_architecture( "fb_levenshtein_transformer", "fb_levenshtein_transformer_wmt_en_de" ) def levenshtein_transformer_wmt_en_de(args): base_architecture(args) # similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "fb_levenshtein_transformer", "fb_levenshtein_transformer_vaswani_wmt_en_de_big" ) def levenshtein_transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) # default parameters used in tensor2tensor implementation @register_model_architecture( "fb_levenshtein_transformer", "fb_levenshtein_transformer_wmt_en_de_big" ) def levenshtein_transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) levenshtein_transformer_vaswani_wmt_en_de_big(args)
bart_ls-main
fairseq-py/fairseq/models/fb_levenshtein_transformer.py
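The deletion step above compacts surviving tokens with a masked_fill + sort reordering; this standalone sketch (assumed toy vocabulary ids) shows that trick in isolation.

# Illustration only: deleted positions get the largest sort key, so sorting the
# position indices pushes them to the end and gather() left-compacts the survivors.
import torch

pad = 1
tokens = torch.tensor([[0, 11, 12, 13, 2]])                   # <s> a b c </s>
delete = torch.tensor([[False, False, True, False, False]])   # delete "b"

max_len = tokens.size(1)
reordering = (
    torch.arange(max_len)[None, :]
    .expand_as(tokens)
    .masked_fill(delete, max_len)
    .sort(1)[1]
)
out = tokens.masked_fill(delete, pad).gather(1, reordering)
print(out)   # tensor([[ 0, 11, 13,  2,  1]])  -- "b" removed, pad appended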
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Dict, List, NamedTuple, Optional

import torch
import torch.nn as nn
from torch import Tensor

EncoderOut = NamedTuple(
    "EncoderOut",
    [
        ("encoder_out", Tensor),  # T x B x C
        ("encoder_padding_mask", Optional[Tensor]),  # B x T
        ("encoder_embedding", Optional[Tensor]),  # B x T x C
        ("encoder_states", Optional[List[Tensor]]),  # List[T x B x C]
        ("src_tokens", Optional[Tensor]),  # B x T
        ("src_lengths", Optional[Tensor]),  # B x 1
    ],
)


class FairseqEncoder(nn.Module):
    """Base class for encoders."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`
        """
        raise NotImplementedError

    def forward_torchscript(self, net_input: Dict[str, Tensor]):
        """A TorchScript-compatible version of forward.

        Encoders which use additional arguments may want to override
        this method for TorchScript compatibility.
        """
        if torch.jit.is_scripting():
            return self.forward(
                src_tokens=net_input["src_tokens"],
                src_lengths=net_input["src_lengths"],
            )
        else:
            return self.forward_non_torchscript(net_input)

    @torch.jit.unused
    def forward_non_torchscript(self, net_input: Dict[str, Tensor]):
        encoder_input = {
            k: v for k, v in net_input.items() if k != "prev_output_tokens"
        }
        return self.forward(**encoder_input)

    def reorder_encoder_out(self, encoder_out, new_order):
        """
        Reorder encoder output according to `new_order`.

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order

        Returns:
            `encoder_out` rearranged according to `new_order`
        """
        raise NotImplementedError

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return 1e6  # an arbitrary large number

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old state dicts to work with newer code."""
        return state_dict

    def set_num_updates(self, num_updates):
        """State from trainer to pass along to model at every update."""

        def _apply(m):
            if hasattr(m, "set_num_updates") and m != self:
                m.set_num_updates(num_updates)

        self.apply(_apply)
bart_ls-main
fairseq-py/fairseq/models/fairseq_encoder.py
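A minimal illustration of the reorder_encoder_out contract described above: during beam search the cached encoder state is gathered along the batch dimension with index_select; the tensors here are made-up stand-ins.

# Illustration only: what a concrete reorder_encoder_out implementation typically does.
import torch

encoder_out = torch.arange(12.0).view(4, 3)     # pretend (batch=4, feat=3) encoder state
new_order = torch.tensor([2, 2, 0, 0])          # beams 0/1 now track sentence 2, beams 2/3 sentence 0
reordered = encoder_out.index_select(0, new_order)
assert torch.equal(reordered[0], encoder_out[2])
assert torch.equal(reordered[3], encoder_out[0])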
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import os import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.models import ( CompositeEncoder, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( DownsampledMultiHeadAttention, FairseqDropout, GradMultiply, LayerNorm, LearnedPositionalEmbedding, LinearizedConvolution, ) logger = logging.getLogger(__name__) @register_model("fconv_self_att") class FConvModelSelfAtt(FairseqEncoderDecoderModel): @classmethod def hub_models(cls): return { "conv.stories.pretrained": { "path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz", "checkpoint_file": "pretrained_checkpoint.pt", "tokenizer": "nltk", }, "conv.stories": { "path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz", "checkpoint_file": "fusion_checkpoint.pt", "tokenizer": "nltk", "pretrained": "True", "pretrained_checkpoint": "./pretrained_checkpoint.pt", }, # Test set containing dictionaries "data.stories": "https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2", } def __init__(self, encoder, decoder, pretrained_encoder=None): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum( layer is not None for layer in decoder.attention ) self.pretrained_encoder = pretrained_encoder if self.pretrained_encoder is None: encoders = {"encoder": encoder} else: encoders = {"encoder": encoder, "pretrained": self.pretrained_encoder} # for fusion model, CompositeEncoder contains both pretrained and training encoders # these are forwarded and then combined in the decoder self.encoder = CompositeEncoder(encoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5') parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention') parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention') parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]') parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention') parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]') parser.add_argument('--gated-attention', type=str, 
metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]') parser.add_argument('--downsample', type=str, metavar='EXPR', help='Use downsampling in self-attention [True, ...]') parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model') parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" trained_encoder, trained_decoder = None, None pretrained = eval(args.pretrained) if pretrained: logger.info("loading pretrained model") if not os.path.exists(args.pretrained_checkpoint): new_pretrained_checkpoint = os.path.join( args.data, args.pretrained_checkpoint ) if os.path.exists(new_pretrained_checkpoint): args.pretrained_checkpoint = new_pretrained_checkpoint trained_model = checkpoint_utils.load_model_ensemble( filenames=[args.pretrained_checkpoint], task=task, )[0][0] trained_decoder = list(trained_model.children())[1] trained_encoder = list(trained_model.children())[0] # freeze pretrained model for param in trained_decoder.parameters(): param.requires_grad = False for param in trained_encoder.parameters(): param.requires_grad = False encoder = FConvEncoder( task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads, ) decoder = FConvDecoder( task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder, ) model = FConvModelSelfAtt(encoder, decoder, trained_encoder) return model @property def pretrained(self): return self.pretrained_encoder is not None class FConvEncoder(FairseqEncoder): """Convolutional encoder""" def __init__( self, dictionary, embed_dim=512, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, attention=False, attention_nheads=1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] 
and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout) ) self.attention.append( SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions for proj, conv, attention in zip( self.projections, self.convolutions, self.attention ): residual = x if proj is None else proj(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if attention is not None: x = attention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5) return { "encoder_out": (x, y), "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = tuple( eo.index_select(0, new_order) for eo in encoder_out["encoder_out"] ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) if "pretrained" in encoder_out: encoder_out["pretrained"]["encoder_out"] = tuple( eo.index_select(0, new_order) for eo in encoder_out["pretrained"]["encoder_out"] ) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions @with_incremental_state class FConvDecoder(FairseqDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 8, attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([2])) self.pretrained = pretrained self.pretrained_decoder = trained_decoder self.dropout_module = FairseqDropout( dropout, 
module_name=self.__class__.__name__ ) self.need_attn = True in_channels = convolutions[0][0] def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) selfattention = expand_bool_array(selfattention) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError( "Attention is expected to be a list of booleans of " "length equal to the number of layers." ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, padding_idx, ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.selfattention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( DownsampledMultiHeadAttention( out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False, ) if attention[i] else None ) self.attproj.append( Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None ) self.selfattention.append( SelfAttention( out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample, ) if selfattention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, out_embed_dim) self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) # model fusion if self.pretrained: # independent gates are learned from the concatenated input self.gate1 = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid() ) self.gate2 = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid() ) # pretrained and trained models are joined self.joining = nn.Sequential( Linear(out_embed_dim * 2, out_embed_dim * 2), LayerNorm(out_embed_dim * 2), nn.GLU(), Linear(out_embed_dim, out_embed_dim * 2), LayerNorm(out_embed_dim * 2), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim), ) # pretrained model contains an output layer that is nhid -> vocab size # but the models are combined in their hidden state # the hook stores the output of the pretrained model forward self.pretrained_outputs = {} def save_output(): def hook(a, b, output): self.pretrained_outputs["out"] = output return hook self.pretrained_decoder.fc2.register_forward_hook(save_output()) def forward(self, prev_output_tokens, encoder_out): trained_encoder_out = encoder_out["pretrained"] if self.pretrained else None encoder_out = encoder_out["encoder"]["encoder_out"] encoder_a, encoder_b = self._split_encoder_out(encoder_out) # embed positions positions = self.embed_positions(prev_output_tokens) # embed tokens and positions x = self.embed_tokens(prev_output_tokens) + positions x = self.dropout_module(x) target_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions avg_attn_scores = None for proj, conv, attention, selfattention, attproj in zip( self.projections, self.convolutions, 
self.attention, self.selfattention, self.attproj, ): residual = x if proj is None else proj(x) x = self.dropout_module(x) x = conv(x) x = F.glu(x, dim=2) # attention if attention is not None: r = x x, attn_scores = attention( attproj(x) + target_embedding, encoder_a, encoder_b ) x = x + r if not self.training and self.need_attn: if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) if selfattention is not None: x = selfattention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(0, 1) # project back to size of vocabulary x = self.fc2(x) x = self.dropout_module(x) if not self.pretrained: x = self.fc3(x) # fusion gating if self.pretrained: trained_x, _ = self.pretrained_decoder.forward( prev_output_tokens, trained_encoder_out ) y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1) gate1 = self.gate1(y) gate2 = self.gate2(y) gated_x1 = gate1 * x gated_x2 = gate2 * self.pretrained_outputs["out"] fusion = torch.cat([gated_x1, gated_x2], dim=-1) fusion = self.joining(fusion) fusion_output = self.fc3(fusion) return fusion_output, avg_attn_scores else: return x, avg_attn_scores def max_positions(self): """Maximum output length supported by the decoder.""" return self.embed_positions.max_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _split_encoder_out(self, encoder_out): """Split and transpose encoder outputs.""" # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(0, 1).contiguous() encoder_b = encoder_b.transpose(0, 1).contiguous() result = (encoder_a, encoder_b) return result class SelfAttention(nn.Module): def __init__( self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False, ): super().__init__() self.attention = DownsampledMultiHeadAttention( out_channels, embed_dim, num_heads, dropout=0, bias=True, project_input=project_input, gated=gated, downsample=downsample, ) self.in_proj_q = Linear(out_channels, embed_dim) self.in_proj_k = Linear(out_channels, embed_dim) self.in_proj_v = Linear(out_channels, embed_dim) self.ln = LayerNorm(out_channels) def forward(self, x): residual = x query = self.in_proj_q(x) key = self.in_proj_k(x) value = self.in_proj_v(x) x, _ = self.attention( query, key, value, mask_future_timesteps=True, use_scalar_bias=True ) return self.ln(x + residual) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) m.weight.data.normal_(0, 0.1) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) m.weight.data.normal_(0, 0.1) return m def Linear(in_features, out_features, dropout=0.0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return m def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import 
ConvTBC

    m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
    std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
    m.weight.data.normal_(mean=0, std=std)
    m.bias.data.zero_()
    return m


@register_model_architecture("fconv_self_att", "fconv_self_att")
def base_architecture(args):
    args.dropout = getattr(args, "dropout", 0.1)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 3")
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 8")
    args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
    args.decoder_attention = getattr(args, "decoder_attention", "True")
    args.self_attention = getattr(args, "self_attention", "False")
    args.encoder_attention = getattr(args, "encoder_attention", "False")
    args.multihead_attention_nheads = getattr(args, "multihead_attention_nheads", 1)
    args.multihead_self_attention_nheads = getattr(
        args, "multihead_self_attention_nheads", 1
    )
    args.encoder_attention_nheads = getattr(args, "encoder_attention_nheads", 1)
    args.project_input = getattr(args, "project_input", "False")
    args.gated_attention = getattr(args, "gated_attention", "False")
    args.downsample = getattr(args, "downsample", "False")
    args.pretrained_checkpoint = getattr(args, "pretrained_checkpoint", "")
    args.pretrained = getattr(args, "pretrained", "False")


@register_model_architecture("fconv_self_att", "fconv_self_att_wp")
def fconv_self_att_wp(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
    args.encoder_layers = getattr(
        args, "encoder_layers", "[(128, 3)] * 2 + [(512,3)] * 1"
    )
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
    args.decoder_layers = getattr(
        args, "decoder_layers", "[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1"
    )
    args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
    args.self_attention = getattr(args, "self_attention", "True")
    args.multihead_self_attention_nheads = getattr(
        args, "multihead_self_attention_nheads", 4
    )
    args.project_input = getattr(args, "project_input", "True")
    args.gated_attention = getattr(args, "gated_attention", "True")
    args.downsample = getattr(args, "downsample", "True")
    base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/fconv_self_att.py
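# Usage sketch (illustrative, not part of the original file above): how the
# string-encoded layer specs and boolean flags used by fconv_self_att
# (e.g. --encoder-layers "[(512, 3)] * 3", --self-attention "False") expand.
# The helper mirrors the in-file expand_bool_array logic; all names below are
# local to this sketch.

def _expand_bool_array(val, num_layers):
    # expand a single bool into one flag per convolutional layer
    if isinstance(val, bool):
        return [val] * num_layers
    return val


if __name__ == "__main__":
    convolutions = eval("[(512, 3)] * 3")  # -> [(512, 3), (512, 3), (512, 3)]
    attention = _expand_bool_array(eval("False"), len(convolutions))
    print(convolutions)  # [(512, 3), (512, 3), (512, 3)]
    print(attention)     # [False, False, False]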
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.fconv import FConvDecoder from fairseq.utils import safe_hasattr @register_model("fconv_lm") class FConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-layers", type=str, metavar="EXPR", help="decoder layers [(dim, kernel_size), ...]", ) parser.add_argument( "--decoder-out-embed-dim", type=int, metavar="N", help="decoder output embedding dimension", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. " "Must be used with adaptive_loss criterion", ) parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) parser.add_argument( "--decoder-attention", type=str, metavar="EXPR", help="decoder attention [True, ...]", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if safe_hasattr(args, "max_target_positions") and not safe_hasattr( args, "tokens_per_sample" ): args.tokens_per_sample = args.max_target_positions decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.tokens_per_sample, share_embed=False, positional_embeddings=False, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), adaptive_softmax_dropout=args.adaptive_softmax_dropout, ) return FConvLanguageModel(decoder) @register_model_architecture("fconv_lm", "fconv_lm") def base_lm_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13") args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) @register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103") def fconv_lm_dauphin_wikitext103(args): layers = "[(850, 6)] * 3" layers += " + [(850, 1)] * 1" layers += " + [(850, 5)] * 4" layers += " + [(850, 1)] * 1" layers += " + [(850, 4)] * 3" layers += " + [(1024, 4)] * 1" layers += " + [(2048, 4)] * 1" args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280) args.decoder_layers = getattr(args, "decoder_layers", layers) args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,20000,200000" ) base_lm_architecture(args) @register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw") def fconv_lm_dauphin_gbw(args): layers = 
"[(512, 5)]" layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3" layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3" layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6" layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]" args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_layers = getattr(args, "decoder_layers", layers) args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) base_lm_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/fconv_lm.py
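# Usage sketch (illustrative): the architecture functions in fconv_lm.py fill
# in defaults with getattr(args, name, default), so a value already present on
# the parsed args wins over the architecture preset. A toy stand-in with a
# made-up architecture function:

import argparse


def _toy_architecture(args):
    # only assign the preset when the attribute is missing from args
    args.dropout = getattr(args, "dropout", 0.1)
    args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13")


if __name__ == "__main__":
    args = argparse.Namespace(dropout=0.3)  # user override survives
    _toy_architecture(args)
    print(args.dropout)         # 0.3
    print(args.decoder_layers)  # [(1268, 4)] * 13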
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.lstm import Embedding, LSTMDecoder DEFAULT_MAX_TARGET_POSITIONS = 1e5 @register_model("lstm_lm") class LSTMLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion') parser.add_argument('--residuals', default=False, action='store_true', help='applying residuals between LSTM layers') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if getattr(args, "max_target_positions", None) is not None: max_target_positions = args.max_target_positions else: max_target_positions = getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim ) if args.share_decoder_input_output_embed: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError( "--share-decoder-input-output-embeddings requires a joint dictionary" ) if args.decoder_embed_dim != args.decoder_out_embed_dim: raise ValueError( "--share-decoder-input-output-embeddings requires " "--decoder-embed-dim to match --decoder-out-embed-dim" ) decoder = LSTMDecoder( dictionary=task.dictionary, embed_dim=args.decoder_embed_dim, 
            hidden_size=args.decoder_hidden_size,
            out_embed_dim=args.decoder_out_embed_dim,
            num_layers=args.decoder_layers,
            dropout_in=args.decoder_dropout_in,
            dropout_out=args.decoder_dropout_out,
            attention=False,  # decoder-only language model doesn't support attention
            encoder_output_units=0,
            pretrained_embed=pretrained_decoder_embed,
            share_input_output_embed=args.share_decoder_input_output_embed,
            adaptive_softmax_cutoff=(
                utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
                if args.criterion == "adaptive_loss"
                else None
            ),
            max_target_positions=max_target_positions,
            residuals=args.residuals,
        )

        return cls(decoder)


@register_model_architecture("lstm_lm", "lstm_lm")
def base_architecture(args):
    args.dropout = getattr(args, "dropout", 0.1)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    args.decoder_hidden_size = getattr(
        args, "decoder_hidden_size", args.decoder_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 1)
    args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
    args.decoder_attention = getattr(args, "decoder_attention", "0")
    args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
    args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
    args.share_decoder_input_output_embed = getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.adaptive_softmax_cutoff = getattr(
        args, "adaptive_softmax_cutoff", "10000,50000,200000"
    )
    args.residuals = getattr(args, "residuals", False)
bart_ls-main
fairseq-py/fairseq/models/lstm_lm.py
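# Usage sketch (simplified stand-in): build_model in lstm_lm.py loads
# pretrained decoder embeddings via fairseq's utils.parse_embedding /
# utils.load_embedding. The snippet below shows the same idea with plain
# PyTorch on a toy whitespace-separated embedding file ("word v1 v2 ...");
# the file path and tiny vocab are made up for illustration.

import torch
import torch.nn as nn


def load_text_embeddings(path, vocab, embed_dim, padding_idx=0):
    emb = nn.Embedding(len(vocab), embed_dim, padding_idx=padding_idx)
    with open(path) as f:
        for line in f:
            pieces = line.rstrip().split(" ")
            word, vector = pieces[0], [float(v) for v in pieces[1:]]
            if word in vocab and len(vector) == embed_dim:
                with torch.no_grad():
                    emb.weight[vocab[word]] = torch.tensor(vector)
    return emb


if __name__ == "__main__":
    vocab = {"<pad>": 0, "hello": 1, "world": 2}
    with open("/tmp/toy_vectors.txt", "w") as f:
        f.write("hello 0.1 0.2 0.3\nworld 0.4 0.5 0.6\n")
    emb = load_text_embeddings("/tmp/toy_vectors.txt", vocab, embed_dim=3)
    print(emb.weight[1])  # tensor([0.1000, 0.2000, 0.3000], ...)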
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict from fairseq import utils from fairseq.models import ( FairseqMultiModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture, ) from fairseq.utils import safe_hasattr @register_model("multilingual_transformer") class MultilingualTransformerModel(FairseqMultiModel): """Train Transformer models for multiple language pairs simultaneously. Requires `--task multilingual_translation`. We inherit all arguments from TransformerModel and assume that all language pairs use a single Transformer architecture. In addition, we provide several options that are specific to the multilingual setting. Args: --share-encoder-embeddings: share encoder embeddings across all source languages --share-decoder-embeddings: share decoder embeddings across all target languages --share-encoders: share all encoder params (incl. embeddings) across all source languages --share-decoders: share all decoder params (incl. embeddings) across all target languages """ def __init__(self, encoders, decoders): super().__init__(encoders, decoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--share-encoder-embeddings", action="store_true", help="share encoder embeddings across languages", ) parser.add_argument( "--share-decoder-embeddings", action="store_true", help="share decoder embeddings across languages", ) parser.add_argument( "--share-encoders", action="store_true", help="share encoders across languages", ) parser.add_argument( "--share-decoders", action="store_true", help="share decoders across languages", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" from fairseq.tasks.multilingual_translation import MultilingualTranslationTask assert isinstance(task, MultilingualTranslationTask) # make sure all arguments are present in older models base_multilingual_architecture(args) if not safe_hasattr(args, "max_source_positions"): args.max_source_positions = 1024 if not safe_hasattr(args, "max_target_positions"): args.max_target_positions = 1024 src_langs = [lang_pair.split("-")[0] for lang_pair in task.model_lang_pairs] tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.model_lang_pairs] if args.share_encoders: args.share_encoder_embeddings = True if args.share_decoders: args.share_decoder_embeddings = True def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb # build shared embeddings (if applicable) shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None if args.share_all_embeddings: if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) shared_encoder_embed_tokens = 
FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=task.langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) shared_decoder_embed_tokens = shared_encoder_embed_tokens args.share_decoder_input_output_embed = True else: if args.share_encoder_embeddings: shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=src_langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path, ) if args.share_decoder_embeddings: shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings( dicts=task.dicts, langs=tgt_langs, embed_dim=args.decoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.decoder_embed_path, ) # encoders/decoders for each language lang_encoders, lang_decoders = {}, {} def get_encoder(lang): if lang not in lang_encoders: if shared_encoder_embed_tokens is not None: encoder_embed_tokens = shared_encoder_embed_tokens else: encoder_embed_tokens = build_embedding( task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path, ) lang_encoders[lang] = cls._get_module_class( True, args, task.dicts[lang], encoder_embed_tokens, src_langs ) return lang_encoders[lang] def get_decoder(lang): if lang not in lang_decoders: if shared_decoder_embed_tokens is not None: decoder_embed_tokens = shared_decoder_embed_tokens else: decoder_embed_tokens = build_embedding( task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path, ) lang_decoders[lang] = cls._get_module_class( False, args, task.dicts[lang], decoder_embed_tokens, tgt_langs ) return lang_decoders[lang] # shared encoders/decoders (if applicable) shared_encoder, shared_decoder = None, None if args.share_encoders: shared_encoder = get_encoder(src_langs[0]) if args.share_decoders: shared_decoder = get_decoder(tgt_langs[0]) encoders, decoders = OrderedDict(), OrderedDict() for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs): encoders[lang_pair] = ( shared_encoder if shared_encoder is not None else get_encoder(src) ) decoders[lang_pair] = ( shared_decoder if shared_decoder is not None else get_decoder(tgt) ) return MultilingualTransformerModel(encoders, decoders) @classmethod def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs): module_class = TransformerEncoder if is_encoder else TransformerDecoder return module_class(args, lang_dict, embed_tokens) def load_state_dict(self, state_dict, strict=True, model_cfg=None): state_dict_subset = state_dict.copy() for k, _ in state_dict.items(): assert k.startswith("models.") lang_pair = k.split(".")[1] if lang_pair not in self.models: del state_dict_subset[k] super().load_state_dict(state_dict_subset, strict=strict, model_cfg=model_cfg) @register_model_architecture("multilingual_transformer", "multilingual_transformer") def base_multilingual_architecture(args): base_architecture(args) args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False) args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False) args.share_encoders = getattr(args, "share_encoders", False) args.share_decoders = getattr(args, "share_decoders", False) @register_model_architecture( "multilingual_transformer", "multilingual_transformer_iwslt_de_en" ) def multilingual_transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) 
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    base_multilingual_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/multilingual_transformer.py
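# Usage sketch (illustrative): build_model in multilingual_transformer.py
# lazily creates one encoder/decoder per language and reuses a single shared
# module when --share-encoders / --share-decoders is set. A minimal version of
# that caching pattern with plain PyTorch modules (languages and sizes below
# are made up):

import torch.nn as nn


def build_per_lang_modules(langs, share=False, embed_dim=8):
    cache = {}

    def get_module(lang):
        # build a module only the first time a language is requested
        if lang not in cache:
            cache[lang] = nn.Linear(embed_dim, embed_dim)
        return cache[lang]

    shared = get_module(langs[0]) if share else None
    return {
        lang: (shared if shared is not None else get_module(lang)) for lang in langs
    }


if __name__ == "__main__":
    unshared = build_per_lang_modules(["de", "en", "fr"], share=False)
    shared = build_per_lang_modules(["de", "en", "fr"], share=True)
    print(unshared["de"] is unshared["en"])  # False
    print(shared["de"] is shared["en"])      # True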
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, Linear from fairseq.modules import ( AdaptiveSoftmax, CharacterTokenEmbedder, FairseqDropout, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, TransformerDecoderLayer, ) from fairseq.modules.character_token_embedder import CHAR_PAD_IDX from fairseq.modules.fb_bidirectional_multihead_attention import ( BidirectionalMultiheadSelfAttention, ) logger = logging.getLogger(__name__) @register_model("bi_transformer_lm") class BiTransformerLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", default=0.1, type=float, metavar="D", help="dropout probability", ) parser.add_argument( "--attention-dropout", default=0.0, type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion", ) parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--no-token-positional-embeddings", action="store_true", help="if set, disables positional embeddings (outside self attention)", ) parser.add_argument( "--character-embeddings", action="store_true", help="if set, uses character embedding convolutions to produce token embeddings", ) parser.add_argument( "--character-filters", type=str, metavar="LIST", default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", help="size of character embeddings", ) parser.add_argument( "--character-embedding-dim", type=int, metavar="N", default=4, help="size of character embeddings", ) parser.add_argument( "--char-embedder-highway-layers", type=int, metavar="N", default=2, help="number of highway layers for character token embeddder", ) parser.add_argument( "--linear-final-layer", action="store_true", help="if set, uses a simple linear layer for the final prediction that combines the " "forward and backward tower instead of an attentional layer", ) parser.add_argument( "--linear-final-layer-bias", action="store_true", help="if set, has a bias on the final linear layer", ) parser.add_argument( "--no-bias-kv", action="store_true", help="if set, pads attn with zero instead of adding a learnable bias kv", ) parser.add_argument( "--max-char-len", type=int, metavar="N", default=50, help="if set and char_inputs, max characters to use per token", ) # below two arguments are only used during inference / finetuning parser.add_argument( "--char-inputs", action="store_true", help="if set, model takes character ids as input", ) parser.add_argument( "--unmask-curr-state", action="store_true", help="if set, there will be no mask for current state", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_bi_lm_architecture(args) if not hasattr(args, "max_source_positions"): args.max_source_positions = args.tokens_per_sample if not getattr(args, "max_target_positions", None): args.max_target_positions = args.tokens_per_sample if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, max_char_len=args.max_char_len, char_inputs=args.char_inputs, ) else: embed_tokens = Embedding( len(task.dictionary), args.decoder_embed_dim, task.dictionary.pad() ) logger.info(args) decoder = BiTransformerDecoder(args, task.output_dictionary, embed_tokens) return BiTransformerLanguageModel(decoder) @property def supported_targets(self): return {"self", "past", "future"} def get_layers_by_depth_for_fine_tuning(self): decoder_layers = self.decoder.get_layers_by_depth_for_fine_tuning() return [ {"decoder.%s" % name: layer for name, layer in layers.items()} for layers in decoder_layers ] class BiTransformerClassificationHead(nn.Module): def __init__(self, embed_dim, num_classes): super().__init__() self.proj = Linear(2 * embed_dim, num_classes) def forward(self, features, padding_mask=None, **kwargs): assert features.size(1) >= 2 # B x T x C # extract endpoints for classification x = features if x.size(1) == 2: x = x.view(x.size(0), -1) else: left = x[:, 0, :] 
if padding_mask is None: right = x[:, -1, :] else: eos_idx = (~padding_mask).int().sum(dim=1) - 1 eos_idx += (torch.arange(eos_idx.size(0)) * x.size(1)).type_as(eos_idx) right = x.contiguous().view(-1, x.size(-1))[eos_idx] x = torch.cat([left, right], dim=1) return self.proj(x) class BiTransformerDecoder(FairseqDecoder): """Transformer decoder.""" def __init__(self, args, dictionary, embed_tokens, classification_head=None): super().__init__(dictionary) self.onnx_trace = False self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.share_input_output_embed = args.share_decoder_input_output_embed self.embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.self_target = args.self_target self.future_target = args.future_target self.past_target = args.past_target self.char_inputs = args.char_inputs self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(self.embed_dim) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, self.embed_dim, self.padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) self.forward_layers = nn.ModuleList( [ TransformerDecoderLayer( args, no_encoder_attn=True, add_bias_kv=not args.no_bias_kv, add_zero_attn=args.no_bias_kv, ) for _ in range(args.decoder_layers) ] ) self.backward_layers = nn.ModuleList( [ TransformerDecoderLayer( args, no_encoder_attn=True, add_bias_kv=not args.no_bias_kv, add_zero_attn=args.no_bias_kv, ) for _ in range(args.decoder_layers) ] ) self.full_attn_layer = None self.full_linear_layer = None if self.self_target: if args.linear_final_layer: self.full_linear_layer = Linear( self.embed_dim * 2, self.embed_dim, args.linear_final_layer_bias ) else: self.full_attn_layer = BidirectionalTransformerDecoderLayer(args) self.load_softmax = not getattr(args, "remove_head", False) self.embed_out = None self.adaptive_softmax = None self.classification_head = classification_head if self.load_softmax: if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), args.decoder_embed_dim, utils.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), self.embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=self.embed_dim ** -0.5) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward(self, src_tokens, **kwargs): x, extra = self.extract_features(src_tokens, **kwargs) x = self.output_layer(x) return x, extra def extract_features(self, src_tokens, **kwargs): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, seq_len, embed_dim)` - a dictionary of additional data, where 'attn' contains the attention over the final states (concatenated from forward and backward towers) and 'inner_states' is a list of internal model states used to compute the predictions (for example to use in ELMO). The first element is the token embeddings (with the positional embeddings added). The next n elements are tuples of the hidden states for the forward and backward towers. The last element is the output of the final full layer on top of the towers and would be equivalent to the logits if adaptive softmax is used. 
NOTE: unlike the logits, the format for all hidden states is T x B x C """ # compute padding mask if self.char_inputs: # casting to byte for onnx padding_mask = src_tokens[:, :, 0].eq(CHAR_PAD_IDX).bool() else: padding_mask = src_tokens.eq(self.padding_idx).bool() # embed positions positional_input = self.padding_idx * padding_mask.long() positions = ( self.embed_positions(positional_input) if self.embed_positions is not None else None ) # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C fwd_x = bwd_x = x.transpose(0, 1) inner_states = [fwd_x] future_mask = self.buffered_future_mask(fwd_x) past_mask = self.buffered_past_mask(bwd_x) if not padding_mask.any(): padding_mask = None # decoder layers for fwd, back in zip(self.forward_layers, self.backward_layers): fwd_x, _, _ = fwd( fwd_x, self_attn_mask=future_mask, self_attn_padding_mask=padding_mask, ) bwd_x, _, _ = back( bwd_x, self_attn_mask=past_mask, self_attn_padding_mask=padding_mask, ) inner_states.extend((fwd_x, bwd_x)) if self.self_target: if self.full_attn_layer is not None: x, attn = self.full_attn_layer( fwd_x, bwd_x, padding_mask, ) inner_states.append(x) elif self.full_linear_layer is not None: zeros = x.new_zeros(1, fwd_x.size(1), fwd_x.size(2)) fwd_x = torch.cat([zeros, fwd_x[:-1]], dim=0) bwd_x = torch.cat([bwd_x[1:], zeros], dim=0) x = torch.cat([fwd_x, bwd_x], dim=-1) x = self.full_linear_layer(x) attn = None inner_states.append(x) x = [x] else: x = [] attn = None if self.future_target: x.append(fwd_x) if self.past_target: x.append(bwd_x) # T x B x C -> B x T x C x = [z.transpose(0, 1) for z in x] if len(x) == 1: x = x[0] return x, {"attn": attn, "inner_states": inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.classification_head: return self.classification_head(features, **kwargs) x = features if not isinstance(x, list): x = [x] if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed and hasattr(self.embed_tokens, "weight"): x = [F.linear(x, self.embed_tokens.weight) for x in x] elif self.embed_out is not None: x = [F.linear(x, self.embed_out) for x in x] if len(x) == 1: x = x[0] return x def buffered_future_mask(self, tensor): dim = tensor.size(0) if self.onnx_trace: a = torch._dim_arange(tensor, 0).unsqueeze(0).repeat(dim, 1) b = torch._dim_arange(tensor, 0).unsqueeze(1).repeat(1, dim) future_mask = a > b future_mask_neg_inf = torch.where( future_mask, torch.Tensor([float("-Inf")]), torch.Tensor([0]) ).type_as(tensor) return future_mask_neg_inf if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) if self._future_mask.size(0) < dim: self._future_mask = torch.triu( utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def buffered_past_mask(self, tensor): dim = tensor.size(0) if self.onnx_trace: a = torch._dim_arange(tensor, 0).unsqueeze(0).repeat(dim, 1) b = torch._dim_arange(tensor, 0).unsqueeze(1).repeat(1, dim) past_mask = a < b past_mask_neg_inf = torch.where( past_mask, torch.Tensor([float("-Inf")]), torch.Tensor([0]) ).type_as(tensor) return past_mask_neg_inf if ( not hasattr(self, "_past_mask") or self._past_mask is None or self._past_mask.device != tensor.device ): self._past_mask = 
torch.tril( utils.fill_with_neg_inf(tensor.new(dim, dim)), -1 ) if self._past_mask.size(0) < dim: self._past_mask = torch.tril( utils.fill_with_neg_inf(self._past_mask.resize_(dim, dim)), -1 ) return self._past_mask[:dim, :dim] def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def upgrade_state_dict_named(self, state_dict, name): if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): state_dict[name + ".embed_positions._float_tensor"] = torch.FloatTensor(1) if not self.load_softmax: for k in list(state_dict.keys()): if k.startswith(name + ".adaptive_softmax.") or k.startswith( name + ".embed_out" ): del state_dict[k] return state_dict def get_layers_by_depth_for_fine_tuning(self): """ Returns a list of module dictionaries, where each module dictionary (name -> module) contains modules at the same "depth" in the model. The first module dictionary corresponds to the lowest level layer (embeddings) and the last corresponds to the highest level layer. """ emb_layers = self._module_dict(("embed_tokens", "embed_positions")) fwd_bwd_layers = [ {"forward_layers.%d" % i: fwd, "backward_layers.%d" % i: bwd} for i, (fwd, bwd) in enumerate( zip(self.forward_layers, self.backward_layers) ) ] top_layers = self._module_dict(("full_attn_layer", "full_linear_layer")) return [emb_layers] + fwd_bwd_layers + [top_layers] def _module_dict(self, attributes): return { attr: getattr(self, attr) for attr in attributes if getattr(self, attr, None) is not None } class BidirectionalTransformerDecoderLayer(nn.Module): """Decoder layer block.""" def __init__(self, args): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = BidirectionalMultiheadSelfAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, mask_curr_state=not args.unmask_curr_state, ) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = args.decoder_normalize_before self.fwd_layer_norm = LayerNorm(self.embed_dim, export=args.char_inputs) self.bwd_layer_norm = LayerNorm(self.embed_dim, export=args.char_inputs) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=args.char_inputs) def forward(self, fwd_x, bwd_x, key_padding_mask): fwd_x = self.maybe_layer_norm(self.fwd_layer_norm, fwd_x, before=True) bwd_x = self.maybe_layer_norm(self.bwd_layer_norm, bwd_x, before=True) x, attn = self.self_attn( fwd_x=fwd_x, bwd_x=bwd_x, key_padding_mask=key_padding_mask, ) x = self.dropout_module(x) x = self.maybe_layer_norm(self.fwd_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) 
return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x @register_model_architecture("bi_transformer_lm", "bi_transformer_lm") def base_bi_lm_architecture(args): # by default bi-directional language models predict the current token (self) args.self_target = getattr( args, "self_target", not getattr(args, "exclude_self_target", False) ) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr( args, "adaptive_softmax_dropout", args.dropout ) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.character_embeddings = getattr(args, "character_embeddings", False) args.character_filters = getattr( args, "character_filters", "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", ) args.character_embedding_dim = getattr(args, "character_embedding_dim", 128) args.char_embedder_highway_layers = getattr(args, "char_embedder_highway_layers", 2) args.linear_final_layer = getattr(args, "linear_final_layer", False) args.linear_final_layer_bias = getattr(args, "linear_final_layer_bias", False) args.future_target = getattr(args, "future_target", False) args.past_target = getattr(args, "past_target", False) args.no_bias_kv = getattr(args, "no_bias_kv", False) args.char_inputs = getattr(args, "char_inputs", False) args.unmask_curr_state = getattr(args, "unmask_curr_state", False) args.max_char_len = getattr(args, "max_char_len", 50) # otherwise model training is unstable args.decoder_normalize_before = True @register_model_architecture("bi_transformer_lm", "bi_transformer_lm_big") def bi_transformer_lm_big(args): args.self_target = True args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) base_bi_lm_architecture(args) @register_model_architecture("bi_transformer_lm", "bi_transformer_lm_bpe_large") def bi_transformer_lm_bpe_large(args): args.self_target = True # TODO support query formulation args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32) base_bi_lm_architecture(args) @register_model_architecture("bi_transformer_lm", "bi_transformer_lm_big_non_cloze") def bi_transformer_lm_big_non_cloze(args): bi_transformer_lm_big(args) args.self_target = False args.future_target = True args.past_target = True @register_model_architecture("bi_transformer_lm", "bi_transformer_lm_huge") def bi_transformer_lm_huge(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 2048) # 2.6B params args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 8192) args.decoder_layers = getattr(args, "decoder_layers", 24) args.decoder_attention_heads = 
getattr(args, "decoder_attention_heads", 32) args.activation_fn = getattr(args, "activation_fn", "gelu_fast") base_bi_lm_architecture(args) @register_model_architecture("bi_transformer_lm", "bi_transformer_lm_huge_relu") def bi_transformer_lm_huge_relu(args): args.activation_fn = getattr(args, "activation_fn", "relu") bi_transformer_lm_huge(args)
bart_ls-main
fairseq-py/fairseq/models/fb_bidirectional_transformer.py
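# Usage sketch (illustrative): the forward / backward towers in
# fb_bidirectional_transformer.py mask out future and past timesteps with
# upper- / lower-triangular matrices of -inf (see buffered_future_mask and
# buffered_past_mask, which add caching on top). A standalone version of the
# two masks:

import torch


def future_mask(dim):
    # position i may not attend to positions > i
    return torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)


def past_mask(dim):
    # position i may not attend to positions < i
    return torch.tril(torch.full((dim, dim), float("-inf")), diagonal=-1)


if __name__ == "__main__":
    print(future_mask(4))  # -inf strictly above the diagonal, 0 elsewhere
    print(past_mask(4))    # -inf strictly below the diagonal, 0 elsewhere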
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Dict, List, Optional, Tuple

import torch.nn as nn
from fairseq import utils
from torch import Tensor


class FairseqDecoder(nn.Module):
    """Base class for decoders."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary
        self.onnx_trace = False
        self.adaptive_softmax = None

    def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
        """
        Args:
            prev_output_tokens (LongTensor): shifted output tokens of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (dict, optional): output from the encoder, used for
                encoder-side attention

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        x, extra = self.extract_features(
            prev_output_tokens, encoder_out=encoder_out, **kwargs
        )
        x = self.output_layer(x)
        return x, extra

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        """
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        raise NotImplementedError

    def output_layer(self, features, **kwargs):
        """
        Project features to the default output size, e.g., vocabulary size.

        Args:
            features (Tensor): features returned by *extract_features*.
        """
        raise NotImplementedError

    def get_normalized_probs(
        self,
        net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
        log_probs: bool,
        sample: Optional[Dict[str, Tensor]] = None,
    ):
        """Get normalized probabilities (or log probs) from a net's output."""
        return self.get_normalized_probs_scriptable(net_output, log_probs, sample)

    # TorchScript doesn't support super() method so that the scriptable Subclass
    # can't access the base class model in Torchscript.
    # Current workaround is to add a helper function with different name and
    # call the helper function from scriptable Subclass.
    def get_normalized_probs_scriptable(
        self,
        net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
        log_probs: bool,
        sample: Optional[Dict[str, Tensor]] = None,
    ):
        """Get normalized probabilities (or log probs) from a net's output."""
        if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
            if sample is not None:
                assert "target" in sample
                target = sample["target"]
            else:
                target = None
            out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
            return out.exp_() if not log_probs else out

        logits = net_output[0]
        if log_probs:
            return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
        else:
            return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)

    def max_positions(self):
        """Maximum input length supported by the decoder."""
        return 1e6  # an arbitrary large number

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old state dicts to work with newer code."""
        return state_dict

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True
bart_ls-main
fairseq-py/fairseq/models/fairseq_decoder.py
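# Usage sketch (illustrative): a concrete decoder only needs to implement
# extract_features() and output_layer(); forward() and get_normalized_probs()
# come from the FairseqDecoder base class above. A toy subclass, assuming
# fairseq is installed and a fairseq Dictionary-like `dictionary` (with pad()
# and __len__) is available:

import torch.nn as nn
from fairseq.models import FairseqDecoder


class ToyDecoder(FairseqDecoder):
    def __init__(self, dictionary, embed_dim=16):
        super().__init__(dictionary)
        self.embed = nn.Embedding(
            len(dictionary), embed_dim, padding_idx=dictionary.pad()
        )
        self.proj = nn.Linear(embed_dim, len(dictionary))

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        # (batch, tgt_len) -> (batch, tgt_len, embed_dim), plus extras dict
        return self.embed(prev_output_tokens), {}

    def output_layer(self, features, **kwargs):
        # (batch, tgt_len, embed_dim) -> (batch, tgt_len, vocab)
        return self.proj(features)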
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import argparse import importlib import os from contextlib import ExitStack from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import merge_with_parent from hydra.core.config_store import ConfigStore from omegaconf import open_dict, OmegaConf from .composite_encoder import CompositeEncoder from .distributed_fairseq_model import DistributedFairseqModel from .fairseq_decoder import FairseqDecoder from .fairseq_encoder import FairseqEncoder from .fairseq_incremental_decoder import FairseqIncrementalDecoder from .fairseq_model import ( BaseFairseqModel, FairseqEncoderDecoderModel, FairseqEncoderModel, FairseqLanguageModel, FairseqModel, FairseqMultiModel, ) MODEL_REGISTRY = {} MODEL_DATACLASS_REGISTRY = {} ARCH_MODEL_REGISTRY = {} ARCH_MODEL_NAME_REGISTRY = {} ARCH_MODEL_INV_REGISTRY = {} ARCH_CONFIG_REGISTRY = {} __all__ = [ "BaseFairseqModel", "CompositeEncoder", "DistributedFairseqModel", "FairseqDecoder", "FairseqEncoder", "FairseqEncoderDecoderModel", "FairseqEncoderModel", "FairseqIncrementalDecoder", "FairseqLanguageModel", "FairseqModel", "FairseqMultiModel", ] def build_model(cfg: FairseqDataclass, task): model = None model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None) if not model_type and len(cfg) == 1: # this is hit if config object is nested in directory that is named after model type model_type = next(iter(cfg)) if model_type in MODEL_DATACLASS_REGISTRY: cfg = cfg[model_type] else: raise Exception( "Could not infer model type from directory. Please add _name field to indicate model type. " "Available models: " + str(MODEL_DATACLASS_REGISTRY.keys()) + " Requested model type: " + model_type ) if model_type in ARCH_MODEL_REGISTRY: # case 1: legacy models model = ARCH_MODEL_REGISTRY[model_type] elif model_type in MODEL_DATACLASS_REGISTRY: # case 2: config-driven models model = MODEL_REGISTRY[model_type] if model_type in MODEL_DATACLASS_REGISTRY: # set defaults from dataclass. note that arch name and model name can be the same dc = MODEL_DATACLASS_REGISTRY[model_type] if isinstance(cfg, argparse.Namespace): cfg = dc.from_namespace(cfg) else: cfg = merge_with_parent(dc(), cfg) else: if model_type in ARCH_CONFIG_REGISTRY: with open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack(): # this calls the different "arch" functions (like base_architecture()) that you indicate # if you specify --arch on the command line. this is only applicable to the old argparse based models # hydra models should expose different architectures via different config files # it will modify the cfg object and default parameters according to the arch ARCH_CONFIG_REGISTRY[model_type](cfg) assert model is not None, ( f"Could not infer model type from {cfg}. " "Available models: {}".format( MODEL_DATACLASS_REGISTRY.keys() ) + f" Requested model type: {model_type}" ) return model.build_model(cfg, task) def register_model(name, dataclass=None): """ New model types can be added to fairseq with the :func:`register_model` function decorator. For example:: @register_model('lstm') class LSTM(FairseqEncoderDecoderModel): (...) .. note:: All models must implement the :class:`BaseFairseqModel` interface. Typically you will extend :class:`FairseqEncoderDecoderModel` for sequence-to-sequence tasks or :class:`FairseqLanguageModel` for language modeling tasks. 
Args: name (str): the name of the model """ def register_model_cls(cls): if name in MODEL_REGISTRY: raise ValueError("Cannot register duplicate model ({})".format(name)) if not issubclass(cls, BaseFairseqModel): raise ValueError( "Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__) ) MODEL_REGISTRY[name] = cls if dataclass is not None and not issubclass(dataclass, FairseqDataclass): raise ValueError( "Dataclass {} must extend FairseqDataclass".format(dataclass) ) cls.__dataclass = dataclass if dataclass is not None: MODEL_DATACLASS_REGISTRY[name] = dataclass cs = ConfigStore.instance() node = dataclass() node._name = name cs.store(name=name, group="model", node=node, provider="fairseq") @register_model_architecture(name, name) def noop(_): pass return cls return register_model_cls def register_model_architecture(model_name, arch_name): """ New model architectures can be added to fairseq with the :func:`register_model_architecture` function decorator. After registration, model architectures can be selected with the ``--arch`` command-line argument. For example:: @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') def lstm_luong_wmt_en_de(cfg): args.encoder_embed_dim = getattr(cfg.model, 'encoder_embed_dim', 1000) (...) The decorated function should take a single argument *cfg*, which is a :class:`omegaconf.DictConfig`. The decorated function should modify these arguments in-place to match the desired architecture. Args: model_name (str): the name of the Model (Model must already be registered) arch_name (str): the name of the model architecture (``--arch``) """ def register_model_arch_fn(fn): if model_name not in MODEL_REGISTRY: raise ValueError( "Cannot register model architecture for unknown model type ({})".format( model_name ) ) if arch_name in ARCH_MODEL_REGISTRY: raise ValueError( "Cannot register duplicate model architecture ({})".format(arch_name) ) if not callable(fn): raise ValueError( "Model architecture must be callable ({})".format(arch_name) ) ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name] ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name) ARCH_CONFIG_REGISTRY[arch_name] = fn return fn return register_model_arch_fn def import_models(models_dir, namespace): for file in os.listdir(models_dir): path = os.path.join(models_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): model_name = file[: file.find(".py")] if file.endswith(".py") else file importlib.import_module(namespace + "." + model_name) # extra `model_parser` for sphinx if model_name in MODEL_REGISTRY: parser = argparse.ArgumentParser(add_help=False) group_archs = parser.add_argument_group("Named architectures") group_archs.add_argument( "--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name] ) group_args = parser.add_argument_group( "Additional command-line arguments" ) MODEL_REGISTRY[model_name].add_args(group_args) globals()[model_name + "_parser"] = parser # automatically import any Python files in the models/ directory models_dir = os.path.dirname(__file__) import_models(models_dir, "fairseq.models")
bart_ls-main
fairseq-py/fairseq/models/__init__.py
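# Usage sketch (illustrative): a stripped-down version of the registry
# mechanics in models/__init__.py -- a decorator stores the class in a dict
# keyed by model name, and per-architecture functions mutate a config in
# place. All names below are local to this sketch, not fairseq's.

TOY_MODEL_REGISTRY = {}
TOY_ARCH_REGISTRY = {}


def toy_register_model(name):
    def wrapper(cls):
        TOY_MODEL_REGISTRY[name] = cls
        return cls
    return wrapper


def toy_register_arch(model_name, arch_name):
    def wrapper(fn):
        TOY_ARCH_REGISTRY[arch_name] = (model_name, fn)
        return fn
    return wrapper


@toy_register_model("toy_lstm")
class ToyLSTM:
    pass


@toy_register_arch("toy_lstm", "toy_lstm_small")
def toy_lstm_small(cfg):
    # architecture functions only fill in values that are not set yet
    cfg.setdefault("hidden_size", 128)


if __name__ == "__main__":
    model_name, arch_fn = TOY_ARCH_REGISTRY["toy_lstm_small"]
    cfg = {}
    arch_fn(cfg)
    print(TOY_MODEL_REGISTRY[model_name], cfg)  # ToyLSTM, {'hidden_size': 128}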
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from typing import Any, Dict from fairseq import checkpoint_utils from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, base_architecture as transformer_base_architecture, ) @register_model("transformer_from_pretrained_xlm") class TransformerFromPretrainedXLMModel(TransformerModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--pretrained-xlm-checkpoint", type=str, metavar="STR", help="XLM model to use for initializing transformer encoder and/or decoder", ) parser.add_argument( "--init-encoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into decoder", ) parser.add_argument( "--init-decoder-only", action="store_true", help="if set, don't load the XLM weights and embeddings into encoder", ) @classmethod def build_model(self, args, task, cls_dictionary=MaskedLMDictionary): assert hasattr(args, "pretrained_xlm_checkpoint"), ( "You must specify a path for --pretrained-xlm-checkpoint to use " "--arch transformer_from_pretrained_xlm" ) assert isinstance(task.source_dictionary, cls_dictionary) and isinstance( task.target_dictionary, cls_dictionary ), ( "You should use a MaskedLMDictionary when using --arch " "transformer_from_pretrained_xlm because the pretrained XLM model " "was trained using data binarized with MaskedLMDictionary. " "For translation, you may want to use --task " "translation_from_pretrained_xlm" ) assert not ( getattr(args, "init_encoder_only", False) and getattr(args, "init_decoder_only", False) ), "Only one of --init-encoder-only and --init-decoder-only can be set." return super().build_model(args, task) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens) def upgrade_state_dict_with_xlm_weights( state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str ) -> Dict[str, Any]: """ Load XLM weights into a Transformer encoder or decoder model. Args: state_dict: state dict for either TransformerEncoder or TransformerDecoder pretrained_xlm_checkpoint: checkpoint to load XLM weights from Raises: AssertionError: If architecture (num layers, attention heads, etc.) does not match between the current Transformer encoder or decoder and the pretrained_xlm_checkpoint """ if not os.path.exists(pretrained_xlm_checkpoint): raise IOError("Model file not found: {}".format(pretrained_xlm_checkpoint)) state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint) xlm_state_dict = state["model"] for key in xlm_state_dict.keys(): for search_key in ["embed_tokens", "embed_positions", "layers"]: if search_key in key: subkey = key[key.find(search_key) :] assert subkey in state_dict, ( "{} Transformer encoder / decoder " "state_dict does not contain {}. 
Cannot " "load {} from pretrained XLM checkpoint " "{} into Transformer.".format( str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint ) ) state_dict[subkey] = xlm_state_dict[key] return state_dict class TransformerEncoderFromPretrainedXLM(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) if getattr(args, "init_decoder_only", False): # Don't load XLM weights for encoder if --init-decoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "encoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) class TransformerDecoderFromPretrainedXLM(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) if getattr(args, "init_encoder_only", False): # Don't load XLM weights for decoder if --init-encoder-only return assert hasattr(args, "pretrained_xlm_checkpoint"), ( "--pretrained-xlm-checkpoint must be specified to load Transformer " "decoder from pretrained XLM" ) xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights( state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint, ) self.load_state_dict(xlm_loaded_state_dict, strict=True) @register_model_architecture( "transformer_from_pretrained_xlm", "transformer_from_pretrained_xlm" ) def base_architecture(args): transformer_base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/transformer_from_pretrained_xlm.py
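The loading logic in upgrade_state_dict_with_xlm_weights hinges on matching checkpoint keys by the substrings "embed_tokens", "embed_positions" and "layers", then copying each match under the suffix that starts at the matched component. A self-contained toy illustration of that rule follows; the helper name copy_matching_keys is hypothetical, and plain strings stand in for tensors purely for readability.

def copy_matching_keys(target_state, xlm_state):
    # mirror of the substring matching used above: copy every XLM key whose
    # suffix (from the matched component onward) exists in the target
    for key, value in xlm_state.items():
        for search_key in ("embed_tokens", "embed_positions", "layers"):
            if search_key in key:
                subkey = key[key.find(search_key):]
                assert subkey in target_state, f"target is missing {subkey}"
                target_state[subkey] = value
    return target_state


target = {"embed_tokens.weight": "enc-init", "layers.0.fc1.weight": "enc-init"}
xlm = {"encoder.embed_tokens.weight": "xlm", "encoder.layers.0.fc1.weight": "xlm"}
print(copy_matching_keys(target, xlm))
# {'embed_tokens.weight': 'xlm', 'layers.0.fc1.weight': 'xlm'}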
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import signal import threading import torch import torch.nn as nn from torch.nn.parallel import DistributedDataParallel from fairseq.distributed import ( DistributedTimeoutWrapper, LegacyDistributedDataParallel, ModuleProxyWrapper, TPUDistributedDataParallel, ) logger = logging.getLogger(__name__) _GOSSIP_DISABLED = False try: import gossip except ImportError: _GOSSIP_DISABLED = True def DistributedFairseqModel(args, model, process_group, device): """ Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap process_group: the c10d process group to be used for distributed data parallel all-reduction. device: device to move model to """ assert isinstance(model, nn.Module) if args.tpu: wrapped_model = TPUDistributedDataParallel( module=model.to(device), process_group=process_group, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend in {"c10d", "pytorch_ddp"}: wrapped_model = DistributedDataParallel( module=model.to(device), device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, bucket_cap_mb=args.bucket_cap_mb, process_group=process_group, find_unused_parameters=args.find_unused_parameters, gradient_as_bucket_view=args.gradient_as_bucket_view, ) if args.ddp_comm_hook == "fp16": logger.info("enable fp16 communication hook in DDP") try: from torch.distributed.algorithms.ddp_comm_hooks import ( register_ddp_comm_hook, DDPCommHookType, ) except: logger.error( "Could not import from torch.distributed.algorithms.ddp_comm_hooks; you may need to update your pytorch version" ) raise register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, wrapped_model) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend in {"no_c10d", "legacy_ddp"}: wrapped_model = LegacyDistributedDataParallel( module=model.to(device), buffer_size=2 ** 28, process_group=process_group, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend == "slow_mo": if _GOSSIP_DISABLED: raise ImportError( "Cannot find gossip library. 
Please install from: " "github.com/facebookresearch/stochastic_gradient_push" ) # The values of slowmo_momentum below were obtained by tuning on the # En-De 16 dataset by training the transformer_wmt_en_de_large model if args.slowmo_momentum is None: if args.distributed_world_size <= 16: args.slowmo_momentum = 0.0 elif args.distributed_world_size <= 32: args.slowmo_momentum = 0.2 elif args.distributed_world_size <= 64: args.slowmo_momentum = 0.5 else: args.slowmo_momentum = 0.6 wrapped_model = gossip.GossipDataParallel( module=model.to(device), device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, nprocs_per_node=args.nprocs_per_node, slowmo_momentum=args.slowmo_momentum, localsgd=(args.slowmo_algorithm == "LocalSGD"), localsgd_frequency=args.localsgd_frequency, ) # forward missing getattr and state_dict/load_state_dict to orig model wrapped_model = ModuleProxyWrapper(wrapped_model) elif args.ddp_backend == "fully_sharded": try: from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP except ImportError: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) assert isinstance(model, FSDP), "expected model to already be wrapped in FSDP" wrapped_model = model if args.memory_efficient_fp16: wrapped_model = wrapped_model.half() if not args.cpu_offload: wrapped_model = wrapped_model.to(device=device) else: raise ValueError("Unknown --ddp-backend: " + args.ddp_backend) # kill hung distributed jobs after a timeout if getattr(args, "heartbeat_timeout", -1) > 0: wrapped_model = DistributedTimeoutWrapper( wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1) ) return wrapped_model
bart_ls-main
fairseq-py/fairseq/models/distributed_fairseq_model.py
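Most branches above finish by wrapping the data-parallel module in ModuleProxyWrapper so that attribute lookups and state_dict/load_state_dict reach the original model. Below is a minimal sketch of that forwarding behaviour, assuming ModuleProxyWrapper works as the inline comments describe; no process group is needed here because a plain container module stands in for DistributedDataParallel.

import torch.nn as nn
from fairseq.distributed import ModuleProxyWrapper


class Inner(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)

    def custom_method(self):
        return "hello from the wrapped model"


class Outer(nn.Module):  # stands in for DistributedDataParallel in this sketch
    def __init__(self, module):
        super().__init__()
        self.module = module


wrapped = ModuleProxyWrapper(Outer(Inner()))
print(wrapped.custom_method())      # missing attribute is forwarded to Inner
print(wrapped.state_dict().keys())  # Inner's keys, without a "module." prefix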
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II DEFAULT_MAX_TARGET_POSITIONS = 1024 @dataclass class TransformerLanguageModelConfig(FairseqDataclass): activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="relu", metadata={"help": "activation function to use"} ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN."} ) relu_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN."} ) decoder_embed_dim: int = field( default=512, metadata={"help": "decoder embedding dimension"} ) decoder_output_dim: int = field( default=512, metadata={"help": "decoder output dimension"} ) decoder_input_dim: int = field( default=512, metadata={"help": "decoder input dimension"} ) decoder_ffn_embed_dim: int = field( default=2048, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"}) decoder_attention_heads: int = field( default=8, metadata={"help": "num decoder attention heads"} ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"} ) no_decoder_final_norm: bool = field( default=False, metadata={"help": "don't add an extra layernorm after the last decoder block"}, ) adaptive_softmax_cutoff: Optional[str] = field( default=None, metadata={ "help": "comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion" }, ) adaptive_softmax_dropout: float = field( default=0, metadata={"help": "sets adaptive softmax dropout for the tail projections"}, ) adaptive_softmax_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings (outside self attention)" }, ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) character_embeddings: bool = field( default=False, metadata={ "help": "if set, uses character embedding convolutions to produce token embeddings" }, ) character_filters: str = field( default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", metadata={"help": "size of character embeddings"}, ) character_embedding_dim: int = field( default=4, metadata={"help": "size of character embeddings"} ) char_embedder_highway_layers: int = field( default=2, metadata={"help": "number of highway layers for character token embeddder"}, ) adaptive_input: bool = field( default=False, metadata={"help": "if set, uses adaptive input"} ) adaptive_input_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) adaptive_input_cutoff: Optional[str] = field( default=None, metadata={"help": "comma separated list of adaptive input cutoff points."}, ) tie_adaptive_weights: bool = field( default=False, metadata={ "help": "if set, ties the weights of adaptive softmax and adaptive input" }, ) tie_adaptive_proj: bool = field( default=False, metadata={ "help": "if set, ties the projection weights of adaptive softmax and adaptive input" }, ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) layernorm_embedding: bool = field( default=False, metadata={"help": "add layernorm to embedding"} ) no_scale_embedding: bool = field( default=False, metadata={"help": "if True, dont scale embeddings"} ) checkpoint_activations: bool = field( default=False, metadata={"help": "checkpoint activations at each layer"} ) offload_activations: bool = field( default=False, metadata={"help": "move checkpointed activations to CPU after they are used."}, ) # config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) decoder_layerdrop: float = field( default=0.0, metadata={"help": "LayerDrop probability for decoder"} ) decoder_layers_to_keep: Optional[str] = field( default=None, metadata={ "help": "which layers to *keep* when pruning as a comma-separated list" }, ) # config for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) quant_noise_pq: float = field( default=0.0, metadata={"help": "iterative PQ quantization noise at training time"}, ) quant_noise_pq_block_size: int = field( default=8, metadata={"help": "block size of quantization noise at training time"}, ) quant_noise_scalar: float = field( default=0.0, metadata={ "help": "scalar quantization noise and scalar quantization at training time" }, ) # config for Fully Sharded Data Parallel (FSDP) training min_params_to_wrap: int = field( default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={ "help": ( "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. 
This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." ) } ) # config for "BASE Layers: Simplifying Training of Large, Sparse Models" base_layers: Optional[int] = field( default=0, metadata={"help": "number of BASE layers in total"} ) base_sublayers: Optional[int] = field( default=1, metadata={"help": "number of sublayers in each BASE layer"} ) base_shuffle: Optional[int] = field( default=1, metadata={"help": "shuffle tokens between workers before computing assignment"} ) # options from other parts of the config add_bos_token: bool = II("task.add_bos_token") tokens_per_sample: int = II("task.tokens_per_sample") max_target_positions: Optional[int] = II("task.max_target_positions") tpu: bool = II("common.tpu") @register_model("transformer_lm", dataclass=TransformerLanguageModelConfig) class TransformerLanguageModel(FairseqLanguageModel): @classmethod def hub_models(cls): def moses_fastbpe(path): return {"path": path, "tokenizer": "moses", "bpe": "fastbpe"} def spm(path): return {"path": path, "tokenizer": "space", "bpe": "sentencepiece"} return { "transformer_lm.gbw.adaptive_huge": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2", "transformer_lm.wiki103.adaptive": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2", "transformer_lm.wmt19.en": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2" ), "transformer_lm.wmt19.de": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2" ), "transformer_lm.wmt19.ru": moses_fastbpe( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2" ), "transformer_lm.wmt20.en": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.en.tar.gz" ), "transformer_lm.wmt20.ta": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.ta.tar.gz" ), "transformer_lm.wmt20.iu.news": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.news.tar.gz" ), "transformer_lm.wmt20.iu.nh": spm( "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.nh.tar.gz" ), } def __init__(self, decoder): super().__init__(decoder) @classmethod def build_model(cls, args, task): """Build a new model instance.""" if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if safe_getattr(args, "max_target_positions", None) is None: args.max_target_positions = safe_getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.source_dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput( len(task.source_dictionary), task.source_dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int), args.quant_noise_pq, args.quant_noise_pq_block_size, ) else: embed_tokens = cls.build_embedding( args, task.source_dictionary, args.decoder_input_dim ) if args.tie_adaptive_weights: assert args.adaptive_input assert args.adaptive_input_factor == args.adaptive_softmax_factor assert ( args.adaptive_softmax_cutoff == args.adaptive_input_cutoff ), "{} != {}".format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff ) assert args.decoder_input_dim == args.decoder_output_dim decoder = TransformerDecoder( args, task.target_dictionary, embed_tokens, 
no_encoder_attn=True ) return cls(decoder) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad()) return embed_tokens def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True @register_model_architecture("transformer_lm", 
"transformer_lm_big") def transformer_lm_big(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_wiki103") @register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103") def transformer_lm_baevski_wiki103(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 16) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.dropout = safe_getattr(args, "dropout", 0.3) args.adaptive_input = safe_getattr(args, "adaptive_input", True) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", "20000,60000") args.adaptive_softmax_cutoff = safe_getattr( args, "adaptive_softmax_cutoff", "20000,60000" ) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) transformer_lm_big(args) @register_model_architecture("transformer_lm", "transformer_lm_gbw") @register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw") def transformer_lm_baevski_gbw(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) transformer_lm_big(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt") def transformer_lm_gpt(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_small") def transformer_lm_gpt2_small(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny") def transformer_lm_gpt2_tiny(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) args.decoder_layers = safe_getattr(args, "decoder_layers", 2) args.decoder_attention_heads = 
safe_getattr(args, "decoder_attention_heads", 1) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium") def transformer_lm_gpt2_medium(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) args.decoder_layers = safe_getattr(args, "decoder_layers", 36) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt2_big") def transformer_lm_gpt2_big(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) args.decoder_layers = safe_getattr(args, "decoder_layers", 48) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args) def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_small") def transformer_lm_gpt3_small(args): # 125M params args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium") def transformer_lm_gpt3_medium(args): # 350M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_large") def transformer_lm_gpt3_large(args): # 760M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl") def transformer_lm_gpt3_xl(args): # 1.3B params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) args.decoder_attention_heads = safe_getattr(args, 
"decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7") def transformer_lm_gpt3_2_7(args): # 2.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7") def transformer_lm_gpt3_6_7(args): # 6.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_13") def transformer_lm_gpt3_13(args): # 13B params args.decoder_layers = safe_getattr(args, "decoder_layers", 40) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40) base_gpt3_architecture(args) @register_model_architecture("transformer_lm", "transformer_lm_gpt3_175") def transformer_lm_gpt3_175(args): # 175B params args.decoder_layers = safe_getattr(args, "decoder_layers", 96) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96) base_gpt3_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/transformer_lm.py
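A quick way to see what the architecture functions in this file do: they mutate an args namespace in place, filling in only the values the user did not supply. The sketch below assumes fairseq is installed and uses transformer_lm_gpt, one of the architectures registered above; calling it directly on a bare Namespace is just for illustration.

from argparse import Namespace

from fairseq.models.transformer_lm import transformer_lm_gpt

args = Namespace(decoder_layers=6)  # a user-supplied override
transformer_lm_gpt(args)            # fills in the remaining GPT defaults
print(args.decoder_layers)          # 6 -- the override is kept
print(args.decoder_embed_dim)       # 768
print(args.activation_fn)           # 'gelu'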
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, FairseqDropout, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) @register_model("fconv") class FConvModel(FairseqEncoderDecoderModel): """ A fully convolutional model, i.e. a convolutional encoder and a convolutional decoder, as described in `"Convolutional Sequence to Sequence Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_. Args: encoder (FConvEncoder): the encoder decoder (FConvDecoder): the decoder The Convolutional model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.fconv_parser :prog: """ @classmethod def hub_models(cls): def moses_subword(path): return { "path": path, "tokenizer": "moses", "bpe": "subword_nmt", } return { "conv.wmt14.en-fr": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2" ), "conv.wmt14.en-de": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2" ), "conv.wmt17.en-de": moses_subword( "https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2" ), } def __init__(self, encoder, decoder): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum( layer is not None for layer in decoder.attention ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--share-input-output-embed', action='store_true', help='share input and output embeddings (requires' ' --decoder-out-embed-dim and --decoder-embed-dim' ' to be equal)') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) encoder_embed_dict = None if args.encoder_embed_path: encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path) utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary) decoder_embed_dict = None if args.decoder_embed_path: decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path) 
utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary) encoder = FConvEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, embed_dict=encoder_embed_dict, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, ) decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, embed_dict=decoder_embed_dict, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, share_embed=args.share_input_output_embed, ) return FConvModel(encoder, decoder) class FConvEncoder(FairseqEncoder): """ Convolutional encoder consisting of `len(convolutions)` layers. Args: dictionary (~fairseq.data.Dictionary): encoding dictionary embed_dim (int, optional): embedding dimension embed_dict (str, optional): filename from which to load pre-trained embeddings max_positions (int, optional): maximum supported input sequence length convolutions (list, optional): the convolutional layer structure. Each list item `i` corresponds to convolutional layer `i`. Layers are given as ``(out_channels, kernel_width, [residual])``. Residual connections are added between layers when ``residual=1`` (which is the default behavior). dropout (float, optional): dropout to be applied before each conv layer """ def __init__( self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding( embed_dict, self.dictionary, self.embed_tokens ) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for _, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append( Linear(residual_dim, out_channels) if residual_dim != out_channels else None ) if kernel_size % 2 == 1: padding = kernel_size // 2 else: padding = 0 self.convolutions.append( ConvTBC( in_channels, out_channels * 2, kernel_size, dropout=dropout, padding=padding, ) ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (tuple): a tuple with two elements, where the first element is the last encoder layer's output and the second element is the same quantity summed with the input embedding (used for attention). The shape of both tensors is `(batch, src_len, embed_dim)`. 
- **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x # project to size of convolution x = self.fc1(x) # used to mask padding in input encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) residuals = [x] # temporal convolutions for proj, conv, res_layer in zip( self.projections, self.convolutions, self.residuals ): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) if conv.kernel_size[0] % 2 == 1: # padding is implicit in the conv x = conv(x) else: padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding) * math.sqrt(0.5) return { "encoder_out": (x, y), "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = ( encoder_out["encoder_out"][0].index_select(0, new_order), encoder_out["encoder_out"][1].index_select(0, new_order), ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions class AttentionLayer(nn.Module): def __init__(self, conv_channels, embed_dim, bmm=None): super().__init__() # projects from output of convolution to embedding dimension self.in_projection = Linear(conv_channels, embed_dim) # projects from embedding dimension to convolution size self.out_projection = Linear(embed_dim, conv_channels) self.bmm = bmm if bmm is not None else torch.bmm def forward(self, x, target_embedding, encoder_out, encoder_padding_mask): residual = x # attention x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5) x = self.bmm(x, encoder_out[0]) # don't attend over padding if encoder_padding_mask is not None: x = ( x.float() .masked_fill(encoder_padding_mask.unsqueeze(1), float("-inf")) .type_as(x) ) # FP16 support: cast to float and back # softmax over last dim sz = x.size() x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1) x = x.view(sz) attn_scores = x x = self.bmm(x, encoder_out[1]) # scale attention output (respecting potentially different lengths) s = encoder_out[1].size(1) if encoder_padding_mask is None: x = x * (s * math.sqrt(1.0 / s)) else: s = s - encoder_padding_mask.type_as(x).sum( dim=1, keepdim=True ) # exclude padding s = s.unsqueeze(-1) x = x * (s * s.rsqrt()) # project back 
x = (self.out_projection(x) + residual) * math.sqrt(0.5) return x, attn_scores def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs): """Replace torch.bmm with BeamableMM.""" if beamable_mm_beam_size is not None: del self.bmm self.add_module("bmm", BeamableMM(beamable_mm_beam_size)) class FConvDecoder(FairseqIncrementalDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 20, attention=True, dropout=0.1, share_embed=False, positional_embeddings=True, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0.0, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([2])) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.need_attn = True convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] if isinstance(attention, bool): # expand True into [True, True, ...] and do the same with False attention = [attention] * len(convolutions) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError( "Attention is expected to be a list of booleans of " "length equal to the number of layers." ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding( embed_dict, self.dictionary, self.embed_tokens ) self.embed_positions = ( PositionalEmbedding( max_positions, embed_dim, padding_idx, ) if positional_embeddings else None ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for i, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append( Linear(residual_dim, out_channels) if residual_dim != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( AttentionLayer(out_channels, embed_dim) if attention[i] else None ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.adaptive_softmax = None self.fc2 = self.fc3 = None if adaptive_softmax_cutoff is not None: assert not share_embed self.adaptive_softmax = AdaptiveSoftmax( num_embeddings, in_channels, adaptive_softmax_cutoff, dropout=adaptive_softmax_dropout, ) else: self.fc2 = Linear(in_channels, out_embed_dim) if share_embed: assert out_embed_dim == embed_dim, ( "Shared embed weights implies same dimensions " " out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim) ) self.fc3 = nn.Linear(out_embed_dim, num_embeddings) self.fc3.weight = self.embed_tokens.weight else: self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): if encoder_out is not None: encoder_padding_mask = encoder_out["encoder_padding_mask"] encoder_out = encoder_out["encoder_out"] # split and transpose encoder outputs encoder_a, encoder_b = self._split_encoder_out( encoder_out, incremental_state ) if self.embed_positions is not None: pos_embed = self.embed_positions(prev_output_tokens, incremental_state) else: pos_embed = 0 if incremental_state is 
not None: prev_output_tokens = prev_output_tokens[:, -1:] x = self._embed_tokens(prev_output_tokens, incremental_state) # embed tokens and combine with positional embeddings x += pos_embed x = self.dropout_module(x) target_embedding = x # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = self._transpose_if_training(x, incremental_state) # temporal convolutions avg_attn_scores = None num_attn_layers = len(self.attention) residuals = [x] for proj, conv, attention, res_layer in zip( self.projections, self.convolutions, self.attention, self.residuals ): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None x = self.dropout_module(x) x = conv(x, incremental_state) x = F.glu(x, dim=2) # attention if attention is not None: x = self._transpose_if_training(x, incremental_state) x, attn_scores = attention( x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask ) if not self.training and self.need_attn: attn_scores = attn_scores / num_attn_layers if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) x = self._transpose_if_training(x, incremental_state) # residual if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = self._transpose_if_training(x, incremental_state) # project back to size of vocabulary if not using adaptive softmax if self.fc2 is not None and self.fc3 is not None: x = self.fc2(x) x = self.dropout_module(x) x = self.fc3(x) return x, avg_attn_scores def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) encoder_out = utils.get_incremental_state( self, incremental_state, "encoder_out" ) if encoder_out is not None: encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out) utils.set_incremental_state( self, incremental_state, "encoder_out", encoder_out ) def max_positions(self): """Maximum output length supported by the decoder.""" return ( self.embed_positions.max_positions if self.embed_positions is not None else float("inf") ) def upgrade_state_dict(self, state_dict): if utils.item(state_dict.get("decoder.version", torch.Tensor([1]))[0]) < 2: # old models use incorrect weight norm dimension for i, conv in enumerate(self.convolutions): # reconfigure weight norm nn.utils.remove_weight_norm(conv) self.convolutions[i] = nn.utils.weight_norm(conv, dim=0) state_dict["decoder.version"] = torch.Tensor([1]) return state_dict def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _embed_tokens(self, tokens, incremental_state): if incremental_state is not None: # keep only the last token for incremental forward pass tokens = tokens[:, -1:] return self.embed_tokens(tokens) def _split_encoder_out(self, encoder_out, incremental_state): """Split and transpose encoder outputs. This is cached when doing incremental inference. 
""" cached_result = utils.get_incremental_state( self, incremental_state, "encoder_out" ) if cached_result is not None: return cached_result # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(1, 2).contiguous() result = (encoder_a, encoder_b) if incremental_state is not None: utils.set_incremental_state(self, incremental_state, "encoder_out", result) return result def _transpose_if_training(self, x, incremental_state): if incremental_state is None: x = x.transpose(0, 1) return x def extend_conv_spec(convolutions): """ Extends convolutional spec that is a list of tuples of 2 or 3 parameters (kernel size, dim size and optionally how many layers behind to look for residual) to default the residual propagation param if it is not specified """ extended = [] for spec in convolutions: if len(spec) == 3: extended.append(spec) elif len(spec) == 2: extended.append(spec + (1,)) else: raise Exception( "invalid number of parameters in convolution spec " + str(spec) + ". expected 2 or 3" ) return tuple(extended) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, dropout=0.0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features)) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m) def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) @register_model_architecture("fconv", "fconv") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 20") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 20") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_attention = getattr(args, "decoder_attention", "True") args.share_input_output_embed = getattr(args, "share_input_output_embed", False) @register_model_architecture("fconv", "fconv_iwslt_de_en") def fconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_layers = 
getattr(args, "encoder_layers", "[(256, 3)] * 4") args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_layers = getattr(args, "decoder_layers", "[(256, 3)] * 3") args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_ro") def fconv_wmt_en_ro(args): args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_de") def fconv_wmt_en_de(args): convs = "[(512, 3)] * 9" # first 9 layers have 512 units convs += " + [(1024, 3)] * 4" # next 4 layers have 1024 units convs += " + [(2048, 1)] * 2" # final 2 layers use 1x1 convolutions args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_layers = getattr(args, "encoder_layers", convs) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) args.decoder_layers = getattr(args, "decoder_layers", convs) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args) @register_model_architecture("fconv", "fconv_wmt_en_fr") def fconv_wmt_en_fr(args): convs = "[(512, 3)] * 6" # first 6 layers have 512 units convs += " + [(768, 3)] * 4" # next 4 layers have 768 units convs += " + [(1024, 3)] * 3" # next 3 layers have 1024 units convs += " + [(2048, 1)] * 1" # next 1 layer uses 1x1 convolutions convs += " + [(4096, 1)] * 1" # final 1 layer uses 1x1 convolutions args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_layers = getattr(args, "encoder_layers", convs) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) args.decoder_layers = getattr(args, "decoder_layers", convs) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/fconv.py
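The fconv architectures above describe their convolutional stacks as Python expressions that build lists of (out_channels, kernel_size[, residual]) tuples; build_model eval()s the string and extend_conv_spec fills in the default residual distance of 1. A short, self-contained check of that convention, assuming fairseq is installed:

from fairseq.models.fconv import extend_conv_spec

convs = "[(512, 3)] * 9" + " + [(1024, 3)] * 4" + " + [(2048, 1)] * 2"
spec = eval(convs)                 # the same trick build_model uses on args.encoder_layers
print(len(spec))                   # 15 layers, matching fconv_wmt_en_de
print(extend_conv_spec(spec)[:2])  # ((512, 3, 1), (512, 3, 1))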
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax, FairseqDropout from torch import Tensor DEFAULT_MAX_SOURCE_POSITIONS = 1e5 DEFAULT_MAX_TARGET_POSITIONS = 1e5 @register_model("lstm") class LSTMModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-freeze-embed', action='store_true', help='freeze encoder embeddings') parser.add_argument('--encoder-hidden-size', type=int, metavar='N', help='encoder hidden size') parser.add_argument('--encoder-layers', type=int, metavar='N', help='number of encoder layers') parser.add_argument('--encoder-bidirectional', action='store_true', help='make all layers of encoder bidirectional') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-freeze-embed', action='store_true', help='freeze decoder embeddings') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', default=False, action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--encoder-dropout-in', type=float, metavar='D', help='dropout probability for encoder input embedding') parser.add_argument('--encoder-dropout-out', type=float, metavar='D', help='dropout probability for encoder output') parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) if args.encoder_layers != args.decoder_layers: raise ValueError("--encoder-layers must match --decoder-layers") max_source_positions = getattr( args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS ) max_target_positions = getattr( args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS ) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) if args.encoder_embed_path: pretrained_encoder_embed = load_pretrained_embedding_from_file( args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim ) else: num_embeddings = len(task.source_dictionary) pretrained_encoder_embed = Embedding( num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() ) if args.share_all_embeddings: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError("--share-all-embeddings requires a joint dictionary") if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embed not compatible with --decoder-embed-path" ) if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to " "match --decoder-embed-dim" ) pretrained_decoder_embed = pretrained_encoder_embed args.share_decoder_input_output_embed = True else: # separate decoder input embeddings pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim, ) # one last double check of parameter combinations if args.share_decoder_input_output_embed and ( args.decoder_embed_dim != args.decoder_out_embed_dim ): raise ValueError( "--share-decoder-input-output-embeddings requires " "--decoder-embed-dim to match --decoder-out-embed-dim" ) if args.encoder_freeze_embed: pretrained_encoder_embed.weight.requires_grad = False if args.decoder_freeze_embed: pretrained_decoder_embed.weight.requires_grad = False encoder = LSTMEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, 
hidden_size=args.encoder_hidden_size, num_layers=args.encoder_layers, dropout_in=args.encoder_dropout_in, dropout_out=args.encoder_dropout_out, bidirectional=args.encoder_bidirectional, pretrained_embed=pretrained_encoder_embed, max_source_positions=max_source_positions, ) decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=utils.eval_bool(args.decoder_attention), encoder_output_units=encoder.output_units, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=( utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == "adaptive_loss" else None ), max_target_positions=max_target_positions, residuals=False, ) return cls(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, ) return decoder_out class LSTMEncoder(FairseqEncoder): """LSTM encoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, bidirectional=False, left_pad=True, pretrained_embed=None, padding_idx=None, max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, ): super().__init__(dictionary) self.num_layers = num_layers self.dropout_in_module = FairseqDropout( dropout_in*1.0, module_name=self.__class__.__name__ ) self.dropout_out_module = FairseqDropout( dropout_out*1.0, module_name=self.__class__.__name__ ) self.bidirectional = bidirectional self.hidden_size = hidden_size self.max_source_positions = max_source_positions num_embeddings = len(dictionary) self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) else: self.embed_tokens = pretrained_embed self.lstm = LSTM( input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=self.dropout_out_module.p if num_layers > 1 else 0.0, bidirectional=bidirectional, ) self.left_pad = left_pad self.output_units = hidden_size if bidirectional: self.output_units *= 2 def forward( self, src_tokens: Tensor, src_lengths: Tensor, enforce_sorted: bool = True, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` enforce_sorted (bool, optional): if True, `src_tokens` is expected to contain sequences sorted by length in a decreasing order. If False, this condition is not required. Default: True. 
""" if self.left_pad: # nn.utils.rnn.pack_padded_sequence requires right-padding; # convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, torch.zeros_like(src_tokens).fill_(self.padding_idx), left_to_right=True, ) bsz, seqlen = src_tokens.size() # embed tokens x = self.embed_tokens(src_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # pack embedded source tokens into a PackedSequence packed_x = nn.utils.rnn.pack_padded_sequence( x, src_lengths.cpu(), enforce_sorted=enforce_sorted ) # apply LSTM if self.bidirectional: state_size = 2 * self.num_layers, bsz, self.hidden_size else: state_size = self.num_layers, bsz, self.hidden_size h0 = x.new_zeros(*state_size) c0 = x.new_zeros(*state_size) packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, _ = nn.utils.rnn.pad_packed_sequence( packed_outs, padding_value=self.padding_idx * 1.0 ) x = self.dropout_out_module(x) assert list(x.size()) == [seqlen, bsz, self.output_units] if self.bidirectional: final_hiddens = self.combine_bidir(final_hiddens, bsz) final_cells = self.combine_bidir(final_cells, bsz) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() return tuple( ( x, # seq_len x batch x hidden final_hiddens, # num_layers x batch x num_directions*hidden final_cells, # num_layers x batch x num_directions*hidden encoder_padding_mask, # seq_len x batch ) ) def combine_bidir(self, outs, bsz: int): out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() return out.view(self.num_layers, bsz, -1) def reorder_encoder_out(self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order): return tuple( ( encoder_out[0].index_select(1, new_order), encoder_out[1].index_select(1, new_order), encoder_out[2].index_select(1, new_order), encoder_out[3].index_select(1, new_order), ) ) def max_positions(self): """Maximum input length supported by the encoder.""" return self.max_source_positions class AttentionLayer(nn.Module): def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): super().__init__() self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) self.output_proj = Linear( input_embed_dim + source_embed_dim, output_embed_dim, bias=bias ) def forward(self, input, source_hids, encoder_padding_mask): # input: bsz x input_embed_dim # source_hids: srclen x bsz x source_embed_dim # x: bsz x source_embed_dim x = self.input_proj(input) # compute attention attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) # don't attend over padding if encoder_padding_mask is not None: attn_scores = ( attn_scores.float() .masked_fill_(encoder_padding_mask, float("-inf")) .type_as(attn_scores) ) # FP16 support: cast to float and back attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz # sum weighted sources x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) return x, attn_scores class LSTMDecoder(FairseqIncrementalDecoder): """LSTM decoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True, encoder_output_units=512, pretrained_embed=None, share_input_output_embed=False, adaptive_softmax_cutoff=None, max_target_positions=DEFAULT_MAX_TARGET_POSITIONS, residuals=False, ): super().__init__(dictionary) self.dropout_in_module = FairseqDropout( dropout_in*1.0, module_name=self.__class__.__name__ ) 
self.dropout_out_module = FairseqDropout( dropout_out*1.0, module_name=self.__class__.__name__ ) self.hidden_size = hidden_size self.share_input_output_embed = share_input_output_embed self.need_attn = True self.max_target_positions = max_target_positions self.residuals = residuals self.num_layers = num_layers self.adaptive_softmax = None num_embeddings = len(dictionary) padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) else: self.embed_tokens = pretrained_embed self.encoder_output_units = encoder_output_units if encoder_output_units != hidden_size and encoder_output_units != 0: self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) else: self.encoder_hidden_proj = self.encoder_cell_proj = None # disable input feeding if there is no encoder # input feeding is described in arxiv.org/abs/1508.04025 input_feed_size = 0 if encoder_output_units == 0 else hidden_size self.layers = nn.ModuleList( [ LSTMCell( input_size=input_feed_size + embed_dim if layer == 0 else hidden_size, hidden_size=hidden_size, ) for layer in range(num_layers) ] ) if attention: # TODO make bias configurable self.attention = AttentionLayer( hidden_size, encoder_output_units, hidden_size, bias=False ) else: self.attention = None if hidden_size != out_embed_dim: self.additional_fc = Linear(hidden_size, out_embed_dim) if adaptive_softmax_cutoff is not None: # setting adaptive_softmax dropout to dropout_out for now but can be redefined self.adaptive_softmax = AdaptiveSoftmax( num_embeddings, hidden_size, adaptive_softmax_cutoff, dropout=dropout_out, ) elif not self.share_input_output_embed: self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) def forward( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, src_lengths: Optional[Tensor] = None, ): x, attn_scores = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) return self.output_layer(x), attn_scores def extract_features( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """ Similar to *forward* but only return features. 
""" # get outputs from encoder if encoder_out is not None: encoder_outs = encoder_out[0] encoder_hiddens = encoder_out[1] encoder_cells = encoder_out[2] encoder_padding_mask = encoder_out[3] else: encoder_outs = torch.empty(0) encoder_hiddens = torch.empty(0) encoder_cells = torch.empty(0) encoder_padding_mask = torch.empty(0) srclen = encoder_outs.size(0) if incremental_state is not None and len(incremental_state) > 0: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() # embed tokens x = self.embed_tokens(prev_output_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental generation) if incremental_state is not None and len(incremental_state) > 0: prev_hiddens, prev_cells, input_feed = self.get_cached_state( incremental_state ) elif encoder_out is not None: # setup recurrent cells prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)] prev_cells = [encoder_cells[i] for i in range(self.num_layers)] if self.encoder_hidden_proj is not None: prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens] prev_cells = [self.encoder_cell_proj(y) for y in prev_cells] input_feed = x.new_zeros(bsz, self.hidden_size) else: # setup zero cells, since there is no encoder zero_state = x.new_zeros(bsz, self.hidden_size) prev_hiddens = [zero_state for i in range(self.num_layers)] prev_cells = [zero_state for i in range(self.num_layers)] input_feed = None assert ( srclen > 0 or self.attention is None ), "attention is not supported if there are no encoder outputs" attn_scores: Optional[Tensor] = ( x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None ) outs = [] for j in range(seqlen): # input feeding: concatenate context vector from previous time step if input_feed is not None: input = torch.cat((x[j, :, :], input_feed), dim=1) else: input = x[j] for i, rnn in enumerate(self.layers): # recurrent cell hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) # hidden state becomes the input to the next layer input = self.dropout_out_module(hidden) if self.residuals: input = input + prev_hiddens[i] # save state for next time step prev_hiddens[i] = hidden prev_cells[i] = cell # apply attention using the last layer's hidden state if self.attention is not None: assert attn_scores is not None out, attn_scores[:, j, :] = self.attention( hidden, encoder_outs, encoder_padding_mask ) else: out = hidden out = self.dropout_out_module(out) # input feeding if input_feed is not None: input_feed = out # save final output outs.append(out) # Stack all the necessary tensors together and store prev_hiddens_tensor = torch.stack(prev_hiddens) prev_cells_tensor = torch.stack(prev_cells) cache_state = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": prev_hiddens_tensor, "prev_cells": prev_cells_tensor, "input_feed": input_feed, }, ) self.set_incremental_state(incremental_state, "cached_state", cache_state) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) # T x B x C -> B x T x C x = x.transpose(1, 0) if hasattr(self, "additional_fc") and self.adaptive_softmax is None: x = self.additional_fc(x) x = self.dropout_out_module(x) # srclen x tgtlen x bsz -> bsz x tgtlen x srclen if not self.training and self.need_attn and self.attention is not None: assert attn_scores is not None attn_scores = attn_scores.transpose(0, 2) else: attn_scores = None return x, attn_scores def output_layer(self, x): 
"""Project features to the vocabulary size.""" if self.adaptive_softmax is None: if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = self.fc_out(x) return x def get_cached_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]: cached_state = self.get_incremental_state(incremental_state, "cached_state") assert cached_state is not None prev_hiddens_ = cached_state["prev_hiddens"] assert prev_hiddens_ is not None prev_cells_ = cached_state["prev_cells"] assert prev_cells_ is not None prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)] prev_cells = [prev_cells_[j] for j in range(self.num_layers)] input_feed = cached_state[ "input_feed" ] # can be None for decoder-only language models return prev_hiddens, prev_cells, input_feed def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): if incremental_state is None or len(incremental_state) == 0: return prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens] prev_cells = [p.index_select(0, new_order) for p in prev_cells] if input_feed is not None: input_feed = input_feed.index_select(0, new_order) cached_state_new = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": torch.stack(prev_hiddens), "prev_cells": torch.stack(prev_cells), "input_feed": input_feed, }, ) self.set_incremental_state(incremental_state, "cached_state", cached_state_new), return def max_positions(self): """Maximum output length supported by the decoder.""" return self.max_target_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def LSTM(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def LSTMCell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def Linear(in_features, out_features, bias=True, dropout=0.0): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m @register_model_architecture("lstm", "lstm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False) args.encoder_hidden_size = getattr( args, "encoder_hidden_size", args.encoder_embed_dim ) args.encoder_layers = getattr(args, "encoder_layers", 1) args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_freeze_embed 
= getattr(args, "decoder_freeze_embed", False) args.decoder_hidden_size = getattr( args, "decoder_hidden_size", args.decoder_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 1) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) args.decoder_attention = getattr(args, "decoder_attention", "1") args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) @register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en") def lstm_wiseman_iwslt_de_en(args): args.dropout = getattr(args, "dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) base_architecture(args) @register_model_architecture("lstm", "lstm_luong_wmt_en_de") def lstm_luong_wmt_en_de(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000) args.encoder_layers = getattr(args, "encoder_layers", 4) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000) args.decoder_layers = getattr(args, "decoder_layers", 4) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0) base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/lstm.py
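The AttentionLayer in the lstm.py file above scores each encoder state against the current decoder hidden state with a dot product, normalizes over source positions, and returns a weighted context vector. Below is a minimal, self-contained sketch of that computation with random tensors; the input/output projections of the real layer are omitted and all names and sizes are illustrative, not part of the file above.

import torch
import torch.nn.functional as F

srclen, bsz, dim = 7, 2, 16
source_hids = torch.randn(srclen, bsz, dim)   # encoder outputs, srclen x bsz x dim
query = torch.randn(bsz, dim)                 # current decoder hidden state, bsz x dim

# dot-product scores over source positions, mirroring AttentionLayer.forward
attn_scores = (source_hids * query.unsqueeze(0)).sum(dim=2)      # srclen x bsz
attn_scores = F.softmax(attn_scores, dim=0)                      # normalize over srclen
context = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)    # bsz x dim
assert context.shape == (bsz, dim)

In the full layer the context is then concatenated with the query and passed through output_proj with a tanh before being fed back as input feeding for the next timestep.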
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerModel, base_architecture, transformer_wmt_en_de_big, ) @register_model("transformer_align") class TransformerAlignModel(TransformerModel): """ See "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). """ def __init__(self, encoder, decoder, args): super().__init__(args, encoder, decoder) self.alignment_heads = args.alignment_heads self.alignment_layer = args.alignment_layer self.full_context_alignment = args.full_context_alignment @staticmethod def add_args(parser): # fmt: off super(TransformerAlignModel, TransformerAlignModel).add_args(parser) parser.add_argument('--alignment-heads', type=int, metavar='D', help='Number of cross attention heads per layer to supervised with alignments') parser.add_argument('--alignment-layer', type=int, metavar='D', help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.') parser.add_argument('--full-context-alignment', action='store_true', help='Whether or not alignment is supervised conditioned on the full target context.') # fmt: on @classmethod def build_model(cls, args, task): # set any default arguments transformer_align(args) transformer_model = TransformerModel.build_model(args, task) return TransformerAlignModel( transformer_model.encoder, transformer_model.decoder, args ) def forward(self, src_tokens, src_lengths, prev_output_tokens): encoder_out = self.encoder(src_tokens, src_lengths) return self.forward_decoder(prev_output_tokens, encoder_out) def forward_decoder( self, prev_output_tokens, encoder_out=None, incremental_state=None, features_only=False, **extra_args, ): attn_args = { "alignment_layer": self.alignment_layer, "alignment_heads": self.alignment_heads, } decoder_out = self.decoder(prev_output_tokens, encoder_out, **attn_args) if self.full_context_alignment: attn_args["full_context_alignment"] = self.full_context_alignment _, alignment_out = self.decoder( prev_output_tokens, encoder_out, features_only=True, **attn_args, **extra_args, ) decoder_out[1]["attn"] = alignment_out["attn"] return decoder_out @register_model_architecture("transformer_align", "transformer_align") def transformer_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) args.full_context_alignment = getattr(args, "full_context_alignment", False) base_architecture(args) @register_model_architecture("transformer_align", "transformer_wmt_en_de_big_align") def transformer_wmt_en_de_big_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) transformer_wmt_en_de_big(args)
bart_ls-main
fairseq-py/fairseq/models/transformer_align.py
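TransformerAlignModel above supervises the cross-attention of a single decoder layer (alignment_layer), averaged over the first alignment_heads heads, so a word alignment can be read directly off those attention weights. A toy sketch of that read-out with random tensors follows; the shapes and variable names are illustrative assumptions, not the fairseq API.

import torch

bsz, num_heads, tgt_len, src_len = 2, 8, 5, 9
alignment_heads = 1

# cross-attention weights of the supervised decoder layer
attn = torch.rand(bsz, num_heads, tgt_len, src_len).softmax(dim=-1)

# average the supervised heads to get a soft alignment, then take the argmax
soft_align = attn[:, :alignment_heads].mean(dim=1)   # bsz x tgt_len x src_len
hard_align = soft_align.argmax(dim=-1)               # most-attended source index per target token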
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .fairseq_encoder import FairseqEncoder class CompositeEncoder(FairseqEncoder): """ A wrapper around a dictionary of :class:`FairseqEncoder` objects. We run forward on each encoder and return a dictionary of outputs. The first encoder's dictionary is used for initialization. Args: encoders (dict): a dictionary of :class:`FairseqEncoder` objects. """ def __init__(self, encoders): super().__init__(next(iter(encoders.values())).dictionary) self.encoders = encoders for key in self.encoders: self.add_module(key, self.encoders[key]) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: the outputs from each Encoder """ encoder_out = {} for key in self.encoders: encoder_out[key] = self.encoders[key](src_tokens, src_lengths) return encoder_out def reorder_encoder_out(self, encoder_out, new_order): """Reorder encoder output according to new_order.""" for key in self.encoders: encoder_out[key] = self.encoders[key].reorder_encoder_out( encoder_out[key], new_order ) return encoder_out def max_positions(self): return min(self.encoders[key].max_positions() for key in self.encoders) def upgrade_state_dict(self, state_dict): for key in self.encoders: self.encoders[key].upgrade_state_dict(state_dict) return state_dict
bart_ls-main
fairseq-py/fairseq/models/composite_encoder.py
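CompositeEncoder above fans a single (src_tokens, src_lengths) pair out to every registered encoder and collects the results in a dictionary keyed the same way. A hedged, stand-alone sketch of that pattern with dummy nn.Module encoders; DummyEncoder is hypothetical, whereas the real wrapper expects FairseqEncoder instances sharing a dictionary.

import torch
import torch.nn as nn

class DummyEncoder(nn.Module):
    """Hypothetical stand-in for a FairseqEncoder."""
    def __init__(self, out_dim):
        super().__init__()
        self.proj = nn.Linear(8, out_dim)
    def forward(self, src_tokens, src_lengths):
        return self.proj(src_tokens)

encoders = nn.ModuleDict({"ctx": DummyEncoder(4), "question": DummyEncoder(4)})
src_tokens = torch.randn(2, 8)     # toy input features
out = {key: encoders[key](src_tokens, src_lengths=None) for key in encoders}
assert set(out) == {"ctx", "question"}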
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_hasattr logger = logging.getLogger(__name__) @register_model("masked_lm") class MaskedLMModel(FairseqEncoderModel): """ Class for training a Masked Language Model. It also supports an additional sentence level prediction if the sent-loss argument is set. """ def __init__(self, args, encoder): super().__init__(encoder) self.args = args # if specified then apply bert initialization on the model. We need # to explictly call this to make sure that the output embeddings # and projection layers are also correctly initialized if getattr(args, "apply_bert_init", False): self.apply(init_bert_params) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # Arguments related to dropout parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for" " attention weights", ) parser.add_argument( "--act-dropout", type=float, metavar="D", help="dropout probability after" " activation in FFN", ) # Arguments related to hidden states and self-attention parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) # Arguments related to input and output embeddings parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--share-encoder-input-output-embed", action="store_true", help="share encoder input" " and output embeddings", ) parser.add_argument( "--encoder-learned-pos", action="store_true", help="use learned positional embeddings in the encoder", ) parser.add_argument( "--no-token-positional-embeddings", action="store_true", help="if set, disables positional embeddings" " (outside self attention)", ) parser.add_argument( "--num-segment", type=int, metavar="N", help="num segment in the input" ) parser.add_argument( "--max-positions", type=int, help="number of positional embeddings to learn" ) # Arguments related to sentence level prediction parser.add_argument( "--sentence-class-num", type=int, metavar="N", help="number of classes for sentence task", ) parser.add_argument( "--sent-loss", action="store_true", help="if set," " calculate sentence level predictions", ) # Arguments related to parameter initialization parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) # misc params parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="Which activation function to use for pooler layer.", ) parser.add_argument( 
"--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) def forward(self, src_tokens, segment_labels=None, **kwargs): return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs) def max_positions(self): return self.encoder.max_positions @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not safe_hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample logger.info(args) encoder = MaskedLMEncoder(args, task.dictionary) return cls(args, encoder) class MaskedLMEncoder(FairseqEncoder): """ Encoder for Masked Language Modelling. """ def __init__(self, args, dictionary): super().__init__(dictionary) self.padding_idx = dictionary.pad() self.vocab_size = dictionary.__len__() self.max_positions = args.max_positions self.sentence_encoder = TransformerSentenceEncoder( padding_idx=self.padding_idx, vocab_size=self.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=self.max_positions, num_segments=args.num_segment, use_position_embeddings=not args.no_token_positional_embeddings, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, ) self.share_input_output_embed = args.share_encoder_input_output_embed self.embed_out = None self.sentence_projection_layer = None self.sentence_out_dim = args.sentence_class_num self.lm_output_learned_bias = None # Remove head is set to true during fine-tuning self.load_softmax = not getattr(args, "remove_head", False) self.masked_lm_pooler = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn) self.lm_head_transform_weight = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.activation_fn = utils.get_activation_fn(args.activation_fn) self.layer_norm = LayerNorm(args.encoder_embed_dim) self.lm_output_learned_bias = None if self.load_softmax: self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size)) if not self.share_input_output_embed: self.embed_out = nn.Linear( args.encoder_embed_dim, self.vocab_size, bias=False ) if args.sent_loss: self.sentence_projection_layer = nn.Linear( args.encoder_embed_dim, self.sentence_out_dim, bias=False ) def forward(self, src_tokens, segment_labels=None, masked_tokens=None, **unused): """ Forward pass for Masked LM encoder. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). Here we assume that the sentence representation corresponds to the output of the classification_token (see bert_task or cross_lingual_lm task for more details). Args: - src_tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Returns: - a tuple of the following: - logits for predictions in format B x T x C to be used in softmax afterwards - a dictionary of additional data, where 'pooled_output' contains the representation for classification_token and 'inner_states' is a list of internal model states used to compute the predictions (similar in ELMO). 
'sentence_logits' is the prediction logit for NSP task and is only computed if this is specified in the input arguments. """ inner_states, sentence_rep = self.sentence_encoder( src_tokens, segment_labels=segment_labels, ) x = inner_states[-1].transpose(0, 1) # project masked tokens only if masked_tokens is not None: x = x[masked_tokens, :] x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x))) pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep)) # project back to size of vocabulary if self.share_input_output_embed and hasattr( self.sentence_encoder.embed_tokens, "weight" ): x = F.linear(x, self.sentence_encoder.embed_tokens.weight) elif self.embed_out is not None: x = self.embed_out(x) if self.lm_output_learned_bias is not None: x = x + self.lm_output_learned_bias sentence_logits = None if self.sentence_projection_layer: sentence_logits = self.sentence_projection_layer(pooled_output) return x, { "inner_states": inner_states, "pooled_output": pooled_output, "sentence_logits": sentence_logits, } def max_positions(self): """Maximum output length supported by the encoder.""" return self.max_positions def upgrade_state_dict_named(self, state_dict, name): if isinstance( self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding ): state_dict[ name + ".sentence_encoder.embed_positions._float_tensor" ] = torch.FloatTensor(1) if not self.load_softmax: for k in list(state_dict.keys()): if ( "embed_out.weight" in k or "sentence_projection_layer.weight" in k or "lm_output_learned_bias" in k ): del state_dict[k] return state_dict @register_model_architecture("masked_lm", "masked_lm") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.act_dropout = getattr(args, "act_dropout", 0.0) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.num_segment = getattr(args, "num_segment", 2) args.sentence_class_num = getattr(args, "sentence_class_num", 2) args.sent_loss = getattr(args, "sent_loss", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.activation_fn = getattr(args, "activation_fn", "relu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) @register_model_architecture("masked_lm", "bert_base") def bert_base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.num_segment = getattr(args, "num_segment", 2) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072) args.sentence_class_num = getattr(args, 
"sentence_class_num", 2) args.sent_loss = getattr(args, "sent_loss", True) args.apply_bert_init = getattr(args, "apply_bert_init", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) base_architecture(args) @register_model_architecture("masked_lm", "bert_large") def bert_large_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_layers = getattr(args, "encoder_layers", 24) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) bert_base_architecture(args) @register_model_architecture("masked_lm", "xlm_base") def xlm_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.num_segment = getattr(args, "num_segment", 1) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.sent_loss = getattr(args, "sent_loss", False) args.activation_fn = getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.apply_bert_init = getattr(args, "apply_bert_init", True) base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/masked_lm.py
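MaskedLMEncoder.forward above only pushes the masked positions through the LM head (x = x[masked_tokens, :]) before projecting back to the vocabulary, optionally reusing the input embedding matrix when the embeddings are shared. A minimal sketch of that gather-then-project step with toy tensors; all names and sizes here are illustrative.

import torch
import torch.nn.functional as F

bsz, seq_len, dim, vocab = 2, 6, 8, 50
x = torch.randn(bsz, seq_len, dim)                    # encoder output, B x T x C
masked_tokens = torch.zeros(bsz, seq_len, dtype=torch.bool)
masked_tokens[0, 1] = True
masked_tokens[1, 4] = True

x_masked = x[masked_tokens, :]                        # n_masked x C: only masked positions
embed_weight = torch.randn(vocab, dim)                # stands in for the tied embedding matrix
logits = F.linear(x_masked, embed_weight)             # n_masked x vocab
assert logits.shape == (int(masked_tokens.sum()), vocab)

Restricting the final vocabulary projection to the masked positions keeps the most expensive matrix multiply proportional to the number of masked tokens rather than the full sequence length.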
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, Optional from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.models import FairseqDecoder from torch import Tensor logger = logging.getLogger(__name__) @with_incremental_state class FairseqIncrementalDecoder(FairseqDecoder): """Base class for incremental decoders. Incremental decoding is a special mode at inference time where the Model only receives a single timestep of input corresponding to the previous output token (for teacher forcing) and must produce the next output *incrementally*. Thus the model must cache any long-term state that is needed about the sequence, e.g., hidden states, convolutional states, etc. Compared to the standard :class:`FairseqDecoder` interface, the incremental decoder interface allows :func:`forward` functions to take an extra keyword argument (*incremental_state*) that can be used to cache state across time-steps. The :class:`FairseqIncrementalDecoder` interface also defines the :func:`reorder_incremental_state` method, which is used during beam search to select and reorder the incremental state based on the selection of beams. To learn more about how incremental decoding works, refer to `this blog <http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_. """ def __init__(self, dictionary): super().__init__(dictionary) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): """ Args: prev_output_tokens (LongTensor): shifted output tokens of shape `(batch, tgt_len)`, for teacher forcing encoder_out (dict, optional): output from the encoder, used for encoder-side attention incremental_state (dict, optional): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ raise NotImplementedError def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): """ Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ raise NotImplementedError def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder incremental state. This will be called when the order of the input has changed from the previous time step. A typical use case is beam search, where the input order changes between time steps based on the selection of beams. """ pass def reorder_incremental_state_scripting( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Main entry point for reordering the incremental state. Due to limitations in TorchScript, we call this function in :class:`fairseq.sequence_generator.SequenceGenerator` instead of calling :func:`reorder_incremental_state` directly. 
""" for module in self.modules(): if hasattr(module, "reorder_incremental_state"): result = module.reorder_incremental_state(incremental_state, new_order) if result is not None: incremental_state = result def set_beam_size(self, beam_size): """Sets the beam size in the decoder and all children.""" if getattr(self, "_beam_size", -1) != beam_size: seen = set() def apply_set_beam_size(module): if ( module != self and hasattr(module, "set_beam_size") and module not in seen ): seen.add(module) module.set_beam_size(beam_size) self.apply(apply_set_beam_size) self._beam_size = beam_size
bart_ls-main
fairseq-py/fairseq/models/fairseq_incremental_decoder.py
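The docstring above describes the incremental-decoding contract: at inference the decoder receives one timestep at a time, caches whatever state the next step needs in incremental_state, and reorders that cache along the batch dimension whenever beam search reshuffles hypotheses. A toy sketch of that contract using a plain dict and functions, not the fairseq utilities; the cached quantity here is an arbitrary running summary chosen for illustration.

import torch

incremental_state = {}

def decode_step(token_emb, incremental_state):
    # carry a running summary of the prefix instead of re-reading all past tokens
    prev = incremental_state.get("cached")
    hidden = token_emb if prev is None else prev + token_emb
    incremental_state["cached"] = hidden
    return hidden

def reorder(incremental_state, new_order):
    # analogue of reorder_incremental_state: select beams along dim 0
    incremental_state["cached"] = incremental_state["cached"].index_select(0, new_order)

x = torch.randn(4, 8)                       # (batch * beam) x embed for one timestep
decode_step(x, incremental_state)
reorder(incremental_state, torch.tensor([1, 0, 3, 2]))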
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, DynamicConv, FairseqDropout, LayerNorm, LightweightConv, MultiheadAttention, PositionalEmbedding, ) from fairseq.utils import safe_hasattr @register_model("lightconv") class LightConvModel(FairseqEncoderDecoderModel): """ LightConv and DynamicConv model from `"Pay Less Attention with Lightweight and Dynamic Convolutions" (Wu, et al, 2019) <https://openreview.net/pdf?id=SkVhlh09tX>`_. To use LightConv please set ``--encoder-conv-type lightweight --decoder-conv-type lightweight`` To use DynamicConv please set ``--encoder-conv-type dynamic --decoder-conv-type dynamic`` Args: encoder (LightConvEncoder): the encoder decoder (LightConvDecoder): the decoder The LightConv model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.lightconv_parser :prog: """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } return { 'lightconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz'), 'dynamicconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz'), 'lightconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz'), 'dynamicconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz'), 'lightconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz'), } # fmt: on def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", 
help="dropout probability for attention weights", ) parser.add_argument( "--relu-dropout", type=float, metavar="D", help="dropout probability after ReLU in FFN", ) parser.add_argument( "--input-dropout", type=float, metavar="D", help="dropout probability of the inputs", ) parser.add_argument( "--encoder-embed-path", type=str, metavar="STR", help="path to pre-trained encoder embedding", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-conv-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--encoder-learned-pos", action="store_true", help="use learned positional embeddings in the encoder", ) parser.add_argument( "--decoder-embed-path", type=str, metavar="STR", help="path to pre-trained decoder embedding", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-conv-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--decoder-learned-pos", action="store_true", help="use learned positional embeddings in the decoder", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--share-all-embeddings", action="store_true", help="share encoder, decoder and output embeddings" " (requires shared dictionary and embed dim)", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion", ), parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) """LightConv and DynamicConv arguments""" parser.add_argument( "--encoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31,31]")', ) parser.add_argument( "--decoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")', ) parser.add_argument( "--encoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--decoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--encoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument( "--decoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool) parser.add_argument( "--weight-dropout", type=float, metavar="D", help="dropout probability for conv weights", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not safe_hasattr(args, "max_source_positions"): args.max_source_positions = 1024 if not safe_hasattr(args, "max_target_positions"): args.max_target_positions = 1024 src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise RuntimeError( "--share-all-embeddings requires a joined dictionary" ) if args.encoder_embed_dim != args.decoder_embed_dim: raise RuntimeError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise RuntimeError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = LightConvEncoder(args, src_dict, encoder_embed_tokens) decoder = LightConvDecoder(args, tgt_dict, decoder_embed_tokens) return LightConvModel(encoder, decoder) class LightConvEncoder(FairseqEncoder): """ LightConv encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`LightConvEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) self.layers = nn.ModuleList([]) self.layers.extend( [ LightConvEncoderLayer( args, kernel_size=args.encoder_kernel_size_list[i] ) for i in range(args.encoder_layers) ] ) self.register_buffer("version", torch.Tensor([2])) self.normalize = args.encoder_normalize_before if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward(self, src_tokens, **unused): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.normalize: x = self.layer_norm(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) class LightConvDecoder(FairseqIncrementalDecoder): """ LightConv decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`LightConvDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs. 
Default: ``False`` """ def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, final_norm=True ): super().__init__(dictionary) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) self.layers = nn.ModuleList([]) self.layers.extend( [ LightConvDecoderLayer( args, no_encoder_attn, kernel_size=args.decoder_kernel_size_list[i] ) for i in range(args.decoder_layers) ] ) self.adaptive_softmax = None self.project_out_dim = ( Linear(embed_dim, output_embed_dim, bias=False) if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None ) if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), output_embed_dim, utils.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5) self.register_buffer("version", torch.Tensor([2])) self.normalize = args.decoder_normalize_before and final_norm if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the last decoder layer's output of shape `(batch, tgt_len, vocab)` - the last decoder layer's attention weights of shape `(batch, tgt_len, src_len)` """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["encoder_padding_mask"] if encoder_out is not None else None, incremental_state, ) inner_states.append(x) if self.normalize: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not 
None: x = self.project_out_dim(x) if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = F.linear(x, self.embed_out) return x, {"attn": attn, "inner_states": inner_states} def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) if self._future_mask.size(0) < dim: self._future_mask = torch.triu( utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 ) return self._future_mask[:dim, :dim] class LightConvEncoderLayer(nn.Module): """Encoder layer block. Args: args (argparse.Namespace): parsed command-line arguments kernel_size: kernel size of the convolution """ def __init__(self, args, kernel_size=0): super().__init__() self.embed_dim = args.encoder_embed_dim self.conv_dim = args.encoder_conv_dim padding_l = ( kernel_size // 2 if kernel_size % 2 == 1 else ((kernel_size - 1) // 2, kernel_size // 2) ) if args.encoder_glu: self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.encoder_conv_type == "lightweight": self.conv = LightweightConv( self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout, ) elif args.encoder_conv_type == "dynamic": self.conv = DynamicConv( self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout, ) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.relu_dropout_module = FairseqDropout( args.relu_dropout, module_name=self.__class__.__name__ ) self.input_dropout_module = FairseqDropout( args.input_dropout, module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for _ in range(2)]) def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(0, x, before=True) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0, 1).unsqueeze(2), 0) x = self.conv(x) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(0, x, after=True) residual = x x = self.maybe_layer_norm(1, x, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(1, x, after=True) return x def maybe_layer_norm(self, i, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return self.layer_norms[i](x) else: return x def extra_repr(self): return ( "dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before, ) ) class LightConvDecoderLayer(nn.Module): """Decoder layer block. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs. Default: ``False`` kernel_size: kernel size of the convolution """ def __init__(self, args, no_encoder_attn=False, kernel_size=0): super().__init__() self.embed_dim = args.decoder_embed_dim self.conv_dim = args.decoder_conv_dim if args.decoder_glu: self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.decoder_conv_type == "lightweight": self.conv = LightweightConv( self.conv_dim, kernel_size, padding_l=kernel_size - 1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout, ) elif args.decoder_conv_type == "dynamic": self.conv = DynamicConv( self.conv_dim, kernel_size, padding_l=kernel_size - 1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout, ) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.relu_dropout_module = FairseqDropout( args.relu_dropout, module_name=self.__class__.__name__ ) self.input_dropout_module = FairseqDropout( args.input_dropout, module_name=self.__class__.__name__ ) self.normalize_before = args.decoder_normalize_before self.conv_layer_norm = LayerNorm(self.embed_dim) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.need_attn = True def forward( self, x, encoder_out, encoder_padding_mask, incremental_state, prev_conv_state=None, prev_attn_state=None, conv_mask=None, conv_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.conv_layer_norm, x, before=True) if prev_conv_state is not None: if incremental_state is None: incremental_state = {} self.conv._set_input_buffer(incremental_state, prev_conv_state) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) x = self.conv(x, incremental_state=incremental_state) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.conv_layer_norm, x, after=True) attn = None if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def extra_repr(self): return ( "dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before, ) ) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m @register_model_architecture("lightconv", "lightconv") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 7) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", 
False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.encoder_conv_dim = getattr(args, "encoder_conv_dim", args.encoder_embed_dim) args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim) args.encoder_kernel_size_list = getattr( args, "encoder_kernel_size_list", [3, 7, 15, 31, 31, 31, 31] ) args.decoder_kernel_size_list = getattr( args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31] ) if len(args.encoder_kernel_size_list) == 1: args.encoder_kernel_size_list = ( args.encoder_kernel_size_list * args.encoder_layers ) if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = ( args.decoder_kernel_size_list * args.decoder_layers ) assert ( len(args.encoder_kernel_size_list) == args.encoder_layers ), "encoder_kernel_size_list doesn't match encoder_layers" assert ( len(args.decoder_kernel_size_list) == args.decoder_layers ), "decoder_kernel_size_list doesn't match decoder_layers" args.encoder_glu = getattr(args, "encoder_glu", True) args.decoder_glu = getattr(args, "decoder_glu", True) args.input_dropout = getattr(args, "input_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout) @register_model_architecture("lightconv", "lightconv_iwslt_de_en") def lightconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 7) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", 0.1) args.encoder_glu = getattr(args, "encoder_glu", False) args.decoder_glu = getattr(args, "decoder_glu", False) args.input_dropout = getattr(args, "input_dropout", 0.0) base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_de") def lightconv_wmt_en_de(args): base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_de_big") def lightconv_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, 
"decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) @register_model_architecture("lightconv", "lightconv_wmt_en_fr_big") def lightconv_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) lightconv_wmt_en_de_big(args) @register_model_architecture("lightconv", "lightconv_wmt_zh_en_big") def lightconv_wmt_zh_en_big(args): args.dropout = getattr(args, "dropout", 0.2) args.attention_dropout = getattr(args, "attention_dropout", 0.2) args.weight_dropout = getattr(args, "weight_dropout", 0.2) lightconv_wmt_en_de_big(args)
bart_ls-main
fairseq-py/fairseq/models/lightconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.lightconv import Embedding, LightConvDecoder from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder @register_model("lightconv_lm") class LightConvLanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", default=0.1, type=float, metavar="D", help="dropout probability", ) parser.add_argument( "--attention-dropout", default=0.0, type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--relu-dropout", default=0.0, type=float, metavar="D", help="dropout probability after ReLU in FFN", ) parser.add_argument( "--input-dropout", type=float, metavar="D", help="dropout probability of the inputs", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-output-dim", type=int, metavar="N", help="decoder output dimension", ) parser.add_argument( "--decoder-input-dim", type=int, metavar="N", help="decoder input dimension" ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads or LightConv/DynamicConv heads", ) parser.add_argument( "--decoder-normalize-before", default=False, action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--adaptive-softmax-cutoff", metavar="EXPR", help="comma separated list of adaptive softmax cutoff points. 
" "Must be used with adaptive_loss criterion", ) parser.add_argument( "--adaptive-softmax-dropout", type=float, metavar="D", help="sets adaptive softmax dropout for the tail projections", ) parser.add_argument( "--adaptive-softmax-factor", type=float, metavar="N", help="adaptive input factor", ) parser.add_argument( "--no-token-positional-embeddings", default=False, action="store_true", help="if set, disables positional embeddings (outside self attention)", ) parser.add_argument( "--share-decoder-input-output-embed", default=False, action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--character-embeddings", default=False, action="store_true", help="if set, uses character embedding convolutions to produce token embeddings", ) parser.add_argument( "--character-filters", type=str, metavar="LIST", default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", help="size of character embeddings", ) parser.add_argument( "--character-embedding-dim", type=int, metavar="N", default=4, help="size of character embeddings", ) parser.add_argument( "--char-embedder-highway-layers", type=int, metavar="N", default=2, help="number of highway layers for character token embeddder", ) parser.add_argument( "--adaptive-input", default=False, action="store_true", help="if set, uses adaptive input", ) parser.add_argument( "--adaptive-input-factor", type=float, metavar="N", help="adaptive input factor", ) parser.add_argument( "--adaptive-input-cutoff", metavar="EXPR", help="comma separated list of adaptive input cutoff points.", ) parser.add_argument( "--tie-adaptive-weights", action="store_true", help="if set, ties the weights of adaptive softmax and adaptive input", ) parser.add_argument( "--tie-adaptive-proj", action="store_true", help="if set, ties the projection weights of adaptive softmax and adaptive input", ) parser.add_argument( "--decoder-learned-pos", action="store_true", help="use learned positional embeddings in the decoder", ) """LightConv and DynamicConv arguments""" parser.add_argument( "--decoder-kernel-size-list", type=lambda x: utils.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")', ) parser.add_argument( "--decoder-glu", type=utils.eval_bool, help="glu after in proj" ) parser.add_argument( "--decoder-conv-type", default="dynamic", type=str, choices=["dynamic", "lightweight"], help="type of convolution", ) parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool) parser.add_argument( "--weight-dropout", type=float, metavar="D", help="dropout probability for conv weights", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_lm_architecture(args) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = args.tokens_per_sample if getattr(args, "max_target_positions", None) is None: args.max_target_positions = args.tokens_per_sample if args.character_embeddings: embed_tokens = CharacterTokenEmbedder( task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers, ) elif args.adaptive_input: embed_tokens = AdaptiveInput( len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, utils.eval_str_list(args.adaptive_input_cutoff, type=int), ) else: embed_tokens = Embedding( len(task.dictionary), args.decoder_input_dim, task.dictionary.pad() ) if 
args.tie_adaptive_weights: assert args.adaptive_input assert args.adaptive_input_factor == args.adaptive_softmax_factor assert ( args.adaptive_softmax_cutoff == args.adaptive_input_cutoff ), "{} != {}".format( args.adaptive_softmax_cutoff, args.adaptive_input_cutoff ) assert args.decoder_input_dim == args.decoder_output_dim decoder = LightConvDecoder( args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False, ) return LightConvLanguageModel(decoder) @register_model_architecture("lightconv_lm", "lightconv_lm") def base_lm_architecture(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.character_embeddings = getattr(args, "character_embeddings", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim) # The model training is not stable without this args.decoder_normalize_before = True args.adaptive_input = getattr(args, "adaptive_input", False) args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) args.decoder_kernel_size_list = getattr( args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31] ) if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = ( args.decoder_kernel_size_list * args.decoder_layers ) assert ( len(args.decoder_kernel_size_list) == args.decoder_layers ), "decoder_kernel_size_list doesn't match decoder_layers" args.decoder_glu = getattr(args, "decoder_glu", True) args.input_dropout = getattr(args, "input_dropout", 0.1) args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout) @register_model_architecture("lightconv_lm", "lightconv_lm_gbw") def lightconv_lm_gbw(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args)
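# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original lightconv_lm.py:
# base_lm_architecture above accepts --decoder-kernel-size-list either as a
# single kernel size (broadcast to every layer) or as one entry per layer, and
# asserts the lengths match. normalize_kernel_sizes is my own standalone
# rendering of that normalization logic.
from typing import List


def normalize_kernel_sizes(kernel_sizes: List[int], num_layers: int) -> List[int]:
    """Broadcast a single kernel size to all layers, otherwise require an exact match."""
    if len(kernel_sizes) == 1:
        kernel_sizes = kernel_sizes * num_layers
    assert len(kernel_sizes) == num_layers, "kernel size list doesn't match decoder_layers"
    return kernel_sizes


if __name__ == "__main__":
    print(normalize_kernel_sizes([15], 6))                 # [15, 15, 15, 15, 15, 15]
    print(normalize_kernel_sizes([3, 7, 15, 31, 31, 31], 6))
# ---------------------------------------------------------------------------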
bart_ls-main
fairseq-py/fairseq/models/lightconv_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerDecoder, TransformerModel from fairseq.modules import FairseqDropout, LayerNorm, MultiheadAttention from torch import Tensor DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model("aan_transformer") class AANTransformerModel(TransformerModel): """ Implements a variant of the model described in "Accelerating Neural Transformer via an Average Attention Network" (Zhang et al., 2018) <https://arxiv.org/abs/1805.00631`_. Different from paper, we use a single gate for AAN gating function (mixing AAN and residual via sigmoid(z) and 1-sigmoid(z) rather than sigmoid(z_1) and sigmoid (z_2). Fixed configuration for FB production: No additional FFN for AAN block. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder """ @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return AANTransformerDecoder( args, tgt_dict, embed_tokens, no_encoder_attn=getattr(args, "no_cross_attention", False), ) @with_incremental_state class AverageAttention(nn.Module): def __init__(self, embed_dim, dropout=0.0, bias=True): super().__init__() self.embed_dim = embed_dim self.dropout = dropout def forward( self, value, mask_trick: bool = False, mask_future_timesteps: bool = False, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """Input shape: Time x Batch x Channel ` mask_trick` is to use matrix multiplication instead of cumulative sum to average the inputs. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" assert mask_future_timesteps or incremental_state is None if incremental_state is None: return self._forward(value, mask_trick, mask_future_timesteps) else: return self._forward_incremental( value, mask_trick, mask_future_timesteps, incremental_state ) def _forward(self, value, mask_trick: bool, mask_future_timesteps: bool): length, batch_size = value.size()[:2] if not mask_future_timesteps: attn = value.mean(dim=0, keepdim=True).repeat(length, 1, 1) attn_weights = None elif mask_trick: v = value.transpose(0, 1) # no TorchScript support for specifying start in arange() attn_weights = torch.arange(length, out=torch.zeros([0]).to(v)) + 1 attn_weights = ( attn_weights.reciprocal_().unsqueeze_(1).repeat(1, length).tril(0) ) attn_weights = attn_weights.unsqueeze_(0).repeat(batch_size, 1, 1) attn_weights = F.dropout( attn_weights, p=self.dropout, training=self.training ) attn = torch.bmm(attn_weights, v) attn = attn.transpose(0, 1).contiguous() else: # no TorchScript support for specifying start in arange() attn_weights = ( torch.arange(length, out=torch.zeros([0]).to(value)) + 1 ).view(length, 1, 1) attn = value.cumsum(0) / attn_weights attn_weights = None return attn, attn_weights def _forward_incremental( self, value, mask_trick: bool, mask_future_timesteps: bool, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], ): if mask_trick: saved_state = self._get_input_buffer(incremental_state) if "prev_vec" in saved_state: prev_vec = saved_state["prev_vec"] assert prev_vec is not None value = torch.cat([prev_vec, value], dim=0) saved_state["prev_vec"] = value assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) attn_weights = None attn = value.mean(0, keepdim=True) else: saved_state = self._get_input_buffer(incremental_state) if "prev_sum" in saved_state: prev_sum = saved_state["prev_sum"] assert prev_sum is not None curr_sum = prev_sum + value prev_pos = saved_state["prev_pos"] assert prev_pos is not None pos = prev_pos + 1 attn = curr_sum / pos else: curr_sum = value attn = value pos = torch.ones([1]).int() saved_state["prev_sum"] = curr_sum saved_state["prev_pos"] = pos assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) attn_weights = None return attn, attn_weights def extra_repr(self): return "embed_dim={}, dropout={}".format(self.embed_dim, self.dropout) def reorder_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in ("prev_vec", "prev_sum"): if k in input_buffer: input_buffer_k = input_buffer[k] if input_buffer_k is not None and input_buffer_k.size(1) > 1: input_buffer[k] = input_buffer_k.index_select(1, new_order) if incremental_state is not None: incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, 
"attn_state", buffer) class AANTransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__() self.embed_dim = args.decoder_embed_dim self.cross_self_attention = getattr(args, "cross_self_attention", False) self.avg_attn = AverageAttention(self.embed_dim, dropout=args.attention_dropout) # differently than original paper, we use a single gate self.aan_gating_fc = Linear(self.embed_dim * 2, self.embed_dim) self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, "char_inputs", False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = self.build_encoder_attention(self.embed_dim, args) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = self.build_fc1(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention( self, embed_dim, args, add_bias_kv=False, add_zero_attn=False ): return MultiheadAttention( embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=not getattr(args, "cross_self_attention", False), ) def build_encoder_attention(self, embed_dim, args): return MultiheadAttention( embed_dim, args.decoder_attention_heads, kdim=getattr(args, "encoder_embed_dim", None), vdim=getattr(args, "encoder_embed_dim", None), dropout=args.attention_dropout, encoder_decoder_attention=True, ) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out: Optional[torch.Tensor] = None, encoder_padding_mask: Optional[torch.Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, prev_self_attn_state: Optional[List[torch.Tensor]] = None, prev_attn_state: Optional[List[torch.Tensor]] = None, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). 
Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) x, _ = self.avg_attn( value=x, mask_trick=self.training, mask_future_timesteps=True, incremental_state=incremental_state, ) # differently than original paper, we use a single gate gate = torch.sigmoid(self.aan_gating_fc(torch.cat([residual, x], dim=-1))) x = gate * x + (1 - gate) * residual x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) if self.encoder_attn is not None: residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) return x, attn, None def make_generation_fast_(self, need_attn: bool = False, **kwargs): self.need_attn = need_attn class AANTransformerDecoder(TransformerDecoder): def build_decoder_layer(self, args, no_encoder_attn=False): return AANTransformerDecoderLayer(args, no_encoder_attn) def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m @register_model_architecture("aan_transformer", "aan_transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, 
"no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
bart_ls-main
fairseq-py/fairseq/models/fb_aan_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

#!/usr/bin/env python3

"""
This module has the EMA class used to store a copy of the exponentially decayed
model params.

Typical usage of EMA class involves initializing an object using an existing
model (random or from a seed model) and setting the config like ema_decay,
ema_start_update which determine how the EMA model is updated. After every
update of the model i.e. at the end of the train_step, the EMA should be updated
by passing the new model to the EMA.step function. The EMA model state dict can
be stored in the extra state under the key of "ema" and dumped into a checkpoint
and loaded. The EMA object can be passed to tasks by setting task.uses_ema
property.

EMA is a smoothed/ensemble model which might have better performance when used
for inference or further fine-tuning. EMA class has a reverse function to load
the EMA params into a model and use it like a regular model.
"""

import copy
import logging

import torch

from fairseq import checkpoint_utils


class EMA(object):
    """Exponential Moving Average of Fairseq Models

    EMA keeps a copy of the exponentially decayed model params. The set of params
    should include both gradient-descent and non-gradient descent params, such as
    batch mean/var and buffers.

    This is a modified implementation of the open source code in
    https://github.com/zhawe01/fairseq-gec.git, and internal source code in
    fbcode/mobile-vision/projects/classification_pytorch/lib/utils/model_ema.py.

    Similar to TF EMA.
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.

    EMA provides an averaged and smoothed set of model weights, and has been shown
    to improve vision models. EMA class does all necessary functions to update,
    reload, or init EMA methods.

    EMA object is initialized from an arbitrary model. By default, it is stored in
    the same device (unless device specified at initialization) and with the same
    precision as the model (unless ema_fp32 is True). ema_fp32 is recommended.
    This stores the EMA parameters in fp32 only for the EMA update step, and is
    used at the default precision otherwise.

    EMA is usually enabled using EMAConfig with store_ema=True. Some important
    parameters to configure EMA are
    1) ema_decay - The decay of EMA
    2) ema_update_freq - EMA is updated every this many model updates.
    3) ema_start_update - Start EMA update after this many model updates [default 0]

    Key methods:
    1) step - One update of EMA using new model
    2) restore - Update EMA from a state dict
    3) reverse - Load EMA into a model
    4) get_decay, _set_decay - Used to get or set the decay.
       Note _set_decay is called from step.
    5) build_fp32_params - Used to initialize or update the fp32 copy of EMA
       params. Note this is enabled only when ema_fp32=True
    """

    def __init__(self, model, config, device=None):
        """
        @param model model to initialize the EMA with
        @param config EMAConfig object with configuration like
        ema_decay, ema_update_freq, ema_fp32
        @param device If provided, copy EMA to this device (e.g. gpu).
        Otherwise EMA is in the same device as the model.
        """
        self.decay = config.ema_decay
        self.model = copy.deepcopy(model)
        self.model.requires_grad_(False)
        self.config = config
        self.fp32_params = {}

        if self.config.ema_seed_model is not None:
            state = checkpoint_utils.load_ema_from_checkpoint(
                self.config.ema_seed_model
            )
            self.model.load_state_dict(state["model"], strict=True)

        if device is not None:
            logging.info(f"Copying EMA model to device {device}")
            self.model = self.model.to(device=device)

        if self.config.ema_fp32:
            self.build_fp32_params()

        self.update_freq_counter = 0

    def get_model(self):
        return self.model

    def build_fp32_params(self, state_dict=None):
        """
        Store a copy of the EMA params in fp32.
        If state dict is passed, the EMA params is copied from
        the provided state dict. Otherwise, it is copied from the
        current EMA model parameters.
        """
        if not self.config.ema_fp32:
            raise RuntimeError(
                "build_fp32_params should not be called if ema_fp32=False. "
                "Use ema_fp32=True if this is really intended."
            )

        if state_dict is None:
            state_dict = self.model.state_dict()

        def _to_float(t):
            return t.float() if torch.is_floating_point(t) else t

        for param_key in state_dict:
            if param_key in self.fp32_params:
                self.fp32_params[param_key].copy_(state_dict[param_key])
            else:
                self.fp32_params[param_key] = _to_float(state_dict[param_key])

    def restore(self, state_dict, build_fp32_params=False):
        """Load data from a model spec into EMA model"""
        self.model.load_state_dict(state_dict, strict=False)
        if build_fp32_params:
            self.build_fp32_params(state_dict)

    def _set_decay(self, decay):
        self.decay = decay

    def get_decay(self):
        return self.decay

    def _step_internal(self, new_model, updates=None):
        """One update of the EMA model based on new model weights"""
        decay = self.decay

        ema_state_dict = {}
        ema_params = (
            self.fp32_params if self.config.ema_fp32 else self.model.state_dict()
        )
        for key, param in new_model.state_dict().items():
            try:
                ema_param = ema_params[key]
            except KeyError:
                ema_param = (
                    param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
                )

            if param.shape != ema_param.shape:
                raise ValueError(
                    "incompatible tensor shapes between model param and ema param: "
                    "{} vs. {}".format(param.shape, ema_param.shape)
                )
            if "version" in key:
                # Do not decay a model.version pytorch param
                continue
            ema_param.mul_(decay)
            ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1 - decay)
            ema_state_dict[key] = ema_param
        self.restore(ema_state_dict, build_fp32_params=False)

    def step(self, new_model, updates=None):
        """
        One update of EMA which is done every self.config.ema_update_freq
        updates of the model.

        @param updates The current number of model updates done.
        Decay is set to 0 if model updates < ema_start_update, which means
        the model will be simply copied over to the EMA.
        When model updates >= ema_start_update, then EMA is updated with
        a decay of self.config.ema_decay.
        """
        self._set_decay(
            0
            if updates is not None and updates < self.config.ema_start_update
            else self.config.ema_decay
        )
        if updates is not None and self.config.ema_update_freq > 1:
            self.update_freq_counter += 1
            if self.update_freq_counter >= self.config.ema_update_freq:
                self._step_internal(new_model, updates)
                self.update_freq_counter = 0
        else:
            self._step_internal(new_model, updates)

    def reverse(self, model):
        """
        Load the model parameters from EMA model.
        Useful for inference or fine-tuning from the EMA model.
        """
        model.load_state_dict(self.model.state_dict(), strict=False)
        return model
bart_ls-main
fairseq-py/fairseq/models/ema/ema.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from .ema import EMA


def build_ema(model, cfg, device):
    return EMA(model, cfg, device)


# automatically import any Python files in the models/ema/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        file_name = file[: file.find(".py")]
        importlib.import_module("fairseq.models.ema." + file_name)
bart_ls-main
fairseq-py/fairseq/models/ema/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #!/usr/bin/env python3 import logging import math from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer from torch import Tensor logger = logging.getLogger(__name__) @register_model("convtransformer") class ConvTransformerModel(FairseqEncoderDecoderModel): """ Transformer-based Speech translation model from ESPNet-ST https://arxiv.org/abs/2004.10234 """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--input-feat-per-channel", type=int, metavar="N", help="encoder input dimension per input channel", ) parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--decoder-output-dim", type=int, metavar="N", help="decoder output dimension (extra linear layer if different from decoder embed dim)", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", 
help="model to take decoder weights from (for initialization)", ) parser.add_argument( "--conv-out-channels", type=int, metavar="INT", help="the number of output channels of conv layer", ) @classmethod def build_encoder(cls, args): encoder = ConvTransformerEncoder(args) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding( task.target_dictionary, args.decoder_embed_dim ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) @staticmethod @torch.jit.unused def set_batch_first(lprobs): lprobs.batch_first = True def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) if self.training: self.set_batch_first(lprobs) return lprobs def output_layout(self): return "BTD" """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overrites the forward method definition without **kwargs. 
""" def forward(self, src_tokens, src_lengths, prev_output_tokens): encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) return decoder_out class ConvTransformerEncoder(FairseqEncoder): """Conv + Transformer encoder""" def __init__(self, args): """Construct an Encoder object.""" super().__init__(None) self.dropout = args.dropout self.embed_scale = ( 1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim) ) self.padding_idx = 1 self.in_channels = 1 self.input_dim = args.input_feat_per_channel self.conv = torch.nn.Sequential( torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=3 // 2), torch.nn.ReLU(), torch.nn.Conv2d( args.conv_out_channels, args.conv_out_channels, 3, stride=2, padding=3 // 2, ), torch.nn.ReLU(), ) transformer_input_dim = self.infer_conv_output_dim( self.in_channels, self.input_dim, args.conv_out_channels ) self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx, learned=False, ) self.transformer_layers = nn.ModuleList([]) self.transformer_layers.extend( [TransformerEncoderLayer(args) for i in range(args.encoder_layers)] ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None def pooling_ratio(self): return 4 def infer_conv_output_dim(self, in_channels, input_dim, out_channels): sample_seq_len = 200 sample_bsz = 10 x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim) x = torch.nn.Conv2d(1, out_channels, 3, stride=2, padding=3 // 2)(x) x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=3 // 2)(x) x = x.transpose(1, 2) mb, seq = x.size()[:2] return x.contiguous().view(mb, seq, -1).size(-1) def forward(self, src_tokens, src_lengths): """Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: """ bsz, max_seq_len, _ = src_tokens.size() x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) x = self.conv(x) bsz, _, output_seq_len, _ = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) x = self.out(x) x = self.embed_scale * x subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5) input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long() input_len_1 = x.size(0) * torch.ones([src_lengths.size(0)]).long().to( input_len_0.device ) input_lengths = torch.min(input_len_0, input_len_1) encoder_padding_mask = lengths_to_padding_mask(input_lengths) positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = F.dropout(x, p=self.dropout, training=self.training) for layer in self.transformer_layers: x = layer(x, encoder_padding_mask) if not encoder_padding_mask.any(): maybe_encoder_padding_mask = None else: maybe_encoder_padding_mask = encoder_padding_mask return { "encoder_out": [x], "encoder_padding_mask": [maybe_encoder_padding_mask] if maybe_encoder_padding_mask is not None else [], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] if len(encoder_out["encoder_padding_mask"]) == 0: new_encoder_padding_mask = [] else: new_encoder_padding_mask = [ (encoder_out["encoder_padding_mask"][0]).index_select(0, new_order) ] if len(encoder_out["encoder_embedding"]) == 0: new_encoder_embedding = [] else: new_encoder_embedding = [ (encoder_out["encoder_embedding"][0]).index_select(0, new_order) ] encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, "encoder_padding_mask": new_encoder_padding_mask, "encoder_embedding": new_encoder_embedding, "encoder_states": encoder_states, "src_tokens": [], "src_lengths": [], } class TransformerDecoderNoExtra(TransformerDecoder): def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): # call scriptable method from parent class x, _ = self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) return x, None @register_model_architecture(model_name="convtransformer", arch_name="convtransformer") def base_architecture(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) 
args.max_source_positions = getattr(args, "max_source_positions", 3000) args.max_target_positions = getattr(args, "max_target_positions", 1024) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim) @register_model_architecture("convtransformer", "convtransformer_espnet") def convtransformer_espnet(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
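# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original convtransformer.py: the encoder
# above shrinks the input filter-bank features with two stride-2 Conv2d layers
# (roughly 4x along time) and flattens channels x features before the Linear
# projection -- the same probing trick used by infer_conv_output_dim. The sizes
# below (80-dim features, 64 conv channels) are assumptions for the demo.
import torch
import torch.nn as nn

conv = nn.Sequential(
    nn.Conv2d(1, 64, 3, stride=2, padding=1),
    nn.ReLU(),
    nn.Conv2d(64, 64, 3, stride=2, padding=1),
    nn.ReLU(),
)

if __name__ == "__main__":
    bsz, seq_len, feat_dim = 10, 200, 80
    x = torch.randn(bsz, seq_len, feat_dim).unsqueeze(1)  # (B, 1, T, F)
    y = conv(x)                                           # (B, 64, T/4, F/4)
    b, c, t, f = y.size()
    transformer_input_dim = c * f                         # what the Linear projection consumes
    print(y.shape, transformer_input_dim)                 # torch.Size([10, 64, 50, 20]) 1280
# ---------------------------------------------------------------------------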
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/convtransformer.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import copy from typing import Dict, List, Optional, Tuple from fairseq import utils, checkpoint_utils from fairseq.models import (FairseqEncoderDecoderModel, FairseqEncoder, register_model, register_model_architecture) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.models.wav2vec import Wav2VecEncoder from fairseq.modules.layer_norm import LayerNorm from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.utils import safe_hasattr from torch import Tensor import torch.nn as nn logger = logging.getLogger(__name__) class Conv1dAdaptor(nn.Module): def __init__(self, in_dim, out_dim, n_layers=3, kernel_size=3, stride=2, add_layernorm=False): super().__init__() self.layers = nn.ModuleList( nn.Conv1d(in_dim if i == 0 else out_dim, out_dim * 2, kernel_size, stride=stride, padding=kernel_size // 2) for i in range(n_layers) ) self.layernorms = None if add_layernorm: self.layernorms = nn.ModuleList(LayerNorm(out_dim) for _ in range(n_layers)) self.stride = stride @classmethod def add_args(cls, parser): parser.add_argument("--adaptor-n-layers", type=int) parser.add_argument("--adaptor-kernel-size", type=int) parser.add_argument("--adaptor-stride", type=int) parser.add_argument("--adaptor-layernorm", action='store_true') def get_out_seq_lens_tensor(self, in_seq_lens_tensor): out = in_seq_lens_tensor.clone() for _ in self.layers: out = ((out.float() - 1) / self.stride + 1).floor().long() return out def forward(self, x, padding_mask): # T x B x C -> B x C x T x = x.transpose(0, 1).transpose(1, 2) for i, layer in enumerate(self.layers): x = nn.functional.glu(layer(x), dim=1) if self.layernorms is not None: x = self.layernorms[i](x.transpose(1, 2)).transpose(1, 2) # B x C x T -> T x B x C x = x.transpose(1, 2).transpose(0, 1) if padding_mask is None: out_padding_mask = None else: out_lengths = self.get_out_seq_lens_tensor((~padding_mask).sum(1)) out_padding_mask = lengths_to_padding_mask(out_lengths) return x, out_padding_mask def add_wav2vec_asr_args(parser): parser.add_argument("--w2v-path", help="path to wav2vec 2.0 model") parser.add_argument( "--no-pretrained-weights", action="store_true", help="if true, does not load pretrained weights", ) parser.add_argument( "--dropout-input", type=float, metavar="D", help="dropout to apply to the input (after feat extr)", ) parser.add_argument( "--final-dropout", type=float, metavar="D", help="dropout after transformer and before final projection", ) parser.add_argument( "--apply-mask", action="store_true", help="apply masking during fine-tuning" ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability inside wav2vec 2.0 model", ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights inside wav2vec 2.0 model", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN inside wav2vec 2.0 model", ) parser.add_argument( "--mask-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-prob", type=float, help="probability of replacing a token with mask" ) parser.add_argument( "--mask-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) 
parser.add_argument( "--mask-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--mask-channel-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-channel-prob", type=float, help="probability of replacing a token with mask", ) parser.add_argument( "--mask-channel-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-channel-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-channel-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--freeze-finetune-updates", default=0, type=int, help="dont finetune wav2vec for this many updates", ) parser.add_argument( "--feature-grad-mult", default=None, type=float, help="reset feature grad mult in wav2vec 2.0 to this", ) parser.add_argument( "--layerdrop", default=0.0, type=float, help="probability of dropping a layer in wav2vec 2.0", ) parser.add_argument("--w2v-args", default=None) class Wav2VecEncoderWithAdaptor(FairseqEncoder): def __init__(self, args): super().__init__(None) self.w2v_encoder = Wav2VecEncoder(args) encoder_out_dim = self.w2v_encoder.w2v_model.encoder.embedding_dim # Projection + 8x shrinking self.adaptor = Conv1dAdaptor( encoder_out_dim, args.decoder_embed_dim, n_layers=args.adaptor_n_layers, kernel_size=args.adaptor_kernel_size, stride=args.adaptor_stride, add_layernorm=args.adaptor_layernorm ) for k, p in self.w2v_encoder.w2v_model.named_parameters(): # Freeze pretrained models by default if safe_hasattr(args, 'finetune_w2v_params') and XMTransformerModel.finetune_params( args.finetune_w2v_params, k): p.requires_grad = True else: p.requires_grad = False @classmethod def add_args(cls, parser): add_wav2vec_asr_args(parser) parser.add_argument( "--normalize", action="store_true", help="if set, normalizes input to have 0 mean and unit variance", ) parser.add_argument("--finetune-w2v-params", type=str, metavar="STR", help="comma-separated param strings to finetune.") Conv1dAdaptor.add_args(parser) def forward(self, src_tokens, src_lengths=None, **kwargs): padding_mask = lengths_to_padding_mask(src_lengths) out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True) x = out["encoder_out"] enc_padding_mask = None if out["encoder_padding_mask"] is not None: enc_padding_mask = out["encoder_padding_mask"].transpose(0, 1) # T X B --> B X T x, enc_padding_mask = self.adaptor(x, enc_padding_mask) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [enc_padding_mask] if enc_padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": [], # List[T x B x C] "src_tokens": [], "src_lengths": [], } def reorder_encoder_out(self, encoder_out, new_order): new_encoder_out = ( [] if len(encoder_out["encoder_out"]) == 0 else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]] ) new_encoder_padding_mask = ( [] if len(encoder_out["encoder_padding_mask"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]] ) new_encoder_embedding = ( [] if len(encoder_out["encoder_embedding"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]] ) encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in 
enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], # B x T "src_lengths": [], # B x 1 } def add_decoder_args(parser): parser.add_argument("--activation-fn", type=str, default='relu', choices=utils.get_available_activation_fns(), help="activation function to use") parser.add_argument("--decoder-dropout", type=float, metavar="D", help="dropout probability") parser.add_argument("--decoder-attention-dropout", type=float, metavar="D", help="dropout probability for attention weights") parser.add_argument("--decoder-activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.") parser.add_argument("--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension") parser.add_argument("--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN") parser.add_argument("--decoder-layers", type=int, metavar="N", help="num decoder layers") parser.add_argument("--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads") parser.add_argument("--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block") parser.add_argument("--layernorm-embedding", action="store_true", help="add layernorm to embedding") parser.add_argument("--no-scale-embedding", action="store_true", help="if True, dont scale embeddings") parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)" ) parser.add_argument("--finetune-decoder-params", type=str, metavar="STR", help="comma-separated param strings to finetune.") parser.add_argument("--checkpoint-activations", action="store_true") @register_model("xm_transformer") class XMTransformerModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" Wav2VecEncoderWithAdaptor.add_args(parser) add_decoder_args(parser) @classmethod def build_encoder(cls, args): _args = copy.deepcopy(args) state = checkpoint_utils.load_checkpoint_to_cpu(args.w2v_path) if state.get("cfg") is not None: encoder_embed_dim = state["cfg"]._content["model"]["encoder_embed_dim"] elif state.get("args") is not None: encoder_embed_dim = state["args"].encoder_embed_dim else: raise ValueError(f"Invalid config in {args.w2v_path}") _args.decoder_embed_dim = encoder_embed_dim encoder = Wav2VecEncoderWithAdaptor(_args) return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): _args = copy.deepcopy(args) _args.dropout = args.decoder_dropout _args.attention_dropout = args.decoder_attention_dropout _args.activation_dropout = args.decoder_activation_dropout _args.max_target_positions = 1024 decoder = TransformerDecoder(_args, task.target_dictionary, embed_tokens) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) for k, p in decoder.named_parameters(): # Freeze pretrained models by default if safe_hasattr(args, 'finetune_decoder_params') and XMTransformerModel.finetune_params( args.finetune_decoder_params, k): p.requires_grad = True else: p.requires_grad 
= False return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) lprobs.batch_first = True return lprobs def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overrites the forward method definition without **kwargs. """ encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out) return decoder_out def upgrade_state_dict(self, state_dict): for k, _ in state_dict.items(): if 'adaptor.layers' in state_dict: print(k) new = k.replace('adaptor.layers', 'adaptor_layers') state_dict[new] = state_dict[k] del state_dict[k] @staticmethod def finetune_params(finetune_params, param_name): if finetune_params == "all": return True finetune_params_list = finetune_params.split(",") for finetune_param in finetune_params_list: if finetune_param in param_name: return True return False def set_default_w2v_encoder_args(args): args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False) args.dropout_input = getattr(args, "dropout_input", 0) args.final_dropout = getattr(args, "final_dropout", 0) args.apply_mask = getattr(args, "apply_mask", False) args.dropout = getattr(args, "dropout", 0) args.attention_dropout = getattr(args, "attention_dropout", 0) args.activation_dropout = getattr(args, "activation_dropout", 0) args.mask_length = getattr(args, "mask_length", 10) args.mask_prob = getattr(args, "mask_prob", 0.5) args.mask_selection = getattr(args, "mask_selection", "static") args.mask_other = getattr(args, "mask_other", 0) args.no_mask_overlap = getattr(args, "no_mask_overlap", False) args.mask_channel_length = getattr(args, "mask_channel_length", 10) args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5) args.mask_channel_before = getattr(args, "mask_channel_before", False) args.mask_channel_selection = getattr(args, "mask_channel_selection", "static") args.mask_channel_other = getattr(args, "mask_channel_other", 0) args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False) args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0) args.feature_grad_mult = 0.1 args.layerdrop = getattr(args, "layerdrop", 0.0) args.normalize = getattr(args, "normalize", False) def set_default_adaptor_args(args): args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3) args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3) args.adaptor_stride = getattr(args, "adaptor_stride", 2) args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False) def set_default_mbart_decoder_args(args): 
    args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4 * 1024)
    args.decoder_layers = getattr(args, 'decoder_layers', 12)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True)
    args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
    args.adaptive_input = getattr(args, "adaptive_input", False)
    args.decoder_attention_dropout = getattr(args, 'decoder_attention_dropout', 0.)
    args.decoder_activation_dropout = getattr(args, 'decoder_activation_dropout', 0.)
    args.decoder_dropout = getattr(args, 'decoder_dropout', 0.1)
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
    args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
    args.share_decoder_input_output_embed = getattr(
        args, 'share_decoder_input_output_embed', True
    )
    args.no_token_positional_embeddings = getattr(
        args, "no_token_positional_embeddings", False
    )
    args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
    args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
    args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
    args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
    args.layernorm_embedding = getattr(args, 'layernorm_embedding', True)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
    args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
    args.checkpoint_activations = getattr(args, "checkpoint_activations", False)


@register_model_architecture(model_name="xm_transformer", arch_name="xm_transformer")
def base_architecture(args):
    set_default_w2v_encoder_args(args)
    set_default_adaptor_args(args)
    set_default_mbart_decoder_args(args)
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/xm_transformer.py
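The Conv1dAdaptor in the file above shrinks the wav2vec 2.0 encoder output along time (three stride-2 GLU convolutions by default, roughly 8x) and keeps the padding mask in sync via get_out_seq_lens_tensor. Below is a minimal standalone sketch of that length bookkeeping in plain PyTorch; the helper name out_seq_lens and the example lengths are made up for illustration and are not part of the file.

import torch
import torch.nn as nn

# Defaults mirroring the adaptor above: 3 layers, kernel 3, stride 2.
n_layers, kernel_size, stride = 3, 3, 2
in_dim, out_dim = 16, 8

layers = nn.ModuleList(
    nn.Conv1d(in_dim if i == 0 else out_dim, out_dim * 2, kernel_size,
              stride=stride, padding=kernel_size // 2)
    for i in range(n_layers)
)

def out_seq_lens(in_lens, n_layers, stride):
    # Same recurrence as Conv1dAdaptor.get_out_seq_lens_tensor:
    # each stride-2 layer maps T -> floor((T - 1) / stride + 1).
    out = in_lens.clone()
    for _ in range(n_layers):
        out = ((out.float() - 1) / stride + 1).floor().long()
    return out

lengths = torch.tensor([100, 37])                # made-up source lengths (B,)
x = torch.randn(2, in_dim, int(lengths.max()))   # B x C x T, as the adaptor uses internally

for layer in layers:
    x = nn.functional.glu(layer(x), dim=1)       # GLU halves the channels back to out_dim

print(x.shape[-1], out_seq_lens(lengths, n_layers, stride))  # 13 and tensor([13, 5])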
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #!/usr/bin/env python3 from ast import literal_eval from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) @register_model("s2t_berard") class BerardModel(FairseqEncoderDecoderModel): """Implementation of a model similar to https://arxiv.org/abs/1802.04200 Paper title: End-to-End Automatic Speech Translation of Audiobooks An implementation is available in tensorflow at https://github.com/eske/seq2seq Relevant files in this implementation are the config (https://github.com/eske/seq2seq/blob/master/config/LibriSpeech/AST.yaml) and the model code (https://github.com/eske/seq2seq/blob/master/translate/models.py). The encoder and decoder try to be close to the original implementation. The attention is an MLP as in Bahdanau et al. (https://arxiv.org/abs/1409.0473). There is no state initialization by averaging the encoder outputs. """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): parser.add_argument( "--input-layers", type=str, metavar="EXPR", help="List of linear layer dimensions. These " "layers are applied to the input features and " "are followed by tanh and possibly dropout.", ) parser.add_argument( "--dropout", type=float, metavar="D", help="Dropout probability to use in the encoder/decoder. " "Note that this parameters control dropout in various places, " "there is no fine-grained control for dropout for embeddings " "vs LSTM layers for example.", ) parser.add_argument( "--in-channels", type=int, metavar="N", help="Number of encoder input channels. " "Typically value is 1.", ) parser.add_argument( "--conv-layers", type=str, metavar="EXPR", help="List of conv layers " "(format: (channels, kernel, stride)).", ) parser.add_argument( "--num-blstm-layers", type=int, metavar="N", help="Number of encoder bi-LSTM layers.", ) parser.add_argument( "--lstm-size", type=int, metavar="N", help="LSTM hidden size." 
) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="Embedding dimension of the decoder target tokens.", ) parser.add_argument( "--decoder-hidden-dim", type=int, metavar="N", help="Decoder LSTM hidden dimension.", ) parser.add_argument( "--decoder-num-layers", type=int, metavar="N", help="Number of decoder LSTM layers.", ) parser.add_argument( "--attention-dim", type=int, metavar="N", help="Hidden layer dimension in MLP attention.", ) parser.add_argument( "--output-layer-dim", type=int, metavar="N", help="Hidden layer dim for linear layer prior to output projection.", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)", ) @classmethod def build_encoder(cls, args, task): encoder = BerardEncoder( input_layers=literal_eval(args.input_layers), conv_layers=literal_eval(args.conv_layers), in_channels=args.input_channels, input_feat_per_channel=args.input_feat_per_channel, num_blstm_layers=args.num_blstm_layers, lstm_size=args.lstm_size, dropout=args.dropout, ) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @classmethod def build_decoder(cls, args, task): decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, num_layers=args.decoder_num_layers, hidden_size=args.decoder_hidden_dim, dropout=args.dropout, encoder_output_dim=2 * args.lstm_size, # bidirectional attention_dim=args.attention_dim, output_layer_dim=args.output_layer_dim, ) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @classmethod def build_model(cls, args, task): """Build a new model instance.""" encoder = cls.build_encoder(args, task) decoder = cls.build_decoder(args, task) return cls(encoder, decoder) def get_normalized_probs(self, net_output, log_probs, sample=None): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = super().get_normalized_probs(net_output, log_probs, sample) # lprobs is a (B, T, D) tensor lprobs.batch_first = True return lprobs class BerardEncoder(FairseqEncoder): def __init__( self, input_layers: List[int], conv_layers: List[Tuple[int]], in_channels: int, input_feat_per_channel: int, num_blstm_layers: int, lstm_size: int, dropout: float, ): """ Args: input_layers: list of linear layer dimensions. These layers are applied to the input features and are followed by tanh and possibly dropout. conv_layers: list of conv2d layer configurations. A configuration is a tuple (out_channels, conv_kernel_size, stride). in_channels: number of input channels. input_feat_per_channel: number of input features per channel. These are speech features, typically 40 or 80. num_blstm_layers: number of bidirectional LSTM layers. lstm_size: size of the LSTM hidden (and cell) size. dropout: dropout probability. Dropout can be applied after the linear layers and LSTM layers but not to the convolutional layers. 
""" super().__init__(None) self.input_layers = nn.ModuleList() in_features = input_feat_per_channel for out_features in input_layers: if dropout > 0: self.input_layers.append( nn.Sequential( nn.Linear(in_features, out_features), nn.Dropout(p=dropout) ) ) else: self.input_layers.append(nn.Linear(in_features, out_features)) in_features = out_features self.in_channels = in_channels self.input_dim = input_feat_per_channel self.conv_kernel_sizes_and_strides = [] self.conv_layers = nn.ModuleList() lstm_input_dim = input_layers[-1] for conv_layer in conv_layers: out_channels, conv_kernel_size, conv_stride = conv_layer self.conv_layers.append( nn.Conv2d( in_channels, out_channels, conv_kernel_size, stride=conv_stride, padding=conv_kernel_size // 2, ) ) self.conv_kernel_sizes_and_strides.append((conv_kernel_size, conv_stride)) in_channels = out_channels lstm_input_dim //= conv_stride lstm_input_dim *= conv_layers[-1][0] self.lstm_size = lstm_size self.num_blstm_layers = num_blstm_layers self.lstm = nn.LSTM( input_size=lstm_input_dim, hidden_size=lstm_size, num_layers=num_blstm_layers, dropout=dropout, bidirectional=True, ) self.output_dim = 2 * lstm_size # bidirectional if dropout > 0: self.dropout = nn.Dropout(p=dropout) else: self.dropout = None def forward(self, src_tokens, src_lengths=None, **kwargs): """ Args src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ bsz, max_seq_len, _ = src_tokens.size() # (B, C, T, feat) x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) for input_layer in self.input_layers: x = input_layer(x) x = torch.tanh(x) for conv_layer in self.conv_layers: x = conv_layer(x) bsz, _, output_seq_len, _ = x.size() # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> # (T, B, C * feat) x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) input_lengths = src_lengths.clone() for k, s in self.conv_kernel_sizes_and_strides: p = k // 2 input_lengths = (input_lengths.float() + 2 * p - k) / s + 1 input_lengths = input_lengths.floor().long() packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths) h0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() c0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_() packed_outs, _ = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_outs) if self.dropout is not None: x = self.dropout(x) encoder_padding_mask = ( lengths_to_padding_mask(output_lengths).to(src_tokens.device).t() ) return { "encoder_out": x, # (T, B, C) "encoder_padding_mask": encoder_padding_mask, # (T, B) } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out class MLPAttention(nn.Module): """The original attention from Badhanau et al. (2014) https://arxiv.org/abs/1409.0473, based on a Multi-Layer Perceptron. 
The attention score between position i in the encoder and position j in the decoder is: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a) """ def __init__(self, decoder_hidden_state_dim, context_dim, attention_dim): super().__init__() self.context_dim = context_dim self.attention_dim = attention_dim # W_ae and b_a self.encoder_proj = nn.Linear(context_dim, self.attention_dim, bias=True) # W_ad self.decoder_proj = nn.Linear( decoder_hidden_state_dim, self.attention_dim, bias=False ) # V_a self.to_scores = nn.Linear(self.attention_dim, 1, bias=False) def forward(self, decoder_state, source_hids, encoder_padding_mask): """The expected input dimensions are: decoder_state: bsz x decoder_hidden_state_dim source_hids: src_len x bsz x context_dim encoder_padding_mask: src_len x bsz """ src_len, bsz, _ = source_hids.size() # (src_len*bsz) x context_dim (to feed through linear) flat_source_hids = source_hids.view(-1, self.context_dim) # (src_len*bsz) x attention_dim encoder_component = self.encoder_proj(flat_source_hids) # src_len x bsz x attention_dim encoder_component = encoder_component.view(src_len, bsz, self.attention_dim) # 1 x bsz x attention_dim decoder_component = self.decoder_proj(decoder_state).unsqueeze(0) # Sum with broadcasting and apply the non linearity # src_len x bsz x attention_dim hidden_att = torch.tanh( (decoder_component + encoder_component).view(-1, self.attention_dim) ) # Project onto the reals to get attentions scores (src_len x bsz) attn_scores = self.to_scores(hidden_att).view(src_len, bsz) # Mask + softmax (src_len x bsz) if encoder_padding_mask is not None: attn_scores = ( attn_scores.float() .masked_fill_(encoder_padding_mask, float("-inf")) .type_as(attn_scores) ) # FP16 support: cast to float and back # srclen x bsz normalized_masked_attn_scores = F.softmax(attn_scores, dim=0) # Sum weighted sources (bsz x context_dim) attn_weighted_context = ( source_hids * normalized_masked_attn_scores.unsqueeze(2) ).sum(dim=0) return attn_weighted_context, normalized_masked_attn_scores class LSTMDecoder(FairseqIncrementalDecoder): def __init__( self, dictionary, embed_dim, num_layers, hidden_size, dropout, encoder_output_dim, attention_dim, output_layer_dim, ): """ Args: dictionary: target text dictionary. embed_dim: embedding dimension for target tokens. num_layers: number of LSTM layers. hidden_size: hidden size for LSTM layers. dropout: dropout probability. Dropout can be applied to the embeddings, the LSTM layers, and the context vector. encoder_output_dim: encoder output dimension (hidden size of encoder LSTM). attention_dim: attention dimension for MLP attention. output_layer_dim: size of the linear layer prior to output projection. 
""" super().__init__(dictionary) self.num_layers = num_layers self.hidden_size = hidden_size num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx) if dropout > 0: self.dropout = nn.Dropout(p=dropout) else: self.dropout = None self.layers = nn.ModuleList() for layer_id in range(num_layers): input_size = embed_dim if layer_id == 0 else encoder_output_dim self.layers.append( nn.LSTMCell(input_size=input_size, hidden_size=hidden_size) ) self.context_dim = encoder_output_dim self.attention = MLPAttention( decoder_hidden_state_dim=hidden_size, context_dim=encoder_output_dim, attention_dim=attention_dim, ) self.deep_output_layer = nn.Linear( hidden_size + encoder_output_dim + embed_dim, output_layer_dim ) self.output_projection = nn.Linear(output_layer_dim, num_embeddings) def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs ): encoder_padding_mask = encoder_out["encoder_padding_mask"] encoder_outs = encoder_out["encoder_out"] if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() srclen = encoder_outs.size(0) # embed tokens embeddings = self.embed_tokens(prev_output_tokens) x = embeddings if self.dropout is not None: x = self.dropout(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental # generation) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is not None: prev_hiddens, prev_cells = cached_state else: prev_hiddens = [encoder_out["encoder_out"].mean(dim=0)] * self.num_layers prev_cells = [x.new_zeros(bsz, self.hidden_size)] * self.num_layers attn_scores = x.new_zeros(bsz, srclen) attention_outs = [] outs = [] for j in range(seqlen): input = x[j, :, :] attention_out = None for i, layer in enumerate(self.layers): # the previous state is one layer below except for the bottom # layer where the previous state is the state emitted by the # top layer hidden, cell = layer( input, ( prev_hiddens[(i - 1) % self.num_layers], prev_cells[(i - 1) % self.num_layers], ), ) if self.dropout is not None: hidden = self.dropout(hidden) prev_hiddens[i] = hidden prev_cells[i] = cell if attention_out is None: attention_out, attn_scores = self.attention( hidden, encoder_outs, encoder_padding_mask ) if self.dropout is not None: attention_out = self.dropout(attention_out) attention_outs.append(attention_out) input = attention_out # collect the output of the top layer outs.append(hidden) # cache previous states (no-op except during incremental generation) utils.set_incremental_state( self, incremental_state, "cached_state", (prev_hiddens, prev_cells) ) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) attention_outs_concat = torch.cat(attention_outs, dim=0).view( seqlen, bsz, self.context_dim ) # T x B x C -> B x T x C x = x.transpose(0, 1) attention_outs_concat = attention_outs_concat.transpose(0, 1) # concat LSTM output, attention output and embedding # before output projection x = torch.cat((x, attention_outs_concat, embeddings), dim=2) x = self.deep_output_layer(x) x = torch.tanh(x) if self.dropout is not None: x = self.dropout(x) # project back to size of vocabulary x = self.output_projection(x) # to return the full attn_scores tensor, we need to fix the decoder # to account for subsampling input frames # return x, attn_scores return x, None def 
reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is None: return def reorder_state(state): if isinstance(state, list): return [reorder_state(state_i) for state_i in state] return state.index_select(0, new_order) new_state = tuple(map(reorder_state, cached_state)) utils.set_incremental_state(self, incremental_state, "cached_state", new_state) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard") def berard(args): """The original version: "End-to-End Automatic Speech Translation of Audiobooks" (https://arxiv.org/abs/1802.04200) """ args.input_layers = getattr(args, "input_layers", "[256, 128]") args.conv_layers = getattr(args, "conv_layers", "[(16, 3, 2), (16, 3, 2)]") args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) args.lstm_size = getattr(args, "lstm_size", 256) args.dropout = getattr(args, "dropout", 0.2) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 128) args.load_pretrained_encoder_from = getattr( args, "load_pretrained_encoder_from", None ) args.load_pretrained_decoder_from = getattr( args, "load_pretrained_decoder_from", None ) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_256_3_3") def berard_256_3_3(args): """Used in * "Harnessing Indirect Training Data for End-to-End Automatic Speech Translation: Tricks of the Trade" (https://arxiv.org/abs/1909.06515) * "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus" (https://arxiv.org/pdf/2002.01320.pdf) * "Self-Supervised Representations Improve End-to-End Speech Translation" (https://arxiv.org/abs/2006.12124) """ args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) berard(args) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_3_2") def berard_512_3_2(args): args.num_blstm_layers = getattr(args, "num_blstm_layers", 3) args.lstm_size = getattr(args, "lstm_size", 512) args.dropout = getattr(args, "dropout", 0.3) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_num_layers = getattr(args, "decoder_num_layers", 2) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 256) berard(args) @register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_5_3") def berard_512_5_3(args): args.num_blstm_layers = getattr(args, "num_blstm_layers", 5) args.lstm_size = getattr(args, "lstm_size", 512) args.dropout = getattr(args, "dropout", 0.3) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_num_layers = getattr(args, "decoder_num_layers", 3) args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024) args.attention_dim = getattr(args, "attention_dim", 512) args.output_layer_dim = getattr(args, "output_layer_dim", 256) berard(args)
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/berard.py
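MLPAttention above implements the additive attention of Bahdanau et al.: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a), normalized over source positions with padded steps masked to -inf. The following is a small self-contained sketch of that computation in plain PyTorch; the toy dimensions and variable names are illustrative, not taken from the class above.

import torch
import torch.nn as nn
import torch.nn.functional as F

src_len, bsz, context_dim, dec_dim, attn_dim = 7, 2, 12, 10, 8

encoder_proj = nn.Linear(context_dim, attn_dim, bias=True)    # W_ae, b_a
decoder_proj = nn.Linear(dec_dim, attn_dim, bias=False)       # W_ad
to_scores = nn.Linear(attn_dim, 1, bias=False)                # V_a

source_hids = torch.randn(src_len, bsz, context_dim)          # T x B x C encoder states
decoder_state = torch.randn(bsz, dec_dim)                     # current decoder hidden state
padding_mask = torch.zeros(src_len, bsz, dtype=torch.bool)    # T x B, True at padded steps
padding_mask[5:, 1] = True                                    # pretend sentence 1 has length 5

# (T x B x A) + (1 x B x A) -> broadcast sum, then tanh
hidden = torch.tanh(encoder_proj(source_hids) + decoder_proj(decoder_state).unsqueeze(0))
scores = to_scores(hidden).squeeze(-1)                        # T x B
scores = scores.masked_fill(padding_mask, float("-inf"))
attn = F.softmax(scores, dim=0)                               # normalize over source positions

context = (source_hids * attn.unsqueeze(-1)).sum(dim=0)       # B x context_dim weighted sum
print(context.shape, attn[:, 1])                              # padded steps get zero weight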
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .berard import *  # noqa
from .convtransformer import *  # noqa
from .s2t_transformer import *  # noqa
from .xm_transformer import *  # noqa
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #!/usr/bin/env python3 import logging import math from typing import Dict, List, Optional, Tuple from pathlib import Path import torch import torch.nn as nn from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) from torch import Tensor logger = logging.getLogger(__name__) class Conv1dSubsampler(nn.Module): """Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) Args: in_channels (int): the number of input channels mid_channels (int): the number of intermediate channels out_channels (int): the number of output channels kernel_sizes (List[int]): the kernel size for each convolutional layer """ def __init__( self, in_channels: int, mid_channels: int, out_channels: int, kernel_sizes: List[int] = (3, 3), ): super(Conv1dSubsampler, self).__init__() self.n_layers = len(kernel_sizes) self.conv_layers = nn.ModuleList( nn.Conv1d( in_channels if i == 0 else mid_channels // 2, mid_channels if i < self.n_layers - 1 else out_channels * 2, k, stride=2, padding=k // 2, ) for i, k in enumerate(kernel_sizes) ) def get_out_seq_lens_tensor(self, in_seq_lens_tensor): out = in_seq_lens_tensor.clone() for _ in range(self.n_layers): out = ((out.float() - 1) / 2 + 1).floor().long() return out def forward(self, src_tokens, src_lengths): bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D) x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T for conv in self.conv_layers: x = conv(x) x = nn.functional.glu(x, dim=1) _, _, out_seq_len = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D) return x, self.get_out_seq_lens_tensor(src_lengths) @register_model("s2t_transformer") class S2TTransformerModel(FairseqEncoderDecoderModel): """Adapted Transformer model (https://arxiv.org/abs/1706.03762) for speech-to-text tasks. The Transformer encoder/decoder remains the same. 
A trainable input subsampler is prepended to the Transformer encoder to project inputs into the encoder dimension as well as downsample input sequence for computational efficiency.""" def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # input parser.add_argument( "--conv-kernel-sizes", type=str, metavar="N", help="kernel sizes of Conv1d subsampling layers", ) parser.add_argument( "--conv-channels", type=int, metavar="N", help="# of channels in Conv1d subsampling layers", ) # Transformer parser.add_argument( "--activation-fn", type=str, default="relu", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="N", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="num encoder layers" ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="N", help="num encoder attention heads", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--no-scale-embedding", action="store_true", help="if True, dont scale embeddings", ) parser.add_argument( "--load-pretrained-encoder-from", type=str, metavar="STR", help="model to take encoder weights from (for initialization)", ) parser.add_argument( '--encoder-freezing-updates', type=int, metavar='N', help='freeze encoder for first N updates' ) @classmethod def build_encoder(cls, args): encoder = S2TTransformerEncoder(args) pretraining_path = getattr(args, "load_pretrained_encoder_from", None) if pretraining_path is not None: if not Path(pretraining_path).exists(): logger.warning( f"skipped pretraining because {pretraining_path} does not exist" ) else: encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=pretraining_path ) logger.info(f"loaded pretrained encoder from: {pretraining_path}") return encoder @classmethod def build_decoder(cls, args, task, embed_tokens): return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure 
all arguments are present in older models base_architecture(args) def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) decoder_embed_tokens = build_embedding( task.target_dictionary, args.decoder_embed_dim ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, task, decoder_embed_tokens) return cls(encoder, decoder) def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) lprobs.batch_first = True return lprobs def forward(self, src_tokens, src_lengths, prev_output_tokens): """ The forward method inherited from the base class has a **kwargs argument in its input, which is not supported in torchscript. This method overwrites the forward method definition without **kwargs. """ encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) return decoder_out class S2TTransformerEncoder(FairseqEncoder): """Speech-to-text Transformer encoder that consists of input subsampler and Transformer encoder.""" def __init__(self, args): super().__init__(None) self.encoder_freezing_updates = args.encoder_freezing_updates self.num_updates = 0 self.dropout_module = FairseqDropout( p=args.dropout, module_name=self.__class__.__name__ ) self.embed_scale = math.sqrt(args.encoder_embed_dim) if args.no_scale_embedding: self.embed_scale = 1.0 self.padding_idx = 1 self.subsample = Conv1dSubsampler( args.input_feat_per_channel * args.input_channels, args.conv_channels, args.encoder_embed_dim, [int(k) for k in args.conv_kernel_sizes.split(",")], ) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.transformer_layers = nn.ModuleList( [TransformerEncoderLayer(args) for _ in range(args.encoder_layers)] ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None def _forward(self, src_tokens, src_lengths, return_all_hiddens=False): x, input_lengths = self.subsample(src_tokens, src_lengths) x = self.embed_scale * x encoder_padding_mask = lengths_to_padding_mask(input_lengths) positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = self.dropout_module(x) encoder_states = [] for layer in self.transformer_layers: x = layer(x, encoder_padding_mask) if return_all_hiddens: encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [encoder_padding_mask] if encoder_padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [], } def forward(self, src_tokens, src_lengths, return_all_hiddens=False): if self.num_updates < self.encoder_freezing_updates: with torch.no_grad(): x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens) else: x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens) return x def reorder_encoder_out(self, encoder_out, new_order): new_encoder_out = ( [] if len(encoder_out["encoder_out"]) == 0 else [x.index_select(1, new_order) for x in 
encoder_out["encoder_out"]] ) new_encoder_padding_mask = ( [] if len(encoder_out["encoder_padding_mask"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]] ) new_encoder_embedding = ( [] if len(encoder_out["encoder_embedding"]) == 0 else [x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]] ) encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], # B x T "src_lengths": [], # B x 1 } def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self.num_updates = num_updates class TransformerDecoderScriptable(TransformerDecoder): def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): # call scriptable method from parent class x, _ = self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) return x, None @register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer") def base_architecture(args): args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) # Convolutional subsampler args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") args.conv_channels = getattr(args, "conv_channels", 1024) # Transformer args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", args.dropout) args.activation_dropout = getattr(args, "activation_dropout", args.dropout) args.activation_fn = getattr(args, "activation_fn", "relu") args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) 
args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) @register_model_architecture("s2t_transformer", "s2t_transformer_s") def s2t_transformer_s(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_xs") def s2t_transformer_xs(args): args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 3) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) args.dropout = getattr(args, "dropout", 0.3) s2t_transformer_s(args) @register_model_architecture("s2t_transformer", "s2t_transformer_sp") def s2t_transformer_sp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_s(args) @register_model_architecture("s2t_transformer", "s2t_transformer_m") def s2t_transformer_m(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.dropout = getattr(args, "dropout", 0.15) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_mp") def s2t_transformer_mp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_m(args) @register_model_architecture("s2t_transformer", "s2t_transformer_l") def s2t_transformer_l(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.2) base_architecture(args) @register_model_architecture("s2t_transformer", "s2t_transformer_lp") def s2t_transformer_lp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_l(args)
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/s2t_transformer.py
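S2TTransformerEncoder.forward above runs the real computation under torch.no_grad() until encoder_freezing_updates optimizer steps have passed, so the subsampler and Transformer layers receive no gradient early in training (fairseq's trainer propagates the step count through set_num_updates). A minimal sketch of that freezing pattern on a toy module; the class and attribute names here are illustrative stand-ins, not the encoder above.

import torch
import torch.nn as nn

class FreezableEncoder(nn.Module):
    # Toy stand-in for the encoder-freezing logic in S2TTransformerEncoder.
    def __init__(self, freezing_updates: int = 2):
        super().__init__()
        self.encoder_freezing_updates = freezing_updates
        self.num_updates = 0
        self.proj = nn.Linear(4, 4)

    def _forward(self, x):
        return self.proj(x)

    def forward(self, x):
        if self.num_updates < self.encoder_freezing_updates:
            # Early updates: run under no_grad so no gradients reach these weights.
            with torch.no_grad():
                return self._forward(x)
        return self._forward(x)

    def set_num_updates(self, num_updates):
        # Called with the current update count, mirroring fairseq's convention.
        self.num_updates = num_updates

enc = FreezableEncoder(freezing_updates=2)
x = torch.randn(3, 4)
for step in range(4):
    enc.set_num_updates(step)
    out = enc(x)
    print(step, out.requires_grad)  # False, False, True, True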
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging from collections.abc import Iterable from itertools import repeat from typing import List, Optional, Tuple import torch from torch import Tensor # ------------------------------------------------------------------------------ # assert_equal() # ------------------------------------------------------------------------------ def assert_equal(value1, value2, name1=None, name2=None): """Asserts two values are equal otherwise raise an error.""" str_name1 = "" if name1 is None else "{} ".format(name1) str_name2 = "" if name2 is None else "{} ".format(name2) if value1 != value2: str_value1 = "{}" if name1 is None else "({})" str_value1 = str_value1.format(value1) str_value2 = "{}" if name2 is None else "({})" str_value2 = str_value2.format(value2) raise ValueError( "Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2) ) def fill_config(config, key, value): if value is not None: if key not in config or config[key] is None: config[key] = value assert_equal(value, config[key], "value", f'config["{key}"]') # ------------------------------------------------------------------------------ # check_and_return_expected() # ------------------------------------------------------------------------------ def check_and_return_expected(value, undefined_value, expected_value, name=None): """ Return the expected value while checking if the given value is undefined or equal to the expected value. """ if (undefined_value is None and value is None) or (undefined_value == value): return expected_value if value != expected_value: str_name = "" if name is None else "{} ".format(name) str_value = "{}" if name is None else "({})" str_value = str_value.format(value) raise ValueError( "Expected {}{} == {}".format(str_name, str_value, expected_value) ) return expected_value # ------------------------------------------------------------------------------ # get_time_axis() # ------------------------------------------------------------------------------ def get_time_axis(layout): """ Extract the time axis from the layout, for example for breaking sequence into segments. """ if layout in ["TB", "TBD"]: return 0 if layout in ["BT", "BTD"]: return 1 if layout in ["BCTD"]: return 2 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # get_batch_axis() # ------------------------------------------------------------------------------ def get_batch_axis(layout): """ Extract the batch axis from the layout """ if layout in ["TB", "TBD"]: return 1 if layout in ["BT", "BTD", "BCTD"]: return 0 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # monotonically_increasing_and_bounded() # ------------------------------------------------------------------------------ def monotonically_increasing_and_bounded(iterable, min=None, max=None): """ Check if the elements in the given iterable are monotonically increasing and bounded by upper/lower bounds. 
""" if not isinstance(iterable, Iterable): raise TypeError( "Expected iterable to be of type Iterable, got ({})".format( iterable.__class__.__name__ ) ) for i in range(len(iterable)): if min is not None and iterable[i] < min: return False if max is not None and iterable[i] > max: return False if i > 0 and iterable[i] <= iterable[i - 1]: return False return True # ------------------------------------------------------------------------------ # to_pair() # ------------------------------------------------------------------------------ def to_pair(value, name): """Make a pair (of type tuple) of given value.""" if isinstance(value, Iterable): if len(value) != 2: raise ValueError( "Expected `{}` to have exactly 2 elements, got: ({})".format( name, value ) ) return value return tuple(repeat(value, 2)) # ------------------------------------------------------------------------------ # infer_conv_output_attrs() # ------------------------------------------------------------------------------ # TODO(cfyeh): figure out if we can get `output_dim` without calling the module. def infer_conv_output_attrs( module, input_channels, input_dim, batch_size=1, max_length=8 ): """Get output attributes of a module with input.""" input = torch.randn(batch_size, input_channels, max_length, input_dim) output = module(input) output_channels = output.shape[1] output_dim = output.shape[-1] return output_channels, output_dim # ------------------------------------------------------------------------------ # NoOp # ------------------------------------------------------------------------------ class NoOp(torch.nn.Module): """ NoOp simply passes the input as the output. """ def __init__(self): super().__init__() def forward(self, input: Tensor) -> Tensor: return input # ------------------------------------------------------------------------------ # Permute: a torch.nn.Module applies permutation on the input tensor. # ------------------------------------------------------------------------------ class Permute(torch.nn.Module): def __init__(self, dims): super().__init__() self.dims = dims def forward(self, input: Tensor) -> Tensor: return input.permute(self.dims).contiguous() # ------------------------------------------------------------------------------ # lengths_to_padding_mask() # ------------------------------------------------------------------------------ def lengths_to_padding_mask(lengths: Tensor) -> Tensor: """Convert lengths of shape (B, ) to padding mask.""" batch_size = lengths.shape[0] max_length = int(torch.max(lengths).item()) padding_mask = torch.arange( # [0, ..., T-1] max_length, device=lengths.device, dtype=lengths.dtype ).expand(batch_size, max_length) >= lengths.unsqueeze(1) return padding_mask # ------------------------------------------------------------------------------ # lengths_to_attention_mask() # ------------------------------------------------------------------------------ def lengths_to_attention_mask( lengths: Tensor, left_context: Optional[int] = None, right_context: Optional[int] = None, ) -> Optional[Tensor]: """ Generate attention mask based on (lengths, left_context, right_context). left_context is None means unlimited left context. right_context is None means unlimited right context. 
""" if left_context is None and right_context is None: return None max_length = int(torch.max(lengths).item()) # For example, with `max_length` == 5, # indices = tensor([ # [ 0, 1, 2, 3, 4, 5], # [-1, 0, 1, 2, 3, 4], # [-2, -1, 0, 1, 2, 3], # [-3, -2, -1, 0, 1, 2], # [-4, -3, -2, -1, 0, 1], # [-5, -4, -3, -2, -1, 0], # ]) # In some cases the second torch.arange is created on cpu which causes a # failure. Adding the device option to guard against it. indices = torch.arange( max_length, device=lengths.device, dtype=lengths.dtype ).expand(max_length, max_length) - torch.arange( max_length, device=lengths.device ).view( max_length, -1 ) # For example, with `max_length` == 5, # bool_mask = tensor([ # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # ]) bool_mask = ( torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length) ) # For example, with `max_length` == 5, left_context == 2 # left_mask = tensor([ # [ True, True, True, True, True], # [ True, True, True, True, True], # [ True, True, True, True, True], # [False, True, True, True, True], # [False, False, True, True, True], # ]) if left_context is not None: left_mask = indices >= -left_context bool_mask = bool_mask & left_mask # For example, with `max_length` == 5, right_context == 1 # right_mask = tensor([ # [True, True, False, False, False], # [True, True, True, False, False], # [True, True, True, True, False], # [True, True, True, True, True], # [True, True, True, True, True], # ]) if right_context is not None: right_mask = indices <= right_context bool_mask = bool_mask & right_mask bool_mask = (~bool_mask).to(device=lengths.device) return bool_mask # ------------------------------------------------------------------------------ # infer_output_norm() # ------------------------------------------------------------------------------ def infer_output_norm(module, output_norm=None): """ Infer the output norm (string and module) needed on the module gvien desired output normalization. """ if output_norm == module.output_norm(): # output_norm already matches module.output_norm(). 
return (None, NoOp()) if output_norm is None and module.output_norm() is not None: logger = logging.getLogger("infer_output_norm()") logger.warning( "trying to set output_norm ({}) ".format(output_norm) + "but got module.output_norm() ({}), ".format(module.output_norm()) + "the combined output_norm() will be ({})".format(module.output_norm()) ) return (None, NoOp()) if output_norm == "log_softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("log_softmax", torch.nn.LogSoftmax(dim=-1)) if output_norm == "softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("softmax", torch.nn.Softmax(dim=-1)) raise ValueError( "output_norm ({}) not in ".format(output_norm) + "supported list = [None, softmax, log_softmax]" ) # ------------------------------------------------------------------------------ # infer_channels_from_layout() # ------------------------------------------------------------------------------ def infer_channels_from_layout(layout, channels): """Extract the number of channels from the layout.""" if layout in ("TBD", "BTD"): if channels is not None and channels != 1: raise ValueError( "Expected channels ({}) to be 1 for layout = {}".format( channels, layout ) ) if channels is None: return 1 return channels # ------------------------------------------------------------------------------ # pad_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def pad_sequence( sequence: Tensor, time_axis: int, extra_left_context: int = 0, extra_right_context: int = 0, ) -> Tensor: """Pad extra left/right contexts to the sequence.""" if extra_left_context == 0 and extra_right_context == 0: return sequence tensors_to_concat = [] if extra_left_context: size = (extra_left_context,) fill_value = 0 indices = torch.full( size=size, fill_value=fill_value, dtype=torch.long, device=sequence.device, ) left_padding = torch.index_select(sequence, time_axis, indices) tensors_to_concat.append(left_padding) tensors_to_concat.append(sequence) # NOTE(cfyeh): for efficiency reason we pad 0 instead of the last frame for # extra right contexts. 
if extra_right_context: size = list(sequence.shape) size[time_axis] = extra_right_context right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device) tensors_to_concat.append(right_padding) padded_sequence = torch.cat(tensors_to_concat, dim=time_axis) return padded_sequence # ------------------------------------------------------------------------------ # sequence_to_segments() # ------------------------------------------------------------------------------ @torch.jit.export def sequence_to_segments( sequence: Tensor, time_axis: int, lengths: Tensor, segment_size: Optional[int] = None, extra_left_context: int = 0, extra_right_context: int = 0, ) -> List[Tuple[Tensor, Tensor]]: """Breaks sequence into segments.""" sequence = pad_sequence( sequence=sequence, time_axis=time_axis, extra_left_context=extra_left_context, extra_right_context=extra_right_context, ) lengths = lengths + extra_left_context + extra_right_context segments: List[Tuple[Tensor, Tensor]] = [] if segment_size is None: segments.append((sequence, lengths)) return segments offset = 0 end = sequence.shape[time_axis] step = segment_size size = extra_left_context + segment_size + extra_right_context while offset + extra_left_context + extra_right_context < end: clamped_size = min(size, end - offset) segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size) indices = torch.arange( start=offset, end=(offset + clamped_size), step=1, dtype=torch.long, device=sequence.device, ) segment_tensor = torch.index_select(sequence, time_axis, indices) segments.append((segment_tensor, segment_lengths)) offset = offset + step return segments # ------------------------------------------------------------------------------ # segments_to_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def segments_to_sequence( segments: List[Tuple[Tensor, Tensor]], time_axis: int ) -> Tuple[Tensor, Tensor]: """Concatenate segments into a full sequence.""" if len(segments) == 1: return segments[0] tensors_to_concat: List[Tensor] = [] lengths_to_stack: List[Tensor] = [] for tensor, lengths in segments: tensors_to_concat.append(tensor) lengths_to_stack.append(lengths) sequence = torch.cat(tensors_to_concat, dim=time_axis) lengths = torch.stack(lengths_to_stack, dim=0) lengths = torch.sum(lengths, dim=0) return sequence, lengths def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False): """ convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor batch_first: whether to return a (B, T) tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = False for t < lengths[b] and True otherwise TODO: kernelize this function if benchmarking shows this function is slow """ max_lengths = torch.max(lengths).item() bsz = lengths.size(0) encoder_padding_mask = torch.arange( max_lengths ).to( # a (T, ) tensor with [0, ..., T-1] lengths.device ).view( # move to the right device 1, max_lengths ).expand( # reshape to (1, T)-shaped tensor bsz, -1 ) > lengths.view( # expand to (B, T)-shaped tensor bsz, 1 ).expand( -1, max_lengths ) if not batch_first: return encoder_padding_mask.t(), max_lengths else: return encoder_padding_mask, max_lengths # ------------------------------------------------------------------------------ # attention suppression # ------------------------------------------------------------------------------ def attention_suppression(attention_weights: 
Tensor, scale: float): # B, H, qlen, klen -> B, H, qlen, 1 attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1) attention_nozeros = attention_prob.to(torch.bool) nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True) # For very sparse situation, we need get round about 0s key_sum = torch.sum(attention_prob, dim=-1, keepdim=True) # nozeros_sum should > 1 key_mean = key_sum / (nozeros_sum + 1e-8) # std calculation dis = (attention_prob - key_mean) * (attention_prob - key_mean) # if attention_prob[i] < threshold, then dis_masked[i] = 0; for all i dis_masked = torch.where( attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size()) ) key_var = torch.sum(dis_masked, dim=-1, keepdim=True) key_var = key_var / (nozeros_sum - 1.0 + 1e-8) key_std = torch.sqrt(key_var) key_thread = key_mean - scale * key_std # if attention_prob[i] >= key_thread, then attention_prob[i] # , otherwise "-inf" inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach() inf_tensor[:] = float("-inf") attention_weights_float = torch.where( attention_prob < key_thread, inf_tensor, attention_weights.float(), ) return attention_weights_float.type_as(attention_weights) def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value): return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
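

# ------------------------------------------------------------------------------
# usage sketch (illustrative addition, not part of the original fairseq module)
# ------------------------------------------------------------------------------
# A minimal, hedged example of how the segmentation helpers above compose:
# split a padded batch into fixed-size segments, stitch the segments back, and
# derive a padding mask from the lengths. The tensor sizes, the absence of
# extra left/right context, and the __main__ guard are assumptions chosen only
# for illustration; sequence_to_segments(), segments_to_sequence() and
# lengths_to_encoder_padding_mask() are the functions defined in this file.
if __name__ == "__main__":
    # batch of 2 sequences laid out (B, T, D); the second one is padded
    seq = torch.randn(2, 10, 4)
    lengths = torch.tensor([10, 7])

    # break the batch into segments of 4 frames along the time axis (dim 1)
    segs = sequence_to_segments(
        sequence=seq, time_axis=1, lengths=lengths, segment_size=4
    )
    print("num segments:", len(segs))  # ceil(10 / 4) == 3

    # without extra context the segments concatenate back to the original batch
    full, full_lengths = segments_to_sequence(segments=segs, time_axis=1)
    assert full.shape == seq.shape and torch.equal(full_lengths, lengths)

    # (B, T) mask that is True on padded positions
    mask, max_len = lengths_to_encoder_padding_mask(lengths, batch_first=True)
    print(mask.shape, max_len)  # torch.Size([2, 10]) 10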
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/utils.py
#!/usr/bin/env python3 # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import re from functools import partial from typing import List, Optional, Tuple import torch import torch.nn as nn from fairseq.models import ( FairseqEncoder, ) from fairseq.models.speech_to_text.utils import ( NoOp, lengths_to_padding_mask, segments_to_sequence, ) from fairseq.models.speech_to_text.utils import ( attention_suppression, layer_norm_backward_hook, ) from torch import Tensor, device as Device from torch.quantization.qconfig import ( default_dynamic_qconfig, per_channel_dynamic_qconfig, ) class RelativePositionEmbedding(nn.Module): """ Implementation according to https://arxiv.org/abs/1803.02155 """ def __init__(self, head_dim, max_position, norm_init=True): super().__init__() self.head_dim = head_dim self.max_position = max_position self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim)) if norm_init: nn.init.xavier_normal_(self.embeddings) else: nn.init.xavier_uniform_(self.embeddings) def forward(self, input: Tensor): output = nn.functional.embedding(input.long(), self.embeddings) return output class Fp32LayerNorm(nn.Module): def __init__( self, input_dim, clamp_grad=True, max_grad_value=256, eps=1e-5, elementwise_affine=True, ): super().__init__() self.torch_module = torch.nn.LayerNorm( input_dim, eps=eps, elementwise_affine=elementwise_affine ) if clamp_grad: hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value) self.torch_module.register_backward_hook(hook) def forward(self, input): output = torch.nn.functional.layer_norm( input.float(), self.torch_module.normalized_shape, self.torch_module.weight.float() if self.torch_module.weight is not None else None, self.torch_module.bias.float() if self.torch_module.bias is not None else None, self.torch_module.eps, ).type_as(input) return output # ------------------------------------------------------------------------------ # PositionwiseFF # ------------------------------------------------------------------------------ class PositionwiseFF(nn.Module): """ FFN layer in transformer. Args: input_dim: input embedding dimension ffn_dim: FFN layer inner dimension dropout_on_fc1: dropout for first linear layer dropout_on_fc2: dropout fr second linear layer activation_fn: activation function used after first linear layer. \ Only relu or gelu is supported. 
""" def __init__( self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn ): super(PositionwiseFF, self).__init__() self.input_dim = input_dim self.ffn_dim = ffn_dim if activation_fn == "relu": ac = nn.ReLU() elif activation_fn == "gelu": ac = nn.GELU() else: raise ValueError("Unsupported activation_fn = ({})".format(activation_fn)) # fc1 -> ac -> dropout -> fc2 -> dropout self.module = nn.Sequential( nn.Linear(input_dim, ffn_dim), ac, nn.Dropout(dropout_on_fc1), nn.Linear(ffn_dim, input_dim), nn.Dropout(dropout_on_fc2), ) self.layer_norm = Fp32LayerNorm(input_dim) def forward(self, input): module_out = self.module(self.layer_norm(input)) output = module_out + input return output def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig torch.quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self # ------------------------------------------------------------------------------ # SummarizationLayer # ------------------------------------------------------------------------------ class SummarizationLayer(nn.Module): def __init__(self, method, segment_size, embedding_dim): super(SummarizationLayer, self).__init__() self.segment_size = segment_size self.embedding_dim = embedding_dim nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method) self.method = method if method == "mean": self.module = nn.AvgPool1d( kernel_size=segment_size, stride=segment_size, ceil_mode=True, ) elif method == "max": self.module = nn.MaxPool1d( kernel_size=segment_size, stride=segment_size, ceil_mode=True, ) elif method == "linear": self.module = nn.Linear(segment_size, 1) elif nonlin_match: nonlin_args = nonlin_match.groupdict() act_type = nonlin_args["act"] hid_dim = int(nonlin_args["dim"]) if act_type == "relu": act = nn.ReLU() elif act_type == "gelu": act = nn.GELU() else: raise ValueError("Unsupported activation_fn = ({})".format(act_type)) self.module = nn.Sequential( nn.Linear(segment_size, hid_dim), act, nn.Linear(hid_dim, 1), ) else: raise ValueError("Unsupported summarization method = ({})".format(method)) def forward(self, input): # T, B, D -> B, D, T input = input.permute(1, 2, 0) if self.method == "mean" or self.method == "max": output = self.module(input) output = output.permute(2, 0, 1) return output full_seg_length = input.size(2) // self.segment_size * self.segment_size if full_seg_length > 0: # at least one seg is full B = input.size(0) D = input.size(1) input_todo = ( input[:, :, :full_seg_length] .contiguous() .view(B, -1, self.segment_size) ) output = self.module(input_todo) output = output.view(B, D, -1) else: output = input.new_zeros(input.size(0), input.size(1), 0) left = input.size(2) - full_seg_length if left > 0: # when last seg is not full, use zeros as last memory placeholder zeros = input.new_zeros(input.size(0), input.size(1), 1) output = torch.cat([output, zeros], dim=2) output = output.permute(2, 0, 1) return output # ------------------------------------------------------------------------------ # NoSegAugmentedMemoryMultiheadAttentionBmm # ------------------------------------------------------------------------------ class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module): """ Whole utterance augmented memory multihead attention using BMM. Different with previous augmented memory multihead attention where the utterance is chunked into segments. 
Here we use attention mask achieve so. The input embedding [right_context, utterance, summary] is a concatenation of right context, utterance and summary. Right context block is the concatenation of all the right context for each segments. [right_context_0, right_context_1, ..., right_context_n] For example, if we have utterance = [v0, v1, v2, ...., v20]. segment size 8, right_context size 4. Then the right context blocks = [v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8, v9, v10, and v11 are the right context for first segment. v16, v17, v18 and v19 are the right context for second segment. 0, 0, 0 and 0 are right context for the last segment. utterance is corresponding to input embedding sequence summary is concatenation of average of each segments. [summary_0, summary_1, ..., ]. In augmented memory multihead attention, the query is [right_context, utterance, summary], key is [memory, right_context, utterance]. Different with AugmentedMemoryMultiheadAttentionBmm, memory here is passed from previous attention layer. For the first attention layer, memory is average of each segment. Memory is a concatenation of memory from each segments in previous attention layer. For example, current layer is i, then memory is [m_0, m_1, ..., m_n]. Each m_k is the output from seg_k in layer i-1. args: input_dim: input embedding dimension num_heads: number of heads in multihead self-attention dropout: attention dropout std_scale: if std_scale is not None. The weak attention suppression is turned on. For std_scale = 0.5, all the attention smaller than mean + 0.5 * std will be suppressed. scaled_init: whether to use scaled init for linear weight tanh_on_mem: whether to use tanh on memory output use_mem: whether to use memory or not. When max_memory_size is 0, then we don't have memory anymore. layer_index: current self-attention layer index that is used in depth initialization max_relative_position: max relative position used in relative position embedding rpe_old_option: To be compatible with previous model. The previous model was trained with attention += attention + rpe. The correct equation should be attention = attention + rpe """ def __init__( self, input_dim, num_heads, dropout=0.0, std_scale=None, scaled_init=False, tanh_on_mem=False, use_mem=True, mini_batches=False, negative_inf="-inf", layer_index=-1, max_relative_position=0, rpe_old_option=True, ): if input_dim % num_heads: raise ValueError( "input_dim ({}) must be divisible by num_heads ({})".format( input_dim, num_heads ) ) super().__init__() embed_dim = input_dim self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True) self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True) self.rpe_old_option = rpe_old_option if max_relative_position > 0: self.use_rpe = True self.rpe_k = RelativePositionEmbedding( head_dim=input_dim // num_heads, max_position=max_relative_position, ) self.rpe_v = RelativePositionEmbedding( head_dim=input_dim // num_heads, max_position=max_relative_position, ) else: self.use_rpe = False self.rpe_k = None self.rpe_v = None if scaled_init: if layer_index == -1: gain = 1.0 / math.sqrt(2) else: # https://arxiv.org/abs/2005.09684 depthwise initialization # stablize the training greatly. Use depthwise initialization to # replace incremental loss. 
gain = 1.0 / math.sqrt(layer_index + 1) torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain) torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain) self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.scaling = self.head_dim ** -0.5 self.std_scale = std_scale self.use_mem = use_mem self.mini_batches = mini_batches self.negative_inf = negative_inf if tanh_on_mem: self.squash_mem = torch.tanh self.nonlinear_squash_mem = True else: self.squash_mem = NoOp() self.nonlinear_squash_mem = False def prepare_qkv( self, input: Tensor, mems: Tensor, lengths: Tensor, summary_length: int, lc_length: int, ): # T: right_context length + utterance_length + summary_length T, B, D = input.shape mem_length = mems.size(0) utterance_length = torch.max(lengths) right_context_blocks_length = T - utterance_length - summary_length rc_block = input[:right_context_blocks_length, :, :] utterance_block = input[right_context_blocks_length : T - summary_length, :, :] if B == 1: padding_mask = None else: klengths = lengths + mem_length + right_context_blocks_length + lc_length padding_mask = lengths_to_padding_mask(lengths=klengths) mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0) # In training lc_length = 0 key_length = mem_rc_input.size(0) + lc_length rc_input_sum = input q = self.e2h_q(rc_input_sum) kv = self.e2h_kv(mem_rc_input) k, v = kv.chunk(chunks=2, dim=2) result_qkv = (q, k, v) input_shape = (T, B, D) result_lengths_info = ( mem_length, utterance_length, right_context_blocks_length, key_length, ) if padding_mask is not None: assert padding_mask.size(0) == B assert padding_mask.size(1) == key_length return result_qkv, input_shape, result_lengths_info, padding_mask def prepare_attention_weights( self, q: Tensor, new_k: Tensor, new_v: Tensor, input_shape: Tuple[int, int, int], rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor]: T, B, D = input_shape q = ( q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1) * self.scaling ) k = ( new_k.contiguous() .view(-1, B * self.num_heads, self.head_dim) .transpose(0, 1) ) v = ( new_v.contiguous() .view(-1, B * self.num_heads, self.head_dim) .transpose(0, 1) ) attention_weights = torch.bmm(q, k.transpose(1, 2)) if self.use_rpe and rpe is not None and self.rpe_v is not None: r_k = self.rpe_k(rpe) # [q, B*h, d] * [q, k, d] -> [B*h, q, k] attention_weights_rpe = torch.matmul( q.transpose(0, 1), r_k.transpose(1, 2) ).transpose(0, 1) attention_weights = attention_weights + attention_weights_rpe attention_weights_float = attention_weights.float() return attention_weights, attention_weights_float, v def prepare_attention_output( self, attention_weights: Tensor, attention_weights_float: Tensor, v: Tensor, input_shape: Tuple[int, int, int], key_length: int, padding_mask: Optional[Tensor], rpe: Optional[Tensor], ) -> Tensor: T, B, D = input_shape if padding_mask is not None: attention_weights_float = attention_weights_float.view( B, self.num_heads, T, key_length ) attention_weights_float = attention_weights_float.masked_fill( padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) attention_weights_float = attention_weights_float.view( B * self.num_heads, T, key_length ) if self.std_scale is not None: attention_weights_float = attention_suppression( attention_weights_float, self.std_scale ) attention_weights_float = torch.nn.functional.softmax( attention_weights_float, dim=-1 ) 
attention_weights = attention_weights_float.type_as(attention_weights) attention_probs = torch.nn.functional.dropout( attention_weights, p=self.dropout, training=self.training ) # [T, key_length, B, n_head]+ [key_length, B, n_head, d_head] # -> [T, B, n_head, d_head] attention = torch.bmm(attention_probs, v) if self.use_rpe and rpe is not None and self.rpe_v is not None: r_v = self.rpe_v(rpe) attention_rpe = torch.matmul( attention_probs.transpose(0, 1), r_v ).transpose(0, 1) if self.rpe_old_option: attention += attention + attention_rpe else: attention = attention + attention_rpe assert list(attention.shape) == [B * self.num_heads, T, self.head_dim] attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim) rc_output_memory = self.out_proj(attention) return rc_output_memory @torch.jit.unused def forward( self, input: Tensor, lengths: Tensor, mems: Tensor, attention_mask: Tensor, pre_mems: Optional[Tensor] = None, left_context_key: Optional[Tensor] = None, left_context_val: Optional[Tensor] = None, rpe: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """ forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training. args: input: formed in the following way [right_context_0, right_contex_1, ..., seg_0, seg_1, ..., summary_0, summary_1,..] lengths: the length of query which is [seg_0, seg_1, ....] mems: [mem_0, mem_1, ...]. attention_mask: attention mask for query = [right_context, query, summary] key = [mem, right_context, query]. This is only used for traing. """ if self.use_mem: mem_length = mems.size(0) summary_length = mem_length + 1 if pre_mems is not None: mems = torch.cat([pre_mems, mems], dim=0) else: mem_length = 0 summary_length = 0 # In training, lc_length = 0 if left_context_key is not None: lc_length = left_context_key.size(0) else: lc_length = 0 results = self.prepare_qkv( input=input, mems=mems, lengths=lengths, summary_length=summary_length, lc_length=lc_length, ) result_qkv, input_shape, result_lengths_info, padding_mask = results q, k, v = result_qkv ( mem_length, utterance_length, right_context_blocks_length, key_length, ) = result_lengths_info if left_context_key is not None: # add the cache key and value new_k = torch.cat( [ k[: mem_length + right_context_blocks_length, :, :], left_context_key, k[-utterance_length:, :, :], ], dim=0, ) new_v = torch.cat( [ v[: mem_length + right_context_blocks_length, :, :], left_context_val, v[-utterance_length:, :, :], ], dim=0, ) next_k = new_k[mem_length + right_context_blocks_length :, :, :] next_v = new_v[mem_length + right_context_blocks_length :, :, :] else: new_k = k new_v = v next_k = None next_v = None attention_weights, attention_weights_float, v = self.prepare_attention_weights( q=q, new_k=new_k, new_v=new_v, input_shape=input_shape, rpe=rpe, ) # mask attention attention_mask = attention_mask.unsqueeze(0) attention_weights_float = attention_weights_float.masked_fill( attention_mask, float(self.negative_inf) ) rc_output_memory = self.prepare_attention_output( attention_weights=attention_weights, attention_weights_float=attention_weights_float, v=v, input_shape=input_shape, key_length=key_length, padding_mask=padding_mask, rpe=rpe, ) if self.use_mem: # next_m length equals to summary length - 1 # last memory is ignored if self.mini_batches: next_m = rc_output_memory[-summary_length:] else: next_m = rc_output_memory[-summary_length:-1] next_m = self.squash_mem(next_m) # rc and output rc_output = rc_output_memory[:-summary_length] if not self.nonlinear_squash_mem: next_m = 
torch.clamp(next_m, min=-10, max=10) else: next_m = mems rc_output = rc_output_memory return rc_output, next_m, next_k, next_v @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, mems: Tensor, left_context_key: Tensor, left_context_val: Tensor, rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """ forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding. args: input: formed in the following way [right_context_0, right_contex_1, ..., seg_0, seg_1, ..., summary_0, summary_1,..] lengths: the length of query which is [seg_0, seg_1, ....] mems: [mem_0, mem_1, ...]. left_context_key: left_context for key part. This is only used for online decoding. In training, this is empty tensor left_context_val: left_context for value part. This is only used for online decoding. In training, this is empty tensor """ lc_length = left_context_key.size(0) # In decoding, summary_length = 1 or 0 if self.use_mem: summary_length = 1 else: summary_length = 0 results = self.prepare_qkv( input=input, mems=mems, lengths=lengths, summary_length=summary_length, lc_length=lc_length, ) result_qkv, input_shape, result_lengths_info, padding_mask = results q, k, v = result_qkv ( mem_length, utterance_length, right_context_blocks_length, key_length, ) = result_lengths_info # add the cache key and value new_k = torch.cat( [ k[: mem_length + right_context_blocks_length, :, :], left_context_key, k[-utterance_length:, :, :], ], dim=0, ) new_v = torch.cat( [ v[: mem_length + right_context_blocks_length, :, :], left_context_val, v[-utterance_length:, :, :], ], dim=0, ) next_k = new_k[mem_length + right_context_blocks_length :, :, :] next_v = new_v[mem_length + right_context_blocks_length :, :, :] attention_weights, attention_weights_float, v = self.prepare_attention_weights( q=q, new_k=new_k, new_v=new_v, input_shape=input_shape, rpe=rpe, ) # In online decoding, we don't have attention mask. But we still need # to disable the attention from summary query to memory attention_weights_float[:, -1, :mem_length] = float(self.negative_inf) rc_output_memory = self.prepare_attention_output( attention_weights=attention_weights, attention_weights_float=attention_weights_float, v=v, input_shape=input_shape, key_length=key_length, padding_mask=padding_mask, rpe=rpe, ) # In decoding, summary length is 1 if self.use_mem: next_m = rc_output_memory[-1:] next_m = self.squash_mem(next_m) # rc and output rc_output = rc_output_memory[:-1] if not self.nonlinear_squash_mem: next_m = torch.clamp(next_m, min=-10, max=10) else: rc_output = rc_output_memory # empty tensor as input mems next_m = mems return rc_output, next_m, next_k, next_v def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig torch.quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self class NoSegAugmentedMemoryTransformer(nn.Module): """ Whole utterance augmented memory transformer. This is not pyspeech nn layer. It is used as a module in a master layer where multiple transformers is used. 
""" def __init__( self, input_dim, num_heads, ffn_dim, dropout_in_attn=0.0, dropout_on_attn=None, dropout_on_fc1=None, dropout_on_fc2=None, activation_fn="relu", tanh_on_mem=False, std_scale=None, scaled_init=False, segment_size=128, use_mem=True, mini_batches=False, negative_inf="-inf", layer_index=-1, summarization_method="mean", max_relative_position=0, rpe_old_option=True, ): super(NoSegAugmentedMemoryTransformer, self).__init__() self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm( input_dim=input_dim, num_heads=num_heads, dropout=dropout_in_attn, scaled_init=scaled_init, tanh_on_mem=tanh_on_mem, std_scale=std_scale, use_mem=use_mem, mini_batches=mini_batches, negative_inf=negative_inf, layer_index=layer_index, max_relative_position=max_relative_position, ) self.dropout = nn.Dropout(dropout_on_attn) self.pos_ff = PositionwiseFF( input_dim=input_dim, ffn_dim=ffn_dim, dropout_on_fc1=dropout_on_fc1, dropout_on_fc2=dropout_on_fc2, activation_fn=activation_fn, ) self.layer_norm_pre = Fp32LayerNorm(input_dim) self.layer_norm = Fp32LayerNorm(input_dim) self.segment_size = segment_size self.use_mem = use_mem self.memory_op = SummarizationLayer( summarization_method, segment_size, input_dim ) def set_mini_batches(self, mini_batches): self.attention.mini_batches = mini_batches def gen_summary_queries(self, input): sum_input = self.memory_op(input) return sum_input def pre_attention_ops(self, input, right_context_blocks): rc_length = right_context_blocks.size(0) input_length = input.size(0) rc_and_input = torch.cat([right_context_blocks, input], dim=0) residual_input = rc_and_input rc_and_input = self.layer_norm_pre(rc_and_input) query_input = rc_and_input[-input_length:, :, :] return rc_length, input_length, residual_input, query_input, rc_and_input def after_attention_ops(self, attention_output, residual_input): output = self.dropout(attention_output) output = output + residual_input output = self.pos_ff(output) output = self.layer_norm(output) return output @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, mems: Tensor, left_context_key: Tensor, left_context_val: Tensor, right_context_blocks: Tensor, rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: results = self.pre_attention_ops(input, right_context_blocks) rc_length, input_length, residual_input, query_input, rc_and_input = results # In online decoding, the summary query size is always 1 or 0 if self.use_mem: summary_query = self.gen_summary_queries(query_input) summary_query = summary_query[0:1, :, :] rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0) else: rc_qu_su = rc_and_input rc_output, next_m, next_k, next_v = self.attention.forward_jit( input=rc_qu_su, lengths=lengths, mems=mems, left_context_key=left_context_key, left_context_val=left_context_val, rpe=rpe, ) rc_output = self.after_attention_ops(rc_output, residual_input) results = ( rc_output[-input_length:, :, :], next_m, rc_output[0:rc_length, :, :], next_k, next_v, ) return results @torch.jit.unused def forward( self, input, lengths, mems, right_context_blocks, attention_mask, pre_mems, left_context_key, left_context_val, rpe, ): results = self.pre_attention_ops(input, right_context_blocks) rc_length, input_length, residual_input, query_input, rc_and_input = results if self.use_mem: summary_query = self.gen_summary_queries(query_input) rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0) else: rc_qu_su = rc_and_input rc_output, next_m, next_k, next_v = self.attention( input=rc_qu_su, lengths=lengths, 
            mems=mems,
            attention_mask=attention_mask,
            pre_mems=pre_mems,
            left_context_key=left_context_key,
            left_context_val=left_context_val,
            rpe=rpe,
        )

        # [TODO] Note that memory does not go through pos_ff. What happens if
        # we pass memory through the pos_ff as well?
        rc_output = self.after_attention_ops(rc_output, residual_input)
        results = (
            rc_output[-input_length:, :, :],
            next_m,
            rc_output[0:rc_length, :, :],
            next_k,
            next_v,
        )

        return results


class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder):
    """
    Whole utterance augmented memory transformer encoder layer. This is a master
    layer in which we can define multiple augmented memory transformers. There
    are two reasons to set up a master layer:
    1. We only need to define the attention mask once. All the layers in the
    master layer share the same mask.
    2. The pyspeech nn layer has a special input and output format. Defining one
    master layer makes it easier to pass memory between the different layers
    inside the master layer.

    args:
        input_dim: input embedding dimension
        num_heads: number of heads in multihead self-attention
        ffn_dim: ffn dimension in FFN layer
        num_layers: number of augmented memory transformer layers
        dropout_in_attn: dropout used in multi-head self-attention
        dropout_on_attn: dropout used for output from the multihead self-attention
        dropout_on_fc1: dropout used in FFN layer for the first linear layer
        dropout_on_fc2: dropout used in FFN layer for the second linear layer
        segment_size: segment size for each segment
        context_config: (left_context_size, right_context_size) defines the
            surrounding context size for each segment
        max_memory_size: maximum memory size used for each segment
        scaled_init: whether to use scaled init for weight initialization in the
            attention layer
        std_scale: if std_scale is not None, weak attention suppression is
            turned on. For std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std will be suppressed.
        activation_fn: activation function used in FFN layer. [ReLU, GELU] supported
        tanh_on_mem: whether to use tanh on memory
        mini_batches: use mini-batch training
        negative_inf: the negative infinity value used in attention masking.
            Default is "-inf". For some situations, e.g. LM, it is better to use
            "-1e8" to avoid NaN issues.
        summarization_method: method to generate the segment summarization embedding
        max_relative_position: max relative position used in relative position
            embedding
        rpe_old_option: to be compatible with the previous model, which was
            trained with attention += attention + rpe. The correct equation
            should be attention = attention + rpe
        [TODO]: remove the rpe_old_option by the end of 2021 Q1.
    """

    def __init__(
        self,
        input_dim,
        num_heads,
        ffn_dim,
        num_layers=1,
        dropout_in_attn=0.0,
        dropout_on_attn=0.0,
        dropout_on_fc1=0.0,
        dropout_on_fc2=0.0,
        segment_size=128,
        context_config=(0, 0),
        max_memory_size=0,
        scaled_init=True,
        std_scale=None,
        activation_fn="relu",
        tanh_on_mem=False,
        mini_batches=False,
        negative_inf="-inf",
        deep_init=True,
        summarization_method="mean",
        max_relative_position=0,
        rpe_old_option=True,
    ):
        super().__init__(None)
        if input_dim % num_heads:
            raise ValueError(
                "input_dim ({}) must be divisible by num_heads ({})".format(
                    input_dim, num_heads
                )
            )

        # We used to support a growing memory size. However, that causes
        # cross-stream batching failures. Now we require an exact max memory size.
        if max_memory_size < 0:
            raise ValueError("max_memory_size must be >= 0")

        # Only assign right_context. In decoding, left context will be cached.
# No need to let the online decoder to re-assign the left context self.left_context, self.right_context = context_config self.segment_size = segment_size self.memory_dim = input_dim self.max_memory_size = max_memory_size self.mini_batches = mini_batches if self.max_memory_size != 0: self.use_mem = True else: self.use_mem = False self.memory_op = SummarizationLayer( summarization_method, segment_size, input_dim ) self.layers = torch.nn.ModuleList() self.num_layers = num_layers self.max_relative_position = max_relative_position if self.max_relative_position > 0: self.use_rpe = True else: self.use_rpe = False for i in range(self.num_layers): if deep_init: layer_index = i else: layer_index = -1 self.layers.append( NoSegAugmentedMemoryTransformer( num_heads=num_heads, input_dim=input_dim, ffn_dim=ffn_dim, dropout_in_attn=dropout_in_attn, dropout_on_attn=dropout_on_attn, dropout_on_fc1=dropout_on_fc1, dropout_on_fc2=dropout_on_fc2, segment_size=segment_size, std_scale=std_scale, activation_fn=activation_fn, tanh_on_mem=tanh_on_mem, scaled_init=scaled_init, use_mem=self.use_mem, mini_batches=mini_batches, negative_inf=negative_inf, layer_index=layer_index, summarization_method=summarization_method, max_relative_position=max_relative_position, rpe_old_option=rpe_old_option, ) ) def set_mini_batches(self, mini_batches): # handy function only used for unit test self.mini_batches = mini_batches for layer in self.layers: layer.set_mini_batches(mini_batches) def _get_relative_position( self, input: Tensor, max_relative_position: int, left_context_length: int, past_length: int, is_decoding: bool, ): # For training, we copy the right context to the start of the utterance # First dimension in distance is corresponding to query. # [right context, utterance, summary vector] # Second dimension in distance is corresponding to key. # [Memory bank, right context, utterance] # For summary vector in query part, the distance with # all other position is 2*max_position. For memory bank in key, # the distance with all other positions is 0. T, B, D = input.shape num_segs = math.ceil((T - self.right_context) / self.segment_size) # utterance u_st = past_length * self.segment_size u_ed = u_st + T utterance_ranges = torch.arange(u_st, u_ed - self.right_context) # left context. Only in minibatch or decoding left_context_ranges = torch.arange(u_st - left_context_length, u_st) # Right context block # right context + utterance right_context_blocks = [] for i in range(0, num_segs - 1): st = (i + 1) * self.segment_size + u_st ed = st + self.right_context assert ed < u_ed temp = torch.arange(st, ed) right_context_blocks.append(temp) right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed)) right_context_ranges = torch.cat(right_context_blocks) if self.use_mem: # Memory bank # The position for memory -n, .., -1 if is_decoding: memory_size = min(past_length, self.max_memory_size) else: memory_size = num_segs + past_length - 1 memory_bank_ranges = torch.arange( -max_relative_position - 1, -max_relative_position - 1 - memory_size, -1 ) # summary vector # The position for summary vector as the T+max_relative_position+1. 
# After the clamping, the relative position is max_relative_position summary_pos_st = u_ed + max_relative_position + 1 summary_vector_ranges = torch.arange( summary_pos_st, summary_pos_st + num_segs ) key_ranges = torch.cat( [ memory_bank_ranges, right_context_ranges, left_context_ranges, utterance_ranges, ] ) query_ranges = torch.cat( [right_context_ranges, utterance_ranges, summary_vector_ranges] ) else: key_ranges = torch.cat( [right_context_ranges, left_context_ranges, utterance_ranges] ) query_ranges = torch.cat([right_context_ranges, utterance_ranges]) distance = key_ranges[None, :] - query_ranges[:, None] distance_clamp = ( torch.clamp(distance, -max_relative_position, max_relative_position) + max_relative_position ) distance_clamp = distance_clamp.to(input.device).long().detach() return distance_clamp def _get_attention_mask(self, input, past_length=0, left_context_cache=0): # attention mask for each query contains three parts: # 1. memory part # 2. left_context + segment # 3. right_context_block # so for each segment and its correspoinding right context block, # the attention matrix is formed by 9 parts: # [0, m, 0, 0, right_context, 0, 0, seg, 0] # [before memory, memory, after memory, before right context, right_context, # after right context, before seg, seg, after seg] # # Query is formed in the way as [right_context_blocks, utterance, summary] # # Note: put m and right_context before segment is convenient # for padding_mask operation. # Key lengths = m_length + right_context_block_length + lengths utterance_length, batch_size, _ = input.shape summary_length = math.ceil(utterance_length / self.segment_size) num_segs = summary_length rc_length = self.right_context * num_segs rc = self.right_context lc = self.left_context # using mini-batches, there is left context cache available for current # sequence. 
lcc = left_context_cache # max_memory_size is 0 then we don't have memory and summary # past_length is the memory carry from previous sequence if self.use_mem: mem_length = num_segs - 1 + past_length else: mem_length = 0 rc_mask = [] query_mask = [] summary_mask = [] for j in range(0, num_segs): ssize = min(self.segment_size, utterance_length - j * self.segment_size) rc_size = rc rc_mat = [] q_mat = [] s_mat = [] m_start = max(j + past_length - self.max_memory_size, 0) # max_memory_size is 0, then we don't use memory if self.use_mem: # part 0: before memory rc_mat.append(input.new_zeros(rc_size, m_start)) q_mat.append(input.new_zeros(ssize, m_start)) s_mat.append(input.new_zeros(1, m_start)) # part 1: memory col_1 = j + past_length - m_start rc_mat.append(torch.ones(rc_size, col_1, device=input.device)) q_mat.append(torch.ones(ssize, col_1, device=input.device)) # based on D22875746, disable summary query attention # on memeory is better for long form utterance s_mat.append(input.new_zeros(1, col_1)) # part 2: after memory col_2 = mem_length - (j + past_length) rc_mat.append(input.new_zeros(rc_size, col_2)) q_mat.append(input.new_zeros(ssize, col_2)) s_mat.append(input.new_zeros(1, col_2)) # part 3: before right context rc_start = j * rc rc_mat.append(input.new_zeros(rc_size, rc_start)) q_mat.append(input.new_zeros(ssize, rc_start)) s_mat.append(input.new_zeros(1, rc_start)) # part 4: right context rc_end = rc_start + rc col_4 = rc rc_mat.append(torch.ones(rc_size, col_4, device=input.device)) q_mat.append(torch.ones(ssize, col_4, device=input.device)) s_mat.append(torch.ones(1, col_4, device=input.device)) # part 5: after right context col_5 = rc_length - rc_end rc_mat.append(input.new_zeros(rc_size, col_5)) q_mat.append(input.new_zeros(ssize, col_5)) s_mat.append(input.new_zeros(1, col_5)) # part 6: before query segment seg_start = max(j * self.segment_size + lcc - lc, 0) rc_mat.append(input.new_zeros(rc_size, seg_start)) q_mat.append(input.new_zeros(ssize, seg_start)) s_mat.append(input.new_zeros(1, seg_start)) # part 7: query segment # note: right context is put in right context block # here we only need to consider about left context seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc) col_7 = seg_end - seg_start rc_mat.append(torch.ones(rc_size, col_7, device=input.device)) q_mat.append(torch.ones(ssize, col_7, device=input.device)) s_mat.append(torch.ones(1, col_7, device=input.device)) # part 8: after query segment col_8 = utterance_length + lcc - seg_end rc_mat.append(input.new_zeros(rc_size, col_8)) q_mat.append(input.new_zeros(ssize, col_8)) s_mat.append(input.new_zeros(1, col_8)) rc_mask.append(torch.cat(rc_mat, dim=1)) query_mask.append(torch.cat(q_mat, dim=1)) summary_mask.append(torch.cat(s_mat, dim=1)) # no memory, then we don't need summary either if self.use_mem: attention_mask = ( 1 - torch.cat( [ torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0), torch.cat(summary_mask, dim=0), ], dim=0, ) ).to(torch.bool) else: attention_mask = ( 1 - torch.cat( [torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0 ) ).to(torch.bool) return attention_mask @torch.jit.export def init_state( self, batch_size: int, device: Optional[Device] = None ) -> List[Tensor]: empty_memory = torch.zeros( self.num_layers, self.max_memory_size, batch_size, self.memory_dim, device=device, ) left_context_key = torch.zeros( self.num_layers, self.left_context, batch_size, self.memory_dim, device=device, ) left_context_val = torch.zeros( self.num_layers, self.left_context, 
batch_size, self.memory_dim, device=device, ) past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device) return [empty_memory, left_context_key, left_context_val, past_length] @torch.jit.export def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]: if len(states) == 0: return [] batched_m = [] batched_lc_key = [] batched_lc_val = [] batched_past_length = [] for state in states: if len(state) == 0: continue m, lc_key, lc_val, past_length = state batched_m.append(m) batched_lc_key.append(lc_key) batched_lc_val.append(lc_val) batched_past_length.append(past_length) if ( (len(batched_m) == 0) or (len(batched_lc_key) == 0) or (len(batched_lc_val) == 0) or (len(batched_past_length) == 0) ): return [ torch.tensor([]), torch.tensor([]), torch.tensor([]), torch.tensor([]), ] batched_m = torch.cat(batched_m, dim=2) batched_lc_key = torch.cat(batched_lc_key, dim=2) batched_lc_val = torch.cat(batched_lc_val, dim=2) batched_past_length = torch.cat(batched_past_length, dim=1) return [batched_m, batched_lc_key, batched_lc_val, batched_past_length] @torch.jit.export def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: if len(state) == 0: return [] m, lc_key, lc_val, past_length = state indices = indices.to(device=m.device) reord_m = torch.index_select(m, 2, indices) reord_lc_key = torch.index_select(lc_key, 2, indices) reord_lc_val = torch.index_select(lc_val, 2, indices) reord_past_length = torch.index_select(past_length, 1, indices) return [reord_m, reord_lc_key, reord_lc_val, reord_past_length] @torch.jit.export def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: m, lc_key, lc_val, past_length = state m = m.index_fill(dim=2, index=indices, value=0.0) lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0) lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0) past_length = past_length.index_fill(dim=1, index=indices, value=0) return [m, lc_key, lc_val, past_length] @torch.jit.export def state_size(self) -> int: return 4 @torch.jit.export def batch_size_in_state( self, state: Optional[List[Tensor]], sloppy: bool = True ) -> Optional[int]: if state is None: return None return state[0].size(2) def gen_summary_queries(self, input): sum_input = self.memory_op(input) return sum_input def _gen_right_context_padded_input(self, input): # This function deals with input that is already # padded with right context (e.g. minibatch training) right_context_blocks = [] T, B, D = input.shape num_segs = math.ceil((T - self.right_context) / self.segment_size) for i in range(0, num_segs - 1): st = (i + 1) * self.segment_size ed = st + self.right_context assert ed < T temp = input[st:ed, :, :] right_context_blocks.append(temp) # last segment right context is already available right_context_blocks.append(input[T - self.right_context :, :, :]) return torch.cat(right_context_blocks, dim=0) def _gen_segs_right_context(self, input, lengths): segments = [] T, B, D = input.size() nT = T - self.right_context # assume input is right context padded num_segs = math.ceil(nT / self.segment_size) # pad zeros to the utterance to make sure each # segment has the same right context. 
        for i in range(0, num_segs - 1):
            st = i * self.segment_size
            ed = min(T, st + self.segment_size + self.right_context)
            temp = input[st:ed, :, :]
            rest_lengths = torch.clamp(
                lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size
            )
            segments.append((temp, lengths - rest_lengths + self.right_context))
            lengths = rest_lengths

        last_seg = input[st + self.segment_size :, :, :]
        segments.append((last_seg, rest_lengths + self.right_context))

        return segments

    @torch.jit.unused
    def forward(
        self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None
    ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
        # Xutai: originally the second argument is lengths.
        lengths = (~padding_masks).sum(dim=1).long()
        # mini-batch training.
        if self.mini_batches:
            return self.forward_mini_batches(input, lengths, state)

        # Regular full-sequence training. Note: we assume the right context is
        # provided in the input.
        T, B, D = input.size()
        right_context_blocks = self._gen_right_context_padded_input(input)

        # generate the relative positional embedding
        if self.use_rpe:
            rpe = self._get_relative_position(
                input=input,
                max_relative_position=self.max_relative_position,
                left_context_length=0,
                past_length=0,
                is_decoding=False,
            )
        else:
            rpe = None
        input = input[: T - self.right_context, :, :]

        attention_mask = self._get_attention_mask(input)

        # The first layer uses each segment mean as memory;
        # the average of the last segment is ignored.
        if self.use_mem:
            mems = self.gen_summary_queries(input)[:-1, :, :]
        else:
            mems = torch.zeros(0, input.size(1), input.size(2), device=input.device)
        mems = mems.type_as(input)

        output = input
        all_outputs = []

        for layer in self.layers:
            output, mems, right_context_blocks, _, _ = layer(
                input=output,
                lengths=lengths,
                attention_mask=attention_mask,
                mems=mems,
                right_context_blocks=right_context_blocks,
                pre_mems=None,
                left_context_key=None,
                left_context_val=None,
                rpe=rpe,
            )
            all_outputs.append(output)
        return output, padding_masks, [], all_outputs

    def forward_jit_mini_batch_init(
        self,
        seg: Tensor,
        state: Optional[List[Tensor]] = None,
        is_decoding: bool = False,
    ):
        # Prepare state. In whole-sequence training, state is ignored.
        # For mini-batch training, we need to prepare state.
        if state is None:
            state = self.init_state(batch_size=seg.size(1), device=seg.device)
            if seg.dtype == torch.half:
                state = [state[0].half(), state[1].half(), state[2].half(), state[3]]

        if self.use_mem:
            # note input average only on seg, not on right context
            # first layer uses each segment mean as memory.
the last # one segment average is used in state full_mems = self.gen_summary_queries(seg) if is_decoding: mems = full_mems[0:1, :, :] state_mems = torch.cat([state[0][0], mems], dim=0) else: mems = full_mems[:-1, :, :] state_mems = torch.cat([state[0][0], full_mems], dim=0) else: mems = state[0][0] state_mems = mems # track processed segment number or memory number # the same batch as the same bumber of past length past_length = state[3][0][0].item() past_left_context = min(past_length * self.segment_size, self.left_context) past_length = min(self.max_memory_size, past_length) return state, mems, state_mems, past_length, past_left_context def state_update_before( self, layer: int, state: List[Tensor], past_length: int, past_left_context: int ): pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :] lc_key = state[1][layer][self.left_context - past_left_context :, :, :] lc_val = state[2][layer][self.left_context - past_left_context :, :, :] return pre_mems, lc_key, lc_val def state_update_after( self, layer: int, state: List[Tensor], mems: Tensor, next_key: Tensor, next_val: Tensor, mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], ): # mems is used for next layer if layer < self.num_layers - 1: state_mems = torch.cat([state[0][layer + 1], mems], dim=0) mems_list.append(state_mems[-self.max_memory_size :, :, :]) # when mems pass to next sequence, we need the last memory. when mems # use for the next layer, we can ignore the last memory mems = mems[:-1, :, :] # note state[1][i] and state[2][i] original length equals to self.left_context new_k = torch.cat([state[1][layer], next_key], dim=0) new_v = torch.cat([state[2][layer], next_val], dim=0) lc_key_list.append(new_k[-self.left_context :, :, :]) lc_val_list.append(new_v[-self.left_context :, :, :]) return mems_list, lc_key_list, lc_val_list, mems def state_update_after_loop( self, state: List[Tensor], mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], update_length: int, ): state[0] = torch.stack(mems_list, dim=0) state[1] = torch.stack(lc_key_list, dim=0) state[2] = torch.stack(lc_val_list, dim=0) state[3] = state[3] + update_length return state @torch.jit.unused def forward_mini_batches( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]: T, B, D = input.size() # input without right context seg = input[: T - self.right_context, :, :] # get right context blocks right_context_blocks = self._gen_right_context_padded_input(input) mems_list = [] lc_key_list = [] lc_val_list = [] results = self.forward_jit_mini_batch_init(seg, state, False) state, mems, state_mems, past_length, past_left_context = results # relative position embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=past_left_context, past_length=past_length, is_decoding=False, ) else: rpe = None # get attention mask based on seg (not include right context) and available # left context attention_mask = self._get_attention_mask(seg, past_length, past_left_context) mems_list.append(state_mems[-self.max_memory_size :, :, :]) output = seg i = 0 all_outputs = [] for layer in self.layers: # In order to make cross stream batching work, mem, left context key # and left context value in the state should always be the same shape. # We use the past length to track the processed segment number. 
In this # way, we take out the essential memory, left context key and left # context val from the state. After finish the forward for current segment # we add the new memory, left context key and left context value into the # staate and trim out the oldest part to keep the shape consistent. pre_mems, lc_key, lc_val = self.state_update_before( i, state, past_length, past_left_context ) output, mems, right_context_blocks, next_key, next_val = layer.forward( input=output, lengths=lengths, attention_mask=attention_mask, mems=mems, right_context_blocks=right_context_blocks, pre_mems=pre_mems, left_context_key=lc_key, left_context_val=lc_val, rpe=rpe, ) all_outputs.append(output) mems_list, lc_key_list, lc_val_list, mems = self.state_update_after( layer=i, state=state, mems=mems, next_key=next_key, next_val=next_val, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, ) i += 1 # update state update_length = math.ceil((T - self.right_context) / self.segment_size) state = self.state_update_after_loop( state=state, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, update_length=update_length, ) return output, lengths, state, all_outputs def forward_jit_test( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: """ This one simulate sequence encoder forward jit. This is for unit test purpose. It is not used in training or decoding. Note, extra_right_context is set in the model. In unit test, input = [utterance, right_context], lengths = [utterance_length]. args: input: input utterance lengths: utterance input length state: None here. input is whole utterance """ # [TODO] sequence_to_segment has bug in lengths. seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths) seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = [] state: Optional[List[Tensor]] = None for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths: seg_enc_tokens, seg_enc_lengths, state = self.forward_jit( input=seg_src_tokens, lengths=seg_src_lengths, state=state ) seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths)) enc_tokens, enc_lengths = segments_to_sequence( segments=seg_enc_tokens_lengths, time_axis=0 ) state = [] # returns trivial state return enc_tokens, enc_lengths, state @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: """ Forward helper for online decoding. args: input: [seg, right_context]. We assume in online we always padding the right context to the preset right context size. For the last segment, we may have short segment size, but right context size is the same as other segments lengths: utterance input length is the utterance segment length and right context size state: [memory, left_context_key, left_context_val]. 
To improve throughput, in addition to memory, we also cache key and value for left_context in multihead self-attention """ # In online decoding, input = [segment, right_context] # Lengths = [segment_length, right_context_length] # so we need strip right context in output T, B, D = input.size() rc_str = T - self.right_context rc_end = T right_context_blocks = input[rc_str:rc_end, :, :] seg = input[:rc_str, :, :] lengths = torch.clamp(lengths - self.right_context, min=0) mems_list = [] lc_key_list = [] lc_val_list = [] results = self.forward_jit_mini_batch_init(seg, state, True) state, mems, state_mems, past_length, past_left_context = results # relative position embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=past_left_context, past_length=past_length, is_decoding=True, ) else: rpe = None # memory for first layer. mems_list.append(state_mems[-self.max_memory_size :, :, :]) output = seg i = 0 for layer in self.layers: # In order to make cross stream batching work, mem, left context key # and left context value in the state should always be the same shape. # We use the past length to track the processed segment number. In this # way, we take out the essential memory, left context key and left # context val from the state. After finish the forward for current segment # we add the new memory, left context key and left context value into the # staate and trim out the oldest part to keep the shape consistent. true_mems, lc_key, lc_val = self.state_update_before( layer=i, state=state, past_length=past_length, past_left_context=past_left_context, ) output, mems, right_context_blocks, next_key, next_val = layer.forward_jit( input=output, lengths=lengths, mems=true_mems, right_context_blocks=right_context_blocks, left_context_key=lc_key, left_context_val=lc_val, rpe=rpe, ) # mems is used for next layer mems_list, lc_key_list, lc_val_list, _ = self.state_update_after( layer=i, state=state, mems_list=mems_list, mems=mems, next_key=next_key, next_val=next_val, lc_key_list=lc_key_list, lc_val_list=lc_val_list, ) i += 1 # update state state = self.state_update_after_loop( state=state, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, update_length=1, ) return output, lengths, state def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig torch.quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self # ------------------------------------------------------------------------------ # Emformer encoder for seq2seq model # This is a wrapper over the original emformer # ------------------------------------------------------------------------------ def emformer_encoder(klass): class SpeechEncoder(klass): def __init__(self, args): super().__init__(args) stride = SpeechEncoder.conv_layer_stride(args) trf_left_context = args.segment_left_context // stride trf_right_context = args.segment_right_context // stride context_config = [trf_left_context, trf_right_context] self.transformer_layers = nn.ModuleList( [ NoSegAugmentedMemoryTransformerEncoderLayer( input_dim=args.encoder_embed_dim, num_heads=args.encoder_attention_heads, ffn_dim=args.encoder_ffn_embed_dim, num_layers=args.encoder_layers, dropout_in_attn=args.dropout, dropout_on_attn=args.dropout, dropout_on_fc1=args.dropout, dropout_on_fc2=args.dropout, 
                        activation_fn=args.activation_fn,
                        context_config=context_config,
                        segment_size=args.segment_length,
                        max_memory_size=args.max_memory_size,
                        scaled_init=True,  # TODO: use constant for now.
                        tanh_on_mem=args.amtrf_tanh_on_mem,
                    )
                ]
            )

        def forward(self, src_tokens, src_lengths):
            encoder_out = super().forward(src_tokens, src_lengths)
            output = encoder_out["encoder_out"][0]
            encoder_padding_masks = encoder_out["encoder_padding_mask"][0]
            # This is because, in the original implementation, the output did
            # not treat the last segment as right context.
            encoder_padding_masks = encoder_padding_masks[:, : output.size(0)]

            return {
                "encoder_out": [output],
                "encoder_padding_mask": [encoder_padding_masks],
                "encoder_embedding": [],
                "encoder_states": [],
                "src_tokens": [],
                "src_lengths": [],
            }

        @staticmethod
        def conv_layer_stride(args):
            # TODO: make it configurable from the args
            return 4

    SpeechEncoder.__name__ = klass.__name__

    return SpeechEncoder
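

# ------------------------------------------------------------------------------
# usage sketch (illustrative addition, not part of the original fairseq module)
# ------------------------------------------------------------------------------
# A minimal, hedged example that runs NoSegAugmentedMemoryTransformerEncoderLayer
# on dummy input. Every hyper-parameter below (dims, segment size, the (0, 2)
# context config, batch size) is an arbitrary assumption made for illustration;
# max_memory_size=0 disables the augmented memory bank (self.use_mem is False in
# that case). The input is expected to already be padded with `right_context`
# extra frames at the end of the time axis.
if __name__ == "__main__":
    encoder = NoSegAugmentedMemoryTransformerEncoderLayer(
        input_dim=8,
        num_heads=2,
        ffn_dim=16,
        num_layers=1,
        segment_size=4,
        context_config=(0, 2),  # (left_context, right_context)
        max_memory_size=0,      # 0 -> no memory bank
    )
    utterance_len, right_context, bsz, dim = 8, 2, 2, 8
    x = torch.randn(utterance_len + right_context, bsz, dim)  # (T + rc, B, D)
    padding_mask = torch.zeros(bsz, utterance_len, dtype=torch.bool)  # no padding
    out, out_mask, _, all_layer_outputs = encoder(x, padding_mask)
    print(out.shape)  # torch.Size([8, 2, 8]); the right context is stripped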
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/modules/emformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Tuple, List import torch import torch.nn.functional as F from fairseq.models import FairseqEncoder from fairseq.models.speech_to_text import ( ConvTransformerEncoder, ) from fairseq.models.speech_to_text.utils import attention_suppression from fairseq.models.speech_to_text.utils import ( lengths_to_encoder_padding_mask, segments_to_sequence, sequence_to_segments, ) from fairseq.modules import MultiheadAttention, TransformerEncoderLayer from torch import nn, Tensor # ------------------------------------------------------------------------------ # AugmentedMemoryConvTransformerEncoder # ------------------------------------------------------------------------------ class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder): def __init__(self, args): super().__init__(args) args.encoder_stride = self.stride() self.left_context = args.left_context // args.encoder_stride self.right_context = args.right_context // args.encoder_stride self.left_context_after_stride = args.left_context // args.encoder_stride self.right_context_after_stride = args.right_context // args.encoder_stride self.transformer_layers = nn.ModuleList([]) self.transformer_layers.extend( [ AugmentedMemoryTransformerEncoderLayer(args) for i in range(args.encoder_layers) ] ) def stride(self): # Hard coded here. Should infer from convs in future stride = 4 return stride def forward(self, src_tokens, src_lengths, states=None): """Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: """ bsz, max_seq_len, _ = src_tokens.size() x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) x = self.conv(x) bsz, _, output_seq_len, _ = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) x = self.out(x) x = self.embed_scale * x subsampling_factor = 1.0 * max_seq_len / output_seq_len input_lengths = torch.max( (src_lengths.float() / subsampling_factor).ceil().long(), x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(), ) encoder_padding_mask, _ = lengths_to_encoder_padding_mask( input_lengths, batch_first=True ) # TODO: fix positional embedding positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = F.dropout(x, p=self.dropout, training=self.training) # State to store memory banks etc. 
if states is None: states = [ {"memory_banks": None, "encoder_states": None} for i in range(len(self.transformer_layers)) ] for i, layer in enumerate(self.transformer_layers): # x size: # (self.left_size + self.segment_size + self.right_size) # / self.stride, num_heads, dim # TODO: Consider mask here x = layer(x, states[i]) states[i]["encoder_states"] = x[ self.left_context_after_stride : -self.right_context_after_stride ] lengths = ( ( ~encoder_padding_mask[ :, self.left_context_after_stride : -self.right_context_after_stride ] ) .sum(dim=1, keepdim=True) .long() ) return states[-1]["encoder_states"], lengths, states # ------------------------------------------------------------------------------ # AugmentedMemoryTransformerEncoderLayer # ------------------------------------------------------------------------------ class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer): def __init__(self, args): super().__init__(args) self.left_context = args.left_context // args.encoder_stride self.right_context = args.right_context // args.encoder_stride def forward(self, x, state): length, batch_size, x_dim = x.size() residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) # init_state if state.get("memory_banks", None) is None: state["memory_banks"] = [] # TODO reseach new sum_query method seg_start = self.left_context seg_end = length - self.right_context if seg_start < seg_end: summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0) else: summarization_query = x.new_zeros(1, batch_size, x_dim) x = torch.cat([x, summarization_query], dim=0) x = self.self_attn(input_and_summary=x, state=state) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) return x def build_self_attention(self, embed_dim, args): return AugmentedMemoryMultiheadAttention( embed_dim=embed_dim, num_heads=args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, tanh_on_mem=True, max_memory_size=args.max_memory_size, ) # ------------------------------------------------------------------------------ # AugmentedMemoryMultiheadAttention # ------------------------------------------------------------------------------ class AugmentedMemoryMultiheadAttention(MultiheadAttention): """ Augmented Memory Attention from Streaming Transformer-based Acoustic Models Using Self-attention with Augmented Memory https://arxiv.org/abs/2005.08042 """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, tanh_on_mem=False, memory_dim=None, std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137 max_memory_size=-1, disable_mem_on_mem_attn=True, ): super().__init__( embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention, q_noise, qn_block_size, ) self.memory_dim = memory_dim if memory_dim is not None else embed_dim self.std_scale = std_scale self.disable_mem_on_mem_attn = disable_mem_on_mem_attn # This Operator was used for factorization in PySpeech self.v2e = lambda x: x if 
tanh_on_mem: self.squash_mem = torch.tanh self.nonlinear_squash_mem = True else: self.squash_mem = lambda x: x self.nonlinear_squash_mem = False self.max_memory_size = max_memory_size def forward(self, input_and_summary, state): """ input: Encoder states of current segment with left or right context, plus one summarization query """ length, batch_size, _ = input_and_summary.shape length = length - 1 # not include sum_query, last index memory = state["memory_banks"] # TODO: positional embedding on memory if self.max_memory_size > -1 and len(memory) > self.max_memory_size: # TODO: need to fix here if self.max_memory_size == 0: memory = memory.new_zeros(1, memory.size(1), self.memory_dim) else: memory = memory[-self.max_memory_size :] memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0) input_and_sum_query = input_and_summary q = self.q_proj(self.v2e(input_and_sum_query)) k = self.k_proj(self.v2e(memory_and_input)) v = self.v_proj(self.v2e(memory_and_input)) q = ( q.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) * self.scaling ) k = ( k.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) v = ( v.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) attention_weights = torch.bmm(q, k.transpose(1, 2)) if self.disable_mem_on_mem_attn: attention_weights = self.suppress_mem_on_mem_attention( batch_size, self.num_heads, len(memory), attention_weights ) if self.std_scale is not None: attention_weights = attention_suppression(attention_weights, self.std_scale) assert list(attention_weights.shape) == [ batch_size * self.num_heads, length + 1, length + len(memory), ] attention_weights = torch.nn.functional.softmax( attention_weights.float(), dim=-1 ).type_as(attention_weights) attention_probs = self.dropout_module(attention_weights) # [T, T, B, n_head] + [T, B, n_head, d_head] -> [T, B, n_head, d_head] attention = torch.bmm(attention_probs, v) assert list(attention.shape) == [ batch_size * self.num_heads, length + 1, self.head_dim, ] attention = ( attention.transpose(0, 1) .contiguous() .view(length + 1, batch_size, self.embed_dim) ) output_and_memory = self.out_proj(attention) next_m = output_and_memory[-1:] next_m = self.squash_mem(next_m) output = output_and_memory[:-1] state["memory_banks"].append(next_m) return output def suppress_mem_on_mem_attention( self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor ): """ Arguments: - B: batch size - num_heads: number of attention heads - mem_size: size of memory bank - attention_weight: a [B*num_heads, T + 1, T + mem_size] vector Return: modified attention_weight with [B*num_heads, -1, :mem_size] = -inf """ attention_weight[:, -1, :mem_size] = float("-inf") return attention_weight # ------------------------------------------------------------------------------ # SequenceEncoder # ------------------------------------------------------------------------------ class SequenceEncoder(FairseqEncoder): """ SequenceEncoder encodes sequences. More specifically, `src_tokens` and `src_lengths` in `forward()` should describe a batch of "complete" sequences rather than segments. Segment-by-segment inference can be triggered by `segment_size`: 1) `segment_size` is None: SequenceEncoder treats the input sequence as one single segment. 2) `segment_size` is not None (some int instead): SequenceEncoder does the following: 1. breaks the input sequence into several segments 2. inference on each segment and collect the outputs 3. 
concatanete segment outputs into the output sequence. Note that `segment_size` here shouldn't include additional left/right contexts needed, for example if we wish to infer with LC-BLSTM where the middle chunk size is 100 and right context is 20, `segment_size` should be 100. """ def __init__(self, args, module): super().__init__(None) self.module = module self.input_time_axis = 1 self.output_time_axis = 0 self.segment_size = args.segment_size self.left_context = args.left_context self.right_context = args.right_context def forward( self, src_tokens: Tensor, src_lengths: Tensor, states=None, ): seg_src_tokens_lengths = sequence_to_segments( sequence=src_tokens, time_axis=self.input_time_axis, lengths=src_lengths, segment_size=self.segment_size, extra_left_context=self.left_context, extra_right_context=self.right_context, ) seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = [] for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths: (seg_encoder_states, seg_enc_lengths, states) = self.module( seg_src_tokens, seg_src_lengths, states=states, ) seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths)) encoder_out, enc_lengths = segments_to_sequence( segments=seg_encoder_states_lengths, time_axis=self.output_time_axis ) encoder_padding_mask, _ = lengths_to_encoder_padding_mask( enc_lengths, batch_first=True ) if not encoder_padding_mask.any(): encoder_padding_mask = None return { "encoder_out": [encoder_out], "encoder_padding_mask": [encoder_padding_mask], "encoder_embedding": [], "encoder_states": [states], "src_tokens": [], "src_lengths": [], } def incremental_encode( self, seg_src_tokens: Tensor, seg_src_lengths: Tensor, states=None, ): """ Different from forward function, this function takes segmented speech as input, and append encoder states to previous states """ (seg_encoder_states, seg_enc_lengths, states) = self.module( seg_src_tokens, seg_src_lengths, states=states, ) return seg_encoder_states, seg_enc_lengths, states # ------------------------------------------------------------------------------ # Augmented memory model decorator # ------------------------------------------------------------------------------ def augmented_memory(klass): class StreamSeq2SeqModel(klass): @staticmethod def add_args(parser): super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser) parser.add_argument( "--segment-size", type=int, required=True, help="Length of the segment." ) parser.add_argument( "--left-context", type=int, default=0, help="Left context for the segment.", ) parser.add_argument( "--right-context", type=int, default=0, help="Right context for the segment.", ) parser.add_argument( "--max-memory-size", type=int, default=-1, help="Right context for the segment.", ) StreamSeq2SeqModel.__name__ = klass.__name__ return StreamSeq2SeqModel
bart_ls-main
fairseq-py/fairseq/models/speech_to_text/modules/augmented_memory_attention.py
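The SequenceEncoder in the file above processes a long utterance segment by segment: each segment is padded with extra left/right context frames, encoded, and only the centre states are kept and concatenated back together. Below is a minimal, self-contained sketch of that chunking idea, assuming a toy identity encoder; the helper names (split_into_segments, encode_by_segments, toy_encoder) are illustrative and are not the fairseq utilities (sequence_to_segments / segments_to_sequence) themselves.

import torch

def split_into_segments(seq, segment_size, left_context, right_context):
    """seq: (T, B, D) -> list of (chunk, n_left, n_right) with extra context frames attached."""
    T = seq.size(0)
    segments = []
    for start in range(0, T, segment_size):
        end = min(start + segment_size, T)
        ctx_start = max(0, start - left_context)
        ctx_end = min(T, end + right_context)
        segments.append((seq[ctx_start:ctx_end], start - ctx_start, ctx_end - end))
    return segments

def toy_encoder(chunk):
    return chunk  # stand-in for the transformer layers; identity keeps this runnable

def encode_by_segments(seq, segment_size=4, left_context=2, right_context=2):
    outputs = []
    for chunk, n_left, n_right in split_into_segments(seq, segment_size, left_context, right_context):
        out = toy_encoder(chunk)
        stop = out.size(0) - n_right if n_right > 0 else None
        outputs.append(out[n_left:stop])  # drop context frames, keep the segment's own states
    return torch.cat(outputs, dim=0)

x = torch.randn(10, 2, 8)                  # (time, batch, dim)
assert encode_by_segments(x).shape == x.shape

The memory-bank part of the model (the mean summarization query appended in AugmentedMemoryTransformerEncoderLayer and cached in state["memory_banks"]) is orthogonal to this chunking and is omitted from the sketch.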
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hubert import *  # noqa
from .hubert_asr import *  # noqa
bart_ls-main
fairseq-py/fairseq/models/hubert/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from argparse import Namespace from typing import Any import torch import torch.nn as nn from dataclasses import dataclass, field from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models import BaseFairseqModel, FairseqEncoder, register_model from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES from fairseq.tasks import FairseqTask from omegaconf import II, MISSING @dataclass class HubertAsrConfig(FairseqDataclass): w2v_path: str = field( default=MISSING, metadata={"help": "path to hubert model"} ) no_pretrained_weights: bool = field( default=False, metadata={"help": "if true, does not load pretrained weights"}, ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) final_dropout: float = field( default=0.0, metadata={ "help": "dropout after transformer and before final projection" }, ) dropout: float = field( default=0.0, metadata={"help": "dropout probability inside hubert model"}, ) attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights " "inside hubert model" }, ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN " "inside hubert model" }, ) # masking apply_mask: bool = field( default=False, metadata={"help": "apply masking during fine-tuning"} ) mask_length: int = field( default=10, metadata={"help": "repeat the mask indices multiple times"} ) mask_prob: float = field( default=0.5, metadata={ "help": "probability of replacing a token with mask " "(normalized by length)" }, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose masks"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"}, ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"}, ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"}, ) freeze_finetune_updates: int = field( default=0, metadata={"help": "dont finetune hubert for this many updates"}, ) feature_grad_mult: float = field( default=0.0, metadata={"help": "reset feature grad mult in hubert to this"}, ) layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a layer in hubert"}, ) normalize: bool = II("task.normalize") data: str = II("task.data") # this holds the loaded hubert args w2v_args: Any = None @dataclass class HubertCtcConfig(HubertAsrConfig): pass @register_model("hubert_ctc", 
dataclass=HubertCtcConfig) class HubertCtc(BaseFairseqModel): def __init__(self, cfg: HubertCtcConfig, w2v_encoder: BaseFairseqModel): super().__init__() self.cfg = cfg self.w2v_encoder = w2v_encoder def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: HubertCtcConfig, task: FairseqTask): """Build a new model instance.""" w2v_encoder = HubertEncoder(cfg, task.target_dictionary) return cls(cfg, w2v_encoder) def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output["encoder_out"] if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def get_logits(self, net_output): logits = net_output["encoder_out"] padding = net_output["encoder_padding_mask"] if padding is not None and padding.any(): padding = padding.T logits[padding][..., 0] = 0 logits[padding][..., 1:] = float("-inf") return logits def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x @dataclass class HubertSeq2SeqConfig(HubertAsrConfig): decoder_embed_dim: int = field( default=768, metadata={"help": "decoder embedding dimension"} ) decoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field( default=6, metadata={"help": "num of decoder layers"} ) decoder_layerdrop: float = field( default=0.0, metadata={"help": "decoder layerdrop chance"} ) decoder_attention_heads: int = field( default=4, metadata={"help": "num decoder attention heads"} ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"}, ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings " "(outside self attention)" }, ) decoder_dropout: float = field( default=0.0, metadata={"help": "dropout probability in the decoder"} ) decoder_attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights " "inside the decoder" }, ) decoder_activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN " "inside the decoder" }, ) max_target_positions: int = field( default=2048, metadata={"help": "max target positions"} ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"}, ) class HubertEncoder(FairseqEncoder): def __init__(self, cfg: HubertAsrConfig, tgt_dict=None): self.apply_mask = cfg.apply_mask arg_overrides = { "dropout": cfg.dropout, "activation_dropout": cfg.activation_dropout, "dropout_input": cfg.dropout_input, "attention_dropout": cfg.attention_dropout, "mask_length": cfg.mask_length, "mask_prob": cfg.mask_prob, "mask_selection": cfg.mask_selection, "mask_other": cfg.mask_other, "no_mask_overlap": cfg.no_mask_overlap, "mask_channel_length": cfg.mask_channel_length, "mask_channel_prob": cfg.mask_channel_prob, "mask_channel_selection": cfg.mask_channel_selection, "mask_channel_other": cfg.mask_channel_other, "no_mask_channel_overlap": cfg.no_mask_channel_overlap, "encoder_layerdrop": cfg.layerdrop, "feature_grad_mult": cfg.feature_grad_mult, } if cfg.w2v_args is None: state = checkpoint_utils.load_checkpoint_to_cpu( 
cfg.w2v_path, arg_overrides ) w2v_args = state.get("cfg", None) if w2v_args is None: w2v_args = convert_namespace_to_omegaconf(state["args"]) cfg.w2v_args = w2v_args else: state = None w2v_args = cfg.w2v_args if isinstance(w2v_args, Namespace): cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf( w2v_args ) assert cfg.normalize == w2v_args.task.normalize, ( "Fine-tuning works best when data normalization is the same. " "Please check that --normalize is set or unset for " "both pre-training and here" ) w2v_args.task.data = cfg.data task = tasks.setup_task(w2v_args.task) if state is not None and "task_state" in state: # This will load the stored "dictionaries" object task.load_state_dict(state["task_state"]) model = task.build_model(w2v_args.model) if state is not None and not cfg.no_pretrained_weights: # set strict=False because we omit some modules model.load_state_dict(state["model"], strict=False) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.model.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(cfg.final_dropout) self.freeze_finetune_updates = cfg.freeze_finetune_updates self.num_updates = 0 if tgt_dict is not None: self.proj = Linear(d, len(tgt_dict)) elif getattr(cfg, "decoder_embed_dim", d) != d: self.proj = Linear(d, cfg.decoder_embed_dim) else: self.proj = None def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, tbc=True, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): x, padding_mask = self.w2v_model.extract_features(**w2v_args) if tbc: # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": padding_mask, # B x T "padding_mask": padding_mask, } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out[ "encoder_out" ].index_select(1, new_order) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
bart_ls-main
fairseq-py/fairseq/models/hubert/hubert_asr.py
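One detail of HubertEncoder.forward above that is easy to miss is how the pretrained encoder is frozen for the first freeze_finetune_updates steps: the feature-extraction call is wrapped in torch.no_grad() until the update counter passes the threshold, after which contextlib.ExitStack() serves as a no-op context manager so gradients flow again. A minimal sketch of that pattern, with a hypothetical ToyEncoder module standing in for the HuBERT model:

import contextlib
import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    def __init__(self, dim=8, freeze_finetune_updates=1000):
        super().__init__()
        self.proj = nn.Linear(dim, dim)
        self.freeze_finetune_updates = freeze_finetune_updates
        self.num_updates = 0

    def set_num_updates(self, num_updates):
        self.num_updates = num_updates

    def forward(self, x):
        ft = self.freeze_finetune_updates <= self.num_updates
        # frozen phase: run under no_grad(); afterwards ExitStack() is a no-op
        with torch.no_grad() if not ft else contextlib.ExitStack():
            x = self.proj(x)
        return x

enc = ToyEncoder()
x = torch.randn(2, 8)

enc.set_num_updates(0)            # still in the frozen phase
assert not enc(x).requires_grad

enc.set_num_updates(1000)         # past the threshold: gradients flow again
assert enc(x).requires_grad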
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, List, Optional, Tuple import numpy as np import torch import torch.nn as nn from dataclasses import dataclass, field from fairseq import utils from fairseq.data.data_utils import compute_mask_indices from fairseq.data.dictionary import Dictionary from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.models.wav2vec.wav2vec2 import ( ConvFeatureExtractionModel, TransformerEncoder, ) from fairseq.modules import GradMultiply, LayerNorm from fairseq.tasks.hubert_pretraining import ( HubertPretrainingConfig, HubertPretrainingTask, ) from omegaconf import II logger = logging.getLogger(__name__) EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"]) MASKING_DISTRIBUTION_CHOICES = ChoiceEnum( ["static", "uniform", "normal", "poisson"] ) @dataclass class HubertConfig(FairseqDataclass): label_rate: int = II("task.label_rate") extractor_mode: EXTRACTOR_MODE_CHOICES = field( default="default", metadata={ "help": "mode for feature extractor. default has a single group " "norm with d groups in the first conv block, whereas layer_norm " "has layer norms in every block (meant to use with normalize=True)" }, ) encoder_layers: int = field( default=12, metadata={"help": "num encoder layers in the transformer"} ) encoder_embed_dim: int = field( default=768, metadata={"help": "encoder embedding dimension"} ) encoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "encoder embedding dimension for FFN"} ) encoder_attention_heads: int = field( default=12, metadata={"help": "num encoder attention heads"} ) activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="gelu", metadata={"help": "activation function to use"} ) # dropouts dropout: float = field( default=0.1, metadata={"help": "dropout probability for the transformer"}, ) attention_dropout: float = field( default=0.1, metadata={"help": "dropout probability for attention weights"}, ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"}, ) encoder_layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}, ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) dropout_features: float = field( default=0.0, metadata={ "help": "dropout to apply to the features (after feat extr)" }, ) final_dim: int = field( default=0, metadata={ "help": "project final representations and targets to this many " "dimensions. 
set to encoder_embed_dim is <= 0" }, ) untie_final_proj: bool = field( default=False, metadata={"help": "use separate projection for each target"}, ) layer_norm_first: bool = field( default=False, metadata={"help": "apply layernorm first in the transformer"}, ) conv_feature_layers: str = field( default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2", metadata={ "help": "string describing convolutional feature extraction " "layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" }, ) conv_bias: bool = field( default=False, metadata={"help": "include bias in conv encoder"} ) logit_temp: float = field( default=0.1, metadata={"help": "temperature to divide logits by"} ) target_glu: bool = field( default=False, metadata={"help": "adds projection + glu to targets"} ) feature_grad_mult: float = field( default=1.0, metadata={"help": "multiply feature extractor var grads by this"}, ) # masking mask_length: int = field(default=10, metadata={"help": "mask length"}) mask_prob: float = field( default=0.65, metadata={"help": "probability of replacing a token with mask"}, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: int = field( default=1, metadata={ "help": "min space between spans (if no overlap is enabled)" }, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"}, ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"}, ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"}, ) mask_channel_min_space: int = field( default=1, metadata={ "help": "min space between spans (if no overlap is enabled)" }, ) # positional embeddings conv_pos: int = field( default=128, metadata={ "help": "number of filters for convolutional positional embeddings" }, ) conv_pos_groups: int = field( default=16, metadata={ "help": "number of groups for convolutional positional embedding" }, ) latent_temp: Tuple[float, float, float] = field( default=(2, 0.5, 0.999995), metadata={"help": "legacy (to be removed)"}, ) # loss computation skip_masked: bool = field( default=False, metadata={"help": "skip computing losses over masked frames"}, ) skip_nomask: bool = field( default=False, metadata={"help": "skip computing losses over unmasked frames"}, ) @register_model("hubert", dataclass=HubertConfig) class HubertModel(BaseFairseqModel): def __init__( self, cfg: HubertConfig, task_cfg: HubertPretrainingConfig, dictionaries: List[Dictionary], ) -> None: super().__init__() logger.info(f"HubertModel Config: {cfg}") feature_enc_layers = eval(cfg.conv_feature_layers) # noqa self.embed = feature_enc_layers[-1][0] self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, 
conv_bias=cfg.conv_bias, ) feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers]) self.feat2tar_ratio = ( cfg.label_rate * feature_ds_rate / task_cfg.sample_rate ) self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim else None ) self.mask_prob = cfg.mask_prob self.mask_selection = cfg.mask_selection self.mask_other = cfg.mask_other self.mask_length = cfg.mask_length self.no_mask_overlap = cfg.no_mask_overlap self.mask_min_space = cfg.mask_min_space self.mask_channel_prob = cfg.mask_channel_prob self.mask_channel_selection = cfg.mask_channel_selection self.mask_channel_other = cfg.mask_channel_other self.mask_channel_length = cfg.mask_channel_length self.no_mask_channel_overlap = cfg.no_mask_channel_overlap self.mask_channel_min_space = cfg.mask_channel_min_space self.dropout_input = nn.Dropout(cfg.dropout_input) self.dropout_features = nn.Dropout(cfg.dropout_features) self.feature_grad_mult = cfg.feature_grad_mult self.logit_temp = cfg.logit_temp self.skip_masked = cfg.skip_masked self.skip_nomask = cfg.skip_nomask final_dim = ( cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim ) self.mask_emb = nn.Parameter( torch.FloatTensor(cfg.encoder_embed_dim).uniform_() ) self.encoder = TransformerEncoder(cfg) self.layer_norm = LayerNorm(self.embed) self.target_glu = None if cfg.target_glu: self.target_glu = nn.Sequential( nn.Linear(final_dim, final_dim * 2), nn.GLU() ) self.untie_final_proj = cfg.untie_final_proj if self.untie_final_proj: self.final_proj = nn.Linear( cfg.encoder_embed_dim, final_dim * len(dictionaries) ) else: self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim) # modules below are not needed during fine-tuning if any([d is None for d in dictionaries]): logger.info( "cannot find dictionary. 
assume will be used for fine-tuning" ) else: self.num_classes = [len(d) for d in dictionaries] self.label_embs_concat = nn.Parameter( torch.FloatTensor(sum(self.num_classes), final_dim) ) nn.init.uniform_(self.label_embs_concat) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: HubertConfig, task: HubertPretrainingTask): """Build a new model instance.""" model = HubertModel(cfg, task.cfg, task.dictionaries) return model def apply_mask(self, x, padding_mask, target_list): B, T, C = x.shape if self.mask_prob > 0: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, ) mask_indices = torch.from_numpy(mask_indices).to(x.device) x[mask_indices] = self.mask_emb else: mask_indices = None if self.mask_channel_prob > 0: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x[mask_channel_indices] = 0 return x, mask_indices def compute_nce(self, x, pos, negs): neg_is_pos = (pos == negs).all(-1) pos = pos.unsqueeze(0) targets = torch.cat([pos, negs], dim=0) logits = torch.cosine_similarity( x.float(), targets.float(), dim=-1 ).type_as(x) logits /= self.logit_temp if neg_is_pos.any(): logits[1:][neg_is_pos] = float("-inf") logits = logits.transpose(0, 1) # (num_x, num_cls+1) return logits def forward_features(self, source: torch.Tensor) -> torch.Tensor: if self.feature_grad_mult > 0: features = self.feature_extractor(source) if self.feature_grad_mult != 1.0: features = GradMultiply.apply(features, self.feature_grad_mult) else: with torch.no_grad(): features = self.feature_extractor(source) return features def forward_targets( self, features: torch.Tensor, target_list: List[torch.Tensor], ) -> Tuple[torch.Tensor, torch.Tensor]: # Trim features to ensure labels exist and then get aligned labels feat_tsz = features.size(2) targ_tsz = min([t.size(1) for t in target_list]) if self.feat2tar_ratio * feat_tsz > targ_tsz: feat_tsz = int(targ_tsz / self.feat2tar_ratio) features = features[..., :feat_tsz] target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio target_list = [t[:, target_inds.long()] for t in target_list] return features, target_list def forward_padding_mask( self, features: torch.Tensor, padding_mask: torch.Tensor, ) -> torch.Tensor: extra = padding_mask.size(1) % features.size(1) if extra > 0: padding_mask = padding_mask[:, :-extra] padding_mask = padding_mask.view( padding_mask.size(0), features.size(1), -1 ) padding_mask = padding_mask.all(-1) return padding_mask def forward( self, source: torch.Tensor, target_list: Optional[List[torch.Tensor]] = None, padding_mask: Optional[torch.Tensor] = None, mask: bool = True, features_only: bool = False, output_layer: Optional[int] = None, ) -> Dict[str, torch.Tensor]: """output layer is 1-based""" features = self.forward_features(source) if target_list is not None: features, target_list = self.forward_targets(features, target_list) features_pen = features.float().pow(2).mean() features = features.transpose(1, 2) 
features = self.layer_norm(features) unmasked_features = features.clone() if padding_mask is not None: padding_mask = self.forward_padding_mask(features, padding_mask) if self.post_extract_proj is not None: features = self.post_extract_proj(features) features = self.dropout_input(features) unmasked_features = self.dropout_features(unmasked_features) if mask: x, mask_indices = self.apply_mask( features, padding_mask, target_list ) else: x = features mask_indices = None # feature: (B, T, D), float # target: (B, T), long # x: (B, T, D), float # padding_mask: (B, T), bool # mask_indices: (B, T), bool x, _ = self.encoder( x, padding_mask=padding_mask, layer=None if output_layer is None else output_layer - 1 ) if features_only: return {"x": x, "padding_mask": padding_mask, "features": features} def compute_pred(proj_x, target, label_embs): # compute logits for the i-th label set y = torch.index_select(label_embs, 0, target.long()) negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1) if self.target_glu: y = self.target_glu(y) negs = self.target_glu(negs) # proj_x: (S, D) # y: (S, D) # negs: (Neg, S, D) return self.compute_nce(proj_x, y, negs) label_embs_list = self.label_embs_concat.split(self.num_classes, 0) if not self.skip_masked: masked_indices = torch.logical_and(~padding_mask, mask_indices) proj_x_m = self.final_proj(x[masked_indices]) if self.untie_final_proj: proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1) else: proj_x_m_list = [proj_x_m for _ in range(len(target_list))] logit_m_list = [ compute_pred(proj_x_m, t[masked_indices], label_embs_list[i]) for i, (proj_x_m, t) in enumerate( zip(proj_x_m_list, target_list) ) ] else: logit_m_list = [None for _ in target_list] if not self.skip_nomask: nomask_indices = torch.logical_and(~padding_mask, ~mask_indices) proj_x_u = self.final_proj(x[nomask_indices]) if self.untie_final_proj: proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1) else: proj_x_u_list = [proj_x_u for _ in range(len(target_list))] logit_u_list = [ compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i]) for i, (proj_x_u, t) in enumerate( zip(proj_x_u_list, target_list) ) ] else: logit_u_list = [None for _ in target_list] result = { "logit_m_list": logit_m_list, "logit_u_list": logit_u_list, "padding_mask": padding_mask, "features_pen": features_pen, } return result def extract_features( self, source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, mask: bool = False, ret_conv: bool = False, output_layer: Optional[int] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: res = self.forward( source, padding_mask=padding_mask, mask=mask, features_only=True, output_layer=output_layer, ) feature = res["features"] if ret_conv else res["x"] return feature, res["padding_mask"] def get_logits(self, net_output, is_masked=True): if is_masked: logits_list = net_output["logit_m_list"] else: logits_list = net_output["logit_u_list"] logits_list = [x.float() for x in logits_list if x is not None] return logits_list def get_targets(self, net_output, is_masked=True): logits_list = self.get_logits(net_output, is_masked) targets_list = [ x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list ] return targets_list def get_extra_losses(self, net_output): extra_losses = [] names = [] if "features_pen" in net_output: extra_losses.append(net_output["features_pen"]) names.append("features_pen") return extra_losses, names def remove_pretraining_modules(self): self.target_glu = None self.final_proj = None
bart_ls-main
fairseq-py/fairseq/models/hubert/hubert.py
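The core of HubertModel's loss above is compute_nce: each frame's projected feature is scored against its target codeword embedding and a set of candidate embeddings by cosine similarity divided by a temperature, and the result is read as classification logits whose correct class is always index 0. A minimal standalone sketch with made-up shapes (S frames, D dims, Neg negatives); it mirrors the method above but is not the fairseq code itself:

import torch
import torch.nn.functional as F

def compute_nce(x, pos, negs, logit_temp=0.1):
    """x, pos: (S, D); negs: (Neg, S, D) -> logits of shape (S, Neg + 1)."""
    neg_is_pos = (pos == negs).all(-1)                    # (Neg, S) mask of duplicated positives
    targets = torch.cat([pos.unsqueeze(0), negs], dim=0)  # (Neg + 1, S, D)
    logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1)
    logits /= logit_temp
    if neg_is_pos.any():
        logits[1:][neg_is_pos] = float("-inf")            # never reward a duplicate of the positive
    return logits.transpose(0, 1)                         # (S, Neg + 1), class 0 is the positive

S, D, Neg = 5, 16, 3
x = torch.randn(S, D)
pos = x + 0.01 * torch.randn(S, D)                        # positives close to the features
negs = torch.randn(Neg, S, D)
logits = compute_nce(x, pos, negs)
loss = F.cross_entropy(logits, torch.zeros(S, dtype=torch.long))

In the model the candidate set is the full codeword table for a label set, so the positive also appears among the "negatives"; the neg_is_pos mask above handles exactly that case.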
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from dataclasses import dataclass, field, fields from typing import List, Optional from fairseq import utils from fairseq.dataclass import FairseqDataclass, ChoiceEnum from omegaconf import II DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8) _NAME_PARSER = r"(decoder|encoder|quant_noise)_(.*)" @dataclass class EncDecBaseConfig(FairseqDataclass): embed_path: Optional[str] = field( default=None, metadata={"help": "path to pre-trained embedding"} ) embed_dim: Optional[int] = field( default=512, metadata={"help": "embedding dimension"} ) ffn_embed_dim: int = field( default=2048, metadata={"help": "embedding dimension for FFN"} ) layers: int = field(default=6, metadata={"help": "number of layers"}) attention_heads: int = field( default=8, metadata={"help": "number of attention heads"} ) normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each block"} ) learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings"} ) # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) layerdrop: float = field(default=0, metadata={"help": "LayerDrop probability"}) layers_to_keep: Optional[List[int]] = field( default=None, metadata={"help": "which layers to *keep* when pruning"} ) @dataclass class DecoderConfig(EncDecBaseConfig): input_dim: int = II("model.decoder.embed_dim") output_dim: int = field( default=II("model.decoder.embed_dim"), metadata={ "help": "decoder output dimension (extra linear layer if different from decoder embed dim)" }, ) def __post_init__(self): # II doesn't work if we are just creating the object outside of hydra so fix that if self.input_dim == II("model.decoder.embed_dim"): self.input_dim = self.embed_dim if self.output_dim == II("model.decoder.embed_dim"): self.output_dim = self.embed_dim @dataclass class QuantNoiseConfig(FairseqDataclass): pq: float = field( default=0.0, metadata={"help": "iterative PQ quantization noise at training time"}, ) pq_block_size: int = field( default=8, metadata={"help": "block size of quantization noise at training time"}, ) scalar: float = field( default=0.0, metadata={ "help": "scalar quantization noise and scalar quantization at training time" }, ) @dataclass class TransformerConfig(FairseqDataclass): activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="relu", metadata={"help": "activation function to use"}, ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN.", "alias": "--relu-dropout", }, ) adaptive_input: bool = False encoder: EncDecBaseConfig = EncDecBaseConfig() # TODO should really be in the encoder config max_source_positions: int = field( default=DEFAULT_MAX_SOURCE_POSITIONS, metadata={"help": "Maximum input length supported by the encoder"}, ) decoder: DecoderConfig = DecoderConfig() # TODO should really be in the decoder config max_target_positions: int = field( default=DEFAULT_MAX_TARGET_POSITIONS, metadata={"help": "Maximum output length supported by the decoder"}, ) share_decoder_input_output_embed: bool = 
field( default=False, metadata={"help": "share decoder input and output embeddings"} ) share_all_embeddings: bool = field( default=False, metadata={ "help": "share encoder, decoder and output embeddings (requires shared dictionary and embed dim)" }, ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if True, disables positional embeddings (outside self attention)" }, ) adaptive_softmax_cutoff: Optional[List[int]] = field( default=None, metadata={ "help": "list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion" }, ) adaptive_softmax_dropout: float = field( default=0.0, metadata={"help": "sets adaptive softmax dropout for the tail projections"}, ) adaptive_softmax_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) layernorm_embedding: bool = field( default=False, metadata={"help": "add layernorm to embedding"} ) tie_adaptive_weights: bool = field( default=False, metadata={ "help": "if set, ties the weights of adaptive softmax and adaptive input" }, ) tie_adaptive_proj: bool = field( default=False, metadata={ "help": "if set, ties the projection weights of adaptive softmax and adaptive input" }, ) no_scale_embedding: bool = field( default=False, metadata={"help": "if True, dont scale embeddings"} ) checkpoint_activations: bool = field( default=False, metadata={ "help": "checkpoint activations at each layer, which saves GPU memory usage at the cost of some additional compute" }, ) offload_activations: bool = field( default=False, metadata={ "help": "checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations." }, ) # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019) no_cross_attention: bool = field( default=False, metadata={"help": "do not perform cross-attention"} ) cross_self_attention: bool = field( default=False, metadata={"help": "perform cross+self-attention"} ) # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) quant_noise: QuantNoiseConfig = field(default=QuantNoiseConfig()) min_params_to_wrap: int = field( default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={ "help": "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." 
}, ) # DEPRECATED field, but some old checkpoints might have it char_inputs: bool = field( default=False, metadata={"help": "if set, model takes character ids as input"} ) relu_dropout: float = 0.0 # config for "BASE Layers: Simplifying Training of Large, Sparse Models" base_layers: Optional[int] = field( default=0, metadata={"help": "number of BASE layers in total"} ) base_sublayers: Optional[int] = field( default=1, metadata={"help": "number of sublayers in each BASE layer"} ) base_shuffle: Optional[int] = field( default=1, metadata={"help": "shuffle tokens between workers before computing assignment"}, ) export: bool = field( default=False, metadata={"help": "make the layernorm exportable with torchscript."}, ) # copied from transformer_lm but expected in transformer_decoder: no_decoder_final_norm: bool = field( default=False, metadata={"help": "don't add an extra layernorm after the last decoder block"}, ) ### ### @xwhan additional configs for long-context tasks #### # positional encodings alternatives alibi: bool = field( default=False, metadata={"help": "ALiBi position encodings"}, ) truncate_alibi: int = field( default=None, metadata={"help": "ALiBi position encodings"}, ) # xFormers integration use_xformers: bool = field( default=False, metadata={"help": "whether to use attention mechanisms from xFormers"}, ) attention_name: str = field( default="block_noglobal", metadata={"help": "choose attention mechanisms"} ) xformer_config: str = field( default="{}", metadata={"help": "additional hyperparameters of each attention mechanism"} ) # pooling layers at the top of the encoder pooling_layers: int = field( default=0, metadata={"help": "how many top layers in the transformer encoders to do pooling"} ) # We need to make this hierarchical dataclass like the flat namespace # __getattr__ and __setattr__ here allow backward compatibility # for subclasses of Transformer(Legacy) that depend on read/write on # the flat namespace. def __getattr__(self, name): match = re.match(_NAME_PARSER, name) if match: sub = getattr(self, match[1]) return getattr(sub, match[2]) raise AttributeError(f"invalid argument {name}.") def __setattr__(self, name, value): match = re.match(_NAME_PARSER, name) if match: sub = getattr(self, match[1]) setattr(sub, match[2], value) else: super().__setattr__(name, value) @staticmethod def _copy_keys(args, cls, prefix, seen): """ copy the prefixed keys (decoder_embed_dim) to the DC fields: decoder.embed_dim """ cfg = cls() for fld in fields(cls): # for all the fields in the DC, find the fields (e.g. embed_dim) # in the namespace with the prefix (e.g. decoder) # and set it on the dc. args_key = f"{prefix}_{fld.name}" if hasattr(args, args_key): seen.add(args_key) setattr(cfg, fld.name, getattr(args, args_key)) if hasattr(args, fld.name): seen.add(fld.name) setattr(cfg, fld.name, getattr(args, fld.name)) return cfg @classmethod def from_namespace(cls, args): if args is None: return None if not isinstance(args, cls): seen = set() config = cls() # currently, we can go generically from DC fields to args hierarchically # but we can't easily deconstruct a flat namespace to a hierarchical # DC. Mostly because we could have a sub-dc called `decoder-foo` that should not # go to the sub struct called `decoder`. There are ways to go around this, but let's keep it simple # for now. 
for fld in fields(cls): # concretelly, the transformer_config know what sub-dc it has, so we go through all the dc fields # and if it's one that has a sub-dc, we build that sub-dc with `copy_keys()` if fld.name == "decoder": if hasattr(args, "decoder"): # in some cases, the args we receive is already structured (as DictConfigs), so let's just build the correct DC seen.add("decoder") config.decoder = DecoderConfig(**args.decoder) else: config.decoder = cls._copy_keys( args, DecoderConfig, "decoder", seen ) elif fld.name == "encoder": # same but for encoder if hasattr(args, "encoder"): seen.add("encoder") config.encoder = EncDecBaseConfig(**args.encoder) else: config.encoder = cls._copy_keys( args, EncDecBaseConfig, "encoder", seen ) elif fld.name == "quant_noise": # same but for quant_noise if hasattr(args, "quant_noise"): seen.add("quant_noise") config.quant_noise = QuantNoiseConfig(**args.quant_noise) else: config.quant_noise = cls._copy_keys( args, QuantNoiseConfig, "quant_noise", seen ) elif hasattr(args, fld.name): # if it's not a structure field, it's just a normal field, copy it over seen.add(fld.name) setattr(config, fld.name, getattr(args, fld.name)) # we got all the fields defined in the dataclass, but # the argparse namespace might have extra args for two reasons: # - we are in a legacy class so all the args are not declared in the dataclass. Ideally once everyone has defined a dataclass for their model, we won't need this # - some places expect args to be there but never define them args_dict = args._asdict() if hasattr(args, '_asdict') else vars(args) if hasattr(args, '__dict__') else {} # namedtupled doesn't have __dict__ :-/ for key, value in args_dict.items(): if key not in seen: setattr(config, key, value) return config else: return args
bart_ls-main
fairseq-py/fairseq/models/transformer/transformer_config.py
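TransformerConfig.from_namespace above bridges the legacy flat argparse namespace (encoder_embed_dim, decoder_layers, ...) and the hierarchical dataclass (cfg.encoder.embed_dim, cfg.decoder.layers) by copying prefixed keys into sub-dataclasses via _copy_keys. A toy illustration of that mechanism, using hypothetical EncConfig/ModelConfig classes rather than the real fairseq dataclasses:

from argparse import Namespace
from dataclasses import dataclass, fields

@dataclass
class EncConfig:
    embed_dim: int = 512
    layers: int = 6

@dataclass
class ModelConfig:
    dropout: float = 0.1
    encoder: EncConfig = None

def copy_prefixed_keys(args, cls, prefix):
    cfg = cls()
    for fld in fields(cls):
        flat_name = f"{prefix}_{fld.name}"     # e.g. "encoder_embed_dim" -> encoder.embed_dim
        if hasattr(args, flat_name):
            setattr(cfg, fld.name, getattr(args, flat_name))
    return cfg

args = Namespace(dropout=0.3, encoder_embed_dim=1024, encoder_layers=12)
cfg = ModelConfig(dropout=args.dropout, encoder=copy_prefixed_keys(args, EncConfig, "encoder"))
assert cfg.encoder.embed_dim == 1024 and cfg.encoder.layers == 12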
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer.transformer_config import ( TransformerConfig, DEFAULT_MAX_SOURCE_POSITIONS, DEFAULT_MAX_TARGET_POSITIONS, DEFAULT_MIN_PARAMS_TO_WRAP, ) from fairseq.models.transformer.transformer_base import ( TransformerModelBase, ) @register_model("transformer") class TransformerModel(TransformerModelBase): """ This is the legacy implementation of the transformer model that uses argparse for configuration. """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } def moses_fastbpe(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe', } def spm(path): return { 'path': path, 'bpe': 'sentencepiece', 'tokenizer': 'space', } return { 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'), 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2', 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'), 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'), 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'), 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'), 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'), 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'), 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'), 'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'), 'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'), 'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'), 'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'), 'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'), 'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'), 'transformer.flores101.mm100.615M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz'), 'transformer.flores101.mm100.175M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz'), } # fmt: on def __init__(self, args, encoder, decoder): cfg = TransformerConfig.from_namespace(args) 
super().__init__(cfg, encoder, decoder) self.args = args @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" # we want to build the args recursively in this case. # do not set defaults so that settings defaults from various architectures still works gen_parser_from_dataclass( parser, TransformerConfig(), delete_default=True, with_prefix="" ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if getattr(args, "max_target_positions", None) is None: args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) args.share_decoder_input_output_embed = True if getattr(args, "offload_activations", False): args.checkpoint_activations = True # offloading implies checkpointing if not args.share_all_embeddings: args.min_params_to_wrap = getattr( args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP ) cfg = TransformerConfig.from_namespace(args) return super().build_model(cfg, task) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): return super().build_embedding( TransformerConfig.from_namespace(args), dictionary, embed_dim, path ) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return super().build_encoder( TransformerConfig.from_namespace(args), src_dict, embed_tokens ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return super().build_decoder( TransformerConfig.from_namespace(args), tgt_dict, embed_tokens ) # architectures @register_model_architecture("transformer", "transformer_tiny") def tiny_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64) args.encoder_layers = getattr(args, "encoder_layers", 2) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2) args.decoder_layers = getattr(args, "decoder_layers", 2) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2) return base_architecture(args) @register_model_architecture("transformer", "transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) 
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0) @register_model_architecture("transformer", "transformer_iwslt_de_en") def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_architecture(args) @register_model_architecture("transformer", "transformer_wmt_en_de") def transformer_wmt_en_de(args): base_architecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big") def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) 
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big") def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture("transformer", "transformer_wmt_en_de_big") def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t") def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args)
bart_ls-main
fairseq-py/fairseq/models/transformer/transformer_legacy.py
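The @register_model_architecture functions above all follow the same convention: each sets a hyperparameter only if the user has not already provided it, via getattr(args, name, default), and more specific architectures override a few values before delegating to base_architecture for the rest. A small self-contained sketch of that layering, with hypothetical my_base_architecture / my_big_architecture functions:

from argparse import Namespace

def my_base_architecture(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.dropout = getattr(args, "dropout", 0.1)

def my_big_architecture(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
    my_base_architecture(args)    # fills in everything still missing

args = Namespace(dropout=0.3)     # user only overrode dropout on the command line
my_big_architecture(args)
assert (args.encoder_embed_dim, args.encoder_layers, args.dropout) == (1024, 6, 0.3)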
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoder from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules import transformer_layer from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) # rewrite name for backward compatibility in `make_generation_fast_` def module_name_fordropout(module_name: str) -> str: if module_name == 'TransformerEncoderBase': return 'TransformerEncoder' else: return module_name class TransformerEncoderBase(FairseqEncoder): """ Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer is a :class:`TransformerEncoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, cfg, dictionary, embed_tokens): self.cfg = cfg super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self.dropout_module = FairseqDropout( cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__) ) self.encoder_layerdrop = cfg.encoder.layerdrop embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = cfg.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( cfg.max_source_positions, embed_dim, self.padding_idx, learned=cfg.encoder.learned_pos, block=False ) if not cfg.no_token_positional_embeddings and not cfg.alibi else None ) if cfg.layernorm_embedding: self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export) else: self.layernorm_embedding = None if not cfg.adaptive_input and cfg.quant_noise.pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), cfg.quant_noise.pq, cfg.quant_noise.pq_block_size, ) else: self.quant_noise = None if self.encoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.encoder_layerdrop) else: self.layers = nn.ModuleList([]) if cfg.pooling_layers: self.layers.extend( [self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers - cfg.pooling_layers)] ) self.layers.extend( [self.build_encoder_layer(cfg, True) for i in range(cfg.pooling_layers)] ) else: self.layers.extend( [self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)] ) self.num_layers = len(self.layers) if cfg.encoder.normalize_before: self.layer_norm = LayerNorm(embed_dim, export=cfg.export) else: self.layer_norm = None if cfg.alibi: from fairseq.models.long_transformers import get_slopes maxpos = self.max_source_positions attn_heads = cfg.encoder_attention_heads context_position = torch.arange(maxpos)[:, None] memory_position = torch.arange(maxpos)[None, :] relative_position = memory_position - context_position relative_position = torch.abs(relative_position) if cfg.truncate_alibi is not None: relative_position[relative_position >= cfg.truncate_alibi] = cfg.truncate_alibi relative_position 
= relative_position.unsqueeze(0).expand(attn_heads, -1,-1) slopes = torch.Tensor(get_slopes(attn_heads)) * -1 self.alibi = slopes.unsqueeze(1).unsqueeze(1) * relative_position self.alibi = self.alibi.view(1, attn_heads, maxpos, maxpos).cuda() def build_encoder_layer(self, cfg, pooling=False): if pooling: from fairseq.models.long_transformers import pooling_layer # layer = pooling_layer.TwoLevelEncoderLayer(cfg) layer = pooling_layer.PoolEncoderLayer(cfg) else: layer = transformer_layer.TransformerEncoderLayerBase(cfg) checkpoint = cfg.checkpoint_activations if checkpoint: offload_to_cpu = cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) # if we are checkpointing, enforce that FSDP always wraps the # checkpointed layer, regardless of layer size min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward_embedding( self, src_tokens, token_embedding: Optional[torch.Tensor] = None ): # embed tokens and positions if token_embedding is None: token_embedding = self.embed_tokens(src_tokens) x = embed = self.embed_scale * token_embedding if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) if self.quant_noise is not None: x = self.quant_noise(x) return x, embed def forward( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.Tensor] = None, # @xwhan in order to add global mask ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. """ return self.forward_scriptable( src_tokens, src_lengths, return_all_hiddens, token_embeddings, key_padding_mask ) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def forward_scriptable( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.Tensor] = None, # @xwhan in order to add global mask ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). 
token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. """ # compute padding mask if key_padding_mask is None: encoder_padding_mask = src_tokens.eq(self.padding_idx) key_padding_mask = encoder_padding_mask else: encoder_padding_mask = key_padding_mask.eq(1) # key_padding_mask might -1 elements has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any() x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings) # account for padding while computing the representation if has_pads: x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x)) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = [] if return_all_hiddens: encoder_states.append(x) # encoder layers for layer in self.layers: x = layer( # x, encoder_padding_mask=encoder_padding_mask if has_pads else None x, encoder_padding_mask=key_padding_mask, attn_bias=self.alibi if self.cfg.alibi else None # always pass key_padding_mask ) if return_all_hiddens: assert encoder_states is not None encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x) # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in # `forward` so we use a dictionary instead. # TorchScript does not support mixed values so the values are all lists. # The empty list is equivalent to None. src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape(-1, 1).contiguous() return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [encoder_padding_mask], # B x T "encoder_embedding": [encoder_embedding], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [src_lengths], } @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if len(encoder_out["encoder_out"]) == 0: new_encoder_out = [] else: new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] if len(encoder_out["encoder_padding_mask"]) == 0: new_encoder_padding_mask = [] else: new_encoder_padding_mask = [ encoder_out["encoder_padding_mask"][0].index_select(0, new_order) ] if len(encoder_out["encoder_embedding"]) == 0: new_encoder_embedding = [] else: new_encoder_embedding = [ encoder_out["encoder_embedding"][0].index_select(0, new_order) ] if len(encoder_out["src_tokens"]) == 0: src_tokens = [] else: src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)] if len(encoder_out["src_lengths"]) == 0: src_lengths = [] else: src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)] encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": src_tokens, # B x T "src_lengths": src_lengths, # B x 1 } def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: print("deleting {0}".format(weights_key)) del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) for i in range(self.num_layers): # update layer norms self.layers[i].upgrade_state_dict_named( state_dict, "{}.layers.{}".format(name, i) ) version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoder(TransformerEncoderBase): def __init__(self, args, dictionary, embed_tokens): self.args = args super().__init__( TransformerConfig.from_namespace(args), dictionary, embed_tokens, ) def build_encoder_layer(self, args): return super().build_encoder_layer( TransformerConfig.from_namespace(args), )
bart_ls-main
fairseq-py/fairseq/models/transformer/transformer_encoder.py
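For reference, a minimal standalone sketch (not part of the repo) of the symmetric ALiBi bias the encoder above builds when cfg.alibi is set; get_slopes is re-implemented here under the assumption that the head count is a power of two, whereas the original lives in fairseq.models.long_transformers.

import math
import torch

def get_slopes(n_heads):
    # Geometric per-head slopes as in the ALiBi paper; this simplified version
    # assumes n_heads is a power of two.
    start = 2 ** (-(2 ** -(math.log2(n_heads) - 3)))
    return [start * (start ** i) for i in range(n_heads)]

def symmetric_alibi_bias(max_pos, n_heads):
    # |i - j| distances scaled by a negative slope per head, matching the
    # encoder's (1, heads, max_pos, max_pos) attention bias.
    context = torch.arange(max_pos)[:, None]
    memory = torch.arange(max_pos)[None, :]
    distance = (memory - context).abs()
    slopes = torch.tensor(get_slopes(n_heads)) * -1
    bias = slopes[:, None, None] * distance.unsqueeze(0)
    return bias.view(1, n_heads, max_pos, max_pos)

print(symmetric_alibi_bias(max_pos=16, n_heads=8).shape)  # torch.Size([1, 8, 16, 16])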
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

from .transformer_config import (
    TransformerConfig,
    DEFAULT_MAX_SOURCE_POSITIONS,
    DEFAULT_MAX_TARGET_POSITIONS,
    DEFAULT_MIN_PARAMS_TO_WRAP,
)
from .transformer_decoder import TransformerDecoder, TransformerDecoderBase, Linear
from .transformer_encoder import TransformerEncoder, TransformerEncoderBase
from .transformer_legacy import (
    TransformerModel,
    base_architecture,
    tiny_architecture,
    transformer_iwslt_de_en,
    transformer_wmt_en_de,
    transformer_vaswani_wmt_en_de_big,
    transformer_vaswani_wmt_en_fr_big,
    transformer_wmt_en_de_big,
    transformer_wmt_en_de_big_t2t,
)
from .transformer_base import TransformerModelBase, Embedding


__all__ = [
    "TransformerModelBase",
    "TransformerConfig",
    "TransformerDecoder",
    "TransformerDecoderBase",
    "TransformerEncoder",
    "TransformerEncoderBase",
    "TransformerModel",
    "Embedding",
    "Linear",
    "base_architecture",
    "tiny_architecture",
    "transformer_iwslt_de_en",
    "transformer_wmt_en_de",
    "transformer_vaswani_wmt_en_de_big",
    "transformer_vaswani_wmt_en_fr_big",
    "transformer_wmt_en_de_big",
    "transformer_wmt_en_de_big_t2t",
    "DEFAULT_MAX_SOURCE_POSITIONS",
    "DEFAULT_MAX_TARGET_POSITIONS",
    "DEFAULT_MIN_PARAMS_TO_WRAP",
]
bart_ls-main
fairseq-py/fairseq/models/transformer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqIncrementalDecoder from fairseq.models.transformer import TransformerConfig from fairseq.modules import ( AdaptiveSoftmax, BaseLayer, FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules import transformer_layer from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor # rewrite name for backward compatibility in `make_generation_fast_` def module_name_fordropout(module_name: str) -> str: if module_name == 'TransformerDecoderBase': return 'TransformerDecoder' else: return module_name class TransformerDecoderBase(FairseqIncrementalDecoder): """ Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, cfg, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None, ): self.cfg = cfg super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self._future_mask = torch.empty(0) self.dropout_module = FairseqDropout( cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__) ) self.decoder_layerdrop = cfg.decoder.layerdrop self.share_input_output_embed = cfg.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = cfg.decoder.embed_dim self.embed_dim = embed_dim self.output_embed_dim = cfg.decoder.output_dim self.padding_idx = embed_tokens.padding_idx self.max_target_positions = cfg.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim) if not cfg.adaptive_input and cfg.quant_noise.pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), cfg.quant_noise.pq, cfg.quant_noise.pq_block_size, ) else: self.quant_noise = None self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( self.max_target_positions, embed_dim, self.padding_idx, learned=cfg.decoder.learned_pos, ) if not cfg.no_token_positional_embeddings and not cfg.alibi else None ) if cfg.layernorm_embedding: self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export) else: self.layernorm_embedding = None self.cross_self_attention = cfg.cross_self_attention if self.decoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.decoder_layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend( [ self.build_decoder_layer(cfg, no_encoder_attn) for _ in range(cfg.decoder.layers) ] ) self.num_layers = len(self.layers) if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm: self.layer_norm = LayerNorm(embed_dim, export=cfg.export) else: self.layer_norm = None self.project_out_dim = ( Linear(embed_dim, self.output_embed_dim, bias=False) if embed_dim != 
self.output_embed_dim and not cfg.tie_adaptive_weights else None ) self.adaptive_softmax = None self.output_projection = output_projection if self.output_projection is None: self.build_output_projection(cfg, dictionary, embed_tokens) # ALiBi position encodings if cfg.alibi: from fairseq.models.long_transformers import get_slopes maxpos = self.max_target_positions attn_heads = cfg.decoder_attention_heads slopes = torch.Tensor(get_slopes(attn_heads)) #In the next line, the part after the * is what constructs the diagonal matrix (right matrix in Figure 3 in the paper). #If you run it you'll see that it doesn't exactly print out the same matrix as we have in Figure 3, but one where all rows are identical. #This works because the softmax operation is invariant to translation, and our bias functions are always linear. self.alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(maxpos).unsqueeze(0).unsqueeze(0).expand(attn_heads, -1, -1) self.alibi = self.alibi.view(attn_heads, 1, maxpos) # breakpoint() # self.alibi = self.alibi.repeat(args.max_tokens//maxpos, 1, 1) # batch_size, 1, 1 def build_output_projection(self, cfg, dictionary, embed_tokens): if cfg.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int), dropout=cfg.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None, factor=cfg.adaptive_softmax_factor, tie_proj=cfg.tie_adaptive_proj, ) elif self.share_input_output_embed: self.output_projection = nn.Linear( self.embed_tokens.weight.shape[1], self.embed_tokens.weight.shape[0], bias=False, ) self.output_projection.weight = self.embed_tokens.weight else: self.output_projection = nn.Linear( self.output_embed_dim, len(dictionary), bias=False ) nn.init.normal_( self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5 ) num_base_layers = cfg.base_layers for i in range(num_base_layers): self.layers.insert( ((i + 1) * cfg.decoder.layers) // (num_base_layers + 1), BaseLayer(cfg), ) def build_decoder_layer(self, cfg, no_encoder_attn=False): layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn) checkpoint = cfg.checkpoint_activations if checkpoint: offload_to_cpu = cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) # if we are checkpointing, enforce that FSDP always wraps the # checkpointed layer, regardless of layer size min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention, should be of size T x B x C incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, full_context_alignment=full_context_alignment, alignment_layer=alignment_layer, alignment_heads=alignment_heads, ) if not features_only: x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): return self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) """ A scriptable subclass of this class has an extract_features method and calls super().extract_features, but super() is not supported in torchscript. A copy of this function is made to be used in the subclass instead. """ def extract_features_scriptable( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ bs, slen = prev_output_tokens.size() if alignment_layer is None: alignment_layer = self.num_layers - 1 enc: Optional[Tensor] = None padding_mask: Optional[Tensor] = None if encoder_out is not None and len(encoder_out["encoder_out"]) > 0: enc = encoder_out["encoder_out"][0] assert ( enc.size()[1] == bs ), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}" if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0: padding_mask = encoder_out["encoder_padding_mask"][0] # embed positions positions = None if self.embed_positions is not None: positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.quant_noise is not None: x = self.quant_noise(x) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: # should be None when passed ALiBi x += positions if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) self_attn_padding_mask: Optional[Tensor] = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn: Optional[Tensor] = None inner_states: List[Optional[Tensor]] = [x] for idx, layer in enumerate(self.layers): if incremental_state is None and not full_context_alignment: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = layer( x, enc, padding_mask, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": [attn], "inner_states": inner_states} def output_layer(self, features): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary return self.output_projection(features) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround. 
bsz = tensor.size(1) if ( self._future_mask.size(0) == 0 or (not self._future_mask.device == tensor.device) or (not self.cfg.alibi and self._future_mask.size(0) < dim) or (self.cfg.alibi and self._future_mask.size(1) < dim) # sequence length gets longer or (self.cfg.alibi and self._future_mask.size(0) < bsz*self.cfg.decoder_attention_heads) # batch size change during train/valid ): self._future_mask = torch.triu( utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1 ) if self.cfg.alibi: self._future_mask = self._future_mask.unsqueeze(0) + self.alibi.repeat(bsz, 1, 1)[:, :dim, :dim] self._future_mask = self._future_mask.to(tensor) if self.cfg.alibi: self._future_mask = self._future_mask[:bsz*self.cfg.decoder_attention_heads, :dim, :dim] return self._future_mask return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) if f"{name}.output_projection.weight" not in state_dict: if self.share_input_output_embed: embed_out_key = f"{name}.embed_tokens.weight" else: embed_out_key = f"{name}.embed_out" if embed_out_key in state_dict: state_dict[f"{name}.output_projection.weight"] = state_dict[ embed_out_key ] if not self.share_input_output_embed: del state_dict[embed_out_key] for i in range(self.num_layers): # update layer norms layer_norm_map = { "0": "self_attn_layer_norm", "1": "encoder_attn_layer_norm", "2": "final_layer_norm", } for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) if k in state_dict: state_dict[ "{}.layers.{}.{}.{}".format(name, i, new, m) ] = state_dict[k] del state_dict[k] version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m class TransformerDecoder(TransformerDecoderBase): def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None, ): self.args = args super().__init__( TransformerConfig.from_namespace(args), dictionary, embed_tokens, no_encoder_attn=no_encoder_attn, output_projection=output_projection, ) def build_output_projection(self, args, dictionary, embed_tokens): super().build_output_projection( TransformerConfig.from_namespace(args), dictionary, embed_tokens ) def build_decoder_layer(self, args, no_encoder_attn=False): return super().build_decoder_layer( TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn )
bart_ls-main
fairseq-py/fairseq/models/transformer/transformer_decoder.py
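A rough sketch of the additive mask buffered_future_mask assembles when cfg.alibi is enabled: a causal -inf triangle plus a per-head bias that depends only on the key index (softmax shift-invariance makes this equivalent to a distance penalty, as the in-code comment notes). The helper name below is made up for illustration.

import torch

def causal_alibi_mask(slopes, seq_len, batch_size):
    # slopes: (heads,) tensor of ALiBi slopes.
    n_heads = slopes.numel()
    future = torch.triu(torch.full((seq_len, seq_len), float("-inf")), 1)
    # The bias depends only on the key index j; within each causal row,
    # slope * j differs from slope * (j - i) by a constant that softmax ignores.
    alibi = slopes.view(n_heads, 1, 1) * torch.arange(seq_len).view(1, 1, seq_len)
    mask = future.unsqueeze(0) + alibi      # (heads, seq, seq)
    return mask.repeat(batch_size, 1, 1)    # (batch * heads, seq, seq)

slopes = torch.tensor([2.0 ** -(i + 1) for i in range(4)])  # example slopes for 4 heads
print(causal_alibi_mask(slopes, seq_len=6, batch_size=2).shape)  # torch.Size([8, 6, 6])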
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from fairseq import utils from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoderDecoderModel from fairseq.models.transformer import ( TransformerEncoderBase, TransformerDecoderBase, TransformerConfig, ) from torch import Tensor class TransformerModelBase(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, cfg, encoder, decoder): super().__init__(encoder, decoder) self.cfg = cfg self.supports_align_args = True @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" # we want to build the args recursively in this case. gen_parser_from_dataclass( parser, TransformerConfig(), delete_default=False, with_prefix="" ) @classmethod def build_model(cls, cfg, task): """Build a new model instance.""" # -- TODO T96535332 # bug caused by interaction between OmegaConf II and argparsing cfg.decoder.input_dim = int(cfg.decoder.input_dim) cfg.decoder.output_dim = int(cfg.decoder.output_dim) # -- if cfg.encoder.layers_to_keep: cfg.encoder.layers = len(cfg.encoder.layers_to_keep.split(",")) if cfg.decoder.layers_to_keep: cfg.decoder.layers = len(cfg.decoder.layers_to_keep.split(",")) src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if cfg.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if cfg.encoder.embed_dim != cfg.decoder.embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if cfg.decoder.embed_path and ( cfg.decoder.embed_path != cfg.encoder.embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = cls.build_embedding( cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path ) decoder_embed_tokens = encoder_embed_tokens cfg.share_decoder_input_output_embed = True else: encoder_embed_tokens = cls.build_embedding( cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path ) decoder_embed_tokens = cls.build_embedding( cfg, tgt_dict, cfg.decoder.embed_dim, cfg.decoder.embed_path ) if cfg.offload_activations: cfg.checkpoint_activations = True # offloading implies checkpointing encoder = cls.build_encoder(cfg, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens) if not cfg.share_all_embeddings: # fsdp_wrap is a no-op when --ddp-backend != fully_sharded encoder = fsdp_wrap(encoder, min_num_params=cfg.min_params_to_wrap) decoder = fsdp_wrap(decoder, min_num_params=cfg.min_params_to_wrap) return cls(cfg, encoder, decoder) @classmethod def build_embedding(cls, cfg, dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) 
utils.load_embedding(embed_dict, dictionary, emb) return emb @classmethod def build_encoder(cls, cfg, src_dict, embed_tokens): return TransformerEncoderBase(cfg, src_dict, embed_tokens) @classmethod def build_decoder(cls, cfg, tgt_dict, embed_tokens): return TransformerDecoderBase( cfg, tgt_dict, embed_tokens, no_encoder_attn=cfg.no_cross_attention, ) # TorchScript doesn't support optional arguments with variable length (**kwargs). # Current workaround is to add union of all arguments in child classes. def forward( self, src_tokens, src_lengths, prev_output_tokens, return_all_hiddens: bool = True, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Run the forward pass for an encoder-decoder model. Copied from the base class, but without ``**kwargs``, which are not supported by TorchScript. """ encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens ) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) return decoder_out # Since get_normalized_probs is in the Fairseq Model which is not scriptable, # I rewrite the get_normalized_probs from Base Class to call the # helper function in the Base Class. @torch.jit.export def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m
bart_ls-main
fairseq-py/fairseq/models/transformer/transformer_base.py
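A small usage sketch of the Embedding helper defined above and of the weight sharing build_model applies when cfg.share_all_embeddings is set; it assumes this repo's fairseq package is importable, and the vocabulary size and padding index are invented for the example.

import torch.nn as nn
from fairseq.models.transformer import Embedding

# Hypothetical sizes; in practice they come from the task dictionaries.
vocab_size, embed_dim, padding_idx = 1000, 64, 1

emb = Embedding(vocab_size, embed_dim, padding_idx)
assert emb.weight[padding_idx].abs().sum() == 0  # padding row is zero-initialized

# With cfg.share_all_embeddings, encoder and decoder reuse the same module:
encoder_embed_tokens = decoder_embed_tokens = emb
print(isinstance(encoder_embed_tokens, nn.Embedding))  # True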
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.utils import new_arange # -------------- Helper Functions --------------------------------------------------- # def load_libnat(): try: from fairseq import libnat_cuda return libnat_cuda, True except ImportError as e: print(str(e) + "... fall back to CPU version") try: from fairseq import libnat return libnat, False except ImportError as e: import sys sys.stderr.write( "ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n" ) raise e def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): libnat, use_cuda = load_libnat() def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels( out_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int(), ), ) masked_tgt_masks = masked_tgt_masks.bool() & out_masks mask_ins_targets = mask_ins_targets.type_as(in_tokens)[ :, 1 : in_masks.size(1) ].masked_fill_(~in_masks[:, 1:], 0) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx): in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor( masked_tgt_masks, device=out_tokens.device ).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets if use_cuda: return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx) return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx) def _get_del_targets(in_tokens, out_tokens, padding_idx): libnat, use_cuda = load_libnat() def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) word_del_targets = libnat.generate_deletion_labels( in_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int(), ), ) word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_( ~in_masks, 0 ) return word_del_targets def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx): out_seq_len = out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in 
enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] # transform to tensor word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets if use_cuda: return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx) return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx) def _apply_ins_masks( in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx ): in_masks = in_tokens.ne(padding_idx) in_lengths = in_masks.sum(1) # HACK: hacky way to shift all the paddings to eos first. in_tokens.masked_fill_(~in_masks, eos_idx) mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0) out_lengths = in_lengths + mask_ins_pred.sum(1) out_max_len = out_lengths.max() out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None] reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) out_tokens = ( in_tokens.new_zeros(in_tokens.size(0), out_max_len) .fill_(padding_idx) .masked_fill_(out_masks, unk_idx) ) out_tokens[:, 0] = in_tokens[:, 0] out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) out_scores = None if in_scores is not None: in_scores.masked_fill_(~in_masks, 0) out_scores = in_scores.new_zeros(*out_tokens.size()) out_scores[:, 0] = in_scores[:, 0] out_scores.scatter_(1, reordering, in_scores[:, 1:]) return out_tokens, out_scores def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx): word_ins_masks = in_tokens.eq(unk_idx) out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks]) if in_scores is not None: out_scores = in_scores.masked_scatter( word_ins_masks, word_ins_scores[word_ins_masks] ) else: out_scores = None return out_tokens, out_scores def _apply_del_words( in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx ): # apply deletion to a tensor in_masks = in_tokens.ne(padding_idx) bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(~in_masks, 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1] out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) out_scores = None if in_scores is not None: out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) out_attn = None if in_attn is not None: _mask = word_del_pred[:, :, None].expand_as(in_attn) _reordering = reordering[:, :, None].expand_as(in_attn) out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering) return out_tokens, out_scores, out_attn def _skip(x, mask): """ Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. 
""" if isinstance(x, int): return x if x is None: return None if isinstance(x, torch.Tensor): if x.size(0) == mask.size(0): return x[mask] elif x.size(1) == mask.size(0): return x[:, mask] if isinstance(x, list): return [_skip(x_i, mask) for x_i in x] if isinstance(x, dict): return {k: _skip(v, mask) for k, v in x.items()} raise NotImplementedError def _skip_encoder_out(encoder, encoder_out, mask): if not mask.any(): return encoder_out else: return encoder.reorder_encoder_out( encoder_out, mask.nonzero(as_tuple=False).squeeze() ) def _fill(x, mask, y, padding_idx): """ Filling tensor x with y at masked positions (dim=0). """ if x is None: return y assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): dims = [x.size(0), y.size(1) - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1) x[mask] = y elif x.size(1) > y.size(1): x[mask] = padding_idx if x.dim() == 2: x[mask, : y.size(1)] = y else: x[mask, : y.size(1), :] = y else: x[mask] = y return x
bart_ls-main
fairseq-py/fairseq/models/nat/levenshtein_utils.py
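A toy run of _apply_del_words from the utilities above, assuming this repo's fairseq package is importable; the token ids and special-symbol indices are made up for the example.

import torch
from fairseq.models.nat.levenshtein_utils import _apply_del_words

pad, bos, eos = 1, 0, 2
# One sentence: <s> a b c </s>, encoded as 0, 5, 6, 7, 2.
in_tokens = torch.tensor([[bos, 5, 6, 7, eos]])
in_scores = torch.zeros_like(in_tokens, dtype=torch.float)
# Predict deletion of the middle token ("b"); bos/eos are never deleted.
word_del_pred = torch.tensor([[0, 0, 1, 0, 0]], dtype=torch.bool)

out_tokens, out_scores, _ = _apply_del_words(
    in_tokens, in_scores, None, word_del_pred, pad, bos, eos
)
print(out_tokens)  # tensor([[0, 5, 7, 2, 1]]) -- "b" removed, right-padded with pad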
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1): # s: input batch # V: vocabulary size rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device) choices = torch.rand(size=s.size(), device=s.device) choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1) replace = choices < beta / 3 repeat = (choices >= beta / 3) & (choices < beta * 2 / 3) swap = (choices >= beta * 2 / 3) & (choices < beta) safe = choices >= beta for i in range(s.size(1) - 1): rand_word = rand_words[:, i] next_word = s[:, i + 1] self_word = s[:, i] replace_i = replace[:, i] swap_i = swap[:, i] & (next_word != 3) repeat_i = repeat[:, i] & (next_word != 3) safe_i = safe[:, i] | ((next_word == 3) & (~replace_i)) s[:, i] = ( self_word * (safe_i | repeat_i).long() + next_word * swap_i.long() + rand_word * replace_i.long() ) s[:, i + 1] = ( next_word * (safe_i | replace_i).long() + self_word * (swap_i | repeat_i).long() ) return s def gumbel_noise(input, TINY=1e-8): return ( input.new_zeros(*input.size()) .uniform_() .add_(TINY) .log_() .neg_() .add_(TINY) .log_() .neg_() ) @register_model("iterative_nonautoregressive_transformer") class IterNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) parser.add_argument( "--train-step", type=int, help="number of refinement iterations during training", ) parser.add_argument( "--dae-ratio", type=float, help="the probability of switching to the denoising auto-encoder loss", ) parser.add_argument( "--stochastic-approx", action="store_true", help="sampling from the decoder as the inputs for next iteration", ) @classmethod def build_model(cls, args, task): model = super().build_model(args, task) model.train_step = getattr(args, "train_step", 4) model.dae_ratio = getattr(args, "dae_ratio", 0.5) model.stochastic_approx = getattr(args, "stochastic_approx", False) return model def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): B, T = prev_output_tokens.size() # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_outs, word_ins_tgts, word_ins_masks = [], [], [] for t in range(self.train_step): word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, step=t, ) word_ins_tgt = tgt_tokens word_ins_mask = word_ins_tgt.ne(self.pad) word_ins_outs.append(word_ins_out) word_ins_tgts.append(word_ins_tgt) word_ins_masks.append(word_ins_mask) if t < (self.train_step - 1): # prediction for next iteration if self.stochastic_approx: word_ins_prediction = ( word_ins_out + gumbel_noise(word_ins_out) ).max(-1)[1] else: word_ins_prediction = word_ins_out.max(-1)[1] prev_output_tokens = prev_output_tokens.masked_scatter( word_ins_mask, word_ins_prediction[word_ins_mask] ) if self.dae_ratio > 0: # we do not perform denoising for the first iteration corrputed = ( torch.rand(size=(B,), device=prev_output_tokens.device) < self.dae_ratio ) corrputed_tokens = _sequential_poisoning( 
tgt_tokens[corrputed], len(self.tgt_dict), 0.33, self.bos, self.eos, self.pad, ) prev_output_tokens[corrputed] = corrputed_tokens # concat everything word_ins_out = torch.cat(word_ins_outs, 0) word_ins_tgt = torch.cat(word_ins_tgts, 0) word_ins_mask = torch.cat(word_ins_masks, 0) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer" ) def inat_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) args.train_step = getattr(args, "train_step", 4) args.dae_ratio = getattr(args, "dae_ratio", 0.5) args.stochastic_approx = getattr(args, "stochastic_approx", False) @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer_wmt_en_de", ) def iter_nat_wmt_en_de(args): inat_base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/nat/iterative_nonautoregressive_transformer.py
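The stochastic_approx branch above relies on the Gumbel-max trick; the sketch below re-implements gumbel_noise (same double-log construction) to show that argmax over logits plus this noise draws a sample from softmax(logits), which is how inputs for the next refinement step are picked.

import torch

def gumbel_noise(x, tiny=1e-8):
    # Same construction as the model's helper: -log(tiny + -log(U + tiny)), U ~ Uniform(0, 1).
    u = torch.rand_like(x)
    return -torch.log(tiny - torch.log(u + tiny))

logits = torch.tensor([[2.0, 0.5, -1.0]])
# Gumbel-max trick: this argmax is a sample from softmax(logits).
print((logits + gumbel_noise(logits)).max(-1)[1])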
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params def ensemble_encoder(func): def wrapper(self, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func(self, *args, **kwargs) encoder_outs = [func(model, *args, **kwargs, return_all_hiddens=True) for model in self.ensemble_models] _encoder_out = encoder_outs[0].copy() def stack(key): outs = [e[key][0] for e in encoder_outs] return [torch.stack(outs, -1) if outs[0] is not None else None] _encoder_out["encoder_out"] = stack("encoder_out") _encoder_out["encoder_embedding"] = stack("encoder_embedding") num_layers = len(_encoder_out["encoder_states"]) if num_layers > 0: _encoder_out["encoder_states"] = [ torch.stack([e["encoder_states"][i] for e in encoder_outs], -1) for i in range(num_layers) ] return _encoder_out return wrapper def ensemble_decoder(func): def wrapper(self, normalize=False, encoder_out=None, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func( self, normalize=normalize, encoder_out=encoder_out, *args, **kwargs ) def _replace(encoder_out, new_val): new_encoder_out = encoder_out.copy() new_encoder_out["encoder_out"] = [new_val] return new_encoder_out action_outs = [ func( model, normalize=normalize, encoder_out=_replace( encoder_out, encoder_out["encoder_out"][0][:, :, :, i] ), *args, **kwargs ) for i, model in enumerate(self.ensemble_models) ] if not isinstance(action_outs[0], tuple): # return multiple values action_outs = [[a] for a in action_outs] else: action_outs = [list(a) for a in action_outs] ensembled_outs = [] for i in range(len(action_outs[0])): if i == 0 and normalize: ensembled_outs += [ torch.logsumexp( torch.stack([a[i] for a in action_outs], -1), dim=-1 ) - math.log(len(self.ensemble_models)) ] elif action_outs[0][i] is not None: ensembled_outs += [torch.stack([a[i] for a in action_outs], -1)] else: ensembled_outs += [None] if len(ensembled_outs) == 1: return ensembled_outs[0] return tuple(ensembled_outs) return wrapper class FairseqNATModel(TransformerModel): """ Abstract class for all nonautoregressive-based models """ def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.tgt_dict = decoder.dictionary self.bos = decoder.dictionary.bos() self.eos = decoder.dictionary.eos() self.pad = decoder.dictionary.pad() self.unk = decoder.dictionary.unk() self.ensemble_models = None @property def allow_length_beam(self): return False @property def allow_ensemble(self): return True def enable_ensemble(self, models): self.encoder.ensemble_models = [m.encoder for m in models] self.decoder.ensemble_models = [m.decoder for m in models] @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = FairseqNATDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder @classmethod def build_encoder(cls, args, src_dict, embed_tokens): encoder = FairseqNATEncoder(args, src_dict, embed_tokens) if getattr(args, "apply_bert_init", 
False): encoder.apply(init_bert_params) return encoder def forward_encoder(self, encoder_inputs): return self.encoder(*encoder_inputs) def forward_decoder(self, *args, **kwargs): return NotImplementedError def initialize_output_tokens(self, *args, **kwargs): return NotImplementedError def forward(self, *args, **kwargs): return NotImplementedError class FairseqNATEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.ensemble_models = None @ensemble_encoder def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class FairseqNATDecoder(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) self.ensemble_models = None
bart_ls-main
fairseq-py/fairseq/models/nat/fairseq_nat_model.py
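A sketch of the log-space averaging ensemble_decoder performs when normalize=True: per-model log-probabilities stacked on a trailing dimension are combined with logsumexp minus log(num_models), i.e. the log of the mean probability.

import math
import torch

n_models, batch, vocab = 3, 2, 7
per_model = torch.log_softmax(torch.randn(n_models, batch, vocab), dim=-1)
stacked = per_model.permute(1, 2, 0)  # (batch, vocab, models), like torch.stack(..., -1)
ensembled = torch.logsumexp(stacked, dim=-1) - math.log(n_models)

# The result is still a valid log-distribution over the vocabulary.
assert torch.allclose(ensembled.exp().sum(-1), torch.ones(batch), atol=1e-5)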
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" from .fairseq_nat_model import * from .nonautoregressive_transformer import * from .nat_crf_transformer import * from .iterative_nonautoregressive_transformer import * from .cmlm_transformer import * from .levenshtein_transformer import * from .insertion_transformer import *
bart_ls-main
fairseq-py/fairseq/models/nat/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules import TransformerDecoderLayer from fairseq.modules.transformer_sentence_encoder import init_bert_params from .levenshtein_utils import ( _apply_del_words, _apply_ins_masks, _apply_ins_words, _fill, _get_del_targets, _get_ins_targets, _skip, _skip_encoder_out, ) @register_model("levenshtein_transformer") class LevenshteinTransformerModel(FairseqNATModel): @property def allow_length_beam(self): return False @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument( "--early-exit", default="6,6,6", type=str, help="number of decoder layers before word_del, mask_ins, word_ins", ) parser.add_argument( "--no-share-discriminator", action="store_true", help="separate parameters for discriminator", ) parser.add_argument( "--no-share-maskpredictor", action="store_true", help="separate parameters for mask-predictor", ) parser.add_argument( "--share-discriminator-maskpredictor", action="store_true", help="share the parameters for both mask-predictor and discriminator", ) parser.add_argument( "--sampling-for-deletion", action="store_true", help="instead of argmax, use sampling to predict the tokens", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." 
# encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk ) mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) mask_ins_out, _ = self.decoder.forward_mask_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_out, _ = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=masked_tgt_tokens, encoder_out=encoder_out, ) # make online prediction if self.decoder.sampling_for_deletion: word_predictions = torch.multinomial( F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1 ).view(word_ins_out.size(0), -1) else: word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1] word_predictions.masked_scatter_( ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks] ) # generate training labels for deletion word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad) word_del_out, _ = self.decoder.forward_word_del( normalize=False, prev_output_tokens=word_predictions, encoder_out=encoder_out, ) word_del_masks = word_predictions.ne(self.pad) return { "mask_ins": { "out": mask_ins_out, "tgt": mask_ins_targets, "mask": mask_ins_masks, "ls": 0.01, }, "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": masked_tgt_masks, "ls": self.args.label_smoothing, "nll_loss": True, }, "word_del": { "out": word_del_out, "tgt": word_del_targets, "mask": word_del_masks, }, } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn history = decoder_out.history bsz = output_tokens.size(0) if max_ratio is None: max_lens = torch.zeros_like(output_tokens).fill_(255) else: if not encoder_out["encoder_padding_mask"]: max_src_len = encoder_out["encoder_out"].size(0) src_lens = encoder_out["encoder_out"].new(bsz).fill_(max_src_len) else: src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip word_del_score, word_del_attn = self.decoder.forward_word_del( normalize=True, prev_output_tokens=_skip(output_tokens, can_del_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word), ) word_del_pred = word_del_score.max(-1)[1].bool() _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.0) if history is not None: history.append(output_tokens.clone()) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: mask_ins_score, _ = self.decoder.forward_mask_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_mask), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask), ) if eos_penalty > 0.0: mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty mask_ins_pred = mask_ins_score.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, 
max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores = _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) if history is not None: history.append(output_tokens.clone()) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: word_ins_score, word_ins_attn = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word), ) word_ins_score, word_ins_pred = word_ins_score.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) if history is not None: history.append(output_tokens.clone()) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=history, ) def initialize_output_tokens(self, encoder_out, src_tokens): initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2) initial_output_tokens[:, 0] = self.bos initial_output_tokens[:, 1] = self.eos initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out["encoder_out"][0]) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None, ) class LevenshteinTransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None) self.embed_word_del = Embedding(2, self.output_embed_dim, None) # del_word, ins_mask, ins_word self.early_exit = [int(i) for i in args.early_exit.split(",")] assert len(self.early_exit) == 3 # copy layers for mask-predict/deletion self.layers_msk = None if getattr(args, "no_share_maskpredictor", False): self.layers_msk = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[1]) ] ) self.layers_del = None if getattr(args, "no_share_discriminator", False): self.layers_del = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[0]) ] ) if getattr(args, "share_discriminator_maskpredictor", False): assert getattr( args, "no_share_discriminator", False ), "must set saperate discriminator" self.layers_msk = self.layers_del def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, layers=None, **unused ): """ Similar to *forward* but only return features. 
Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) layers = self.layers if layers is None else layers early_exit = len(layers) if early_exit is None else early_exit for _, layer in enumerate(layers[:early_exit]): x, attn, _ = layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} @ensemble_decoder def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[1], layers=self.layers_msk, **unused ) features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) decoder_out = F.linear(features_cat, self.embed_mask_ins.weight) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[2], layers=self.layers, **unused ) decoder_out = self.output_layer(features) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @ensemble_decoder def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[0], layers=self.layers_del, **unused ) decoder_out = F.linear(features, self.embed_word_del.weight) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @register_model_architecture("levenshtein_transformer", "levenshtein_transformer") def levenshtein_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, 
"decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.early_exit = getattr(args, "early_exit", "6,6,6") args.no_share_discriminator = getattr(args, "no_share_discriminator", False) args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) args.share_discriminator_maskpredictor = getattr( args, "share_discriminator_maskpredictor", False ) args.no_share_last_layer = getattr(args, "no_share_last_layer", False) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de" ) def levenshtein_transformer_wmt_en_de(args): levenshtein_base_architecture(args) # similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big" ) def levenshtein_transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) levenshtein_base_architecture(args) # default parameters used in tensor2tensor implementation @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big" ) def levenshtein_transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) levenshtein_transformer_vaswani_wmt_en_de_big(args)
bart_ls-main
fairseq-py/fairseq/models/nat/levenshtein_transformer.py
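A standalone toy sketch (plain PyTorch, not part of the repo) of two small mechanisms used in the LevenshteinTransformerModel.forward_decoder code above: deriving max_lens from the source lengths and max_ratio, and trimming trailing padding after a refinement step. All tensor values below are made up for illustration.

import torch

pad = 1          # padding index, as in the dictionary used above
max_ratio = 2.0  # hypothetical decoding ratio

# toy source lengths and partially generated output tokens (bos=0, eos=2, word=5)
src_lens = torch.tensor([3, 7])
output_tokens = torch.tensor([[0, 5, 5, 2, 1, 1],
                              [0, 5, 2, 1, 1, 1]])

# same clamp as in forward_decoder: at least 10 insertion slots per sentence
max_lens = (src_lens * max_ratio).clamp(min=10).long()
print(max_lens)  # tensor([10, 14])

# drop columns that are padding in every sentence of the batch
cut_off = output_tokens.ne(pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
print(output_tokens.shape)  # torch.Size([2, 4]) -- trailing all-pad columns removed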
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq.models.nat import ( _apply_del_words, _apply_ins_masks, _apply_ins_words, _fill, _skip, _skip_encoder_out, ) class _EnsembleModelEncoder(object): def __init__(self, models): self.models = models def reorder_encoder_out(self, encoder_outs, new_order): encoder_outs = [ model.encoder.reorder_encoder_out(encoder_out, new_order) for model, encoder_out in zip(self.models, encoder_outs) ] return encoder_outs class BasicEnsembleModel(torch.nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models = torch.nn.ModuleList(models) self.bos = self.models[0].decoder.dictionary.bos() self.eos = self.models[0].decoder.dictionary.eos() self.pad = self.models[0].decoder.dictionary.pad() self.unk = self.models[0].decoder.dictionary.unk() self.encoder = _EnsembleModelEncoder(self.models) def has_encoder(self): return hasattr(self.models[0], "encoder") def max_decoder_positions(self): return min(m.max_decoder_positions() for m in self.models) @torch.no_grad() def forward_encoder(self, encoder_input): if not self.has_encoder(): return None return [model.forward_encoder(encoder_input) for model in self.models] @torch.no_grad() def forward_decoder(self, *inputs): raise NotImplementedError def initialize_output_tokens(self, *inputs): raise NotImplementedError class EnsembleLevT(BasicEnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) @torch.no_grad() def forward_decoder( self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs ): # LevT ensembling # A pipeline of three steps: deletion, placeholder, and word insertion. # We need to average scores in each step in a pipeline way because of dependence. 
# deletion output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn bsz = output_tokens.size(0) if max_ratio is None: max_lens = output_tokens.new().fill_(255) else: if not encoder_outs[0]["encoder_padding_mask"]: src_lens = ( encoder_outs[0]["encoder_out"][0].new(bsz) .fill_(encoder_outs[0]["encoder_out"][0].size(1)) ) else: src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip output_tokens, output_scores, attn = self.forward_word_del( encoder_outs, output_tokens, output_scores, attn, can_del_word, ) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: output_tokens, output_scores = self.forward_mask_ins( encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: output_tokens, output_scores, attn = self.forward_word_ins( encoder_outs, output_tokens, output_scores, attn, can_ins_word, ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=None, ) def forward_word_del( self, encoder_outs, output_tokens, output_scores, attn, can_del_word ): word_del_score_avg = [] word_del_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_del_out, word_del_attn = model.decoder.forward_word_del( _skip(output_tokens, can_del_word), _skip_encoder_out(model.encoder, encoder_out, can_del_word), ) word_del_score = F.log_softmax(word_del_out, 2) word_del_score_avg.append(word_del_score) word_del_attn_avg.append(word_del_attn) word_del_score_avg = torch.logsumexp( torch.stack(word_del_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) word_del_pred = word_del_score_avg.max(-1)[1].bool() if word_del_attn_avg[0] is not None: word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models) else: word_del_attn_avg = None _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn_avg, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.0) return output_tokens, output_scores, attn def forward_mask_ins( self, encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ): mask_ins_score_avg = [] for model, encoder_out in zip(self.models, encoder_outs): mask_ins_out, _ = model.decoder.forward_mask_ins( _skip(output_tokens, can_ins_mask), _skip_encoder_out(model.encoder, encoder_out, can_ins_mask), ) mask_ins_score = F.log_softmax(mask_ins_out, 2) if eos_penalty > 0.0: mask_ins_score[:, :, 0] -= eos_penalty mask_ins_score_avg.append(mask_ins_score) mask_ins_score_avg = torch.logsumexp( torch.stack(mask_ins_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) mask_ins_pred = mask_ins_score_avg.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores 
= _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) return output_tokens, output_scores def forward_word_ins( self, encoder_outs, output_tokens, output_scores, attn, can_ins_word ): word_ins_score_avg = [] word_ins_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_ins_out, word_ins_attn = model.decoder.forward_word_ins( _skip(output_tokens, can_ins_word), _skip_encoder_out(model.encoder, encoder_out, can_ins_word), ) word_ins_score = F.log_softmax(word_ins_out, 2) word_ins_score_avg.append(word_ins_score) word_ins_attn_avg.append(word_ins_attn) word_ins_score_avg = torch.logsumexp( torch.stack(word_ins_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) if word_ins_attn_avg[0] is not None: word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models) else: word_ins_attn_avg = None word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score_max, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) return output_tokens, output_scores, attn def initialize_output_tokens(self, encoder_outs, src_tokens): # LevT doesn't do length prediction. return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
bart_ls-main
fairseq-py/fairseq/models/nat/nonautoregressive_ensembles.py
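A minimal sketch (toy logits, not the repo's API) of the score averaging that EnsembleLevT applies at each pipeline step above: per-model log-softmax scores are combined with logsumexp minus log(num_models), which is the log of the arithmetic mean of the probabilities.

import math
import torch
import torch.nn.functional as F

# two "models" producing logits over a tiny vocabulary for one position
logits_a = torch.tensor([[2.0, 0.5, -1.0]])
logits_b = torch.tensor([[1.0, 1.5, -0.5]])

log_p = [F.log_softmax(l, dim=-1) for l in (logits_a, logits_b)]

# average probabilities in log space, as done for deletion/insertion scores above
avg_log_p = torch.logsumexp(torch.stack(log_p, dim=0), dim=0) - math.log(len(log_p))

# equivalent to the log of the mean probability
ref = torch.log((log_p[0].exp() + log_p[1].exp()) / 2)
print(torch.allclose(avg_log_p, ref))  # True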
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules.transformer_sentence_encoder import init_bert_params def _mean_pooling(enc_feats, src_masks): # enc_feats: T x B x C # src_masks: B x T or None if src_masks is None: enc_feats = enc_feats.mean(0) else: src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats) enc_feats = ( (enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None] ).sum(0) return enc_feats def _argmax(x, dim): return (x == x.max(dim, keepdim=True)[0]).type_as(x) def _uniform_assignment(src_lens, trg_lens): max_trg_len = trg_lens.max() steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size # max_trg_len index_t = utils.new_arange(trg_lens, max_trg_len).float() index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len index_t = torch.round(index_t).long().detach() return index_t @register_model("nonautoregressive_transformer") class NATransformerModel(FairseqNATModel): @property def allow_length_beam(self): return True @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) # length prediction parser.add_argument( "--src-embedding-copy", action="store_true", help="copy encoder word embeddings as the initial input of the decoder", ) parser.add_argument( "--pred-length-offset", action="store_true", help="predicting the length difference between the target and source sentences", ) parser.add_argument( "--sg-length-pred", action="store_true", help="stop the gradients back-propagated from the length predictor", ) parser.add_argument( "--length-loss-factor", type=float, help="weights on the length prediction loss", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = NATransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) return { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": tgt_tokens.ne(self.pad), "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.ne(self.pad) _scores, _tokens = self.decoder( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out, step=step, ).max(-1) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) 
output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) def initialize_output_tokens(self, encoder_out, src_tokens): # length prediction length_tgt = self.decoder.forward_length_prediction( self.decoder.forward_length(normalize=True, encoder_out=encoder_out), encoder_out=encoder_out, ) max_length = length_tgt.clamp_(min=2).max() idx_length = utils.new_arange(src_tokens, max_length) initial_output_tokens = src_tokens.new_zeros( src_tokens.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out["encoder_out"][0]) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None, ) def regenerate_length_beam(self, decoder_out, beam_size): output_tokens = decoder_out.output_tokens length_tgt = output_tokens.ne(self.pad).sum(1) length_tgt = ( length_tgt[:, None] + utils.new_arange(length_tgt, 1, beam_size) - beam_size // 2 ) length_tgt = length_tgt.view(-1).clamp_(min=2) max_length = length_tgt.max() idx_length = utils.new_arange(length_tgt, max_length) initial_output_tokens = output_tokens.new_zeros( length_tgt.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(decoder_out.output_scores) return decoder_out._replace( output_tokens=initial_output_tokens, output_scores=initial_output_scores ) class NATransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.encoder_embed_dim = args.encoder_embed_dim self.sg_length_pred = getattr(args, "sg_length_pred", False) self.pred_length_offset = getattr(args, "pred_length_offset", False) self.length_loss_factor = getattr(args, "length_loss_factor", 0.1) self.src_embedding_copy = getattr(args, "src_embedding_copy", False) self.embed_length = Embedding(256, self.encoder_embed_dim, None) @ensemble_decoder def forward(self, normalize, encoder_out, prev_output_tokens, step=0, **unused): features, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, embedding_copy=(step == 0) & self.src_embedding_copy, ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out @ensemble_decoder def forward_length(self, normalize, encoder_out): enc_feats = encoder_out["encoder_out"][0] # T x B x C if len(encoder_out["encoder_padding_mask"]) > 0: src_masks = encoder_out["encoder_padding_mask"][0] # B x T else: src_masks = None enc_feats = _mean_pooling(enc_feats, src_masks) if self.sg_length_pred: enc_feats = enc_feats.detach() length_out = F.linear(enc_feats, self.embed_length.weight) return F.log_softmax(length_out, -1) if normalize else 
length_out def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, embedding_copy=False, **unused ): """ Similar to *forward* but only return features. Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embedding if embedding_copy: src_embd = encoder_out["encoder_embedding"][0] if len(encoder_out["encoder_padding_mask"]) > 0: src_mask = encoder_out["encoder_padding_mask"][0] else: src_mask = None src_mask = ( ~src_mask if src_mask is not None else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool() ) x, decoder_padding_mask = self.forward_embedding( prev_output_tokens, self.forward_copying_source( src_embd, src_mask, prev_output_tokens.ne(self.padding_idx) ), ) else: x, decoder_padding_mask = self.forward_embedding(prev_output_tokens) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for i, layer in enumerate(self.layers): # early exit from the decoder. if (early_exit is not None) and (i >= early_exit): break x, attn, _ = layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} def forward_embedding(self, prev_output_tokens, states=None): # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions if states is None: x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) else: x = states if positions is not None: x += positions x = self.dropout_module(x) decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) return x, decoder_padding_mask def forward_copying_source(self, src_embeds, src_masks, tgt_masks): length_sources = src_masks.sum(1) length_targets = tgt_masks.sum(1) mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill( ~tgt_masks, 0 ) copied_embedding = torch.gather( src_embeds, 1, mapped_inputs.unsqueeze(-1).expand( *mapped_inputs.size(), src_embeds.size(-1) ), ) return copied_embedding def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None): enc_feats = encoder_out["encoder_out"][0] # T x B x C if len(encoder_out["encoder_padding_mask"]) > 0: src_masks = encoder_out["encoder_padding_mask"][0] # B x T else: src_masks = None if self.pred_length_offset: if src_masks is None: src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_( enc_feats.size(0) ) else: src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0) src_lengs = src_lengs.long() if tgt_tokens is not None: # obtain the length target tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long() if self.pred_length_offset: length_tgt = tgt_lengs - src_lengs + 128 else: length_tgt = tgt_lengs length_tgt = length_tgt.clamp(min=0, max=255) else: # predict the length target (greedy for now) # TODO: 
implementing length-beam pred_lengs = length_out.max(-1)[1] if self.pred_length_offset: length_tgt = pred_lengs - 128 + src_lengs else: length_tgt = pred_lengs return length_tgt @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer" ) def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de" ) def nonautoregressive_transformer_wmt_en_de(args): base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/nat/nonautoregressive_transformer.py
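A toy re-implementation (standalone, with made-up lengths) of the _uniform_assignment arithmetic used above when src_embedding_copy is enabled: each target position is mapped to a source position at a uniform stride, and the resulting indices drive the torch.gather in forward_copying_source.

import torch

def uniform_assignment(src_lens, trg_lens):
    # same arithmetic as _uniform_assignment above, for a toy batch
    max_trg_len = int(trg_lens.max())
    steps = (src_lens.float() - 1) / (trg_lens.float() - 1)   # per-sentence step size
    index_t = torch.arange(max_trg_len).float()
    index_t = steps[:, None] * index_t[None, :]               # batch x max_trg_len
    return torch.round(index_t).long()

src_lens = torch.tensor([4, 6])
trg_lens = torch.tensor([5, 3])
print(uniform_assignment(src_lens, trg_lens))
# row 0 -> [0, 1, 2, 2, 3]; entries past each true target length correspond to
# padding and are zeroed via masked_fill(~tgt_masks, 0) in forward_copying_source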
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This file implements: Ghazvininejad, Marjan, et al. "Constant-time machine translation with conditional masked language models." arXiv preprint arXiv:1904.09324 (2019). """ from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel from fairseq.utils import new_arange def _skeptical_unmasking(output_scores, output_masks, p): sorted_index = output_scores.sort(-1)[1] boundary_len = ( (output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p ).long() skeptical_mask = new_arange(output_masks) < boundary_len return skeptical_mask.scatter(1, sorted_index, skeptical_mask) @register_model("cmlm_transformer") class CMLMNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert not self.decoder.src_embedding_copy, "do not support embedding copy." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_mask = prev_output_tokens.eq(self.unk) return { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step max_step = decoder_out.max_step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.eq(self.unk) _scores, _tokens = self.decoder( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out, ).max(-1) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) # skeptical decoding (depend on the maximum decoding steps.) 
if (step + 1) < max_step: skeptical_mask = _skeptical_unmasking( output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step ) output_tokens.masked_fill_(skeptical_mask, self.unk) output_scores.masked_fill_(skeptical_mask, 0.0) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) @register_model_architecture("cmlm_transformer", "cmlm_transformer") def cmlm_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de") def cmlm_wmt_en_de(args): cmlm_base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/nat/cmlm_transformer.py
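A small standalone sketch of the mask-predict schedule in forward_decoder above: at each iteration the fraction p = 1 - (step + 1) / max_step of the lowest-scoring tokens is re-masked by _skeptical_unmasking. The scores below are invented; the helper mirrors the function defined at the top of the file.

import torch

def skeptical_mask(output_scores, output_masks, p):
    # same logic as _skeptical_unmasking above
    sorted_index = output_scores.sort(-1)[1]
    boundary_len = ((output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p).long()
    mask = torch.arange(output_masks.size(1)).expand_as(output_masks) < boundary_len
    return mask.scatter(1, sorted_index, mask)

scores = torch.tensor([[0.9, 0.1, 0.5, 0.8, 0.2, 0.7]])
non_pad = torch.ones_like(scores).bool()

max_step = 4
for step in range(max_step - 1):
    p = 1 - (step + 1) / max_step   # fraction of tokens re-masked shrinks each step
    # positions holding the lowest-scoring tokens come back as True (re-masked to <unk>)
    print(step, p, skeptical_mask(scores, non_pad, p).long().tolist())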
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel, base_architecture from fairseq.modules import DynamicCRF @register_model("nacrf_transformer") class NACRFTransformerModel(NATransformerModel): def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.crf_layer = DynamicCRF( num_embedding=len(self.tgt_dict), low_rank=args.crf_lowrank_approx, beam_size=args.crf_beam_approx, ) @property def allow_ensemble(self): return False @staticmethod def add_args(parser): NATransformerModel.add_args(parser) parser.add_argument( "--crf-lowrank-approx", type=int, help="the dimension of low-rank approximation of transition", ) parser.add_argument( "--crf-beam-approx", type=int, help="the beam size for apporixmating the normalizing factor", ) parser.add_argument( "--word-ins-loss-factor", type=float, help="weights on NAT loss used to co-training with CRF loss.", ) def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad) # compute the log-likelihood of CRF crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask) crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean() return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True, "factor": self.args.word_ins_loss_factor, }, "word_crf": {"loss": crf_nll}, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder and get emission scores output_masks = output_tokens.ne(self.pad) word_ins_out = self.decoder( normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out ) # run viterbi decoding through CRF _scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) @register_model_architecture("nacrf_transformer", "nacrf_transformer") def nacrf_base_architecture(args): args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32) args.crf_beam_approx = getattr(args, "crf_beam_approx", 64) args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/nat/nat_crf_transformer.py
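A tiny numeric sketch (made-up NLL values, no DynamicCRF involved) of how NACRFTransformerModel.forward above normalizes the CRF loss: each sentence's summed negative log-likelihood is divided by its number of non-pad target tokens, then averaged over the batch.

import torch

# toy per-sentence CRF negative log-likelihoods and target non-pad masks
crf_nll = torch.tensor([12.0, 30.0])                 # summed over tokens (invented)
word_ins_mask = torch.tensor([[1, 1, 1, 0, 0],
                              [1, 1, 1, 1, 1]]).bool()

# same normalization as in forward(): per-token NLL, then batch mean
per_token = crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)
loss = per_token.mean()
print(per_token, loss)   # tensor([4., 6.]) tensor(5.)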
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import ( FairseqNATModel, LevenshteinTransformerDecoder, LevenshteinTransformerModel, ensemble_decoder, ) from fairseq.models.transformer import Linear from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange class NegativeDistanceScore(object): def __init__(self): # pre-compute some values self.scores = {} self.scores[0.5] = self.compute_score_full(50, 0.5) self.scores[1.0] = self.compute_score_full(50, 1.0) self.scores[2.0] = self.compute_score_full(50, 2.0) def __call__(self, i, L, tau): if (tau is None) or (tau > 1000): return 1 / L if tau in self.scores: if L < self.scores[tau].shape[0]: return self.scores[tau][L - 1, i] return self.compute_score(L, tau)[i] def compute_score(self, L, tau): s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) s = np.exp(s - s.max()) return s / s.sum() def compute_score_full(self, L, tau): s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau s = np.tril(s, 0) + np.triu(s - float("inf"), 1) s = np.exp(s - s.max(1, keepdims=True)) return s / s.sum(1, keepdims=True) neg_scorer = NegativeDistanceScore() def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e B = in_tokens.size(0) T = in_tokens.size(1) V = vocab_size with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) insert_labels = [a[:-1] for a in full_labels] # numericalize1 insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() insert_index, insert_labels = zip( *[ (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) for i, labels in enumerate(insert_labels) for j, label in enumerate(labels[1:-1]) for k, w in enumerate(label) ] ) # HACK 1:-1 insert_index, insert_labels = [ torch.tensor(list(a), device=in_tokens.device) for a in [insert_index, insert_labels] ] insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) insert_label_tensors = insert_label_tensors.view(B, T - 1, V) return insert_label_tensors def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): padding_masks = in_tokens[:, 1:].eq(padding_idx) word_ins_scores.masked_fill_(padding_masks, 0.0) word_ins_pred.masked_fill_(padding_masks, padding_idx) in_coords = new_arange(in_tokens).type_as(in_scores) # shift all padding predictions to infinite out_coords = (in_coords[:, 1:] - 0.5).masked_fill( word_ins_pred.eq(padding_idx), float("inf") ) out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) return out_tokens, out_scores @register_model("insertion_transformer") class InsertionTransformerModel(LevenshteinTransformerModel): def __init__(self, args, encoder, decoder): 
super().__init__(args, encoder, decoder) @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument("--label-tau", default=None, type=float) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion word_ins_out = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_tgt = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk, len(self.tgt_dict), tau=self.decoder.label_tau, ).type_as(word_ins_out) word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_masks, "ls": self.args.label_smoothing, "nll_loss": True, } } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # TODO: decoding for InsertionTransformer word_ins_score = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out ) if eos_penalty > 0.0: word_ins_score[:, :, self.pad] -= eos_penalty word_ins_score, word_ins_pred = word_ins_score.max(-1) output_tokens, output_scores = _apply_ins_words( output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) class InsertionTransformerDecoder(LevenshteinTransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): # use the TransformerDecoder's __init__ super(LevenshteinTransformerDecoder, self).__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) self.label_tau = getattr(args, "label_tau", None) @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens): features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] features = self.pool_out( torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out def forward_mask_ins(self, *args, **kwargs): raise NotImplementedError def forward_word_del(self, *args, **kwargs): raise NotImplementedError @register_model_architecture("insertion_transformer", "insertion_transformer") def insertion_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, 
"encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # special for insertion transformer args.label_tau = getattr(args, "label_tau", None)
bart_ls-main
fairseq-py/fairseq/models/nat/insertion_transformer.py
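A standalone numpy sketch of the soft slot labels produced by NegativeDistanceScore.compute_score above: for a span of L candidate words, weights follow a softmax over -|L/2 - i| / tau, so mass concentrates near the centre of the span and a large tau flattens the distribution toward uniform.

import numpy as np

def slot_label_weights(L, tau):
    # same formula as NegativeDistanceScore.compute_score above
    s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
    s = np.exp(s - s.max())
    return s / s.sum()

print(np.round(slot_label_weights(5, 1.0), 3))     # peaked around the centre positions
print(np.round(slot_label_weights(5, 1000.0), 3))  # nearly uniform, close to 1/L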
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch from fairseq.models import register_model, register_model_architecture from fairseq.models.roberta import ( init_bert_params, roberta_base_architecture, RobertaEncoder, RobertaModel, ) import math from typing import Dict, Optional from torch import Tensor, nn from fairseq.modules import ( LayerDropModuleList, TransformerEncoderLayer, MultiheadAttention, PositionalEmbedding, ) from fairseq.modules.quant_noise import quant_noise import torch.nn.functional as F from fairseq.models.transformer import TransformerEncoder from .utils import sliding_chunks_matmul_pv, sliding_chunks_matmul_qk logger = logging.getLogger(__name__) @register_model("sliding_window_roberta") class SlidingWindownModel(RobertaModel): @staticmethod def add_args(parser): RobertaModel.add_args(parser) parser.add_argument( "--attention-window", type=int, ) parser.add_argument( "--train-global", action="store_true", help="Whether to set CLS as global token during pre-training" ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" from omegaconf import OmegaConf if OmegaConf.is_config(args): OmegaConf.set_struct(args, False) # make sure all arguments are present base_architecture(args) if not hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample encoder = SlidingWindowEncoder(args, task.source_dictionary) if OmegaConf.is_config(args): OmegaConf.set_struct(args, True) return cls(args, encoder) def safe_getattr(obj, k, default=None): from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) @register_model_architecture("sliding_window_roberta", "sliding_window_base") def base_architecture(args): args.attention_window = safe_getattr(args, "attention_window", 1024) # equavalent to 512 in longformer args.train_global = safe_getattr(args, "train_global", False) roberta_base_architecture(args) @register_model_architecture("sliding_window_roberta", "sliding_window_large") def large_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 24) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16) base_architecture(args) class SlidingWindowEncoder(RobertaEncoder): def build_encoder(self, args, dictionary, embed_tokens): encoder = SWTransformerEncoder(args, dictionary, embed_tokens) encoder.apply(init_bert_params) return encoder def forward(self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused): # pad src_tokens to multiplier of attention window size _, seqlen = src_tokens.size() w = max(self.sentence_encoder.window_per_layer) * 2 padding_len = (w - seqlen % w) % w x, extra = self.extract_features( src_tokens, return_all_hiddens=return_all_hiddens, key_padding_mask=unused.get("key_padding_mask", None) ) if masked_tokens is not None: masked_tokens = F.pad(masked_tokens, (0, padding_len), value=False) if not features_only: x = self.output_layer(x, masked_tokens=masked_tokens) return x, extra def extract_features(self, src_tokens, return_all_hiddens=False, **kwargs): encoder_out = self.sentence_encoder( src_tokens, return_all_hiddens=return_all_hiddens, 
token_embeddings=kwargs.get("token_embeddings", None), key_padding_mask=kwargs.get("key_padding_mask", None), ) # T x B x C -> B x T x C features = encoder_out["encoder_out"][0].transpose(0, 1) inner_states = encoder_out["encoder_states"] if return_all_hiddens else None return features, {"inner_states": inner_states} class SWTransformerEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) if self.encoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.encoder_layerdrop) else: self.layers = torch.nn.ModuleList([]) self.window_per_layer = [int(args.attention_window[i // (self.num_layers // len(args.attention_window))]) for i in range(self.num_layers)] self.layers.extend( [self.build_sw_encoder_layer(args, self.window_per_layer[i], self.padding_idx) for i in range(args.encoder_layers)]) self.num_layers = len(self.layers) def build_sw_encoder_layer(self, args, window_size, padding_idx): return SWTransformerEncoderLayer(args, window_size, padding_idx) def forward( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.Tensor] = None, # @xwhan in order to add global mask ): # sliding-window attention sequence length requirements _, seqlen = src_tokens.size() w = max(self.window_per_layer) * 2 padding_len = (w - seqlen % w) % w src_tokens = F.pad(src_tokens, (0, padding_len), value=self.padding_idx) if key_padding_mask is not None: key_padding_mask = F.pad(key_padding_mask, (0, padding_len), value=1) return self.forward_scriptable( src_tokens, src_lengths, return_all_hiddens, token_embeddings, key_padding_mask ) class SWTransformerEncoderLayer(TransformerEncoderLayer): def __init__(self, args, window_size, padding_idx): super().__init__(args) # replace self-attn self.window_size = window_size self.padding_idx = padding_idx self.self_attn = self.build_sw_self_attention(self.embed_dim, window_size, padding_idx, args) def forward( self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None ): if attn_mask is not None: attn_mask = (attn_mask * -1e8).type_as(attn_mask) # -1 in attn_mask means global attention return super().forward(x, encoder_padding_mask, attn_mask=attn_mask) def build_sw_self_attention(self, embed_dim, window_size, padding_idx, args): return SWSelfAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, window_size=window_size, padding_idx=padding_idx, max_source_positions=args.max_source_positions, train_global=args.train_global, ) class SWSelfAttention(MultiheadAttention): def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, window_size=256, padding_idx=1, max_source_positions=1024, train_global=False ): super().__init__(embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention, q_noise, qn_block_size) self.attention_window = window_size self.train_global = train_global self.padding_idx = padding_idx self.k_proj_global = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.v_proj_global = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj_global 
= quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) def forward(self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False ): if need_head_weights: need_weights = True # attn_mark None # key padding mask 0,1 bool tensor 1 means masked position (bsz x seqlen), -1 means global attention if key_padding_mask is None: seqlen, bsz, _= query.size() key_padding_mask = query.new_zeros(bsz, seqlen).bool() if attn_mask is None: # bos token as global attention attn_mask = key_padding_mask.type_as(query) num_global_masks = attn_mask.eq(-1).sum() #TODO whether to use first token as global token at pretraining time if self.train_global: if num_global_masks == 0: attn_mask[:,0] = -1 attn_mask = (attn_mask * -1e8).type_as(query) if len(attn_mask.size()) > 2: attention_mask = attn_mask.squeeze(dim=2).squeeze(dim=1) else: attention_mask = attn_mask key_padding_mask = attention_mask < 0 extra_attention_mask = attention_mask > 0 remove_from_windowed_attention_mask = attention_mask != 0 # num of global tokens num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1) max_num_extra_indices_per_batch = num_extra_indices_per_batch.max() if max_num_extra_indices_per_batch <= 0: extra_attention_mask = None else: extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True) zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device) # mask indicating which values are actually going to be padding num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1) selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1) # 2) location of the non-padding values in the selected global attention selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True) # 3) location of the padding values in the selected global attention selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True) seq_len, bsz, embed_dim = query.size() assert self.self_attention q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) q *= self.scaling q = q.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) k = k.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) attn_weights = sliding_chunks_matmul_qk(q, k, self.attention_window, padding_value=0) # bsz, seq_len, num_heads, 2 * w + 1 if remove_from_windowed_attention_mask is not None: # This implementation is fast and takes very little memory because num_heads x hidden_size = 1 # from (bsz x seq_len) to (bsz x seq_len x num_heads x hidden_size) remove_from_windowed_attention_mask = remove_from_windowed_attention_mask.unsqueeze(dim=-1).unsqueeze(dim=-1) # cast to float/half then replace 1's with -inf float_mask = remove_from_windowed_attention_mask.type_as(q).masked_fill(remove_from_windowed_attention_mask, -10000.0) # repeat_size = 1 if isinstance(self.attention_dilation, int) else len(self.attention_dilation) repeat_size = 1 float_mask = float_mask.repeat(1, 1, repeat_size, 1) ones = float_mask.new_ones(size=float_mask.size()) # tensor of ones # diagonal mask with zeros everywhere and -inf inplace of padding d_mask = sliding_chunks_matmul_qk(ones, float_mask, self.attention_window, padding_value=0) attn_weights += d_mask 
assert list(attn_weights.size()) == [bsz, seq_len, self.num_heads, self.attention_window * 2 + 1] # the extra attention if extra_attention_mask is not None: selected_k = k.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, self.head_dim) selected_k[selection_padding_mask_nonzeros] = k[extra_attention_mask_nonzeros] # (bsz, seq_len, num_heads, max_num_extra_indices_per_batch) selected_attn_weights = torch.einsum('blhd,bshd->blhs', (q, selected_k)) selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000 # concat to attn_weights # (bsz, seq_len, num_heads, extra attention count + 2*window+1) attn_weights = torch.cat((selected_attn_weights, attn_weights), dim=-1) attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) # use fp32 for numerical stability if key_padding_mask is not None: # softmax sometimes inserts NaN if all positions are masked, replace them with 0 attn_weights_float = torch.masked_fill(attn_weights_float, key_padding_mask.unsqueeze(-1).unsqueeze(-1), 0.0) attn_weights = attn_weights_float.type_as(attn_weights) #attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) attn_probs = self.dropout_module(attn_weights) v = v.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) attn = 0 if extra_attention_mask is not None: selected_attn_probs = attn_probs.narrow(-1, 0, max_num_extra_indices_per_batch) selected_v = v.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, self.head_dim) selected_v[selection_padding_mask_nonzeros] = v[extra_attention_mask_nonzeros] # use `matmul` because `einsum` crashes sometimes with fp16 # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) attn += torch.matmul(selected_attn_probs.transpose(1, 2), selected_v.transpose(1, 2).type_as(selected_attn_probs)).transpose(1, 2) attn_probs = attn_probs.narrow(-1, max_num_extra_indices_per_batch, attn_probs.size(-1) - max_num_extra_indices_per_batch).contiguous() attn += sliding_chunks_matmul_pv(attn_probs, v, self.attention_window) attn = attn.type_as(query) assert list(attn.size()) == [bsz, seq_len, self.num_heads, self.head_dim] attn = attn.transpose(0, 1).reshape(seq_len, bsz, embed_dim).contiguous() if extra_attention_mask is not None: selected_hidden_states = query.new_zeros(max_num_extra_indices_per_batch, bsz, embed_dim) selected_hidden_states[selection_padding_mask_nonzeros[::-1]] = query[extra_attention_mask_nonzeros[::-1]] q = self.q_proj_global(selected_hidden_states) k = self.k_proj_global(query) v = self.v_proj_global(query) q /= math.sqrt(self.head_dim) q = q.contiguous().view(max_num_extra_indices_per_batch, bsz * self.num_heads, self.head_dim).transpose(0, 1) # (bsz*self.num_heads, max_num_extra_indices_per_batch, head_dim) k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) # bsz * self.num_heads, seq_len, head_dim) v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) # bsz * self.num_heads, seq_len, head_dim) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads, max_num_extra_indices_per_batch, seq_len] attn_weights = attn_weights.view(bsz, self.num_heads, max_num_extra_indices_per_batch, seq_len) attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000.0 if key_padding_mask is not None: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), -10000.0, ) 
attn_weights = attn_weights.view(bsz * self.num_heads, max_num_extra_indices_per_batch, seq_len) attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) attn_weights = attn_weights_float.type_as(attn_weights) # use fp32 for numerical stability # attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) attn_probs = self.dropout_module(attn_weights) selected_attn = torch.bmm(attn_probs, v) assert list(selected_attn.size()) == [bsz * self.num_heads, max_num_extra_indices_per_batch, self.head_dim] selected_attn_4d = selected_attn.view(bsz, self.num_heads, max_num_extra_indices_per_batch, self.head_dim) nonzero_selected_attn = selected_attn_4d[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]] attn[extra_attention_mask_nonzeros[::-1]] = nonzero_selected_attn.view(len(selection_padding_mask_nonzeros[0]), -1).type_as(query) # context_layer = attn # seqlen x bsz x embed_dim attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: if extra_attention_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, max_num_extra_indices_per_batch, seq_len) else: attn_weights = attn_weights.permute(0, 2, 1, 3) #bsz x head x seqlen x head_dim return attn, attn_weights
bart_ls-main
fairseq-py/fairseq/models/long_transformers/sliding_window.py
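# --- Illustrative sketch (not part of the original file above) ---------------------
# The sliding-window attention forward in sliding_window.py concatenates the "extra"
# (global) key logits with the local-window logits, applies one softmax, and then splits
# the probabilities again into a global matmul plus a sliding-chunks matmul. The toy
# check below, with made-up sizes and tensor names, shows why that split is valid: the
# two partial context sums equal a single softmax-attention over the union of keys.
import torch

torch.manual_seed(0)
d = 4
q = torch.randn(d)                    # one query vector
k_global = torch.randn(3, d)          # 3 "extra attention" (global) keys
k_local = torch.randn(5, d)           # 5 sliding-window keys
v_global = torch.randn(3, d)
v_local = torch.randn(5, d)

# joint attention over all 8 keys
logits = torch.cat([k_global @ q, k_local @ q])
probs = logits.softmax(-1)
ctx_joint = probs @ torch.cat([v_global, v_local])

# split the probabilities the way the module does (narrow into global / local parts)
p_global, p_local = probs[:3], probs[3:]
ctx_split = p_global @ v_global + p_local @ v_local

assert torch.allclose(ctx_joint, ctx_split, atol=1e-6)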
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from audioop import cross import logging from typing import Dict, Optional, Tuple import torch from typing import Dict, Optional from torch import Tensor, nn from fairseq.modules import ( LayerNorm, TransformerEncoderLayer, MultiheadAttention, ) import torch.nn.functional as F from fairseq.models.transformer import TransformerEncoder from functools import partial, reduce from fairseq.distributed import fsdp_wrap from inspect import isfunction from operator import mul from fairseq.modules.checkpoint_activations import checkpoint_wrapper from .block import BlockTransformerEncoderLayer, BlockSelfAttention """ A hacky implementation of simple block attention transformer """ class TopDownTransformerEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.args = args del self.layers layers = [self.build_sw_encoder_layer(args, args.window_size, self.padding_idx) for i in range(args.encoder_n1)] layers += [self.build_td_encoder_layer(args, args.window_size, self.padding_idx) for i in range(args.encoder_n3)] self.layers = nn.ModuleList(layers) self.top_pool = nn.AvgPool1d(32, stride=24) self.top_layers = nn.ModuleList([self.build_encoder_layer(args) for i in range(args.encoder_n2)]) self.n1 = args.encoder_n1 self.n2 = args.encoder_n2 self.n3 = args.encoder_n3 self.num_layers = len(self.layers) def build_sw_encoder_layer(self, args, window_size, padding_idx): layer = BlockTransformerEncoderLayer(args, window_size, padding_idx) checkpoint = args.checkpoint_activations if checkpoint: offload_to_cpu = self.cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) min_params_to_wrap = self.cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def build_td_encoder_layer(self, args, window_size, padding_idx): layer = TopDownEncoderLayer(args, window_size, padding_idx) checkpoint = args.checkpoint_activations if checkpoint: offload_to_cpu = self.cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) min_params_to_wrap = self.cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.Tensor] = None, # @xwhan in order to add global mask ): return self.forward_scriptable( src_tokens, src_lengths, return_all_hiddens, token_embeddings, key_padding_mask ) def forward_scriptable( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.Tensor] = None, # @xwhan in order to add global mask ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). 
token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. """ # compute padding mask if key_padding_mask is None: encoder_padding_mask = src_tokens.eq(self.padding_idx) key_padding_mask = encoder_padding_mask else: encoder_padding_mask = key_padding_mask.eq(1) # key_padding_mask might -1 elements has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any() x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings) # account for padding while computing the representation if has_pads: x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x)) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = [] if return_all_hiddens: encoder_states.append(x) # bottom up for layer in self.layers[:self.n1]: x = layer( # x, encoder_padding_mask=encoder_padding_mask if has_pads else None x, encoder_padding_mask=key_padding_mask # always pass key_padding_mask ) if return_all_hiddens: assert encoder_states is not None encoder_states.append(x) # Pool and compute top level top_x = x.transpose(0, 2) # T x B x C -> C x B x T # Can multiple with weights here later for weighted pooling top_x = self.top_pool(top_x) top_x = top_x.transpose(0, 2) # T_pool x B x C for layer in self.top_layers: top_x = layer(top_x,encoder_padding_mask=None) # Top Down layers with cross attention for layer in self.layers[self.n1:]: x = layer( # x, encoder_padding_mask=encoder_padding_mask if has_pads else None x, top_x, encoder_padding_mask=key_padding_mask # always pass key_padding_mask ) if return_all_hiddens: assert encoder_states is not None encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x) # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in # `forward` so we use a dictionary instead. # TorchScript does not support mixed values so the values are all lists. # The empty list is equivalent to None. 
src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape(-1, 1).contiguous() return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [encoder_padding_mask], # B x T "encoder_embedding": [encoder_embedding], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [src_lengths], } class TopDownEncoderLayer(TransformerEncoderLayer): def __init__(self, args, window_size, padding_idx): super().__init__(args) # replace self-attn self.window_size = window_size self.padding_idx = padding_idx self.self_attn = self.build_sw_self_attention(self.embed_dim, window_size, padding_idx, args) # init cross attention self.cross_attn = self.build_self_attention(self.embed_dim, args) # init cross layernorm self.cross_attn_layer_norm = LayerNorm(self.embed_dim, export=args.export) def forward( self, x, top_x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None ): if attn_mask is not None: attn_mask = attn_mask.masked_fill( attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4 ) residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, attn_mask=attn_mask, ) cross_x, _ = self.cross_attn( query=x, key=top_x, value=top_x, need_weights=False, ) x = (cross_x + x)/2 # divide by 2 to maintain similar scale for subsequent pre-trained layers x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) return x def build_sw_self_attention(self, embed_dim, window_size, padding_idx, args): return BlockSelfAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, window_size=window_size, padding_idx=padding_idx, ) def build_cross_attention(self, embed_dim, cfg): return MultiheadAttention( embed_dim, cfg.encoder.attention_heads, dropout=cfg.attention_dropout, self_attention=False, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, use_xformers=cfg.use_xformers, attention_name=cfg.attention_name, xformer_config=None if not cfg.use_xformers else cfg.xformer_config, )
bart_ls-main
fairseq-py/fairseq/models/long_transformers/top_down.py
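# --- Illustrative sketch (not part of the original file above) ---------------------
# TopDownTransformerEncoder in top_down.py builds its coarse "top" sequence by average
# pooling the T x B x C hidden states with nn.AvgPool1d(32, stride=24). The snippet
# below uses toy sizes (T=1024, B=2, C=16 are assumptions, not the real model dims) to
# show the transpose trick used in forward_scriptable and the resulting pooled length
# L_out = floor((T - kernel) / stride) + 1.
import torch
import torch.nn as nn

pool = nn.AvgPool1d(kernel_size=32, stride=24)
T, B, C = 1024, 2, 16
x = torch.randn(T, B, C)                          # T x B x C, as inside the encoder
top = pool(x.transpose(0, 2)).transpose(0, 2)     # pool over the sequence dim -> T_pool x B x C
# L_out = (1024 - 32) // 24 + 1 = 42
assert top.shape == (42, B, C)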
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Long-context model pretraining with fast blocksparse and extrapolation attentions """ from typing import Optional import logging import math import torch import torch.nn as nn from fairseq import utils, modules from fairseq.utils import safe_getattr from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel, TransformerConfig from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.models.transformer.transformer_config import TransformerConfig import copy from fairseq.models.roberta import RobertaEncoder logger = logging.getLogger(__name__) @register_model("loco_variant") class LOCOVariantModel(TransformerModel): __jit_unused_properties__ = ["supported_targets"] def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) generator_architecture(args) self.generator = RobertaEncoder(args, encoder.dictionary) if not self.args.train_generator: for p in self.generator.parameters(): p.requires_grad = False self.pad_idx = self.encoder.dictionary.pad() self.bos = self.encoder.dictionary.bos() self.sentinel_start_idx = self.encoder.dictionary.index("<sentinel_0>") self.sentinel_end_idx = len(self.encoder.dictionary) - 1 # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() if hasattr(self.encoder, "dictionary"): self.eos: int = self.encoder.dictionary.eos() @staticmethod def add_args(parser): super(LOCOVariantModel, LOCOVariantModel).add_args(parser) parser.add_argument( "--pooler-dropout", type=float, metavar="D", help="dropout probability in the masked_lm pooler layers", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use for pooler layer", ) parser.add_argument( "--finetune", action="store_true", help="different forwards used for pretraining and finetuning" ) parser.add_argument( "--train-generator", action="store_true", ) parser.add_argument( "--generator-xformer-config", type=str, metavar="D", ) parser.add_argument( "--easy-span-ops", default='sample', type=str, metavar="D", help="do we want to unmask the easy spans or replace those with <mask> or sampled tokens from the MLM" ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): # use vanilla attention for now args.use_xformers = False # HACK, disable efficient attentions for cross-attention & decoder-side attention return super().build_decoder( TransformerConfig.from_namespace(args), tgt_dict, embed_tokens ) @classmethod def build_generator(cls, args, src_dict, embed_tokens): return super().build_encoder( TransformerConfig.from_namespace(args), src_dict, embed_tokens ) @property def supported_targets(self): return {"self"} def _sum_spans(self, input, starts, ends): """ sum the span values to the start of each span; zero out all other positions """ starts_before = torch.roll(starts, -1, -1) input_cumsum = torch.cumsum(input, dim=-1) input_cumsum[starts.bool()] = input_cumsum[ends.bool()] - input_cumsum[starts_before.bool()] sumed = input_cumsum * starts return sumed def _avg_spans(self, span_sum, span_lens, starts): span_sum[starts.bool()] = span_sum[starts.bool()] / span_lens[starts.bool()] return span_sum """ utils from T5's objective """ def _create_sentinels(self, mask_indices): """ 
mask_indices: binary mask start spans as sentinel ids and other masked positions as -1 """ start_indices = mask_indices - torch.roll(mask_indices, 1, -1) * mask_indices start_indices[:,0] = mask_indices[:,0] sentinel_ids = torch.where(start_indices != 0, torch.cumsum(start_indices, dim=-1), start_indices) assert sentinel_ids.max() + self.sentinel_start_idx - 1 <= self.sentinel_end_idx, (sentinel_ids.max() + self.sentinel_start_idx - 1, self.sentinel_end_idx, sentinel_ids) sentinel_ids = torch.where(sentinel_ids != 0, (sentinel_ids + self.sentinel_start_idx - 1), 0) sentinel_ids -= mask_indices - start_indices return sentinel_ids def _build_inputs(self, masked_input, span_mask): sentinel_ids = self._create_sentinels(span_mask) masked_input = torch.where(sentinel_ids != 0, sentinel_ids, masked_input) src_lens = (masked_input >= 0).sum(-1) # src_tokens padded to max_source_positions, useful for blocksparse attention src_tokens = masked_input.new_full((masked_input.size(0), self.cfg.max_source_positions), self.pad_idx) fill_indices = torch.arange(masked_input.size(-1)).to(masked_input) fill_indices = fill_indices < src_lens.unsqueeze(-1) assert fill_indices.sum() == (masked_input >= 0).sum() # = 0 for sequence starts src_tokens[:,:masked_input.size(-1)][fill_indices] = masked_input[masked_input >= 0] return src_tokens, src_lens def _build_targets(self, masked_target, span_mask, pad_mask, eos_mask): """ masked_targets: masked positions as their original token ids and other positions as pad index span_mask: indicating hard spans eos_mask: end of sequence as 0 pad_mask: padding positions as 0 """ unmasked_positions = ~span_mask.bool() unmasked_positions[:,0] = 0 sentinel_ids = self._create_sentinels(unmasked_positions.to(masked_target)) sentinel_ids *= pad_mask sentinel_ids *= eos_mask # target: masked positions with sentinel ids or -1; # bos, eos and padding positions with value 1 target = torch.where(sentinel_ids != 0, sentinel_ids, masked_target) target[~eos_mask] = self.eos target[:,0] = self.bos tgt_lens = (target.abs() != 1).sum(-1) tgt_tokens = target.new_full(target.size(), self.pad_idx) fill_indices = torch.arange(tgt_tokens.size(-1)).to(tgt_tokens) fill_indices = fill_indices < tgt_lens.unsqueeze(-1) tgt_tokens[fill_indices] = target[target.abs() != 1] tgt_tokens = tgt_tokens[:,:tgt_lens.max()] # truncating if needed if tgt_tokens.size(-1) > self.args.max_target_positions: end_positions = (tgt_tokens == self.eos).nonzero(as_tuple=True)[1] sample_exceeds = end_positions >= (self.args.max_target_positions - 1) tgt_tokens = torch.cat( [tgt_tokens[:,:self.args.max_target_positions-1], tgt_tokens[:,-1:]], dim=-1 ) tgt_tokens[:,-1] = torch.where(sample_exceeds, self.eos, tgt_tokens[:,-1]) decoder_input = tgt_tokens.clone() decoder_input[:,0] = self.eos decoder_input[:,1:] = tgt_tokens[:,:-1] return tgt_tokens, decoder_input def forward( self, src_tokens, src_lengths, prev_output_tokens: Optional[torch.Tensor] = None, features_only: bool = False, classification_head_name: Optional[str] = None, token_embeddings: Optional[torch.Tensor] = None, return_all_hiddens: bool = True, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, masked_unfiltered: Optional[torch.Tensor] = None, ): if classification_head_name is not None: features_only = True if not self.cfg.finetune: """ use an encoder-only model to build long-range objectives """ masked_tokens_unfiltered = masked_unfiltered.ne(self.pad_idx).to(src_tokens) src_tokens_for_mlm = copy.deepcopy(src_tokens) if 
self.cfg.train_generator: masked_logits = self.generator( src_tokens_for_mlm, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, )[0] token_loss = modules.cross_entropy( masked_logits.view(-1, masked_logits.size(-1)), masked_unfiltered.view(-1), reduction='none', ignore_index=self.pad_idx, ).view(masked_unfiltered.size()) masked_cnt = masked_tokens_unfiltered.sum() mlm_loss = token_loss.sum() / masked_cnt else: with torch.no_grad(): masked_logits = self.generator( src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, )[0] token_loss = modules.cross_entropy( masked_logits.view(-1, masked_logits.size(-1)), masked_unfiltered.view(-1), reduction='none', ignore_index=self.pad_idx).view(masked_unfiltered.size()) # log likelihood averaged by span lengths span_starts = masked_tokens_unfiltered - torch.roll(masked_tokens_unfiltered, 1, -1) * masked_tokens_unfiltered span_starts[:,0] = masked_tokens_unfiltered[:,0] span_ends = masked_tokens_unfiltered - torch.roll(masked_tokens_unfiltered, -1, -1) * masked_tokens_unfiltered span_ends[:,-1] = masked_tokens_unfiltered[:,-1] # span_starts: binary mask marking the start of each span # span_ends: binary mask marking the end of each span # span_lens: span lens calculated put at the starts of each span span_loss = self._sum_spans(token_loss, span_starts, span_ends) span_lens = self._sum_spans(masked_tokens_unfiltered, span_starts, span_ends) span_loss_avg = self._avg_spans(span_loss, span_lens, span_starts) # find the hard spans, i.e, topk largest loss span_counts = span_starts.sum(-1).min() hard_span_starts = span_loss_avg.topk(k=math.floor(span_counts*self.cfg.sample_ratio), dim=-1)[1] # bsz * num_hard_spans hard_span_ends = span_lens.gather(1, index=hard_span_starts) + hard_span_starts # masking source with only the hard spans row_idx = torch.arange(hard_span_starts.size(0)).unsqueeze(1).repeat(1, hard_span_starts.size(1)).to(hard_span_starts) hard_mask = span_starts.new_zeros(span_starts.size()) hard_mask[row_idx.view(-1), hard_span_starts.view(-1)] = 1 hard_mask[row_idx.view(-1), hard_span_ends.view(-1)] = 1 hard_mask = (hard_mask.cumsum(dim=-1) % 2) == 1 hard_mask = hard_mask.type_as(masked_tokens_unfiltered) # filter our easy span masks mask_off = torch.logical_xor(hard_mask, masked_tokens_unfiltered) if self.cfg.easy_span_ops == 'recover': src_tokens[mask_off] = masked_unfiltered[mask_off] elif self.cfg.easy_span_ops == 'sample': # do not sample special tokens logits = masked_logits.detach().clone() logits[:,:,self.pad_idx] = float('-inf') logits[:,:,self.eos] = float('-inf') logits[:,:,self.bos] = float('-inf') logits[:,:,self.sentinel_start_idx:] = float('-inf') sampled = torch.multinomial(logits.softmax(-1).view(-1, logits.size(-1)), 1) sampled = sampled.view(src_tokens.size()) src_tokens[mask_off] = sampled[mask_off] else: assert self.cfg.easy_span_ops == 'masked' # build the denoising targets src_pad_mask = src_tokens.ne(self.pad_idx) src_eos_mask = src_tokens.ne(self.eos) src_tokens, src_lengths = self._build_inputs(src_tokens, hard_mask) target, prev_output_tokens = self._build_targets(masked_unfiltered, hard_mask, src_pad_mask, src_eos_mask) encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, token_embeddings=token_embeddings, return_all_hiddens=return_all_hiddens ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) 
eos: int = self.eos if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(eos), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] for k, head in self.classification_heads.items(): # for torch script only supports iteration if k == classification_head_name: x = head(sentence_representation) break if not self.cfg.finetune: if self.cfg.train_generator: return x, target, mlm_loss, extra return x, target, extra return x, extra def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" current_head_names = ( [] if not hasattr(self, "classification_heads") else self.classification_heads.keys() ) # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + "classification_heads."): continue head_name = k[len(prefix + "classification_heads.") :].split(".")[0] num_classes = state_dict[ prefix + "classification_heads." + head_name + ".out_proj.weight" ].size(0) inner_dim = state_dict[ prefix + "classification_heads." + head_name + ".dense.weight" ].size(0) if getattr(self.args, "load_checkpoint_heads", False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( "deleting classification head ({}) from checkpoint " "not present in current model: {}".format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( "deleting classification head ({}) from checkpoint " "with different dimensions than current model: {}".format( head_name, k ) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] def truncate_emb(key): if key in state_dict: state_dict[key] = state_dict[key][:-1, :] # When finetuning on translation task, remove last row of # embedding matrix that corresponds to mask_idx token. loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0) if ( loaded_dict_size == len(self.encoder.dictionary) + 1 and "<mask>" not in self.encoder.dictionary ): truncate_emb("encoder.embed_tokens.weight") truncate_emb("decoder.embed_tokens.weight") truncate_emb("encoder.output_projection.weight") truncate_emb("decoder.output_projection.weight") # When continued pretraining on new set of languages for mbart, # add extra lang embeddings at the end of embed_tokens. # Note: newly added languages are assumed to have been added at the end. if self.args.task == "multilingual_denoising" and loaded_dict_size < len( self.encoder.dictionary ): logger.info( "Adding extra language embeddings not found in pretrained model for " "continued pretraining of MBART on new set of languages." 
) loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][ -1, : ] num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size embed_dim = state_dict["encoder.embed_tokens.weight"].size(1) new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim) nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5) new_lang_embed_to_add = new_lang_embed_to_add.to( dtype=state_dict["encoder.embed_tokens.weight"].dtype, ) state_dict["encoder.embed_tokens.weight"] = torch.cat( [ state_dict["encoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) state_dict["decoder.embed_tokens.weight"] = torch.cat( [ state_dict["decoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, "classification_heads"): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + "classification_heads." + k not in state_dict: logger.info("Overwriting " + prefix + "classification_heads." + k) state_dict[prefix + "classification_heads." + k] = v def generator_architecture(args): # options to use different sizes of generator models args.encoder_layers = safe_getattr(args, "generator_layers", 12) args.encoder_embed_dim = safe_getattr(args, "generator_embed_dim", 768) args.encoder_ffn_embed_dim = safe_getattr(args, "generator_ffn_embed_dim", 3072) args.encoder_attention_heads = safe_getattr(args, "generator_attention_heads", 12) args.max_positions = safe_getattr(args, "max_source_positions", 8192) args.encoder_learned_pos = safe_getattr(args, "generator_learned_pos", True) args.encoder_normalize_before = safe_getattr(args, "generator_normalize_before", False) args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False) # xformers config args.use_xformers = safe_getattr(args, "generator_use_xformers", True) args.attention_name = safe_getattr(args, "generator_attention_name", 'bs_local') args.xformer_config = safe_getattr(args, 'generator_xformer_config', '{"block_size": 128, "max_seq_len": 8192}') @register_model_architecture("loco_variant", "loco_variant_large") def loco_large_architecture(args): args.finetune = safe_getattr(args, "finetune", False) args.train_generator = safe_getattr(args, "train_generator", False) # @xwhan is it necessary to put it here? 
had issues def getattr(args, key, value): return value args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.max_target_positions = safe_getattr(args, "max_target_positions", 1024) #hack args.max_source_positions = safe_getattr(args, "max_source_positions", 1024) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", True ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", True) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
bart_ls-main
fairseq-py/fairseq/models/long_transformers/model_debug.py
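# --- Illustrative sketch (not part of the original file above) ---------------------
# _create_sentinels in the model above implements T5-style span corruption: the first
# token of every masked span is tagged with the next sentinel id, the remaining masked
# tokens become -1 (so _build_inputs can drop them), and unmasked tokens stay 0. The
# standalone helper below (hypothetical name, toy sentinel offset of 100) replays that
# computation on a small mask so the intermediate roll/cumsum steps are easy to follow.
import torch

def create_sentinels_sketch(mask_indices: torch.Tensor, sentinel_start_idx: int) -> torch.Tensor:
    # 1 at the first position of each masked span
    start = mask_indices - torch.roll(mask_indices, 1, -1) * mask_indices
    start[:, 0] = mask_indices[:, 0]
    # running span count at each span start, 0 elsewhere
    ids = torch.where(start != 0, torch.cumsum(start, dim=-1), start)
    # map span count -> sentinel vocabulary id
    ids = torch.where(ids != 0, ids + sentinel_start_idx - 1, torch.zeros_like(ids))
    # mark non-start masked positions with -1
    return ids - (mask_indices - start)

mask = torch.tensor([[0, 1, 1, 0, 1, 0, 1, 1, 1, 0]])
out = create_sentinels_sketch(mask, sentinel_start_idx=100)
assert out.tolist() == [[0, 100, -1, 0, 101, 0, 102, -1, -1, 0]]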
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .sliding_window import *
from .loco_model import *
from .utils import *
from .model_debug import *
bart_ls-main
fairseq-py/fairseq/models/long_transformers/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional, Tuple from fairseq.modules.multihead_attention import MultiheadAttention import torch from typing import Dict, Optional from torch import Tensor, nn from fairseq.modules import ( TransformerEncoderLayer, ) import torch.nn.functional as F class PoolEncoderLayer(TransformerEncoderLayer): def __init__(self, cfg): super().__init__(cfg) self.top_pool = nn.AvgPool1d(18, stride=12, padding=9) self.top_pool_mask = nn.AvgPool1d(18, stride=12, padding=9,count_include_pad=False) # init top level attention self.pool_attn = self.build_cross_attention(self.embed_dim, cfg) def forward( self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None, attn_bias: Optional[Tensor] = None # relative position encoding ): if attn_mask is not None: attn_mask = attn_mask.masked_fill( attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4 ) residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, attn_mask=attn_mask, ) # Project x to get K, Q k = self.pool_attn.k_proj(x) v = self.pool_attn.v_proj(x) # account for padding before pooling if (encoder_padding_mask is not None) and encoder_padding_mask.any(): k = k * (1 - encoder_padding_mask.unsqueeze(-1).permute(1, 0 ,2).type_as(k)) v = v * (1 - encoder_padding_mask.unsqueeze(-1).permute(1, 0 ,2).type_as(v)) # Pool K, Q pool_k = self.apply_pool(k) pool_v = self.apply_pool(v) # breakpoint() # Do not attend to pooled padding tokens if encoder_padding_mask is not None: # pool_mask = self.top_pool(encoder_padding_mask.to(k)).type_as(encoder_padding_mask) pool_mask = self.top_pool_mask(encoder_padding_mask.to(k)).floor().type_as(encoder_padding_mask) else: pool_mask = None cross_x, _ = self.pool_attn( query=x, key=pool_k, value=pool_v, key_padding_mask=pool_mask, need_weights=False, ) # cross_x = torch.nan_to_num(cross_x) x = cross_x + x x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) return x def apply_pool(self, input): out = input.permute(1, 2 ,0) # T x B x C -> B x C x T out = self.top_pool(out) return out.permute(2, 0 ,1) # T_pool x B x C def build_cross_attention(self, embed_dim, cfg): return MultiheadAttentionNoProj( embed_dim, cfg.encoder.attention_heads, dropout=cfg.attention_dropout, self_attention=False, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) class TwoLevelEncoderLayer(TransformerEncoderLayer): def __init__(self, cfg): super().__init__(cfg) self.top_pool = nn.AvgPool1d(18, stride=12, padding=9) # init top level attention self.pool_attn = self.build_cross_attention(self.embed_dim, cfg) def forward( self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None, attn_bias: Optional[Tensor] = None # relative position encoding ): if attn_mask is not None: attn_mask = attn_mask.masked_fill( attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4 ) residual = x if 
self.normalize_before: x = self.self_attn_layer_norm(x) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, attn_mask=attn_mask, ) # Project x to get K, Q k = self.pool_attn.k_proj(x) v = self.pool_attn.v_proj(x) # account for padding before pooling # breakpoint() if (encoder_padding_mask is not None) and encoder_padding_mask.any(): k = k * (1 - encoder_padding_mask.unsqueeze(-1).permute(1, 0 ,2).type_as(k)) v = v * (1 - encoder_padding_mask.unsqueeze(-1).permute(1, 0 ,2).type_as(v)) # Pool K, Q pool_k = self.apply_pool(k) pool_v = self.apply_pool(v) # Do not attend to pooled padding tokens if encoder_padding_mask is not None: pool_mask = self.top_pool(encoder_padding_mask.float()).type_as(encoder_padding_mask) else: pool_mask = None cross_x, _ = self.pool_attn( query=x, key=pool_k, value=pool_v, key_padding_mask=pool_mask, need_weights=False, ) x = cross_x + x x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) return x def apply_pool(self, input): out = input.permute(1, 2 ,0) # T x B x C -> B x C x T out = self.top_pool(out) return out.permute(2, 0 ,1) # T_pool x B x C def build_cross_attention(self, embed_dim, cfg): return MultiheadAttentionNoProj( embed_dim, cfg.encoder.attention_heads, dropout=cfg.attention_dropout, self_attention=False, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) class MultiheadAttentionNoProj(MultiheadAttention): """Multi-headed attention where the key, value are assumed to be pre-projected this is done to support pooling between projection and attention calculation """ def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. 
""" if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, torch.zeros_like(self.k_proj.bias), torch.zeros_like(self.v_proj.bias))), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=torch.eye(*self.k_proj.weight.size(), out=torch.empty_like(self.k_proj.weight)), v_proj_weight=torch.eye(*self.v_proj.weight.size(), out=torch.empty_like(self.v_proj.weight)), )
bart_ls-main
fairseq-py/fairseq/models/long_transformers/pooling_layer.py
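# --- Illustrative sketch (not part of the original file above) ---------------------
# PoolEncoderLayer pools the 0/1 padding mask with AvgPool1d(18, stride=12, padding=9,
# count_include_pad=False) and then floors the result, so a pooled key/value slot is
# treated as padding only when every real token it covers is padding. The toy sequence
# below (48 positions, second half padded; sizes are assumptions) verifies that a mixed
# window floors to 0 and stays attendable.
import torch
import torch.nn as nn

pool_mask = nn.AvgPool1d(18, stride=12, padding=9, count_include_pad=False)
pad = torch.zeros(1, 1, 48)       # (batch, channel, length); 1 = padding position
pad[..., 24:] = 1.0               # second half is padding
pooled = pool_mask(pad).floor()   # L_out = (48 + 2*9 - 18) // 12 + 1 = 5
assert pooled.squeeze().tolist() == [0.0, 0.0, 0.0, 1.0, 1.0]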
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Local attention functions from https://github.com/allenai/longformer/ """ import torch import torch.nn.functional as F from typing import Union from functools import lru_cache import math def get_slopes(n): def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if math.log2(n).is_integer(): return get_slopes_power_of_2(n) #In the paper, we only train models that have 2^a heads for some a. This function has else: #some good properties that only occur when the input is a power of 2. To maintain that even closest_power_of_2 = 2**math.floor(math.log2(n)) #when the number of heads is not a power of 2, we use this workaround. return get_slopes_power_of_2(closest_power_of_2) + get_slopes(2*closest_power_of_2)[0::2][:n-closest_power_of_2] def _get_invalid_locations_mask_fixed_dilation(seq_len: int, w: int, d: int): diagonals_list = [] for j in range(-d * w, d, d): diagonal_mask = torch.zeros(seq_len, device='cpu', dtype=torch.uint8) diagonal_mask[:-j] = 1 diagonals_list.append(diagonal_mask) return torch.stack(diagonals_list, dim=-1) @lru_cache() def _get_invalid_locations_mask(w: int, d: Union[torch.Tensor,int], autoregressive: bool, device: str): if isinstance(d, int): affected_seq_len = w * d mask = _get_invalid_locations_mask_fixed_dilation(affected_seq_len, w, d) mask = mask[None, :, None, :] else: affected_seq_len = w * d.max() head_masks = [] d_list = d.cpu().numpy().tolist() for d in d_list: one_head_mask = _get_invalid_locations_mask_fixed_dilation(affected_seq_len, w, d) head_masks.append(one_head_mask) mask = torch.stack(head_masks, dim=-2) mask = mask[None, :, :, :] ending_mask = None if autoregressive else mask.flip(dims=(1, 3)).bool().to(device) return affected_seq_len, mask.bool().to(device), ending_mask def mask_invalid_locations(input_tensor: torch.Tensor, w: int, d: Union[torch.Tensor, int], autoregressive: bool) -> torch.Tensor: # d: dilation? affected_seq_len, beginning_mask, ending_mask = _get_invalid_locations_mask(w, d, autoregressive, input_tensor.device) seq_len = input_tensor.size(1) beginning_input = input_tensor[:, :affected_seq_len, :, :w+1] beginning_mask = beginning_mask[:, :seq_len].expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask, -float('inf')) if not autoregressive: ending_input = input_tensor[:, -affected_seq_len:, :, -(w+1):] ending_mask = ending_mask[:, -seq_len:].expand(ending_input.size()) ending_input.masked_fill_(ending_mask, -float('inf')) def _skew(x, direction, padding_value): '''Convert diagonals into columns (or columns into diagonals depending on `direction`''' x_padded = F.pad(x, direction, value=padding_value) # bsz*num_heads x chunks x (2w+1) x 2w x_padded = x_padded.view(*x_padded.size()[:-2], x_padded.size(-1), x_padded.size(-2)) return x_padded def _skew2(x, padding_value): '''shift every row 1 step to right converting columns into diagonals''' # X = B x C x M x L B, C, M, L = x.size() x = F.pad(x, (0, M + 1), value=padding_value) # B x C x M x (L+M+1) x = x.view(B, C, -1) # B x C x ML+MM+M x = x[:, :, :-M] # B x C x ML+MM x = x.view(B, C, M, M + L) # B x C, M x L+M x = x[:, :, :, :-1] return x def _chunk(x, w): '''convert into overlapping chunkings. 
Chunk size = 2w, overlap size = w''' # non-overlapping chunks of size = 2w x = x.view(x.size(0), x.size(1) // (w * 2), w * 2, x.size(2)) # use `as_strided` to make the chunks overlap with an overlap size = w chunk_size = list(x.size()) chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride = list(x.stride()) # chunk_stride[1]: 512 x dim chunk_stride[1] = chunk_stride[1] // 2 return x.as_strided(size=chunk_size, stride=chunk_stride) def sliding_chunks_matmul_qk(q: torch.Tensor, k: torch.Tensor, w: int, padding_value: float): '''Matrix multiplicatio of query x key tensors using with a sliding window attention pattern. This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an overlap of size w''' bsz, seqlen, num_heads, head_dim = q.size() assert seqlen % (w * 2) == 0 assert q.size() == k.size() chunks_count = seqlen // w - 1 # group bsz and num_heads dimensions into one, then chunk seqlen into chunks of size w * 2 q = q.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim) k = k.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim) chunk_q = _chunk(q, w) # (B*H, num_of_overlapping_windows, head_dim) chunk_k = _chunk(k, w) # matrix multipication # bcxd: bsz*num_heads x chunks x 2w x head_dim # bcyd: bsz*num_heads x chunks x 2w x head_dim # bcxy: bsz*num_heads x chunks x 2w x 2w chunk_attn = torch.einsum('bcxd,bcyd->bcxy', (chunk_q, chunk_k)) # multiply # convert diagonals into columns diagonal_chunk_attn = _skew(chunk_attn, direction=(0, 0, 0, 1), padding_value=padding_value) # allocate space for the overall attention matrix where the chunks are compined. The last dimension # has (w * 2 + 1) columns. The first (w) columns are the w lower triangles (attention from a word to # w previous words). The following column is attention score from each word to itself, then # followed by w columns for the upper triangle. diagonal_attn = diagonal_chunk_attn.new_empty((bsz * num_heads, chunks_count + 1, w, w * 2 + 1)) # copy parts from diagonal_chunk_attn into the compined matrix of attentions # - copying the main diagonal and the upper triangle diagonal_attn[:, :-1, :, w:] = diagonal_chunk_attn[:, :, :w, :w + 1] diagonal_attn[:, -1, :, w:] = diagonal_chunk_attn[:, -1, w:, :w + 1] # Potential BUG: invalid attn weights # - copying the lower triangle diagonal_attn[:, 1:, :, :w] = diagonal_chunk_attn[:, :, - (w + 1):-1, w + 1:] diagonal_attn[:, 0, 1:w, 1:w] = diagonal_chunk_attn[:, 0, :w - 1, 1 - w:] # separate bsz and num_heads dimensions again diagonal_attn = diagonal_attn.view(bsz, num_heads, seqlen, 2 * w + 1).transpose(2, 1) mask_invalid_locations(diagonal_attn, w, 1, False) return diagonal_attn def sliding_chunks_matmul_pv(prob: torch.Tensor, v: torch.Tensor, w: int): '''Same as sliding_chunks_matmul_qk but for prob and value tensors. 
It is expecting the same output format from sliding_chunks_matmul_qk''' bsz, seqlen, num_heads, head_dim = v.size() assert seqlen % (w * 2) == 0 assert prob.size()[:3] == v.size()[:3] assert prob.size(3) == 2 * w + 1 chunks_count = seqlen // w - 1 # group bsz and num_heads dimensions into one, then chunk seqlen into chunks of size 2w chunk_prob = prob.transpose(1, 2).reshape(bsz * num_heads, seqlen // w, w, 2 * w + 1) # group bsz and num_heads dimensions into one v = v.transpose(1, 2).reshape(bsz * num_heads, seqlen, head_dim) # pad seqlen with w at the beginning of the sequence and another w at the end padded_v = F.pad(v, (0, 0, w, w), value=-1) # chunk padded_v into chunks of size 3w and an overlap of size w chunk_v_size = (bsz * num_heads, chunks_count + 1, 3 * w, head_dim) chunk_v_stride = padded_v.stride() chunk_v_stride = chunk_v_stride[0], w * chunk_v_stride[1], chunk_v_stride[1], chunk_v_stride[2] chunk_v = padded_v.as_strided(size=chunk_v_size, stride=chunk_v_stride) skewed_prob = _skew2(chunk_prob, padding_value=0) context = torch.einsum('bcwd,bcdh->bcwh', (skewed_prob, chunk_v)) return context.view(bsz, num_heads, seqlen, head_dim).transpose(1, 2) def pad_to_window_size(input_ids: torch.Tensor, attention_mask: torch.Tensor, one_sided_window_size: int, pad_token_id: int): '''A helper function to pad tokens and mask to work with the sliding_chunks implementation of Longformer selfattention. Input: input_ids = torch.Tensor(bsz x seqlen): ids of wordpieces attention_mask = torch.Tensor(bsz x seqlen): attention mask one_sided_window_size = int: window size on one side of each token pad_token_id = int: tokenizer.pad_token_id Returns (input_ids, attention_mask) padded to length divisible by 2 * one_sided_window_size ''' w = 2 * one_sided_window_size seqlen = input_ids.size(1) padding_len = (w - seqlen % w) % w input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id) attention_mask = F.pad(attention_mask, (0, padding_len), value=False) # no attention on the padding tokens return input_ids, attention_mask
bart_ls-main
fairseq-py/fairseq/models/long_transformers/utils.py
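# --- Illustrative sketch (not part of the original file above) ---------------------
# _chunk in utils.py builds overlapping chunks of size 2w with stride w via as_strided.
# The snippet below (toy sizes) reproduces that stride arithmetic and checks it against
# Tensor.unfold, which yields the same windows and is an easy way to sanity-check the
# view without touching storage twice.
import torch

bsz_heads, seqlen, head_dim, w = 2, 8, 4, 2
x = torch.arange(bsz_heads * seqlen * head_dim, dtype=torch.float32)
x = x.view(bsz_heads, seqlen, head_dim)

# _chunk-style strided view: (B*H, seqlen // w - 1, 2w, head_dim)
chunks = x.view(bsz_heads, seqlen // (2 * w), 2 * w, head_dim)
size = list(chunks.size()); size[1] = size[1] * 2 - 1
stride = list(chunks.stride()); stride[1] = stride[1] // 2
strided = chunks.as_strided(size=size, stride=stride)

# the same overlapping windows via unfold over the sequence dimension
unfolded = x.unfold(1, 2 * w, w).transpose(2, 3)   # (B*H, chunks, 2w, head_dim)

assert torch.equal(strided, unfolded)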
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Long-context model pretraining with fast blocksparse and extrapolation attentions """ from typing import Optional import logging import math import torch import torch.nn as nn from fairseq import utils, modules from fairseq.utils import safe_getattr from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel, TransformerConfig from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.models.transformer.transformer_config import TransformerConfig import copy from fairseq.models.roberta import RobertaEncoder logger = logging.getLogger(__name__) @register_model("loco") class LOCOModel(TransformerModel): __jit_unused_properties__ = ["supported_targets"] def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) generator_architecture(args) self.generator = RobertaEncoder(args, encoder.dictionary) if not self.args.train_generator: for p in self.generator.parameters(): p.requires_grad = False self.pad_idx = self.encoder.dictionary.pad() self.bos = self.encoder.dictionary.bos() self.sentinel_start_idx = self.encoder.dictionary.index("<sentinel_0>") self.sentinel_end_idx = len(self.encoder.dictionary) - 1 # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() if hasattr(self.encoder, "dictionary"): self.eos: int = self.encoder.dictionary.eos() @staticmethod def add_args(parser): super(LOCOModel, LOCOModel).add_args(parser) parser.add_argument( "--pooler-dropout", type=float, metavar="D", help="dropout probability in the masked_lm pooler layers", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use for pooler layer", ) parser.add_argument( "--finetune", action="store_true", help="different forwards used for pretraining and finetuning" ) parser.add_argument( "--train-generator", action="store_true", ) parser.add_argument( "--generator-xformer-config", type=str, metavar="D", ) parser.add_argument( "--generator-layers", type=int, ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): # use vanilla attention for now args.use_xformers = False # HACK, disable efficient attentions for cross-attention & decoder-side attention return super().build_decoder( TransformerConfig.from_namespace(args), tgt_dict, embed_tokens ) @classmethod def build_generator(cls, args, src_dict, embed_tokens): return super().build_encoder( TransformerConfig.from_namespace(args), src_dict, embed_tokens ) @property def supported_targets(self): return {"self"} def _sum_spans(self, input, starts, ends): """ sum the span values to the start of each span; zero out all other positions """ starts_before = torch.roll(starts, -1, -1) input_cumsum = torch.cumsum(input, dim=-1) input_cumsum[starts.bool()] = input_cumsum[ends.bool()] - input_cumsum[starts_before.bool()] sumed = input_cumsum * starts return sumed def _avg_spans(self, span_sum, span_lens, starts): span_sum[starts.bool()] = span_sum[starts.bool()] / span_lens[starts.bool()] return span_sum """ utils from T5's objective """ def _create_sentinels(self, mask_indices): """ mask_indices: binary mask start spans as sentinel ids and other masked positions as -1 """ start_indices = mask_indices - torch.roll(mask_indices, 1, -1) * 
mask_indices start_indices[:,0] = mask_indices[:,0] sentinel_ids = torch.where(start_indices != 0, torch.cumsum(start_indices, dim=-1), start_indices) assert sentinel_ids.max() + self.sentinel_start_idx - 1 <= self.sentinel_end_idx, (sentinel_ids.max() + self.sentinel_start_idx - 1, self.sentinel_end_idx, sentinel_ids) sentinel_ids = torch.where(sentinel_ids != 0, (sentinel_ids + self.sentinel_start_idx - 1), 0) sentinel_ids -= mask_indices - start_indices return sentinel_ids def _build_inputs(self, masked_input, span_mask): sentinel_ids = self._create_sentinels(span_mask) masked_input = torch.where(sentinel_ids != 0, sentinel_ids, masked_input) src_lens = (masked_input >= 0).sum(-1) # src_tokens padded to max_source_positions, useful for blocksparse attention src_tokens = masked_input.new_full((masked_input.size(0), self.cfg.max_source_positions), self.pad_idx) fill_indices = torch.arange(masked_input.size(-1)).to(masked_input) fill_indices = fill_indices < src_lens.unsqueeze(-1) assert fill_indices.sum() == (masked_input >= 0).sum() # = 0 for sequence starts src_tokens[:,:masked_input.size(-1)][fill_indices] = masked_input[masked_input >= 0] return src_tokens, src_lens def _build_targets(self, masked_target, span_mask, pad_mask, eos_mask): """ masked_targets: masked positions as their original token ids and other positions as pad index eos_mask: end of sequence as 0 pad_mask: padding positions as 0 """ unmasked_positions = ~span_mask.bool() unmasked_positions[:,0] = 0 sentinel_ids = self._create_sentinels(unmasked_positions.to(masked_target)) sentinel_ids *= pad_mask sentinel_ids *= eos_mask # target: masked positions with sentinel ids or -1; # bos, eos and padding positions with value 1 target = torch.where(sentinel_ids != 0, sentinel_ids, masked_target) target[~eos_mask] = self.eos target[:,0] = self.bos tgt_lens = (target.abs() != 1).sum(-1) tgt_tokens = target.new_full(target.size(), self.pad_idx) fill_indices = torch.arange(tgt_tokens.size(-1)).to(tgt_tokens) fill_indices = fill_indices < tgt_lens.unsqueeze(-1) tgt_tokens[fill_indices] = target[target.abs() != 1] tgt_tokens = tgt_tokens[:,:tgt_lens.max()] # truncating if needed if tgt_tokens.size(-1) > self.args.max_target_positions: end_positions = (tgt_tokens == self.eos).nonzero(as_tuple=True)[1] sample_exceeds = end_positions >= (self.args.max_target_positions - 1) tgt_tokens = torch.cat( [tgt_tokens[:,:self.args.max_target_positions-1], tgt_tokens[:,-1:]], dim=-1 ) tgt_tokens[:,-1] = torch.where(sample_exceeds, self.eos, tgt_tokens[:,-1]) decoder_input = tgt_tokens.clone() decoder_input[:,0] = self.eos decoder_input[:,1:] = tgt_tokens[:,:-1] return tgt_tokens, decoder_input def forward( self, src_tokens, src_lengths, prev_output_tokens: Optional[torch.Tensor] = None, features_only: bool = False, classification_head_name: Optional[str] = None, token_embeddings: Optional[torch.Tensor] = None, return_all_hiddens: bool = True, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, masked_unfiltered: Optional[torch.Tensor] = None, ): if classification_head_name is not None: features_only = True if not self.cfg.finetune: """ use an encoder-only model to build long-range objectives """ masked_tokens_unfiltered = masked_unfiltered.ne(self.pad_idx).to(src_tokens) # 1: the masked tokens (before hard sampling) src_tokens_for_mlm = copy.deepcopy(src_tokens) if self.cfg.train_generator: masked_logits = self.generator( src_tokens_for_mlm, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, )[0] token_loss = 
modules.cross_entropy( masked_logits.view(-1, masked_logits.size(-1)), masked_unfiltered.view(-1), reduction='none', ignore_index=self.pad_idx, ).view(masked_unfiltered.size()) masked_cnt = masked_tokens_unfiltered.sum() mlm_loss = token_loss.sum() / masked_cnt else: with torch.no_grad(): masked_logits = self.generator( src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, )[0] token_loss = modules.cross_entropy( masked_logits.view(-1, masked_logits.size(-1)), masked_unfiltered.view(-1), reduction='none', ignore_index=self.pad_idx).view(masked_unfiltered.size()) # 1 marking the span starts span_starts = masked_tokens_unfiltered - torch.roll(masked_tokens_unfiltered, 1, -1) * masked_tokens_unfiltered span_starts[:,0] = masked_tokens_unfiltered[:,0] span_ends = masked_tokens_unfiltered - torch.roll(masked_tokens_unfiltered, -1, -1) * masked_tokens_unfiltered span_ends[:,-1] = masked_tokens_unfiltered[:,-1] # span_starts: binary mask marking the start of each span # span_ends: binary mask marking the end of each span # span_lens: span lens calculated put at the starts of each span span_loss = self._sum_spans(token_loss, span_starts, span_ends) span_lens = self._sum_spans(masked_tokens_unfiltered, span_starts, span_ends) span_loss_avg = self._avg_spans(span_loss, span_lens, span_starts) # find the hard spans, i.e, topk largest loss span_counts = span_starts.sum(-1).min() hard_span_starts = span_loss_avg.topk(k=math.floor(span_counts*self.cfg.sample_ratio), dim=-1)[1] # bsz * num_hard_spans hard_span_ends = span_lens.gather(1, index=hard_span_starts) + hard_span_starts # masking source with only the hard spans row_idx = torch.arange(hard_span_starts.size(0)).unsqueeze(1).repeat(1, hard_span_starts.size(1)).to(hard_span_starts) hard_mask = span_starts.new_zeros(span_starts.size()) hard_mask[row_idx.view(-1), hard_span_starts.view(-1)] = 1 hard_mask[row_idx.view(-1), hard_span_ends.view(-1)] = 1 hard_mask = (hard_mask.cumsum(dim=-1) % 2) == 1 hard_mask = hard_mask.type_as(masked_tokens_unfiltered) # filter our easy span masks mask_off = torch.logical_xor(hard_mask, masked_tokens_unfiltered) src_tokens[mask_off] = masked_unfiltered[mask_off] # build the denoising targets src_pad_mask = src_tokens.ne(self.pad_idx) src_eos_mask = src_tokens.ne(self.eos) src_tokens, src_lengths = self._build_inputs(src_tokens, hard_mask) target, prev_output_tokens = self._build_targets(masked_unfiltered, hard_mask, src_pad_mask, src_eos_mask) encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, token_embeddings=token_embeddings, return_all_hiddens=return_all_hiddens ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) eos: int = self.eos if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(eos), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] for k, head in self.classification_heads.items(): # for torch script only supports iteration if k == classification_head_name: x = head(sentence_representation) break if not self.cfg.finetune: if self.cfg.train_generator: return x, target, mlm_loss, extra return x, target, extra return x, extra def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." 
if name != "" else "" current_head_names = ( [] if not hasattr(self, "classification_heads") else self.classification_heads.keys() ) # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + "classification_heads."): continue head_name = k[len(prefix + "classification_heads.") :].split(".")[0] num_classes = state_dict[ prefix + "classification_heads." + head_name + ".out_proj.weight" ].size(0) inner_dim = state_dict[ prefix + "classification_heads." + head_name + ".dense.weight" ].size(0) if getattr(self.args, "load_checkpoint_heads", False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( "deleting classification head ({}) from checkpoint " "not present in current model: {}".format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( "deleting classification head ({}) from checkpoint " "with different dimensions than current model: {}".format( head_name, k ) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] def truncate_emb(key): if key in state_dict: state_dict[key] = state_dict[key][:-1, :] # When finetuning on translation task, remove last row of # embedding matrix that corresponds to mask_idx token. loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0) if ( loaded_dict_size == len(self.encoder.dictionary) + 1 and "<mask>" not in self.encoder.dictionary ): truncate_emb("encoder.embed_tokens.weight") truncate_emb("decoder.embed_tokens.weight") truncate_emb("encoder.output_projection.weight") truncate_emb("decoder.output_projection.weight") # When continued pretraining on new set of languages for mbart, # add extra lang embeddings at the end of embed_tokens. # Note: newly added languages are assumed to have been added at the end. if self.args.task == "multilingual_denoising" and loaded_dict_size < len( self.encoder.dictionary ): logger.info( "Adding extra language embeddings not found in pretrained model for " "continued pretraining of MBART on new set of languages." ) loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][ -1, : ] num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size embed_dim = state_dict["encoder.embed_tokens.weight"].size(1) new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim) nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5) new_lang_embed_to_add = new_lang_embed_to_add.to( dtype=state_dict["encoder.embed_tokens.weight"].dtype, ) state_dict["encoder.embed_tokens.weight"] = torch.cat( [ state_dict["encoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) state_dict["decoder.embed_tokens.weight"] = torch.cat( [ state_dict["decoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, "classification_heads"): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + "classification_heads." + k not in state_dict: logger.info("Overwriting " + prefix + "classification_heads." + k) state_dict[prefix + "classification_heads." 
+ k] = v def generator_architecture(args): # options to use different sizes of generator models args.encoder_layers = safe_getattr(args, "generator_layers", 6) args.encoder_embed_dim = safe_getattr(args, "generator_embed_dim", 768) args.encoder_ffn_embed_dim = safe_getattr(args, "generator_ffn_embed_dim", 3072) args.encoder_attention_heads = safe_getattr(args, "generator_attention_heads", 12) args.max_positions = safe_getattr(args, "max_source_positions", 8192) args.encoder_learned_pos = safe_getattr(args, "generator_learned_pos", True) args.encoder_normalize_before = safe_getattr(args, "generator_normalize_before", False) args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False) # xformers config args.use_xformers = safe_getattr(args, "generator_use_xformers", True) args.attention_name = safe_getattr(args, "generator_attention_name", 'block_noglobal') args.xformer_config = safe_getattr(args, 'generator_xformer_config', '{"block_size": 512}') args.pooling_layers = safe_getattr(args, "generator_pooling_layers", 0) @register_model_architecture("loco", "loco_large") def loco_large_architecture(args): args.finetune = safe_getattr(args, "finetune", False) args.train_generator = safe_getattr(args, "train_generator", False) # # @xwhan is it necessary to put it here? had issues # def getattr(args, key, value): # return value args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.max_target_positions = safe_getattr(args, "max_target_positions", 1024) #hack args.max_source_positions = safe_getattr(args, "max_source_positions", 1024) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", True ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", True) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) 
@register_model_architecture("loco", "loco_base") def loco_base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12) loco_large_architecture(args) @register_model_architecture("loco", "loco_xlarge") def loco_xlarge_architecture(args): loco_large_architecture(args) args.encoder_layers = 24 args.decoder_layers = 24
bart_ls-main
fairseq-py/fairseq/models/long_transformers/loco_model.py
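# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the repository): the span bookkeeping used
# by the loco model's forward pass above. Given a binary mask of masked
# positions, span starts/ends are marked with torch.roll, per-span losses are
# summed at the span-start positions, and the hardest spans are picked by
# average token loss. `sum_spans` is a hypothetical stand-in for the model's
# own _sum_spans/_avg_spans helpers, which are defined elsewhere in the file
# and may differ in detail.
# ---------------------------------------------------------------------------
import torch


def span_boundaries(masked):
    # masked: (bsz, seq_len) 0/1 tensor, 1 marks a masked token
    starts = masked - torch.roll(masked, 1, -1) * masked
    starts[:, 0] = masked[:, 0]
    ends = masked - torch.roll(masked, -1, -1) * masked
    ends[:, -1] = masked[:, -1]
    return starts, ends


def sum_spans(values, starts, ends):
    # Sum `values` inside each contiguous span; the sum is written at the
    # span-start position, everything else stays zero.
    csum = values.cumsum(dim=-1)
    out = torch.zeros_like(values)
    b, s = starts.nonzero(as_tuple=True)
    _, e = ends.nonzero(as_tuple=True)
    prev = torch.where(s > 0, csum[b, s - 1], torch.zeros_like(csum[b, s]))
    out[b, s] = csum[b, e] - prev
    return out


if __name__ == "__main__":
    masked = torch.tensor([[0, 1, 1, 0, 1, 0]])           # two spans: [1,2] and [4]
    token_loss = torch.tensor([[0.0, 2.0, 4.0, 0.0, 1.0, 0.0]])
    starts, ends = span_boundaries(masked)
    span_loss = sum_spans(token_loss, starts, ends)       # [0, 6, 0, 0, 1, 0]
    span_lens = sum_spans(masked.float(), starts, ends)   # [0, 2, 0, 0, 1, 0]
    span_loss_avg = span_loss / span_lens.clamp(min=1)    # [0, 3, 0, 0, 1, 0]
    hard_start = span_loss_avg.topk(k=1, dim=-1)[1]       # position 1 is hardest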
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, Optional, Tuple import torch from typing import Dict, Optional from torch import Tensor, nn from fairseq.modules import ( TransformerEncoderLayer, MultiheadAttention, ) import torch.nn.functional as F from fairseq.models.transformer import TransformerEncoder from functools import partial, reduce from fairseq.distributed import fsdp_wrap from inspect import isfunction from operator import mul from fairseq.modules.checkpoint_activations import checkpoint_wrapper """ A hacky implementation of simple block attention transformer """ class BlockTransformerEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.args = args del self.layers self.layers = nn.ModuleList([self.build_sw_encoder_layer(args, args.window_size, self.padding_idx) for i in range(args.encoder_layers)]) self.num_layers = len(self.layers) def build_sw_encoder_layer(self, args, window_size, padding_idx): layer = BlockTransformerEncoderLayer(args, window_size, padding_idx) checkpoint = args.checkpoint_activations if checkpoint: offload_to_cpu = self.cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) min_params_to_wrap = self.cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.Tensor] = None, # @xwhan in order to add global mask ): return self.forward_scriptable( src_tokens, src_lengths, return_all_hiddens, token_embeddings, key_padding_mask ) class BlockTransformerEncoderLayer(TransformerEncoderLayer): def __init__(self, args, window_size, padding_idx): super().__init__(args) # replace self-attn self.window_size = window_size self.padding_idx = padding_idx self.self_attn = self.build_sw_self_attention(self.embed_dim, window_size, padding_idx, args) def forward( self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None, attn_bias: Optional[Tensor] = None, ): if attn_mask is not None: attn_mask = (attn_mask * -1e8).type_as(attn_mask) # -1 in attn_mask means global attention return super().forward(x, encoder_padding_mask, attn_mask=attn_mask) def build_sw_self_attention(self, embed_dim, window_size, padding_idx, args): return BlockSelfAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, window_size=window_size, padding_idx=padding_idx, ) class BlockSelfAttention(MultiheadAttention): def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, window_size=1024, padding_idx=1, ): super().__init__(embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention, q_noise, qn_block_size) self.window_size = window_size self.padding_idx = padding_idx self.drop_attn = self.dropout_module def forward(self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, 
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, attn_bias: Optional[Tensor] = None, ): seq_len, bsz, embed_dim = query.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == seq_len assert self.self_attention q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) q = ( q.contiguous() .view(seq_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) y = self.block_attn_forward(q, k, v, key_padding_mask=key_padding_mask) assert list(y.size()) == [bsz * self.num_heads, seq_len, self.head_dim], (y.size(), query.size(), q.size()) y = y.transpose(0, 1).contiguous().view(seq_len, bsz, embed_dim) y = self.out_proj(y) return y, None def block_attn_forward( self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, att_mask: Optional[torch.Tensor] = None, key_padding_mask: Optional[Tensor] = None, *args, **kwargs ): bh = q.size(0) orig_seq_len = q.size(1) bsz = bh // self.num_heads head_dim = q.size(-1) assert key_padding_mask is not None key_padding_mask = key_padding_mask.to(q) key_padding_mask[:,0] = -1 # pad the input length to factors of bucket size def _pad_to_window_size(x, window_size): seq_len = x.size(-2) pad_len = (window_size - seq_len % window_size) % window_size return F.pad(x, (0,0,0,pad_len), value=0), pad_len q, _ = _pad_to_window_size(q, self.window_size) k, _ = _pad_to_window_size(k, self.window_size) v, _ = _pad_to_window_size(v, self.window_size) if key_padding_mask.shape[1] % self.window_size != 0: pad_len = (self.window_size - key_padding_mask.shape[1] % self.window_size) % self.window_size # key padding mask: 1 means padding tokens key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1) # global attention tokens extra_attention_mask = key_padding_mask < 0 num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1) max_num_extra_indices_per_batch = num_extra_indices_per_batch.max() hard_mask = key_padding_mask == 1 if max_num_extra_indices_per_batch <= 0: extra_attention_mask = None else: extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True) zero_to_max_range = torch.arange(0, max_num_extra_indices_per_batch, device=extra_attention_mask.device) # mask indicating which values are actually going to be padding num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1) selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1) # 2) location of the non-padding values in the selected global attention selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True) # 3) location of the padding values in the selected global attention selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True) # every token attend to global tokens if extra_attention_mask is not None: selected_k = k.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, head_dim) k_splited = k.view(bsz, self.num_heads, -1, head_dim).transpose(1,2) q_splited = q.view(bsz, self.num_heads, -1, head_dim).transpose(1,2) v_splited = v.view(bsz, self.num_heads, -1, 
head_dim).transpose(1,2) selected_k[selection_padding_mask_nonzeros] = k_splited[extra_attention_mask_nonzeros] # (bsz, seq_len, num_heads, max_num_extra_indices_per_batch) selected_attn_weights = torch.einsum('blhd,bshd->blhs', (q_splited, selected_k)) * (head_dim ** -0.5) selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000 attn_weights_over_g_tokens = selected_attn_weights.transpose(1,2) selected_v = v.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, head_dim) selected_v[selection_padding_mask_nonzeros] = v_splited[extra_attention_mask_nonzeros] selected_v = selected_v.transpose(1,2).contiguous().view(bsz*self.num_heads, max_num_extra_indices_per_batch, head_dim) tgt_len = k.size(1) buckets = q.shape[1] // self.window_size b_q = bucket(buckets, q) b_k, b_v = map(partial(bucket, buckets), (k, v)) # BH * bct * n_b * D dots = torch.einsum('buie,buje->buij', b_q, b_k) * (head_dim ** -0.5) mask_value = -10000 # # mask # if key_padding_mask is not None: q_mask = default(key_padding_mask.eq(0), lambda: torch.ones((bsz, tgt_len), device=q.device).bool()) # 1 means not masking kv_mask = q_mask mq, mk = bucket(buckets, q_mask), bucket(buckets, kv_mask) # B * bkt * n_b expand_head_and_merge_into_batch = lambda x: merge_dims(0, 1, expand_dim(x.unsqueeze(1), 1, self.num_heads)) mq, mk = map(expand_head_and_merge_into_batch, (mq, mk)) # BH * bkt * n_b mask = mq[:, :, :, None] * mk[:, :, None, :] dots.masked_fill_(~mask, mask_value) del mask block_attn_weights = dots.view(bsz*self.num_heads, -1, self.window_size) if extra_attention_mask is not None: attn_weights_over_g_tokens = attn_weights_over_g_tokens.view(bsz*self.num_heads, -1, max_num_extra_indices_per_batch) all_attn = torch.cat([block_attn_weights, attn_weights_over_g_tokens], dim=-1) else: all_attn = block_attn_weights all_attn_probs = all_attn.softmax(dim=-1) all_attn_probs = self.drop_attn(all_attn_probs) C = 0 # calculate block attention block_attn_probs = all_attn_probs[:, :, :block_attn_weights.shape[-1]] block_attn_probs = block_attn_probs.view(bsz*self.num_heads, -1, self.window_size, self.window_size) C += block_attn_probs.matmul(b_v).view(bsz*self.num_heads, -1, head_dim) if extra_attention_mask is not None: attn_probs_over_g_tokens = all_attn_probs[:,:,-attn_weights_over_g_tokens.shape[-1]:] C += attn_probs_over_g_tokens.matmul(selected_v) # global tokens to attend all other tokens selected_q = q_splited.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, head_dim) selected_q[selection_padding_mask_nonzeros] = q_splited[extra_attention_mask_nonzeros] g2all_attn_weights = selected_q.transpose(1,2).matmul(k_splited.permute(0,2,3,1)) * (head_dim ** -0.5) g2all_attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000.0 if hard_mask is not None: g2all_attn_weights = g2all_attn_weights.masked_fill( hard_mask.unsqueeze(1).unsqueeze(2), -10000.0, ) g2all_attn_probs_float = F.softmax(g2all_attn_weights, dim=-1, dtype=torch.float32) g2all_attn_probs = self.drop_attn(g2all_attn_probs_float.type_as(g2all_attn_weights)) g2all_attn = g2all_attn_probs.matmul(v.view(bsz, self.num_heads, -1, head_dim)) # (batch_size, self.num_head, max_num_extra_indices_per_batch, head_dim) nonzero_global_attn = g2all_attn[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]] C = C.view(bsz, self.num_heads, -1, head_dim) C[extra_attention_mask_nonzeros[0],:,extra_attention_mask_nonzeros[1]] = nonzero_global_attn C = 
C.view(bsz*self.num_heads, -1, head_dim) C = C[:,:orig_seq_len] return C import math class BlockSWSelfAttention(BlockSelfAttention): def forward(self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, attn_bias: Optional[Tensor] = None, ): seq_len, bsz, embed_dim = query.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == seq_len assert self.self_attention q = self.q_proj(query) k = self.proj_pool(self.k_proj, key) v = self.proj_pool(self.v_proj, value) q = ( q.contiguous() .view(seq_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) y = self.block_attn_forward(q, k, v, key_padding_mask=key_padding_mask) assert list(y.size()) == [bsz * self.num_heads, seq_len, self.head_dim], (y.size(), query.size(), q.size()) y = y.transpose(0, 1).contiguous().view(seq_len, bsz, embed_dim) y = self.out_proj(y) return y, None def proj_pool(self,proj,input): input = proj(input) # Pool input = input.permute(1, 2 ,0) # T x B x C -> B x C x T input = F.avg_pool1d(input, kernel_size=18, stride=12, padding=9) input = input.permute(2, 0 ,1) # T_pool x B x C return input def block_attn_forward( self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, att_mask: Optional[torch.Tensor] = None, key_padding_mask: Optional[Tensor] = None, *args, **kwargs ): batch_size = q.shape[0] // self.num_heads sequence_length = q.shape[1] key_padding_mask = key_padding_mask.to(q) Q = q.view(batch_size, self.num_heads, -1, self.head_dim).mul(1./math.sqrt(self.head_dim)) K = k.view(batch_size, self.num_heads, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.embed_dim) V = v.view(batch_size, self.num_heads, -1, self.head_dim).transpose(1,2).reshape(batch_size, -1, self.embed_dim) # needs centain sequence length to make the block wise local attention work def _pad_to_window_size(x, window_size): seq_len = x.size(-2) pad_len = (window_size - seq_len % window_size) % window_size return F.pad(x, (0,0,0,pad_len), value=0), pad_len Q, _ = _pad_to_window_size(Q, self.window_size) K, _ = _pad_to_window_size(K, self.window_size) V, _ = _pad_to_window_size(V, self.window_size) if key_padding_mask.shape[1] % self.window_size != 0: pad_len = (self.window_size - key_padding_mask.shape[1] % self.window_size) % self.window_size # key padding mask: 1 means padding tokens key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_ones(key_padding_mask.size(0), pad_len).to(key_padding_mask)], dim=1) K = self.split_heads(K) # (B, H, seq_len, head_dim) V = self.split_heads(V) padding_mask = key_padding_mask != 0 # True means masked position win_attn_weights = self.sliding_chunks_matmul_qk_v2(Q, K, padding_mask) # bsz x num_heads x seqlen x 2winsize all_attn = win_attn_weights.float().softmax(dim=-1).to(win_attn_weights) hard_mask = key_padding_mask == 1 all_attn = all_attn.masked_fill(hard_mask[:,None,:,None], 0) all_attn = all_attn.to(q) all_attn = self.drop_attn(all_attn) win_attn_probs = all_attn[:,:,:,:win_attn_weights.shape[-1]] seq_len = win_attn_probs.shape[2] win_attn_probs = 
win_attn_probs.view(batch_size, self.num_heads, seq_len // self.window_size, self.window_size,-1) V_tiles = self.get_tiles_v2(V, transpose=False) outputs = win_attn_probs.matmul(V_tiles).view(batch_size, self.num_heads, seq_len, self.head_dim) # get rid of the padding positions outputs = outputs[:,:,:sequence_length].view(-1, sequence_length, self.head_dim) return outputs def split_heads(self, X): X = X.reshape(X.size(0), X.size(1), self.num_heads, self.head_dim) X = X.transpose(1, 2) return X def sliding_chunks_matmul_qk_v2(self, Q, K, padding_mask): bsz, num_heads, seqlen, d_h = Q.shape if self.window_size > 0: # Q, K: bsz x num_heads x seqlen x d_head # padding_mask: bsz x seqlen mask_tiles = self.get_tiled_mask_v2(padding_mask) K_tiles = self.get_tiles_v2(K, transpose=True) Q_tiles = Q.view(bsz, num_heads, seqlen // self.window_size, self.window_size, d_h) # bsz x num_heads x seqlen//winsize x winsize x 2winsize qk_scores = Q_tiles.matmul(K_tiles) qk_scores = qk_scores.masked_fill(mask_tiles, -10000) return qk_scores.view(bsz, num_heads, seqlen, -1) else: qk_scores = torch.sum(Q*K, dim=-1, keepdim=True) return qk_scores def get_tiled_mask_v2(self, mask): # only mask along the key dimension bsz, seqlen = mask.shape ext_len = max(self.window_size//2, 1) mask = F.pad(mask, (ext_len, ext_len), value=True) # (bsz, seq_len + 2*ext_len) out_shape = (bsz, seqlen//self.window_size, 2*ext_len + self.window_size) in_stride = mask.stride() out_stride = (in_stride[0], in_stride[1]*self.window_size, in_stride[1]) return mask.as_strided(size=out_shape, stride=out_stride)[:, None, :, None, :] def get_tiles_v2(self, x, transpose=False): if self.window_size <= 0: return x bsz, n_heads, seqlen, d_h = x.shape n_groups = seqlen // self.window_size ext_len = max(self.window_size//2, 1) x = F.pad(x, (0, 0, ext_len, ext_len), value=0) strides = x.stride() if transpose: out_shape = (bsz, n_heads, n_groups, d_h, 2 * ext_len + self.window_size) out_stride = (strides[0], strides[1], self.window_size * strides[2], strides[3], strides[2]) else: out_shape = (bsz, n_heads, n_groups, 2 * ext_len + self.window_size, d_h) out_stride = (strides[0], strides[1], self.window_size * strides[2], strides[2], strides[3]) return torch.as_strided(x, size=out_shape, stride=out_stride) def expand_dim(t, dim, k): expand_shape = [-1] * len(t.shape) expand_shape[dim] = k return t.expand(*expand_shape) def merge_dims(ind_from, ind_to, tensor): shape = list(tensor.shape) arr_slice = slice(ind_from, ind_to + 1) shape[arr_slice] = [reduce(mul, shape[arr_slice])] return tensor.reshape(*shape) def default(x, d): if x is None: return d if not isfunction(d) else d() return x def bucket(buckets, t, dim=1): shape = list(t.shape) shape[dim:dim+1] = [buckets, -1] return t.reshape(*shape) def unbucket(t, dim=1): shape = list(t.shape) shape[dim:dim+2] = [-1] return t.reshape(*shape)
bart_ls-main
fairseq-py/fairseq/models/long_transformers/block.py
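# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the repository): the bucketed "block"
# attention pattern used by BlockSelfAttention.block_attn_forward above.
# Sequences are padded to a multiple of the window size, reshaped into
# non-overlapping buckets, and attention is computed only within each bucket.
# Tensors below are random stand-ins with the (batch*heads, seq, head_dim)
# layout used by the module; global-token handling and masking are omitted.
# ---------------------------------------------------------------------------
import torch
import torch.nn.functional as F


def pad_to_window_size(x, window_size):
    seq_len = x.size(-2)
    pad_len = (window_size - seq_len % window_size) % window_size
    return F.pad(x, (0, 0, 0, pad_len), value=0), pad_len


def bucket(n_buckets, t, dim=1):
    shape = list(t.shape)
    shape[dim:dim + 1] = [n_buckets, -1]
    return t.reshape(*shape)


bh, seq_len, head_dim, window = 2, 1500, 64, 512
q, k, v = (torch.randn(bh, seq_len, head_dim) for _ in range(3))

q, _ = pad_to_window_size(q, window)
k, _ = pad_to_window_size(k, window)
v, _ = pad_to_window_size(v, window)

n_buckets = q.size(1) // window
b_q, b_k, b_v = (bucket(n_buckets, t) for t in (q, k, v))        # (bh, buckets, window, d)
dots = torch.einsum('buie,buje->buij', b_q, b_k) * head_dim ** -0.5
probs = dots.softmax(dim=-1)
out = probs.matmul(b_v).reshape(bh, -1, head_dim)[:, :seq_len]   # drop the padding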
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hub_interface import *  # noqa
from .model import *  # noqa
bart_ls-main
fairseq-py/fairseq/models/bart/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ from typing import Optional import logging import torch import torch.nn as nn from fairseq import utils from fairseq.utils import safe_getattr from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.models.transformer.transformer_config import TransformerConfig from .hub_interface import BARTHubInterface logger = logging.getLogger(__name__) @register_model("bart") class BARTModel(TransformerModel): __jit_unused_properties__ = ["supported_targets"] @classmethod def hub_models(cls): return { "bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz", "bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz", "bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz", "bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz", "bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz", } def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() if hasattr(self.encoder, "dictionary"): self.eos: int = self.encoder.dictionary.eos() @staticmethod def add_args(parser): super(BARTModel, BARTModel).add_args(parser) parser.add_argument( "--pooler-dropout", type=float, metavar="D", help="dropout probability in the masked_lm pooler layers", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use for pooler layer", ) parser.add_argument( "--spectral-norm-classification-head", action="store_true", help="Apply spectral normalization on the classification head", ) parser.add_argument( '--restrict-position-embed', action='store_true', default=False, help="do no extend the position embeddings" ) parser.add_argument( '--sliding-window', action='store_true', default=False, help="use sliding window attention as in longformer", ) parser.add_argument( '--block-attention', action='store_true', default=False, help="use block attention", ) parser.add_argument( '--top-down', action='store_true', default=False, help="use block attention", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): # use vanilla attention for now args.use_xformers = False return super().build_decoder( TransformerConfig.from_namespace(args), tgt_dict, embed_tokens ) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): # if args.sliding_window: # from fairseq.models.long_transformers.sliding_window import SWTransformerEncoder # args.attention_window = [512] # return SWTransformerEncoder(args, src_dict, embed_tokens) if args.block_attention: args.window_size = 1024 from fairseq.models.long_transformers.block import BlockTransformerEncoder return BlockTransformerEncoder(args, src_dict, embed_tokens) # if args.top_down: # args.window_size = 1024 # args.encoder_n1 = 8 # args.encoder_n2 = 2 # args.encoder_n3 = 4 # from fairseq.models.long_transformers.top_down import TopDownTransformerEncoder # return 
TopDownTransformerEncoder(args, src_dict, embed_tokens) return super().build_encoder( TransformerConfig.from_namespace(args), src_dict, embed_tokens ) def load_state_dict( self, state_dict, strict=True, model_cfg = None, args = None, ): # if self.args.top_down: # strict=False return super().load_state_dict(state_dict, strict, model_cfg, args) @property def supported_targets(self): return {"self"} def forward( self, src_tokens, src_lengths, prev_output_tokens, features_only: bool = False, classification_head_name: Optional[str] = None, token_embeddings: Optional[torch.Tensor] = None, return_all_hiddens: bool = True, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, masked_unfiltered: Optional[torch.Tensor] = None, ): if classification_head_name is not None: features_only = True encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, token_embeddings=token_embeddings, return_all_hiddens=return_all_hiddens ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) eos: int = self.eos if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(eos), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] for k, head in self.classification_heads.items(): # for torch script only supports iteration if k == classification_head_name: x = head(sentence_representation) break return x, extra @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="gpt2", sample_break_mode="eos", **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, sample_break_mode=sample_break_mode, **kwargs, ) return BARTHubInterface(x["args"], x["task"], x["models"][0]) def register_classification_head( self, name, num_classes=None, inner_dim=None, **kwargs ): """Register a classification head.""" logger.info("Registering classification head: {0}".format(name)) if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' "and inner_dim {} (prev: {})".format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = BARTClassificationHead( input_dim=self.args.encoder_embed_dim, inner_dim=inner_dim or self.args.encoder_embed_dim, num_classes=num_classes, activation_fn=self.args.pooler_activation_fn, pooler_dropout=self.args.pooler_dropout, do_spectral_norm=getattr( self.args, "spectral_norm_classification_head", False ), ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" current_head_names = ( [] if not hasattr(self, "classification_heads") else self.classification_heads.keys() ) # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + "classification_heads."): continue head_name = k[len(prefix + "classification_heads.") :].split(".")[0] num_classes = state_dict[ prefix + "classification_heads." 
+ head_name + ".out_proj.weight" ].size(0) inner_dim = state_dict[ prefix + "classification_heads." + head_name + ".dense.weight" ].size(0) if getattr(self.args, "load_checkpoint_heads", False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( "deleting classification head ({}) from checkpoint " "not present in current model: {}".format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( "deleting classification head ({}) from checkpoint " "with different dimensions than current model: {}".format( head_name, k ) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] def truncate_emb(key): if key in state_dict: state_dict[key] = state_dict[key][:-1, :] # When finetuning on translation task, remove last row of # embedding matrix that corresponds to mask_idx token. loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0) if ( loaded_dict_size == len(self.encoder.dictionary) + 1 and "<mask>" not in self.encoder.dictionary ): truncate_emb("encoder.embed_tokens.weight") truncate_emb("decoder.embed_tokens.weight") truncate_emb("encoder.output_projection.weight") truncate_emb("decoder.output_projection.weight") # When continued pretraining on new set of languages for mbart, # add extra lang embeddings at the end of embed_tokens. # Note: newly added languages are assumed to have been added at the end. if self.args.task == "multilingual_denoising" and loaded_dict_size < len( self.encoder.dictionary ): logger.info( "Adding extra language embeddings not found in pretrained model for " "continued pretraining of MBART on new set of languages." ) loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][ -1, : ] num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size embed_dim = state_dict["encoder.embed_tokens.weight"].size(1) new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim) nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5) new_lang_embed_to_add = new_lang_embed_to_add.to( dtype=state_dict["encoder.embed_tokens.weight"].dtype, ) state_dict["encoder.embed_tokens.weight"] = torch.cat( [ state_dict["encoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) state_dict["decoder.embed_tokens.weight"] = torch.cat( [ state_dict["decoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, "classification_heads"): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + "classification_heads." + k not in state_dict: logger.info("Overwriting " + prefix + "classification_heads." + k) state_dict[prefix + "classification_heads." 
+ k] = v class BARTClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, do_spectral_norm=False, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) if do_spectral_norm: self.out_proj = torch.nn.utils.spectral_norm(self.out_proj) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x def safe_getattr(obj, k, default=None): from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) @register_model_architecture("bart", "bart_large") def bart_large_architecture(args): # # @xwhan has to put it here due to a bug # def getattr(args, key, value): # return value args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.max_target_positions = safe_getattr(args, "max_target_positions", 1024) #hack args.max_source_positions = safe_getattr(args, "max_source_positions", 1024) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", True ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", True) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) @register_model_architecture("bart", "bart_prelayernorm") def bart_prelayernorm_architecture(args): def getattr(args, key, value): return value args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = 
getattr(args, "encoder_ffn_embed_dim", 4 * 1024) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.max_target_positions = safe_getattr(args, "max_target_positions", 1024) args.max_source_positions = safe_getattr(args, "max_source_positions", 1024) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", True ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) @register_model_architecture("bart", "bart_base") def bart_base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12) bart_large_architecture(args) @register_model_architecture("bart", "bart_xlarge") def bart_base_architecture(args): bart_large_architecture(args) args.encoder_layers = 24 args.decoder_layers = 24 @register_model_architecture("bart", "bart_slarge") def bart_base_architecture(args): bart_large_architecture(args) args.encoder_layers = 16 args.decoder_layers = 16 @register_model_architecture("bart", "mbart_large") def mbart_large_architecture(args): args.no_scale_embedding = getattr(args, "no_scale_embedding", False) bart_large_architecture(args) @register_model_architecture("bart", "mbart_base") def mbart_base_architecture(args): args.no_scale_embedding = getattr(args, "no_scale_embedding", False) bart_base_architecture(args) @register_model_architecture("bart", "mbart_base_wmt20") def mbart_base_wmt20_architecture(args): args.layernorm_embedding = getattr(args, "layernorm_embedding", False) mbart_base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/bart/model.py
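# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the repository): how BARTModel.forward
# above pools a sentence representation for a classification head -- it takes
# the decoder features at the EOS positions and keeps the last EOS per
# sequence. Toy tensors below; every row must contain the same number of EOS
# tokens for the view() to work, as in the model code.
# ---------------------------------------------------------------------------
import torch

bsz, seq_len, dim, eos = 2, 6, 4, 2
x = torch.randn(bsz, seq_len, dim)                 # decoder output features
src_tokens = torch.tensor([[0, 5, 7, 2, 9, 2],
                           [0, 4, 2, 8, 6, 2]])    # eos (= 2) appears twice per row

sentence_representation = x[
    src_tokens.eq(eos), :
].view(x.size(0), -1, x.size(-1))[:, -1, :]        # features at the final EOS
assert sentence_representation.shape == (bsz, dim)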
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import logging from typing import Dict, List import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import encoders from fairseq.hub_utils import GeneratorHubInterface from omegaconf import open_dict logger = logging.getLogger(__name__) class BARTHubInterface(GeneratorHubInterface): """A simple PyTorch Hub interface to BART. Usage: https://github.com/pytorch/fairseq/tree/main/examples/bart """ def __init__(self, cfg, task, model): super().__init__(cfg, task, [model]) self.model = self.models[0] def encode( self, sentence: str, *addl_sentences, no_separator=True ) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`). Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> bart.encode('Hello world').tolist() [0, 31414, 232, 2] >>> bart.encode(' world').tolist() [0, 232, 2] >>> bart.encode('world').tolist() [0, 8331, 2] """ tokens = self.bpe.encode(sentence) # if len(tokens.split(" ")) > min(self.max_positions) - 2: if len(tokens.split(" ")) > self.max_positions[0] - 2: # tokens = " ".join(tokens.split(" ")[: min(self.max_positions) - 2]) tokens = " ".join(tokens.split(" ")[: self.max_positions[0] - 2]) bpe_sentence = "<s> " + tokens + " </s>" for s in addl_sentences: bpe_sentence += " </s>" if not no_separator else "" bpe_sentence += " " + self.bpe.encode(s) + " </s>" tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.cpu().numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = tokens == self.task.source_dictionary.eos() doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [ self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences ] if len(sentences) == 1: return sentences[0] return sentences def _build_sample(self, src_tokens: List[torch.LongTensor]): # assert torch.is_tensor(src_tokens) dataset = self.task.build_dataset_for_inference( src_tokens, [x.numel() for x in src_tokens], ) sample = dataset.collater(dataset) sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample) return sample def generate( self, tokenized_sentences: List[torch.LongTensor], *args, inference_step_args=None, skip_invalid_size_inputs=False, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: inference_step_args = inference_step_args or {} if "prefix_tokens" in inference_step_args: raise NotImplementedError("prefix generation not implemented for BART") res = [] # inference_step_args["prefix_tokens"] = src_tokens.new_full( # (src_tokens.size(0), 1), fill_value=self.task.source_dictionary.bos() # ).to(device=self.device) results = super().generate( tokenized_sentences, *args, inference_step_args=inference_step_args, skip_invalid_size_inputs=skip_invalid_size_inputs, **kwargs ) # for id, hypos in zip(batch['id'].tolist(), results): # res.append((id, hypos)) # res = [hypos for _, hypos in sorted(res, 
key=lambda x: x[0])] # return res return results def extract_features( self, tokens: torch.LongTensor, return_all_hiddens: bool = False ) -> torch.Tensor: if tokens.dim() == 1: tokens = tokens.unsqueeze(0) if tokens.size(-1) > min(self.model.max_positions()): raise ValueError( "tokens exceeds maximum length: {} > {}".format( tokens.size(-1), self.model.max_positions() ) ) tokens.to(device=self.device), prev_output_tokens = tokens.clone() prev_output_tokens[:, 0] = tokens.gather( 1, (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1), ).squeeze() prev_output_tokens[:, 1:] = tokens[:, :-1] features, extra = self.model( src_tokens=tokens, src_lengths=None, prev_output_tokens=prev_output_tokens, features_only=True, return_all_hiddens=return_all_hiddens, ) if return_all_hiddens: # convert from T x B x C -> B x T x C inner_states = extra["inner_states"] return [inner_state.transpose(0, 1) for inner_state in inner_states] else: return features # just the last layer's features def register_classification_head( self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs ): self.model.register_classification_head( name, num_classes=num_classes, embedding_size=embedding_size, **kwargs ) def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): if tokens.dim() == 1: tokens = tokens.unsqueeze(0) features = self.extract_features(tokens.to(device=self.device)) sentence_representation = features[ tokens.eq(self.task.source_dictionary.eos()), : ].view(features.size(0), -1, features.size(-1))[:, -1, :] logits = self.model.classification_heads[head](sentence_representation) if return_logits: return logits return F.log_softmax(logits, dim=-1) def fill_mask( self, masked_inputs: List[str], topk: int = 5, match_source_len: bool = True, **generate_kwargs ): masked_token = '<mask>' batch_tokens = [] for masked_input in masked_inputs: assert masked_token in masked_input, \ "please add one {} token for the input".format(masked_token) text_spans = masked_input.split(masked_token) text_spans_bpe = (' {0} '.format(masked_token)).join( [self.bpe.encode(text_span.rstrip()) for text_span in text_spans] ).strip() tokens = self.task.source_dictionary.encode_line( '<s> ' + text_spans_bpe + ' </s>', append_eos=False, add_if_not_exist=False, ).long() batch_tokens.append(tokens) # ensure beam size is at least as big as topk generate_kwargs['beam'] = max( topk, generate_kwargs.get('beam', -1), ) generate_kwargs['match_source_len'] = match_source_len batch_hypos = self.generate(batch_tokens, **generate_kwargs) return [ [(self.decode(hypo['tokens']), hypo['score']) for hypo in hypos[:topk]] for hypos in batch_hypos ]
bart_ls-main
fairseq-py/fairseq/models/bart/hub_interface.py
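# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the repository) for the hub
# interface defined above. The checkpoint directory is a placeholder; the
# calls mirror BARTModel.from_pretrained and the BARTHubInterface methods
# (encode/decode/fill_mask) shown in the two files above, and behavior may
# differ in this long-sequence fork.
# ---------------------------------------------------------------------------
import torch
from fairseq.models.bart import BARTModel

bart = BARTModel.from_pretrained(
    '/path/to/bart.large',            # placeholder: dir containing model.pt + dict
    checkpoint_file='model.pt',
    data_name_or_path='/path/to/bart.large',
)
bart.eval()

tokens = bart.encode('Hello world!')              # <s> ... </s>, GPT-2 BPE ids
assert tokens[0] == bart.task.source_dictionary.bos()
print(bart.decode(tokens))                        # 'Hello world!'

# fill_mask expects a <mask> token in each input and returns (text, score)
# pairs for the top-k hypotheses.
with torch.no_grad():
    print(bart.fill_mask(['The cat <mask> on the mat.'], topk=3))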
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .wav2vec import *  # noqa
from .wav2vec2 import *  # noqa
from .wav2vec2_asr import *  # noqa
bart_ls-main
fairseq-py/fairseq/models/wav2vec/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace import contextlib import copy import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from dataclasses import dataclass, field from omegaconf import MISSING, II, open_dict from typing import Any, Optional from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks import FairseqTask from fairseq.models import ( BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, ) from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES from fairseq.modules import ( LayerNorm, PositionalEmbedding, TransformerDecoderLayer, ) @dataclass class Wav2Vec2AsrConfig(FairseqDataclass): w2v_path: str = field( default=MISSING, metadata={"help": "path to wav2vec 2.0 model"} ) no_pretrained_weights: bool = field( default=False, metadata={"help": "if true, does not load pretrained weights"} ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) final_dropout: float = field( default=0.0, metadata={"help": "dropout after transformer and before final projection"}, ) dropout: float = field( default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"} ) attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside wav2vec 2.0 model" }, ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside wav2vec 2.0 model" }, ) conv_feature_layers: Optional[str] = field( default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]", metadata={ "help": ( "string describing convolutional feature extraction " "layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" ), }, ) encoder_embed_dim: Optional[int] = field( default=768, metadata={"help": "encoder embedding dimension"} ) # masking apply_mask: bool = field( default=False, metadata={"help": "apply masking during fine-tuning"} ) mask_length: int = field( default=10, metadata={"help": "repeat the mask indices multiple times"} ) mask_prob: float = field( default=0.5, metadata={ "help": "probability of replacing a token with mask (normalized by length)" }, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose masks"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: Optional[int] = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"} ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"} ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask 
argument (used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"} ) freeze_finetune_updates: int = field( default=0, metadata={"help": "dont finetune wav2vec for this many updates"} ) feature_grad_mult: float = field( default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"} ) layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"} ) mask_channel_min_space: Optional[int] = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) mask_channel_before: bool = False normalize: bool = II("task.normalize") data: str = II("task.data") # this holds the loaded wav2vec args w2v_args: Any = None @dataclass class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig): blank_weight: float = 0 blank_mode: str = "add" @register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig) class Wav2VecCtc(BaseFairseqModel): def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel): super().__init__() self.cfg = cfg self.w2v_encoder = w2v_encoder self.blank_weight = cfg.blank_weight self.blank_mode = cfg.blank_mode def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask): """Build a new model instance.""" w2v_encoder = Wav2VecEncoder(cfg, len(task.target_dictionary)) return cls(cfg, w2v_encoder) def get_logits(self, net_output, normalize=False): logits = net_output["encoder_out"] if self.blank_weight != 0: if self.blank_mode == "add": logits[..., 0] += self.blank_weight elif self.blank_mode == "set": logits[..., 0] = self.blank_weight else: raise Exception(f"invalid blank mode {self.blank_mode}") if net_output["padding_mask"] is not None and net_output["padding_mask"].any(): logits[net_output["padding_mask"].T][..., 0] = float("inf") logits[net_output["padding_mask"].T][..., 1:] = float("-inf") if normalize: logits = utils.log_softmax(logits.float(), dim=-1) return logits def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = self.get_logits(net_output) if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x @dataclass class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig): decoder_embed_dim: int = field( default=768, metadata={"help": "decoder embedding dimension"} ) decoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"}) decoder_layerdrop: float = field( default=0.0, metadata={"help": "decoder layerdrop chance"} ) decoder_attention_heads: int = field( default=4, metadata={"help": "num decoder attention heads"} ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"} ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings (outside self attention)" }, ) decoder_dropout: float = field( default=0.0, metadata={"help": "dropout probability in the decoder"} ) 
decoder_attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside the decoder" }, ) decoder_activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside the decoder" }, ) max_target_positions: int = field( default=2048, metadata={"help": "max target positions"} ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) autoregressive: bool = II("task.autoregressive") @register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig) class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask): """Build a new model instance.""" assert ( cfg.autoregressive ), "Please set task.autoregressive=true for seq2seq asr models" src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) return emb decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim) encoder = cls.build_encoder(cfg) decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens) return Wav2Vec2Seq2SeqModel(encoder, decoder) @classmethod def build_encoder(cls, cfg: Wav2Vec2AsrConfig): return Wav2VecEncoder(cfg) @classmethod def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens): return TransformerDecoder(cfg, tgt_dict, embed_tokens) def forward(self, **kwargs): encoder_out = self.encoder(**kwargs) decoder_out = self.decoder(encoder_out=encoder_out, **kwargs) return decoder_out def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict class Wav2VecEncoder(FairseqEncoder): def __init__(self, cfg: Wav2Vec2AsrConfig, output_size=None): self.apply_mask = cfg.apply_mask arg_overrides = { "dropout": cfg.dropout, "activation_dropout": cfg.activation_dropout, "dropout_input": cfg.dropout_input, "attention_dropout": cfg.attention_dropout, "mask_length": cfg.mask_length, "mask_prob": cfg.mask_prob, "mask_selection": cfg.mask_selection, "mask_other": cfg.mask_other, "no_mask_overlap": cfg.no_mask_overlap, "mask_channel_length": cfg.mask_channel_length, "mask_channel_prob": cfg.mask_channel_prob, "mask_channel_before": cfg.mask_channel_before, "mask_channel_selection": cfg.mask_channel_selection, "mask_channel_other": cfg.mask_channel_other, "no_mask_channel_overlap": cfg.no_mask_channel_overlap, "encoder_layerdrop": cfg.layerdrop, "feature_grad_mult": cfg.feature_grad_mult, } if cfg.w2v_args is None: state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides) w2v_args = state.get("cfg", None) if w2v_args is None: w2v_args = convert_namespace_to_omegaconf(state["args"]) w2v_args.criterion = None w2v_args.lr_scheduler = None cfg.w2v_args = w2v_args else: state = None w2v_args = cfg.w2v_args if isinstance(w2v_args, Namespace): cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args) assert cfg.normalize == w2v_args.task.normalize, ( "Fine-tuning works best when data normalization is the same. 
" "Please check that --normalize is set or unset for both pre-training and here" ) w2v_args.task.data = cfg.data task = tasks.setup_task(w2v_args.task) model = task.build_model(w2v_args.model) if state is not None and not cfg.no_pretrained_weights: model.load_state_dict(state["model"], strict=True) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.model.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(cfg.final_dropout) self.freeze_finetune_updates = cfg.freeze_finetune_updates self.num_updates = 0 targ_d = None self.proj = None if output_size is not None: targ_d = output_size elif getattr(cfg, "decoder_embed_dim", d) != d: targ_d = cfg.decoder_embed_dim if targ_d is not None: self.proj = Linear(d, targ_d) def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): res = self.w2v_model.extract_features(**w2v_args) x = res["x"] padding_mask = res["padding_mask"] # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "padding_mask": padding_mask, # B x T, "layer_results": res["layer_results"], } def forward_torchscript(self, net_input): if torch.jit.is_scripting(): return self.forward(net_input["source"], net_input["padding_mask"]) else: return self.forward_non_torchscript(net_input) def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["padding_mask"] is not None: encoder_out["padding_mask"] = encoder_out[ "padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__( self, cfg: Wav2Vec2Seq2SeqConfig, dictionary, embed_tokens, no_encoder_attn=False, ): super().__init__(dictionary) self.dropout = cfg.decoder_dropout self.share_input_output_embed = cfg.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = cfg.decoder_embed_dim self.output_embed_dim = cfg.decoder_embed_dim self.layerdrop = cfg.decoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.max_target_positions = cfg.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( cfg.max_target_positions, embed_dim, self.padding_idx, learned=cfg.decoder_learned_pos, ) if not cfg.no_token_positional_embeddings else None ) # TODO: update this when transformer gets converted to dataclass configs transformer_cfg = copy.deepcopy(cfg) with open_dict(transformer_cfg): transformer_cfg.dropout = transformer_cfg.decoder_dropout transformer_cfg.attention_dropout = ( transformer_cfg.decoder_attention_dropout ) transformer_cfg.activation_dropout = ( transformer_cfg.decoder_activation_dropout ) self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerDecoderLayer(transformer_cfg, no_encoder_attn) for _ in range(transformer_cfg.decoder_layers) ] ) if not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), self.output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if transformer_cfg.decoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ prev_output_tokens = prev_output_tokens.long() x, extra = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers self_attn_padding_mask = None if prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) for layer in self.layers: dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, attn, _ = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["padding_mask"] if encoder_out is not None else None, incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, self_attn_padding_mask=self_attn_padding_mask ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x, {"attn": attn, "inner_states": inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
bart_ls-main
fairseq-py/fairseq/models/wav2vec/wav2vec2_asr.py
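
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of wav2vec2_asr.py above: Wav2VecEncoder.forward
# freezes the pretrained wav2vec 2.0 trunk for the first
# `freeze_finetune_updates` steps by conditionally entering torch.no_grad()
# (contextlib.ExitStack() acts as a no-op context manager once fine-tuning
# starts). ToyEncoder and FREEZE_UPDATES below are made-up names used only to
# demonstrate that control flow in isolation.
import contextlib

import torch
import torch.nn as nn

FREEZE_UPDATES = 3  # hypothetical stand-in for cfg.freeze_finetune_updates


class ToyEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)

    def forward(self, x):
        return self.proj(x)


encoder = ToyEncoder()
x = torch.randn(2, 8)

for num_updates in range(5):
    ft = FREEZE_UPDATES <= num_updates  # same test as in Wav2VecEncoder.forward
    with torch.no_grad() if not ft else contextlib.ExitStack():
        out = encoder(x)
    # Gradients are tracked only after the freeze window has passed.
    print(num_updates, "requires_grad:", out.requires_grad)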
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import List, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data.data_utils import compute_mask_indices from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GradMultiply, GumbelVectorQuantizer, LayerNorm, MultiheadAttention, SamePad, TransposeLast, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import buffered_arange, index_put, is_xla_tensor EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"]) MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"]) @dataclass class Wav2Vec2Config(FairseqDataclass): extractor_mode: EXTRACTOR_MODE_CHOICES = field( default="default", metadata={ "help": "mode for feature extractor. default has a single group norm with d " "groups in the first conv block, whereas layer_norm has layer norms in " "every block (meant to use with normalize=True)" }, ) encoder_layers: int = field( default=12, metadata={"help": "num encoder layers in the transformer"} ) encoder_embed_dim: int = field( default=768, metadata={"help": "encoder embedding dimension"} ) encoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "encoder embedding dimension for FFN"} ) encoder_attention_heads: int = field( default=12, metadata={"help": "num encoder attention heads"} ) activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="gelu", metadata={"help": "activation function to use"} ) # dropouts dropout: float = field( default=0.1, metadata={"help": "dropout probability for the transformer"} ) attention_dropout: float = field( default=0.1, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"} ) encoder_layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"} ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) dropout_features: float = field( default=0.0, metadata={"help": "dropout to apply to the features (after feat extr)"}, ) final_dim: int = field( default=0, metadata={ "help": "project final representations and targets to this many dimensions." 
"set to encoder_embed_dim is <= 0" }, ) layer_norm_first: bool = field( default=False, metadata={"help": "apply layernorm first in the transformer"} ) conv_feature_layers: str = field( default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]", metadata={ "help": "string describing convolutional feature extraction layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" }, ) conv_bias: bool = field( default=False, metadata={"help": "include bias in conv encoder"} ) logit_temp: float = field( default=0.1, metadata={"help": "temperature to divide logits by"} ) quantize_targets: bool = field( default=False, metadata={"help": "use quantized targets"} ) quantize_input: bool = field( default=False, metadata={"help": "use quantized inputs"} ) same_quantizer: bool = field( default=False, metadata={"help": "use same quantizer for inputs and targets"} ) target_glu: bool = field( default=False, metadata={"help": "adds projection + glu to targets"} ) feature_grad_mult: float = field( default=1.0, metadata={"help": "multiply feature extractor var grads by this"} ) quantizer_depth: int = field( default=1, metadata={"help": "number of quantizer layers"}, ) quantizer_factor: int = field( default=3, metadata={ "help": "dimensionality increase for inner quantizer layers (if depth > 1)" }, ) latent_vars: int = field( default=320, metadata={"help": "number of latent variables V in each group of the codebook"}, ) latent_groups: int = field( default=2, metadata={"help": "number of groups G of latent variables in the codebook"}, ) latent_dim: int = field( default=0, metadata={ "help": "if > 0, uses this dimensionality for latent variables. " "otherwise uses final_dim / latent_groups" }, ) # masking mask_length: int = field(default=10, metadata={"help": "mask length"}) mask_prob: float = field( default=0.65, metadata={"help": "probability of replacing a token with mask"} ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"} ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"} ) mask_channel_before: bool = False mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"} ) mask_channel_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # negative selection num_negatives: int = field( default=100, metadata={"help": "number of negative examples from the same sample"}, ) negatives_from_everywhere: bool = field( default=False, metadata={"help": "sample negatives from everywhere, not just masked states"}, ) 
cross_sample_negatives: int = field( default=0, metadata={"help": "number of negative examples from the any sample"} ) codebook_negatives: int = field( default=0, metadata={"help": "number of negative examples codebook"} ) # positional embeddings conv_pos: int = field( default=128, metadata={"help": "number of filters for convolutional positional embeddings"}, ) conv_pos_groups: int = field( default=16, metadata={"help": "number of groups for convolutional positional embedding"}, ) latent_temp: Tuple[float, float, float] = field( default=(2, 0.5, 0.999995), metadata={ "help": "temperature for latent variable sampling. " "can be tuple of 3 values (start, end, decay)" }, ) @register_model("wav2vec2", dataclass=Wav2Vec2Config) class Wav2Vec2Model(BaseFairseqModel): def __init__(self, cfg: Wav2Vec2Config): super().__init__() self.cfg = cfg feature_enc_layers = eval(cfg.conv_feature_layers) self.embed = feature_enc_layers[-1][0] self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, conv_bias=cfg.conv_bias, ) self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input else None ) self.mask_prob = cfg.mask_prob self.mask_selection = cfg.mask_selection self.mask_other = cfg.mask_other self.mask_length = cfg.mask_length self.no_mask_overlap = cfg.no_mask_overlap self.mask_min_space = cfg.mask_min_space self.mask_channel_prob = cfg.mask_channel_prob self.mask_channel_before = cfg.mask_channel_before self.mask_channel_selection = cfg.mask_channel_selection self.mask_channel_other = cfg.mask_channel_other self.mask_channel_length = cfg.mask_channel_length self.no_mask_channel_overlap = cfg.no_mask_channel_overlap self.mask_channel_min_space = cfg.mask_channel_min_space self.dropout_input = nn.Dropout(cfg.dropout_input) self.dropout_features = nn.Dropout(cfg.dropout_features) self.feature_grad_mult = cfg.feature_grad_mult self.quantizer = None self.input_quantizer = None self.n_negatives = cfg.num_negatives self.cross_sample_negatives = cfg.cross_sample_negatives self.codebook_negatives = cfg.codebook_negatives self.negatives_from_everywhere = cfg.negatives_from_everywhere self.logit_temp = cfg.logit_temp final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim if cfg.quantize_targets: vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim self.quantizer = GumbelVectorQuantizer( dim=self.embed, num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor, ) self.project_q = nn.Linear(vq_dim, final_dim) else: self.project_q = nn.Linear(self.embed, final_dim) if cfg.quantize_input: if cfg.same_quantizer and self.quantizer is not None: vq_dim = final_dim self.input_quantizer = self.quantizer else: vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim self.input_quantizer = GumbelVectorQuantizer( dim=self.embed, num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor, ) self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim) self.mask_emb = nn.Parameter( torch.FloatTensor(cfg.encoder_embed_dim).uniform_() ) self.encoder = TransformerEncoder(cfg) self.layer_norm = LayerNorm(self.embed) self.target_glu = None if cfg.target_glu: 
self.target_glu = nn.Sequential( nn.Linear(final_dim, final_dim * 2), nn.GLU() ) self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict @classmethod def build_model(cls, cfg: Wav2Vec2Config, task=None): """Build a new model instance.""" return cls(cfg) def apply_mask( self, x, padding_mask, mask_indices=None, mask_channel_indices=None, ): B, T, C = x.shape if self.mask_channel_prob > 0 and self.mask_channel_before: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x[mask_channel_indices] = 0 if self.mask_prob > 0: if mask_indices is None: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, ) mask_indices = torch.from_numpy(mask_indices).to(x.device) x = index_put(x, mask_indices, self.mask_emb) else: mask_indices = None if self.mask_channel_prob > 0 and not self.mask_channel_before: if mask_channel_indices is None: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x = index_put(x, mask_channel_indices, 0) return x, mask_indices def sample_negatives(self, y, num, padding_count=None): if self.n_negatives == 0 and self.cross_sample_negatives == 0: return y.new(0) bsz, tsz, fsz = y.shape y = y.view(-1, fsz) # BTC => (BxT)C # FIXME: what happens if padding_count is specified? 
cross_high = tsz * bsz high = tsz - (padding_count or 0) with torch.no_grad(): assert high > 1, f"{bsz,tsz,fsz}" if self.n_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * num) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * num), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: for i in range(1, bsz): neg_idxs[i] += i * high else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[neg_idxs.view(-1)] negs = negs.view( bsz, num, self.n_negatives + self.cross_sample_negatives, fsz ).permute( 2, 0, 1, 3 ) # to NxBxTxC return negs, neg_idxs def compute_preds(self, x, y, negatives): neg_is_pos = (y == negatives).all(-1) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x) logits = logits / self.logit_temp if is_xla_tensor(logits) or neg_is_pos.any(): fillval = -float(2 ** 30) if not hasattr(self, "_inftensor"): self._inftensor = ( torch.tensor(fillval).to(x.device) if is_xla_tensor(logits) else float("-inf") ) logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor) return logits def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): return torch.floor((input_length - kernel_size) / stride + 1) conv_cfg_list = eval(self.cfg.conv_feature_layers) for i in range(len(conv_cfg_list)): input_lengths = _conv_out_length( input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2] ) return input_lengths.to(torch.long) def forward( self, source, padding_mask=None, mask=True, features_only=False, layer=None, mask_indices=None, mask_channel_indices=None, padding_count=None, ): if self.feature_grad_mult > 0: features = self.feature_extractor(source) if self.feature_grad_mult != 1.0: features = GradMultiply.apply(features, self.feature_grad_mult) else: with torch.no_grad(): features = self.feature_extractor(source) features_pen = features.float().pow(2).mean() features = features.transpose(1, 2) features = self.layer_norm(features) unmasked_features = features.clone() if padding_mask is not None and padding_mask.any(): input_lengths = (1 - padding_mask.long()).sum(-1) # apply conv formula to get real output_lengths output_lengths = self._get_feat_extract_output_lengths(input_lengths) padding_mask = torch.zeros( features.shape[:2], dtype=features.dtype, device=features.device ) # these two operations makes sure that all values # before the output lengths indices are attended to padding_mask[ ( torch.arange(padding_mask.shape[0], device=padding_mask.device), output_lengths - 1, ) ] = 1 padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool() else: padding_mask = None if self.post_extract_proj is not None: features = self.post_extract_proj(features) features = self.dropout_input(features) unmasked_features = self.dropout_features(unmasked_features) num_vars = None code_ppl = None prob_ppl = None curr_temp = None if self.input_quantizer: q = self.input_quantizer(features, produce_targets=False) 
features = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] features = self.project_inp(features) if mask: x, mask_indices = self.apply_mask( features, padding_mask, mask_indices=mask_indices, mask_channel_indices=mask_channel_indices, ) if not is_xla_tensor(x) and mask_indices is not None: # tpu-comment: reducing the size in a dynamic way causes # too many recompilations on xla. y = unmasked_features[mask_indices].view( unmasked_features.size(0), -1, unmasked_features.size(-1) ) else: y = unmasked_features else: x = features y = unmasked_features mask_indices = None x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer) if features_only: return { "x": x, "padding_mask": padding_mask, "features": unmasked_features, "layer_results": layer_results, } if self.quantizer: q = self.quantizer(y, produce_targets=False) y = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] y = self.project_q(y) if self.negatives_from_everywhere: neg_cands = self.quantizer(unmasked_features, produce_targets=False)[ "x" ] negs, _ = self.sample_negatives( neg_cands, y.size(1), padding_count=padding_count, ) negs = self.project_q(negs) else: negs, _ = self.sample_negatives( y, y.size(1), padding_count=padding_count, ) if self.codebook_negatives > 0: cb_negs = self.quantizer.sample_from_codebook( y.size(0) * y.size(1), self.codebook_negatives ) cb_negs = cb_negs.view( self.codebook_negatives, y.size(0), y.size(1), -1 ) # order doesnt matter cb_negs = self.project_q(cb_negs) negs = torch.cat([negs, cb_negs], dim=0) else: y = self.project_q(y) if self.negatives_from_everywhere: negs, _ = self.sample_negatives( unmasked_features, y.size(1), padding_count=padding_count, ) negs = self.project_q(negs) else: negs, _ = self.sample_negatives( y, y.size(1), padding_count=padding_count, ) if not is_xla_tensor(x): # tpu-comment: reducing the size in a dynamic way causes # too many recompilations on xla. 
x = x[mask_indices].view(x.size(0), -1, x.size(-1)) if self.target_glu: y = self.target_glu(y) negs = self.target_glu(negs) x = self.final_proj(x) x = self.compute_preds(x, y, negs) result = { "x": x, "padding_mask": padding_mask, "features_pen": features_pen, } if prob_ppl is not None: result["prob_perplexity"] = prob_ppl result["code_perplexity"] = code_ppl result["num_vars"] = num_vars result["temp"] = curr_temp return result def quantize(self, x): assert self.quantizer is not None x = self.feature_extractor(x) x = x.transpose(1, 2) x = self.layer_norm(x) return self.quantizer.forward_idx(x) def extract_features(self, source, padding_mask, mask=False, layer=None): res = self.forward( source, padding_mask, mask=mask, features_only=True, layer=layer ) return res def get_logits(self, net_output): logits = net_output["x"] logits = logits.transpose(0, 2) logits = logits.reshape(-1, logits.size(-1)) return logits def get_targets(self, sample, net_output, expand_steps=True): x = net_output["x"] return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long) def get_extra_losses(self, net_output): pen = [] if "prob_perplexity" in net_output: pen.append( (net_output["num_vars"] - net_output["prob_perplexity"]) / net_output["num_vars"] ) if "features_pen" in net_output: pen.append(net_output["features_pen"]) return pen def remove_pretraining_modules(self): self.quantizer = None self.project_q = None self.target_glu = None self.final_proj = None class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers: List[Tuple[int, int, int]], dropout: float = 0.0, mode: str = "default", conv_bias: bool = False, ): super().__init__() assert mode in {"default", "layer_norm"} def block( n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False, ): def make_conv(): conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias) nn.init.kaiming_normal_(conv.weight) return conv assert ( is_layer_norm and is_group_norm ) == False, "layer norm and group norm are exclusive" if is_layer_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=True), TransposeLast(), ), nn.GELU(), ) elif is_group_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), Fp32GroupNorm(dim, dim, affine=True), nn.GELU(), ) else: return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU()) in_d = 1 self.conv_layers = nn.ModuleList() for i, cl in enumerate(conv_layers): assert len(cl) == 3, "invalid conv definition: " + str(cl) (dim, k, stride) = cl self.conv_layers.append( block( in_d, dim, k, stride, is_layer_norm=mode == "layer_norm", is_group_norm=mode == "default" and i == 0, conv_bias=conv_bias, ) ) in_d = dim def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: x = conv(x) return x class TransformerEncoder(nn.Module): def __init__(self, args): super().__init__() self.dropout = args.dropout self.embedding_dim = args.encoder_embed_dim self.pos_conv = nn.Conv1d( self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=args.conv_pos // 2, groups=args.conv_pos_groups, ) dropout = 0 std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)) nn.init.normal_(self.pos_conv.weight, mean=0, std=std) nn.init.constant_(self.pos_conv.bias, 0) self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2) self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU()) self.layers = nn.ModuleList( [ 
TransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first, ) for _ in range(args.encoder_layers) ] ) self.layer_norm_first = args.layer_norm_first self.layer_norm = LayerNorm(self.embedding_dim) self.layerdrop = args.encoder_layerdrop self.apply(init_bert_params) def forward(self, x, padding_mask=None, layer=None): x, layer_results = self.extract_features(x, padding_mask, layer) if self.layer_norm_first and layer is None: x = self.layer_norm(x) return x, layer_results def extract_features(self, x, padding_mask=None, tgt_layer=None): if padding_mask is not None: x = index_put(x, padding_mask, 0) x_conv = self.pos_conv(x.transpose(1, 2)) x_conv = x_conv.transpose(1, 2) x = x + x_conv if not self.layer_norm_first: x = self.layer_norm(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) layer_results = [] r = None for i, layer in enumerate(self.layers): dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False) if tgt_layer is not None: layer_results.append((x, z)) if i == tgt_layer: r = x break if r is not None: x = r # T x B x C -> B x T x C x = x.transpose(0, 1) return x, layer_results def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__( self, embedding_dim: float = 768, ffn_embedding_dim: float = 3072, num_attention_heads: float = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", layer_norm_first: bool = False, ) -> None: super().__init__() # Initialize parameters self.embedding_dim = embedding_dim self.dropout = dropout self.activation_dropout = activation_dropout # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = MultiheadAttention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, ) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(self.activation_dropout) self.dropout3 = nn.Dropout(dropout) self.layer_norm_first = layer_norm_first # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim) self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim) self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim) def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, att_args=None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer imlementation. 
""" residual = x if self.layer_norm_first: x = self.self_attn_layer_norm(x) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, attn_mask=self_attn_mask, ) x = self.dropout1(x) x = residual + x residual = x x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x else: x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, ) x = self.dropout1(x) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x x = self.final_layer_norm(x) return x, attn
bart_ls-main
fairseq-py/fairseq/models/wav2vec/wav2vec2.py
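
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of wav2vec2.py above: how the
# conv_feature_layers string from Wav2Vec2Config maps raw-waveform lengths to
# feature-frame lengths, mirroring Wav2Vec2Model._get_feat_extract_output_lengths.
# The layer spec is the config default quoted above; only torch is required.
import torch

CONV_FEATURE_LAYERS = "[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]"


def feat_extract_output_lengths(input_lengths: torch.Tensor) -> torch.Tensor:
    # Standard no-padding conv length formula, applied layer by layer.
    for _, kernel_size, stride in eval(CONV_FEATURE_LAYERS):
        input_lengths = torch.floor((input_lengths - kernel_size) / stride + 1)
    return input_lengths.to(torch.long)


# One second of 16 kHz audio yields 49 feature frames, i.e. a ~20 ms hop.
print(feat_extract_output_lengths(torch.tensor([16000.0])))  # tensor([49])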
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import logging import math from typing import Optional, Tuple from omegaconf import II import sys import torch import torch.nn as nn import torch.nn.functional as F from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GumbelVectorQuantizer, KmeansVectorQuantizer, TransposeLast, ) from fairseq.tasks import FairseqTask from fairseq.utils import buffered_arange logger = logging.getLogger(__name__) AGGREGATOR_CHOICES = ChoiceEnum(["cnn", "gru"]) PROJECT_FEATURES_CHOICES = ChoiceEnum(["none", "same", "new"]) ACTIVATION_CHOICES = ChoiceEnum(["relu", "gelu"]) VQ_TYPE_CHOICES = ChoiceEnum(["none", "gumbel", "kmeans"]) @dataclass class Wav2VecConfig(FairseqDataclass): prediction_steps: int = field( default=12, metadata={"help": "number of steps ahead to predict"} ) sample_distance: Optional[int] = field( default=None, metadata={ "help": "sample distance from target. does not work properly with cross-sampling" }, ) cross_sample_negatives: int = field( default=0, metadata={"help": "num of cross sampled negatives"} ) num_negatives: int = field( default=10, metadata={"help": "num of sampled negatives"} ) conv_feature_layers: str = field( default="[(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)]", metadata={ "help": "convolutional feature extraction layers [(dim, kernel_size, stride), ...]" }, ) conv_aggregator_layers: str = field( default="[(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)]", metadata={ "help": "convolutional aggregator layers [(dim, kernel_size, stride), ...]" }, ) dropout: float = field( default=0.0, metadata={"help": "dropout to apply within the model"} ) dropout_features: float = field( default=0.0, metadata={"help": "dropout to apply to the features"} ) dropout_agg: float = field( default=0.0, metadata={"help": "dropout to apply after aggregation step"} ) aggregator: AGGREGATOR_CHOICES = field( default="cnn", metadata={"help": "type of aggregator to use"} ) gru_dim: int = field(default=512, metadata={"help": "GRU dimensionality"}) no_conv_bias: bool = field( default=False, metadata={"help": "if set, does not learn bias for conv layers"} ) agg_zero_pad: bool = field( default=False, metadata={"help": "if set, zero pads in aggregator instead of repl pad"}, ) skip_connections_feat: bool = field( default=False, metadata={"help": "if set, adds skip connections to the feature extractor"}, ) skip_connections_agg: bool = field( default=True, metadata={"help": "if set, adds skip connections to the aggregator"}, ) residual_scale: float = field( default=0.5, metadata={"help": "scales residual by sqrt(value)"} ) log_compression: bool = field( default=True, metadata={"help": "if set, adds a log compression to feature extractor"}, ) balanced_classes: bool = field( default=False, metadata={"help": "if set, loss is scaled to balance for number of negatives"}, ) project_features: PROJECT_FEATURES_CHOICES = field( default="none", metadata={ "help": "if not none, features are projected using the (same or new) aggregator" }, ) non_affine_group_norm: bool = field( default=False, metadata={"help": "if 
set, group norm is not affine"} ) offset: str = field( default="auto", metadata={ "help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value" }, ) activation: ACTIVATION_CHOICES = field( default="relu", metadata={ "help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value" }, ) vq_type: VQ_TYPE_CHOICES = field( default="none", metadata={"help": "which type of quantizer to use"} ) vq_vars: int = field( default=320, metadata={"help": "project to this many vector quantized variables per group"}, ) vq_groups: int = field( default=2, metadata={"help": "number of groups of latent variables"} ) vq_dim: int = field( default=0, metadata={ "help": "uses this dimensionality for quantized vectors. 0 to use model dim // groups" }, ) vq_depth: int = field( default=1, metadata={"help": "number of layers for vq weight projection"} ) combine_groups: bool = field( default=False, metadata={"help": "if set, variables are shared among groups"} ) vq_temp: Tuple[float, float, float] = field( default=(2.0, 0.5, 0.999995), metadata={ "help": "temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)" }, ) vq_gamma: float = field( default=0.25, metadata={"help": "gamma parameter for kmeans style vector quantization"}, ) infonce: bool = II("criterion.infonce") @register_model("wav2vec", dataclass=Wav2VecConfig) class Wav2VecModel(BaseFairseqModel): @classmethod def build_model(cls, cfg: Wav2VecConfig, task: FairseqTask): """Build a new model instance.""" model = Wav2VecModel(cfg) logger.info(model) return model def __init__(self, cfg: Wav2VecConfig): super().__init__() self.prediction_steps = cfg.prediction_steps offset = cfg.offset if cfg.activation == "relu": activation = nn.ReLU() elif cfg.activation == "gelu": activation = nn.GELU() else: raise Exception("unknown activation " + cfg.activation) feature_enc_layers = eval(cfg.conv_feature_layers) self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, log_compression=cfg.log_compression, skip_connections=cfg.skip_connections_feat, residual_scale=cfg.residual_scale, non_affine_group_norm=cfg.non_affine_group_norm, activation=activation, ) embed = feature_enc_layers[-1][0] self.vector_quantizer = None if cfg.vq_type == "gumbel": self.vector_quantizer = GumbelVectorQuantizer( dim=embed, num_vars=cfg.vq_vars, temp=cfg.vq_temp, groups=cfg.vq_groups, combine_groups=cfg.combine_groups, vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed, time_first=False, activation=activation, weight_proj_depth=cfg.vq_depth, weight_proj_factor=2, ) elif cfg.vq_type == "kmeans": self.vector_quantizer = KmeansVectorQuantizer( dim=embed, num_vars=cfg.vq_vars, groups=cfg.vq_groups, combine_groups=cfg.combine_groups, vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed, time_first=False, gamma=cfg.vq_gamma, ) else: assert ( cfg.vq_type == "none" or cfg.vq_type is None ), "Unknown quantizer type" if cfg.offset == "auto": jin = 0 rin = 0 for _, k, stride in feature_enc_layers: if rin == 0: rin = k rin = rin + (k - 1) * jin if jin == 0: jin = stride else: jin *= stride offset = math.ceil(rin / jin) offset = int(offset) def make_aggregator(): if cfg.aggregator == "cnn": agg_layers = eval(cfg.conv_aggregator_layers) agg_dim = agg_layers[-1][0] feature_aggregator = ConvAggegator( conv_layers=agg_layers, embed=embed, dropout=cfg.dropout, skip_connections=cfg.skip_connections_agg, residual_scale=cfg.residual_scale, 
non_affine_group_norm=cfg.non_affine_group_norm, conv_bias=not cfg.no_conv_bias, zero_pad=cfg.agg_zero_pad, activation=activation, ) elif cfg.aggregator == "gru": agg_dim = cfg.gru_dim feature_aggregator = nn.Sequential( TransposeLast(), nn.GRU( input_size=embed, hidden_size=agg_dim, num_layers=1, dropout=cfg.dropout, ), TransposeLast(deconstruct_idx=0), ) else: raise Exception("unknown aggregator type " + cfg.aggregator) return feature_aggregator, agg_dim self.feature_aggregator, agg_dim = make_aggregator() self.wav2vec_predictions = Wav2VecPredictionsModel( in_dim=agg_dim, out_dim=embed, prediction_steps=cfg.prediction_steps, n_negatives=cfg.num_negatives, cross_sample_negatives=cfg.cross_sample_negatives, sample_distance=cfg.sample_distance, dropout=cfg.dropout, offset=offset, balanced_classes=cfg.balanced_classes, infonce=cfg.infonce, ) self.dropout_feats = nn.Dropout(p=cfg.dropout_features) self.dropout_agg = nn.Dropout(p=cfg.dropout_agg) if cfg.project_features == "none": self.project_features = None elif cfg.project_features == "same": self.project_features = self.feature_aggregator elif cfg.project_features == "new": self.project_features, _ = make_aggregator() def forward(self, source): result = {} features = self.feature_extractor(source) if self.vector_quantizer: q_res = self.vector_quantizer(features) features = q_res["x"] for k in q_res.keys(): if k != "x": result[k] = q_res[k] x = self.dropout_feats(features) x = self.feature_aggregator(x) x = self.dropout_agg(x) if self.project_features is not None: features = self.project_features(features) x, targets = self.wav2vec_predictions(x, features) result["cpc_logits"] = x result["cpc_targets"] = targets return result def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) def max_positions(self): """Maximum length supported by the model.""" return sys.maxsize def get_logits(self, net_output): logits = net_output["cpc_logits"] return logits def get_targets(self, sample, net_output): t = net_output["cpc_targets"] if isinstance(t, tuple): t = t[0] return t.contiguous() def get_target_weights(self, targets, net_output): targets = net_output["cpc_targets"] if isinstance(targets, tuple) and targets[-1] is not None: return targets[-1] return None def get_extra_losses(self, net_output): loss = None if "prob_perplexity" in net_output: loss = net_output["num_vars"] - net_output["prob_perplexity"] elif "kmeans_loss" in net_output: loss = net_output["kmeans_loss"] return loss def norm_block(is_layer_norm, dim, affine=True): if is_layer_norm: mod = nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=affine), TransposeLast(), ) else: mod = Fp32GroupNorm(1, dim, affine=affine) return mod class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers, dropout, log_compression, skip_connections, residual_scale, non_affine_group_norm, activation, ): super().__init__() def block(n_in, n_out, k, stride): return nn.Sequential( nn.Conv1d(n_in, n_out, k, stride=stride, bias=False), nn.Dropout(p=dropout), norm_block( is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm ), activation, ) in_d = 1 self.conv_layers = nn.ModuleList() for dim, k, stride in conv_layers: self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.log_compression = log_compression self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: residual = x x = conv(x) if 
self.skip_connections and x.size(1) == residual.size(1): tsz = x.size(2) r_tsz = residual.size(2) residual = residual[..., :: r_tsz // tsz][..., :tsz] x = (x + residual) * self.residual_scale if self.log_compression: x = x.abs() x = x + 1 x = x.log() return x class ZeroPad1d(nn.Module): def __init__(self, pad_left, pad_right): super().__init__() self.pad_left = pad_left self.pad_right = pad_right def forward(self, x): return F.pad(x, (self.pad_left, self.pad_right)) class ConvAggegator(nn.Module): def __init__( self, conv_layers, embed, dropout, skip_connections, residual_scale, non_affine_group_norm, conv_bias, zero_pad, activation, ): super().__init__() def block(n_in, n_out, k, stride): # padding dims only really make sense for stride = 1 ka = k // 2 kb = ka - 1 if k % 2 == 0 else ka pad = ( ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0)) ) return nn.Sequential( pad, nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias), nn.Dropout(p=dropout), norm_block(False, n_out, affine=not non_affine_group_norm), activation, ) in_d = embed self.conv_layers = nn.ModuleList() self.residual_proj = nn.ModuleList() for dim, k, stride in conv_layers: if in_d != dim and skip_connections: self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False)) else: self.residual_proj.append(None) self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.conv_layers = nn.Sequential(*self.conv_layers) self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): for rproj, conv in zip(self.residual_proj, self.conv_layers): residual = x x = conv(x) if self.skip_connections: if rproj is not None: residual = rproj(residual) x = (x + residual) * self.residual_scale return x class Wav2VecPredictionsModel(nn.Module): def __init__( self, in_dim, out_dim, prediction_steps, n_negatives, cross_sample_negatives, sample_distance, dropout, offset, balanced_classes, infonce, ): super().__init__() self.n_negatives = n_negatives self.cross_sample_negatives = cross_sample_negatives self.sample_distance = sample_distance self.project_to_steps = nn.ConvTranspose2d( in_dim, out_dim, (1, prediction_steps) ) self.dropout = nn.Dropout(p=dropout) self.offset = offset self.balanced_classes = balanced_classes self.infonce = infonce def sample_negatives(self, y): bsz, fsz, tsz = y.shape y = y.transpose(0, 1) # BCT -> CBT y = y.contiguous().view(fsz, -1) # CBT => C(BxT) cross_high = tsz * bsz high = tsz if self.sample_distance is None else min(tsz, self.sample_distance) assert high > 1 neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz)) with torch.no_grad(): if self.n_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * tsz) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * tsz), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: for i in range(1, bsz): neg_idxs[i] += i * high else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[..., neg_idxs.view(-1)] negs = negs.view( fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz ).permute( 2, 1, 0, 3 ) # to NxBxCxT 
return negs def forward(self, x, y): x = x.unsqueeze(-1) x = self.project_to_steps(x) # BxCxTxS x = self.dropout(x) negatives = self.sample_negatives(y) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T copies = targets.size(0) bsz, dim, tsz, steps = x.shape steps = min(steps, tsz - self.offset) predictions = x.new( bsz * copies * (tsz - self.offset + 1) * steps - ((steps + 1) * steps // 2) * copies * bsz ) if self.infonce: labels = predictions.new_full( (predictions.shape[0] // copies,), 0, dtype=torch.long ) else: labels = torch.zeros_like(predictions) weights = ( torch.full_like(labels, 1 / self.n_negatives) if self.balanced_classes and not self.infonce else None ) start = end = 0 for i in range(steps): offset = i + self.offset end = start + (tsz - offset) * bsz * copies if self.infonce: predictions[start:end] = torch.einsum( "bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:] ).flatten() else: pos_num = (end - start) // copies predictions[start:end] = torch.einsum( "bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:] ).flatten() labels[start : start + pos_num] = 1.0 if weights is not None: weights[start : start + pos_num] = 1.0 start = end assert end == predictions.numel(), "{} != {}".format(end, predictions.numel()) if self.infonce: predictions = predictions.view(-1, copies) else: if weights is not None: labels = (labels, weights) return predictions, labels
bart_ls-main
fairseq-py/fairseq/models/wav2vec/wav2vec.py
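
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of wav2vec.py above: the "shift by one"
# indexing used in Wav2VecPredictionsModel.sample_negatives (and its wav2vec
# 2.0 counterpart) to draw negatives uniformly over all time steps except the
# positive one. Indices are sampled from [0, T - 1) and every draw at or above
# the target position is bumped up by one, so the target can never be chosen.
# T and N_NEGATIVES are toy values; torch.arange stands in for buffered_arange.
import torch

T = 6            # time steps in one utterance
N_NEGATIVES = 4  # negatives drawn per time step

# Positive index repeated once per negative slot, flattened as in the model.
tszs = torch.arange(T).unsqueeze(-1).expand(-1, N_NEGATIVES).flatten()

neg_idxs = torch.randint(low=0, high=T - 1, size=(T * N_NEGATIVES,))
neg_idxs[neg_idxs >= tszs] += 1

# No negative ever coincides with its own positive time step.
assert not (neg_idxs == tszs).any()
print(neg_idxs.view(T, N_NEGATIVES))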
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import torch.nn as nn import fairseq.checkpoint_utils from fairseq.models import ( FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerDecoder from fairseq.models.roberta import model as roberta logger = logging.getLogger(__name__) @register_model("roberta_enc_dec") class RobertaEncDecModel(FairseqEncoderDecoderModel): @staticmethod def add_args(parser): parser.add_argument( "--pretrained-mlm-checkpoint", default=None, type=str, metavar="PRETRAINED", help="path to pretrained mlm checkpoint", ) parser.add_argument( "--pretrained-decoder", action="store_true", help="reload decoder" ) parser.add_argument( "--hack-layernorm-embedding", action="store_true", help="hack to reload old models trained with encoder-normalize-before=False (no equivalent to encoder-normalize-before=False and layernorm_embedding=False", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--share-all-embeddings", action="store_true", help="share encoder, decoder and output embeddings" " (requires shared dictionary and embed dim)", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_enc_dec_architecture(args) if args.pretrained_mlm_checkpoint: arg_overrides = None if args.hack_layernorm_embedding: arg_overrides = {"layernorm_embedding": False} loaded = fairseq.checkpoint_utils.load_model_ensemble_and_task( [args.pretrained_mlm_checkpoint], arg_overrides=arg_overrides ) ([roberta_enc], _cfg, _task) = loaded else: # Do we need to edit untie_weights here ? share_in_out = ( args.share_decoder_input_output_embed or args.share_all_embeddings ) args.untie_weights_roberta = not share_in_out if args.hack_layernorm_embedding: args.layernorm_embedding = False args.encoder_normalize_before = False roberta_enc = roberta.RobertaModel.build_model(args, task) return cls.from_roberta(roberta_enc, args, task.source_dictionary) @staticmethod def from_roberta(roberta_enc: roberta.RobertaModel, args, dictionary): encoder = roberta_enc.encoder.sentence_encoder vocab_size, embed_dim = encoder.embed_tokens.weight.shape if args.share_all_embeddings: lm_head = roberta_enc.encoder.lm_head assert encoder.embed_tokens.weight is lm_head.weight, ( "Can't use --share-all-embeddings with a model " "that was pretraiend with --untie-weights-roberta_enc" ) else: lm_head = roberta.RobertaLMHead( embed_dim, vocab_size, roberta_enc.args.activation_fn ) dec_embs = nn.Embedding(vocab_size, embed_dim, dictionary.pad()) if args.share_all_embeddings or args.share_decoder_input_output_embed: # Note: I wasn't able to use Embedding _weight parameter to achive this sharing. dec_embs.weight = lm_head.weight decoder = TransformerDecoder( RobertaEncDecModel.read_args_from_roberta(roberta_enc.args), dictionary, dec_embs, no_encoder_attn=False, output_projection=lm_head, ) if getattr(args, "pretrained_decoder", False): decoder_dict = encoder.state_dict() # TODO: hide setting "encoder_attn" layers behind a flag. 
for k, w in list(decoder_dict.items()): if ".self_attn" in k: k_enc_attn = k.replace(".self_attn", ".encoder_attn") decoder_dict[k_enc_attn] = w.detach().clone() for k, w in lm_head.state_dict().items(): decoder_dict["output_projection." + k] = w missing_keys, unexpected_keys = decoder.load_state_dict( decoder_dict, strict=False ) # missing_keys = [m for m in missing_keys if ".encoder_attn" not in m] assert not missing_keys and not unexpected_keys, ( "Failed to load state dict. " f"Missing keys: {missing_keys}. " f"Unexpected keys: {unexpected_keys}." ) if args.share_all_embeddings: assert decoder.output_projection.weight is decoder.embed_tokens.weight assert encoder.embed_tokens.weight is decoder.embed_tokens.weight elif args.share_decoder_input_output_embed: assert decoder.output_projection.weight is decoder.embed_tokens.weight assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight else: assert decoder.output_projection.weight is not decoder.embed_tokens.weight assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight return RobertaEncDecModel(encoder, decoder) @staticmethod def read_args_from_roberta(roberta_args: argparse.Namespace): # TODO: this would become easier if encoder/decoder where using a similar # TransformerConfig object args = argparse.Namespace(**vars(roberta_args)) attr_map = [ ("encoder_attention_heads", "decoder_attention_heads"), ("encoder_embed_dim", "decoder_embed_dim"), ("encoder_embed_dim", "decoder_output_dim"), ("encoder_normalize_before", "decoder_normalize_before"), ("encoder_layers_to_keep", "decoder_layers_to_keep"), ("encoder_ffn_embed_dim", "decoder_ffn_embed_dim"), ("encoder_layerdrop", "decoder_layerdrop"), ("encoder_layers", "decoder_layers"), ("encoder_learned_pos", "decoder_learned_pos"), # should this be set from here ? ("max_positions", "max_target_positions"), ] for k1, k2 in attr_map: setattr(args, k2, getattr(roberta_args, k1)) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = not roberta_args.untie_weights_roberta return args def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" super().upgrade_state_dict_named(state_dict, name) old_keys = list(state_dict.keys()) # rename decoder -> encoder before upgrading children modules for k in old_keys: if k.startswith(prefix + "encoder.lm_head"): state_dict.pop(k) continue new_k = k new_k = new_k.replace(".sentence_encoder.", ".") new_k = new_k.replace("decoder.lm_head.", "decoder.output_projection.") if k == new_k: continue # print(k, "->", new_k) state_dict[new_k] = state_dict.pop(k) @register_model_architecture("roberta_enc_dec", "roberta_enc_dec") def base_enc_dec_architecture(args): args.hack_layernorm_embedding = getattr(args, "hack_layernorm_embedding", False) args.pretrained_mlm_checkpoint = getattr(args, "pretrained_mlm_checkpoint", None) args.pretrained_decoder = getattr(args, "pretrained_decoder", None) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) roberta.base_architecture(args)
bart_ls-main
fairseq-py/fairseq/models/roberta/enc_dec.py
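
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of enc_dec.py above: the key renaming done by
# RobertaEncDecModel.upgrade_state_dict_named, replayed on a toy state dict so
# the mapping is easy to inspect. The tensors are placeholders.
import torch

prefix = ""  # top-level module, i.e. upgrade_state_dict_named(..., name="")
state_dict = {
    "encoder.sentence_encoder.layers.0.fc1.weight": torch.zeros(1),
    "encoder.lm_head.dense.weight": torch.zeros(1),  # dropped entirely
    "decoder.lm_head.weight": torch.zeros(1),        # moved to output_projection
}

for k in list(state_dict.keys()):
    if k.startswith(prefix + "encoder.lm_head"):
        state_dict.pop(k)
        continue
    new_k = k.replace(".sentence_encoder.", ".").replace(
        "decoder.lm_head.", "decoder.output_projection."
    )
    if new_k != k:
        state_dict[new_k] = state_dict.pop(k)

print(sorted(state_dict))
# ['decoder.output_projection.weight', 'encoder.layers.0.fc1.weight']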
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ GottBERT: a pure German Language Model """ from fairseq.models import register_model from .hub_interface import RobertaHubInterface from .model import RobertaModel @register_model('gottbert') class GottbertModel(RobertaModel): @classmethod def hub_models(cls): return { 'gottbert-base': 'https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz', } @classmethod def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='hf_byte_bpe', bpe_vocab='vocab.json', bpe_merges='merges.txt', bpe_add_prefix_space=False, **kwargs ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, bpe_vocab=bpe_vocab, bpe_merges=bpe_merges, bpe_add_prefix_space=bpe_add_prefix_space, **kwargs, ) return RobertaHubInterface(x['args'], x['task'], x['models'][0])
bart_ls-main
fairseq-py/fairseq/models/roberta/model_gottbert.py
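
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of model_gottbert.py above: loading a GottBERT
# checkpoint through the class defined here and extracting features. The local
# path is a placeholder for an extracted gottbert-base archive (model.pt plus
# the vocab.json/merges.txt files expected by the hf_byte_bpe defaults).
from fairseq.models.roberta.model_gottbert import GottbertModel

gottbert = GottbertModel.from_pretrained("/path/to/gottbert-base")
gottbert.eval()  # disable dropout for deterministic features

tokens = gottbert.encode("Die Katze schläft auf dem Sofa.")
features = gottbert.extract_features(tokens)  # shape: (1, seq_len, hidden_dim)
print(tokens.shape, features.shape)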
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Unsupervised Cross-lingual Representation Learning at Scale """ from fairseq.models import register_model from .hub_interface import RobertaHubInterface from .model import RobertaModel @register_model("xlmr") class XLMRModel(RobertaModel): @classmethod def hub_models(cls): return { "xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz", "xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz", "xlmr.xl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz", "xlmr.xxl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz", } @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="sentencepiece", **kwargs ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return RobertaHubInterface(x["args"], x["task"], x["models"][0])
bart_ls-main
fairseq-py/fairseq/models/roberta/model_xlmr.py
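
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of model_xlmr.py above: loading an XLM-R
# checkpoint through the class defined here and round-tripping multilingual
# text through its sentencepiece vocabulary. The local path is a placeholder
# for a downloaded and extracted xlmr.base archive.
from fairseq.models.roberta.model_xlmr import XLMRModel

xlmr = XLMRModel.from_pretrained("/path/to/xlmr.base", checkpoint_file="model.pt")
xlmr.eval()

for sentence in ["Hello world!", "Bonjour le monde!", "你好，世界"]:
    tokens = xlmr.encode(sentence)
    assert xlmr.decode(tokens) == sentence  # sentencepiece round-trip is lossless
    features = xlmr.extract_features(tokens)  # shape: (1, seq_len, hidden_dim)
    print(sentence, tuple(features.shape))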