#!/usr/bin/python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional
import torch
from omegaconf import DictConfig
from nemo.collections.asr.modules.transformer import (
BeamSearchSequenceGenerator,
GreedySequenceGenerator,
TopKSequenceGenerator,
)
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.core.classes.module import NeuralModule
@dataclass
class SequenceGeneratorConfig:
type: str = "greedy" # choices=[greedy, topk, beam]
max_sequence_length: int = 512
max_delta_length: int = -1
temperature: float = 1.0 # for top-k sampling
beam_size: int = 1 # K for top-k sampling, N for beam search
len_pen: float = 0.0 # for beam-search
class SequenceGenerator:
"""
Wrapper class for sequence generators for NeMo transformers.
"""
TYPE_GREEDY = "greedy"
TYPE_TOPK = "topk"
TYPE_BEAM = "beam"
SEARCHER_TYPES = [TYPE_GREEDY, TYPE_TOPK, TYPE_BEAM]
def __init__(
self,
cfg: DictConfig,
embedding: NeuralModule,
decoder: NeuralModule,
log_softmax: NeuralModule,
tokenizer: TokenizerSpec,
) -> None:
super().__init__()
self._type = cfg.get("type", "greedy")
self.tokenizer = tokenizer
self.pad_id = getattr(tokenizer, "pad_id", 0)
self.eos_id = getattr(tokenizer, "eos_id", -1)
self.bos_id = getattr(tokenizer, "bos_id", -1)
common_args = {
"pad": self.pad_id,
"bos": self.bos_id,
"eos": self.eos_id,
"max_sequence_length": cfg.get("max_sequence_length", 512),
"max_delta_length": cfg.get("max_delta_length", -1),
"batch_size": cfg.get("batch_size", 1),
}
if self._type == self.TYPE_GREEDY:
self.generator = GreedySequenceGenerator(embedding, decoder, log_softmax, **common_args)
elif self._type == self.TYPE_TOPK:
beam_size = cfg.get("beam_size", 1)
temperature = cfg.get("temperature", 1.0)
self.generator = TopKSequenceGenerator(
embedding, decoder, log_softmax, beam_size, temperature, **common_args
)
elif self._type == self.TYPE_BEAM:
beam_size = cfg.get("beam_size", 1)
len_pen = cfg.get("len_pen", 0.0)
self.generator = BeamSearchSequenceGenerator(
embedding, decoder, log_softmax, beam_size, len_pen, **common_args
)
else:
raise ValueError(
f"Sequence Generator only supports one of {self.SEARCH_TYPES}, but got {self._type} instead."
)
def __call__(
self,
encoder_states: torch.Tensor,
encoder_input_mask: torch.Tensor = None,
return_beam_scores: bool = False,
pad_max_len: Optional[int] = None,
return_length: bool = False,
):
"""
Generate sequence tokens given the input encoder states and masks.
Params:
- encoder_states: a torch Tensor of shape BxTxD
- encoder_input_mask: a binary tensor of shape BxT
- return_beam_scores: whether to return beam scores
- pad_max_len: optional int, set it to pad all sequence to the same length
- return_length: whether to return the lengths for generated sequences (shape B)
Returns:
- generated tokens tensor of shape BxT
"""
predictions = self.generator(
encoder_hidden_states=encoder_states,
encoder_input_mask=encoder_input_mask,
return_beam_scores=return_beam_scores,
)
if pad_max_len:
predictions = pad_sequence(predictions, pad_max_len, self.pad_id)
if return_length:
return predictions, self.get_seq_length(predictions)
return predictions
def get_seq_length(self, seq: torch.Tensor) -> torch.Tensor:
"""
Get sequence length.
Params:
- seq: batched token sequence tensor of shape BxT
Returns:
- tensor of shape B, where each element is the length of the sequence
"""
lengths = seq.size(1) * torch.ones(seq.size(0), device=seq.device).long()
pos = (seq == self.eos_id).long().nonzero()
seq_lengths = torch.scatter(lengths, dim=0, index=pos[:, 0], src=pos[:, 1])
return seq_lengths
def decode_semantics_from_tokens(self, seq_tokens: torch.Tensor) -> List[str]:
"""
Decode tokens into strings
Params:
- seq_tokens: integer tensor of shape BxT
Returns:
- list of strings
"""
semantics_list = []
# Drop sequence tokens to CPU
seq_tokens = seq_tokens.detach().long().cpu()
seq_lengths = self.get_seq_length(seq_tokens)
# iterate over batch
for ind in range(seq_tokens.shape[0]):
tokens = seq_tokens[ind].numpy().tolist()
length = seq_lengths[ind].long().cpu().item()
tokens = tokens[:length]
text = "".join(self.tokenizer.tokenizer.decode_ids(tokens))
semantics_list.append(text)
return semantics_list
def get_seq_length(seq: torch.Tensor, eos_id: int) -> torch.Tensor:
"""
Get sequence length.
Params:
- seq: batched token sequence tensor of shape BxT
- eos_id: integer representing the end of sentence
Returns:
- tensor of shape B, where each element is the length of the sequence
"""
lengths = seq.size(1) * torch.ones(seq.size(0), device=seq.device).long()
pos = (seq == eos_id).long().nonzero()
seq_lengths = torch.scatter(lengths, dim=0, index=pos[:, 0], src=pos[:, 1])
return seq_lengths
def pad_sequence(seq: torch.Tensor, max_len: int, pad_token: int = 0) -> torch.Tensor:
"""
Params:
- seq: integer token sequences of shape BxT
- max_len: integer for max sequence length
- pad_token: integer token for padding
Returns:
- padded sequence of shape B x max_len
"""
batch = seq.size(0)
curr_len = seq.size(1)
if curr_len >= max_len:
return seq
padding = torch.zeros(batch, max_len - curr_len, dtype=seq.dtype, device=seq.device).fill_(pad_token)
return torch.cat([seq, padding], dim=1)
def get_seq_mask(seq: torch.Tensor, seq_lens: torch.Tensor) -> torch.Tensor:
"""
Get the sequence mask based on the actual length of each sequence
Params:
- seq: tensor of shape [BxLxD]
- seq_lens: tensor of shape [B]
Returns:
- binary mask of shape [BxL]
"""
mask = torch.arange(seq.size(1))[None, :].to(seq.device) < seq_lens[:, None]
return mask.to(seq.device, dtype=bool)
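# Illustrative usage sketch (not part of the original module): exercising the module-level
# helpers with toy tensors. The token ids, eos id, and shapes below are arbitrary example values.
if __name__ == "__main__":
    eos_id = 2
    # Two token sequences of length 4; the first emits eos at position 2.
    seq = torch.tensor([[5, 7, 2, 0], [4, 6, 8, 9]])
    lengths = get_seq_length(seq, eos_id)
    print(lengths)  # tensor([2, 4]) -> number of tokens before eos, or the full length if no eos
    padded = pad_sequence(seq, max_len=6, pad_token=0)
    print(padded.shape)  # torch.Size([2, 6])
    mask = get_seq_mask(padded, lengths)
    print(mask)  # True for the first `length` positions of each row, False elsewhere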
# --- end of file: nemo/collections/asr/parts/utils/slu_utils.py (repo: NeMo-main) ---
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
@dataclass
class Hypothesis:
"""Hypothesis class for beam search algorithms.
score: A float score obtained from an AbstractRNNTDecoder module's score_hypothesis method.
y_sequence: Either a sequence of integer ids pointing to some vocabulary, or a packed torch.Tensor
behaving in the same manner. dtype must be torch.Long in the latter case.
dec_state: A list (or list of list) of LSTM-RNN decoder states. Can be None.
text: (Optional) A decoded string after processing via CTC / RNN-T decoding (removing the CTC/RNNT
`blank` tokens, and optionally merging word-pieces). Should be used as decoded string for
Word Error Rate calculation.
timestep: (Optional) A list of integer indices representing at which index in the decoding
process did the token appear. Should be of same length as the number of non-blank tokens.
alignments: (Optional) Represents the CTC / RNNT token alignments as integer tokens along an axis of
time T (for CTC) or Time x Target (TxU).
For CTC, represented as a single list of integer indices.
For RNNT, represented as a nested list of lists of integer indices.
Outer list represents Time dimension (T), inner list represents Target dimension (U).
The set of valid indices **includes** the CTC / RNNT blank token in order to represent alignments.
frame_confidence: (Optional) Represents the CTC / RNNT per-frame confidence scores as token probabilities
along an axis of time T (for CTC) or Time x Target (TxU).
For CTC, represented as a single list of float indices.
For RNNT, represented as a nested list of lists of float indices.
Outer list represents Time dimension (T), inner list represents Target dimension (U).
token_confidence: (Optional) Represents the CTC / RNNT per-token confidence scores as token probabilities
along an axis of Target U.
Represented as a single list of float indices.
word_confidence: (Optional) Represents the CTC / RNNT per-word confidence scores as token probabilities
along an axis of Target U.
Represented as a single list of float indices.
length: Represents the length of the sequence (the original length without padding), otherwise
defaults to 0.
y: (Unused) A list of torch.Tensors representing the list of hypotheses.
lm_state: (Unused) A dictionary state cache used by an external Language Model.
lm_scores: (Unused) Score of the external Language Model.
ngram_lm_state: (Optional) State of the external n-gram Language Model.
tokens: (Optional) A list of decoded tokens (can be characters or word-pieces).
last_token (Optional): A token or batch of tokens which was predicted in the last step.
"""
score: float
y_sequence: Union[List[int], torch.Tensor]
text: Optional[str] = None
dec_out: Optional[List[torch.Tensor]] = None
dec_state: Optional[Union[List[List[torch.Tensor]], List[torch.Tensor]]] = None
timestep: Union[List[int], torch.Tensor] = field(default_factory=list)
alignments: Optional[Union[List[int], List[List[int]]]] = None
frame_confidence: Optional[Union[List[float], List[List[float]]]] = None
token_confidence: Optional[List[float]] = None
word_confidence: Optional[List[float]] = None
length: Union[int, torch.Tensor] = 0
y: Optional[List[torch.Tensor]] = None
lm_state: Optional[Union[Dict[str, Any], List[Any]]] = None
lm_scores: Optional[torch.Tensor] = None
ngram_lm_state: Optional[Union[Dict[str, Any], List[Any]]] = None
tokens: Optional[Union[List[int], torch.Tensor]] = None
last_token: Optional[torch.Tensor] = None
@property
def non_blank_frame_confidence(self) -> List[float]:
"""Get per-frame confidence for non-blank tokens according to self.timestep
Returns:
List with confidence scores. The length of the list is the same as `timestep`.
"""
non_blank_frame_confidence = []
# self.timestep can be a dict for RNNT
timestep = self.timestep['timestep'] if isinstance(self.timestep, dict) else self.timestep
if len(self.timestep) != 0 and self.frame_confidence is not None:
if any(isinstance(i, list) for i in self.frame_confidence): # rnnt
t_prev = -1
offset = 0
for t in timestep:
if t != t_prev:
t_prev = t
offset = 0
else:
offset += 1
non_blank_frame_confidence.append(self.frame_confidence[t][offset])
else: # ctc
non_blank_frame_confidence = [self.frame_confidence[t] for t in timestep]
return non_blank_frame_confidence
@property
def words(self) -> List[str]:
"""Get words from self.text
Returns:
List with words (str).
"""
return [] if self.text is None else self.text.split()
@dataclass
class NBestHypotheses:
"""List of N best hypotheses"""
n_best_hypotheses: Optional[List[Hypothesis]]
@dataclass
class HATJointOutput:
"""HATJoint outputs for beam search decoding
hat_logprobs: standard HATJoint outputs as for RNNTJoint
ilm_logprobs: internal language model probabilities (for ILM subtraction)
"""
hat_logprobs: Optional[torch.Tensor] = None
ilm_logprobs: Optional[torch.Tensor] = None
def is_prefix(x: List[int], pref: List[int]) -> bool:
"""
Obtained from https://github.com/espnet/espnet.
Check if pref is a prefix of x.
Args:
x: Label ID sequence.
pref: Prefix label ID sequence.
Returns:
: Whether pref is a prefix of x.
"""
if len(pref) >= len(x):
return False
for i in range(len(pref)):
if pref[i] != x[i]:
return False
return True
def select_k_expansions(
hyps: List[Hypothesis], topk_idxs: torch.Tensor, topk_logps: torch.Tensor, gamma: float, beta: int,
) -> List[Tuple[int, Hypothesis]]:
"""
Obtained from https://github.com/espnet/espnet
Return K hypotheses candidates for expansion from a list of hypothesis.
K candidates are selected according to the extended hypotheses probabilities
and a prune-by-value method. Where K is equal to beam_size + beta.
Args:
hyps: Hypotheses.
topk_idxs: Indices of candidates hypothesis. Shape = [B, num_candidates]
topk_logps: Log-probabilities for hypotheses expansions. Shape = [B, V + 1]
gamma: Allowed logp difference for prune-by-value method.
beta: Number of additional candidates to store.
Return:
k_expansions: Best K expansion hypotheses candidates.
"""
k_expansions = []
for i, hyp in enumerate(hyps):
hyp_i = [(int(k), hyp.score + float(v)) for k, v in zip(topk_idxs[i], topk_logps[i])]
k_best_exp_val = max(hyp_i, key=lambda x: x[1])
k_best_exp_idx = k_best_exp_val[0]
k_best_exp = k_best_exp_val[1]
expansions = sorted(filter(lambda x: (k_best_exp - gamma) <= x[1], hyp_i), key=lambda x: x[1],)
if len(expansions) > 0:
k_expansions.append(expansions)
else:
k_expansions.append([(k_best_exp_idx, k_best_exp)])
return k_expansions
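# Illustrative usage sketch (not part of the original module): building a toy CTC-style
# Hypothesis and querying its convenience properties. All values below are arbitrary example data.
if __name__ == "__main__":
    hyp = Hypothesis(
        score=0.0,
        y_sequence=[1, 2, 3],
        text="hello world",
        timestep=[0, 2, 4],
        frame_confidence=[0.9, 0.1, 0.8, 0.2, 0.7],
    )
    print(hyp.words)  # ['hello', 'world']
    print(hyp.non_blank_frame_confidence)  # [0.9, 0.8, 0.7] -> confidence at the emitting frames
    print(is_prefix([1, 2, 3], [1, 2]))  # True
    print(is_prefix([1, 2, 3], [1, 3]))  # False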
# --- end of file: nemo/collections/asr/parts/utils/rnnt_utils.py (repo: NeMo-main) ---
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import is_dataclass
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.utils import logging
# Constants
LINEAR_ADAPTER_CLASSPATH = "nemo.collections.common.parts.adapter_modules.LinearAdapter"
MHA_ADAPTER_CLASSPATH = (
"nemo.collections.asr.parts.submodules.adapters.multi_head_attention_adapter_module.MultiHeadAttentionAdapter"
)
RELMHA_ADAPTER_CLASSPATH = "nemo.collections.asr.parts.submodules.adapters.multi_head_attention_adapter_module.RelPositionMultiHeadAttentionAdapter"
POS_ENCODING_ADAPTER_CLASSPATH = (
"nemo.collections.asr.parts.submodules.adapters.multi_head_attention_adapter_module.PositionalEncodingAdapter"
)
REL_POS_ENCODING_ADAPTER_CLASSPATH = (
"nemo.collections.asr.parts.submodules.adapters.multi_head_attention_adapter_module.RelPositionalEncodingAdapter"
)
def convert_adapter_cfg_to_dict_config(cfg: DictConfig):
# Convert to DictConfig from dict or Dataclass
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if not isinstance(cfg, DictConfig):
cfg = DictConfig(cfg)
return cfg
def update_adapter_cfg_input_dim(module: torch.nn.Module, cfg: DictConfig, *, module_dim: int):
"""
Update the input dimension of the provided adapter config with some default value.
Args:
module: The module that implements AdapterModuleMixin.
cfg: A DictConfig or a Dataclass representing the adapter config.
module_dim: A default module dimension, used if cfg has an incorrect input dimension.
Returns:
A DictConfig representing the adapter's config.
"""
cfg = convert_adapter_cfg_to_dict_config(cfg)
input_dim_valid_keys = ['in_features', 'n_feat']
input_key = None
for key in input_dim_valid_keys:
if key in cfg:
input_key = key
break
if input_key is None:
raise ValueError(
f"Failed to infer the input dimension of the Adapter cfg. \nExpected one of : {input_dim_valid_keys}.\n"
f"Provided config : \n"
f"{OmegaConf.to_yaml(cfg)}"
)
input_dim = cfg[input_key]
if input_dim != module_dim:
logging.info(f"Updating {module.__class__.__name__} Adapter input dim from {input_dim} to {module_dim}")
input_dim = module_dim
cfg[input_key] = input_dim
return cfg
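# Illustrative usage sketch (not part of the original module): updating the input dimension of a
# toy adapter config. The config keys and dimensions are arbitrary example values; only the
# `in_features` entry matters for the helper.
if __name__ == "__main__":
    module = torch.nn.Linear(128, 128)  # stands in for a module implementing AdapterModuleMixin
    adapter_cfg = {"_target_": LINEAR_ADAPTER_CLASSPATH, "in_features": 256, "dim": 32}
    updated_cfg = update_adapter_cfg_input_dim(module, adapter_cfg, module_dim=128)
    print(OmegaConf.to_yaml(updated_cfg))  # in_features is rewritten from 256 to 128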
# --- end of file: nemo/collections/asr/parts/utils/adapter_utils.py (repo: NeMo-main) ---
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from functools import partial
from typing import List, Optional
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.utils import logging
class ConfidenceMeasureConstants:
NAMES = ("max_prob", "entropy")
ENTROPY_TYPES = ("gibbs", "tsallis", "renyi")
ENTROPY_NORMS = ("lin", "exp")
@classmethod
def print(cls):
return (
cls.__name__
+ ": "
+ str({"NAMES": cls.NAMES, "ENTROPY_TYPES": cls.ENTROPY_TYPES, "ENTROPY_NORMS": cls.ENTROPY_NORMS})
)
class ConfidenceConstants:
AGGREGATIONS = ("mean", "min", "max", "prod")
@classmethod
def print(cls):
return cls.__name__ + ": " + str({"AGGREGATIONS": cls.AGGREGATIONS})
@dataclass
class ConfidenceMeasureConfig:
"""A Config which contains the measure name and settings to compute per-frame confidence scores.
Args:
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str).
Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
name: str = "entropy"
entropy_type: str = "tsallis"
alpha: float = 0.33
entropy_norm: str = "exp"
temperature: str = "DEPRECATED"
def __post_init__(self):
if self.temperature != "DEPRECATED":
logging.warning(
"`temperature` is deprecated and will be removed in the future. Please use `alpha` instead."
)
# TODO (alaptev): delete the following two lines sometime in the future
logging.warning("Re-writing `alpha` with the value of `temperature`.")
# self.temperature has type str
self.alpha = float(self.temperature)
self.temperature = "DEPRECATED"
if self.name not in ConfidenceMeasureConstants.NAMES:
raise ValueError(
f"`name` must be one of the following: "
f"{'`' + '`, `'.join(ConfidenceMeasureConstants.NAMES) + '`'}. Provided: `{self.name}`"
)
if self.entropy_type not in ConfidenceMeasureConstants.ENTROPY_TYPES:
raise ValueError(
f"`entropy_type` must be one of the following: "
f"{'`' + '`, `'.join(ConfidenceMeasureConstants.ENTROPY_TYPES) + '`'}. Provided: `{self.entropy_type}`"
)
if self.alpha <= 0.0:
raise ValueError(f"`alpha` must be > 0. Provided: {self.alpha}")
if self.entropy_norm not in ConfidenceMeasureConstants.ENTROPY_NORMS:
raise ValueError(
f"`entropy_norm` must be one of the following: "
f"{'`' + '`, `'.join(ConfidenceMeasureConstants.ENTROPY_NORMS) + '`'}. Provided: `{self.entropy_norm}`"
)
@dataclass
class ConfidenceConfig:
"""A config which contains the following key-value pairs related to confidence scores.
Args:
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding. When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of floats.
preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `token_confidence` in it. Here, `token_confidence` is a List of floats.
The length of the list corresponds to the number of recognized tokens.
preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `word_confidence` in it. Here, `word_confidence` is a List of floats.
The length of the list corresponds to the number of recognized words.
exclude_blank: Bool flag indicating that blank token confidence scores are to be excluded
from the `token_confidence`.
aggregation: Which aggregation type to use for collapsing per-token confidence into per-word confidence.
Valid options are `mean`, `min`, `max`, `prod`.
measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
preserve_frame_confidence: bool = False
preserve_token_confidence: bool = False
preserve_word_confidence: bool = False
exclude_blank: bool = True
aggregation: str = "min"
measure_cfg: ConfidenceMeasureConfig = field(default_factory=ConfidenceMeasureConfig)
method_cfg: str = "DEPRECATED"
def __post_init__(self):
# OmegaConf.structured ensures that post_init check is always executed
self.measure_cfg = OmegaConf.structured(
self.measure_cfg
if isinstance(self.measure_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.measure_cfg)
)
if self.method_cfg != "DEPRECATED":
logging.warning(
"`method_cfg` is deprecated and will be removed in the future. Please use `measure_cfg` instead."
)
# TODO (alaptev): delete the following two lines sometime in the future
logging.warning("Re-writing `measure_cfg` with the value of `method_cfg`.")
# OmegaConf.structured ensures that post_init check is always executed
self.measure_cfg = OmegaConf.structured(
self.method_cfg
if isinstance(self.method_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.method_cfg)
)
self.method_cfg = "DEPRECATED"
if self.aggregation not in ConfidenceConstants.AGGREGATIONS:
raise ValueError(
f"`aggregation` has to be one of the following: "
f"{'`' + '`, `'.join(ConfidenceMeasureConstants.AGGREGATIONS) + '`'}. Provided: `{self.aggregation}`"
)
def get_confidence_measure_bank():
"""Generate a dictionary with confidence measure functionals.
Supported confidence measures:
max_prob: normalized maximum probability
entropy_gibbs_lin: Gibbs entropy with linear normalization
entropy_gibbs_exp: Gibbs entropy with exponential normalization
entropy_tsallis_lin: Tsallis entropy with linear normalization
entropy_tsallis_exp: Tsallis entropy with exponential normalization
entropy_renyi_lin: Rényi entropy with linear normalization
entropy_renyi_exp: Rényi entropy with exponential normalization
Returns:
dictionary with lambda functions.
"""
# helper functions
# Gibbs entropy is implemented without alpha
neg_entropy_gibbs = lambda x: (x.exp() * x).sum(-1)
neg_entropy_alpha = lambda x, t: (x * t).exp().sum(-1)
neg_entropy_alpha_gibbs = lambda x, t: ((x * t).exp() * x).sum(-1)
# too big for a lambda
def entropy_tsallis_exp(x, v, t):
exp_neg_max_ent = math.exp((1 - math.pow(v, 1 - t)) / (1 - t))
return (((1 - neg_entropy_alpha(x, t)) / (1 - t)).exp() - exp_neg_max_ent) / (1 - exp_neg_max_ent)
def entropy_gibbs_exp(x, v, t):
exp_neg_max_ent = math.pow(v, -t * math.pow(v, 1 - t))
return ((neg_entropy_alpha_gibbs(x, t) * t).exp() - exp_neg_max_ent) / (1 - exp_neg_max_ent)
# use Gibbs entropies for Tsallis and Rényi with t == 1.0
entropy_gibbs_lin_baseline = lambda x, v: 1 + neg_entropy_gibbs(x) / math.log(v)
entropy_gibbs_exp_baseline = lambda x, v: (neg_entropy_gibbs(x).exp() * v - 1) / (v - 1)
# fill the measure bank
confidence_measure_bank = {}
# Maximum probability measure is implemented without alpha
confidence_measure_bank["max_prob"] = (
lambda x, v, t: (x.max(dim=-1)[0].exp() * v - 1) / (v - 1)
if t == 1.0
else ((x.max(dim=-1)[0] * t).exp() * math.pow(v, t) - 1) / (math.pow(v, t) - 1)
)
confidence_measure_bank["entropy_gibbs_lin"] = (
lambda x, v, t: entropy_gibbs_lin_baseline(x, v)
if t == 1.0
else 1 + neg_entropy_alpha_gibbs(x, t) / math.log(v) / math.pow(v, 1 - t)
)
confidence_measure_bank["entropy_gibbs_exp"] = (
lambda x, v, t: entropy_gibbs_exp_baseline(x, v) if t == 1.0 else entropy_gibbs_exp(x, v, t)
)
confidence_measure_bank["entropy_tsallis_lin"] = (
lambda x, v, t: entropy_gibbs_lin_baseline(x, v)
if t == 1.0
else 1 + (1 - neg_entropy_alpha(x, t)) / (math.pow(v, 1 - t) - 1)
)
confidence_measure_bank["entropy_tsallis_exp"] = (
lambda x, v, t: entropy_gibbs_exp_baseline(x, v) if t == 1.0 else entropy_tsallis_exp(x, v, t)
)
confidence_measure_bank["entropy_renyi_lin"] = (
lambda x, v, t: entropy_gibbs_lin_baseline(x, v)
if t == 1.0
else 1 + neg_entropy_alpha(x, t).log2() / (t - 1) / math.log(v, 2)
)
confidence_measure_bank["entropy_renyi_exp"] = (
lambda x, v, t: entropy_gibbs_exp_baseline(x, v)
if t == 1.0
else (neg_entropy_alpha(x, t).pow(1 / (t - 1)) * v - 1) / (v - 1)
)
return confidence_measure_bank
def get_confidence_aggregation_bank():
"""Generate a dictionary with confidence aggregation functions.
Supported confidence measures:
min: minimum
max: maximum
mean: arithmetic mean
prod: product
Returns:
dictionary with functions.
"""
confidence_aggregation_bank = {"mean": lambda x: sum(x) / len(x), "min": min, "max": max}
# python 3.7 and earlier do not have math.prod
if hasattr(math, "prod"):
confidence_aggregation_bank["prod"] = math.prod
else:
import operator
from functools import reduce
confidence_aggregation_bank["prod"] = lambda x: reduce(operator.mul, x, 1)
return confidence_aggregation_bank
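# Illustrative example (not part of the original module): collapsing toy per-token confidence
# values into a single per-word score with each supported aggregation. The confidence values are
# arbitrary; call it manually, e.g. `_example_confidence_aggregation()`.
def _example_confidence_aggregation():
    aggregations = get_confidence_aggregation_bank()
    token_confidence = [0.2, 0.4, 0.9]
    for name in ("mean", "min", "max", "prod"):
        # expected: mean ~0.5, min 0.2, max 0.9, prod ~0.072
        print(name, aggregations[name](token_confidence))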
class ConfidenceMeasureMixin(ABC):
"""Confidence Measure Mixin class.
It initializes per-frame confidence measure.
"""
def _init_confidence_measure(self, confidence_measure_cfg: Optional[DictConfig] = None):
"""Initialize per-frame confidence measure from config.
"""
# OmegaConf.structured ensures that post_init check is always executed
confidence_measure_cfg = OmegaConf.structured(
ConfidenceMeasureConfig()
if confidence_measure_cfg is None
else ConfidenceMeasureConfig(**confidence_measure_cfg)
)
# set confidence calculation measure
# we suppose that self.blank_id == len(vocabulary)
self.num_tokens = (self.blank_id if hasattr(self, "blank_id") else self._blank_index) + 1
self.alpha = confidence_measure_cfg.alpha
# init confidence measure bank
self.confidence_measure_bank = get_confidence_measure_bank()
measure = None
# construct measure_name
measure_name = ""
if confidence_measure_cfg.name == "max_prob":
measure_name = "max_prob"
elif confidence_measure_cfg.name == "entropy":
measure_name = '_'.join(
[confidence_measure_cfg.name, confidence_measure_cfg.entropy_type, confidence_measure_cfg.entropy_norm]
)
else:
raise ValueError(f"Unsupported `confidence_measure_cfg.name`: `{confidence_measure_cfg.name}`")
if measure_name not in self.confidence_measure_bank:
raise ValueError(f"Unsupported measure setup: `{measure_name}`")
measure = partial(self.confidence_measure_bank[measure_name], v=self.num_tokens, t=self.alpha)
self._get_confidence = lambda x: measure(torch.nan_to_num(x)).tolist()
class ConfidenceMixin(ABC):
"""Confidence Mixin class.
It is responsible for confidence estimation method initialization and high-level confidence score calculation.
"""
def _init_confidence(self, confidence_cfg: Optional[DictConfig] = None):
"""Initialize confidence-related fields and confidence aggregation function from config.
"""
# OmegaConf.structured ensures that post_init check is always executed
confidence_cfg = OmegaConf.structured(
ConfidenceConfig() if confidence_cfg is None else ConfidenceConfig(**confidence_cfg)
)
self.confidence_measure_cfg = confidence_cfg.measure_cfg
# extract the config
self.preserve_word_confidence = confidence_cfg.get('preserve_word_confidence', False)
# set preserve_frame_confidence and preserve_token_confidence to True
# if preserve_word_confidence is True
self.preserve_token_confidence = (
confidence_cfg.get('preserve_token_confidence', False) | self.preserve_word_confidence
)
# set preserve_frame_confidence to True if preserve_token_confidence is True
self.preserve_frame_confidence = (
confidence_cfg.get('preserve_frame_confidence', False) | self.preserve_token_confidence
)
self.exclude_blank_from_confidence = confidence_cfg.get('exclude_blank', True)
self.word_confidence_aggregation = confidence_cfg.get('aggregation', "min")
# define aggregation functions
self.confidence_aggregation_bank = get_confidence_aggregation_bank()
self._aggregate_confidence = self.confidence_aggregation_bank[self.word_confidence_aggregation]
# Update preserve frame confidence
if self.preserve_frame_confidence is False:
if self.cfg.strategy in ['greedy', 'greedy_batch']:
self.preserve_frame_confidence = self.cfg.greedy.get('preserve_frame_confidence', False)
# OmegaConf.structured ensures that post_init check is always executed
confidence_measure_cfg = OmegaConf.structured(self.cfg.greedy).get('confidence_measure_cfg', None)
self.confidence_measure_cfg = (
OmegaConf.structured(ConfidenceMeasureConfig())
if confidence_measure_cfg is None
else OmegaConf.structured(ConfidenceMeasureConfig(**confidence_measure_cfg))
)
@abstractmethod
def compute_confidence(self, hypotheses_list: List[Hypothesis]) -> List[Hypothesis]:
"""Computes high-level (per-token and/or per-word) confidence scores for a list of hypotheses.
Assumes that `frame_confidence` is present in the hypotheses.
Args:
hypotheses_list: List of Hypothesis.
Returns:
A list of hypotheses with high-level confidence scores.
"""
raise NotImplementedError()
@abstractmethod
def _aggregate_token_confidence(self, hypothesis: Hypothesis) -> List[float]:
"""Implemented by subclass in order to aggregate token confidence to a word-level confidence.
Args:
hypothesis: Hypothesis
Returns:
A list of word-level confidence scores.
"""
raise NotImplementedError()
def _aggregate_token_confidence_chars(self, words: List[str], token_confidence: List[float]) -> List[float]:
"""Implementation of token confidence aggregation for character-based models.
Args:
words: List of words of a hypothesis.
token_confidence: List of token-level confidence scores of a hypothesis.
Returns:
A list of word-level confidence scores.
"""
word_confidence = []
i = 0
for word in words:
word_len = len(word)
word_confidence.append(self._aggregate_confidence(token_confidence[i : i + word_len]))
# we assume that there is exactly one space token between words and exclude it from word confidence
i += word_len + 1
return word_confidence
def _aggregate_token_confidence_subwords_sentencepiece(
self, words: List[str], token_confidence: List[float], token_ids: List[int]
) -> List[float]:
"""Implementation of token confidence aggregation for subword-based models.
**Note**: Only supports Sentencepiece based tokenizers !
Args:
words: List of words of a hypothesis.
token_confidence: List of token-level confidence scores of a hypothesis.
token_ids: List of token ids of a hypothesis.
Returns:
A list of word-level confidence scores.
"""
word_confidence = []
# run only if there are final words
if len(words) > 0:
j = 0
prev_unk = False
prev_underline = False
for i, token_id in enumerate(token_ids):
token = self.decode_ids_to_tokens([int(token_id)])[0]
token_text = self.decode_tokens_to_str([int(token_id)])
# treat `<unk>` as a separate word regardless of the next token
# to match the result of `tokenizer.ids_to_text`
if (token != token_text or prev_unk) and i > j:
# do not add confidence for `▁` if the current token starts with `▁`
# to match the result of `tokenizer.ids_to_text`
if not prev_underline:
word_confidence.append(self._aggregate_confidence(token_confidence[j:i]))
j = i
prev_unk = token == '<unk>'
prev_underline = token == '▁'
if not prev_underline:
word_confidence.append(self._aggregate_confidence(token_confidence[j : len(token_ids)]))
if len(words) != len(word_confidence):
raise RuntimeError(
f"""Something went wrong with word-level confidence aggregation.\n
Please check these values for debugging:\n
len(words): {len(words)},\n
len(word_confidence): {len(word_confidence)},\n
recognized text: `{' '.join(words)}`"""
)
return word_confidence
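# Illustrative usage sketch (not part of the original module): wiring ConfidenceMeasureMixin into
# a minimal dummy class to obtain a per-frame confidence function. The blank id, tensor shapes,
# and reliance on the default measure settings (entropy/tsallis/alpha=0.33/exp) are arbitrary
# example choices.
if __name__ == "__main__":

    class _DummyConfidenceScorer(ConfidenceMeasureMixin):
        def __init__(self, blank_id: int):
            self.blank_id = blank_id
            self._init_confidence_measure()  # uses ConfidenceMeasureConfig defaults

    scorer = _DummyConfidenceScorer(blank_id=3)  # vocabulary of 3 tokens + blank
    logprobs = torch.nn.functional.log_softmax(torch.randn(2, 4), dim=-1)
    print(scorer._get_confidence(logprobs))  # two per-frame confidence values in [0, 1]
    # The measure bank can also be queried directly; here the baseline max-prob measure (alpha=1).
    bank = get_confidence_measure_bank()
    print(float(bank["max_prob"](logprobs, 4, 1.0).mean()))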
# --- end of file: nemo/collections/asr/parts/utils/asr_confidence_utils.py (repo: NeMo-main) ---
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import re
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig
from tqdm.auto import tqdm
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.models import ASRModel, EncDecHybridRNNTCTCModel
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchASR
from nemo.collections.common.parts.preprocessing.manifest import get_full_path
from nemo.utils import logging, model_utils
def get_buffered_pred_feat_rnnt(
asr: FrameBatchASR,
tokens_per_chunk: int,
delay: int,
model_stride_in_secs: int,
batch_size: int,
manifest: str = None,
filepaths: List[list] = None,
) -> List[rnnt_utils.Hypothesis]:
"""
Moved from examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py
Writes all information present in the input manifest to the output manifest; WER calculation has been removed.
"""
hyps = []
refs = []
if filepaths and manifest:
raise ValueError("Please select either filepaths or manifest")
if filepaths is None and manifest is None:
raise ValueError("Either filepaths or manifest shoud not be None")
if manifest:
filepaths = []
with open(manifest, "r", encoding='utf_8') as mfst_f:
print("Parsing manifest files...")
for l in mfst_f:
row = json.loads(l.strip())
audio_file = get_full_path(audio_file=row['audio_filepath'], manifest_file=manifest)
filepaths.append(audio_file)
if 'text' in row:
refs.append(row['text'])
with torch.inference_mode():
with torch.cuda.amp.autocast():
batch = []
asr.sample_offset = 0
for idx in tqdm(range(len(filepaths)), desc='Sample:', total=len(filepaths)):
batch.append((filepaths[idx]))
if len(batch) == batch_size:
audio_files = [sample for sample in batch]
asr.reset()
asr.read_audio_file(audio_files, delay, model_stride_in_secs)
hyp_list = asr.transcribe(tokens_per_chunk, delay)
hyps.extend(hyp_list)
batch.clear()
asr.sample_offset += batch_size
if len(batch) > 0:
asr.batch_size = len(batch)
asr.frame_bufferer.batch_size = len(batch)
asr.reset()
audio_files = [sample for sample in batch]
asr.read_audio_file(audio_files, delay, model_stride_in_secs)
hyp_list = asr.transcribe(tokens_per_chunk, delay)
hyps.extend(hyp_list)
batch.clear()
asr.sample_offset += len(batch)
if os.environ.get('DEBUG', '0') in ('1', 'y', 't'):
if len(refs) == 0:
print("ground-truth text does not present!")
for hyp in hyps:
print("hyp:", hyp)
else:
for hyp, ref in zip(hyps, refs):
print("hyp:", hyp)
print("ref:", ref)
wrapped_hyps = wrap_transcription(hyps)
return wrapped_hyps
def get_buffered_pred_feat(
asr: FrameBatchASR,
frame_len: float,
tokens_per_chunk: int,
delay: int,
preprocessor_cfg: DictConfig,
model_stride_in_secs: int,
device: Union[List[int], int],
manifest: str = None,
filepaths: List[list] = None,
) -> List[rnnt_utils.Hypothesis]:
"""
Moved from examples/asr/asr_chunked_inference/ctc/speech_to_text_buffered_infer_ctc.py
Writes all information present in the input manifest to the output manifest; WER calculation has been removed.
"""
# Create a preprocessor to convert audio samples into raw features,
# Normalization will be done per buffer in frame_bufferer
# Do not normalize whatever the model's preprocessor setting is
preprocessor_cfg.normalize = "None"
preprocessor = nemo_asr.models.EncDecCTCModelBPE.from_config_dict(preprocessor_cfg)
preprocessor.to(device)
hyps = []
refs = []
if filepaths and manifest:
raise ValueError("Please select either filepaths or manifest")
if filepaths is None and manifest is None:
raise ValueError("Either filepaths or manifest shoud not be None")
if filepaths:
for l in tqdm(filepaths, desc="Sample:"):
asr.reset()
asr.read_audio_file(l, delay, model_stride_in_secs)
hyp = asr.transcribe(tokens_per_chunk, delay)
hyps.append(hyp)
else:
with open(manifest, "r", encoding='utf_8') as mfst_f:
for l in tqdm(mfst_f, desc="Sample:"):
asr.reset()
row = json.loads(l.strip())
if 'text' in row:
refs.append(row['text'])
audio_file = get_full_path(audio_file=row['audio_filepath'], manifest_file=manifest)
# do not support partial audio
asr.read_audio_file(audio_file, delay, model_stride_in_secs)
hyp = asr.transcribe(tokens_per_chunk, delay)
hyps.append(hyp)
if os.environ.get('DEBUG', '0') in ('1', 'y', 't'):
if len(refs) == 0:
print("ground-truth text does not present!")
for hyp in hyps:
print("hyp:", hyp)
else:
for hyp, ref in zip(hyps, refs):
print("hyp:", hyp)
print("ref:", ref)
wrapped_hyps = wrap_transcription(hyps)
return wrapped_hyps
def wrap_transcription(hyps: List[str]) -> List[rnnt_utils.Hypothesis]:
""" Wrap transcription to the expected format in func write_transcription """
wrapped_hyps = []
for hyp in hyps:
hypothesis = rnnt_utils.Hypothesis(score=0.0, y_sequence=[], text=hyp)
wrapped_hyps.append(hypothesis)
return wrapped_hyps
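# Illustrative example (not part of the original module): wrapping plain string transcripts into
# rnnt_utils.Hypothesis objects, the format expected by write_transcription. Call it manually,
# e.g. `_example_wrap_transcription()`.
def _example_wrap_transcription():
    wrapped = wrap_transcription(["hello world", "how are you"])
    print(wrapped[0].text, wrapped[0].score)  # hello world 0.0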
def setup_model(cfg: DictConfig, map_location: torch.device) -> Tuple[ASRModel, str]:
""" Setup model from cfg and return model and model name for next step """
if cfg.model_path is not None and cfg.model_path != "None":
# restore model from .nemo file path
model_cfg = ASRModel.restore_from(restore_path=cfg.model_path, return_config=True)
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath) # type: ASRModel
logging.info(f"Restoring model : {imported_class.__name__}")
asr_model = imported_class.restore_from(
restore_path=cfg.model_path, map_location=map_location,
) # type: ASRModel
model_name = os.path.splitext(os.path.basename(cfg.model_path))[0]
else:
# restore model by name
asr_model = ASRModel.from_pretrained(
model_name=cfg.pretrained_name, map_location=map_location,
) # type: ASRModel
model_name = cfg.pretrained_name
if hasattr(cfg, "model_change"):
asr_model.change_attention_model(
self_attention_model=cfg.model_change.conformer.get("self_attention_model", None),
att_context_size=cfg.model_change.conformer.get("att_context_size", None),
)
return asr_model, model_name
def prepare_audio_data(cfg: DictConfig) -> Tuple[List[str], bool]:
""" Prepare audio data and decide whether it's partial_audio condition. """
# this part may need refactor alongsides with refactor of transcribe
partial_audio = False
if cfg.audio_dir is not None and not cfg.append_pred:
filepaths = list(glob.glob(os.path.join(cfg.audio_dir, f"**/*.{cfg.audio_type}"), recursive=True))
else:
# get filenames from manifest
filepaths = []
if os.stat(cfg.dataset_manifest).st_size == 0:
logging.error(f"The input dataset_manifest {cfg.dataset_manifest} is empty. Exiting!")
return None
with open(cfg.dataset_manifest, 'r', encoding='utf_8') as f:
has_two_fields = []
for line in f:
item = json.loads(line)
if "offset" in item and "duration" in item:
has_two_fields.append(True)
else:
has_two_fields.append(False)
audio_key = cfg.get('audio_key', 'audio_filepath')
audio_file = get_full_path(audio_file=item[audio_key], manifest_file=cfg.dataset_manifest)
filepaths.append(audio_file)
partial_audio = all(has_two_fields)
logging.info(f"\nTranscribing {len(filepaths)} files...\n")
return filepaths, partial_audio
def compute_output_filename(cfg: DictConfig, model_name: str) -> DictConfig:
""" Compute filename of output manifest and update cfg"""
if cfg.output_filename is None:
# create default output filename
if cfg.audio_dir is not None:
cfg.output_filename = os.path.dirname(os.path.join(cfg.audio_dir, '.')) + '.json'
elif cfg.pred_name_postfix is not None:
cfg.output_filename = cfg.dataset_manifest.replace('.json', f'_{cfg.pred_name_postfix}.json')
else:
cfg.output_filename = cfg.dataset_manifest.replace('.json', f'_{model_name}.json')
return cfg
def normalize_timestamp_output(timestamps: dict):
"""
Normalize the dictionary of timestamp values to JSON serializable values.
Expects the following keys to exist -
"start_offset": int-like object that represents the starting index of the token
in the full audio after downsampling.
"end_offset": int-like object that represents the ending index of the token
in the full audio after downsampling.
Args:
timestamps: List of dicts, one per token, each containing the offset keys above.
Returns:
Normalized `timestamps` dictionary (in-place normalized)
"""
for val_idx in range(len(timestamps)):
timestamps[val_idx]['start_offset'] = int(timestamps[val_idx]['start_offset'])
timestamps[val_idx]['end_offset'] = int(timestamps[val_idx]['end_offset'])
return timestamps
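# Illustrative example (not part of the original module): normalizing toy timestamp offsets
# (here torch tensors) into plain Python ints so they are JSON serializable. Call it manually,
# e.g. `_example_normalize_timestamps()`.
def _example_normalize_timestamps():
    timestamps = [
        {'start_offset': torch.tensor(3), 'end_offset': torch.tensor(7)},
        {'start_offset': torch.tensor(8), 'end_offset': torch.tensor(12)},
    ]
    print(normalize_timestamp_output(timestamps))
    # [{'start_offset': 3, 'end_offset': 7}, {'start_offset': 8, 'end_offset': 12}]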
def write_transcription(
transcriptions: Union[List[rnnt_utils.Hypothesis], List[List[rnnt_utils.Hypothesis]], List[str]],
cfg: DictConfig,
model_name: str,
filepaths: List[str] = None,
compute_langs: bool = False,
compute_timestamps: bool = False,
) -> Tuple[str, str]:
""" Write generated transcription to output file. """
if cfg.append_pred:
logging.info(f'Transcripts will be written in "{cfg.output_filename}" file')
if cfg.pred_name_postfix is not None:
pred_by_model_name = cfg.pred_name_postfix
else:
pred_by_model_name = model_name
pred_text_attr_name = 'pred_text_' + pred_by_model_name
else:
pred_text_attr_name = 'pred_text'
return_hypotheses = True
if isinstance(transcriptions[0], str): # List[str]:
best_hyps = transcriptions
return_hypotheses = False
elif isinstance(transcriptions[0], rnnt_utils.Hypothesis): # List[rnnt_utils.Hypothesis]
best_hyps = transcriptions
assert cfg.decoding.beam.return_best_hypothesis, "Works only with return_best_hypothesis=true"
elif isinstance(transcriptions[0], list) and isinstance(
transcriptions[0][0], rnnt_utils.Hypothesis
): # List[List[rnnt_utils.Hypothesis]] NBestHypothesis
best_hyps, beams = [], []
for hyps in transcriptions:
best_hyps.append(hyps[0])
if not cfg.decoding.beam.return_best_hypothesis:
beam = []
for hyp in hyps:
beam.append((hyp.text, hyp.score))
beams.append(beam)
else:
raise TypeError
with open(cfg.output_filename, 'w', encoding='utf-8', newline='\n') as f:
if cfg.audio_dir is not None:
for idx, transcription in enumerate(best_hyps): # type: rnnt_utils.Hypothesis or str
if not return_hypotheses: # transcription is str
item = {'audio_filepath': filepaths[idx], pred_text_attr_name: transcription}
else: # transcription is Hypothesis
item = {'audio_filepath': filepaths[idx], pred_text_attr_name: transcription.text}
if compute_timestamps:
timestamps = transcription.timestep
if timestamps is not None and isinstance(timestamps, dict):
timestamps.pop(
'timestep', None
) # Pytorch tensor calculating index of each token, not needed.
for key in timestamps.keys():
values = normalize_timestamp_output(timestamps[key])
item[f'timestamps_{key}'] = values
if compute_langs:
item['pred_lang'] = transcription.langs
item['pred_lang_chars'] = transcription.langs_chars
if not cfg.decoding.beam.return_best_hypothesis:
item['beams'] = beams[idx]
f.write(json.dumps(item) + "\n")
else:
with open(cfg.dataset_manifest, 'r', encoding='utf-8') as fr:
for idx, line in enumerate(fr):
item = json.loads(line)
if not return_hypotheses: # transcription is str
item[pred_text_attr_name] = best_hyps[idx]
else: # transcription is Hypothesis
item[pred_text_attr_name] = best_hyps[idx].text
if compute_timestamps:
timestamps = best_hyps[idx].timestep
if timestamps is not None and isinstance(timestamps, dict):
timestamps.pop(
'timestep', None
) # Pytorch tensor calculating index of each token, not needed.
for key in timestamps.keys():
values = normalize_timestamp_output(timestamps[key])
item[f'timestamps_{key}'] = values
if compute_langs:
item['pred_lang'] = best_hyps[idx].langs
item['pred_lang_chars'] = best_hyps[idx].langs_chars
if not cfg.decoding.beam.return_best_hypothesis:
item['beams'] = beams[idx]
f.write(json.dumps(item) + "\n")
return cfg.output_filename, pred_text_attr_name
def transcribe_partial_audio(
asr_model,
path2manifest: str = None,
batch_size: int = 4,
logprobs: bool = False,
return_hypotheses: bool = False,
num_workers: int = 0,
channel_selector: Optional[int] = None,
augmentor: DictConfig = None,
decoder_type: Optional[str] = None,
) -> List[str]:
"""
See the description of this function in transcribe() in nemo/collections/asr/models/ctc_models.py and nemo/collections/asr/models/rnnt_models.py
"""
if return_hypotheses and logprobs:
raise ValueError(
"Either `return_hypotheses` or `logprobs` can be True at any given time."
"Returned hypotheses will contain the logprobs."
)
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
# We will store transcriptions here
hypotheses = []
# Model's mode and device
mode = asr_model.training
device = next(asr_model.parameters()).device
dither_value = asr_model.preprocessor.featurizer.dither
pad_to_value = asr_model.preprocessor.featurizer.pad_to
if decoder_type is not None: # Hybrid model
decode_function = (
asr_model.decoding.rnnt_decoder_predictions_tensor
if decoder_type == 'rnnt'
else asr_model.ctc_decoding.ctc_decoder_predictions_tensor
)
elif hasattr(asr_model, 'joint'): # RNNT model
decode_function = asr_model.decoding.rnnt_decoder_predictions_tensor
else: # CTC model
decode_function = asr_model.decoding.ctc_decoder_predictions_tensor
try:
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
asr_model.eval()
# Freeze the encoder and decoder modules
asr_model.encoder.freeze()
asr_model.decoder.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
config = {
'manifest_filepath': path2manifest,
'batch_size': batch_size,
'num_workers': num_workers,
'channel_selector': channel_selector,
}
if augmentor:
config['augmentor'] = augmentor
temporary_datalayer = asr_model._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing"):
outputs = asr_model.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
logits, logits_len = outputs[0], outputs[1]
if isinstance(asr_model, EncDecHybridRNNTCTCModel) and decoder_type == "ctc":
logits = asr_model.ctc_decoder(encoder_output=logits)
logits = logits.cpu()
if logprobs:
logits = logits.numpy()
# dump log probs per file
for idx in range(logits.shape[0]):
lg = logits[idx][: logits_len[idx]]
hypotheses.append(lg)
else:
current_hypotheses, _ = decode_function(logits, logits_len, return_hypotheses=return_hypotheses,)
if return_hypotheses:
# dump log probs per file
for idx in range(logits.shape[0]):
current_hypotheses[idx].y_sequence = logits[idx][: logits_len[idx]]
if current_hypotheses[idx].alignments is None:
current_hypotheses[idx].alignments = current_hypotheses[idx].y_sequence
hypotheses += current_hypotheses
del logits
del test_batch
finally:
# set mode back to its original value
asr_model.train(mode=mode)
asr_model.preprocessor.featurizer.dither = dither_value
asr_model.preprocessor.featurizer.pad_to = pad_to_value
if mode is True:
asr_model.encoder.unfreeze()
asr_model.decoder.unfreeze()
logging.set_verbosity(logging_level)
return hypotheses
class PunctuationCapitalization:
def __init__(self, punctuation_marks: str):
"""
Class for text processing with punctuation and capitalization. Can be used with class TextProcessingConfig.
Args:
punctuation_marks (str): String with punctuation marks to process.
Example: punctuation_marks = '.,?'
"""
if punctuation_marks:
self.regex_punctuation = re.compile(fr"([{''.join(punctuation_marks)}])")
self.regex_extra_space = re.compile(r'\s{2,}')
else:
self.regex_punctuation = None
def separate_punctuation(self, lines: List[str]) -> List[str]:
if self.regex_punctuation is not None:
return [
self.regex_extra_space.sub(' ', self.regex_punctuation.sub(r' \1 ', line)).strip() for line in lines
]
else:
return lines
def do_lowercase(self, lines: List[str]) -> List[str]:
return [line.lower() for line in lines]
def rm_punctuation(self, lines: List[str]) -> List[str]:
if self.regex_punctuation is not None:
return [self.regex_extra_space.sub(' ', self.regex_punctuation.sub(' ', line)).strip() for line in lines]
else:
return lines
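# Illustrative example (not part of the original module): basic text processing with
# PunctuationCapitalization. The punctuation set and sentences are arbitrary examples.
# Call it manually, e.g. `_example_punctuation_processing()`.
def _example_punctuation_processing():
    pc = PunctuationCapitalization(".,?")
    print(pc.separate_punctuation(["hello, world."]))  # ['hello , world .']
    print(pc.rm_punctuation(["Hello, world."]))  # ['Hello world']
    print(pc.do_lowercase(["Hello, World."]))  # ['hello, world.']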
@dataclass
class TextProcessingConfig:
# Punctuation marks to process. Example: ".,?"
punctuation_marks: str = ""
# Whether to apply lower case conversion on the training text.
do_lowercase: bool = False
# Whether to remove punctuation marks from text.
rm_punctuation: bool = False
# Whether to separate punctuation from the previous word by a space.
separate_punctuation: bool = True
# --- end of file: nemo/collections/asr/parts/utils/transcribe_utils.py (repo: NeMo-main) ---
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import csv
import json
import os
from collections import OrderedDict as od
from datetime import datetime
from typing import Dict, List, Tuple
import numpy as np
from nemo.collections.asr.metrics.der import concat_perm_word_error_rate
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.models import ClusteringDiarizer
from nemo.collections.asr.parts.utils.speaker_utils import (
audio_rttm_map,
get_uniqname_from_filepath,
labels_to_rttmfile,
rttm_to_labels,
write_rttm2manifest,
)
from nemo.utils import logging
try:
import arpa
ARPA = True
except ImportError:
ARPA = False
__all__ = ['OfflineDiarWithASR']
def dump_json_to_file(file_path: str, session_trans_dict: dict):
"""
Write a json file from the session_trans_dict dictionary.
Args:
file_path (str):
Target filepath where json file is saved
session_trans_dict (dict):
Dictionary containing transcript, speaker labels and timestamps
"""
with open(file_path, "w") as outfile:
json.dump(session_trans_dict, outfile, indent=4)
def write_txt(w_path: str, val: str):
"""
Write a text file from the string input.
Args:
w_path (str):
Target path for saving a file
val (str):
String variable to be written
"""
with open(w_path, "w") as output:
output.write(val + '\n')
def convert_ctm_to_text(ctm_file_path: str) -> Tuple[List[str], str]:
"""
Convert a CTM file into a list containing the transcription (space-separated string) for each speaker.
Args:
ctm_file_path (str):
Filepath to the reference CTM files.
Returns:
spk_reference (list):
List containing the reference transcripts for each speaker.
Example:
>>> spk_reference = ["hi how are you well that's nice", "i'm good yeah how is your sister"]
mix_reference (str):
Reference transcript from CTM file. This transcript has word sequence in temporal order.
Example:
>>> mix_reference = "hi how are you i'm good well that's nice yeah how is your sister"
"""
mix_reference, per_spk_ref_trans_dict = [], {}
ctm_content = open(ctm_file_path).readlines()
for ctm_line in ctm_content:
ctm_split = ctm_line.split()
spk = ctm_split[1]
if spk not in per_spk_ref_trans_dict:
per_spk_ref_trans_dict[spk] = []
per_spk_ref_trans_dict[spk].append(ctm_split[4])
mix_reference.append(ctm_split[4])
spk_reference = [" ".join(word_list) for word_list in per_spk_ref_trans_dict.values()]
mix_reference = " ".join(mix_reference)
return spk_reference, mix_reference
def convert_word_dict_seq_to_text(word_dict_seq_list: List[Dict[str, float]]) -> Tuple[List[str], str]:
"""
Convert word_dict_seq_list into a list containing the transcription (space-separated string) for each speaker.
Args:
word_dict_seq_list (list):
List containing words and corresponding word timestamps in dictionary format.
Example:
>>> word_dict_seq_list = \
>>> [{'word': 'right', 'start_time': 0.0, 'end_time': 0.04, 'speaker': 'speaker_0'},
{'word': 'and', 'start_time': 0.64, 'end_time': 0.68, 'speaker': 'speaker_1'},
...],
Returns:
spk_hypothesis (list):
List containing the hypothesis transcript for each speaker. A space-separated word sequence
is assigned to each speaker.
Example:
>>> spk_hypothesis= ["hi how are you well that's nice", "i'm good yeah how is your sister"]
mix_hypothesis (str):
Hypothesis transcript from ASR output. This transcript has word sequence in temporal order.
Example:
>>> mix_hypothesis = "hi how are you i'm good well that's nice yeah how is your sister"
"""
mix_hypothesis, per_spk_hyp_trans_dict = [], {}
for word_dict in word_dict_seq_list:
spk = word_dict['speaker']
if spk not in per_spk_hyp_trans_dict:
per_spk_hyp_trans_dict[spk] = []
per_spk_hyp_trans_dict[spk].append(word_dict['word'])
mix_hypothesis.append(word_dict['word'])
# Create a list containing string formatted transcript
spk_hypothesis = [" ".join(word_list) for word_list in per_spk_hyp_trans_dict.values()]
mix_hypothesis = " ".join(mix_hypothesis)
return spk_hypothesis, mix_hypothesis
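# A minimal usage sketch for `convert_word_dict_seq_to_text` on a hand-built word
# sequence; the words, timestamps and speaker labels below are illustrative only.
def _example_convert_word_dict_seq_to_text():
    word_dict_seq_list = [
        {'word': 'hi', 'start_time': 0.0, 'end_time': 0.2, 'speaker': 'speaker_0'},
        {'word': 'there', 'start_time': 0.25, 'end_time': 0.5, 'speaker': 'speaker_0'},
        {'word': 'hello', 'start_time': 0.6, 'end_time': 0.9, 'speaker': 'speaker_1'},
    ]
    spk_hypothesis, mix_hypothesis = convert_word_dict_seq_to_text(word_dict_seq_list)
    # spk_hypothesis == ['hi there', 'hello'] and mix_hypothesis == 'hi there hello'
    return spk_hypothesis, mix_hypothesis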
def convert_word_dict_seq_to_ctm(
word_dict_seq_list: List[Dict[str, float]], uniq_id: str = 'null', decimals: int = 3
) -> List[str]:
"""
Convert word_dict_seq_list into a list containing transcription in CTM format.
Args:
word_dict_seq_list (list):
List containing words and corresponding word timestamps in dictionary format.
Example:
>>> word_dict_seq_list = \
>>> [{'word': 'right', 'start_time': 0.0, 'end_time': 0.34, 'speaker': 'speaker_0'},
{'word': 'and', 'start_time': 0.64, 'end_time': 0.81, 'speaker': 'speaker_1'},
...],
Returns:
        ctm_lines (list):
            List containing the hypothesis transcript in CTM format.
            Example:
                >>> ctm_lines = ["my_audio_01 speaker_0 0.0 0.34 right 0",
                                 "my_audio_01 speaker_1 0.64 0.17 and 0",
                                 ...]
"""
ctm_lines = []
confidence = 0
for word_dict in word_dict_seq_list:
spk = word_dict['speaker']
stt = word_dict['start_time']
dur = round(word_dict['end_time'] - word_dict['start_time'], decimals)
word = word_dict['word']
ctm_line_str = f"{uniq_id} {spk} {stt} {dur} {word} {confidence}"
ctm_lines.append(ctm_line_str)
return ctm_lines
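# A minimal usage sketch for `convert_word_dict_seq_to_ctm`; the unique ID and the
# word entries below are illustrative only.
def _example_convert_word_dict_seq_to_ctm():
    word_dict_seq_list = [
        {'word': 'right', 'start_time': 0.0, 'end_time': 0.34, 'speaker': 'speaker_0'},
        {'word': 'and', 'start_time': 0.64, 'end_time': 0.81, 'speaker': 'speaker_1'},
    ]
    ctm_lines = convert_word_dict_seq_to_ctm(word_dict_seq_list, uniq_id='my_audio_01')
    # ctm_lines == ['my_audio_01 speaker_0 0.0 0.34 right 0',
    #               'my_audio_01 speaker_1 0.64 0.17 and 0']
    return ctm_lines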
def get_total_result_dict(
der_results: Dict[str, Dict[str, float]], wer_results: Dict[str, Dict[str, float]], csv_columns: List[str],
):
"""
Merge WER results and DER results into a single dictionary variable.
Args:
der_results (dict):
Dictionary containing FA, MISS, CER and DER values for both aggregated amount and
each session.
wer_results (dict):
Dictionary containing session-by-session WER and cpWER. `wer_results` only
exists when CTM files are provided.
        csv_columns (list):
            List of column names used to initialize each session entry.
    Returns:
        total_result_jsons (list):
            List of dictionaries, one per session, containing the merged DER and WER results.
"""
total_result_dict = {}
for uniq_id in der_results.keys():
if uniq_id == 'total':
continue
total_result_dict[uniq_id] = {x: "-" for x in csv_columns}
total_result_dict[uniq_id]["uniq_id"] = uniq_id
if uniq_id in der_results:
total_result_dict[uniq_id].update(der_results[uniq_id])
if uniq_id in wer_results:
total_result_dict[uniq_id].update(wer_results[uniq_id])
total_result_jsons = list(total_result_dict.values())
return total_result_jsons
def get_audacity_label(word: str, stt_sec: float, end_sec: float, speaker: str) -> str:
"""
Get a string formatted line for Audacity label.
Args:
word (str):
A decoded word
stt_sec (float):
Start timestamp of the word
end_sec (float):
End timestamp of the word
        speaker (str):
            Speaker label in string type
    Returns:
        (str):
            A tab-separated Audacity label line in the form "<start>\t<end>\t[<speaker index>] <word>"
    """
spk = speaker.split('_')[-1]
return f'{stt_sec}\t{end_sec}\t[{spk}] {word}'
def get_num_of_spk_from_labels(labels: List[str]) -> int:
"""
Count the number of speakers in a segment label list.
Args:
labels (list):
List containing segment start and end timestamp and speaker labels.
Example:
>>> labels = ["15.25 21.82 speaker_0", "21.18 29.51 speaker_1", ... ]
Returns:
n_spk (int):
The number of speakers in the list `labels`
"""
spk_set = [x.split(' ')[-1].strip() for x in labels]
return len(set(spk_set))
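# A minimal usage sketch for the two label helpers above; the segment labels, word
# and timestamps are illustrative only.
def _example_label_helpers():
    labels = ["15.25 21.82 speaker_0", "21.18 29.51 speaker_1"]
    n_spk = get_num_of_spk_from_labels(labels)  # -> 2
    audacity_line = get_audacity_label("hello", 15.25, 15.6, "speaker_0")
    # audacity_line == '15.25\t15.6\t[0] hello'
    return n_spk, audacity_line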
class OfflineDiarWithASR:
"""
A class designed for performing ASR and diarization together.
Attributes:
cfg_diarizer (OmegaConf):
Hydra config for diarizer key
params (OmegaConf):
Parameters config in diarizer.asr
        ctc_decoder_params (OmegaConf):
Hydra config for beam search decoder
realigning_lm_params (OmegaConf):
Hydra config for realigning language model
manifest_filepath (str):
Path to the input manifest path
nonspeech_threshold (float):
Threshold for VAD logits that are used for creating speech segments
fix_word_ts_with_VAD (bool):
Choose whether to fix word timestamps by using VAD results
root_path (str):
Path to the folder where diarization results are saved
vad_threshold_for_word_ts (float):
Threshold used for compensating word timestamps with VAD output
max_word_ts_length_in_sec (float):
Maximum limit for the duration of each word timestamp
word_ts_anchor_offset (float):
Offset for word timestamps from ASR decoders
run_ASR:
Placeholder variable for an ASR launcher function
realigning_lm:
Placeholder variable for a loaded ARPA Language model
ctm_exists (bool):
Boolean that indicates whether all files have the corresponding reference CTM file
frame_VAD (dict):
Dictionary containing frame-level VAD logits
AUDIO_RTTM_MAP:
Dictionary containing the input manifest information
color_palette (dict):
Dictionary containing the ANSI color escape codes for each speaker label (speaker index)
"""
def __init__(self, cfg_diarizer):
self.cfg_diarizer = cfg_diarizer
self.params = cfg_diarizer.asr.parameters
self.ctc_decoder_params = cfg_diarizer.asr.ctc_decoder_parameters
self.realigning_lm_params = cfg_diarizer.asr.realigning_lm_parameters
self.manifest_filepath = cfg_diarizer.manifest_filepath
self.nonspeech_threshold = self.params.asr_based_vad_threshold
self.fix_word_ts_with_VAD = self.params.fix_word_ts_with_VAD
self.root_path = cfg_diarizer.out_dir
self.vad_threshold_for_word_ts = 0.7
self.max_word_ts_length_in_sec = 0.6
self.word_ts_anchor_offset = 0.0
self.run_ASR = None
self.realigning_lm = None
self.ctm_exists = False
self.frame_VAD = {}
self.make_file_lists()
self.color_palette = self.get_color_palette()
self.csv_columns = self.get_csv_columns()
@staticmethod
def get_color_palette() -> Dict[str, str]:
return {
'speaker_0': '\033[1;32m',
'speaker_1': '\033[1;34m',
'speaker_2': '\033[1;30m',
'speaker_3': '\033[1;31m',
'speaker_4': '\033[1;35m',
'speaker_5': '\033[1;36m',
'speaker_6': '\033[1;37m',
'speaker_7': '\033[1;30m',
'speaker_8': '\033[1;33m',
'speaker_9': '\033[0;34m',
'white': '\033[0;37m',
}
@staticmethod
def get_csv_columns() -> List[str]:
return [
'uniq_id',
'DER',
'CER',
'FA',
'MISS',
'est_n_spk',
'ref_n_spk',
'cpWER',
'WER',
'mapping',
]
def make_file_lists(self):
"""
Create lists containing the filepaths of audio clips and CTM files.
"""
self.AUDIO_RTTM_MAP = audio_rttm_map(self.manifest_filepath)
self.audio_file_list = [value['audio_filepath'] for _, value in self.AUDIO_RTTM_MAP.items()]
self.ctm_file_list = []
for k, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
if (
'ctm_filepath' in self.AUDIO_RTTM_MAP[uniq_id]
and self.AUDIO_RTTM_MAP[uniq_id]['ctm_filepath'] is not None
and uniq_id in self.AUDIO_RTTM_MAP[uniq_id]['ctm_filepath']
):
self.ctm_file_list.append(self.AUDIO_RTTM_MAP[uniq_id]['ctm_filepath'])
# check if all unique IDs have CTM files
if len(self.audio_file_list) == len(self.ctm_file_list):
self.ctm_exists = True
def _load_realigning_LM(self):
"""
Load ARPA language model for realigning speaker labels for words.
"""
self.N_range = (
self.realigning_lm_params['min_number_of_words'],
self.realigning_lm_params['max_number_of_words'],
)
self.stt_end_tokens = ['</s>', '<s>']
logging.info(f"Loading LM for realigning: {self.realigning_lm_params['arpa_language_model']}")
return arpa.loadf(self.realigning_lm_params['arpa_language_model'])[0]
def _init_session_trans_dict(self, uniq_id: str, n_spk: int):
"""
        Initialize a dictionary variable that holds the session-level result in json-style format.
Returns:
(dict): Session level result dictionary variable
"""
return od(
{
'status': 'initialized',
'session_id': uniq_id,
'transcription': '',
'speaker_count': n_spk,
'words': [],
'sentences': [],
}
)
def _init_session_gecko_dict(self):
"""
Initialize a dictionary format for Gecko style json.
Returns:
(dict):
Gecko style json dictionary.
"""
return od({'schemaVersion': 2.0, 'monologues': []})
def _save_VAD_labels_list(self, word_ts_dict: Dict[str, Dict[str, List[float]]]):
"""
        Create speech segments from the word timestamps obtained from the `run_ASR` function
        and save them as RTTM files to be used as VAD output.
Args:
word_ts_dict (dict):
Dictionary containing word timestamps.
"""
self.VAD_RTTM_MAP = {}
for idx, (uniq_id, word_timestamps) in enumerate(word_ts_dict.items()):
speech_labels_float = self.get_speech_labels_from_decoded_prediction(
word_timestamps, self.nonspeech_threshold
)
speech_labels = self.get_str_speech_labels(speech_labels_float)
output_path = os.path.join(self.root_path, 'pred_rttms')
if not os.path.exists(output_path):
os.makedirs(output_path)
filename = labels_to_rttmfile(speech_labels, uniq_id, output_path)
self.VAD_RTTM_MAP[uniq_id] = {'audio_filepath': self.audio_file_list[idx], 'rttm_filepath': filename}
@staticmethod
def get_speech_labels_from_decoded_prediction(
input_word_ts: List[float], nonspeech_threshold: float,
) -> List[float]:
"""
Extract speech labels from the ASR output (decoded predictions)
Args:
input_word_ts (list):
                List containing word timestamps.
            nonspeech_threshold (float):
                Maximum allowed gap (in seconds) between adjacent words; word timestamps separated
                by a gap no longer than this are merged into a single speech segment.
Returns:
word_ts (list):
The ranges of the speech segments, which are merged ranges of input_word_ts.
"""
speech_labels = []
word_ts = copy.deepcopy(input_word_ts)
if word_ts == []:
return speech_labels
else:
count = len(word_ts) - 1
while count > 0:
if len(word_ts) > 1:
if word_ts[count][0] - word_ts[count - 1][1] <= nonspeech_threshold:
trangeB = word_ts.pop(count)
trangeA = word_ts.pop(count - 1)
word_ts.insert(count - 1, [trangeA[0], trangeB[1]])
count -= 1
return word_ts
    def run_diarization(self, diar_model_config, word_timestamps) -> Tuple[Dict[str, List[str]], Tuple]:
"""
Launch the diarization process using the given VAD timestamp (oracle_manifest).
Args:
diar_model_config (OmegaConf):
Hydra configurations for speaker diarization
            word_timestamps (dict):
                Dictionary containing words and word timestamps, indexed by unique ID
Returns:
diar_hyp (dict):
A dictionary containing rttm results which are indexed by a unique ID.
score Tuple[pyannote object, dict]:
A tuple containing pyannote metric instance and mapping dictionary between
speakers in hypotheses and speakers in reference RTTM files.
"""
if diar_model_config.diarizer.asr.parameters.asr_based_vad:
self._save_VAD_labels_list(word_timestamps)
oracle_manifest = os.path.join(self.root_path, 'asr_vad_manifest.json')
oracle_manifest = write_rttm2manifest(self.VAD_RTTM_MAP, oracle_manifest)
diar_model_config.diarizer.vad.model_path = None
diar_model_config.diarizer.vad.external_vad_manifest = oracle_manifest
diar_model = ClusteringDiarizer(cfg=diar_model_config)
score = diar_model.diarize()
if diar_model_config.diarizer.vad.model_path is not None and not diar_model_config.diarizer.oracle_vad:
self._get_frame_level_VAD(
vad_processing_dir=diar_model.vad_pred_dir,
smoothing_type=diar_model_config.diarizer.vad.parameters.smoothing,
)
diar_hyp = {}
for k, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
pred_rttm = os.path.join(self.root_path, 'pred_rttms', uniq_id + '.rttm')
diar_hyp[uniq_id] = rttm_to_labels(pred_rttm)
return diar_hyp, score
def _get_frame_level_VAD(self, vad_processing_dir, smoothing_type=False):
"""
Read frame-level VAD outputs.
Args:
vad_processing_dir (str):
Path to the directory where the VAD results are saved.
            smoothing_type (bool or str):
                Type of smoothing applied to the softmax logits, one of [False, 'median', 'mean'].
                If False, the raw frame-level outputs are read.
"""
if isinstance(smoothing_type, bool) and not smoothing_type:
ext_type = 'frame'
else:
ext_type = smoothing_type
for uniq_id in self.AUDIO_RTTM_MAP:
frame_vad = os.path.join(vad_processing_dir, uniq_id + '.' + ext_type)
frame_vad_float_list = []
with open(frame_vad, 'r') as fp:
for line in fp.readlines():
frame_vad_float_list.append(float(line.strip()))
self.frame_VAD[uniq_id] = frame_vad_float_list
@staticmethod
def gather_eval_results(
diar_score,
audio_rttm_map_dict: Dict[str, Dict[str, str]],
trans_info_dict: Dict[str, Dict[str, float]],
root_path: str,
decimals: int = 4,
) -> Dict[str, Dict[str, float]]:
"""
Gather diarization evaluation results from pyannote DiarizationErrorRate metric object.
Args:
            diar_score (tuple):
                Tuple containing a pyannote DiarizationErrorRate metric object, a dictionary with
                speaker mapping labels for each audio file, and an unused third element.
            audio_rttm_map_dict (dict):
                Dictionary containing the input manifest information, indexed by unique ID.
            trans_info_dict (dict):
                Dictionary containing word timestamps, speaker labels and words from all sessions.
                Each session is indexed by unique ID as a key.
            root_path (str):
                Path to the folder where the predicted RTTM files are saved.
            decimals (int):
                The number of rounding decimals for DER value
Returns:
der_results (dict):
Dictionary containing scores for each audio file along with aggregated results
"""
metric, mapping_dict, _ = diar_score
results = metric.results_
der_results = {}
count_correct_spk_counting = 0
for result in results:
key, score = result
if 'hyp_rttm_filepath' in audio_rttm_map_dict[key]:
pred_rttm = audio_rttm_map_dict[key]['hyp_rttm_filepath']
else:
pred_rttm = os.path.join(root_path, 'pred_rttms', key + '.rttm')
pred_labels = rttm_to_labels(pred_rttm)
ref_rttm = audio_rttm_map_dict[key]['rttm_filepath']
ref_labels = rttm_to_labels(ref_rttm)
ref_n_spk = get_num_of_spk_from_labels(ref_labels)
est_n_spk = get_num_of_spk_from_labels(pred_labels)
_DER, _CER, _FA, _MISS = (
(score['confusion'] + score['false alarm'] + score['missed detection']) / score['total'],
score['confusion'] / score['total'],
score['false alarm'] / score['total'],
score['missed detection'] / score['total'],
)
der_results[key] = {
"DER": round(_DER, decimals),
"CER": round(_CER, decimals),
"FA": round(_FA, decimals),
"MISS": round(_MISS, decimals),
"est_n_spk": est_n_spk,
"ref_n_spk": ref_n_spk,
"mapping": mapping_dict[key],
}
count_correct_spk_counting += int(est_n_spk == ref_n_spk)
DER, CER, FA, MISS = (
abs(metric),
metric['confusion'] / metric['total'],
metric['false alarm'] / metric['total'],
metric['missed detection'] / metric['total'],
)
der_results["total"] = {
"DER": DER,
"CER": CER,
"FA": FA,
"MISS": MISS,
"spk_counting_acc": count_correct_spk_counting / len(metric.results_),
}
return der_results
def _get_the_closest_silence_start(
self, vad_index_word_end: float, vad_frames: np.ndarray, offset: int = 10
) -> float:
"""
Find the closest silence frame from the given starting position.
Args:
vad_index_word_end (float):
The timestamp of the end of the current word.
vad_frames (numpy.array):
The numpy array containing frame-level VAD probability.
            offset (int):
                Number of VAD frames to skip past the word end before starting the search.
Returns:
cursor (float):
A timestamp of the earliest start of a silence region from
the given time point, vad_index_word_end.
"""
cursor = vad_index_word_end + offset
limit = int(100 * self.max_word_ts_length_in_sec + vad_index_word_end)
while cursor < len(vad_frames):
if vad_frames[cursor] < self.vad_threshold_for_word_ts:
break
else:
cursor += 1
if cursor > limit:
break
cursor = min(len(vad_frames) - 1, cursor)
cursor = round(cursor / 100.0, 2)
return cursor
def _compensate_word_ts_list(
self, audio_file_list: List[str], word_ts_dict: Dict[str, List[float]],
) -> Dict[str, List[List[float]]]:
"""
Compensate the word timestamps based on the VAD output.
The length of each word is capped by self.max_word_ts_length_in_sec.
Args:
audio_file_list (list):
List containing audio file paths.
word_ts_dict (dict):
Dictionary containing timestamps of words.
Returns:
enhanced_word_ts_dict (dict):
Dictionary containing the enhanced word timestamp values indexed by unique-IDs.
"""
enhanced_word_ts_dict = {}
for idx, (uniq_id, word_ts_seq_list) in enumerate(word_ts_dict.items()):
N = len(word_ts_seq_list)
enhanced_word_ts_buffer = []
for k, word_ts in enumerate(word_ts_seq_list):
if k < N - 1:
word_len = round(word_ts[1] - word_ts[0], 2)
len_to_next_word = round(word_ts_seq_list[k + 1][0] - word_ts[0] - 0.01, 2)
if uniq_id in self.frame_VAD:
vad_index_word_end = int(100 * word_ts[1])
closest_sil_stt = self._get_the_closest_silence_start(
vad_index_word_end, self.frame_VAD[uniq_id]
)
vad_est_len = round(closest_sil_stt - word_ts[0], 2)
else:
vad_est_len = len_to_next_word
min_candidate = min(vad_est_len, len_to_next_word)
fixed_word_len = max(min(self.max_word_ts_length_in_sec, min_candidate), word_len)
enhanced_word_ts_buffer.append([word_ts[0], word_ts[0] + fixed_word_len])
else:
enhanced_word_ts_buffer.append([word_ts[0], word_ts[1]])
enhanced_word_ts_dict[uniq_id] = enhanced_word_ts_buffer
return enhanced_word_ts_dict
def get_transcript_with_speaker_labels(
self, diar_hyp: Dict[str, List[str]], word_hyp: Dict[str, List[str]], word_ts_hyp: Dict[str, List[float]]
) -> Dict[str, Dict[str, float]]:
"""
Match the diarization result with the ASR output.
The words and the timestamps for the corresponding words are matched in a for loop.
Args:
diar_hyp (dict):
Dictionary of the Diarization output labels in str. Indexed by unique IDs.
Example:
>>> diar_hyp['my_audio_01'] = ['0.0 4.375 speaker_1', '4.375 5.125 speaker_0', ...]
word_hyp (dict):
Dictionary of words from ASR inference. Indexed by unique IDs.
Example:
>>> word_hyp['my_audio_01'] = ['hi', 'how', 'are', ...]
word_ts_hyp (dict):
Dictionary containing the start time and the end time of each word.
Indexed by unique IDs.
Example:
>>> word_ts_hyp['my_audio_01'] = [[0.0, 0.04], [0.64, 0.68], [0.84, 0.88], ...]
Returns:
trans_info_dict (dict):
Dictionary containing word timestamps, speaker labels and words from all sessions.
Each session is indexed by a unique ID.
"""
trans_info_dict = {}
if self.fix_word_ts_with_VAD:
if self.frame_VAD == {}:
logging.warning(
f"VAD timestamps are not provided. Fixing word timestamps without VAD. Please check the hydra configurations."
)
word_ts_refined = self._compensate_word_ts_list(self.audio_file_list, word_ts_hyp)
else:
word_ts_refined = word_ts_hyp
if self.realigning_lm_params['arpa_language_model']:
if not ARPA:
raise ImportError(
'LM for realigning is provided but arpa is not installed. Install arpa using PyPI: pip install arpa'
)
else:
self.realigning_lm = self._load_realigning_LM()
word_dict_seq_list = []
for k, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
words, diar_labels = word_hyp[uniq_id], diar_hyp[uniq_id]
word_ts, word_rfnd_ts = word_ts_hyp[uniq_id], word_ts_refined[uniq_id]
# Assign speaker labels to words
word_dict_seq_list = self.get_word_level_json_list(
words=words, word_ts=word_ts, word_rfnd_ts=word_rfnd_ts, diar_labels=diar_labels
)
if self.realigning_lm:
word_dict_seq_list = self.realign_words_with_lm(word_dict_seq_list)
            # Create a transcript information json dictionary from the output variables
trans_info_dict[uniq_id] = self._make_json_output(uniq_id, diar_labels, word_dict_seq_list)
logging.info(f"Diarization with ASR output files are saved in: {self.root_path}/pred_rttms")
return trans_info_dict
def get_word_level_json_list(
self,
words: List[str],
diar_labels: List[str],
word_ts: List[List[float]],
word_rfnd_ts: List[List[float]] = None,
decimals: int = 2,
    ) -> List[Dict[str, float]]:
"""
Assign speaker labels to each word and save the hypothesis words and speaker labels to
a dictionary variable for future use.
Args:
            words (list):
                List of words from ASR inference.
                Example:
                    >>> words = ['hi', 'how', 'are', ...]
            diar_labels (list):
                List containing the diarization output labels in str.
                Example:
                    >>> diar_labels = ['0.0 4.375 speaker_1', '4.375 5.125 speaker_0', ...]
            word_ts (list):
                List containing the start time and the end time of each word.
                Example:
                    >>> word_ts = [[0.0, 0.04], [0.64, 0.68], [0.84, 0.88], ...]
            word_rfnd_ts (list):
                List containing the refined (end point fixed) word timestamps based on the hypothesis
                word timestamps.
                Example:
                    >>> word_rfnd_ts = [[0.0, 0.60], [0.64, 0.80], [0.84, 0.92], ...]
            decimals (int):
                The number of rounding decimals for word timestamps.
Returns:
word_dict_seq_list (list):
List containing word by word dictionary containing word, timestamps and speaker labels.
Example:
>>> [{'word': 'right', 'start_time': 0.0, 'end_time': 0.04, 'speaker': 'speaker_0'},
{'word': 'and', 'start_time': 0.64, 'end_time': 0.68, 'speaker': 'speaker_1'},
{'word': 'i', 'start_time': 0.84, 'end_time': 0.88, 'speaker': 'speaker_1'},
...]
"""
if word_rfnd_ts is None:
word_rfnd_ts = word_ts
start_point, end_point, speaker = diar_labels[0].split()
word_pos, turn_idx = 0, 0
word_dict_seq_list = []
for word_idx, (word, word_ts_stt_end, refined_word_ts_stt_end) in enumerate(zip(words, word_ts, word_rfnd_ts)):
word_pos = self._get_word_timestamp_anchor(word_ts_stt_end)
if word_pos > float(end_point):
turn_idx += 1
turn_idx = min(turn_idx, len(diar_labels) - 1)
start_point, end_point, speaker = diar_labels[turn_idx].split()
stt_sec = round(refined_word_ts_stt_end[0], decimals)
end_sec = round(refined_word_ts_stt_end[1], decimals)
word_dict_seq_list.append({'word': word, 'start_time': stt_sec, 'end_time': end_sec, 'speaker': speaker})
return word_dict_seq_list
def _make_json_output(
self, uniq_id: str, diar_labels: List[str], word_dict_seq_list: List[Dict[str, float]],
) -> Dict[str, Dict[str, str]]:
"""
Generate json output files and transcripts from the ASR and diarization results.
Args:
uniq_id (str):
A unique ID (key) that identifies each input audio file.
diar_labels (list):
List containing the diarization hypothesis timestamps
Example:
>>> diar_hyp['my_audio_01'] = ['0.0 4.375 speaker_1', '4.375 5.125 speaker_0', ...]
word_dict_seq_list (list):
List containing words and corresponding word timestamps in dictionary format.
Example:
>>> [{'word': 'right', 'start_time': 0.0, 'end_time': 0.04, 'speaker': 'speaker_0'},
{'word': 'and', 'start_time': 0.64, 'end_time': 0.68, 'speaker': 'speaker_1'},
{'word': 'i', 'start_time': 0.84, 'end_time': 0.88, 'speaker': 'speaker_1'},
...]
Returns:
session_result_dict (dict):
A dictionary containing overall results of diarization and ASR inference.
`session_result_dict` has following keys: `status`, `session_id`, `transcription`, `speaker_count`,
`words`, `sentences`.
Example:
>>> session_trans_dict = \
{
'status': 'Success',
'session_id': 'my_audio_01',
'transcription': 'right and i really think ...',
'speaker_count': 2,
'words': [{'word': 'right', 'start_time': 0.0, 'end_time': 0.04, 'speaker': 'speaker_0'},
{'word': 'and', 'start_time': 0.64, 'end_time': 0.68, 'speaker': 'speaker_1'},
{'word': 'i', 'start_time': 0.84, 'end_time': 0.88, 'speaker': 'speaker_1'},
...
]
'sentences': [{'sentence': 'right', 'start_time': 0.0, 'end_time': 0.04, 'speaker': 'speaker_0'},
{'sentence': 'and i really think ...',
'start_time': 0.92, 'end_time': 4.12, 'speaker': 'speaker_0'},
...
]
}
"""
word_seq_list, audacity_label_words = [], []
start_point, end_point, speaker = diar_labels[0].split()
prev_speaker = speaker
sentences, terms_list = [], []
sentence = {'speaker': speaker, 'start_time': start_point, 'end_time': end_point, 'text': ''}
n_spk = get_num_of_spk_from_labels(diar_labels)
logging.info(f"Creating results for Session: {uniq_id} n_spk: {n_spk} ")
session_trans_dict = self._init_session_trans_dict(uniq_id=uniq_id, n_spk=n_spk)
gecko_dict = self._init_session_gecko_dict()
for k, word_dict in enumerate(word_dict_seq_list):
word, speaker = word_dict['word'], word_dict['speaker']
word_seq_list.append(word)
start_point, end_point = word_dict['start_time'], word_dict['end_time']
if speaker != prev_speaker:
if len(terms_list) != 0:
gecko_dict['monologues'].append(
{'speaker': {'name': None, 'id': prev_speaker}, 'terms': terms_list}
)
terms_list = []
# remove trailing space in text
sentence['text'] = sentence['text'].strip()
# store last sentence
sentences.append(sentence)
# start construction of a new sentence
sentence = {'speaker': speaker, 'start_time': start_point, 'end_time': end_point, 'text': ''}
else:
# correct the ending time
sentence['end_time'] = end_point
stt_sec, end_sec = start_point, end_point
terms_list.append({'start': stt_sec, 'end': end_sec, 'text': word, 'type': 'WORD'})
# add current word to sentence
sentence['text'] += word.strip() + ' '
audacity_label_words.append(get_audacity_label(word, stt_sec, end_sec, speaker))
prev_speaker = speaker
session_trans_dict['words'] = word_dict_seq_list
# note that we need to add the very last sentence.
sentence['text'] = sentence['text'].strip()
sentences.append(sentence)
gecko_dict['monologues'].append({'speaker': {'name': None, 'id': speaker}, 'terms': terms_list})
# Speaker independent transcription
session_trans_dict['transcription'] = ' '.join(word_seq_list)
# add sentences to transcription information dict
session_trans_dict['sentences'] = sentences
self._write_and_log(uniq_id, session_trans_dict, audacity_label_words, gecko_dict, sentences)
return session_trans_dict
def _get_realignment_ranges(self, k: int, word_seq_len: int) -> Tuple[int, int]:
"""
Calculate word ranges for realignment operation.
N1, N2 are calculated to not exceed the start and end of the input word sequence.
Args:
k (int):
Index of the current word
word_seq_len (int):
Length of the sentence
Returns:
N1 (int):
Start index of the word sequence
N2 (int):
End index of the word sequence
"""
if k < self.N_range[1]:
N1 = max(k, self.N_range[0])
N2 = min(word_seq_len - k, self.N_range[1])
elif k > (word_seq_len - self.N_range[1]):
N1 = min(k, self.N_range[1])
N2 = max(word_seq_len - k, self.N_range[0])
else:
N1, N2 = self.N_range[1], self.N_range[1]
return N1, N2
def _get_word_timestamp_anchor(self, word_ts_stt_end: List[float]) -> float:
"""
Determine a reference point to match a word with the diarization results.
word_ts_anchor_pos determines the position of a word in relation to the given diarization labels:
- 'start' uses the beginning of the word
- 'end' uses the end of the word
- 'mid' uses the mean of start and end of the word
word_ts_anchor_offset determines how much offset we want to add to the anchor position.
It is recommended to use the default value.
Args:
word_ts_stt_end (list):
List containing start and end of the decoded word.
Returns:
word_pos (float):
Floating point number that indicates temporal location of the word.
"""
if self.params['word_ts_anchor_pos'] == 'start':
word_pos = word_ts_stt_end[0]
elif self.params['word_ts_anchor_pos'] == 'end':
word_pos = word_ts_stt_end[1]
elif self.params['word_ts_anchor_pos'] == 'mid':
word_pos = (word_ts_stt_end[0] + word_ts_stt_end[1]) / 2
else:
            logging.info(
                f"word_ts_anchor_pos: {self.params['word_ts_anchor_pos']} is not a supported option. Using the default 'start' option."
            )
word_pos = word_ts_stt_end[0]
word_pos = word_pos + self.word_ts_anchor_offset
return word_pos
def realign_words_with_lm(self, word_dict_seq_list: List[Dict[str, float]]) -> List[Dict[str, float]]:
"""
Realign the mapping between speaker labels and words using a language model.
The realigning process calculates the probability of the certain range around the words,
especially at the boundary between two hypothetical sentences spoken by different speakers.
Example:
k-th word: "but"
hyp_former:
since i think like tuesday </s> <s> but he's coming back to albuquerque
hyp_latter:
since i think like tuesday but </s> <s> he's coming back to albuquerque
The joint probabilities of words in the sentence are computed for these two hypotheses. In addition,
logprob_diff_threshold parameter is used for reducing the false positive realigning.
Args:
word_dict_seq_list (list):
List containing words and corresponding word timestamps in dictionary format.
Returns:
realigned_list (list):
List of dictionaries containing words, word timestamps and speaker labels.
"""
word_seq_len = len(word_dict_seq_list)
hyp_w_dict_list, spk_list = [], []
for k, line_dict in enumerate(word_dict_seq_list):
word, spk_label = line_dict['word'], line_dict['speaker']
hyp_w_dict_list.append(word)
spk_list.append(spk_label)
realigned_list = []
org_spk_list = copy.deepcopy(spk_list)
for k, line_dict in enumerate(word_dict_seq_list):
if self.N_range[0] < k < (word_seq_len - self.N_range[0]) and (
spk_list[k] != org_spk_list[k + 1] or spk_list[k] != org_spk_list[k - 1]
):
N1, N2 = self._get_realignment_ranges(k, word_seq_len)
hyp_former = self.realigning_lm.log_s(
' '.join(hyp_w_dict_list[k - N1 : k] + self.stt_end_tokens + hyp_w_dict_list[k : k + N2])
)
hyp_latter = self.realigning_lm.log_s(
' '.join(hyp_w_dict_list[k - N1 : k + 1] + self.stt_end_tokens + hyp_w_dict_list[k + 1 : k + N2])
)
log_p = [hyp_former, hyp_latter]
p_order = np.argsort(log_p)[::-1]
if log_p[p_order[0]] > log_p[p_order[1]] + self.realigning_lm_params['logprob_diff_threshold']:
if p_order[0] == 0:
spk_list[k] = org_spk_list[k + 1]
line_dict['speaker'] = spk_list[k]
realigned_list.append(line_dict)
return realigned_list
@staticmethod
def evaluate(
audio_file_list: List[str],
hyp_trans_info_dict: Dict[str, Dict[str, float]],
hyp_ctm_file_list: List[str] = None,
ref_ctm_file_list: List[str] = None,
) -> Dict[str, Dict[str, float]]:
"""
Evaluate the result transcripts based on the provided CTM file. WER and cpWER are calculated to assess
the performance of ASR system and diarization at the same time.
Args:
audio_file_list (list):
List containing file path to the input audio files.
hyp_trans_info_dict (dict):
Dictionary containing the hypothesis transcriptions for all sessions.
hyp_ctm_file_list (list):
List containing file paths of the hypothesis transcriptions in CTM format for all sessions.
ref_ctm_file_list (list):
List containing file paths of the reference transcriptions in CTM format for all sessions.
Note: Either `hyp_trans_info_dict` or `hyp_ctm_file_list` should be provided.
Returns:
wer_results (dict):
Session-by-session results including DER, miss rate, false alarm rate, WER and cpWER
"""
wer_results = {}
if ref_ctm_file_list is not None:
spk_hypotheses, spk_references = [], []
mix_hypotheses, mix_references = [], []
WER_values, uniq_id_list = [], []
for k, (audio_file_path, ctm_file_path) in enumerate(zip(audio_file_list, ref_ctm_file_list)):
uniq_id = get_uniqname_from_filepath(audio_file_path)
uniq_id_list.append(uniq_id)
if uniq_id != get_uniqname_from_filepath(ctm_file_path):
raise ValueError("audio_file_list has mismatch in uniq_id with ctm_file_path")
# Either hypothesis CTM file or hyp_trans_info_dict should be provided
if hyp_ctm_file_list is not None:
if uniq_id == get_uniqname_from_filepath(hyp_ctm_file_list[k]):
spk_hypothesis, mix_hypothesis = convert_ctm_to_text(hyp_ctm_file_list[k])
else:
raise ValueError("Hypothesis CTM files are provided but uniq_id is mismatched")
elif hyp_trans_info_dict is not None and uniq_id in hyp_trans_info_dict:
spk_hypothesis, mix_hypothesis = convert_word_dict_seq_to_text(
hyp_trans_info_dict[uniq_id]['words']
)
else:
raise ValueError("Hypothesis information is not provided in the correct format.")
spk_reference, mix_reference = convert_ctm_to_text(ctm_file_path)
spk_hypotheses.append(spk_hypothesis)
spk_references.append(spk_reference)
mix_hypotheses.append(mix_hypothesis)
mix_references.append(mix_reference)
# Calculate session by session WER value
WER_values.append(word_error_rate([mix_hypothesis], [mix_reference]))
cpWER_values, hyps_spk, refs_spk = concat_perm_word_error_rate(spk_hypotheses, spk_references)
# Take an average of cpWER and regular WER value on all sessions
wer_results['total'] = {}
wer_results['total']['average_cpWER'] = word_error_rate(hypotheses=hyps_spk, references=refs_spk)
wer_results['total']['average_WER'] = word_error_rate(hypotheses=mix_hypotheses, references=mix_references)
for (uniq_id, cpWER, WER) in zip(uniq_id_list, cpWER_values, WER_values):
# Save session-level cpWER and WER values
wer_results[uniq_id] = {}
wer_results[uniq_id]['cpWER'] = cpWER
wer_results[uniq_id]['WER'] = WER
return wer_results
@staticmethod
def get_str_speech_labels(speech_labels_float: List[List[float]]) -> List[str]:
"""
Convert floating point speech labels list to a list containing string values.
Args:
speech_labels_float (list):
List containing start and end timestamps of the speech segments in floating point type
        Returns:
            speech_labels (list):
                List containing start and end timestamps of the speech segments in string format
"""
speech_labels = []
for start, end in speech_labels_float:
speech_labels.append("{:.3f} {:.3f} speech".format(start, end))
return speech_labels
@staticmethod
def write_session_level_result_in_csv(
der_results: Dict[str, Dict[str, float]],
wer_results: Dict[str, Dict[str, float]],
root_path: str,
csv_columns: List[str],
csv_file_name: str = "ctm_eval.csv",
):
"""
This function is for development use when a CTM file is provided.
Saves the session-level diarization and ASR result into a csv file.
Args:
            der_results (dict):
                Dictionary containing session-by-session DER, CER, FA and MISS values.
            wer_results (dict):
                Dictionary containing session-by-session results of ASR and diarization in terms of
                WER and cpWER.
            root_path (str):
                Path to the folder where the csv file is saved.
            csv_columns (list):
                List of column names used as the csv header.
            csv_file_name (str):
                Name of the output csv file.
"""
target_path = f"{root_path}/pred_rttms"
os.makedirs(target_path, exist_ok=True)
logging.info(f"Writing {target_path}/{csv_file_name}")
total_result_jsons = get_total_result_dict(der_results, wer_results, csv_columns)
try:
with open(f"{target_path}/{csv_file_name}", 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in total_result_jsons:
writer.writerow(data)
except IOError:
logging.info("I/O error has occurred while writing a csv file.")
def _break_lines(self, string_out: str, max_chars_in_line: int = 90) -> str:
"""
Break the lines in the transcript.
Args:
string_out (str):
Input transcript with speaker labels
max_chars_in_line (int):
Maximum characters in each line
Returns:
return_string_out (str):
String variable containing line breaking
"""
color_str_len = len('\033[1;00m') if self.params['colored_text'] else 0
split_string_out = string_out.split('\n')
return_string_out = []
for org_chunk in split_string_out:
buffer = []
if len(org_chunk) - color_str_len > max_chars_in_line:
color_str = org_chunk[:color_str_len] if color_str_len > 0 else ''
for i in range(color_str_len, len(org_chunk), max_chars_in_line):
trans_str = org_chunk[i : i + max_chars_in_line]
if len(trans_str.strip()) > 0:
c_trans_str = color_str + trans_str
buffer.append(c_trans_str)
return_string_out.extend(buffer)
else:
return_string_out.append(org_chunk)
return_string_out = '\n'.join(return_string_out)
return return_string_out
def _write_and_log(
self,
uniq_id: str,
session_trans_dict: Dict[str, Dict[str, float]],
audacity_label_words: List[str],
gecko_dict: Dict[str, Dict[str, float]],
sentences: List[Dict[str, float]],
):
"""
Write output files and display logging messages.
Args:
uniq_id (str):
A unique ID (key) that identifies each input audio file
session_trans_dict (dict):
Dictionary containing the transcription output for a session
audacity_label_words (list):
List containing word and word timestamp information in Audacity label format
gecko_dict (dict):
Dictionary formatted to be opened in Gecko software
sentences (list):
List containing sentence dictionary
"""
# print the sentences in the .txt output
string_out = self.print_sentences(sentences)
if self.params['break_lines']:
string_out = self._break_lines(string_out)
session_trans_dict["status"] = "success"
ctm_lines_list = convert_word_dict_seq_to_ctm(session_trans_dict['words'])
dump_json_to_file(f'{self.root_path}/pred_rttms/{uniq_id}.json', session_trans_dict)
dump_json_to_file(f'{self.root_path}/pred_rttms/{uniq_id}_gecko.json', gecko_dict)
write_txt(f'{self.root_path}/pred_rttms/{uniq_id}.ctm', '\n'.join(ctm_lines_list))
write_txt(f'{self.root_path}/pred_rttms/{uniq_id}.txt', string_out.strip())
write_txt(f'{self.root_path}/pred_rttms/{uniq_id}.w.label', '\n'.join(audacity_label_words))
@staticmethod
def print_errors(der_results: Dict[str, Dict[str, float]], wer_results: Dict[str, Dict[str, float]]):
"""
Print a slew of error metrics for ASR and Diarization.
Args:
der_results (dict):
Dictionary containing FA, MISS, CER and DER values for both aggregated amount and
each session.
wer_results (dict):
Dictionary containing session-by-session WER and cpWER. `wer_results` only
exists when CTM files are provided.
"""
DER_info = f"\nDER : {der_results['total']['DER']:.4f} \
\nFA : {der_results['total']['FA']:.4f} \
\nMISS : {der_results['total']['MISS']:.4f} \
\nCER : {der_results['total']['CER']:.4f} \
\nSpk. counting acc. : {der_results['total']['spk_counting_acc']:.4f}"
if wer_results is not None and len(wer_results) > 0:
logging.info(
DER_info
+ f"\ncpWER : {wer_results['total']['average_cpWER']:.4f} \
\nWER : {wer_results['total']['average_WER']:.4f}"
)
else:
logging.info(DER_info)
def print_sentences(self, sentences: List[Dict[str, float]]):
"""
Print a transcript with speaker labels and timestamps.
Args:
sentences (list):
List containing sentence-level dictionaries.
Returns:
string_out (str):
String variable containing transcript and the corresponding speaker label.
"""
# init output
string_out = ''
for sentence in sentences:
# extract info
speaker = sentence['speaker']
start_point = sentence['start_time']
end_point = sentence['end_time']
text = sentence['text']
if self.params['colored_text']:
color = self.color_palette.get(speaker, '\033[0;37m')
else:
color = ''
# cast timestamp to the correct format
datetime_offset = 16 * 3600
if float(start_point) > 3600:
time_str = '%H:%M:%S.%f'
else:
time_str = '%M:%S.%f'
start_point, end_point = max(float(start_point), 0), max(float(end_point), 0)
start_point_str = datetime.fromtimestamp(start_point - datetime_offset).strftime(time_str)[:-4]
end_point_str = datetime.fromtimestamp(end_point - datetime_offset).strftime(time_str)[:-4]
if self.params['print_time']:
time_str = f'[{start_point_str} - {end_point_str}] '
else:
time_str = ''
# string out concatenation
string_out += f'{color}{time_str}{speaker}: {text}\n'
return string_out
|
NeMo-main
|
nemo/collections/asr/parts/utils/diarization_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Iterable, Optional, Union
import librosa
import numpy as np
import numpy.typing as npt
import scipy
import soundfile as sf
import torch
from scipy.spatial.distance import pdist, squareform
from nemo.utils import logging
SOUND_VELOCITY = 343.0 # m/s
ChannelSelectorType = Union[int, Iterable[int], str]
def get_samples(audio_file: str, target_sr: int = 16000, dtype: str = 'float32'):
"""
    Read the samples from the given audio_file path and resample them to `target_sr`
    if the native sampling rate differs.
    Args:
        audio_file (str):
            Path to the input audio file
        target_sr (int):
            Target sampling rate (defaults to 16 kHz)
        dtype (str):
            Data type of the returned samples
Returns:
samples (numpy.ndarray):
Time-series sample data from the given audio file
"""
with sf.SoundFile(audio_file, 'r') as f:
samples = f.read(dtype=dtype)
if f.samplerate != target_sr:
samples = librosa.core.resample(samples, orig_sr=f.samplerate, target_sr=target_sr)
samples = samples.transpose()
return samples
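# A minimal usage sketch for `get_samples`, assuming soundfile can write a temporary
# wav file; the 440 Hz tone and the 8 kHz source rate are illustrative only.
def _example_get_samples():
    import tempfile
    source_sr = 8000
    audio = np.sin(2 * np.pi * 440.0 * np.arange(source_sr) / source_sr).astype('float32')
    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_wav:
        sf.write(tmp_wav.name, audio, source_sr)
    samples = get_samples(tmp_wav.name, target_sr=16000)
    # `samples` is resampled from 8 kHz to 16 kHz, so it holds roughly twice as many samples
    return samples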
def select_channels(signal: npt.NDArray, channel_selector: Optional[ChannelSelectorType] = None) -> npt.NDArray:
"""
Convert a multi-channel signal to a single-channel signal by averaging over channels or selecting a single channel,
or pass-through multi-channel signal when channel_selector is `None`.
Args:
signal: numpy array with shape (..., num_channels)
        channel_selector: string denoting the downmix mode, an integer denoting the channel to be selected,
            or an iterable of integers denoting a subset of channels. Channel selection uses zero-based indexing.
            If set to `None`, the original signal is returned.
Returns:
numpy array
"""
if signal.ndim == 1:
# For one-dimensional input, return the input signal.
if channel_selector not in [None, 0, 'average']:
raise ValueError(
            f'Input signal is one-dimensional, channel selector ({channel_selector}) cannot be used.'
)
return signal
num_channels = signal.shape[-1]
num_samples = signal.size // num_channels # handle multi-dimensional signals
if num_channels >= num_samples:
logging.warning(
        'Number of channels (%d) is greater than or equal to the number of samples (%d). Check for possible transposition.',
num_channels,
num_samples,
)
# Samples are arranged as (num_channels, ...)
if channel_selector is None:
# keep the original multi-channel signal
pass
elif channel_selector == 'average':
# default behavior: downmix by averaging across channels
signal = np.mean(signal, axis=-1)
elif isinstance(channel_selector, int):
# select a single channel
if channel_selector >= num_channels:
raise ValueError(f'Cannot select channel {channel_selector} from a signal with {num_channels} channels.')
signal = signal[..., channel_selector]
elif isinstance(channel_selector, Iterable):
# select multiple channels
if max(channel_selector) >= num_channels:
raise ValueError(
f'Cannot select channel subset {channel_selector} from a signal with {num_channels} channels.'
)
signal = signal[..., channel_selector]
# squeeze the channel dimension if a single-channel is selected
# this is done to have the same shape as when using integer indexing
if len(channel_selector) == 1:
signal = np.squeeze(signal, axis=-1)
else:
raise ValueError(f'Unexpected value for channel_selector ({channel_selector})')
return signal
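# A minimal usage sketch for `select_channels` on a synthetic two-channel signal.
def _example_select_channels():
    rng = np.random.default_rng(0)
    signal = rng.standard_normal((16000, 2))  # (num_samples, num_channels)
    mono = select_channels(signal, channel_selector='average')  # downmix, shape (16000,)
    first = select_channels(signal, channel_selector=0)  # first channel, shape (16000,)
    both = select_channels(signal, channel_selector=[0, 1])  # keep both channels, shape (16000, 2)
    return mono, first, both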
def sinc_unnormalized(x: float) -> float:
"""Unnormalized sinc.
Args:
x: input value
Returns:
Calculates sin(x)/x
"""
return np.sinc(x / np.pi)
def theoretical_coherence(
mic_positions: npt.NDArray,
sample_rate: float,
field: str = 'spherical',
fft_length: int = 512,
sound_velocity: float = SOUND_VELOCITY,
) -> npt.NDArray:
"""Calculate a theoretical coherence matrix for given mic positions and field type.
Args:
mic_positions: 3D Cartesian coordinates of microphone positions, shape (num_mics, 3)
field: string denoting the type of the soundfield
sample_rate: sampling rate of the input signal in Hz
fft_length: length of the fft in samples
sound_velocity: speed of sound in m/s
Returns:
Calculated coherence with shape (num_subbands, num_mics, num_mics)
"""
assert mic_positions.shape[1] == 3, "Expecting 3D microphone positions"
num_mics = mic_positions.shape[0]
if num_mics < 2:
raise ValueError(f'Expecting at least 2 microphones, received {num_mics}')
num_subbands = fft_length // 2 + 1
angular_freq = 2 * np.pi * sample_rate * np.arange(0, num_subbands) / fft_length
desired_coherence = np.zeros((num_subbands, num_mics, num_mics))
mic_distance = squareform(pdist(mic_positions))
for p in range(num_mics):
desired_coherence[:, p, p] = 1.0
for q in range(p + 1, num_mics):
dist_pq = mic_distance[p, q]
if field == 'spherical':
desired_coherence[:, p, q] = sinc_unnormalized(angular_freq * dist_pq / sound_velocity)
else:
raise ValueError(f'Unknown noise field {field}.')
# symmetry
desired_coherence[:, q, p] = desired_coherence[:, p, q]
return desired_coherence
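# A minimal usage sketch for `theoretical_coherence` with a hypothetical two-microphone
# array spaced 5 cm apart along the x-axis.
def _example_theoretical_coherence():
    mic_positions = np.array([[0.0, 0.0, 0.0], [0.05, 0.0, 0.0]])  # (num_mics, 3)
    coherence = theoretical_coherence(mic_positions, sample_rate=16000, fft_length=512)
    # coherence.shape == (257, 2, 2), with ones on the diagonal for each subband
    return coherence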
def estimated_coherence(S: npt.NDArray, eps: float = 1e-16) -> npt.NDArray:
"""Estimate complex-valued coherence for the input STFT-domain signal.
Args:
S: STFT of the signal with shape (num_subbands, num_frames, num_channels)
eps: small regularization constant
Returns:
Estimated coherence with shape (num_subbands, num_channels, num_channels)
"""
if S.ndim != 3:
raise RuntimeError('Expecting the input STFT to be a 3D array')
num_subbands, num_frames, num_channels = S.shape
if num_channels < 2:
raise ValueError('Expecting at least 2 microphones')
psd = np.mean(np.abs(S) ** 2, axis=1)
estimated_coherence = np.zeros((num_subbands, num_channels, num_channels), dtype=complex)
for p in range(num_channels):
estimated_coherence[:, p, p] = 1.0
for q in range(p + 1, num_channels):
cross_psd = np.mean(S[:, :, p] * np.conjugate(S[:, :, q]), axis=1)
estimated_coherence[:, p, q] = cross_psd / np.sqrt(psd[:, p] * psd[:, q] + eps)
# symmetry
estimated_coherence[:, q, p] = np.conjugate(estimated_coherence[:, p, q])
return estimated_coherence
def generate_approximate_noise_field(
mic_positions: npt.NDArray,
noise_signal: npt.NDArray,
sample_rate: float,
field: str = 'spherical',
fft_length: int = 512,
method: str = 'cholesky',
sound_velocity: float = SOUND_VELOCITY,
):
"""
    Generate a signal approximating a noise field with the desired spatial coherence
    for the given microphone positions.
    Args:
mic_positions: 3D microphone positions, shape (num_mics, 3)
noise_signal: signal used to generate the approximate noise field, shape (num_samples, num_mics).
Different channels need to be independent.
sample_rate: sampling rate of the input signal
field: string denoting the type of the soundfield
fft_length: length of the fft in samples
method: coherence decomposition method
sound_velocity: speed of sound in m/s
Returns:
Signal with coherence approximately matching the desired coherence, shape (num_samples, num_channels)
References:
E.A.P. Habets, I. Cohen and S. Gannot, 'Generating nonstationary multisensor
signals under a spatial coherence constraint', Journal of the Acoustical Society
of America, Vol. 124, Issue 5, pp. 2911-2917, Nov. 2008.
"""
assert fft_length % 2 == 0
num_mics = mic_positions.shape[0]
if num_mics < 2:
raise ValueError('Expecting at least 2 microphones')
desired_coherence = theoretical_coherence(
mic_positions=mic_positions,
field=field,
sample_rate=sample_rate,
fft_length=fft_length,
sound_velocity=sound_velocity,
)
return transform_to_match_coherence(signal=noise_signal, desired_coherence=desired_coherence, method=method)
def transform_to_match_coherence(
signal: npt.NDArray,
desired_coherence: npt.NDArray,
method: str = 'cholesky',
ref_channel: int = 0,
corrcoef_threshold: float = 0.2,
) -> npt.NDArray:
"""Transform the input multichannel signal to match the desired coherence.
Note: It's assumed that channels are independent.
Args:
signal: independent noise signals with shape (num_samples, num_channels)
desired_coherence: desired coherence with shape (num_subbands, num_channels, num_channels)
method: decomposition method used to construct the transformation matrix
ref_channel: reference channel for power normalization of the input signal
corrcoef_threshold: used to detect input signals with high correlation between channels
Returns:
Signal with coherence approximately matching the desired coherence, shape (num_samples, num_channels)
References:
E.A.P. Habets, I. Cohen and S. Gannot, 'Generating nonstationary multisensor
signals under a spatial coherence constraint', Journal of the Acoustical Society
of America, Vol. 124, Issue 5, pp. 2911-2917, Nov. 2008.
"""
num_channels = signal.shape[1]
num_subbands = desired_coherence.shape[0]
assert desired_coherence.shape[1] == num_channels
assert desired_coherence.shape[2] == num_channels
fft_length = 2 * (num_subbands - 1)
# remove DC component
signal = signal - np.mean(signal, axis=0)
# channels needs to have equal power, so normalize with the ref mic
signal_power = np.mean(np.abs(signal) ** 2, axis=0)
signal = signal * np.sqrt(signal_power[ref_channel]) / np.sqrt(signal_power)
# input channels should be uncorrelated
# here, we just check for high correlation coefficients between channels to detect ill-constructed inputs
corrcoef_matrix = np.corrcoef(signal.transpose())
# mask the diagonal elements
np.fill_diagonal(corrcoef_matrix, 0.0)
if np.any(np.abs(corrcoef_matrix) > corrcoef_threshold):
raise RuntimeError(
f'Input channels are correlated above the threshold {corrcoef_threshold}. Max abs off-diagonal element of the coefficient matrix: {np.abs(corrcoef_matrix).max()}.'
)
# analysis transform
S = librosa.stft(signal.transpose(), n_fft=fft_length)
# (channel, subband, frame) -> (subband, frame, channel)
S = S.transpose(1, 2, 0)
# generate output signal for each subband
X = np.zeros_like(S)
# factorize the desired coherence (skip the DC component)
if method == 'cholesky':
L = np.linalg.cholesky(desired_coherence[1:])
A = L.swapaxes(1, 2)
elif method == 'evd':
w, V = np.linalg.eig(desired_coherence[1:])
# scale eigenvectors
A = np.sqrt(w)[:, None, :] * V
# prepare transform matrix
A = A.swapaxes(1, 2)
else:
raise ValueError(f'Unknown method {method}')
# transform vectors at each time step:
# x_t = A^T * s_t
# or in matrix notation: X = S * A
X[1:, ...] = np.matmul(S[1:, ...], A)
# synthesis transform
# transpose X from (subband, frame, channel) to (channel, subband, frame)
x = librosa.istft(X.transpose(2, 0, 1), length=len(signal))
# (channel, sample) -> (sample, channel)
x = x.transpose()
return x
def rms(x: np.ndarray) -> float:
"""Calculate RMS value for the input signal.
Args:
x: input signal
Returns:
RMS of the input signal.
"""
return np.sqrt(np.mean(np.abs(x) ** 2))
def mag2db(mag: float, eps: Optional[float] = 1e-16) -> float:
"""Convert magnitude ratio from linear scale to dB.
Args:
mag: linear magnitude value
eps: small regularization constant
Returns:
Value in dB.
"""
return 20 * np.log10(mag + eps)
def db2mag(db: float) -> float:
"""Convert value in dB to linear magnitude ratio.
Args:
db: magnitude ratio in dB
Returns:
Magnitude ratio in linear scale.
"""
return 10 ** (db / 20)
def pow2db(power: float, eps: Optional[float] = 1e-16) -> float:
"""Convert power ratio from linear scale to dB.
Args:
power: power ratio in linear scale
eps: small regularization constant
Returns:
Power in dB.
"""
return 10 * np.log10(power + eps)
def get_segment_start(signal: np.ndarray, segment: np.ndarray) -> int:
"""Get starting point of `segment` in `signal`.
We assume that `segment` is a sub-segment of `signal`.
For example, `signal` may be a 10 second audio signal,
and `segment` could be the signal between 2 seconds and
5 seconds. This function will then return the index of
the sample where `segment` starts (at 2 seconds).
Args:
signal: numpy array with shape (num_samples,)
segment: numpy array with shape (num_samples,)
Returns:
Index of the start of `segment` in `signal`.
"""
if len(signal) <= len(segment):
raise ValueError(
f'segment must be shorter than signal: len(segment) = {len(segment)}, len(signal) = {len(signal)}'
)
cc = scipy.signal.correlate(signal, segment, mode='valid')
return np.argmax(cc)
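# A minimal usage sketch for `get_segment_start` using a synthetic signal and a
# sub-segment cut out of it.
def _example_get_segment_start():
    rng = np.random.default_rng(0)
    signal = rng.standard_normal(16000)
    segment = signal[2000:5000]
    start = get_segment_start(signal=signal, segment=segment)  # -> 2000
    return start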
def calculate_sdr_numpy(
estimate: np.ndarray,
target: np.ndarray,
scale_invariant: bool = False,
convolution_invariant: bool = False,
convolution_filter_length: Optional[int] = None,
remove_mean: bool = True,
sdr_max: Optional[float] = None,
eps: float = 1e-10,
) -> float:
"""Calculate signal-to-distortion ratio.
        SDR = 10 * log10( ||t||_2^2 / (||e-t||_2^2 + alpha * ||t||_2^2) )
where
alpha = 10^(-sdr_max/10)
Optionally, apply scale-invariant scaling to target signal.
Args:
estimate: estimated signal
target: target signal
Returns:
SDR in dB.
"""
if scale_invariant and convolution_invariant:
raise ValueError('Arguments scale_invariant and convolution_invariant cannot be used simultaneously.')
if remove_mean:
estimate = estimate - np.mean(estimate)
target = target - np.mean(target)
if scale_invariant or (convolution_invariant and convolution_filter_length == 1):
target = scale_invariant_target_numpy(estimate=estimate, target=target, eps=eps)
elif convolution_invariant:
target = convolution_invariant_target_numpy(
estimate=estimate, target=target, filter_length=convolution_filter_length, eps=eps
)
target_pow = np.mean(np.abs(target) ** 2)
distortion_pow = np.mean(np.abs(estimate - target) ** 2)
if sdr_max is not None:
distortion_pow = distortion_pow + 10 ** (-sdr_max / 10) * target_pow
sdr = 10 * np.log10(target_pow / (distortion_pow + eps) + eps)
return sdr
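# A minimal usage sketch for `calculate_sdr_numpy` on a synthetic target corrupted by
# additive noise; both values come out close to 20 dB for this noise level.
def _example_calculate_sdr_numpy():
    rng = np.random.default_rng(0)
    target = rng.standard_normal(16000)
    estimate = target + 0.1 * rng.standard_normal(16000)
    sdr = calculate_sdr_numpy(estimate=estimate, target=target)
    si_sdr = calculate_sdr_numpy(estimate=estimate, target=target, scale_invariant=True)
    return sdr, si_sdr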
def wrap_to_pi(x: torch.Tensor) -> torch.Tensor:
"""Wrap angle in radians to [-pi, pi]
Args:
x: angle in radians
Returns:
Angle in radians wrapped to [-pi, pi]
"""
pi = torch.tensor(math.pi, device=x.device)
return torch.remainder(x + pi, 2 * pi) - pi
def convmtx_numpy(x: np.ndarray, filter_length: int, delay: int = 0, n_steps: Optional[int] = None) -> np.ndarray:
"""Construct a causal convolutional matrix from x delayed by `delay` samples.
Args:
x: input signal, shape (N,)
filter_length: length of the filter in samples
delay: delay the signal by a number of samples
n_steps: total number of time steps (rows) for the output matrix
Returns:
Convolutional matrix, shape (n_steps, filter_length)
"""
if x.ndim != 1:
raise ValueError(f'Expecting one-dimensional signal. Received signal with shape {x.shape}')
if n_steps is None:
# Keep the same length as the input signal
n_steps = len(x)
# pad as necessary
x_pad = np.hstack([np.zeros(delay), x])
if (pad_len := n_steps - len(x_pad)) > 0:
x_pad = np.hstack([x_pad, np.zeros(pad_len)])
else:
x_pad = x_pad[:n_steps]
return scipy.linalg.toeplitz(x_pad, np.hstack([x_pad[0], np.zeros(filter_length - 1)]))
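# A minimal usage sketch for `convmtx_numpy`: multiplying the convolution matrix by a
# filter reproduces direct FIR filtering of the input signal.
def _example_convmtx_numpy():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    filt = np.array([1.0, -1.0])
    conv_mtx = convmtx_numpy(x, filter_length=len(filt))  # shape (4, 2)
    y = conv_mtx @ filt
    # y == [1., 1., 1., 1.], matching np.convolve(x, filt)[: len(x)]
    return y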
def convmtx_mc_numpy(x: np.ndarray, filter_length: int, delay: int = 0, n_steps: Optional[int] = None) -> np.ndarray:
"""Construct a causal multi-channel convolutional matrix from `x` delayed by `delay` samples.
Args:
x: input signal, shape (N, M)
filter_length: length of the filter in samples
delay: delay the signal by a number of samples
n_steps: total number of time steps (rows) for the output matrix
Returns:
Multi-channel convolutional matrix, shape (n_steps, M * filter_length)
"""
if x.ndim != 2:
raise ValueError(f'Expecting two-dimensional signal. Received signal with shape {x.shape}')
mc_mtx = []
for m in range(x.shape[1]):
mc_mtx.append(convmtx_numpy(x[:, m], filter_length=filter_length, delay=delay, n_steps=n_steps))
return np.hstack(mc_mtx)
def scale_invariant_target_numpy(estimate: np.ndarray, target: np.ndarray, eps: float = 1e-10) -> np.ndarray:
"""Calculate convolution-invariant target for a given estimated signal.
Calculate scaled target obtained by solving
min_scale || scale * target - estimate ||^2
Args:
estimate: one-dimensional estimated signal, shape (T,)
target: one-dimensional target signal, shape (T,)
        eps: regularization constant
Returns:
Scaled target signal, shape (T,)
"""
assert target.ndim == estimate.ndim == 1, f'Only one-dimensional inputs supported'
estimate_dot_target = np.mean(estimate * target)
target_pow = np.mean(np.abs(target) ** 2)
scale = estimate_dot_target / (target_pow + eps)
return scale * target
def convolution_invariant_target_numpy(
estimate: np.ndarray, target: np.ndarray, filter_length, diag_reg: float = 1e-8, eps: float = 1e-10
) -> np.ndarray:
"""Calculate convolution-invariant target for a given estimated signal.
    Calculate the target filtered with a linear filter obtained by solving
min_filter || conv(filter, target) - estimate ||^2
Args:
estimate: one-dimensional estimated signal
target: one-dimensional target signal
filter_length: length of the (convolutive) filter
diag_reg: multiplicative factor for relative diagonal loading
        eps: absolute diagonal loading
    Returns:
        Filtered target signal, truncated to the length of the input target.
"""
assert target.ndim == estimate.ndim == 1, f'Only one-dimensional inputs supported'
n_fft = 2 ** math.ceil(math.log2(len(target) + len(estimate) - 1))
T = np.fft.rfft(target, n=n_fft)
E = np.fft.rfft(estimate, n=n_fft)
# target autocorrelation
tt_corr = np.fft.irfft(np.abs(T) ** 2, n=n_fft)
# target-estimate crosscorrelation
te_corr = np.fft.irfft(T.conj() * E, n=n_fft)
# Use only filter_length
tt_corr = tt_corr[:filter_length]
te_corr = te_corr[:filter_length]
if diag_reg is not None:
tt_corr[0] += diag_reg * tt_corr[0] + eps
# Construct the Toeplitz system matrix
TT = scipy.linalg.toeplitz(tt_corr)
# Solve the linear system for the optimal filter
filt = np.linalg.solve(TT, te_corr)
# Calculate filtered target
T_filt = T * np.fft.rfft(filt, n=n_fft)
target_filt = np.fft.irfft(T_filt, n=n_fft)
return target_filt[: len(target)]
def toeplitz(x: torch.Tensor) -> torch.Tensor:
"""Create Toeplitz matrix for one-dimensional signals along the last dimension.
Args:
x: tensor with shape (..., T)
Returns:
Tensor with shape (..., T, T)
"""
length = x.size(-1)
x = torch.cat([x[..., 1:].flip(dims=(-1,)), x], dim=-1)
return x.unfold(-1, length, 1).flip(dims=(-1,))
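# A minimal usage sketch for the torch `toeplitz` helper above.
def _example_toeplitz():
    x = torch.tensor([1.0, 2.0, 3.0])
    toeplitz_matrix = toeplitz(x)
    # toeplitz_matrix == tensor([[1., 2., 3.],
    #                            [2., 1., 2.],
    #                            [3., 2., 1.]])
    return toeplitz_matrix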
|
NeMo-main
|
nemo/collections/asr/parts/utils/audio_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
def compute_stochastic_depth_drop_probs(
num_layers: int,
stochastic_depth_drop_prob: float = 0.0,
stochastic_depth_mode: str = "linear",
stochastic_depth_start_layer: int = 1,
) -> List[float]:
"""Computes drop probabilities for stochastic depth regularization technique.
The first layer is never dropped and the starting layer needs to be greater
or equal to 1.
Args:
num_layers (int): number of layers in the network.
stochastic_depth_drop_prob (float): if non-zero, will randomly drop
layers during training. The higher this value, the more often layers
are dropped. Defaults to 0.0.
stochastic_depth_mode (str): can be either "linear" or "uniform". If
set to "uniform", all layers have the same probability of drop. If
set to "linear", the drop probability grows linearly from 0 for the
first layer to the desired value for the final layer. Defaults to
"linear".
stochastic_depth_start_layer (int): starting layer for stochastic depth.
All layers before this will never be dropped. Note that drop
probability will be adjusted accordingly if mode is "linear" when
start layer is > 1. Defaults to 1.
Returns:
List[float]: list of drop probabilities for all layers
"""
if not (0 <= stochastic_depth_drop_prob < 1.0):
raise ValueError("stochastic_depth_drop_prob has to be in [0, 1).")
if not (1 <= stochastic_depth_start_layer <= num_layers):
raise ValueError("stochastic_depth_start_layer has to be in [1, num layers].")
# Layers before `stochastic_depth_start_layer` are never dropped
layer_drop_probs = [0.0] * stochastic_depth_start_layer
# Layers starting with `stochastic_depth_start_layer` may be dropped
if (L := num_layers - stochastic_depth_start_layer) > 0:
if stochastic_depth_mode == "linear":
            # we start with 1/L * drop_prob and end with the desired drop probability.
layer_drop_probs += [l / L * stochastic_depth_drop_prob for l in range(1, L + 1)]
elif stochastic_depth_mode == "uniform":
layer_drop_probs += [stochastic_depth_drop_prob] * L
else:
raise ValueError(
f'stochastic_depth_mode has to be one of ["linear", "uniform"]. Current value: {stochastic_depth_mode}'
)
return layer_drop_probs
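# Illustrative usage (added example; not part of the original NeMo module): with 5 layers,
# drop_prob=0.2, linear mode and start_layer=1, the first layer is never dropped and the
# drop probability grows linearly up to 0.2 for the last layer.
def _example_stochastic_depth_drop_probs() -> List[float]:
    # Expected (up to floating-point rounding): [0.0, 0.05, 0.1, 0.15, 0.2]
    return compute_stochastic_depth_drop_probs(
        num_layers=5, stochastic_depth_drop_prob=0.2, stochastic_depth_mode="linear", stochastic_depth_start_layer=1
    )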
|
NeMo-main
|
nemo/collections/asr/parts/utils/regularization_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import math
import multiprocessing
import os
import shutil
from itertools import repeat
from math import ceil, floor
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import IPython.display as ipd
import librosa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from omegaconf import DictConfig
from pyannote.core import Annotation, Segment
from pyannote.metrics import detection
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid
from tqdm import tqdm
from nemo.collections.asr.models import EncDecClassificationModel, EncDecFrameClassificationModel
from nemo.collections.common.parts.preprocessing.manifest import get_full_path
from nemo.utils import logging
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
"""
This file contains all the utility functions required for voice activity detection.
"""
def prepare_manifest(config: dict) -> str:
"""
    Performing VAD on a long audio snippet might cause a CUDA out-of-memory issue.
    Automatically split each manifest entry by split_duration to avoid the potential memory issue.
"""
if 'prepared_manifest_vad_input' in config and config['prepared_manifest_vad_input']:
manifest_vad_input = config['prepared_manifest_vad_input']
else:
default_path = "manifest_vad_input.json"
manifest_vad_input = os.path.join(config["out_dir"], default_path) if "out_dir" in config else default_path
    # input_list is a list of dicts, e.g. [{'audio_filepath': ..., 'offset': xxx, 'duration': xxx}]
if type(config['input']) == str:
input_list = []
with open(config['input'], 'r', encoding='utf-8') as manifest:
for line in manifest.readlines():
input_list.append(json.loads(line.strip()))
elif type(config['input']) == list:
input_list = config['input']
else:
raise ValueError(
"The input for manifest preparation would either be a string of the filepath to manifest or a list of {'audio_filepath': i, 'offset': 0, 'duration': null} "
)
args_func = {
'label': 'infer',
'split_duration': config['split_duration'],
'window_length_in_sec': config['window_length_in_sec'],
'manifest_dir': Path(config['input']).parent if type(config['input']) == str else '',
}
if config.get('num_workers') is not None and config['num_workers'] > 1:
with multiprocessing.Pool(processes=config['num_workers']) as p:
inputs = zip(input_list, repeat(args_func))
results = list(
tqdm(
p.imap(write_vad_infer_manifest_star, inputs),
total=len(input_list),
desc='splitting manifest',
leave=True,
)
)
else:
results = [
write_vad_infer_manifest(input_el, args_func)
for input_el in tqdm(input_list, desc='splitting manifest', leave=True)
]
if os.path.exists(manifest_vad_input):
logging.info("The prepared manifest file exists. Overwriting!")
os.remove(manifest_vad_input)
with open(manifest_vad_input, 'a', encoding='utf-8') as fout:
for res in results:
for r in res:
json.dump(r, fout)
fout.write('\n')
fout.flush()
return manifest_vad_input
def write_vad_infer_manifest_star(args):
"""
A workaround for tqdm with starmap of multiprocessing
"""
return write_vad_infer_manifest(*args)
def write_vad_infer_manifest(file: dict, args_func: dict) -> list:
"""
Used by prepare_manifest.
    Given a manifest entry, split it into clips of at most split_duration and write them to the manifest.
    Args:
        file (dict): manifest entry (file) to be processed
        args_func:
            label (str): label for the audio snippet
            split_duration (float): max duration of each audio clip (each line in json)
            window_length_in_sec (float): length of window for generating the frame. Used for taking care of the joint between adjacent clips.
Returns:
res (list) : list of generated metadata line of json for file
"""
res = []
label = args_func['label']
split_duration = args_func['split_duration']
window_length_in_sec = args_func['window_length_in_sec']
filepath = file['audio_filepath']
in_duration = file.get('duration', None)
in_offset = file.get('offset', 0)
# if filepath is not found, try to find it in the dir of manifest
if not Path(filepath).is_file():
new_filepath = Path(args_func['manifest_dir']) / filepath
if new_filepath.is_file():
filepath = new_filepath.absolute().as_posix()
try:
sr = 16000
x, _sr = librosa.load(filepath, sr=sr, offset=in_offset, duration=in_duration)
duration = librosa.get_duration(y=x, sr=sr)
left = duration
current_offset = in_offset
status = 'single'
while left > 0:
if left <= split_duration:
if status == 'single':
write_duration = left
current_offset = 0
else:
status = 'end'
write_duration = left + window_length_in_sec
current_offset -= window_length_in_sec
offset_inc = left
left = 0
else:
if status == 'start' or status == 'next':
status = 'next'
else:
status = 'start'
if status == 'start':
write_duration = split_duration
offset_inc = split_duration
else:
write_duration = split_duration + window_length_in_sec
current_offset -= window_length_in_sec
offset_inc = split_duration + window_length_in_sec
left -= split_duration
metadata = {
'audio_filepath': filepath,
'duration': write_duration,
'label': label,
'text': '_',
'offset': current_offset,
}
res.append(metadata)
current_offset += offset_inc
except Exception as e:
err_file = "error.log"
with open(err_file, 'w', encoding='utf-8') as fout:
fout.write(filepath + ":" + str(e))
return res
def get_vad_stream_status(data: list) -> list:
"""
    Generate a list of statuses for each snippet in the manifest. A snippet should be in single, start, next or end status.
    Used for concatenating the predictions back into the full audio file.
Args:
data (list): list of filepath of audio snippet
Returns:
status (list): list of status of each snippet.
"""
if len(data) == 1:
return ['single']
status = [None] * len(data)
for i in range(len(data)):
if i == 0:
status[i] = 'start' if data[i] == data[i + 1] else 'single'
elif i == len(data) - 1:
status[i] = 'end' if data[i] == data[i - 1] else 'single'
else:
if data[i] != data[i - 1] and data[i] == data[i + 1]:
status[i] = 'start'
elif data[i] == data[i - 1] and data[i] == data[i + 1]:
status[i] = 'next'
elif data[i] == data[i - 1] and data[i] != data[i + 1]:
status[i] = 'end'
else:
status[i] = 'single'
return status
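# Illustrative usage (added example; not part of the original NeMo module): consecutive
# snippets that come from the same audio file are tagged start/next/end, while an
# isolated snippet is tagged single.
def _example_get_vad_stream_status() -> list:
    data = ['audio_a', 'audio_a', 'audio_a', 'audio_b']
    # Expected: ['start', 'next', 'end', 'single']
    return get_vad_stream_status(data)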
def load_tensor_from_file(filepath: str) -> Tuple[torch.Tensor, str]:
"""
Load torch.Tensor and the name from file
"""
frame = []
with open(filepath, "r", encoding='utf-8') as f:
for line in f.readlines():
frame.append(float(line))
name = Path(filepath).stem
return torch.tensor(frame), name
def generate_overlap_vad_seq(
frame_pred_dir: str,
smoothing_method: str,
overlap: float,
window_length_in_sec: float,
shift_length_in_sec: float,
num_workers: int,
out_dir: str = None,
) -> str:
"""
Generate predictions with overlapping input windows/segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple windows.
Two common smoothing filters are supported: majority vote (median) and average (mean).
This function uses multiprocessing to speed up.
Args:
frame_pred_dir (str): Directory of frame prediction file to be processed.
smoothing_method (str): median or mean smoothing filter.
        overlap (float): amount of overlap between adjacent windows.
        window_length_in_sec (float): length of window for generating the frame.
        shift_length_in_sec (float): amount of shift of window for generating the frame.
        out_dir (str): directory of generated predictions.
        num_workers (int): number of processes for multiprocessing.
Returns:
overlap_out_dir(str): directory of the generated predictions.
"""
frame_filepathlist = glob.glob(frame_pred_dir + "/*.frame")
if out_dir:
overlap_out_dir = out_dir
else:
overlap_out_dir = os.path.join(
frame_pred_dir, "overlap_smoothing_output" + "_" + smoothing_method + "_" + str(overlap)
)
if not os.path.exists(overlap_out_dir):
os.mkdir(overlap_out_dir)
per_args = {
"overlap": overlap,
"window_length_in_sec": window_length_in_sec,
"shift_length_in_sec": shift_length_in_sec,
"out_dir": overlap_out_dir,
"smoothing_method": smoothing_method,
}
if num_workers is not None and num_workers > 1:
with multiprocessing.Pool(processes=num_workers) as p:
inputs = zip(frame_filepathlist, repeat(per_args))
results = list(
tqdm(
p.imap(generate_overlap_vad_seq_per_file_star, inputs),
total=len(frame_filepathlist),
desc='generating preds',
leave=True,
)
)
else:
for frame_filepath in tqdm(frame_filepathlist, desc='generating preds', leave=False):
generate_overlap_vad_seq_per_file(frame_filepath, per_args)
return overlap_out_dir
def generate_overlap_vad_seq_per_file_star(args):
"""
A workaround for tqdm with starmap of multiprocessing
"""
return generate_overlap_vad_seq_per_file(*args)
@torch.jit.script
def generate_overlap_vad_seq_per_tensor(
frame: torch.Tensor, per_args: Dict[str, float], smoothing_method: str
) -> torch.Tensor:
"""
Use generated frame prediction (generated by shifting window of shift_length_in_sec (10ms)) to generate prediction with overlapping input window/segments
See description in generate_overlap_vad_seq.
Use this for single instance pipeline.
"""
    # This function will be refactored for vectorization, but this is okay for now
overlap = per_args['overlap']
window_length_in_sec = per_args['window_length_in_sec']
shift_length_in_sec = per_args['shift_length_in_sec']
frame_len = per_args.get('frame_len', 0.01)
shift = int(shift_length_in_sec / frame_len) # number of units of shift
seg = int((window_length_in_sec / frame_len + 1)) # number of units of each window/segment
jump_on_target = int(seg * (1 - overlap)) # jump on target generated sequence
jump_on_frame = int(jump_on_target / shift) # jump on input frame sequence
if jump_on_frame < 1:
raise ValueError(
f"Note we jump over frame sequence to generate overlapping input segments. \n \
            Your input makes jump_on_frame={jump_on_frame} < 1, which is invalid because it cannot jump and will get stuck.\n \
            Please try different window_length_in_sec, shift_length_in_sec and overlap choices. \n \
            jump_on_target = int(seg * (1 - overlap)) \n \
            jump_on_frame = int(jump_on_target / shift) "
)
target_len = int(len(frame) * shift)
if smoothing_method == 'mean':
preds = torch.zeros(target_len)
pred_count = torch.zeros(target_len)
for i, og_pred in enumerate(frame):
if i % jump_on_frame != 0:
continue
start = i * shift
end = start + seg
preds[start:end] = preds[start:end] + og_pred
pred_count[start:end] = pred_count[start:end] + 1
preds = preds / pred_count
last_non_zero_pred = preds[pred_count != 0][-1]
preds[pred_count == 0] = last_non_zero_pred
elif smoothing_method == 'median':
preds = [torch.empty(0) for _ in range(target_len)]
for i, og_pred in enumerate(frame):
if i % jump_on_frame != 0:
continue
start = i * shift
end = start + seg
for j in range(start, end):
if j <= target_len - 1:
preds[j] = torch.cat((preds[j], og_pred.unsqueeze(0)), 0)
preds = torch.stack([torch.nanquantile(l, q=0.5) for l in preds])
nan_idx = torch.isnan(preds)
last_non_nan_pred = preds[~nan_idx][-1]
preds[nan_idx] = last_non_nan_pred
else:
raise ValueError("smoothing_method should be either mean or median")
return preds
def generate_overlap_vad_seq_per_file(frame_filepath: str, per_args: dict) -> str:
"""
A wrapper for generate_overlap_vad_seq_per_tensor.
"""
out_dir = per_args['out_dir']
smoothing_method = per_args['smoothing_method']
frame, name = load_tensor_from_file(frame_filepath)
per_args_float: Dict[str, float] = {}
for i in per_args:
if type(per_args[i]) == float or type(per_args[i]) == int:
per_args_float[i] = per_args[i]
preds = generate_overlap_vad_seq_per_tensor(frame, per_args_float, smoothing_method)
overlap_filepath = os.path.join(out_dir, name + "." + smoothing_method)
with open(overlap_filepath, "w", encoding='utf-8') as f:
for pred in preds:
f.write(f"{pred:.4f}\n")
return overlap_filepath
@torch.jit.script
def merge_overlap_segment(segments: torch.Tensor) -> torch.Tensor:
"""
    Merge the given overlapping segments.
    For example:
    torch.Tensor([[0, 1.5], [1, 3.5]]) -> torch.Tensor([[0, 3.5]])
"""
if (
segments.shape == torch.Size([0])
or segments.shape == torch.Size([0, 2])
or segments.shape == torch.Size([1, 2])
):
return segments
segments = segments[segments[:, 0].sort()[1]]
merge_boundary = segments[:-1, 1] >= segments[1:, 0]
head_padded = torch.nn.functional.pad(merge_boundary, [1, 0], mode='constant', value=0.0)
head = segments[~head_padded, 0]
tail_padded = torch.nn.functional.pad(merge_boundary, [0, 1], mode='constant', value=0.0)
tail = segments[~tail_padded, 1]
merged = torch.stack((head, tail), dim=1)
return merged
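# Illustrative usage (added example; not part of the original NeMo module): overlapping
# segments are fused into one, while disjoint segments are kept as-is.
def _example_merge_overlap_segment() -> torch.Tensor:
    segments = torch.tensor([[0.0, 1.5], [1.0, 3.5], [4.0, 7.0]])
    # Expected: tensor([[0.0, 3.5], [4.0, 7.0]])
    return merge_overlap_segment(segments)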
@torch.jit.script
def filter_short_segments(segments: torch.Tensor, threshold: float) -> torch.Tensor:
"""
    Remove segments whose duration is smaller than a threshold.
For example,
torch.Tensor([[0, 1.5], [1, 3.5], [4, 7]]) and threshold = 2.0
->
torch.Tensor([[1, 3.5], [4, 7]])
"""
return segments[segments[:, 1] - segments[:, 0] >= threshold]
def percentile(data: torch.Tensor, perc: int) -> float:
"""
Calculate percentile given data
"""
size = len(data)
return float(sorted(data)[int(math.ceil((size * perc) / 100)) - 1])
def cal_vad_onset_offset(
scale: str, onset: float, offset: float, sequence: torch.Tensor = None
) -> Tuple[float, float]:
"""
Calculate onset and offset threshold given different scale.
"""
if scale == "absolute":
mini = 0
maxi = 1
elif scale == "relative":
mini = min(sequence)
maxi = max(sequence)
elif scale == "percentile":
mini = percentile(sequence, 1)
maxi = percentile(sequence, 99)
onset = mini + onset * (maxi - mini)
offset = mini + offset * (maxi - mini)
return float(onset), float(offset)
@torch.jit.script
def binarization(sequence: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:
"""
Binarize predictions to speech and non-speech
Reference
Paper: Gregory Gelly and Jean-Luc Gauvain. "Minimum Word Error Training of RNN-based Voice Activity Detection", InterSpeech 2015.
Implementation: https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/utils/signal.py
Args:
sequence (torch.Tensor) : A tensor of frame level predictions.
per_args:
            onset (float): onset threshold for detecting the beginning of speech.
            offset (float): offset threshold for detecting the end of speech.
            pad_onset (float): duration added before each speech segment.
            pad_offset (float): duration added after each speech segment.
frame_length_in_sec (float): length of frame.
Returns:
speech_segments(torch.Tensor): A tensor of speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format.
"""
frame_length_in_sec = per_args.get('frame_length_in_sec', 0.01)
onset = per_args.get('onset', 0.5)
offset = per_args.get('offset', 0.5)
pad_onset = per_args.get('pad_onset', 0.0)
pad_offset = per_args.get('pad_offset', 0.0)
speech = False
start = 0.0
i = 0
speech_segments = torch.empty(0)
for i in range(0, len(sequence)):
# Current frame is speech
if speech:
# Switch from speech to non-speech
if sequence[i] < offset:
if i * frame_length_in_sec + pad_offset > max(0, start - pad_onset):
new_seg = torch.tensor(
[max(0, start - pad_onset), i * frame_length_in_sec + pad_offset]
).unsqueeze(0)
speech_segments = torch.cat((speech_segments, new_seg), 0)
start = i * frame_length_in_sec
speech = False
# Current frame is non-speech
else:
# Switch from non-speech to speech
if sequence[i] > onset:
start = i * frame_length_in_sec
speech = True
# if it's speech at the end, add final segment
if speech:
new_seg = torch.tensor([max(0, start - pad_onset), i * frame_length_in_sec + pad_offset]).unsqueeze(0)
speech_segments = torch.cat((speech_segments, new_seg), 0)
# Merge the overlapped speech segments due to padding
speech_segments = merge_overlap_segment(speech_segments) # not sorted
return speech_segments
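# Illustrative usage (added example; not part of the original NeMo module): with onset=0.5
# and offset=0.3, the short sequence below yields one speech segment that starts at frame 1
# and ends at frame 3 (10 ms frames), i.e. approximately [[0.01, 0.03]].
def _example_binarization() -> torch.Tensor:
    sequence = torch.tensor([0.1, 0.8, 0.9, 0.2, 0.1])
    per_args = {'onset': 0.5, 'offset': 0.3, 'frame_length_in_sec': 0.01, 'pad_onset': 0.0, 'pad_offset': 0.0}
    return binarization(sequence, per_args)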
@torch.jit.script
def remove_segments(original_segments: torch.Tensor, to_be_removed_segments: torch.Tensor) -> torch.Tensor:
"""
Remove speech segments list in to_be_removed_segments from original_segments.
For example,
remove torch.Tensor([[start2, end2],[start4, end4]]) from torch.Tensor([[start1, end1],[start2, end2],[start3, end3], [start4, end4]]),
->
torch.Tensor([[start1, end1],[start3, end3]])
"""
for y in to_be_removed_segments:
original_segments = original_segments[original_segments.eq(y).all(dim=1).logical_not()]
return original_segments
@torch.jit.script
def get_gap_segments(segments: torch.Tensor) -> torch.Tensor:
"""
Get the gap segments.
For example,
torch.Tensor([[start1, end1], [start2, end2], [start3, end3]]) -> torch.Tensor([[end1, start2], [end2, start3]])
"""
segments = segments[segments[:, 0].sort()[1]]
return torch.column_stack((segments[:-1, 1], segments[1:, 0]))
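# Illustrative usage (added example; not part of the original NeMo module): the gaps
# between three speech segments are the two intervals in between them.
def _example_get_gap_segments() -> torch.Tensor:
    segments = torch.tensor([[0.0, 1.0], [2.0, 3.0], [5.0, 6.0]])
    # Expected: tensor([[1., 2.], [3., 5.]])
    return get_gap_segments(segments)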
@torch.jit.script
def filtering(speech_segments: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:
"""
Filter out short non_speech and speech segments.
Reference
Paper: Gregory Gelly and Jean-Luc Gauvain. "Minimum Word Error Training of RNN-based Voice Activity Detection", InterSpeech 2015.
Implementation: https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/utils/signal.py
Args:
speech_segments (torch.Tensor): A tensor of speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format.
per_args:
min_duration_on (float): threshold for small non_speech deletion
min_duration_off (float): threshold for short speech segment deletion
filter_speech_first (float): Whether to perform short speech segment deletion first. Use 1.0 to represent True.
Returns:
speech_segments(torch.Tensor): A tensor of filtered speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format.
"""
if speech_segments.shape == torch.Size([0]):
return speech_segments
min_duration_on = per_args.get('min_duration_on', 0.0)
min_duration_off = per_args.get('min_duration_off', 0.0)
filter_speech_first = per_args.get('filter_speech_first', 1.0)
if filter_speech_first == 1.0:
# Filter out the shorter speech segments
if min_duration_on > 0.0:
speech_segments = filter_short_segments(speech_segments, min_duration_on)
# Filter out the shorter non-speech segments and return to be as speech segments
if min_duration_off > 0.0:
# Find non-speech segments
non_speech_segments = get_gap_segments(speech_segments)
# Find shorter non-speech segments
short_non_speech_segments = remove_segments(
non_speech_segments, filter_short_segments(non_speech_segments, min_duration_off)
)
# Return shorter non-speech segments to be as speech segments
speech_segments = torch.cat((speech_segments, short_non_speech_segments), 0)
# Merge the overlapped speech segments
speech_segments = merge_overlap_segment(speech_segments)
else:
if min_duration_off > 0.0:
# Find non-speech segments
non_speech_segments = get_gap_segments(speech_segments)
# Find shorter non-speech segments
short_non_speech_segments = remove_segments(
non_speech_segments, filter_short_segments(non_speech_segments, min_duration_off)
)
speech_segments = torch.cat((speech_segments, short_non_speech_segments), 0)
# Merge the overlapped speech segments
speech_segments = merge_overlap_segment(speech_segments)
if min_duration_on > 0.0:
speech_segments = filter_short_segments(speech_segments, min_duration_on)
return speech_segments
def prepare_gen_segment_table(sequence: torch.Tensor, per_args: dict) -> Tuple[str, dict]:
"""
Preparing for generating segment table.
"""
out_dir = per_args.get('out_dir', None)
# calculate onset offset based on scale selection
per_args['onset'], per_args['offset'] = cal_vad_onset_offset(
per_args.get('scale', 'absolute'), per_args['onset'], per_args['offset'], sequence
)
# cast 'filter_speech_first' for torch.jit.script
if 'filter_speech_first' in per_args:
if per_args['filter_speech_first']:
per_args['filter_speech_first'] = 1.0
else:
per_args['filter_speech_first'] = 0.0
per_args_float: Dict[str, float] = {}
for i in per_args:
if type(per_args[i]) == float or type(per_args[i]) == int:
per_args_float[i] = per_args[i]
return out_dir, per_args_float
@torch.jit.script
def generate_vad_segment_table_per_tensor(sequence: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:
"""
See description in generate_overlap_vad_seq.
Use this for single instance pipeline.
"""
UNIT_FRAME_LEN = 0.01
speech_segments = binarization(sequence, per_args)
speech_segments = filtering(speech_segments, per_args)
if speech_segments.shape == torch.Size([0]):
return speech_segments
speech_segments, _ = torch.sort(speech_segments, 0)
dur = speech_segments[:, 1:2] - speech_segments[:, 0:1] + UNIT_FRAME_LEN
speech_segments = torch.column_stack((speech_segments, dur))
return speech_segments
def generate_vad_segment_table_per_file(pred_filepath: str, per_args: dict) -> str:
"""
A wrapper for generate_vad_segment_table_per_tensor
"""
sequence, name = load_tensor_from_file(pred_filepath)
out_dir, per_args_float = prepare_gen_segment_table(sequence, per_args)
preds = generate_vad_segment_table_per_tensor(sequence, per_args_float)
ext = ".rttm" if per_args.get("use_rttm", False) else ".txt"
save_name = name + ext
save_path = os.path.join(out_dir, save_name)
if preds.shape[0] == 0:
with open(save_path, "w", encoding='utf-8') as fp:
if per_args.get("use_rttm", False):
fp.write(f"SPEAKER <NA> 1 0 0 <NA> <NA> speech <NA> <NA>\n")
else:
fp.write(f"0 0 speech\n")
else:
with open(save_path, "w", encoding='utf-8') as fp:
for i in preds:
if per_args.get("use_rttm", False):
fp.write(f"SPEAKER {name} 1 {i[0]:.4f} {i[2]:.4f} <NA> <NA> speech <NA> <NA>\n")
else:
fp.write(f"{i[0]:.4f} {i[2]:.4f} speech\n")
return save_path
def generate_vad_segment_table(
vad_pred_dir: str,
postprocessing_params: dict,
frame_length_in_sec: float,
num_workers: int,
out_dir: str = None,
use_rttm: bool = False,
) -> str:
"""
    Convert frame-level predictions to speech segments in start-time and end-time format,
    and save them to a file in an RTTM-like format:
        0 10 speech
        17 18 speech
Args:
vad_pred_dir (str): directory of prediction files to be processed.
postprocessing_params (dict): dictionary of thresholds for prediction score. See details in binarization and filtering.
frame_length_in_sec (float): frame length.
out_dir (str): output dir of generated table/csv file.
        num_workers (int): number of processes for multiprocessing
Returns:
out_dir(str): directory of the generated table.
"""
suffixes = ("frame", "mean", "median")
vad_pred_filepath_list = [os.path.join(vad_pred_dir, x) for x in os.listdir(vad_pred_dir) if x.endswith(suffixes)]
if not out_dir:
out_dir_name = "seg_output"
for key in postprocessing_params:
out_dir_name = out_dir_name + "-" + str(key) + str(postprocessing_params[key])
out_dir = os.path.join(vad_pred_dir, out_dir_name)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
per_args = {
"frame_length_in_sec": frame_length_in_sec,
"out_dir": out_dir,
"use_rttm": use_rttm,
}
per_args = {**per_args, **postprocessing_params}
if num_workers is not None and num_workers > 1:
with multiprocessing.Pool(num_workers) as p:
inputs = zip(vad_pred_filepath_list, repeat(per_args))
list(
tqdm(
p.imap(generate_vad_segment_table_per_file_star, inputs),
total=len(vad_pred_filepath_list),
desc='creating speech segments',
leave=True,
)
)
else:
for vad_pred_filepath in tqdm(vad_pred_filepath_list, desc='creating speech segments', leave=True):
generate_vad_segment_table_per_file(vad_pred_filepath, per_args)
return out_dir
def generate_vad_segment_table_per_file_star(args):
"""
A workaround for tqdm with starmap of multiprocessing
"""
return generate_vad_segment_table_per_file(*args)
def vad_construct_pyannote_object_per_file(
vad_table_filepath: str, groundtruth_RTTM_file: str
) -> Tuple[Annotation, Annotation]:
"""
Construct a Pyannote object for evaluation.
Args:
vad_table_filepath(str) : path of vad rttm-like table.
groundtruth_RTTM_file(str): path of groundtruth rttm file.
Returns:
reference(pyannote.Annotation): groundtruth
hypothesis(pyannote.Annotation): prediction
"""
pred = pd.read_csv(vad_table_filepath, sep=" ", header=None)
label = pd.read_csv(groundtruth_RTTM_file, sep=" ", delimiter=None, header=None)
label = label.rename(columns={3: "start", 4: "dur", 7: "speaker"})
# construct reference
reference = Annotation()
for index, row in label.iterrows():
reference[Segment(row['start'], row['start'] + row['dur'])] = row['speaker']
    # construct hypothesis
hypothesis = Annotation()
for index, row in pred.iterrows():
hypothesis[Segment(float(row[0]), float(row[0]) + float(row[1]))] = 'Speech'
return reference, hypothesis
def get_parameter_grid(params: dict) -> list:
"""
Get the parameter grid given a dictionary of parameters.
"""
has_filter_speech_first = False
if 'filter_speech_first' in params:
filter_speech_first = params['filter_speech_first']
has_filter_speech_first = True
params.pop("filter_speech_first")
params_grid = list(ParameterGrid(params))
if has_filter_speech_first:
for i in params_grid:
i['filter_speech_first'] = filter_speech_first
return params_grid
def vad_tune_threshold_on_dev(
params: dict,
vad_pred: str,
groundtruth_RTTM: str,
result_file: str = "res",
vad_pred_method: str = "frame",
focus_metric: str = "DetER",
frame_length_in_sec: float = 0.01,
num_workers: int = 20,
) -> Tuple[dict, dict]:
"""
    Tune thresholds on the dev set. Return the best thresholds, which give the lowest detection error rate (DetER).
Args:
        params (dict): dictionary of parameters to be tuned on.
        vad_pred (str): directory of VAD prediction files, or a file containing the paths to them.
        groundtruth_RTTM (str): directory of ground-truth rttm files, or a file containing the paths to them.
        result_file (str): path prefix of the file used to store the tuning results.
        vad_pred_method (str): suffix of prediction files, used to locate them. Should be one of "frame", "mean" or "median".
        focus_metric (str): the metric we care about most when tuning thresholds. Should be one of "DetER", "FA" or "MISS".
        frame_length_in_sec (float): frame length.
        num_workers (int): number of workers.
Returns:
        best_threshold (dict): parameter set that gives the lowest DetER.
        optimal_scores (dict): DetER/FA/MISS scores for the best parameter set.
"""
min_score = 100
all_perf = {}
try:
check_if_param_valid(params)
    except Exception as e:
        raise ValueError(f"Please check if the parameters are valid: {e}")
paired_filenames, groundtruth_RTTM_dict, vad_pred_dict = pred_rttm_map(vad_pred, groundtruth_RTTM, vad_pred_method)
metric = detection.DetectionErrorRate()
params_grid = get_parameter_grid(params)
for param in params_grid:
for i in param:
if type(param[i]) == np.float64 or type(param[i]) == np.int64:
param[i] = float(param[i])
try:
# Generate speech segments by performing binarization on the VAD prediction according to param.
# Filter speech segments according to param and write the result to rttm-like table.
vad_table_dir = generate_vad_segment_table(
vad_pred, param, frame_length_in_sec=frame_length_in_sec, num_workers=num_workers
)
# add reference and hypothesis to metrics
for filename in paired_filenames:
groundtruth_RTTM_file = groundtruth_RTTM_dict[filename]
vad_table_filepath = os.path.join(vad_table_dir, filename + ".txt")
reference, hypothesis = vad_construct_pyannote_object_per_file(
vad_table_filepath, groundtruth_RTTM_file
)
metric(reference, hypothesis) # accumulation
# delete tmp table files
shutil.rmtree(vad_table_dir, ignore_errors=True)
report = metric.report(display=False)
DetER = report.iloc[[-1]][('detection error rate', '%')].item()
FA = report.iloc[[-1]][('false alarm', '%')].item()
MISS = report.iloc[[-1]][('miss', '%')].item()
assert (
focus_metric == "DetER" or focus_metric == "FA" or focus_metric == "MISS"
), "Metric we care most should be only in 'DetER', 'FA' or 'MISS'!"
all_perf[str(param)] = {'DetER (%)': DetER, 'FA (%)': FA, 'MISS (%)': MISS}
logging.info(f"parameter {param}, {all_perf[str(param)] }")
score = all_perf[str(param)][focus_metric + ' (%)']
del report
metric.reset() # reset internal accumulator
# save results for analysis
with open(result_file + ".txt", "a", encoding='utf-8') as fp:
fp.write(f"{param}, {all_perf[str(param)] }\n")
if score < min_score:
best_threshold = param
optimal_scores = all_perf[str(param)]
min_score = score
print("Current best", best_threshold, optimal_scores)
except RuntimeError as e:
print(f"Pass {param}, with error {e}")
except pd.errors.EmptyDataError as e1:
print(f"Pass {param}, with error {e1}")
return best_threshold, optimal_scores
def check_if_param_valid(params: dict) -> bool:
"""
Check if the parameters are valid.
"""
for i in params:
if i == "filter_speech_first":
if not type(params["filter_speech_first"]) == bool:
raise ValueError("Invalid inputs! filter_speech_first should be either True or False!")
elif i == "pad_onset":
continue
elif i == "pad_offset":
continue
else:
for j in params[i]:
if not j >= 0:
raise ValueError(
"Invalid inputs! All float parameters except pad_onset and pad_offset should be larger than 0!"
)
if not (all(i <= 1 for i in params['onset']) and all(i <= 1 for i in params['offset'])):
raise ValueError("Invalid inputs! The onset and offset thresholds should be in range [0, 1]!")
return True
def pred_rttm_map(vad_pred: str, groundtruth_RTTM: str, vad_pred_method: str = "frame") -> Tuple[set, dict, dict]:
"""
Find paired files in vad_pred and groundtruth_RTTM
"""
groundtruth_RTTM_dict = {}
if os.path.isfile(groundtruth_RTTM):
with open(groundtruth_RTTM, "r", encoding='utf-8') as fp:
groundtruth_RTTM_files = fp.read().splitlines()
elif os.path.isdir(groundtruth_RTTM):
groundtruth_RTTM_files = glob.glob(os.path.join(groundtruth_RTTM, "*.rttm"))
else:
raise ValueError(
"groundtruth_RTTM should either be a directory contains rttm files or a file contains paths to them!"
)
for f in groundtruth_RTTM_files:
filename = os.path.basename(f).rsplit(".", 1)[0]
groundtruth_RTTM_dict[filename] = f
vad_pred_dict = {}
if os.path.isfile(vad_pred):
with open(vad_pred, "r", encoding='utf-8') as fp:
vad_pred_files = fp.read().splitlines()
elif os.path.isdir(vad_pred):
vad_pred_files = glob.glob(os.path.join(vad_pred, "*." + vad_pred_method))
else:
raise ValueError(
"vad_pred should either be a directory containing vad pred files or a file contains paths to them!"
)
for f in vad_pred_files:
filename = os.path.basename(f).rsplit(".", 1)[0]
vad_pred_dict[filename] = f
paired_filenames = groundtruth_RTTM_dict.keys() & vad_pred_dict.keys()
return paired_filenames, groundtruth_RTTM_dict, vad_pred_dict
def plot(
path2audio_file: str,
path2_vad_pred: Optional[str] = None,
path2groundtruth_rttm: Optional[str] = None,
groundtruth_labels: Optional[str] = None,
sample_rate: int = 16000,
offset: float = 0,
duration: float = None,
threshold: float = None,
per_args: dict = None,
unit_frame_len: float = 0.01,
label_repeat: int = 1,
xticks_step: int = 5,
) -> ipd.Audio:
"""
Plot Audio and/or VAD output and/or groundtruth labels for visualization
Args:
path2audio_file (str): path to audio file.
        path2_vad_pred (str): path to vad prediction file.
        path2groundtruth_rttm (str): path to groundtruth RTTM file.
        groundtruth_labels (str): sequence of groundtruth labels.
sample_rate (int): sample rate of audio file.
offset (float): offset in seconds.
duration (float): duration in seconds.
threshold (float): threshold for prediction score (from 0 to 1).
per_args(dict): a dict that stores the thresholds for postprocessing.
unit_frame_len (float): unit frame length in seconds for VAD predictions.
label_repeat (int): repeat the label for this number of times to match different frame lengths in preds and labels.
xticks_step (int): step size for xticks.
"""
plt.figure(figsize=[20, 2])
audio, sample_rate = librosa.load(
path=path2audio_file, sr=sample_rate, mono=True, offset=offset, duration=duration
)
dur = librosa.get_duration(y=audio, sr=sample_rate)
time = np.arange(offset, offset + dur, unit_frame_len)
len_pred = int(dur / unit_frame_len) + 1
frame_snippet = None
if path2_vad_pred:
frame, _ = load_tensor_from_file(path2_vad_pred)
frame_snippet = frame[int(offset / unit_frame_len) : int((offset + dur) / unit_frame_len)]
len_pred = len(frame_snippet)
ax1 = plt.subplot()
ax1.plot(np.arange(audio.size) / sample_rate, audio, 'gray')
ax1.set_xlim([0, int(dur) + 1])
ax1.tick_params(axis='y', labelcolor='b')
ax1.set_ylabel('Signal')
ax1.set_ylim([-1, 1])
ax2 = ax1.twinx()
if threshold and per_args:
raise ValueError("threshold and per_args cannot be used at same time!")
if not threshold and not per_args:
raise ValueError("One and only one of threshold and per_args must have been used!")
if threshold and frame_snippet is not None:
pred_snippet = np.where(frame_snippet >= threshold, 1, 0)
elif per_args and frame_snippet is not None:
_, per_args_float = prepare_gen_segment_table(
frame, per_args
) # take whole frame here for calculating onset and offset
speech_segments = generate_vad_segment_table_per_tensor(frame, per_args_float)
pred = gen_pred_from_speech_segments(speech_segments, frame)
pred_snippet = pred[int(offset / unit_frame_len) : int((offset + dur) / unit_frame_len)]
else:
pred_snippet = None
if path2groundtruth_rttm and path2groundtruth_rttm.endswith('.rttm'):
label = extract_labels(path2groundtruth_rttm, time)
elif groundtruth_labels:
label = [float(x) for x in groundtruth_labels]
if label_repeat > 1:
label = np.repeat(label, label_repeat)
label = label[int(offset / unit_frame_len) : int((offset + dur) / unit_frame_len)]
else:
label = None
if label is not None:
ax2.plot(np.arange(len_pred) * unit_frame_len, label, 'r', label='label')
if pred_snippet is not None:
ax2.plot(np.arange(len_pred) * unit_frame_len, pred_snippet, 'b', label='pred')
if frame_snippet is not None:
ax2.plot(np.arange(len_pred) * unit_frame_len, frame_snippet, 'g--', label='speech prob')
ax2.tick_params(axis='y', labelcolor='r')
ax2.legend(loc='lower right', shadow=True)
ax2.set_ylabel('Preds and Probas')
ax2.set_ylim([-0.1, 1.1])
ax2.set_xticks(np.arange(0, int(dur) + 1, xticks_step))
return ipd.Audio(audio, rate=sample_rate)
def gen_pred_from_speech_segments(
    speech_segments: torch.Tensor, prob: torch.Tensor, shift_length_in_sec: float = 0.01
) -> np.ndarray:
"""
Generate prediction arrays like 000111000... from speech segments {[0,1][2,4]}
"""
pred = np.zeros(prob.shape)
speech_segments = [list(i) for i in speech_segments]
speech_segments.sort(key=lambda x: x[0])
for seg in speech_segments:
start = int(seg[0] / shift_length_in_sec)
end = int(seg[1] / shift_length_in_sec)
pred[start:end] = 1
return pred
def extract_labels(path2ground_truth_label: str, time: list) -> list:
"""
    Extract ground-truth labels for a given time period.
    path2ground_truth_label (str): path of groundtruth RTTM file
    time (list): a list of timestamps representing the time period.
"""
data = pd.read_csv(path2ground_truth_label, sep="\s+", delimiter=None, header=None)
data = data.rename(columns={3: "start", 4: "dur", 7: "speaker"})
labels = []
for pos in time:
line = data[(data["start"] <= pos) & (data["start"] + data["dur"] > pos)]
if len(line) >= 1:
labels.append(1)
else:
labels.append(0)
return labels
def generate_vad_frame_pred(
vad_model,
window_length_in_sec: float,
shift_length_in_sec: float,
manifest_vad_input: str,
out_dir: str,
use_feat: bool = False,
) -> str:
"""
Generate VAD frame level prediction and write to out_dir
"""
time_unit = int(window_length_in_sec / shift_length_in_sec)
trunc = int(time_unit / 2)
trunc_l = time_unit - trunc
all_len = 0
data = []
with open(manifest_vad_input, 'r', encoding='utf-8') as f:
for line in f:
file = json.loads(line)['audio_filepath'].split("/")[-1]
data.append(file.split(".wav")[0])
logging.info(f"Inference on {len(data)} audio files/json lines!")
status = get_vad_stream_status(data)
for i, test_batch in enumerate(tqdm(vad_model.test_dataloader(), total=len(vad_model.test_dataloader()))):
test_batch = [x.to(vad_model.device) for x in test_batch]
with autocast():
if use_feat:
log_probs = vad_model(processed_signal=test_batch[0], processed_signal_length=test_batch[1])
else:
log_probs = vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])
probs = torch.softmax(log_probs, dim=-1)
if len(probs.shape) == 3 and probs.shape[0] == 1:
# squeeze the batch dimension, since batch size is 1 for frame-VAD
probs = probs.squeeze(0) # [1,T,C] -> [T,C]
pred = probs[:, 1]
if window_length_in_sec == 0:
to_save = pred
elif status[i] == 'start':
to_save = pred[:-trunc]
elif status[i] == 'next':
to_save = pred[trunc:-trunc_l]
elif status[i] == 'end':
to_save = pred[trunc_l:]
else:
to_save = pred
to_save = to_save.cpu().tolist()
all_len += len(to_save)
outpath = os.path.join(out_dir, data[i] + ".frame")
with open(outpath, "a", encoding='utf-8') as fout:
for f in range(len(to_save)):
fout.write('{0:0.4f}\n'.format(to_save[f]))
del test_batch
if status[i] == 'end' or status[i] == 'single':
logging.debug(f"Overall length of prediction of {data[i]} is {all_len}!")
all_len = 0
return out_dir
def init_vad_model(model_path: str):
"""
    Initialize a VAD model from a model path
"""
if model_path.endswith('.nemo'):
logging.info(f"Using local VAD model from {model_path}")
vad_model = EncDecClassificationModel.restore_from(restore_path=model_path)
elif model_path.endswith('.ckpt'):
vad_model = EncDecClassificationModel.load_from_checkpoint(checkpoint_path=model_path)
else:
logging.info(f"Using NGC cloud VAD model {model_path}")
vad_model = EncDecClassificationModel.from_pretrained(model_name=model_path)
return vad_model
def init_frame_vad_model(model_path: str):
"""
    Initialize a frame-VAD model from a model path
"""
if model_path.endswith('.nemo'):
logging.info(f"Using local VAD model from {model_path}")
vad_model = EncDecFrameClassificationModel.restore_from(restore_path=model_path)
elif model_path.endswith('.ckpt'):
vad_model = EncDecFrameClassificationModel.load_from_checkpoint(checkpoint_path=model_path)
else:
logging.info(f"Using NGC cloud VAD model {model_path}")
vad_model = EncDecFrameClassificationModel.from_pretrained(model_name=model_path)
return vad_model
def stitch_segmented_asr_output(
segmented_output_manifest: str,
speech_segments_tensor_dir: str = "speech_segments",
stitched_output_manifest: str = "asr_stitched_output_manifest.json",
) -> str:
"""
Stitch the prediction of speech segments.
"""
if not os.path.exists(speech_segments_tensor_dir):
os.mkdir(speech_segments_tensor_dir)
segmented_output = []
with open(segmented_output_manifest, 'r', encoding='utf-8') as f:
for line in f:
file = json.loads(line)
segmented_output.append(file)
with open(stitched_output_manifest, 'w', encoding='utf-8') as fout:
speech_segments = torch.Tensor()
all_pred_text = ""
if len(segmented_output) > 1:
for i in range(1, len(segmented_output)):
start, end = (
segmented_output[i - 1]['offset'],
segmented_output[i - 1]['offset'] + segmented_output[i - 1]['duration'],
)
new_seg = torch.tensor([start, end]).unsqueeze(0)
speech_segments = torch.cat((speech_segments, new_seg), 0)
pred_text = segmented_output[i - 1]['pred_text']
all_pred_text += pred_text
name = segmented_output[i - 1]['audio_filepath'].split("/")[-1].rsplit(".", 1)[0]
if segmented_output[i - 1]['audio_filepath'] != segmented_output[i]['audio_filepath']:
speech_segments_tensor_path = os.path.join(speech_segments_tensor_dir, name + '.pt')
torch.save(speech_segments, speech_segments_tensor_path)
meta = {
'audio_filepath': segmented_output[i - 1]['audio_filepath'],
'speech_segments_filepath': speech_segments_tensor_path,
'pred_text': all_pred_text,
}
json.dump(meta, fout)
fout.write('\n')
fout.flush()
speech_segments = torch.Tensor()
all_pred_text = ""
else:
all_pred_text += " "
else:
i = -1
start, end = segmented_output[i]['offset'], segmented_output[i]['offset'] + segmented_output[i]['duration']
new_seg = torch.tensor([start, end]).unsqueeze(0)
speech_segments = torch.cat((speech_segments, new_seg), 0)
pred_text = segmented_output[i]['pred_text']
all_pred_text += pred_text
name = segmented_output[i]['audio_filepath'].split("/")[-1].rsplit(".", 1)[0]
speech_segments_tensor_path = os.path.join(speech_segments_tensor_dir, name + '.pt')
torch.save(speech_segments, speech_segments_tensor_path)
meta = {
'audio_filepath': segmented_output[i]['audio_filepath'],
'speech_segments_filepath': speech_segments_tensor_path,
'pred_text': all_pred_text,
}
json.dump(meta, fout)
fout.write('\n')
fout.flush()
logging.info(
f"Finish stitch segmented ASR output to {stitched_output_manifest}, the speech segments info has been stored in directory {speech_segments_tensor_dir}"
)
return stitched_output_manifest
def construct_manifest_eval(
input_manifest: str, stitched_output_manifest: str, aligned_vad_asr_output_manifest: str = "vad_asr_out.json"
) -> str:
"""
    Generate an aligned manifest for evaluation,
    because some pure-noise samples might not appear in stitched_output_manifest.
"""
stitched_output = dict()
with open(stitched_output_manifest, 'r', encoding='utf-8') as f:
for line in f:
file = json.loads(line)
stitched_output[file["audio_filepath"]] = file
out = []
with open(input_manifest, 'r', encoding='utf-8') as f:
for line in f:
file = json.loads(line)
sample = file["audio_filepath"]
if sample in stitched_output:
file["pred_text"] = stitched_output[sample]["pred_text"]
file["speech_segments_filepath"] = stitched_output[sample]["speech_segments_filepath"]
else:
file["pred_text"] = ""
file["speech_segments_filepath"] = ""
out.append(file)
with open(aligned_vad_asr_output_manifest, 'w', encoding='utf-8') as fout:
for i in out:
json.dump(i, fout)
fout.write('\n')
fout.flush()
return aligned_vad_asr_output_manifest
def load_rttm_file(filepath: str) -> pd.DataFrame:
"""
Load rttm file and extract speech segments
"""
if not Path(filepath).exists():
raise ValueError(f"File not found: {filepath}")
data = pd.read_csv(filepath, sep="\s+", delimiter=None, header=None)
data = data.rename(columns={3: "start", 4: "dur", 7: "speaker"})
data['start'] = data['start'].astype(float)
data['dur'] = data['dur'].astype(float)
data['end'] = data['start'] + data['dur']
data = data.sort_values(by=['start'])
data['segment'] = list(zip(data['start'], data['end']))
return data
def merge_intervals(intervals: List[List[float]]) -> List[List[float]]:
"""
Merge speech segments into non-overlapping segments
"""
intervals.sort(key=lambda x: x[0])
merged = []
for interval in intervals:
# if the list of merged intervals is empty or if the current
# interval does not overlap with the previous, simply append it.
if not merged or merged[-1][1] < interval[0]:
merged.append(interval)
else:
# otherwise, there is overlap, so we merge the current and previous
# intervals.
merged[-1][1] = max(merged[-1][1], interval[1])
return merged
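# Illustrative usage (added example; not part of the original NeMo module): the pure-Python
# counterpart of merge_overlap_segment above, operating on lists instead of tensors.
def _example_merge_intervals() -> List[List[float]]:
    intervals = [[0.0, 1.5], [1.0, 3.5], [4.0, 7.0]]
    # Expected: [[0.0, 3.5], [4.0, 7.0]]
    return merge_intervals(intervals)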
def load_speech_segments_from_rttm(rttm_file: str) -> List[List[float]]:
"""
    Load speech segments from an RTTM file, where each segment is represented
    as a [start, end] interval
"""
speech_segments = list(load_rttm_file(rttm_file)['segment'])
speech_segments = [list(x) for x in speech_segments]
speech_segments = merge_intervals(speech_segments)
return speech_segments
def load_speech_overlap_segments_from_rttm(rttm_file: str) -> Tuple[List[List[float]], List[List[float]]]:
"""
Load speech segments from RTTM file, merge and extract possible overlaps
Args:
rttm_file (str): Path to RTTM file
Returns:
merged (List[List[float]]): merged speech intervals without overlaps
        overlaps (List[List[float]]): intervals with overlapping speech
"""
speech_segments = list(load_rttm_file(rttm_file)['segment'])
speech_segments = [list(x) for x in speech_segments]
speech_segments.sort(key=lambda x: x[0]) # sort by start time
merged = []
overlaps = []
for interval in speech_segments:
# if the list of merged intervals is empty or if the current
# interval does not overlap with the previous, simply append it.
if not merged or merged[-1][1] < interval[0]:
merged.append(interval)
else:
# otherwise, there is overlap, so we merge the current and previous
# intervals.
overlaps.append([interval[0], min(merged[-1][1], interval[1])])
merged[-1][1] = max(merged[-1][1], interval[1])
return merged, overlaps
def get_nonspeech_segments(
speech_segments: List[List[float]], max_duration: Optional[float] = None
) -> List[List[float]]:
"""
Get non-speech segments from given speech segments and maximum duration
Args:
speech_segments (List[List[float]]): speech segment intervals loaded by load_speech_segments()
max_duration (Optional[float]): maximum duration of the audio, used to calculate the last silence segment
Returns:
nonspeech_segments (List[List[float]]): intervals of non-speech segments
"""
nonspeech_segments = []
start = 0.0
for sp_seg in speech_segments:
end = sp_seg[0]
nonspeech_segments.append([start, end])
start = sp_seg[1]
if max_duration is not None and start < max_duration:
nonspeech_segments.append([start, max_duration])
return nonspeech_segments
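# Illustrative usage (added example; not part of the original NeMo module): the complement
# of two speech segments within a 6-second recording.
def _example_get_nonspeech_segments() -> List[List[float]]:
    speech = [[1.0, 2.0], [3.0, 4.5]]
    # Expected: [[0.0, 1.0], [2.0, 3.0], [4.5, 6.0]]
    return get_nonspeech_segments(speech, max_duration=6.0)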
def get_frame_labels(
    segments: List[List[float]], frame_length: float, offset: float, duration: float, as_str: bool = True
) -> Union[str, List[float]]:
"""
Generate frame-level binary labels for audio, '0' for non-speech and '1' for speech
Args:
segments (List[List[float]]): speech segments loaded by load_speech_segments_from_rttm
frame_length (float): frame length in seconds, e.g. 0.01 for 10ms frames
offset (float): Offset of the audio clip
        duration (float): duration of the audio clip
        as_str (bool): if True, return labels as a space-separated string; otherwise return a list of floats
"""
labels = []
n_frames = int(np.ceil(duration / frame_length))
sid = 0
for i in range(n_frames):
t = offset + i * frame_length
while sid < len(segments) - 1 and segments[sid][1] < t:
sid += 1
if segments[sid][1] != 0 and segments[sid][0] <= t <= segments[sid][1]:
labels.append(1)
else:
labels.append(0)
if as_str:
return ' '.join([str(x) for x in labels])
return [float(x) for x in labels]
def plot_sample_from_rttm(
audio_file: str,
rttm_file: str,
max_duration: Optional[float] = None,
save_path: str = "",
show: bool = True,
offset: float = 0.0,
unit_frame_len: float = 0.01,
):
"""
Plot audio signal and frame-level labels from RTTM file
"""
plt.figure(figsize=[20, 2])
audio, sample_rate = librosa.load(path=audio_file, sr=16000, mono=True, offset=offset, duration=max_duration)
dur = librosa.get_duration(y=audio, sr=sample_rate)
segments = load_speech_segments_from_rttm(rttm_file)
labels = get_frame_labels(segments, unit_frame_len, offset, dur)
labels = [float(x) for x in labels.split()]
length = len(labels)
ax1 = plt.subplot()
ax1.set_title(audio_file)
ax1.plot(np.arange(audio.size) / sample_rate, audio, 'gray')
ax1.set_xlim([0, int(dur) + 1])
ax1.tick_params(axis='y', labelcolor='b')
ax1.set_ylabel('Signal')
ax1.set_ylim([-1, 1])
ax2 = ax1.twinx()
ax2.plot(np.arange(length) * unit_frame_len, labels, 'r', label='label')
ax2.tick_params(axis='y', labelcolor='r')
ax2.legend(loc='lower right', shadow=True)
ax2.set_ylabel('Labels')
ax2.set_ylim([-0.1, 1.1])
if show:
plt.show()
if save_path:
plt.savefig(save_path)
return ipd.Audio(audio, rate=16000)
def align_labels_to_frames(probs, labels, threshold=0.2):
"""
Aligns labels to frames when the frame length (e.g., 10ms) is different from the label length (e.g., 20ms).
The threshold 0.2 is not important, since the actual ratio will always be close to an integer unless using frame/label
lengths that are not multiples of each other (e.g., 15ms frame length and 20ms label length), which is not valid.
The value 0.2 here is just for easier unit testing.
Args:
probs (List[float]): list of probabilities
labels (List[int]): list of labels
threshold (float): threshold for rounding ratio to integer
Returns:
labels (List[int]): list of labels aligned to frames
"""
frames_len = len(probs)
labels_len = len(labels)
probs = torch.tensor(probs).float()
labels = torch.tensor(labels).long()
if frames_len < labels_len:
# pad labels with zeros until labels_len is a multiple of frames_len
ratio = labels_len / frames_len
res = labels_len % frames_len
if (
ceil(ratio) - ratio < threshold
): # e.g., ratio = 2.9, ceil(ratio) = 3, then we pad labels to make it a multiple of 3
            # pad labels with zeros until the label length is a multiple of ceil(ratio)
labels = labels.tolist()
if len(labels) % ceil(ratio) != 0:
labels += [0] * (ceil(ratio) - len(labels) % ceil(ratio))
labels = torch.tensor(labels).long()
labels = labels.view(-1, ceil(ratio)).amax(1)
return align_labels_to_frames(probs.tolist(), labels.long().tolist())
        # otherwise, truncate extra labels so that the label length is a multiple of frames_len
if res > 0:
labels = labels[:-res]
labels = labels.view(-1, floor(ratio)).amax(1)
return labels.long().tolist()
elif frames_len > labels_len:
# repeat labels until labels_len is a multiple of frames_len
ratio = frames_len / labels_len
res = frames_len % labels_len
if ceil(ratio) - ratio < threshold:
# e.g., ratio is 1.83, ceil(ratio) = 2, then we repeat labels to make it a multiple of 2, and discard the redundant labels
labels = labels.repeat_interleave(ceil(ratio), dim=0).long().tolist()
labels = labels[:frames_len]
else:
# e.g., ratio is 2.02, floor(ratio) = 2, then we repeat labels to make it a multiple of 2 and add additional labels
labels = labels.repeat_interleave(floor(ratio), dim=0).long().tolist()
if res > 0:
labels += labels[-res:]
return labels
else:
return labels.long().tolist()
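# Illustrative usage (added example; not part of the original NeMo module): 20 ms labels
# are up-sampled by repetition to match 10 ms frame predictions.
def _example_align_labels_to_frames() -> List[int]:
    probs = [0.1, 0.2, 0.9, 0.8, 0.1, 0.2]  # 6 frames (e.g. 10 ms each)
    labels = [0, 1, 0]  # 3 labels (e.g. 20 ms each)
    # Expected: [0, 0, 1, 1, 0, 0]
    return align_labels_to_frames(probs, labels)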
def read_rttm_as_pyannote_object(rttm_file: str, speaker_override: Optional[str] = None) -> Annotation:
"""
Read rttm file and construct a Pyannote object.
Args:
rttm_file(str) : path of rttm file.
speaker_override(str) : if not None, all speakers will be replaced by this value.
Returns:
annotation(pyannote.Annotation): annotation object
"""
annotation = Annotation()
data = pd.read_csv(rttm_file, sep="\s+", delimiter=None, header=None)
data = data.rename(columns={3: "start", 4: "dur", 7: "speaker"})
for index, row in data.iterrows():
if speaker_override is not None:
annotation[Segment(row['start'], row['start'] + row['dur'])] = speaker_override
else:
annotation[Segment(row['start'], row['start'] + row['dur'])] = row['speaker']
return annotation
def convert_labels_to_speech_segments(labels: List[float], frame_length_in_sec: float = 0.01):
"""
Convert a list of labels to a list of speech segments.
Args:
labels (List[float]): list of labels
frame_length_in_sec (float): frame length in seconds
Returns:
segments (List[Tuple[float, float]]): list of speech segments
"""
segments = []
start = -1
for i, label in enumerate(labels):
if label == 1:
if start == -1:
start = i * frame_length_in_sec
else:
if start > -1:
segments.append([start, (i - 1) * frame_length_in_sec])
start = -1
if start != -1:
segments.append([start, (len(labels) - 1) * frame_length_in_sec])
return segments
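# Illustrative usage (added example; not part of the original NeMo module): three consecutive
# speech frames starting at frame 1 become a single segment.
def _example_convert_labels_to_speech_segments() -> List[List[float]]:
    labels = [0.0, 1.0, 1.0, 1.0, 0.0, 0.0]
    # Expected (up to float rounding): [[0.01, 0.03]]
    return convert_labels_to_speech_segments(labels, frame_length_in_sec=0.01)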
def frame_vad_construct_pyannote_object_per_file(
prediction: Union[str, List[float]], groundtruth: Union[str, List[float]], frame_length_in_sec: float = 0.01
) -> Tuple[Annotation, Annotation]:
"""
Construct a Pyannote object for evaluation.
Args:
prediction (str) : path of VAD predictions stored as RTTM or CSV-like txt.
groundtruth (str): path of groundtruth rttm file.
frame_length_in_sec(float): frame length in seconds
Returns:
reference(pyannote.Annotation): groundtruth
hypothesis(pyannote.Annotation): prediction
"""
hypothesis = Annotation()
    if isinstance(prediction, str) and prediction.endswith('.rttm'):
        hypothesis = read_rttm_as_pyannote_object(prediction, speaker_override='speech')
    elif isinstance(prediction, str) and prediction.endswith('.txt'):
        pred = pd.read_csv(prediction, sep=" ", header=None)
        for index, row in pred.iterrows():
            hypothesis[Segment(float(row[0]), float(row[0]) + float(row[1]))] = 'speech'
    elif isinstance(prediction, list):
segments = convert_labels_to_speech_segments(prediction, frame_length_in_sec)
for segment in segments:
hypothesis[Segment(segment[0], segment[1])] = 'speech'
else:
raise ValueError('prediction must be a path to rttm file or a list of frame labels.')
reference = Annotation()
if isinstance(groundtruth, str) and groundtruth.endswith('.rttm'):
reference = read_rttm_as_pyannote_object(groundtruth, speaker_override='speech')
elif isinstance(groundtruth, list):
segments = convert_labels_to_speech_segments(groundtruth, frame_length_in_sec)
for segment in segments:
reference[Segment(segment[0], segment[1])] = 'speech'
else:
raise ValueError('groundtruth must be a path to rttm file or a list of frame labels.')
return reference, hypothesis
def frame_vad_infer_load_manifest(cfg: DictConfig):
"""
Load manifest file and prepare label/rttm mapping
Args:
cfg: config file
Returns:
manifest_orig (List[Dict]): original manifest data
key_labels_map (Dict): mapping from unique_audio_name to its labels
key_rttm_map (Dict): mapping from unique_audio_name to its rttm file
"""
unique_audio_names = set()
key_labels_map = {}
key_rttm_map = {}
manifest_orig = []
manifest_file = Path(cfg.dataset).absolute().as_posix()
with open(manifest_file, 'r') as fin:
for line in fin.readlines():
entry = json.loads(line.strip())
audio_filepath = get_full_path(audio_file=entry['audio_filepath'], manifest_file=manifest_file)
entry['audio_filepath'] = str(audio_filepath)
uniq_audio_name = Path(audio_filepath).stem
if uniq_audio_name in unique_audio_names:
raise ValueError("Please make sure each line is with different audio_filepath! ")
else:
unique_audio_names.add(uniq_audio_name)
manifest_orig.append(entry)
# always prefer RTTM labels if exist
if "label" not in entry and ("rttm_filepath" in entry or "rttm_file" in entry):
rttm_key = "rttm_filepath" if "rttm_filepath" in entry else "rttm_file"
segments = load_speech_segments_from_rttm(entry[rttm_key])
label_str = get_frame_labels(
segments=segments,
frame_length=cfg.vad.parameters.shift_length_in_sec,
duration=entry['duration'],
offset=entry['offset'],
)
key_rttm_map[uniq_audio_name] = entry[rttm_key]
key_labels_map[uniq_audio_name] = [float(x) for x in label_str.split()]
elif entry.get("label", None) is not None:
key_labels_map[uniq_audio_name] = [float(x) for x in entry["label"].split()]
elif cfg.evaluate:
raise ValueError("Must have either `label` or `rttm_filepath` in manifest when evaluate=True")
return manifest_orig, key_labels_map, key_rttm_map
def frame_vad_eval_detection_error(
pred_dir: str, key_labels_map: dict, key_rttm_map: dict, key_pred_rttm_map: dict, frame_length_in_sec: float
):
"""
Perform evaluation on frame-VAD results
Args:
        pred_dir: directory of frame-VAD prediction files in `<unique_audio_name>.frame` format
key_labels_map: dictionary of mapping each <unique_audio_name> to its labels
key_rttm_map: dictionary of mapping each <unique_audio_name> to its GROUNDTRUTH rttm file
key_pred_rttm_map: dictionary of mapping each <unique_audio_name> to its PREDICTED rttm file
frame_length_in_sec: frame length in seconds, e.g. 0.02s
Returns:
auroc: AUROC score in 0~100%
report: Pyannote detection.DetectionErrorRate() report
"""
all_probs = []
all_labels = []
metric = detection.DetectionErrorRate()
key_probs_map = {}
predictions_list = list(Path(pred_dir).glob("*.frame"))
for frame_pred in tqdm(predictions_list, desc="Evaluating VAD results", total=len(predictions_list)):
pred_probs = []
with frame_pred.open("r") as fin:
for line in fin.readlines():
line = line.strip()
if not line:
continue
pred_probs.append(float(line))
key = frame_pred.stem
key_probs_map[key] = pred_probs
key_labels_map[key] = align_labels_to_frames(probs=pred_probs, labels=key_labels_map[key])
all_probs.extend(key_probs_map[key])
all_labels.extend(key_labels_map[key])
if key in key_rttm_map:
groundtruth = key_rttm_map[key]
else:
groundtruth = key_labels_map[key]
reference, hypothesis = frame_vad_construct_pyannote_object_per_file(
prediction=key_pred_rttm_map[key], groundtruth=groundtruth, frame_length_in_sec=frame_length_in_sec,
)
metric(reference, hypothesis)
auroc = roc_auc_score(y_true=all_labels, y_score=all_probs)
report = metric.report(display=False)
return auroc, report
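# Illustrative sketch (not part of the original file): evaluating frame-VAD outputs. The
# directory name and the three mapping dicts are hypothetical placeholders that would normally
# come from frame_vad_infer_load_manifest and the frame-VAD inference step.
# >>> auroc, report = frame_vad_eval_detection_error(
# ...     pred_dir="frame_vad_outputs",
# ...     key_labels_map=key_labels_map,
# ...     key_rttm_map=key_rttm_map,
# ...     key_pred_rttm_map=key_pred_rttm_map,
# ...     frame_length_in_sec=0.02,
# ... )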
|
NeMo-main
|
nemo/collections/asr/parts/utils/vad_utils.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from omegaconf import DictConfig, open_dict
from nemo.collections.asr.modules import conv_asr
from nemo.collections.asr.parts.submodules import jasper
from nemo.utils import logging
def change_conv_asr_se_context_window(model: 'ASRModel', context_window: int, update_config: bool = True):
"""
Update the context window of the SqueezeExcitation module if the provided model contains an
`encoder` which is an instance of `ConvASREncoder`.
Args:
model: A subclass of `ASRModel`, itself a subclass of `ModelPT`.
context_window: An integer representing the number of input timeframes that will be used
to compute the context. Each timeframe corresponds to a single window stride of the
STFT features.
Say the window_stride = 0.01s, then a context window of 128 represents 128 * 0.01 s
of context to compute the Squeeze step.
update_config: Whether to update the config or not with the new context window.
"""
if update_config and not hasattr(model.cfg, 'encoder'):
logging.info(
"Could not change the context window in SqueezeExcite module "
"since the model provided does not contain an `encoder` module in its config."
)
return
if not isinstance(model.encoder, conv_asr.ConvASREncoder):
logging.info(
f"Could not change the context window in SqueezeExcite module "
f"since the `encoder` module is not an instance of `ConvASREncoder`.\n"
f"Provided encoder class = {model.encoder.__class__.__name__}"
)
return
enc_cfg = model.cfg.encoder if update_config else None
if enc_cfg is not None:
with open_dict(enc_cfg):
_update_se_context_window(model, context_window, cfg=enc_cfg)
else:
_update_se_context_window(model, context_window)
# Update model config
if update_config:
model.cfg.encoder = enc_cfg
def _update_se_context_window(model: 'ASRModel', context_window: int, cfg: Optional[DictConfig] = None):
jasper_block_counter = -1
for name, m in model.named_modules():
if type(m) == jasper.JasperBlock:
jasper_block_counter += 1
if type(m) == jasper.MaskedConv1d:
if m.conv.stride[0] > 1 and 'mconv' in name:
context_window = context_window // m.conv.stride[0]
if type(m) == jasper.SqueezeExcite:
m.change_context_window(context_window=context_window)
# update config
if cfg is not None:
cfg.jasper[jasper_block_counter].se_context_size = context_window
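# Illustrative usage sketch (not part of the original file): applying the SE context-window change
# to a pretrained model. The checkpoint name below is an arbitrary example of a ConvASREncoder-based
# model; any ASRModel whose encoder is a ConvASREncoder works the same way.
# >>> import nemo.collections.asr as nemo_asr
# >>> model = nemo_asr.models.ASRModel.from_pretrained("stt_en_citrinet_256")
# >>> change_conv_asr_se_context_window(model, context_window=256, update_config=True)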
|
NeMo-main
|
nemo/collections/asr/parts/utils/asr_module_utils.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2007-2020 The scikit-learn developers.
# BSD 3-Clause License
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# NME-SC clustering is based on the implementation from the paper
# https://arxiv.org/pdf/2003.02405.pdf and the implementation from
# https://github.com/tango4j/Auto-Tuning-Spectral-Clustering.
from typing import Dict, List, Tuple
import torch
from torch.linalg import eigh, eigvalsh
def cos_similarity(emb_a: torch.Tensor, emb_b: torch.Tensor, eps=torch.tensor(3.5e-4)) -> torch.Tensor:
"""
Calculate cosine similarities of the given two sets of tensors. The output is an N by N
matrix where N is the number of feature vectors.
Args:
emb_a (Tensor):
Matrix containing speaker representation vectors. (N x embedding_dim)
emb_b (Tensor):
Matrix containing speaker representation vectors. (N x embedding_dim)
Returns:
res (Tensor):
N by N matrix containing the cosine similarities of the values.
"""
# If the number of embeddings is 1, the affinity computation creates NaN values
if emb_a.shape[0] == 1 or emb_b.shape[0] == 1:
raise ValueError(f"Number of feature vectors should be greater than 1 but got {emb_a.shape} and {emb_b.shape}")
a_norm = emb_a / (torch.norm(emb_a, dim=1).unsqueeze(1) + eps)
b_norm = emb_b / (torch.norm(emb_b, dim=1).unsqueeze(1) + eps)
res = torch.mm(a_norm, b_norm.transpose(0, 1))
res.fill_diagonal_(1)
return res
def ScalerMinMax(X: torch.Tensor) -> torch.Tensor:
"""
Min-max scale the input affinity matrix X, which will lead to a dynamic range of [0, 1].
Args:
X (Tensor):
Matrix containing cosine similarity values among embedding vectors (N x N)
Returns:
v_norm (Tensor):
Min-max normalized value of X.
"""
v_min, v_max = X.min(), X.max()
v_norm = (X - v_min) / (v_max - v_min)
return v_norm
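# Illustrative sketch (not part of the original file): raw cosine affinity followed by min-max
# scaling, using a randomly generated embedding matrix with hypothetical sizes.
# >>> emb = torch.randn(8, 192)  # 8 segments, 192-dim speaker embeddings
# >>> sim = ScalerMinMax(cos_similarity(emb, emb))
# >>> sim.shape
# torch.Size([8, 8])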
def getEuclideanDistance(
specEmbA: torch.Tensor, specEmbB: torch.Tensor, device: torch.device = torch.device('cpu')
) -> torch.Tensor:
"""
Calculate Euclidean distances from the given feature tensors.
Args:
specEmbA (Tensor):
Matrix containing spectral embedding vectors from eigenvalue decomposition (N x embedding_dim).
specEmbB (Tensor):
Matrix containing spectral embedding vectors from eigenvalue decomposition (N x embedding_dim).
Returns:
dis (Tensor):
Euclidean distance values of the two sets of spectral embedding vectors.
"""
specEmbA, specEmbB = specEmbA.to(device), specEmbB.to(device)
A, B = specEmbA.unsqueeze(dim=1), specEmbB.unsqueeze(dim=0)
dis = (A - B) ** 2.0
dis = dis.sum(dim=-1).squeeze()
return dis
def kmeans_plusplus_torch(
X: torch.Tensor,
n_clusters: int,
random_state: int,
n_local_trials: int = 30,
device: torch.device = torch.device('cpu'),
):
"""
Choose initial centroids for initializing the k-means algorithm. The performance of
k-means can vary significantly with the initial centroids. To alleviate this problem,
the k-means++ algorithm chooses initial centroids with probability proportional to the
distance from the previously chosen centroids. The centroids selected by k-means++
improve the chance of getting more accurate and stable clustering results. The overall
implementation of the k-means++ algorithm is inspired by the numpy-based k-means++
implementation in:
https://github.com/scikit-learn/scikit-learn
Originally, the implementation of the k-means++ algorithm in scikit-learn is based
on the following research article:
Arthur, David, and Sergei Vassilvitskii. k-means++: The advantages of careful
seeding. Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete
algorithms, Society for Industrial and Applied Mathematics (2007)
Args:
X (Tensor):
Matrix containing cosine similarity values among embedding vectors (N x N)
n_clusters (int):
Maximum number of speakers for estimating number of speakers.
Shows stable performance under 20.
random_state (int):
Seed variable for setting up a random state.
n_local_trials (int):
Number of trials for creating initial values of the center points.
device (torch.device)
Torch device variable.
Returns:
centers (Tensor):
The coordinates for center points that are used for initializing k-means algorithm.
indices (Tensor):
The indices of the best candidate center points.
"""
torch.manual_seed(random_state)
X = X.to(device)
n_samples, n_features = X.shape
centers = torch.zeros(n_clusters, n_features, dtype=X.dtype)
center_id = torch.randint(0, n_samples, (1,)).long()
indices = torch.full([n_clusters,], -1, dtype=torch.int)
centers[0] = X[center_id].squeeze(0)
indices[0] = center_id.squeeze(0)
centers = centers.to(device)
closest_dist_diff = centers[0, None].repeat(1, X.shape[0]).view(X.shape[0], -1) - X
closest_dist_sq = closest_dist_diff.pow(2).sum(dim=1).unsqueeze(dim=0)
current_pot = closest_dist_sq.sum()
for c in range(1, n_clusters):
rand_vals = torch.rand(n_local_trials) * current_pot.item()
if len(closest_dist_sq.shape) > 1:
torch_cumsum = torch.cumsum(closest_dist_sq, dim=1)[0]
else:
torch_cumsum = torch.cumsum(closest_dist_sq, dim=0)
candidate_ids = torch.searchsorted(torch_cumsum, rand_vals.to(device))
N_ci = candidate_ids.shape[0]
distance_diff = X[candidate_ids].repeat(1, X.shape[0]).view(X.shape[0] * N_ci, -1) - X.repeat(N_ci, 1)
distance = distance_diff.pow(2).sum(dim=1).view(N_ci, -1)
distance_to_candidates = torch.minimum(closest_dist_sq, distance)
candidates_pot = distance_to_candidates.sum(dim=1)
best_candidate = torch.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
centers[c] = X[best_candidate]
indices[c] = best_candidate
return centers, indices
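# Illustrative sketch (not part of the original file): picking 3 initial centers from a random
# feature matrix with hypothetical sizes.
# >>> X = torch.randn(50, 4)
# >>> centers, indices = kmeans_plusplus_torch(X, n_clusters=3, random_state=0)
# >>> centers.shape, indices.shape
# (torch.Size([3, 4]), torch.Size([3]))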
def kmeans_torch(
X: torch.Tensor,
num_clusters: int,
threshold: float = 1e-4,
iter_limit: int = 15,
random_state: int = 0,
device: torch.device = torch.device('cpu'),
) -> torch.Tensor:
"""
Run k-means algorithm on the given set of spectral embeddings in X. The threshold
and iter_limit variables are set to show the best performance on speaker diarization
tasks. The overall implementation of k-means algorithm is inspired by the k-means
algorithm implemented in https://github.com/scikit-learn/scikit-learn.
References:
Arthur, David, and Sergei Vassilvitskii. k-means++: The advantages of careful
seeding. Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete
algorithms, Society for Industrial and Applied Mathematics (2007).
Args:
X (Tensor):
Cosine similarity matrix calculated from speaker embeddings
num_clusters (int):
The estimated number of speakers.
threshold (float):
This threshold limits the change of center values. If the squared sum of
center shifts is smaller than this threshold, the iteration stops.
iter_limit (int):
The maximum number of iterations that is allowed by the k-means algorithm.
device (torch.device):
Torch device variable
Returns:
selected_cluster_indices (Tensor):
The assigned cluster labels from the k-means clustering.
"""
# Convert tensor type to float
X = X.float().to(device)
input_size = X.shape[0]
# Initialize the cluster centers with kmeans_plusplus algorithm.
plusplus_init_states = kmeans_plusplus_torch(X, n_clusters=num_clusters, random_state=random_state, device=device)
centers = plusplus_init_states[0]
selected_cluster_indices = torch.zeros(input_size).long()
for iter_count in range(iter_limit):
euc_dist = getEuclideanDistance(X, centers, device=device)
if len(euc_dist.shape) <= 1:
break
else:
selected_cluster_indices = torch.argmin(euc_dist, dim=1)
center_inits = centers.clone()
for index in range(num_clusters):
selected_cluster = torch.nonzero(selected_cluster_indices == index).squeeze().to(device)
chosen_indices = torch.index_select(X, 0, selected_cluster)
if chosen_indices.shape[0] == 0:
chosen_indices = X[torch.randint(len(X), (1,))]
centers[index] = chosen_indices.mean(dim=0)
# Calculate the delta from center_inits to centers
center_delta_pow = torch.pow((centers - center_inits), 2)
center_shift_pow = torch.pow(torch.sum(torch.sqrt(torch.sum(center_delta_pow, dim=1))), 2)
# If the cluster centers are not changing significantly, stop the loop.
if center_shift_pow < threshold:
break
return selected_cluster_indices
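# Illustrative sketch (not part of the original file): clustering two synthetic, well-separated
# 2-D blobs; in the diarization pipeline the input comes from the spectral embeddings instead.
# >>> points = torch.cat([torch.randn(10, 2) + 3.0, torch.randn(10, 2) - 3.0])
# >>> labels = kmeans_torch(points, num_clusters=2, random_state=0)
# >>> labels.shape
# torch.Size([20])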
def getTheLargestComponent(affinity_mat: torch.Tensor, seg_index: int, device: torch.device) -> torch.Tensor:
"""
Find the largest affinity_mat connected components for each given node.
This is for checking whether the affinity_mat is fully connected.
Args:
affinity_mat (Tensor):
A square matrix (tensor) containing normalized cosine distance values
seg_index (int):
The segment index that is targeted to be explored.
Returns:
connected_nodes (Tensor):
A tensor containing booleans that indicate whether the node is connected.
"""
num_of_segments = affinity_mat.shape[0]
connected_nodes = torch.zeros(num_of_segments, dtype=torch.bool).to(device)
nodes_to_explore = torch.zeros(num_of_segments, dtype=torch.bool).to(device)
nodes_to_explore[seg_index] = True
nodes_to_explore = nodes_to_explore.to(device)
for k in range(num_of_segments):
last_num_component = connected_nodes.sum()
torch.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = (nodes_to_explore == torch.tensor(True)).nonzero().t().squeeze()
if len(indices.size()) == 0:
indices = indices.unsqueeze(0)
for i in indices:
neighbors = affinity_mat[i].to(device)
torch.logical_or(nodes_to_explore, neighbors.squeeze(0), out=nodes_to_explore)
return connected_nodes
def isGraphFullyConnected(affinity_mat: torch.Tensor, device: torch.device) -> torch.Tensor:
"""
Check whether the given affinity matrix is a fully connected graph.
"""
return getTheLargestComponent(affinity_mat, 0, device).sum() == affinity_mat.shape[0]
def getKneighborsConnections(affinity_mat: torch.Tensor, p_value: int, mask_method: str = 'binary') -> torch.Tensor:
"""
Binarize top-p values for each row from the given affinity matrix.
Args:
affinity_mat (Tensor):
A square matrix (tensor) containing normalized cosine similarity values
p_value (int):
The number of top values that are selected from each row.
mask_method (str):
The method that is used to manipulate the affinity matrix. The default method is 'binary'.
Returns:
binarized_affinity_mat (Tensor):
A binarized affinity matrix based on the given mask method.
"""
dim = affinity_mat.shape
binarized_affinity_mat = torch.zeros_like(affinity_mat).half()
sorted_matrix = torch.argsort(affinity_mat, dim=1, descending=True)[:, :p_value]
binarized_affinity_mat[sorted_matrix.T, torch.arange(affinity_mat.shape[0])] = (
torch.ones(1).to(affinity_mat.device).half()
)
indices_row = sorted_matrix[:, :p_value].flatten()
indices_col = torch.arange(dim[1]).repeat(p_value, 1).T.flatten()
if mask_method == 'binary' or mask_method is None:
binarized_affinity_mat[indices_row, indices_col] = (
torch.ones(indices_row.shape[0]).to(affinity_mat.device).half()
)
elif mask_method == 'drop':
binarized_affinity_mat[indices_row, indices_col] = affinity_mat[indices_row, indices_col].half()
elif mask_method == 'sigmoid':
binarized_affinity_mat[indices_row, indices_col] = torch.sigmoid(affinity_mat[indices_row, indices_col]).half()
else:
raise ValueError(f'Unknown mask method: {mask_method}')
return binarized_affinity_mat
def getAffinityGraphMat(affinity_mat_raw: torch.Tensor, p_value: int) -> torch.Tensor:
"""
Calculate a binarized graph matrix and
symmetrize the binarized graph matrix.
"""
X = affinity_mat_raw if p_value <= 0 else getKneighborsConnections(affinity_mat_raw, p_value)
symm_affinity_mat = 0.5 * (X + X.T)
return symm_affinity_mat
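# Illustrative sketch (not part of the original file): keep the top-2 neighbors per row of a toy
# affinity matrix and symmetrize the result; sizes are hypothetical.
# >>> emb = torch.randn(6, 16)
# >>> aff = ScalerMinMax(cos_similarity(emb, emb))
# >>> sym_bin = getAffinityGraphMat(aff, p_value=2)
# >>> torch.equal(sym_bin, sym_bin.T)
# True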
def getMinimumConnection(
mat: torch.Tensor, max_N: torch.Tensor, n_list: torch.Tensor, device: torch.device
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Add connections until all the nodes in the graph are fully connected.
If the graph is not fully connected, clustering might generate inaccurate results.
"""
p_value = torch.tensor(1)
affinity_mat = getAffinityGraphMat(mat, p_value)
for i, p_value in enumerate(n_list):
fully_connected = isGraphFullyConnected(affinity_mat, device)
affinity_mat = getAffinityGraphMat(mat, p_value)
if fully_connected or p_value > max_N:
break
return affinity_mat, p_value
def getRepeatedList(mapping_argmat: torch.Tensor, score_mat_size: torch.Tensor) -> torch.Tensor:
"""
Count the numbers in the mapping dictionary and create lists that contain
repeated indices that will be used for creating a repeated affinity matrix.
This repeated matrix is then used for fusing multiple affinity values.
"""
repeat_list = torch.zeros(score_mat_size, dtype=torch.int32).to(mapping_argmat.device)
idxs, counts = torch.unique(mapping_argmat, return_counts=True)
repeat_list[idxs] = counts.int().to(mapping_argmat.device)
return repeat_list
def get_argmin_mat(timestamps_in_scales: List[torch.Tensor]) -> List[torch.Tensor]:
"""
Calculate the mapping between the base scale and other scales. A segment from a longer scale is
repeatedly mapped to a segment from a shorter scale or the base scale.
Args:
timestamps_in_scales (list):
List containing timestamp tensors for each scale.
Each tensor has dimensions of (Number of base segments) x 2.
Returns:
session_scale_mapping_list (list):
List containing argmin arrays indexed by scale index.
"""
scale_list = list(range(len(timestamps_in_scales)))
segment_anchor_list = []
for scale_idx in scale_list:
time_stamps_float = timestamps_in_scales[scale_idx]
segment_anchor_list.append(torch.mean(time_stamps_float, dim=1))
base_scale_idx = max(scale_list)
base_scale_anchor = segment_anchor_list[base_scale_idx]
session_scale_mapping_list = []
for scale_idx in scale_list:
curr_scale_anchor = segment_anchor_list[scale_idx]
curr_mat = torch.tile(curr_scale_anchor, (base_scale_anchor.shape[0], 1))
base_mat = torch.tile(base_scale_anchor, (curr_scale_anchor.shape[0], 1)).t()
argmin_mat = torch.argmin(torch.abs(curr_mat - base_mat), dim=1)
session_scale_mapping_list.append(argmin_mat)
return session_scale_mapping_list
def getCosAffinityMatrix(emb: torch.Tensor) -> torch.Tensor:
"""
Calculate cosine similarity values among speaker embeddings then min-max normalize
the affinity matrix.
Args:
emb (Tensor):
Matrix containing embedding vectors. emb variable should be float(FP32) type to make the data-type
compatible with torch.mm operation for both CPU and GPU(CUDA).
dimension: (Number of embedding vectors) x (embedding dimension)
Returns:
sim_d (Tensor):
Matrix containing cosine similarity values among the given embedding vectors.
dimension: (Number of embedding vectors) x (Number of embedding vectors)
"""
if emb.shape[0] == 1:
sim_d = torch.tensor([[1]]).to(emb.device)
else:
emb = emb.float()
sim_d = cos_similarity(emb, emb)
sim_d = ScalerMinMax(sim_d)
return sim_d
def get_scale_interpolated_embs(
multiscale_weights: torch.Tensor,
embeddings_in_scales: List[torch.Tensor],
timestamps_in_scales: List[torch.Tensor],
device: torch.device = torch.device('cpu'),
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Generate a scale-interpolated single embedding vector by calculating the weighted sum
of the multiple embedding vectors from different scales. The output is a set of embedding
vectors corresponding to the base-scale segments.
Args:
multiscale_weights (Tensor):
Tensor containing Multiscale weights
Dimensions: (Number of scales) x 1
embeddings_in_scales (list):
List containing split embedding tensors by each scale
timestamps_in_scales (list):
List containing split timestamps tensors by each scale
device (torch.device):
Torch device variable
Returns:
context_emb (torch.tensor):
A set of scale-interpolated embedding vectors.
Dimensions: (Number of base-scale segments) x (Dimensions of embedding vector)
session_scale_mapping_list (list):
List containing argmin arrays indexed by scale index.
"""
rep_mat_list = []
multiscale_weights = multiscale_weights.to(device)
session_scale_mapping_list = get_argmin_mat(timestamps_in_scales)
scale_list = list(range(len(timestamps_in_scales)))
for scale_idx in scale_list:
mapping_argmat = session_scale_mapping_list[scale_idx]
emb_t = embeddings_in_scales[scale_idx].to(device)
mapping_argmat = mapping_argmat.to(device)
repeat_list = getRepeatedList(mapping_argmat, torch.tensor(emb_t.shape[0])).to(device)
rep_emb_t = torch.repeat_interleave(emb_t, repeats=repeat_list, dim=0)
rep_mat_list.append(rep_emb_t)
stacked_scale_embs = torch.stack(rep_mat_list)
context_emb = torch.matmul(stacked_scale_embs.permute(2, 1, 0), multiscale_weights.t()).squeeze().t()
if len(context_emb.shape) < 2:
context_emb = context_emb.unsqueeze(0)
context_emb = context_emb.to(device)
return context_emb, session_scale_mapping_list
def getMultiScaleCosAffinityMatrix(
multiscale_weights: torch.Tensor,
embeddings_in_scales: List[torch.Tensor],
timestamps_in_scales: List[torch.Tensor],
device: torch.device = torch.device('cpu'),
) -> torch.Tensor:
"""
Calculate cosine similarity values among speaker embeddings for each scale then
apply multiscale weights to calculate the fused similarity matrix.
NOTE: Due to CUDA memory limit, the embedding vectors in embeddings_in_scales are stored in `cpu` device.
Args:
multiscale_weights (Tensor):
Tensor containing multiscale weights
Dimensions: (Number of scales) x 1
embeddings_in_scales (list):
List containing split embedding tensors by each scale
timestamps_in_scales (list):
List containing split timestamps tensors by each scale
device (torch.device):
Torch device variable
Returns:
fused_sim_d (Tensor):
An affinity matrix that is obtained by calculating the weighted sum of
the multiple affinity matrices from the different scales.
"""
multiscale_weights = torch.squeeze(multiscale_weights, dim=0).to(device)
session_scale_mapping_list = get_argmin_mat(timestamps_in_scales)
scale_list = list(range(len(timestamps_in_scales)))
fused_sim_d = torch.zeros(len(timestamps_in_scales[-1]), len(timestamps_in_scales[-1])).to(device)
for scale_idx in scale_list:
mapping_argmat = session_scale_mapping_list[scale_idx]
emb_t = embeddings_in_scales[scale_idx].half().to(device)
score_mat_torch = getCosAffinityMatrix(emb_t)
repeat_list = getRepeatedList(mapping_argmat, torch.tensor(score_mat_torch.shape[0])).to(device)
repeated_tensor_0 = torch.repeat_interleave(score_mat_torch, repeats=repeat_list, dim=0).to(device)
repeated_tensor_1 = torch.repeat_interleave(repeated_tensor_0, repeats=repeat_list, dim=1).to(device)
fused_sim_d += multiscale_weights[scale_idx] * repeated_tensor_1
return fused_sim_d
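# Illustrative sketch (not part of the original file): fusing two scales with equal weights.
# Segment counts, durations, and dimensions are hypothetical; the base (last) scale has 6 segments.
# >>> ts_long = torch.tensor([[0.0, 2.0], [1.0, 3.0], [2.0, 4.0]])
# >>> ts_base = torch.tensor([[0.0, 1.0], [0.5, 1.5], [1.0, 2.0], [1.5, 2.5], [2.0, 3.0], [2.5, 3.5]])
# >>> embs = [torch.randn(3, 16), torch.randn(6, 16)]
# >>> fused = getMultiScaleCosAffinityMatrix(torch.tensor([0.5, 0.5]), embs, [ts_long, ts_base])
# >>> fused.shape
# torch.Size([6, 6])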
def getLaplacian(X: torch.Tensor) -> torch.Tensor:
"""
Calculate a laplacian matrix from an affinity matrix X.
"""
X.fill_diagonal_(0)
D = torch.sum(torch.abs(X), dim=1)
D = torch.diag_embed(D)
L = D - X
return L
def eigDecompose(laplacian: torch.Tensor, cuda: bool, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calculate eigenvalues and eigenvectors from the Laplacian matrix.
"""
if cuda:
if device is None:
device = torch.cuda.current_device()
laplacian = laplacian.float().to(device)
else:
laplacian = laplacian.float().to(torch.device('cpu'))
lambdas, diffusion_map = eigh(laplacian)
return lambdas, diffusion_map
def eigValueSh(laplacian: torch.Tensor, cuda: bool, device: torch.device) -> torch.Tensor:
"""
Calculate only eigenvalues from the Laplacian matrix.
"""
if cuda:
if device is None:
device = torch.cuda.current_device()
laplacian = laplacian.float().to(device)
else:
laplacian = laplacian.float().to(torch.device('cpu'))
lambdas = eigvalsh(laplacian)
return lambdas
def getLamdaGaplist(lambdas: torch.Tensor) -> torch.Tensor:
"""
Calculate the gaps between lambda values.
"""
if torch.is_complex(lambdas):
lambdas = torch.real(lambdas)
return lambdas[1:] - lambdas[:-1]
def addAnchorEmb(emb: torch.Tensor, anchor_sample_n: int, anchor_spk_n: int, sigma: float) -> torch.Tensor:
"""
Add randomly generated synthetic embeddings to make eigenanalysis more stable.
We refer to these embeddings as anchor embeddings.
emb (Tensor):
The input embedding from the embedding extractor.
anchor_sample_n (int):
Number of embedding samples per speaker.
anchor_sample_n = 10 is recommended.
anchor_spk_n (int):
Number of speakers for synthetic embedding.
anchor_spk_n = 3 is recommended.
sigma (float):
The amplitude of synthetic noise for each embedding vector.
If the sigma value is too small, under-counting could happen.
If the sigma value is too large, over-counting could happen.
sigma = 50 is recommended.
"""
emb_dim = emb.shape[1]
std_org = torch.std(emb, dim=0)
sigma = torch.tensor(sigma).to(emb.device)
new_emb_list = []
for _ in range(anchor_spk_n):
emb_m = torch.tile(torch.randn(1, emb_dim), (anchor_sample_n, 1)).to(emb.device)
emb_noise = torch.randn(anchor_sample_n, emb_dim).T.to(emb.device)
emb_noise = torch.matmul(
torch.diag(std_org), emb_noise / torch.max(torch.abs(emb_noise), dim=0)[0].unsqueeze(0)
).T
emb_gen = emb_m + sigma * emb_noise
new_emb_list.append(emb_gen)
new_emb_list.append(emb)
new_emb_np = torch.vstack(new_emb_list)
return new_emb_np
def getEnhancedSpeakerCount(
emb: torch.Tensor,
random_test_count: int = 5,
anchor_spk_n: int = 3,
anchor_sample_n: int = 10,
sigma: float = 50,
cuda: bool = False,
) -> torch.Tensor:
"""
Calculate the number of speakers using NME analysis with anchor embeddings. Add dummy speaker
embedding vectors and run speaker counting multiple times to enhance the speaker counting accuracy
for the short audio samples.
Args:
emb (Tensor):
The input embedding from the embedding extractor.
cuda (bool):
Use cuda for the operations if cuda==True.
random_test_count (int):
Number of trials of the enhanced counting with randomness.
The higher the count, the more accurate the enhanced counting is.
anchor_spk_n (int):
Number of speakers for synthetic embedding.
anchor_spk_n = 3 is recommended.
anchor_sample_n (int):
Number of embedding samples per speaker.
anchor_sample_n = 10 is recommended.
sigma (float):
The amplitude of synthetic noise for each embedding vector.
If the sigma value is too small, under-counting could happen.
If the sigma value is too large, over-counting could happen.
sigma = 50 is recommended.
Returns:
comp_est_num_of_spk (Tensor):
The estimated number of speakers. `anchor_spk_n` is subtracted from the estimated
number of speakers to factor out the dummy speaker embedding vectors.
"""
est_num_of_spk_list: List[int] = []
for seed in range(random_test_count):
torch.manual_seed(seed)
emb_aug = addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma)
mat = getCosAffinityMatrix(emb_aug)
nmesc = NMESC(
mat,
max_num_speakers=emb.shape[0],
max_rp_threshold=0.15,
sparse_search=True,
sparse_search_volume=10,
fixed_thres=-1.0,
nme_mat_size=300,
cuda=cuda,
)
est_num_of_spk, _ = nmesc.forward()
est_num_of_spk_list.append(est_num_of_spk.item())
comp_est_num_of_spk = torch.tensor(max(torch.mode(torch.tensor(est_num_of_spk_list))[0].item() - anchor_spk_n, 1))
return comp_est_num_of_spk
def split_input_data(
embeddings_in_scales: torch.Tensor,
timestamps_in_scales: torch.Tensor,
multiscale_segment_counts: torch.LongTensor,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Split multiscale embeddings and multiscale timestamps into scale-wise Python lists.
This formatting function is needed because the inputs are passed as single concatenated `torch.Tensor` objects.
Args:
embeddings_in_scales (Tensor):
Concatenated Torch tensor containing embeddings in multiple scales
timestamps_in_scales (Tensor):
Concatenated Torch tensor containing timestamps in multiple scales
multiscale_segment_counts (LongTensor):
Concatenated Torch LongTensor containing number of segments per each scale
Returns:
embeddings_in_scales (list):
List containing split embedding tensors by each scale
timestamps_in_scales (list):
List containing split timestamps tensors by each scale
"""
split_index: List[int] = multiscale_segment_counts.tolist()
embeddings_in_scales = torch.split(embeddings_in_scales, split_index, dim=0)
timestamps_in_scales = torch.split(timestamps_in_scales, split_index, dim=0)
embeddings_in_scales, timestamps_in_scales = list(embeddings_in_scales), list(timestamps_in_scales)
return embeddings_in_scales, timestamps_in_scales
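# Illustrative sketch (not part of the original file): splitting concatenated multi-scale inputs
# back into per-scale lists; counts and dimensions are hypothetical.
# >>> counts = torch.LongTensor([3, 6])
# >>> embs, tss = split_input_data(torch.randn(9, 192), torch.randn(9, 2), counts)
# >>> [e.shape[0] for e in embs], [t.shape[0] for t in tss]
# ([3, 6], [3, 6])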
def estimateNumofSpeakers(
affinity_mat: torch.Tensor, max_num_speakers: int, cuda: bool = False
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Estimate the number of speakers using eigendecomposition on the Laplacian Matrix.
Args:
affinity_mat (Tensor):
N by N affinity matrix
max_num_speakers (int):
Maximum number of clusters to consider for each session
cuda (bool):
If cuda available eigendecomposition is computed on GPUs.
Returns:
num_of_spk (Tensor):
The estimated number of speakers
lambdas (Tensor):
The lambda values from eigendecomposition
lambda_gap (Tensor):
The gap between the lambda values from eigendecomposition
"""
laplacian = getLaplacian(affinity_mat)
lambdas = eigValueSh(laplacian, cuda=cuda, device=affinity_mat.device)
lambdas = torch.sort(lambdas)[0]
lambda_gap = getLamdaGaplist(lambdas)
num_of_spk = torch.argmax(lambda_gap[: min(max_num_speakers, lambda_gap.shape[0])]) + 1
return num_of_spk, lambdas, lambda_gap
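# Illustrative sketch (not part of the original file): speaker-count estimation on a toy affinity
# matrix built from two synthetic, well-separated embedding groups (sizes are hypothetical).
# >>> emb = torch.cat([torch.randn(10, 16) + 5.0, torch.randn(10, 16) - 5.0])
# >>> aff = getAffinityGraphMat(getCosAffinityMatrix(emb), p_value=5)
# >>> num_spk, lambdas, lambda_gap = estimateNumofSpeakers(aff, max_num_speakers=8)  # num_spk is typically 2 here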
class SpectralClustering:
"""
Perform spectral clustering by calculating spectral embeddings then run k-means clustering
algorithm on the spectral embeddings.
"""
def __init__(
self,
n_clusters: int = 8,
random_state: int = 0,
n_random_trials: int = 1,
cuda: bool = False,
device: torch.device = torch.device('cpu'),
):
"""
Initialize the variables needed for spectral clustering and k-means++.
Args:
n_clusters (int):
Number of the estimated (or oracle) number of speakers
random_state (int):
Random seed that determines a random state of k-means initialization.
n_random_trials (int):
Number of trials with different random seeds for k-means initialization.
k-means++ algorithm is executed for multiple times then the final result
is obtained by taking a majority vote.
cuda (bool):
if cuda=True, spectral clustering is done on GPU.
device (torch.device):
Torch device variable
"""
self.n_clusters = n_clusters
self.random_state = random_state
self.n_random_trials = max(n_random_trials, 1)
self.cuda = cuda
self.device = device
def forward(self, X) -> torch.Tensor:
"""
Call self.clusterSpectralEmbeddings() function to predict cluster labels.
Args:
X (Tensor):
Affinity matrix input
Returns:
labels (Tensor):
clustering label output
"""
if X.shape[0] != X.shape[1]:
raise ValueError("The affinity matrix is not a square matrix.")
labels = self.clusterSpectralEmbeddings(X, cuda=self.cuda, device=self.device)
return labels
def clusterSpectralEmbeddings(
self, affinity: torch.Tensor, cuda: bool = False, device: torch.device = torch.device('cpu')
) -> torch.Tensor:
"""
Perform k-means clustering on spectral embeddings. To alleviate the effect of randomness,
k-means clustering is performed (self.n_random_trials) times and the final labels are obtained
by taking a majority vote. If speed is the major concern, self.n_random_trials should be set to 1.
n_random_trials=30 is recommended to see an improved result.
Args:
affinity (Tensor):
Affinity matrix input
cuda (torch.bool):
Use cuda for spectral clustering if cuda=True
device (torch.device):
Torch device variable
Returns:
labels (Tensor):
clustering label output
"""
spectral_emb = self.getSpectralEmbeddings(affinity, n_spks=self.n_clusters, cuda=cuda)
labels_set = []
for random_state_seed in range(self.random_state, self.random_state + self.n_random_trials):
_labels = kmeans_torch(
X=spectral_emb, num_clusters=self.n_clusters, random_state=random_state_seed, device=device
)
labels_set.append(_labels)
stacked_labels = torch.stack(labels_set)
label_index = torch.mode(torch.mode(stacked_labels, 0)[1])[0]
labels = stacked_labels[label_index]
return labels
def getSpectralEmbeddings(self, affinity_mat: torch.Tensor, n_spks: int = 8, cuda: bool = False) -> torch.Tensor:
"""
Calculate eigenvalues and eigenvectors to extract spectral embeddings.
Args:
affinity_mat (Tensor):
Affinity matrix input
n_spks (int):
Number of the estimated (or oracle) speakers, which sets the number of spectral embedding dimensions
cuda (torch.bool):
Use cuda for the eigendecomposition if cuda=True
Returns:
embedding (Tensor):
Spectral embedding matrix of shape (Number of segments) x (n_spks)
"""
laplacian = getLaplacian(affinity_mat)
_, diffusion_map_ = eigDecompose(laplacian, cuda=cuda, device=affinity_mat.device)
diffusion_map = diffusion_map_[:, :n_spks]
inv_idx = torch.arange(diffusion_map.size(1) - 1, -1, -1).long()
embedding = diffusion_map.T[inv_idx, :]
return embedding[:n_spks].T
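# Illustrative sketch (not part of the original file): spectral clustering of a toy affinity
# matrix built from two synthetic, well-separated embedding groups; sizes are hypothetical.
# >>> emb = torch.cat([torch.randn(12, 16) + 4.0, torch.randn(12, 16) - 4.0])
# >>> labels = SpectralClustering(n_clusters=2).forward(getCosAffinityMatrix(emb))
# >>> labels.shape
# torch.Size([24])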
class NMESC:
"""
Normalized Maximum Eigengap based Spectral Clustering (NME-SC)
uses Eigengap analysis to get an estimated p-value for
affinity binarization and an estimated number of speakers.
p_value (also referred to as p_neighbors) denotes how many of the top
affinity values in each row are kept (converted to 1) while the rest of
the values are converted to 0.
p_value can also be tuned on a development set without performing
NME-analysis. Fixing p_value results in significantly faster clustering,
but the performance is then tied to the development set.
References:
Tae Jin Park et al., Auto-Tuning Spectral Clustering for Speaker Diarization
Using Normalized Maximum Eigengap, IEEE Signal Processing Letters 27 (2019),
https://arxiv.org/abs/2003.02405
Args:
Please refer to def __init__().
Methods:
NMEanalysis():
Performs NME-analysis to estimate p_value and the number of speakers
subsampleAffinityMat(nme_mat_size):
Subsamples the number of speakers to reduce the computational load
getPvalueList():
Generates a list containing p-values that need to be examined.
getEigRatio(p_neighbors):
Calculates g_p, which is a ratio between p_neighbors and the maximum eigengap
getLamdaGaplist(lambdas):
Calculates lambda gap values from an array contains lambda values
estimateNumofSpeakers(affinity_mat):
Estimates the number of speakers using lambda gap list
"""
def __init__(
self,
mat: torch.Tensor,
max_num_speakers: int = 10,
max_rp_threshold: float = 0.15,
sparse_search: bool = True,
sparse_search_volume: int = 30,
nme_mat_size: int = 512,
use_subsampling_for_nme: bool = True,
fixed_thres: float = -1.0,
maj_vote_spk_count: bool = False,
parallelism: bool = True,
cuda: bool = False,
device: torch.device = torch.device('cpu'),
):
"""
Args:
mat (Tensor):
Cosine similarity matrix calculated from the provided speaker embeddings.
max_num_speakers (int):
Maximum number of speakers for estimating number of speakers.
Shows stable performance under 20.
max_rp_threshold (float):
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.15.
sparse_search (bool):
To increase the speed of parameter estimation, sparse_search=True
limits the number of p_values we search.
sparse_search_volume (int):
Number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
However, a value lower than 20 might cause a poor parameter estimation.
nme_mat_size (int):
Targeted size of matrix for NME analysis.
use_subsampling_for_nme (bool):
Use subsampling to reduce the computational complexity.
Default is True.
fixed_thres (float or None):
A fixed threshold which can be used instead of estimating the
threshold with NME analysis. If fixed_thres is float,
it skips the NME analysis part.
maj_vote_spk_count (bool):
If True, take a majority vote on all p-values in the given range to estimate the number of speakers.
The majority voting may help suppress overcounting of the speakers and improve speaker
counting accuracy.
parallelism (bool):
If True, turn on parallelism based on torch.jit.script library.
cuda (bool):
Use cuda for Eigen decomposition if cuda=True.
device (torch.device):
Torch device variable
"""
self.max_num_speakers: int = max_num_speakers
self.max_rp_threshold: float = max_rp_threshold
self.use_subsampling_for_nme: bool = use_subsampling_for_nme
self.nme_mat_size: int = nme_mat_size
self.sparse_search: bool = sparse_search
self.sparse_search_volume: int = sparse_search_volume
self.min_p_value = torch.tensor(2)
self.fixed_thres: float = fixed_thres
self.eps = 1e-10
self.max_N = torch.tensor(0)
self.mat: torch.Tensor = mat
self.p_value_list: torch.Tensor = self.min_p_value.unsqueeze(0)
self.cuda: bool = cuda
self.device: torch.device = device
self.maj_vote_spk_count: bool = maj_vote_spk_count
self.parallelism: bool = parallelism
def forward(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Subsample the input matrix to reduce the computational load.
Returns:
est_num_of_spk (Tensor):
Estimated number of speakers from NMESC approach
p_hat_value (Tensor):
Estimated p-value (determines how many neighboring values to be selected)
"""
if self.use_subsampling_for_nme:
subsample_ratio = self.subsampleAffinityMat(self.nme_mat_size)
else:
subsample_ratio = torch.tensor(1)
# Scan p_values and find the p_value that generates the smallest g_p value.
results: List[torch.Tensor] = []
est_spk_n_dict: Dict[int, torch.Tensor] = {}
self.p_value_list = self.getPvalueList()
p_volume = self.p_value_list.shape[0]
eig_ratio_list = torch.zeros(p_volume,)
est_num_of_spk_list = torch.zeros(p_volume,)
if self.parallelism:
futures: List[torch.jit.Future[torch.Tensor]] = []
for p_idx, p_value in enumerate(self.p_value_list):
futures.append(torch.jit.fork(self.getEigRatio, p_value))
for future in futures:
results.append(torch.jit.wait(future))
else:
for p_idx, p_value in enumerate(self.p_value_list):
results.append(self.getEigRatio(p_value))
# Retrieve the eigen analysis results
for p_idx, p_value in enumerate(self.p_value_list):
output = results[p_idx]
g_p, est_num_of_spk = output[0], output[1].int()
eig_ratio_list[p_idx] = g_p
est_spk_n_dict[p_value.item()] = est_num_of_spk
est_num_of_spk_list[p_idx] = est_num_of_spk
index_nn = torch.argmin(eig_ratio_list)
rp_p_value = self.p_value_list[index_nn]
affinity_mat = getAffinityGraphMat(self.mat, rp_p_value)
# Checks whether the affinity graph is fully connected.
# If not, it adds a minimum number of connections to make it fully connected.
if not isGraphFullyConnected(affinity_mat, device=self.device):
affinity_mat, rp_p_value = getMinimumConnection(
self.mat, self.max_N, self.p_value_list, device=self.device
)
p_hat_value = (subsample_ratio * rp_p_value).type(torch.int)
if self.maj_vote_spk_count:
est_num_of_spk = torch.mode(torch.tensor(est_num_of_spk_list))[0]
else:
est_num_of_spk = est_spk_n_dict[rp_p_value.item()]
return est_num_of_spk, p_hat_value
def subsampleAffinityMat(self, nme_mat_size: int) -> torch.Tensor:
"""
Perform subsampling of affinity matrix.
This subsampling is for reducing computational complexity, not for improving performance.
The smaller nme_mat_size is,
- the bigger the chance of missing a speaker.
- the faster p-value estimation speed (based on eigen decomposition).
The recommended nme_mat_size is 250~750.
However, if there are speakers who speak for a very short period of time in the recording,
this subsampling might make the system miss underrepresented speakers.
Use this variable with caution.
Args:
nme_mat_size (int):
The targeted matrix size
Returns:
subsample_ratio (float):
The ratio between nme_mat_size and the original matrix size
"""
subsample_ratio = torch.max(torch.tensor(1), torch.tensor(self.mat.shape[0] / nme_mat_size)).type(torch.int)
self.mat = self.mat[:: subsample_ratio.item(), :: subsample_ratio.item()]
return subsample_ratio
def getEigRatio(self, p_neighbors: int) -> torch.Tensor:
"""
For a given p_neighbors value, calculate g_p, which is a ratio between p_neighbors and the
maximum eigengap values.
References:
Tae Jin Park et al., Auto-Tuning Spectral Clustering for Speaker Diarization Using
Normalized Maximum Eigengap, IEEE Signal Processing Letters 27 (2019),
https://arxiv.org/abs/2003.02405
Args:
p_neighbors (int):
Determines how many binary graph connections we want to keep for each row.
Returns:
g_p (Tensor):
The ratio between the p_neighbors value and the maximum eigengap value.
est_num_of_spk (Tensor):
Estimated number of speakers; returned stacked with g_p as torch.stack([g_p, est_num_of_spk]).
"""
affinity_mat = getAffinityGraphMat(self.mat, p_neighbors)
est_num_of_spk, lambdas, lambda_gap_list = estimateNumofSpeakers(
affinity_mat, self.max_num_speakers, self.cuda
)
arg_sorted_idx = torch.argsort(lambda_gap_list[: self.max_num_speakers], descending=True)
max_key = arg_sorted_idx[0]
max_eig_gap = lambda_gap_list[max_key] / (torch.max(lambdas).item() + self.eps)
g_p = (p_neighbors / self.mat.shape[0]) / (max_eig_gap + self.eps)
return torch.stack([g_p, est_num_of_spk])
def getPvalueList(self) -> torch.Tensor:
"""
Generate a p-value (p_neighbors) list for searching. p_value_list must include 2 (min_p_value)
since at least one neighboring segment should be selected other than itself.
If a fixed_thres value is specified, only that single p-value is used.
If fixed_thres is not provided, multiple p-values are searched.
If sparse_search is True:
- Limit the number of p-values to be searched to sparse_search_volume.
- N should be at least 2 to include a number greater than 1.
If sparse_search is False:
- Scan all the p_values from 1 to max_N
- If sparse_search is False, NMESC analysis could take more time compared to sparse_search = True.
Returns:
p_value_list (Tensor):
Tensor containing the p_values to be searched.
"""
if self.fixed_thres is not None and self.fixed_thres > 0.0:
self.max_N = torch.max(
torch.floor(torch.tensor(self.mat.shape[0] * self.fixed_thres)).type(torch.int), self.min_p_value
)
p_value_list = self.max_N.unsqueeze(0).int()
else:
self.max_N = torch.max(
torch.floor(torch.tensor(self.mat.shape[0] * self.max_rp_threshold)).type(torch.int), self.min_p_value
)
if self.sparse_search:
search_volume = torch.min(self.max_N, torch.tensor(self.sparse_search_volume).type(torch.int))
# search at least two values
N = torch.max(search_volume, torch.tensor(2))
# avoid repeating values by limiting the step size
steps = min(self.max_N, N)
p_value_list = torch.linspace(start=1, end=self.max_N, steps=steps).type(torch.int)
else:
p_value_list = torch.arange(1, self.max_N + 1)
if p_value_list.shape[0] == 0:
raise ValueError("p_value_list should not be empty.")
return p_value_list
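# Illustrative sketch (not part of the original file): NME analysis on a toy affinity matrix built
# from two synthetic, well-separated speaker blobs; sizes and parameters are hypothetical.
# >>> emb = torch.cat([torch.randn(15, 16) + 4.0, torch.randn(15, 16) - 4.0])
# >>> nmesc = NMESC(getCosAffinityMatrix(emb), max_num_speakers=8, sparse_search_volume=10)
# >>> est_num_of_spk, p_hat_value = nmesc.forward()  # est_num_of_spk is expected to be 2 here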
class SpeakerClustering(torch.nn.Module):
def __init__(
self,
min_samples_for_nmesc: int = 6,
nme_mat_size: int = 512,
sparse_search: bool = True,
maj_vote_spk_count: bool = False,
parallelism: bool = False,
cuda: bool = False,
):
"""
Clustering method for speaker diarization based on cosine similarity.
NME-SC part is converted to torch.tensor based operations in NeMo 1.9.
Args:
min_samples_for_nmesc (int):
The minimum number of samples required for NME clustering. This avoids
zero p_neighbour_lists. If the input has fewer segments than min_samples,
it is directed to the enhanced speaker counting mode.
sparse_search (bool):
Toggle sparse search mode. If True, limit the size of p_value_list to sparse_search_volume.
maj_vote_spk_count (bool):
If True, take a majority vote on all p-values in the given range to estimate the number of speakers.
The majority voting may help suppress overcounting of the speakers and improve speaker
counting accuracy.
parallelism (bool):
Use dynamic parallelism feature in torch.jit compiler to accelerate the p-value search.
cuda (bool):
Boolean variable for toggling cuda availability.
"""
super().__init__()
self.min_samples_for_nmesc: int = min_samples_for_nmesc
self.nme_mat_size: int = nme_mat_size
self.sparse_search: bool = sparse_search
self.parallelism: bool = parallelism
self.cuda: bool = cuda
self.maj_vote_spk_count: bool = maj_vote_spk_count
self.embeddings_in_scales: List[torch.Tensor] = [torch.Tensor(0)]
self.timestamps_in_scales: List[torch.Tensor] = [torch.Tensor(0)]
self.device = torch.device("cuda") if self.cuda else torch.device("cpu")
def forward(self, param_dict: Dict[str, torch.Tensor]) -> torch.LongTensor:
"""
A function wrapper designed for inference in exported script format.
Note:
Dict is used to allow easy inference of the exported jit model in Triton server using easy to understand
naming convention.
See https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#special-conventions-for-pytorch-backend
Args:
param_dict (dict):
Dictionary containing the arguments for speaker clustering.
See `forward_infer` function for the argument information.
Returns:
Y (LongTensor):
Speaker labels for the segments in the given input embeddings.
"""
embeddings_in_scales = param_dict['embeddings']
timestamps_in_scales = param_dict['timestamps']
multiscale_segment_counts = param_dict['multiscale_segment_counts']
multiscale_weights = param_dict['multiscale_weights']
oracle_num_speakers = int(param_dict['oracle_num_speakers'].item())
max_num_speakers = int(param_dict['max_num_speakers'].item())
enhanced_count_thres = int(param_dict['enhanced_count_thres'].item())
sparse_search_volume = int(param_dict['sparse_search_volume'].item())
max_rp_threshold = float(param_dict['max_rp_threshold'].item())
fixed_thres = float(param_dict['fixed_thres'].item())
return self.forward_infer(
embeddings_in_scales=embeddings_in_scales,
timestamps_in_scales=timestamps_in_scales,
multiscale_segment_counts=multiscale_segment_counts,
multiscale_weights=multiscale_weights,
oracle_num_speakers=oracle_num_speakers,
max_rp_threshold=max_rp_threshold,
max_num_speakers=max_num_speakers,
enhanced_count_thres=enhanced_count_thres,
sparse_search_volume=sparse_search_volume,
fixed_thres=fixed_thres,
)
def forward_infer(
self,
embeddings_in_scales: torch.Tensor,
timestamps_in_scales: torch.Tensor,
multiscale_segment_counts: torch.LongTensor,
multiscale_weights: torch.Tensor,
oracle_num_speakers: int = -1,
max_rp_threshold: float = 0.15,
max_num_speakers: int = 8,
enhanced_count_thres: int = 40,
sparse_search_volume: int = 30,
fixed_thres: float = -1.0,
kmeans_random_trials: int = 1,
) -> torch.LongTensor:
"""
Calculate affinity matrix using timestamps and speaker embeddings, run NME analysis to estimate the best
p-value and perform spectral clustering based on the estimated p-value and the calculated affinity matrix.
Caution:
For the sake of compatibility with libtorch, python boolean `False` is replaced with `torch.LongTensor(-1)`.
Args:
Dict containing following keys associated with tensors.
embeddings (Tensor):
Concatenated Torch tensor containing embeddings in multiple scales
This tensor has dimensions of (Number of base segments) x (Embedding Dimension)
timestamps (Tensor):
Concatenated Torch tensor containing timestamps in multiple scales.
This tensor has dimensions of (Total number of segments all scales) x 2
Example:
>>> timestamps_in_scales = \
>>> torch.tensor([[0.4, 1.4], [0.9, 1.9], [1.4, 2.4], ..., [121.2, 122.2]])
multiscale_segment_counts (LongTensor):
Concatenated Torch tensor containing number of segments per each scale
This tensor has dimensions of (Number of scales)
Example:
>>> multiscale_segment_counts = torch.LongTensor([31, 52, 84, 105, 120])
multiscale_weights (Tensor):
Multi-scale weights that are used when affinity scores are merged.
Example:
>>> multiscale_weights = torch.tensor([1.4, 1.3, 1.2, 1.1, 1.0])
oracle_num_speakers (int):
The number of speakers in a session from the reference transcript
max_num_speakers (int):
The upper bound for the number of speakers in each session
max_rp_threshold (float):
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.15.
enhanced_count_thres (int):
For short audio recordings, the clustering algorithm cannot
accumulate a sufficient amount of speaker profile information for each cluster.
Thus, function `getEnhancedSpeakerCount` employs anchor embeddings
(dummy representations) to mitigate the effect of cluster sparsity.
enhanced_count_thres = 80 is recommended.
sparse_search_volume (int):
Number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
Lower than 20 might cause a poor parameter estimation.
fixed_thres (float):
If fixed_thres value is provided, NME-analysis process will be skipped.
This value should be optimized on a development set to obtain a quality result.
Default is -1.0, which triggers NME-analysis to estimate the threshold.
kmeans_random_trials (int):
Number of random trials for initializing k-means clustering. More trials
will result in a more stable clustering result. Default is 1.
Returns:
Y (LongTensor):
Speaker labels for the segments in the given input embeddings.
"""
self.embeddings_in_scales, self.timestamps_in_scales = split_input_data(
embeddings_in_scales, timestamps_in_scales, multiscale_segment_counts
)
# Last slot is the base scale embeddings
emb = self.embeddings_in_scales[-1]
# Cases for extremely short sessions
if emb.shape[0] == 1:
return torch.zeros((1,), dtype=torch.int64)
elif emb.shape[0] <= max(enhanced_count_thres, self.min_samples_for_nmesc) and oracle_num_speakers < 0:
est_num_of_spk_enhanced = getEnhancedSpeakerCount(emb=emb, cuda=self.cuda)
else:
est_num_of_spk_enhanced = torch.tensor(-1)
if oracle_num_speakers > 0:
max_num_speakers = oracle_num_speakers
mat = getMultiScaleCosAffinityMatrix(
multiscale_weights, self.embeddings_in_scales, self.timestamps_in_scales, self.device
)
nmesc = NMESC(
mat,
max_num_speakers=max_num_speakers,
max_rp_threshold=max_rp_threshold,
sparse_search=self.sparse_search,
sparse_search_volume=sparse_search_volume,
fixed_thres=fixed_thres,
nme_mat_size=self.nme_mat_size,
maj_vote_spk_count=self.maj_vote_spk_count,
parallelism=self.parallelism,
cuda=self.cuda,
device=self.device,
)
# If there are less than `min_samples_for_nmesc` segments, est_num_of_spk is 1.
if mat.shape[0] > self.min_samples_for_nmesc:
est_num_of_spk, p_hat_value = nmesc.forward()
affinity_mat = getAffinityGraphMat(mat, p_hat_value)
else:
nmesc.fixed_thres = max_rp_threshold
est_num_of_spk, p_hat_value = nmesc.forward()
affinity_mat = mat
# n_clusters is number of speakers estimated from spectral clustering.
if oracle_num_speakers > 0:
n_clusters = int(oracle_num_speakers)
elif est_num_of_spk_enhanced > 0:
n_clusters = int(est_num_of_spk_enhanced.item())
else:
n_clusters = int(est_num_of_spk.item())
spectral_model = SpectralClustering(
n_clusters=n_clusters, n_random_trials=kmeans_random_trials, cuda=self.cuda, device=self.device
)
Y = spectral_model.forward(affinity_mat)
return Y
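# Illustrative end-to-end sketch (not part of the original file). All sizes, timestamps, weights,
# and counts below are hypothetical; embeddings are synthetic two-speaker blobs, with the base
# (shortest) scale placed last as required.
# >>> emb0 = torch.cat([torch.randn(5, 16) + 4.0, torch.randn(5, 16) - 4.0])    # longer scale, 10 segments
# >>> emb1 = torch.cat([torch.randn(10, 16) + 4.0, torch.randn(10, 16) - 4.0])  # base scale, 20 segments
# >>> ts0 = torch.stack([torch.arange(10.0), torch.arange(10.0) + 2.0], dim=1)
# >>> ts1 = torch.stack([torch.arange(20.0) * 0.5, torch.arange(20.0) * 0.5 + 1.0], dim=1)
# >>> clusterer = SpeakerClustering(cuda=False)
# >>> Y = clusterer.forward_infer(
# ...     embeddings_in_scales=torch.cat([emb0, emb1]),
# ...     timestamps_in_scales=torch.cat([ts0, ts1]),
# ...     multiscale_segment_counts=torch.LongTensor([10, 20]),
# ...     multiscale_weights=torch.tensor([0.5, 0.5]),
# ...     oracle_num_speakers=2,
# ... )
# >>> Y.shape
# torch.Size([20])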
|
NeMo-main
|
nemo/collections/asr/parts/utils/offline_clustering.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import (
PrecisionRecallDisplay,
RocCurveDisplay,
average_precision_score,
log_loss,
precision_recall_curve,
roc_auc_score,
roc_curve,
)
def auc_roc(y_true: Union[List[int], np.ndarray], y_score: Union[List[float], np.ndarray]) -> float:
"""Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) from prediction scores.
Note: If only one class is present in y_true, 0.5 is returned.
"""
y_true = np.array(y_true)
y_score = np.array(y_score)
assert len(y_true) == len(y_score)
assert np.all(y_true >= 0) and np.all(y_true <= 1)
if np.all(y_true == 0) or np.all(y_true == 1):
return 0.5
return roc_auc_score(y_true, y_score)
def auc_pr(y_true: Union[List[int], np.ndarray], y_score: Union[List[float], np.ndarray]) -> float:
"""Compute Area Under the Precision-Recall Curve (PR AUC) from prediction scores.
Note: If only negatives are present in y_true, 0.0 is returned.
"""
y_true = np.array(y_true)
y_score = np.array(y_score)
assert len(y_true) == len(y_score)
assert np.all(y_true >= 0) and np.all(y_true <= 1)
if np.all(y_true == 0):
return 0.0
return average_precision_score(y_true, y_score)
def auc_nt(y_true: Union[List[int], np.ndarray], y_score: Union[List[float], np.ndarray]) -> float:
"""Compute Area Under the Negative Predictive Value vs. True Negative Rate Curve (NT AUC) from prediction scores.
This metric can be thought of as a PR AUC in which errors are treated as positives.
Note: If only positives are present in y_true, 0.0 is returned.
"""
y_true = np.array(y_true)
y_score = np.array(y_score)
assert len(y_true) == len(y_score)
assert np.all(y_true >= 0) and np.all(y_true <= 1)
if np.all(y_true == 1):
return 0.0
return average_precision_score(1 - y_true, 1 - y_score)
def nce(y_true: Union[List[int], np.ndarray], y_score: Union[List[float], np.ndarray]) -> float:
"""Compute Normalized Cross Entropy (NCE) from prediction scores. Also known as the Normalized Mutual Information.
NCE measures how close the correct prediction scores are to one and the incorrect prediction scores are to zero.
Negative NCE values indicate that the classifier performs worse than setting all prediction scores
to the proportion of correct predictions.
Note: If only one class is present in y_true, -inf is returned.
"""
y_true = np.array(y_true)
y_score = np.array(y_score)
assert len(y_true) == len(y_score)
assert np.all(y_true >= 0) and np.all(y_true <= 1)
if np.all(y_true == 0) or np.all(y_true == 1):
return -math.inf
p = y_true.mean()
eps = 1e-15
Hp = -(math.log(p + eps) * p + math.log(1 - p + eps) * (1 - p))
return (Hp - log_loss(y_true, y_score)) / Hp
def ece(
y_true: Union[List[int], np.ndarray],
y_score: Union[List[float], np.ndarray],
n_bins: int = 100,
return_curve: bool = False,
) -> Union[float, Tuple[float, Tuple[List[int], List[float]]]]:
"""Compute Expected Calibration Error (ECE) from prediction scores.
ECE measures how close the prediction confidence is to the actual accuracy within each confidence bin.
ECE ranges from zero to one with the best value zero (the lower the value, the better).
"""
y_true = np.array(y_true)
y_score = np.array(y_score)
assert len(y_true) == len(y_score)
assert np.all(y_true >= 0) and np.all(y_true <= 1)
py = np.array([1 - y_score, y_score]).T
acc, conf = np.zeros(n_bins), np.zeros(n_bins)
Bm = np.zeros(n_bins)
ece_curve = []
thresholds = []
for m in range(n_bins):
a, b = m / n_bins, (m + 1) / n_bins
threshold = (a + b) / 2
thresholds.append(threshold)
py_index = (py.T[1] >= threshold).astype(int)
py_value = py[np.arange(len(py_index)), py_index]
bin_range = ((py_value > a) & (py_value <= b)).nonzero()[0]
Bm[m] = len(bin_range)
if Bm[m] > 0:
acc[m] = (py_index[bin_range] == y_true[bin_range]).sum() / Bm[m]
conf[m] = py_value[bin_range].sum() / Bm[m]
ece_curve.append(Bm[m] * np.abs(acc[m] - conf[m]))
ece = sum(ece_curve) / sum(Bm)
if return_curve:
return ece, (thresholds, ece_curve)
else:
return ece
def auc_yc(
y_true: Union[List[int], np.ndarray],
y_score: Union[List[float], np.ndarray],
n_bins: int = 100,
return_std_maximum: bool = False,
return_curve: bool = False,
) -> Union[
float,
Tuple[float, Tuple[List[int], List[float]]],
Tuple[float, float, float],
Tuple[float, float, float, Tuple[List[int], List[float]]],
]:
"""Compute Area Under the Youden's Curve (YC AUC) from prediction scores.
YC AUC represents the rate of the effective threshold range.
If return_std_maximum is set to True, std and maximum values of the Youden's Curve are returned with the AUC.
Note: If only one class is present in y_true, zeroes are returned for every entity.
"""
y_true = np.array(y_true)
y_score = np.array(y_score)
thresholds = np.linspace(0, 1, n_bins + 1)
assert len(y_true) == len(y_score)
assert np.all(y_true >= 0) and np.all(y_true <= 1)
if np.all(y_true == 0) or np.all(y_true == 1):
if return_std_maximum and return_curve:
return 0.0, 0.0, 0.0, (thresholds, np.zeros(len(thresholds)))
elif return_std_maximum:
return 0.0, 0.0, 0.0
elif return_curve:
return 0.0, (thresholds, np.zeros(len(thresholds)))
else:
return 0.0
mask_correct = y_true == 1
count_correct = max(len(mask_correct.nonzero()[0]), 1)
count_incorrect = max(len(y_true) - count_correct, 1)
y_score_correct = y_score[mask_correct]
y_score_incorrect = y_score[~mask_correct]
yc = []
for threshold in thresholds:
tnr = len((y_score_incorrect < threshold).nonzero()[0]) / count_incorrect
fnr = len((y_score_correct < threshold).nonzero()[0]) / count_correct
yc.append(abs(tnr - fnr))
yc = np.array(yc)
if return_std_maximum and return_curve:
return yc.mean(), yc.std(), yc.max(), (thresholds, yc)
elif return_std_maximum:
return yc.mean(), yc.std(), yc.max()
elif return_curve:
return yc.mean(), (thresholds, yc)
else:
return yc.mean()
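# A minimal usage sketch with hypothetical scores. Well-separated correct and
# incorrect scores keep the Youden curve high over a wide threshold range,
# which yields a large mean (the returned AUC), std and maximum.
#     mean_yc = auc_yc([1, 1, 0, 0], [0.9, 0.8, 0.2, 0.1])
#     mean_yc, std_yc, max_yc, (thresholds, yc) = auc_yc(
#         [1, 1, 0, 0], [0.9, 0.8, 0.2, 0.1], return_std_maximum=True, return_curve=True
#     )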
def save_confidence_hist(y_score: Union[List[float], np.ndarray], plot_dir: Union[str, Path], name: str = "hist"):
os.makedirs(plot_dir, exist_ok=True)
plt.hist(np.array(y_score), 50, range=(0, 1))
plt.title(name)
plt.xlabel("Confidence score")
plt.ylabel("Count")
plt.savefig(Path(plot_dir) / Path(name + ".png"), dpi=300)
plt.clf()
def save_roc_curve(
y_true: Union[List[int], np.ndarray],
y_score: Union[List[float], np.ndarray],
plot_dir: Union[str, Path],
name: str = "roc",
):
assert len(y_true) == len(y_score)
os.makedirs(plot_dir, exist_ok=True)
fpr, tpr, _ = roc_curve(1 - np.array(y_true), 1 - np.array(y_score))
RocCurveDisplay(fpr=fpr, tpr=tpr).plot()
plt.title(name)
plt.savefig(Path(plot_dir) / Path(name + ".png"), dpi=300)
plt.clf()
def save_pr_curve(
y_true: Union[List[int], np.ndarray],
y_score: Union[List[float], np.ndarray],
plot_dir: Union[str, Path],
name: str = "pr",
):
assert len(y_true) == len(y_score)
os.makedirs(plot_dir, exist_ok=True)
precision, recall, _ = precision_recall_curve(np.array(y_true), np.array(y_score))
PrecisionRecallDisplay(precision=precision, recall=recall).plot()
plt.title(name)
plt.savefig(Path(plot_dir) / Path(name + ".png"), dpi=300)
plt.clf()
def save_nt_curve(
y_true: Union[List[int], np.ndarray],
y_score: Union[List[float], np.ndarray],
plot_dir: Union[str, Path],
name: str = "nt",
):
assert len(y_true) == len(y_score)
os.makedirs(plot_dir, exist_ok=True)
precision, recall, _ = precision_recall_curve(1 - np.array(y_true), 1 - np.array(y_score))
PrecisionRecallDisplay(precision=precision, recall=recall).plot()
plt.title(name)
plt.savefig(Path(plot_dir) / Path(name + ".png"), dpi=300)
plt.clf()
def save_custom_confidence_curve(
thresholds: Union[List[float], np.ndarray],
values: Union[List[float], np.ndarray],
plot_dir: Union[str, Path],
name: str = "my_awesome_curve",
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
):
assert len(thresholds) == len(values)
os.makedirs(plot_dir, exist_ok=True)
plt.plot(thresholds, values)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title(name)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
plt.savefig(Path(plot_dir) / Path(name + ".png"), dpi=300)
plt.clf()
|
NeMo-main
|
nemo/collections/asr/parts/utils/confidence_metrics.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import Counter
from collections import OrderedDict as od
from pathlib import Path
from typing import Dict, List, Union
import librosa
import numpy as np
from nemo.collections.asr.parts.utils.speaker_utils import (
audio_rttm_map,
get_subsegments,
get_uniqname_from_filepath,
rttm_to_labels,
segments_manifest_to_subsegments_manifest,
write_rttm2manifest,
)
from nemo.utils.data_utils import DataStoreObject
def rreplace(s: str, old: str, new: str) -> str:
"""
Replace end of string.
Args:
s (str): string to operate on
old (str): ending of string to replace
new (str): replacement for ending of string
Returns:
new.join(li) (string): new string with end replaced
"""
li = s.rsplit(old, 1)
return new.join(li)
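# A minimal usage sketch: only the last occurrence of `old` is replaced, which
# is how the manifest helpers below derive names for intermediate files.
#     rreplace('input.json', '.json', '_seg.json')   # -> 'input_seg.json'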
def get_uniq_id_with_period(path: str) -> str:
"""
Get uniq_id from path string with period in it.
Args:
path (str): path to audio file
Returns:
uniq_id (str): unique speaker ID
"""
split_path = os.path.basename(path).split('.')[:-1]
uniq_id = '.'.join(split_path) if len(split_path) > 1 else split_path[0]
return uniq_id
def get_subsegment_dict(subsegments_manifest_file: str, window: float, shift: float, deci: int) -> Dict[str, dict]:
"""
Get subsegment dictionary from manifest file.
Args:
subsegments_manifest_file (str): Path to subsegment manifest file
window (float): Window length for segmentation
shift (float): Shift length for segmentation
deci (int): Rounding number of decimal places
Returns:
_subsegment_dict (dict): Subsegment dictionary
"""
_subsegment_dict = {}
with open(subsegments_manifest_file, 'r') as subsegments_manifest:
segments = subsegments_manifest.readlines()
for segment in segments:
segment = segment.strip()
dic = json.loads(segment)
audio, offset, duration, label = dic['audio_filepath'], dic['offset'], dic['duration'], dic['label']
subsegments = get_subsegments(offset=offset, window=window, shift=shift, duration=duration)
if dic['uniq_id'] is not None:
uniq_id = dic['uniq_id']
else:
uniq_id = get_uniq_id_with_period(audio)
if uniq_id not in _subsegment_dict:
_subsegment_dict[uniq_id] = {'ts': [], 'json_dic': []}
for subsegment in subsegments:
start, dur = subsegment
_subsegment_dict[uniq_id]['ts'].append([round(start, deci), round(start + dur, deci)])
_subsegment_dict[uniq_id]['json_dic'].append(dic)
return _subsegment_dict
def get_input_manifest_dict(input_manifest_path: str) -> Dict[str, dict]:
"""
Get dictionary from manifest file.
Args:
input_manifest_path (str): Path to manifest file
Returns:
input_manifest_dict (dict): Dictionary from manifest file
"""
input_manifest_dict = {}
with open(input_manifest_path, 'r') as input_manifest_fp:
json_lines = input_manifest_fp.readlines()
for json_line in json_lines:
dic = json.loads(json_line)
dic["text"] = "-"
uniq_id = get_uniqname_from_filepath(dic["audio_filepath"])
input_manifest_dict[uniq_id] = dic
return input_manifest_dict
def write_truncated_subsegments(
input_manifest_dict: Dict[str, dict],
_subsegment_dict: Dict[str, dict],
output_manifest_path: str,
step_count: int,
deci: int,
):
"""
Write subsegments to manifest filepath.
Args:
input_manifest_dict (dict): Input manifest dictionary
_subsegment_dict (dict): Input subsegment dictionary
output_manifest_path (str): Path to output manifest file
step_count (int): Number of the unit segments you want to create per utterance
deci (int): Rounding number of decimal places
"""
with open(output_manifest_path, 'w') as output_manifest_fp:
for uniq_id, subseg_dict in _subsegment_dict.items():
subseg_array = np.array(subseg_dict['ts'])
subseg_array_idx = np.argsort(subseg_array, axis=0)
chunked_set_count = subseg_array_idx.shape[0] // step_count
for idx in range(chunked_set_count - 1):
chunk_index_stt = subseg_array_idx[:, 0][idx * step_count]
chunk_index_end = subseg_array_idx[:, 1][(idx + 1) * step_count]
offset_sec = subseg_array[chunk_index_stt, 0]
end_sec = subseg_array[chunk_index_end, 1]
dur = round(end_sec - offset_sec, deci)
meta = input_manifest_dict[uniq_id]
meta['offset'] = offset_sec
meta['duration'] = dur
json.dump(meta, output_manifest_fp)
output_manifest_fp.write("\n")
def write_file(name: str, lines: List[dict], idx: int):
"""
Write json lines to file.
Args:
name (str): Output file path
lines (list): List of json lines
idx (int): Indices to dump to the file
"""
with open(name, 'w') as fout:
for i in idx:
dic = lines[i]
json.dump(dic, fout)
fout.write('\n')
def read_file(pathlist: str) -> List[str]:
"""
Read list of lines from target file.
Args:
pathlist (str): Input file path
Returns:
sorted(pathlist) (list): List of lines
"""
with open(pathlist, 'r') as f:
pathlist = f.readlines()
return sorted(pathlist)
def get_dict_from_wavlist(pathlist: List[str]) -> Dict[str, str]:
"""
Create a dictionary of file paths from a list of lines
Args:
pathlist (list): List of file paths
Returns:
path_dict (dict): Dictionary of file paths indexed by unique id
"""
path_dict = od()
pathlist = sorted(pathlist)
for line_path in pathlist:
uniq_id = os.path.basename(line_path).split('.')[0]
path_dict[uniq_id] = line_path
return path_dict
def get_dict_from_list(data_pathlist: List[str], uniqids: List[str]) -> Dict[str, str]:
"""
Create dictionaries from list of lines
Args:
data_pathlist (list): List of file paths
uniqids (list): List of file IDs
Returns:
path_dict (dict): Dictionary containing file paths
"""
path_dict = {}
for line_path in data_pathlist:
uniq_id = os.path.basename(line_path).split('.')[0]
if uniq_id in uniqids:
path_dict[uniq_id] = line_path
else:
raise ValueError(f'uniq id {uniq_id} is not in wav filelist')
return path_dict
def get_path_dict(data_path: str, uniqids: List[str], len_wavs: int = None) -> Dict[str, str]:
"""
Create dictionary from list of lines (using the get_dict_from_list function)
Args:
data_path (str): Path to file containing list of files
uniqids (list): List of file IDs
len_wavs (int): Length of file list
Returns:
data_pathdict (dict): Dictionary containing file paths
"""
if data_path is not None:
data_pathlist = read_file(data_path)
if len_wavs is not None:
assert len(data_pathlist) == len_wavs
data_pathdict = get_dict_from_list(data_pathlist, uniqids)
elif len_wavs is not None:
data_pathdict = {uniq_id: None for uniq_id in uniqids}
return data_pathdict
def create_segment_manifest(
input_manifest_path: str, output_manifest_path: str, window: float, shift: float, step_count: int, deci: int
):
"""
Create segmented manifest file from base manifest file
Args:
input_manifest_path (str): Path to input manifest file
output_manifest_path (str): Path to output manifest file
window (float): Window length for segmentation
shift (float): Shift length for segmentation
step_count (int): Number of the unit segments you want to create per utterance
deci (int): Rounding number of decimal places
"""
if '.json' not in input_manifest_path:
raise ValueError("input_manifest_path file should be .json file format")
if output_manifest_path and '.json' not in output_manifest_path:
raise ValueError("output_manifest_path file should be .json file format")
elif not output_manifest_path:
output_manifest_path = rreplace(input_manifest_path, '.json', f'_{step_count}seg.json')
input_manifest_dict = get_input_manifest_dict(input_manifest_path)
segment_manifest_path = rreplace(input_manifest_path, '.json', '_seg.json')
subsegment_manifest_path = rreplace(input_manifest_path, '.json', '_subseg.json')
min_subsegment_duration = 0.05
step_count = int(step_count)
AUDIO_RTTM_MAP = audio_rttm_map(input_manifest_path)
segments_manifest_file = write_rttm2manifest(AUDIO_RTTM_MAP, segment_manifest_path, decimals=deci)
subsegments_manifest_file = subsegment_manifest_path
segments_manifest_to_subsegments_manifest(
segments_manifest_file, subsegments_manifest_file, window, shift, min_subsegment_duration,
)
subsegments_dict = get_subsegment_dict(subsegments_manifest_file, window, shift, deci)
write_truncated_subsegments(input_manifest_dict, subsegments_dict, output_manifest_path, step_count, deci)
os.remove(segment_manifest_path)
os.remove(subsegment_manifest_path)
def create_manifest(
wav_path: str,
manifest_filepath: str,
text_path: str = None,
rttm_path: str = None,
uem_path: str = None,
ctm_path: str = None,
add_duration: bool = False,
):
"""
Create base manifest file
Args:
wav_path (str): Path to list of wav files
manifest_filepath (str): Path to output manifest file
text_path (str): Path to list of text files
rttm_path (str): Path to list of rttm files
uem_path (str): Path to list of uem files
ctm_path (str): Path to list of ctm files
add_duration (bool): Whether to add durations to the manifest file
"""
if os.path.exists(manifest_filepath):
os.remove(manifest_filepath)
wav_pathlist = read_file(wav_path)
wav_pathdict = get_dict_from_wavlist(wav_pathlist)
len_wavs = len(wav_pathlist)
uniqids = sorted(wav_pathdict.keys())
text_pathdict = get_path_dict(text_path, uniqids, len_wavs)
rttm_pathdict = get_path_dict(rttm_path, uniqids, len_wavs)
uem_pathdict = get_path_dict(uem_path, uniqids, len_wavs)
ctm_pathdict = get_path_dict(ctm_path, uniqids, len_wavs)
lines = []
for uid in uniqids:
wav, text, rttm, uem, ctm = (
wav_pathdict[uid],
text_pathdict[uid],
rttm_pathdict[uid],
uem_pathdict[uid],
ctm_pathdict[uid],
)
audio_line = wav.strip()
if rttm is not None:
rttm = rttm.strip()
labels = rttm_to_labels(rttm)
num_speakers = len(Counter([l.split()[-1] for l in labels]))
else:
num_speakers = None
if uem is not None:
uem = uem.strip()
if text is not None:
with open(text.strip()) as f:
text = f.readlines()[0].strip()
else:
text = "-"
if ctm is not None:
ctm = ctm.strip()
duration = None
if add_duration:
y, sr = librosa.load(audio_line, sr=None)
duration = librosa.get_duration(y=y, sr=sr)
meta = [
{
"audio_filepath": audio_line,
"offset": 0,
"duration": duration,
"label": "infer",
"text": text,
"num_speakers": num_speakers,
"rttm_filepath": rttm,
"uem_filepath": uem,
"ctm_filepath": ctm,
}
]
lines.extend(meta)
write_file(manifest_filepath, lines, range(len(lines)))
def read_manifest(manifest: Union[Path, str]) -> List[dict]:
"""
Read manifest file
Args:
manifest (str or Path): Path to manifest file
Returns:
data (list): List of JSON items
"""
manifest = DataStoreObject(str(manifest))
data = []
try:
f = open(manifest.get(), 'r', encoding='utf-8')
except:
raise Exception(f"Manifest file could not be opened: {manifest}")
for line in f:
item = json.loads(line)
data.append(item)
f.close()
return data
def write_manifest(output_path: Union[Path, str], target_manifest: List[dict], ensure_ascii: bool = True):
"""
Write to manifest file
Args:
output_path (str or Path): Path to output manifest file
target_manifest (list): List of manifest file entries
ensure_ascii (bool): default is True, meaning the output is guaranteed to have all incoming non-ASCII characters escaped. If ensure_ascii is false, these characters will be output as-is.
"""
with open(output_path, "w", encoding="utf-8") as outfile:
for tgt in target_manifest:
json.dump(tgt, outfile, ensure_ascii=ensure_ascii)
outfile.write('\n')
def write_ctm(output_path: str, target_ctm: Dict[str, dict]):
"""
Write ctm entries from diarization session to a .ctm file.
Args:
output_path (str): target file path
target_ctm (dict): list of ctm entries
"""
target_ctm.sort(key=lambda y: y[0])
with open(output_path, "w") as outfile:
for pair in target_ctm:
tgt = pair[1]
outfile.write(tgt)
def write_text(output_path: str, target_ctm: Dict[str, dict]):
"""
Write text from diarization session to a .txt file
Args:
output_path (str): target file path
target_ctm (dict): list of ctm entries
"""
target_ctm.sort(key=lambda y: y[0])
with open(output_path, "w") as outfile:
for pair in target_ctm:
tgt = pair[1]
word = tgt.split(' ')[4]
outfile.write(word + ' ')
outfile.write('\n')
|
NeMo-main
|
nemo/collections/asr/parts/utils/manifest_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import json
import math
import os
import shutil
from copy import deepcopy
from typing import Dict, List, Tuple, Union
import numpy as np
import omegaconf
import soundfile as sf
import torch
from pyannote.core import Annotation, Segment
from tqdm import tqdm
from nemo.collections.asr.data.audio_to_label import repeat_signal
from nemo.collections.asr.parts.utils.offline_clustering import SpeakerClustering, get_argmin_mat, split_input_data
from nemo.utils import logging
"""
This file contains all the utility functions required for the speaker-embedding part of the diarization scripts
"""
def get_uniqname_from_filepath(filepath):
"""
Return base name from provided filepath
"""
if type(filepath) is str:
uniq_id = os.path.splitext(os.path.basename(filepath))[0]
return uniq_id
else:
raise TypeError("input must be filepath string")
def get_uniq_id_from_manifest_line(line: str) -> str:
"""
Retrieve `uniq_id` from the `audio_filepath` in a manifest line.
"""
dic = json.loads(line.strip())
uniq_id = get_uniqname_from_filepath(dic['audio_filepath'])
return uniq_id
def get_uniq_id_with_dur(meta, decimals=3):
"""
Return basename with offset and end time labels
"""
# bare_uniq_id = get_uniqname_from_filepath(meta['audio_filepath'])
bare_uniq_id = get_uniqname_from_filepath(meta['rttm_filepath'])
if meta['offset'] is None and meta['duration'] is None:
return bare_uniq_id
if meta['offset']:
offset = str(int(round(meta['offset'], decimals) * pow(10, decimals)))
else:
offset = 0
if meta['duration']:
endtime = str(int(round(meta['offset'] + meta['duration'], decimals) * pow(10, decimals)))
else:
endtime = 'NULL'
uniq_id = f"{bare_uniq_id}_{offset}_{endtime}"
return uniq_id
def audio_rttm_map(manifest, attach_dur=False):
"""
This function creates AUDIO_RTTM_MAP which is used by all diarization components to extract embeddings,
cluster and unify time stamps
Args:
manifest (str): Path to a manifest file containing the key audio_filepath and, if available,
rttm_filepath, text, num_speakers, uem_filepath and ctm_filepath.
attach_dur (bool): Whether to attach the offset/duration information to the unique id.
returns:
AUDIO_RTTM_MAP (dict) : A dictionary with keys of uniq id, which is being used to map audio files and corresponding rttm files
"""
AUDIO_RTTM_MAP = {}
with open(manifest, 'r') as inp_file:
lines = inp_file.readlines()
logging.info("Number of files to diarize: {}".format(len(lines)))
for line in lines:
line = line.strip()
dic = json.loads(line)
meta = {
'audio_filepath': dic['audio_filepath'],
'rttm_filepath': dic.get('rttm_filepath', None),
'offset': dic.get('offset', None),
'duration': dic.get('duration', None),
'text': dic.get('text', None),
'num_speakers': dic.get('num_speakers', None),
'uem_filepath': dic.get('uem_filepath', None),
'ctm_filepath': dic.get('ctm_filepath', None),
}
if attach_dur:
uniqname = get_uniq_id_with_dur(meta)
else:
uniqname = get_uniqname_from_filepath(filepath=meta['audio_filepath'])
if uniqname not in AUDIO_RTTM_MAP:
AUDIO_RTTM_MAP[uniqname] = meta
else:
raise KeyError(
"file {} is already part of AUDIO_RTTM_MAP, it might be duplicated, Note: file basename must be unique".format(
meta['audio_filepath']
)
)
return AUDIO_RTTM_MAP
def parse_scale_configs(window_lengths_in_sec, shift_lengths_in_sec, multiscale_weights):
"""
Check whether multiscale parameters are provided correctly. window_lengths_in_sec, shift_lengths_in_sec and
multiscale_weights should be all provided in omegaconf.listconfig.ListConfig type. In addition, the scales
should be provided in descending order, from the longest scale to the base scale (the shortest).
Example:
Single-scale setting:
parameters.window_length_in_sec=1.5
parameters.shift_length_in_sec=0.75
parameters.multiscale_weights=null
Multiscale setting (base scale - window_length 0.5 s and shift_length 0.25):
parameters.window_length_in_sec=[1.5,1.0,0.5]
parameters.shift_length_in_sec=[0.75,0.5,0.25]
parameters.multiscale_weights=[1,1,1]
In addition, you can also specify session-by-session multiscale weight. In this case, each dictionary key
points to different weights.
"""
check_float_config = [isinstance(var, float) for var in (window_lengths_in_sec, shift_lengths_in_sec)]
check_list_config = [
isinstance(var, (omegaconf.listconfig.ListConfig, list, tuple))
for var in (window_lengths_in_sec, shift_lengths_in_sec, multiscale_weights)
]
if all(check_list_config) or all(check_float_config):
# If bare floating numbers are provided, convert them to list format.
if all(check_float_config):
window_lengths, shift_lengths, multiscale_weights = (
[window_lengths_in_sec],
[shift_lengths_in_sec],
[1.0],
)
else:
window_lengths, shift_lengths, multiscale_weights = (
window_lengths_in_sec,
shift_lengths_in_sec,
multiscale_weights,
)
length_check = (
len(set([len(window_lengths), len(shift_lengths), len(multiscale_weights)])) == 1
and len(multiscale_weights) > 0
)
scale_order_check = (
list(window_lengths) == sorted(window_lengths)[::-1] and list(shift_lengths) == sorted(shift_lengths)[::-1]
)
# Check whether window lengths are longer than shift lengths
if len(window_lengths) > 1:
shift_length_check = all([w > s for w, s in zip(window_lengths, shift_lengths)])
else:
shift_length_check = window_lengths[0] > shift_lengths[0]
multiscale_args_dict = {'use_single_scale_clustering': False}
if all([length_check, scale_order_check, shift_length_check]):
if len(window_lengths) > 1:
multiscale_args_dict['scale_dict'] = {
k: (w, s) for k, (w, s) in enumerate(zip(window_lengths, shift_lengths))
}
else:
multiscale_args_dict['scale_dict'] = {0: (window_lengths[0], shift_lengths[0])}
multiscale_args_dict['multiscale_weights'] = multiscale_weights
return multiscale_args_dict
else:
raise ValueError('Multiscale parameters are not properly setup.')
elif any(check_list_config):
raise ValueError(
'You must provide a list config for all three parameters: window, shift and multiscale weights.'
)
else:
return None
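# A minimal usage sketch for a three-scale setup (hypothetical values):
#     parse_scale_configs(
#         window_lengths_in_sec=[1.5, 1.0, 0.5],
#         shift_lengths_in_sec=[0.75, 0.5, 0.25],
#         multiscale_weights=[1, 1, 1],
#     )
#     # -> {'use_single_scale_clustering': False,
#     #     'scale_dict': {0: (1.5, 0.75), 1: (1.0, 0.5), 2: (0.5, 0.25)},
#     #     'multiscale_weights': [1, 1, 1]}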
def get_embs_and_timestamps(multiscale_embeddings_and_timestamps, multiscale_args_dict):
"""
The embeddings and timestamps in multiscale_embeddings_and_timestamps dictionary are
indexed by scale index. This function rearranges the extracted speaker embedding and
timestamps by unique ID to make the further processing more convenient.
Args:
multiscale_embeddings_and_timestamps (dict):
Dictionary of embeddings and timestamps for each scale.
multiscale_args_dict (dict):
Dictionary of scale information: window, shift and multiscale weights.
Returns:
embs_and_timestamps (dict)
A dictionary containing embeddings and timestamps of each scale, indexed by unique ID.
"""
embs_and_timestamps = {uniq_id: {} for uniq_id in multiscale_embeddings_and_timestamps[0][0].keys()}
if multiscale_args_dict['use_single_scale_clustering']:
_multiscale_args_dict = deepcopy(multiscale_args_dict)
_multiscale_args_dict['scale_dict'] = {0: multiscale_args_dict['scale_dict'][0]}
_multiscale_args_dict['multiscale_weights'] = multiscale_args_dict['multiscale_weights'][:1]
else:
_multiscale_args_dict = multiscale_args_dict
embeddings, timestamps = multiscale_embeddings_and_timestamps[0]
for uniq_id in embeddings.keys():
embeddings_list, time_stamps_list, segment_index_list = [], [], []
for scale_idx in sorted(_multiscale_args_dict['scale_dict'].keys()):
embeddings, timestamps = multiscale_embeddings_and_timestamps[scale_idx]
if len(embeddings[uniq_id]) != len(timestamps[uniq_id]):
raise ValueError("Mismatch of counts between embedding vectors and timestamps")
time_stamps_tensor = torch.tensor(timestamps[uniq_id])
embeddings_list.append(embeddings[uniq_id])
segment_index_list.append(embeddings[uniq_id].shape[0])
time_stamps_list.append(time_stamps_tensor)
embs_and_timestamps[uniq_id]['multiscale_weights'] = (
torch.tensor(_multiscale_args_dict['multiscale_weights']).unsqueeze(0).float()
)
embs_and_timestamps[uniq_id]['embeddings'] = torch.cat(embeddings_list, dim=0)
embs_and_timestamps[uniq_id]['timestamps'] = torch.cat(time_stamps_list, dim=0)
embs_and_timestamps[uniq_id]['multiscale_segment_counts'] = torch.tensor(segment_index_list)
return embs_and_timestamps
def get_timestamps(multiscale_timestamps, multiscale_args_dict):
"""
The timestamps in `multiscale_timestamps` dictionary are indexed by scale index.
This function rearranges the extracted speaker embedding and timestamps by unique ID to make the further processing more convenient.
Args:
multiscale_timestamps (dict):
Dictionary of timestamps for each scale.
multiscale_args_dict (dict):
Dictionary of scale information: window, shift and multiscale weights.
Returns:
timestamps_dict (dict)
A dictionary containing embeddings and timestamps of each scale, indexed by unique ID.
"""
timestamps_dict = {uniq_id: {'scale_dict': {}} for uniq_id in multiscale_timestamps[0].keys()}
for scale_idx in sorted(multiscale_args_dict['scale_dict'].keys()):
time_stamps = multiscale_timestamps[scale_idx]
for uniq_id in time_stamps.keys():
timestamps_dict[uniq_id]['scale_dict'][scale_idx] = {
'time_stamps': time_stamps[uniq_id],
}
return timestamps_dict
def get_contiguous_stamps(stamps):
"""
Return contiguous time stamps
"""
lines = deepcopy(stamps)
contiguous_stamps = []
for i in range(len(lines) - 1):
start, end, speaker = lines[i].split()
next_start, next_end, next_speaker = lines[i + 1].split()
if float(end) > float(next_start):
avg = str((float(next_start) + float(end)) / 2.0)
lines[i + 1] = ' '.join([avg, next_end, next_speaker])
contiguous_stamps.append(start + " " + avg + " " + speaker)
else:
contiguous_stamps.append(start + " " + end + " " + speaker)
start, end, speaker = lines[-1].split()
contiguous_stamps.append(start + " " + end + " " + speaker)
return contiguous_stamps
def merge_stamps(lines):
"""
Merge time stamps of the same speaker.
"""
stamps = deepcopy(lines)
overlap_stamps = []
for i in range(len(stamps) - 1):
start, end, speaker = stamps[i].split()
next_start, next_end, next_speaker = stamps[i + 1].split()
if float(end) == float(next_start) and speaker == next_speaker:
stamps[i + 1] = ' '.join([start, next_end, next_speaker])
else:
overlap_stamps.append(start + " " + end + " " + speaker)
start, end, speaker = stamps[-1].split()
overlap_stamps.append(start + " " + end + " " + speaker)
return overlap_stamps
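# A minimal usage sketch: the two helpers above are applied in sequence, first
# resolving an overlap at its midpoint and then merging same-speaker segments.
#     lines = ['0.0 1.5 speaker_0', '1.25 2.0 speaker_0', '2.0 3.0 speaker_1']
#     merge_stamps(get_contiguous_stamps(lines))
#     # -> ['0.0 2.0 speaker_0', '2.0 3.0 speaker_1']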
def labels_to_pyannote_object(labels, uniq_name=''):
"""
Convert the given labels to pyannote object to calculate DER and for visualization
"""
annotation = Annotation(uri=uniq_name)
for label in labels:
start, end, speaker = label.strip().split()
start, end = float(start), float(end)
annotation[Segment(start, end)] = speaker
return annotation
def labels_to_rttmfile(labels, uniq_id, out_rttm_dir):
"""
Write rttm file with uniq_id name in out_rttm_dir with timestamps in labels
"""
filename = os.path.join(out_rttm_dir, uniq_id + '.rttm')
with open(filename, 'w') as f:
for line in labels:
line = line.strip()
start, end, speaker = line.split()
duration = float(end) - float(start)
start = float(start)
log = 'SPEAKER {} 1 {:.3f} {:.3f} <NA> <NA> {} <NA> <NA>\n'.format(uniq_id, start, duration, speaker)
f.write(log)
return filename
def string_to_float(x, round_digits):
"""
Convert string to float then round the number.
"""
return round(float(x), round_digits)
def convert_rttm_line(rttm_line, round_digits=3):
"""
Convert a line in RTTM file to speaker label, start and end timestamps.
Args:
rttm_line (str):
A line in RTTM formatted file containing offset and duration of each segment.
round_digits (int):
Number of digits to be rounded.
Returns:
start (float)
Start timestamp in floating point number.
end (float):
End timestamp in floating point number.
speaker (str):
speaker string in RTTM lines.
"""
rttm = rttm_line.strip().split()
start = string_to_float(rttm[3], round_digits)
end = string_to_float(rttm[4], round_digits) + string_to_float(rttm[3], round_digits)
speaker = rttm[7]
return start, end, speaker
def rttm_to_labels(rttm_filename):
"""
Prepare time stamps label list from rttm file
"""
labels = []
with open(rttm_filename, 'r') as f:
for line in f.readlines():
start, end, speaker = convert_rttm_line(line, round_digits=3)
labels.append('{} {} {}'.format(start, end, speaker))
return labels
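# A minimal usage sketch on a hypothetical RTTM line (offset 10.50 s, duration
# 2.00 s), showing the (start, end, speaker) conversion used by the helpers above:
#     convert_rttm_line('SPEAKER rec1 1 10.50 2.00 <NA> <NA> speaker_0 <NA> <NA>')
#     # -> (10.5, 12.5, 'speaker_0')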
def write_cluster_labels(base_scale_idx, lines_cluster_labels, out_rttm_dir):
"""
Write cluster labels that are generated from clustering into a file.
Args:
base_scale_idx (int): The base scale index which is the highest scale index.
lines_cluster_labels (list): The start and end time-stamps of each segment with the predicted cluster label.
out_rttm_dir (str): The path where output rttm files are saved.
"""
out_label_name = os.path.join(
out_rttm_dir, '../speaker_outputs', f'subsegments_scale{base_scale_idx}_cluster.label'
)
with open(out_label_name, 'w') as f:
for clus_label_line in lines_cluster_labels:
f.write(clus_label_line)
def generate_cluster_labels(segment_ranges: List[str], cluster_labels: List[int]):
"""
Generate cluster (speaker labels) from the segment_range list and cluster label list.
Args:
segment_ranges (list):
List containing intervals (start and end timestapms, ranges) of each segment
cluster_labels (list):
List containing a cluster label sequence
Returns:
diar_hyp (list):
List containing merged speaker-turn-level timestamps and labels in string format
Example:
>>> diar_hyp = ['0.0 4.375 speaker_1', '4.375 5.125 speaker_0', ...]
lines (list)
List containing raw segment-level timestamps and labels in raw digits
>>> lines = ['0.0 0.25 speaker_1', '0.25 0.5 speaker_1', ..., '4.125 4.375 speaker_1']
"""
lines = []
for idx, label in enumerate(cluster_labels):
tag = 'speaker_' + str(label)
stt, end = segment_ranges[idx]
lines.append(f"{stt} {end} {tag}")
cont_lines = get_contiguous_stamps(lines)
diar_hyp = merge_stamps(cont_lines)
return diar_hyp, lines
def perform_clustering(
embs_and_timestamps, AUDIO_RTTM_MAP, out_rttm_dir, clustering_params, device, verbose: bool = True
):
"""
Performs spectral clustering on embeddings with time stamps generated from VAD output
Args:
embs_and_timestamps (dict): This dictionary contains the following items indexed by unique IDs.
'embeddings' : Tensor containing embeddings. Dimensions:(# of embs) x (emb. dimension)
'timestamps' : Tensor containing time stamps list for each audio recording
'multiscale_segment_counts' : Tensor containing the number of segments for each scale
AUDIO_RTTM_MAP (dict): AUDIO_RTTM_MAP for mapping unique id with audio file path and rttm path
out_rttm_dir (str): Path to write predicted rttms
clustering_params (dict): clustering parameters provided through config that contains max_num_speakers (int),
oracle_num_speakers (bool), max_rp_threshold(float), sparse_search_volume(int) and enhance_count_threshold (int)
use_torch_script (bool): Boolean that determines whether to use torch.jit.script for speaker clustering
device (torch.device): Device we are running on ('cpu', 'cuda').
verbose (bool): Enable TQDM progress bar.
Returns:
all_reference (list[uniq_name,Annotation]): reference annotations for score calculation
all_hypothesis (list[uniq_name,Annotation]): hypothesis annotations for score calculation
"""
all_hypothesis = []
all_reference = []
no_references = False
lines_cluster_labels = []
cuda = True
if device.type != 'cuda':
logging.warning("cuda=False, using CPU for eigen decomposition. This might slow down the clustering process.")
cuda = False
speaker_clustering = SpeakerClustering(cuda=cuda)
# If True, export torch script module and save it to the base folder.
if clustering_params.get('export_script_module', False):
speaker_clustering = torch.jit.script(speaker_clustering)
torch.jit.save(speaker_clustering, 'speaker_clustering_script.pt')
for uniq_id, audio_rttm_values in tqdm(AUDIO_RTTM_MAP.items(), desc='clustering', leave=True, disable=not verbose):
uniq_embs_and_timestamps = embs_and_timestamps[uniq_id]
if clustering_params.oracle_num_speakers:
num_speakers = audio_rttm_values.get('num_speakers', None)
if num_speakers is None:
raise ValueError("Provided option as oracle num of speakers but num_speakers in manifest is null")
else:
num_speakers = -1
base_scale_idx = uniq_embs_and_timestamps['multiscale_segment_counts'].shape[0] - 1
cluster_labels = speaker_clustering.forward_infer(
embeddings_in_scales=uniq_embs_and_timestamps['embeddings'],
timestamps_in_scales=uniq_embs_and_timestamps['timestamps'],
multiscale_segment_counts=uniq_embs_and_timestamps['multiscale_segment_counts'],
multiscale_weights=uniq_embs_and_timestamps['multiscale_weights'],
oracle_num_speakers=int(num_speakers),
max_num_speakers=int(clustering_params.max_num_speakers),
max_rp_threshold=float(clustering_params.max_rp_threshold),
sparse_search_volume=int(clustering_params.sparse_search_volume),
)
del uniq_embs_and_timestamps
if cuda:
torch.cuda.empty_cache()
else:
gc.collect()
timestamps = speaker_clustering.timestamps_in_scales[base_scale_idx]
cluster_labels = cluster_labels.cpu().numpy()
if len(cluster_labels) != timestamps.shape[0]:
raise ValueError("Mismatch of length between cluster_labels and timestamps.")
labels, lines = generate_cluster_labels(timestamps, cluster_labels)
if out_rttm_dir:
labels_to_rttmfile(labels, uniq_id, out_rttm_dir)
lines_cluster_labels.extend([f'{uniq_id} {seg_line}\n' for seg_line in lines])
hypothesis = labels_to_pyannote_object(labels, uniq_name=uniq_id)
all_hypothesis.append([uniq_id, hypothesis])
rttm_file = audio_rttm_values.get('rttm_filepath', None)
if rttm_file is not None and os.path.exists(rttm_file) and not no_references:
ref_labels = rttm_to_labels(rttm_file)
reference = labels_to_pyannote_object(ref_labels, uniq_name=uniq_id)
all_reference.append([uniq_id, reference])
else:
no_references = True
all_reference = []
if out_rttm_dir:
write_cluster_labels(base_scale_idx, lines_cluster_labels, out_rttm_dir)
return all_reference, all_hypothesis
def get_vad_out_from_rttm_line(rttm_line):
"""
Extract VAD timestamp from the given RTTM lines.
"""
vad_out = rttm_line.strip().split()
if len(vad_out) > 3:
start, dur, _ = float(vad_out[3]), float(vad_out[4]), vad_out[7]
else:
start, dur, _ = float(vad_out[0]), float(vad_out[1]), vad_out[2]
return start, dur
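# A minimal usage sketch with hypothetical lines: both full RTTM lines and
# short VAD-table lines yield a (start, duration) pair.
#     get_vad_out_from_rttm_line('SPEAKER rec1 1 10.5 2.0 <NA> <NA> speech <NA> <NA>')  # -> (10.5, 2.0)
#     get_vad_out_from_rttm_line('10.5 2.0 speech')                                     # -> (10.5, 2.0)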
def get_offset_and_duration(AUDIO_RTTM_MAP, uniq_id, decimals=5):
"""
Extract offset and duration information from AUDIO_RTTM_MAP dictionary.
If duration information is not specified, a duration value is extracted from the audio file directly.
Args:
AUDIO_RTTM_MAP (dict):
Dictionary containing RTTM file information, which is indexed by unique file id.
uniq_id (str):
Unique file id
Returns:
offset (float):
The offset value that determines the beginning of the audio stream.
duration (float):
The length of audio stream that is expected to be used.
"""
audio_path = AUDIO_RTTM_MAP[uniq_id]['audio_filepath']
if AUDIO_RTTM_MAP[uniq_id].get('duration', None):
duration = round(AUDIO_RTTM_MAP[uniq_id]['duration'], decimals)
offset = round(AUDIO_RTTM_MAP[uniq_id]['offset'], decimals)
else:
sound = sf.SoundFile(audio_path)
duration = sound.frames / sound.samplerate
offset = 0.0
return offset, duration
def write_overlap_segments(outfile, AUDIO_RTTM_MAP, uniq_id, overlap_range_list, decimals=5):
"""
Write the json dictionary into the specified manifest file.
Args:
outfile:
File pointer that indicates output file path.
AUDIO_RTTM_MAP (dict):
Dictionary containing the input manifest information
uniq_id (str):
Unique file id
overlap_range_list (list):
List containing overlapping ranges between target and source.
decimals (int):
Number of decimals to round the offset and duration values.
"""
audio_path = AUDIO_RTTM_MAP[uniq_id]['audio_filepath']
for (stt, end) in overlap_range_list:
meta = {
"audio_filepath": audio_path,
"offset": round(stt, decimals),
"duration": round(end - stt, decimals),
"label": 'UNK',
"uniq_id": uniq_id,
}
json.dump(meta, outfile)
outfile.write("\n")
def read_rttm_lines(rttm_file_path):
"""
Read rttm files and return the rttm information lines.
Args:
rttm_file_path (str):
An absolute path to an RTTM file
Returns:
lines (list):
List containing the strings from the RTTM file.
"""
if rttm_file_path and os.path.exists(rttm_file_path):
with open(rttm_file_path, 'r') as f:
lines = f.readlines()
else:
raise FileNotFoundError(
"Requested to construct manifest from rttm with oracle VAD option or from NeMo VAD but received filename as {}".format(
rttm_file_path
)
)
return lines
def validate_vad_manifest(AUDIO_RTTM_MAP, vad_manifest):
"""
This function will check the valid speech segments in the manifest file which is either
generated from NeMo voice activity detection (VAD) or oracle VAD.
If an audio file does not contain any valid speech segments, we ignore the audio file
(indexed by uniq_id) for the rest of the processing steps.
"""
vad_uniq_ids = set()
with open(vad_manifest, 'r') as vad_file:
for line in vad_file:
line = line.strip()
dic = json.loads(line)
if dic['duration'] > 0:
vad_uniq_ids.add(dic['uniq_id'])
provided_uniq_ids = set(AUDIO_RTTM_MAP.keys())
silence_ids = provided_uniq_ids - vad_uniq_ids
for uniq_id in silence_ids:
del AUDIO_RTTM_MAP[uniq_id]
logging.warning(f"{uniq_id} is ignored since the file does not contain any speech signal to be processed.")
if len(AUDIO_RTTM_MAP) == 0:
raise ValueError("All files present in the manifest contain silence; aborting the next steps")
def is_overlap(rangeA: List[float], rangeB: List[float]) -> bool:
"""
Check whether two ranges have overlap.
Args:
rangeA (list, tuple):
List or tuple containing start and end value in float.
rangeB (list, tuple):
List or tuple containing start and end value in float.
Returns:
(bool):
Boolean that indicates whether the input ranges have overlap.
"""
start1, end1 = rangeA[0], rangeA[1]
start2, end2 = rangeB[0], rangeB[1]
return end1 > start2 and end2 > start1
def get_overlap_range(rangeA: List[float], rangeB: List[float]):
"""
Calculate the overlapping range between rangeA and rangeB.
Args:
rangeA (list, tuple):
List or tuple containing start and end value in float.
rangeB (list, tuple):
List or tuple containing start and end value in float.
Returns:
(list):
List containing the overlapping range between rangeA and rangeB.
"""
assert is_overlap(rangeA, rangeB), f"There is no overlap between rangeA:{rangeA} and rangeB:{rangeB}"
return [max(rangeA[0], rangeB[0]), min(rangeA[1], rangeB[1])]
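# A minimal usage sketch: ranges that merely touch at a point do not count as
# overlapping, so `get_overlap_range` would raise for them.
#     is_overlap([0.0, 1.5], [1.0, 2.0])         # -> True
#     get_overlap_range([0.0, 1.5], [1.0, 2.0])  # -> [1.0, 1.5]
#     is_overlap([0.0, 1.0], [1.0, 2.0])         # -> False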
def merge_int_intervals(intervals_in: List[List[int]]) -> List[List[int]]:
"""
Interval merging algorithm which has `O(N*logN)` time complexity. (N is number of intervals)
Merge the range pairs if overlap exists between the given ranges.
This algorithm needs a sorted range list in terms of the start time.
Note that ranges that overlap or touch (the end of one equals the start of the next) are merged.
Example:
input: [(1, 10), (10, 20)]
output: [(1, 20)]
Refer to the original code at https://stackoverflow.com/a/59378428
Args:
intervals_in (list):
List containing ranges.
Example:
>>> intervals_in
[(102, 103), (104, 109), (107, 120)]
Returns:
merged_list (list):
List containing the combined ranges.
Example:
>>> merged_list
[(102, 120)]
"""
num_intervals = len(intervals_in)
if num_intervals == 0:
return []
elif num_intervals == 1:
return intervals_in
else:
merged_list: List[List[int]] = []
stt2: int = 0
end2: int = 0
intervals_in = [[int(x[0]), int(x[1])] for x in intervals_in]
interval_tensor: torch.Tensor = torch.tensor(intervals_in)
_sorted, _ = torch.sort(interval_tensor, dim=0)
_sorted_int: List[List[int]] = [[int(x[0]), int(x[1])] for x in _sorted.cpu()]
intervals: List[List[int]] = _sorted_int
start, end = intervals[0][0], intervals[0][1]
for i in range(1, num_intervals):
stt2, end2 = intervals[i][0], intervals[i][1]
if end >= stt2:
end = max(end2, end)
else:
start, end = int(start), int(end)
merged_list.append([start, end])
start = stt2
end = max(end2, end)
start, end = int(start), int(end)
merged_list.append([start, end])
return merged_list
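# A minimal usage sketch: intervals that overlap or touch (end >= next start)
# are merged, while disjoint ones are kept separate.
#     merge_int_intervals([[1, 5], [4, 9], [12, 14]])   # -> [[1, 9], [12, 14]]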
def fl2int(x: float, decimals: int = 3) -> int:
"""
Convert floating point number to integer.
"""
return torch.round(torch.tensor([x * (10 ** decimals)]), decimals=0).int().item()
def int2fl(x: int, decimals: int = 3) -> float:
"""
Convert integer to floating point number.
"""
return torch.round(torch.tensor([x / (10 ** decimals)]), decimals=decimals).item()
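# A minimal usage sketch of the fixed-point conversion used by the interval
# helpers below (values pass through float32 tensors, so they are approximate):
#     fl2int(1.234)    # -> 1234
#     int2fl(1234)     # -> approximately 1.234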
def merge_float_intervals(ranges: List[List[float]], decimals: int = 5, margin: int = 2) -> List[List[float]]:
"""
Combine overlapping ranges given as floating point numbers. Since neighboring integers are considered a continuous range,
a margin is added to the start of each range before merging and then subtracted from the resulting range.
Args:
ranges (list):
List containing ranges.
Example: [(10.2, 10.83), (10.42, 10.91), (10.45, 12.09)]
decimals (int):
Number of rounding decimals
margin (int):
margin for determining overlap of the two ranges when ranges are converted to integer ranges.
Default is margin=2 which follows the python index convention.
Examples:
If margin is 0:
[(1, 10), (10, 20)] -> [(1, 20)]
[(1, 10), (11, 20)] -> [(1, 20)]
If margin is 1:
[(1, 10), (10, 20)] -> [(1, 20)]
[(1, 10), (11, 20)] -> [(1, 10), (11, 20)]
If margin is 2:
[(1, 10), (10, 20)] -> [(1, 10), (10, 20)]
[(1, 10), (11, 20)] -> [(1, 10), (11, 20)]
Returns:
merged_list (list):
List containing the combined ranges.
Example: [(10.2, 12.09)]
"""
ranges_int: List[List[int]] = []
merged_ranges_int: List[List[int]] = []
for x in ranges:
stt, end = int(fl2int(x[0], decimals) + margin), int(fl2int(x[1], decimals))
if stt < end:
ranges_int.append([stt, end])
merged_ranges_int = merge_int_intervals(ranges_int)
merged_ranges_float: List[List[float]] = []
merged_ranges_float = [[int2fl(x[0] - margin, decimals), int2fl(x[1], decimals)] for x in merged_ranges_int]
return merged_ranges_float
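# A minimal usage sketch, reproducing the docstring example above:
#     merge_float_intervals([[10.2, 10.83], [10.42, 10.91], [10.45, 12.09]])
#     # -> approximately [[10.2, 12.09]]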
def get_sub_range_list(target_range: List[float], source_range_list: List[List[float]]) -> List[List[float]]:
"""
Get the ranges that overlap with the target range from the source_range_list.
Example:
source range:
|===--======---=====---====--|
target range:
|--------================----|
out_range:
|--------===---=====---==----|
Args:
target_range (list):
A range (a start and end value pair) that defines the target range we want to select.
target_range = [start, end]
source_range_list (list):
List containing the subranges that need to be selected.
source_range = [(start0, end0), (start1, end1), ...]
Returns:
out_range (list):
List containing the overlap between target_range and
source_range_list.
"""
if len(target_range) == 0:
return []
else:
out_range: List[List[float]] = []
for s_range in source_range_list:
if is_overlap(s_range, target_range):
ovl_range = get_overlap_range(s_range, target_range)
out_range.append(ovl_range)
return out_range
def write_rttm2manifest(
AUDIO_RTTM_MAP: dict, manifest_file: str, include_uniq_id: bool = False, decimals: int = 5
) -> str:
"""
Write manifest file based on rttm files (or vad table out files). This manifest file would be used by
speaker diarizer to compute embeddings and cluster them. This function takes care of overlapping VAD timestamps
and trims them with the given offset and duration values.
Args:
AUDIO_RTTM_MAP (dict):
Dictionary keyed by unique names, whose entries contain the audio filepath and rttm_filepath;
these are used to extract oracle VAD timestamps.
manifest_file (str):
The path to the output manifest file.
Returns:
manifest_file (str):
The path to the output manifest file.
"""
with open(manifest_file, 'w') as outfile:
for uniq_id in AUDIO_RTTM_MAP:
rttm_file_path = AUDIO_RTTM_MAP[uniq_id]['rttm_filepath']
rttm_lines = read_rttm_lines(rttm_file_path)
offset, duration = get_offset_and_duration(AUDIO_RTTM_MAP, uniq_id, decimals)
vad_start_end_list_raw = []
for line in rttm_lines:
start, dur = get_vad_out_from_rttm_line(line)
vad_start_end_list_raw.append([start, start + dur])
vad_start_end_list = merge_float_intervals(vad_start_end_list_raw, decimals)
if len(vad_start_end_list) == 0:
logging.warning(f"File ID: {uniq_id}: The VAD label does not contain any speech segments.")
elif duration <= 0:
logging.warning(f"File ID: {uniq_id}: The audio file has negative or zero duration.")
else:
overlap_range_list = get_sub_range_list(
source_range_list=vad_start_end_list, target_range=[offset, offset + duration]
)
write_overlap_segments(outfile, AUDIO_RTTM_MAP, uniq_id, overlap_range_list, decimals)
return manifest_file
def segments_manifest_to_subsegments_manifest(
segments_manifest_file: str,
subsegments_manifest_file: str = None,
window: float = 1.5,
shift: float = 0.75,
min_subsegment_duration: float = 0.05,
include_uniq_id: bool = False,
):
"""
Generate subsegments manifest from segments manifest file
Args:
segments_manifest_file (str): path to segments manifest file, typically from VAD output
subsegments_manifest_file (str): path to output subsegments manifest file (default (None) : writes to current working directory)
window (float): window length for segments to subsegments length
shift (float): hop length for subsegments shift
min_subsegment_duration (float): exclude subsegments smaller than this duration value
Returns:
returns path to subsegment manifest file
"""
if subsegments_manifest_file is None:
pwd = os.getcwd()
subsegments_manifest_file = os.path.join(pwd, 'subsegments.json')
with open(segments_manifest_file, 'r') as segments_manifest, open(
subsegments_manifest_file, 'w'
) as subsegments_manifest:
segments = segments_manifest.readlines()
for segment in segments:
segment = segment.strip()
dic = json.loads(segment)
audio, offset, duration, label = dic['audio_filepath'], dic['offset'], dic['duration'], dic['label']
subsegments = get_subsegments(offset=offset, window=window, shift=shift, duration=duration)
if include_uniq_id and 'uniq_id' in dic:
uniq_id = dic['uniq_id']
else:
uniq_id = None
for subsegment in subsegments:
start, dur = subsegment
if dur > min_subsegment_duration:
meta = {
"audio_filepath": audio,
"offset": start,
"duration": dur,
"label": label,
"uniq_id": uniq_id,
}
json.dump(meta, subsegments_manifest)
subsegments_manifest.write("\n")
return subsegments_manifest_file
def get_subsegments(offset: float, window: float, shift: float, duration: float) -> List[List[float]]:
"""
Return subsegments from a segment of audio file
Args:
offset (float): start time of audio segment
window (float): window length for segments to subsegments length
shift (float): hop length for subsegments shift
duration (float): duration of segment
Returns:
subsegments (List[List[float]]): subsegments generated for the segment, as a list of [start, duration] pairs
"""
subsegments: List[List[float]] = []
start = offset
slice_end = start + duration
base = math.ceil((duration - window) / shift)
slices = 1 if base < 0 else base + 1
for slice_id in range(slices):
end = start + window
if end > slice_end:
end = slice_end
subsegments.append([start, end - start])
start = offset + (slice_id + 1) * shift
return subsegments
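# A minimal usage sketch: a 3.0 s segment starting at 12.0 s, split with a
# 1.5 s window and 0.75 s shift, yields three [start, duration] subsegments.
#     get_subsegments(offset=12.0, window=1.5, shift=0.75, duration=3.0)
#     # -> [[12.0, 1.5], [12.75, 1.5], [13.5, 1.5]]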
def get_target_sig(sig, start_sec: float, end_sec: float, slice_length: int, sample_rate: int,) -> torch.Tensor:
"""
Extract time-series signal from the given audio buffer based on the start and end
timestamps.
Args:
start_sec (float):
Start of the targeted segments in second
end_sec (float):
End of the targeted segments in second
slice_length (int):
Length of the entire audio segment that the samples are extracted from
sample_rate (int):
Sampling rate of the time-series audio signal
Returns:
(Tensor) Trimmed time-series audio signal samples
"""
start_idx = int(start_sec * sample_rate)
end_idx = min(int(end_sec * sample_rate), int(slice_length + start_idx))
return sig[start_idx:end_idx]
def check_ranges(range_tensor):
"""
Check whether the range list has any faulty timestamp order.
Args:
range_tensor (list):
List containing the start and end time of the segments.
Example:
>>> range_tensor = [[0.5, 3.12], [3.51, 7.26], ... ]
"""
for k in range(range_tensor.shape[0]):
range_tup = range_tensor[k]
if range_tup[1] < range_tup[0]:
raise ValueError(f"Range start time should precede the end time, but got: {range_tup}")
return True
def tensor_to_list(range_tensor: torch.Tensor) -> List[List[float]]:
"""
For online segmentation. Force the list elements to be float type.
"""
return [[float(range_tensor[k][0]), float(range_tensor[k][1])] for k in range(range_tensor.shape[0])]
def get_speech_labels_for_update(
frame_start: float,
buffer_end: float,
vad_timestamps: torch.Tensor,
cumulative_speech_labels: torch.Tensor,
cursor_for_old_segments: float,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Bring the new speech labels from the current buffer. The steps are as follows:
1. Concatenate the old speech labels from self.cumulative_speech_labels for the overlapped region.
- This goes to new_speech_labels.
2. Update the new 1 sec of speech label (speech_label_for_new_segments) to self.cumulative_speech_labels.
3. Return the speech label from cursor_for_old_segments to buffer end.
Args:
frame_start (float):
Start of the middle audio chunk in the audio buffer
buffer_end (float):
End of the audio buffer
vad_timestamps (Tensor):
Tensor containing VAD intervals (start and end timestamps)
cumulative_speech_labels (torch.Tensor):
Cumulative speech/non-speech timestamps (equivalent to VAD timestamps)
cursor_for_old_segments (float):
Floating point number that indicates the point where new segments should replace
the old segments
Returns:
speech_label_for_new_segments (Tensor):
The intervals (start and end) timestamps where the new incoming speech segments should
be collected from
cumulative_speech_labels (Tensor):
Cumulative speech/non-speech timestamps (equivalent to VAD timestamps) with newly added
speech/non-speech timestamps from the `vad_timestamps` input
"""
update_overlap_range: List[float] = []
if cursor_for_old_segments < frame_start:
update_overlap_range = [float(cursor_for_old_segments), float(frame_start)]
# Get VAD timestamps that are in (frame_start, buffer_end) range
vad_timestamps = tensor_to_list(vad_timestamps)
cumulative_speech_labels = tensor_to_list(cumulative_speech_labels)
new_incoming_speech_labels = get_sub_range_list(
target_range=[float(frame_start), float(buffer_end)], source_range_list=vad_timestamps
)
# Update the speech label by including overlapping region with the previous output
update_overlap_speech_labels = get_sub_range_list(
target_range=update_overlap_range, source_range_list=cumulative_speech_labels
)
# Speech segments for embedding extractions
speech_label_for_new_segments = merge_float_intervals(
update_overlap_speech_labels + new_incoming_speech_labels, margin=0
)
# Keep cumulative VAD labels for the future use
cumulative_speech_labels = merge_float_intervals(cumulative_speech_labels + new_incoming_speech_labels, margin=0)
# Convert the lists back to type torch.Tensor
speech_label_for_new_segments = torch.tensor(speech_label_for_new_segments)
cumulative_speech_labels = torch.tensor(cumulative_speech_labels)
return speech_label_for_new_segments, cumulative_speech_labels
def get_new_cursor_for_update(frame_start: float, segment_range_ts: List[List[float]],) -> Tuple[float, int]:
"""
Function for updating a cursor for online speaker diarization.
Remove the old segments that overlap with the new frame (self.frame_start);
cursor_for_old_segments is set to the onset of the most recently popped t_range.
Args:
frame_start (float):
Start of streaming pipeline frame
segment_range_ts (float):
Interval (start and end timestamps) of the targeted segments
Returns:
cursor_for_old_segments (float):
Floating point number that indicates the point where new segments should replace
the old segments
cursor_index (int):
The index of the first newly accepted segments
"""
cursor_for_old_segments = frame_start
cursor_index: int = len(segment_range_ts)
count = 0
while len(segment_range_ts) > 0:
t_range = segment_range_ts[-1 * (count + 1)]
if frame_start <= t_range[1]:
count += 1
cursor_for_old_segments = t_range[0]
else:
break
cursor_index = len(segment_range_ts) - count
return cursor_for_old_segments, cursor_index
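# A minimal usage sketch with hypothetical segment ranges: the last segment
# overlaps the new frame start, so the cursor moves back to its onset and the
# returned index marks the first segment to be replaced.
#     get_new_cursor_for_update(frame_start=4.0, segment_range_ts=[[0.0, 1.5], [1.5, 3.0], [3.5, 5.0]])
#     # -> (3.5, 2)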
def get_online_segments_from_slices(
sig: torch.Tensor,
buffer_start: float,
buffer_end: float,
subsegments: List[List[float]],
ind_offset: int,
window: float,
sample_rate: int,
) -> Tuple[int, List[torch.Tensor], List[List[float]], List[int]]:
"""
Create short speech segments from slices for online processing purpose.
Args:
sig (Tensor):
Tensor containing the raw time-series signal
buffer_start (float):
Start point of the time-series signal buffer
buffer_end (float):
End point of the time-series signal buffer
subsegments (list):
List containing the interval information (start and duration) of each segment
ind_offset (int):
Offset for index that compensates the point of the current position in the streaming session
window (float):
Window length in second
sample_rate (int):
Sampling rate of the time-series audio signal
Returns:
ind_offset (int):
Updated segment index offset
sigs_list (list):
List of sliced input signal tensors
sig_rangel_list (list):
List of absolute time ranges (start and end) of each slice
sig_indexes (list):
List of segment indices
"""
sig_rangel_list: List[List[float]] = []
sig_indexes: List[int] = []
sigs_list: List[torch.Tensor] = []
slice_length: int = int(window * sample_rate)
end_sec: float = 0.0
for subseg in subsegments:
start_sec, dur = subseg[0], subseg[1]
if start_sec > buffer_end:
continue
ind_offset += 1
buffer_len = buffer_end - buffer_start
end_sec = float(start_sec + dur)
if end_sec > buffer_len:
end_sec = float(min(end_sec, buffer_len))
signal = get_target_sig(sig, start_sec, end_sec, slice_length, sample_rate)
if len(signal) == 0:
raise ValueError("len(signal) is zero. Signal length should not be zero.")
if len(signal) < slice_length:
signal = repeat_signal(signal, len(signal), slice_length)
start_abs_sec = buffer_start + start_sec
end_abs_sec = buffer_start + end_sec
sigs_list.append(signal)
sig_rangel_list.append([start_abs_sec, end_abs_sec])
sig_indexes.append(ind_offset)
if not len(sigs_list) == len(sig_rangel_list) == len(sig_indexes):
raise ValueError("Signal information lists have a mismatch.")
return ind_offset, sigs_list, sig_rangel_list, sig_indexes
def get_online_subsegments_from_buffer(
buffer_start: float,
buffer_end: float,
sample_rate: int,
speech_labels_for_update: torch.Tensor,
audio_buffer: torch.Tensor,
segment_indexes: List[int],
window: float,
shift: float,
) -> Tuple[List[torch.Tensor], List[List[float]], List[int]]:
"""
Generate subsegments for online processing from the given segment information.
This function extracts subsegments (embedding vector level) time-series from the
raw time-series buffer based on the segment interval (start and end timestamps) information.
Args:
buffer_start (float):
Start point of the time-series signal buffer
buffer_end (float):
End point of the time-series signal buffer
sample_rate (int):
Sampling rate of the audio input
speech_labels_for_update (Tensor):
Tensor containing intervals (start and end timestamps) of the speech segments
audio_buffer (Tensor):
Tensor containing the raw time-series signal
segment_indexes (list):
List containing the unique indices of segments
window (float):
Window length in second
shift (float):
Shift length in second
Returns:
sigs_list (list):
List containing the tensors of the old and the newly added time-series signals
sig_rangel_list (list):
List containing the old and the newly added intervals (timestamps) of the speech segments
sig_indexes (list):
List containing the old and the newly added unique indices of segments
"""
sigs_list: List[torch.Tensor] = []
sig_rangel_list: List[List[float]] = []
sig_indexes: List[int] = []
if len(segment_indexes) > 0:
ind_offset = segment_indexes[-1]
else:
ind_offset = -1
for idx, range_spl in enumerate(speech_labels_for_update):
range_offs = [float(range_spl[0].item() - buffer_start), float(range_spl[1].item() - buffer_start)]
range_t = [max(0, range_offs[0]), range_offs[1]]
subsegments = get_subsegments(
offset=range_t[0], window=window, shift=shift, duration=(range_t[1] - range_t[0]),
)
ind_offset, sigs, ranges, inds = get_online_segments_from_slices(
sig=audio_buffer,
buffer_start=buffer_start,
buffer_end=buffer_end,
subsegments=subsegments,
window=window,
ind_offset=ind_offset,
sample_rate=sample_rate,
)
sigs_list.extend(sigs)
sig_rangel_list.extend(ranges)
sig_indexes.extend(inds)
assert len(sigs_list) == len(sig_rangel_list) == len(sig_indexes)
return sigs_list, sig_rangel_list, sig_indexes
def get_scale_mapping_argmat(uniq_embs_and_timestamps: Dict[str, dict]) -> Dict[int, torch.Tensor]:
"""
Calculate cosine similarity values among speaker embeddings for each scale then
apply multiscale weights to calculate the fused similarity matrix.
Args:
uniq_embs_and_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
Returns:
scale_mapping_argmat (dict)
Dictionary containing scale mapping information matrix for each scale.
"""
scale_mapping_argmat = {}
embeddings_in_scales, timestamps_in_scales = split_input_data(
embeddings_in_scales=uniq_embs_and_timestamps['embeddings'],
timestamps_in_scales=uniq_embs_and_timestamps['timestamps'],
multiscale_segment_counts=uniq_embs_and_timestamps['multiscale_segment_counts'],
)
session_scale_mapping_list = get_argmin_mat(timestamps_in_scales)
for scale_idx in range(len(session_scale_mapping_list)):
mapping_argmat = session_scale_mapping_list[scale_idx]
scale_mapping_argmat[scale_idx] = mapping_argmat
return scale_mapping_argmat
def get_overlap_stamps(cont_stamps: List[str], ovl_spk_idx: List[str]):
"""
Generate timestamps that include overlap speech. Overlap-including timestamps are created based on the segments that are
created for the clustering diarizer. Overlap speech is assigned to the existing speech segments in `cont_stamps`.
Args:
cont_stamps (list):
Non-overlapping (single speaker per segment) diarization output in string format.
Each line contains the start and end time of segments and corresponding speaker labels.
ovl_spk_idx (list):
List containing the segment indices of the estimated overlapped speech. The start and end of each segment are based on the
single-speaker (i.e., non-overlap-aware) RTTM generation.
Returns:
total_ovl_cont_list (list):
Rendered diarization output in string format. Each line contains the start and end time of segments and
corresponding speaker labels. This format is identical to `cont_stamps`.
"""
ovl_spk_cont_list = [[] for _ in range(len(ovl_spk_idx))]
for spk_idx in range(len(ovl_spk_idx)):
for idx, cont_a_line in enumerate(cont_stamps):
start, end, speaker = cont_a_line.split()
if idx in ovl_spk_idx[spk_idx]:
ovl_spk_cont_list[spk_idx].append(f"{start} {end} speaker_{spk_idx}")
total_ovl_cont_list = []
for ovl_cont_list in ovl_spk_cont_list:
if len(ovl_cont_list) > 0:
total_ovl_cont_list.extend(merge_stamps(ovl_cont_list))
return total_ovl_cont_list
def get_adaptive_threshold(estimated_num_of_spks: int, min_threshold: float, overlap_infer_spk_limit: int):
"""
This function controls the magnitude of the sigmoid threshold based on the estimated number of speakers. As the number of
speakers grows, the diarization error rate becomes very sensitive to overlap speech detection. This function linearly increases
the threshold in proportion to the estimated number of speakers, so more confident overlap speech results are reflected when
the estimated number of speakers is relatively high.
Args:
estimated_num_of_spks (int):
Estimated number of speakers from the clustering result.
min_threshold (float):
Sigmoid threshold value from the config file. This is the minimum threshold value, applied when `estimated_num_of_spks=2`.
overlap_infer_spk_limit (int):
If `estimated_num_of_spks` is equal to or larger than `overlap_infer_spk_limit`, overlap speech estimation is skipped.
Returns:
adaptive_threshold (float):
Threshold value that is scaled based on the `estimated_num_of_spks`.
"""
adaptive_threshold = min_threshold - (estimated_num_of_spks - 2) * (min_threshold - 1) / (
overlap_infer_spk_limit - 2
)
return adaptive_threshold
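# Illustrative sketch (not part of the original file): with min_threshold=0.7 and
# overlap_infer_spk_limit=5, the threshold grows linearly with the estimated speaker count,
# making overlap detection more conservative for crowded sessions:
#   get_adaptive_threshold(2, 0.7, 5)  -> 0.7
#   get_adaptive_threshold(4, 0.7, 5)  -> ~0.9
#   get_adaptive_threshold(5, 0.7, 5)  -> ~1.0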
def generate_speaker_timestamps(
clus_labels: List[Union[float, int]], msdd_preds: List[torch.Tensor], **params
) -> Tuple[List[str], List[str]]:
'''
Generate speaker timestamps from the segmentation information. If `use_clus_as_main=True`, the clustering result is used for the main speaker
labels and the timestamps are taken from the predicted sigmoid values. In this function, the main speaker labels in `maj_labels` exist for
every subsegment step, while the overlap speaker labels in `ovl_labels` only exist for segments where overlap speech is occurring.
Args:
clus_labels (list):
List containing integer-valued speaker clustering results.
msdd_preds (list):
List containing tensors of the predicted sigmoid values.
Each tensor has shape of: (Session length, estimated number of speakers).
params:
Parameters for generating RTTM output and evaluation. Parameters include:
infer_overlap (bool): If False, overlap-speech will not be detected.
use_clus_as_main (bool): If True, use the clustering result for the main speaker labels; if False, the most
probable speaker from the MSDD output is used for constructing the output RTTM files.
overlap_infer_spk_limit (int): At or above this limit of estimated speakers, overlap-speech detection is bypassed.
use_adaptive_thres (bool): Boolean that determines whether to use adaptive_threshold depending on the estimated
number of speakers.
max_overlap_spks (int): Maximum number of overlap speakers detected. Default is 2.
threshold (float): Sigmoid threshold for MSDD output.
Returns:
maj_labels (list):
List containing string-formatted single-speaker speech segment timestamps and corresponding speaker labels.
Example: [..., '551.685 552.77 speaker_1', '552.99 554.43 speaker_0', '554.97 558.19 speaker_0', ...]
ovl_labels (list):
List containing string-formatted additional overlapping speech segment timestamps and corresponding speaker labels.
Note that `ovl_labels` includes only overlapping speech that is not included in `maj_labels`.
Example: [..., '152.495 152.745 speaker_1', '372.71 373.085 speaker_0', '554.97 555.885 speaker_1', ...]
'''
msdd_preds.squeeze(0)
estimated_num_of_spks = msdd_preds.shape[-1]
overlap_speaker_list = [[] for _ in range(estimated_num_of_spks)]
infer_overlap = estimated_num_of_spks < int(params['overlap_infer_spk_limit'])
main_speaker_lines = []
if params['use_adaptive_thres']:
threshold = get_adaptive_threshold(
estimated_num_of_spks, params['threshold'], params['overlap_infer_spk_limit']
)
else:
threshold = params['threshold']
for seg_idx, cluster_label in enumerate(clus_labels):
spk_for_seg = (msdd_preds[0, seg_idx] > threshold).int().cpu().numpy().tolist()
sm_for_seg = msdd_preds[0, seg_idx].cpu().numpy()
if params['use_clus_as_main']:
main_spk_idx = int(cluster_label[2])
else:
main_spk_idx = np.argsort(msdd_preds[0, seg_idx].cpu().numpy())[::-1][0]
if sum(spk_for_seg) > 1 and infer_overlap:
idx_arr = np.argsort(sm_for_seg)[::-1]
for ovl_spk_idx in idx_arr[: params['max_overlap_spks']].tolist():
if ovl_spk_idx != int(main_spk_idx):
overlap_speaker_list[ovl_spk_idx].append(seg_idx)
main_speaker_lines.append(f"{cluster_label[0]} {cluster_label[1]} speaker_{main_spk_idx}")
cont_stamps = get_contiguous_stamps(main_speaker_lines)
maj_labels = merge_stamps(cont_stamps)
ovl_labels = get_overlap_stamps(cont_stamps, overlap_speaker_list)
return maj_labels, ovl_labels
def get_uniq_id_list_from_manifest(manifest_file: str):
"""Retrieve `uniq_id` values from the given manifest_file and save the IDs to a list.
"""
uniq_id_list = []
with open(manifest_file, 'r', encoding='utf-8') as manifest:
for i, line in enumerate(manifest.readlines()):
line = line.strip()
dic = json.loads(line)
uniq_id = get_uniqname_from_filepath(dic['audio_filepath'])
uniq_id_list.append(uniq_id)
return uniq_id_list
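# Illustrative sketch (assumes `get_uniqname_from_filepath` returns the file stem):
# a manifest line such as {"audio_filepath": "/data/session_ab12.wav", ...} would
# contribute "session_ab12" to the returned list, preserving manifest order.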
def get_id_tup_dict(uniq_id_list: List[str], test_data_collection, preds_list: List[torch.Tensor]):
"""
Create session-level dictionary containing data needed to construct RTTM diarization output.
Args:
uniq_id_list (list):
List containing the `uniq_id` values.
test_data_collection (collections.DiarizationLabelEntity):
Class instance containing session information such as targeted speaker indices, audio filepath, and RTTM filepath.
preds_list (list):
List containing tensors of predicted sigmoid values.
Returns:
session_dict (dict):
Dictionary containing session-level target speaker data and predicted sigmoid values in tensor format.
"""
session_dict = {x: [] for x in uniq_id_list}
for idx, line in enumerate(test_data_collection):
uniq_id = get_uniqname_from_filepath(line.audio_file)
session_dict[uniq_id].append([line.target_spks, preds_list[idx]])
return session_dict
def prepare_split_data(manifest_filepath, _out_dir, multiscale_args_dict, global_rank):
"""
Prepare multiscale timestamp data for training the multiscale diarization decoder (MSDD).
Oracle VAD timestamps from RTTM files are used as VAD timestamps.
In this function, the timestamps needed for embedding extraction are computed without extracting the embedding vectors themselves.
Args:
manifest_filepath (str):
Input manifest file for creating audio-to-RTTM mapping.
_out_dir (str):
Output directory where timestamp json files are saved.
multiscale_args_dict (dict):
Dictionary containing multiscale arguments; `scale_dict` maps each scale index to its (window, shift) lengths.
global_rank (int):
Global rank of the current process; temporary folders are removed only when `global_rank` is 0.
Returns:
multiscale_timestamps_dict (dict):
- Dictionary containing two types of arguments: multi-scale weights and subsegment timestamps for each data sample.
- Each data sample has two keys: `multiscale_weights` and `scale_dict`.
- `multiscale_weights` key contains a list containing multiscale weights.
- `scale_dict` is indexed by integer keys which are scale index.
- Each data sample is indexed by using the following naming convention: `<uniq_id>_<start time in ms>_<end time in ms>`
Example: `fe_03_00106_mixed_626310_642300`
"""
speaker_dir = os.path.join(_out_dir, 'speaker_outputs')
# Remove temp folders only on the first run of the ModelPT instance (global rank 0).
if global_rank == 0:
if os.path.exists(speaker_dir):
shutil.rmtree(speaker_dir)
os.makedirs(speaker_dir)
split_audio_rttm_map = audio_rttm_map(manifest_filepath, attach_dur=True)
# Speech Activity Detection part
_speaker_manifest_path = os.path.join(speaker_dir, 'oracle_vad_manifest.json')
logging.info(f"Extracting oracle VAD timestamps and saving at {speaker_dir}")
if not os.path.exists(_speaker_manifest_path):
write_rttm2manifest(split_audio_rttm_map, _speaker_manifest_path, include_uniq_id=True)
multiscale_timestamps_by_scale = {}
# Segmentation
for scale_idx, (window, shift) in multiscale_args_dict['scale_dict'].items():
subsegments_manifest_path = os.path.join(speaker_dir, f'subsegments_scale{scale_idx}.json')
if not os.path.exists(subsegments_manifest_path):
# Sub-segmentation for the current scale (scale_idx)
segments_manifest_to_subsegments_manifest(
segments_manifest_file=_speaker_manifest_path,
subsegments_manifest_file=subsegments_manifest_path,
window=window,
shift=shift,
include_uniq_id=True,
)
logging.info(
f"Subsegmentation for timestamp extracted for: scale-{scale_idx} at {subsegments_manifest_path}"
)
multiscale_timestamps = extract_timestamps(subsegments_manifest_path)
multiscale_timestamps_by_scale[scale_idx] = multiscale_timestamps
multiscale_timestamps_dict = get_timestamps(multiscale_timestamps_by_scale, multiscale_args_dict)
return multiscale_timestamps_dict
def extract_timestamps(manifest_file: str):
"""
This method extracts timestamps from segments passed through manifest_file.
Args:
manifest_file (str):
Manifest file containing segmentation information.
Returns:
time_stamps (dict):
Dictionary containing lists of timestamps.
"""
logging.info(f"Extracting timestamps from {manifest_file} for multiscale subsegmentation.")
time_stamps = {}
with open(manifest_file, 'r', encoding='utf-8') as manifest:
for i, line in enumerate(manifest.readlines()):
line = line.strip()
dic = json.loads(line)
uniq_name = dic['uniq_id']
if uniq_name not in time_stamps:
time_stamps[uniq_name] = []
start = dic['offset']
end = start + dic['duration']
time_stamps[uniq_name].append([start, end])
return time_stamps
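# Illustrative sketch (hypothetical manifest content): a subsegment line such as
#   {"uniq_id": "session_ab12", "offset": 12.0, "duration": 1.5, ...}
# is collected as time_stamps["session_ab12"] == [[12.0, 13.5]], so every [start, end]
# pair is grouped under its `uniq_id`.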
def make_rttm_with_overlap(
manifest_file_path: str,
clus_label_dict: Dict[str, List[Union[float, int]]],
msdd_preds: List[torch.Tensor],
**params,
):
"""
Create RTTM files that include detected overlap speech. Note that the effect of overlap detection is only
notable when RTTM files are evaluated with `ignore_overlap=False` option.
Args:
manifest_file_path (str):
Path to the input manifest file.
clus_label_dict (dict):
Dictionary containing subsegment timestamps in float type and cluster labels in integer type.
Indexed by `uniq_id` string.
msdd_preds (list):
List containing tensors of the predicted sigmoid values.
Each tensor has shape of: (Session length, estimated number of speakers).
params:
Parameters for generating RTTM output and evaluation. Parameters include:
infer_overlap (bool): If False, overlap-speech will not be detected.
See docstrings of `generate_speaker_timestamps` function for other variables in `params`.
Returns:
all_hypothesis (list):
List containing Pyannote's `Annotation` objects that are created from hypothesis RTTM outputs.
all_reference (list):
List containing Pyannote's `Annotation` objects that are created from ground-truth RTTM outputs.
"""
AUDIO_RTTM_MAP = audio_rttm_map(manifest_file_path)
manifest_file_lengths_list = []
all_hypothesis, all_reference = [], []
no_references = False
with open(manifest_file_path, 'r', encoding='utf-8') as manifest:
for i, line in enumerate(manifest.readlines()):
uniq_id = get_uniq_id_from_manifest_line(line)
manifest_dic = AUDIO_RTTM_MAP[uniq_id]
clus_labels = clus_label_dict[uniq_id]
manifest_file_lengths_list.append(len(clus_labels))
maj_labels, ovl_labels = generate_speaker_timestamps(clus_labels, msdd_preds[i], **params)
if params['infer_overlap']:
hyp_labels = maj_labels + ovl_labels
else:
hyp_labels = maj_labels
hypothesis = labels_to_pyannote_object(hyp_labels, uniq_name=uniq_id)
if params['out_rttm_dir']:
hyp_labels = sorted(hyp_labels, key=lambda x: float(x.split()[0]))
labels_to_rttmfile(hyp_labels, uniq_id, params['out_rttm_dir'])
all_hypothesis.append([uniq_id, hypothesis])
rttm_file = manifest_dic.get('rttm_filepath', None)
if rttm_file is not None and os.path.exists(rttm_file) and not no_references:
ref_labels = rttm_to_labels(rttm_file)
reference = labels_to_pyannote_object(ref_labels, uniq_name=uniq_id)
all_reference.append([uniq_id, reference])
else:
no_references = True
all_reference = []
return all_reference, all_hypothesis
def embedding_normalize(embs, use_std=False, eps=1e-10):
"""
Mean-subtract and L2-normalize the input speaker embeddings.
Args:
embs: embeddings of shape (Batch, emb_size)
use_std: if True, also divide by the per-dimension standard deviation
eps: small value added to the denominator for numerical stability
Returns:
embs: normalized embeddings of shape (Batch,emb_size)
"""
embs = embs - embs.mean(axis=0)
if use_std:
embs = embs / (embs.std(axis=0) + eps)
embs_l2_norm = np.expand_dims(np.linalg.norm(embs, ord=2, axis=-1), axis=1)
embs = embs / embs_l2_norm
return embs
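# Minimal usage sketch (illustrative shapes, not part of the original file):
#   embs = np.random.randn(16, 192)                      # (Batch, emb_size)
#   normed = embedding_normalize(embs)
#   np.allclose(np.linalg.norm(normed, axis=1), 1.0)     # -> True
# Each embedding is mean-centered across the batch and scaled to unit L2 norm.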
class OnlineSegmentor:
"""
Online Segmentor for online (streaming) diarizer.
- The class instances created by this class takes time-series signal from the audio buffer and
creates subsegments for embedding extraction.
- Since online segmentation is based on a short audio buffer, the methods in this class extract
a few subsegments from the given intervals of the raw time-series signal.
Attributes:
frame_start (float):
Start of the middle chunk
buffer_start (float):
Start of the entire buffer
buffer_end (float):
End of the entire buffer
sample_rate (int):
Sampling rate of the input time-series signal
cumulative_speech_labels (Tensor):
Torch tensor matrix containing cumulative VAD (speech activity) timestamps
"""
def __init__(self, sample_rate: int):
self.frame_start: float = 0.0
self.buffer_start: float = 0.0
self.buffer_end: float = 0.0
self.sample_rate: int = sample_rate
self.cumulative_speech_labels: torch.Tensor = torch.tensor([])
def run_online_segmentation(
self,
audio_buffer: torch.Tensor,
vad_timestamps: torch.Tensor,
segment_raw_audio: List[torch.Tensor],
segment_range_ts: List[List[float]],
segment_indexes: List[int],
window: float,
shift: float,
):
"""
Remove the old segments that overlap with the new frame (`self.frame_start`).
`cursor_for_old_segments` points at the onset of the most recently popped t_range.
The frame sits in the middle of the buffer:
|___Buffer___[___________]____________|
|____________[ Frame ]____________|
| <- buffer start
|____________| <- frame start
Args:
audio_buffer (Tensor):
Tensor containing raw time-series signal
vad_timestamps (Tensor):
Tensor containing VAD intervals (start and end timestamps)
segment_raw_audio (list):
List containing the previously added tensors of the raw time-series signal segments
segment_range_ts (list):
List containing the previously added intervals (start and end timestamps) of each segment
segment_indexes (list):
List containing the previously added global integer indices of the segments from
the start to the current cursor
window (float):
Window length in seconds
shift (float):
Shift length in seconds
Returns:
segment_raw_audio (list):
List containing the newly added tensors of the raw time-series signal
segment_range_ts (list):
List containing the newly added interval (start and end timestamps) of each segment
segment_indexes (list):
List containing the newly added global integer indices of the segments from
the start to the current cursor
"""
if self.buffer_start >= 0:
# Check if this is the very first step
if len(segment_raw_audio) == 0 and vad_timestamps.shape[0] > 0:
vad_timestamps[0][0] = max(vad_timestamps[0][0], 0.0)
speech_labels_for_update = vad_timestamps
self.cumulative_speech_labels = speech_labels_for_update
else:
# Calculate a cursor for the update point
cursor_for_old_segments, cursor_index = get_new_cursor_for_update(self.frame_start, segment_range_ts)
segment_range_ts = segment_range_ts[:cursor_index]
segment_raw_audio = segment_raw_audio[:cursor_index]
segment_indexes = segment_indexes[:cursor_index]
if not len(segment_raw_audio) == len(segment_range_ts) == len(segment_indexes):
raise ValueError("Scale-wise segment information has a mismatch in length.")
speech_labels_for_update, self.cumulative_speech_labels = get_speech_labels_for_update(
self.frame_start,
self.buffer_end,
self.cumulative_speech_labels,
vad_timestamps,
cursor_for_old_segments,
)
# Collect the timeseries signal from the buffer
sigs_list, sig_rangel_list, sig_indexes = get_online_subsegments_from_buffer(
buffer_start=self.buffer_start,
buffer_end=self.buffer_end,
sample_rate=self.sample_rate,
speech_labels_for_update=speech_labels_for_update,
audio_buffer=audio_buffer,
segment_indexes=segment_indexes,
window=window,
shift=shift,
)
segment_raw_audio.extend(sigs_list)
segment_range_ts.extend(sig_rangel_list)
segment_indexes.extend(sig_indexes)
if not len(segment_raw_audio) == len(segment_range_ts) == len(segment_indexes):
raise ValueError("Segment information has a mismatch in length.")
return segment_raw_audio, segment_range_ts, segment_indexes
|
NeMo-main
|
nemo/collections/asr/parts/utils/speaker_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Tuple
from nemo.collections.asr.metrics.wer import word_error_rate_detail
from nemo.utils import logging
def clean_label(_str: str, num_to_words: bool = True, langid="en") -> str:
"""
Remove unauthorized characters from a string, lowercase it, and remove unneeded spaces
"""
replace_with_space = [char for char in '/?*\",.:=?_{|}~¨«·»¡¿„…‧‹›≪≫!:;ː→']
replace_with_blank = [char for char in '`¨´‘’“”`ʻ‘’“"‘”']
replace_with_apos = [char for char in '‘’ʻ‘’‘']
_str = _str.strip()
_str = _str.lower()
for i in replace_with_blank:
_str = _str.replace(i, "")
for i in replace_with_space:
_str = _str.replace(i, " ")
for i in replace_with_apos:
_str = _str.replace(i, "'")
if num_to_words:
if langid == "en":
_str = convert_num_to_words(_str, langid="en")
else:
logging.info(
"Currently support basic num_to_words in English only. Please use Text Normalization to convert other languages! Skipping!"
)
ret = " ".join(_str.split())
return ret
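# Illustrative sketch (behavior follows from the replacement tables above):
#   clean_label("Room 12, please!")  -> 'room one two please'
# Punctuation is mapped to spaces or removed, the text is lower-cased, digits are
# spelled out via `convert_num_to_words`, and extra whitespace is collapsed.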
def convert_num_to_words(_str: str, langid: str = "en") -> str:
"""
Convert digits to corresponding words. Note this is a naive approach and could be replaced with text normalization.
"""
if langid == "en":
num_to_words = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
_str = _str.strip()
words = _str.split()
out_str = ""
num_word = []
for word in words:
if word.isdigit():
num = int(word)
while num:
digit = num % 10
digit_word = num_to_words[digit]
num_word.append(digit_word)
num = int(num / 10)
if not (num):
num_str = ""
num_word = num_word[::-1]
for ele in num_word:
num_str += ele + " "
out_str += num_str + " "
num_word.clear()
else:
out_str += word + " "
out_str = out_str.strip()
else:
raise ValueError(
"Currently support basic num_to_words in English only. Please use Text Normalization to convert other languages!"
)
return out_str
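# Illustrative sketch: digits are spelled out one by one rather than as a full number
# word, reflecting the naive conversion noted in the docstring:
#   convert_num_to_words("flight 305")  -> 'flight three zero five'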
def cal_write_wer(
pred_manifest: str = None,
pred_text_attr_name: str = "pred_text",
clean_groundtruth_text: bool = False,
langid: str = 'en',
use_cer: bool = False,
output_filename: str = None,
) -> Tuple[str, dict, str]:
"""
Calculate WER, insertion, deletion, and substitution rates based on the ground-truth text and `pred_text_attr_name` (pred_text).
We use WER in the function name as a convention, but the error rate calculation currently supports both Word Error Rate (WER) and Character Error Rate (CER).
"""
samples = []
hyps = []
refs = []
eval_metric = "cer" if use_cer else "wer"
with open(pred_manifest, 'r') as fp:
for line in fp:
sample = json.loads(line)
if 'text' not in sample:
logging.info(
"ground-truth text is not present in manifest! Cannot calculate Word Error Rate. Returning!"
)
return None, None, eval_metric
hyp = sample[pred_text_attr_name]
ref = sample['text']
if clean_groundtruth_text:
ref = clean_label(ref, langid=langid)
wer, tokens, ins_rate, del_rate, sub_rate = word_error_rate_detail(
hypotheses=[hyp], references=[ref], use_cer=use_cer
)
sample[eval_metric] = wer  # evaluation metric: either word error rate or character error rate
sample['tokens'] = tokens # number of word/characters/tokens
sample['ins_rate'] = ins_rate # insertion error rate
sample['del_rate'] = del_rate # deletion error rate
sample['sub_rate'] = sub_rate # substitution error rate
samples.append(sample)
hyps.append(hyp)
refs.append(ref)
total_wer, total_tokens, total_ins_rate, total_del_rate, total_sub_rate = word_error_rate_detail(
hypotheses=hyps, references=refs, use_cer=use_cer
)
if not output_filename:
output_manifest_w_wer = pred_manifest
else:
output_manifest_w_wer = output_filename
with open(output_manifest_w_wer, 'w') as fout:
for sample in samples:
json.dump(sample, fout)
fout.write('\n')
fout.flush()
total_res = {
"samples": len(samples),
"tokens": total_tokens,
eval_metric: total_wer,
"ins_rate": total_ins_rate,
"del_rate": total_del_rate,
"sub_rate": total_sub_rate,
}
return output_manifest_w_wer, total_res, eval_metric
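# Minimal usage sketch (hypothetical file paths; each manifest line must carry both a
# ground-truth "text" field and a "pred_text" field):
#   output_manifest, total_res, metric = cal_write_wer(
#       pred_manifest="predictions.json",
#       clean_groundtruth_text=True,
#       use_cer=False,
#       output_filename="predictions_with_wer.json",
#   )
#   # total_res aggregates "wer", "ins_rate", "del_rate" and "sub_rate" over all samples.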
|
NeMo-main
|
nemo/collections/asr/parts/utils/eval_utils.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from typing import Dict, List, Tuple, Type, Union
import numpy as np
import torch
from omegaconf import OmegaConf
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.metrics.wer import WER, CTCDecoding, CTCDecodingConfig
from nemo.collections.asr.metrics.wer_bpe import WERBPE, CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.models import EncDecCTCModel, EncDecCTCModelBPE
from nemo.collections.asr.parts.utils.audio_utils import get_samples
from nemo.collections.asr.parts.utils.speaker_utils import audio_rttm_map, get_uniqname_from_filepath
from nemo.collections.asr.parts.utils.streaming_utils import AudioFeatureIterator, FrameBatchASR
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
__all__ = ['ASRDecoderTimeStamps']
try:
from pyctcdecode import build_ctcdecoder
PYCTCDECODE = True
except ImportError:
PYCTCDECODE = False
def if_none_get_default(param, default_value):
return default_value if param is None else param
class WERBPE_TS(WERBPE):
"""
WERBPE_TS is a WERBPE class variant modified to generate word timestamps along with logits.
The WERBPE functions are modified so that a word timestamp is saved whenever a BPE token
is appended to the output list.
This class is designed to support ASR models based on CTC and BPE.
Please refer to the definition of the WERBPE class for more information.
"""
def __init__(
self,
tokenizer: TokenizerSpec,
batch_dim_index=0,
use_cer=False,
ctc_decode=None,
log_prediction=True,
dist_sync_on_step=False,
):
if ctc_decode is not None:
logging.warning(f'`ctc_decode` was set to {ctc_decode}. Note that this is ignored.')
decoding_cfg = CTCBPEDecodingConfig(batch_dim_index=batch_dim_index)
decoding = CTCBPEDecoding(decoding_cfg, tokenizer=tokenizer)
super().__init__(decoding, use_cer, log_prediction, dist_sync_on_step)
def ctc_decoder_predictions_tensor_with_ts(
self, time_stride, predictions: torch.Tensor, predictions_len: torch.Tensor = None
) -> List[str]:
hypotheses, timestamps, word_timestamps = [], [], []
# '⁇' string should be removed since it causes error during string split.
unk = '⁇'
prediction_cpu_tensor = predictions.long().cpu()
# iterate over batch
self.time_stride = time_stride
for ind in range(prediction_cpu_tensor.shape[self.decoding.batch_dim_index]):
prediction = prediction_cpu_tensor[ind].detach().numpy().tolist()
if predictions_len is not None:
prediction = prediction[: predictions_len[ind]]
# CTC decoding procedure
decoded_prediction, char_ts, timestamp_list = [], [], []
previous = self.decoding.blank_id
for pdx, p in enumerate(prediction):
if (p != previous or previous == self.decoding.blank_id) and p != self.decoding.blank_id:
decoded_prediction.append(p)
char_ts.append(round(pdx * self.time_stride, 2))
timestamp_list.append(round(pdx * self.time_stride, 2))
previous = p
hypothesis = self.decode_tokens_to_str_with_ts(decoded_prediction)
hypothesis = hypothesis.replace(unk, '')
word_ts, word_seq = self.get_ts_from_decoded_prediction(decoded_prediction, hypothesis, char_ts)
hypotheses.append(" ".join(word_seq))
timestamps.append(timestamp_list)
word_timestamps.append(word_ts)
return hypotheses, timestamps, word_timestamps
def decode_tokens_to_str_with_ts(self, tokens: List[int]) -> str:
hypothesis = self.decoding.tokenizer.ids_to_text(tokens)
return hypothesis
def decode_ids_to_tokens_with_ts(self, tokens: List[int]) -> List[str]:
token_list = self.decoding.tokenizer.ids_to_tokens(tokens)
return token_list
def get_ts_from_decoded_prediction(
self, decoded_prediction: List[str], hypothesis: str, char_ts: List[str]
) -> Tuple[List[List[float]], List[str]]:
decoded_char_list = self.decoding.tokenizer.ids_to_tokens(decoded_prediction)
stt_idx, end_idx = 0, len(decoded_char_list) - 1
stt_ch_idx, end_ch_idx = 0, 0
space = '▁'
word_ts, word_seq = [], []
word_open_flag = False
for idx, ch in enumerate(decoded_char_list):
# If the symbol is space and not an end of the utterance, move on
if idx != end_idx and (space == ch and space in decoded_char_list[idx + 1]):
continue
# If this token starts a new word (first token, follows a space token, or begins with the word-boundary symbol), open the word
if (idx == stt_idx or space == decoded_char_list[idx - 1] or (space in ch and len(ch) > 1)) and (
ch != space
):
_stt = char_ts[idx]
stt_ch_idx = idx
word_open_flag = True
# If this char has `word_open_flag=True` and meets any one of the following conditions:
# (1) last word (2) unknown word (3) start symbol in the following word,
# close the `word_open_flag` and add the word to the `word_seq` list.
close_cond = idx == end_idx or ch in ['<unk>'] or space in decoded_char_list[idx + 1]
if (word_open_flag and ch != space) and close_cond:
_end = round(char_ts[idx] + self.time_stride, 2)
end_ch_idx = idx
word_open_flag = False
word_ts.append([_stt, _end])
stitched_word = ''.join(decoded_char_list[stt_ch_idx : end_ch_idx + 1]).replace(space, '')
word_seq.append(stitched_word)
assert len(word_ts) == len(hypothesis.split()), "Text hypothesis does not match word timestamps."
return word_ts, word_seq
class WER_TS(WER):
"""
WER_TS is a WER class variant modified to generate timestamps along with logits.
The WER functions are modified so that a timestamp is saved whenever a character
is appended to the output list.
This class is designed to support ASR models based on CTC and character-level tokens.
Please refer to the definition of the WER class for more information.
"""
def __init__(
self,
vocabulary,
batch_dim_index=0,
use_cer=False,
ctc_decode=None,
log_prediction=True,
dist_sync_on_step=False,
):
if ctc_decode is not None:
logging.warning(f'`ctc_decode` was set to {ctc_decode}. Note that this is ignored.')
decoding_cfg = CTCDecodingConfig(batch_dim_index=batch_dim_index)
decoding = CTCDecoding(decoding_cfg, vocabulary=vocabulary)
super().__init__(decoding, use_cer, log_prediction, dist_sync_on_step)
def decode_tokens_to_str_with_ts(self, tokens: List[int], timestamps: List[int]) -> str:
"""
Take frame-level tokens and timestamp list and collect the timestamps for
start and end of each word.
"""
token_list, timestamp_list = self.decode_ids_to_tokens_with_ts(tokens, timestamps)
hypothesis = ''.join(self.decoding.decode_ids_to_tokens(tokens))
return hypothesis, timestamp_list
def decode_ids_to_tokens_with_ts(self, tokens: List[int], timestamps: List[int]) -> List[str]:
token_list, timestamp_list = [], []
for i, c in enumerate(tokens):
if c != self.decoding.blank_id:
token_list.append(self.decoding.labels_map[c])
timestamp_list.append(timestamps[i])
return token_list, timestamp_list
def ctc_decoder_predictions_tensor_with_ts(
self, predictions: torch.Tensor, predictions_len: torch.Tensor = None,
) -> List[str]:
"""
A shortened version of the original function ctc_decoder_predictions_tensor().
Replaced decode_tokens_to_str() function with decode_tokens_to_str_with_ts().
"""
hypotheses, timestamps = [], []
prediction_cpu_tensor = predictions.long().cpu()
for ind in range(prediction_cpu_tensor.shape[self.decoding.batch_dim_index]):
prediction = prediction_cpu_tensor[ind].detach().numpy().tolist()
if predictions_len is not None:
prediction = prediction[: predictions_len[ind]]
# CTC decoding procedure with timestamps
decoded_prediction, decoded_timing_list = [], []
previous = self.decoding.blank_id
for pdx, p in enumerate(prediction):
if (p != previous or previous == self.decoding.blank_id) and p != self.decoding.blank_id:
decoded_prediction.append(p)
decoded_timing_list.append(pdx)
previous = p
text, timestamp_list = self.decode_tokens_to_str_with_ts(decoded_prediction, decoded_timing_list)
hypotheses.append(text)
timestamps.append(timestamp_list)
return hypotheses, timestamps
def get_wer_feat_logit(audio_file_path, asr, frame_len, tokens_per_chunk, delay, model_stride_in_secs):
"""
Run buffered ASR inference on the given audio file and return the merged hypothesis,
the unmerged greedy token predictions, and the corresponding log probabilities.
Normalization is done per buffer in the frame bufferer.
"""
asr.reset()
asr.read_audio_file_and_return(audio_file_path, delay, model_stride_in_secs)
hyp, tokens, log_prob = asr.transcribe_with_ts(tokens_per_chunk, delay)
return hyp, tokens, log_prob
class FrameBatchASRLogits(FrameBatchASR):
"""
A class for streaming frame-based ASR.
Inherits from FrameBatchASR and adds the capability of returning the logit output.
Please refer to FrameBatchASR for more detailed information.
"""
def __init__(
self,
asr_model: Type[EncDecCTCModelBPE],
frame_len: float = 1.6,
total_buffer: float = 4.0,
batch_size: int = 4,
):
super().__init__(asr_model, frame_len, total_buffer, batch_size)
self.all_logprobs = []
def clear_buffer(self):
self.all_logprobs = []
self.all_preds = []
def read_audio_file_and_return(self, audio_filepath: str, delay: float, model_stride_in_secs: float):
samples = get_samples(audio_filepath)
samples = np.pad(samples, (0, int(delay * model_stride_in_secs * self.asr_model._cfg.sample_rate)))
frame_reader = AudioFeatureIterator(samples, self.frame_len, self.raw_preprocessor, self.asr_model.device)
self.set_frame_reader(frame_reader)
@torch.no_grad()
def _get_batch_preds(self, keep_logits):
device = self.asr_model.device
for batch in iter(self.data_loader):
feat_signal, feat_signal_len = batch
feat_signal, feat_signal_len = feat_signal.to(device), feat_signal_len.to(device)
log_probs, encoded_len, predictions = self.asr_model(
processed_signal=feat_signal, processed_signal_length=feat_signal_len
)
preds = torch.unbind(predictions)
for pred in preds:
self.all_preds.append(pred.cpu().numpy())
# Always keep logits in FrameBatchASRLogits
_ = keep_logits
log_probs_tup = torch.unbind(log_probs)
for log_prob in log_probs_tup:
self.all_logprobs.append(log_prob)
del log_probs, log_probs_tup
del encoded_len
del predictions
def transcribe_with_ts(
self, tokens_per_chunk: int, delay: int,
):
self.infer_logits()
self.unmerged = []
self.part_logprobs = []
for idx, pred in enumerate(self.all_preds):
decoded = pred.tolist()
_stt, _end = len(decoded) - 1 - delay, len(decoded) - 1 - delay + tokens_per_chunk
self.unmerged += decoded[len(decoded) - 1 - delay : len(decoded) - 1 - delay + tokens_per_chunk]
self.part_logprobs.append(self.all_logprobs[idx][_stt:_end, :])
self.unmerged_logprobs = torch.cat(self.part_logprobs, 0)
assert (
len(self.unmerged) == self.unmerged_logprobs.shape[0]
), "Unmerged decoded result and log prob lengths are different."
return self.greedy_merge(self.unmerged), self.unmerged, self.unmerged_logprobs
class ASRDecoderTimeStamps:
"""
A class designed for extracting word timestamps during the ASR decoding process.
This class contains setups for several NeMo ASR models such as QuartzNet, CitriNet, and Conformer-CTC models.
"""
def __init__(self, cfg_diarizer):
self.manifest_filepath = cfg_diarizer.manifest_filepath
self.params = cfg_diarizer.asr.parameters
self.ctc_decoder_params = cfg_diarizer.asr.ctc_decoder_parameters
self.ASR_model_name = cfg_diarizer.asr.model_path
self.nonspeech_threshold = self.params.asr_based_vad_threshold
self.root_path = None
self.run_ASR = None
self.encdec_class = None
self.AUDIO_RTTM_MAP = audio_rttm_map(self.manifest_filepath)
self.audio_file_list = [value['audio_filepath'] for _, value in self.AUDIO_RTTM_MAP.items()]
def set_asr_model(self):
"""
Initialize the parameters for the given ASR model.
Currently, the following NGC models are supported:
stt_en_quartznet15x5,
stt_en_citrinet*,
stt_en_conformer_ctc*
To assign a proper decoding function for generating timestamp output,
the name of the .nemo file should include the architecture name, such as:
'quartznet', 'conformer', and 'citrinet'.
decoder_delay_in_sec is the amount of delay that is compensated during the word timestamp extraction.
word_ts_anchor_offset is the reference point for a word and used for matching the word with diarization labels.
Each ASR model has a different optimal decoder delay and word timestamp anchor offset.
To obtain an optimized diarization result with ASR, decoder_delay_in_sec and word_ts_anchor_offset
need to be searched on a development set.
"""
if 'quartznet' in self.ASR_model_name.lower():
self.run_ASR = self.run_ASR_QuartzNet_CTC
self.encdec_class = EncDecCTCModel
self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.04)
self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.12)
self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 4)
self.model_stride_in_secs = 0.02
elif 'conformer' in self.ASR_model_name.lower():
self.run_ASR = self.run_ASR_BPE_CTC
self.encdec_class = EncDecCTCModelBPE
self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.08)
self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.12)
self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 16)
self.model_stride_in_secs = 0.04
# Conformer requires buffered inference and the parameters for buffered processing.
self.chunk_len_in_sec = 5
self.total_buffer_in_secs = 25
elif 'citrinet' in self.ASR_model_name.lower():
self.run_ASR = self.run_ASR_CitriNet_CTC
self.encdec_class = EncDecCTCModelBPE
self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.16)
self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.2)
self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 4)
self.model_stride_in_secs = 0.08
else:
raise ValueError(f"Cannot find the ASR model class for: {self.params['self.ASR_model_name']}")
if self.ASR_model_name.endswith('.nemo'):
asr_model = self.encdec_class.restore_from(restore_path=self.ASR_model_name)
else:
asr_model = self.encdec_class.from_pretrained(model_name=self.ASR_model_name, strict=False)
if self.ctc_decoder_params['pretrained_language_model']:
if not PYCTCDECODE:
raise ImportError(
'LM for beam search decoding is provided but pyctcdecode is not installed. Install pyctcdecode using PyPI: pip install pyctcdecode'
)
self.beam_search_decoder = self.load_LM_for_CTC_decoder(asr_model)
else:
self.beam_search_decoder = None
asr_model.eval()
return asr_model
def load_LM_for_CTC_decoder(self, asr_model: Type[Union[EncDecCTCModel, EncDecCTCModelBPE]]):
"""
Load a language model for CTC decoder (pyctcdecode).
Note that only EncDecCTCModel and EncDecCTCModelBPE models can use pyctcdecode.
"""
kenlm_model = self.ctc_decoder_params['pretrained_language_model']
logging.info(f"Loading language model : {self.ctc_decoder_params['pretrained_language_model']}")
if 'EncDecCTCModelBPE' in str(type(asr_model)):
vocab = asr_model.tokenizer.tokenizer.get_vocab()
labels = list(vocab.keys())
labels[0] = "<unk>"
elif 'EncDecCTCModel' in str(type(asr_model)):
labels = asr_model.decoder.vocabulary
else:
raise ValueError(f"Cannot find a vocabulary or tokenizer for: {self.params['self.ASR_model_name']}")
decoder = build_ctcdecoder(
labels, kenlm_model, alpha=self.ctc_decoder_params['alpha'], beta=self.ctc_decoder_params['beta']
)
return decoder
def run_ASR_QuartzNet_CTC(self, asr_model: Type[EncDecCTCModel]) -> Tuple[Dict, Dict]:
"""
Launch QuartzNet ASR model and collect logit, timestamps and text output.
Args:
asr_model (class):
The loaded NeMo ASR model.
Returns:
words_dict (dict):
Dictionary containing the sequence of words from hypothesis.
word_ts_dict (dict):
Dictionary containing the time-stamps of words.
"""
words_dict, word_ts_dict = {}, {}
wer_ts = WER_TS(
vocabulary=asr_model.decoder.vocabulary,
batch_dim_index=0,
use_cer=asr_model._cfg.get('use_cer', False),
ctc_decode=True,
dist_sync_on_step=True,
log_prediction=asr_model._cfg.get("log_prediction", False),
)
with torch.cuda.amp.autocast():
transcript_logits_list = asr_model.transcribe(
self.audio_file_list, batch_size=self.asr_batch_size, logprobs=True
)
for idx, logit_np in enumerate(transcript_logits_list):
uniq_id = get_uniqname_from_filepath(self.audio_file_list[idx])
if self.beam_search_decoder:
logging.info(
f"Running beam-search decoder on {uniq_id} with LM {self.ctc_decoder_params['pretrained_language_model']}"
)
hyp_words, word_ts = self.run_pyctcdecode(logit_np)
else:
log_prob = torch.from_numpy(logit_np)
logits_len = torch.from_numpy(np.array([log_prob.shape[0]]))
greedy_predictions = log_prob.argmax(dim=-1, keepdim=False).unsqueeze(0)
text, char_ts = wer_ts.ctc_decoder_predictions_tensor_with_ts(
greedy_predictions, predictions_len=logits_len
)
trans, char_ts_in_feature_frame_idx = self.clean_trans_and_TS(text[0], char_ts[0])
spaces_in_sec, hyp_words = self._get_spaces(
trans, char_ts_in_feature_frame_idx, self.model_stride_in_secs
)
word_ts = self.get_word_ts_from_spaces(
char_ts_in_feature_frame_idx, spaces_in_sec, end_stamp=logit_np.shape[0]
)
word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
words_dict[uniq_id] = hyp_words
word_ts_dict[uniq_id] = word_ts
return words_dict, word_ts_dict
@staticmethod
def clean_trans_and_TS(trans: str, char_ts: List[str]) -> Tuple[str, List[str]]:
"""
Remove the spaces at the beginning and the end of the transcript.
The char_ts list is trimmed accordingly so that it stays in sync with the transcript.
Args:
trans (str):
Character-level transcript string.
char_ts (list):
List containing the timestamps (int) for each character.
Returns:
trans (str):
Cleaned character-level transcript string.
char_ts (list):
List containing the cleaned timestamps for each character.
"""
assert (len(trans) > 0) and (len(char_ts) > 0)
assert len(trans) == len(char_ts)
trans = trans.lstrip()
diff_L = len(char_ts) - len(trans)
char_ts = char_ts[diff_L:]
trans = trans.rstrip()
diff_R = len(char_ts) - len(trans)
if diff_R > 0:
char_ts = char_ts[: -1 * diff_R]
return trans, char_ts
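# Illustrative sketch: leading and trailing spaces are trimmed from the transcript and
# the character timestamp list is trimmed by the same amounts:
#   ASRDecoderTimeStamps.clean_trans_and_TS(" hi ", [3, 4, 5, 6])  -> ('hi', [4, 5])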
def _get_spaces(self, trans: str, char_ts: List[str], time_stride: float) -> Tuple[float, List[str]]:
"""
Collect the space symbols with a list of words.
Args:
trans (str):
Character-level transcript string.
char_ts (list):
List containing the timestamps of the characters.
time_stride (float):
The size of the model stride in seconds.
Returns:
spaces_in_sec (list):
List containing the [start, end] time ranges of the space tokens.
word_list (list):
List containing the words from ASR inference.
"""
blank = ' '
spaces_in_sec, word_list = [], []
stt_idx = 0
assert (len(trans) > 0) and (len(char_ts) > 0), "Transcript and char_ts length should not be 0."
assert len(trans) == len(char_ts), "Transcript and timestamp lengths do not match."
# If there is a blank, update the time stamps of the space and the word.
for k, s in enumerate(trans):
if s == blank:
spaces_in_sec.append(
[round(char_ts[k] * time_stride, 2), round((char_ts[k + 1] - 1) * time_stride, 2)]
)
word_list.append(trans[stt_idx:k])
stt_idx = k + 1
# Add the last word
if len(trans) > stt_idx and trans[stt_idx] != blank:
word_list.append(trans[stt_idx:])
return spaces_in_sec, word_list
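# Illustrative sketch: spaces split the transcript into words, and each space is mapped
# to a [start, end] range in seconds using the model stride (here 0.04 s):
#   self._get_spaces("hi there", [10, 11, 12, 13, 14, 15, 16, 17], 0.04)
#   -> ([[0.48, 0.48]], ['hi', 'there'])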
def run_ASR_CitriNet_CTC(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[Dict, Dict]:
"""
Launch CitriNet ASR model and collect logit, timestamps and text output.
Args:
asr_model (class):
The loaded NeMo ASR model.
Returns:
words_dict (dict):
Dictionary containing the sequence of words from hypothesis.
word_ts_dict (dict):
Dictionary containing the timestamps of hypothesis words.
"""
words_dict, word_ts_dict = {}, {}
werbpe_ts = WERBPE_TS(
tokenizer=asr_model.tokenizer,
batch_dim_index=0,
use_cer=asr_model._cfg.get('use_cer', False),
ctc_decode=True,
dist_sync_on_step=True,
log_prediction=asr_model._cfg.get("log_prediction", False),
)
with torch.cuda.amp.autocast():
transcript_logits_list = asr_model.transcribe(
self.audio_file_list, batch_size=self.asr_batch_size, logprobs=True
)
for idx, logit_np in enumerate(transcript_logits_list):
uniq_id = get_uniqname_from_filepath(self.audio_file_list[idx])
if self.beam_search_decoder:
logging.info(
f"Running beam-search decoder with LM {self.ctc_decoder_params['pretrained_language_model']}"
)
hyp_words, word_ts = self.run_pyctcdecode(logit_np)
else:
log_prob = torch.from_numpy(logit_np)
greedy_predictions = log_prob.argmax(dim=-1, keepdim=False).unsqueeze(0)
logits_len = torch.from_numpy(np.array([log_prob.shape[0]]))
text, char_ts, word_ts = werbpe_ts.ctc_decoder_predictions_tensor_with_ts(
self.model_stride_in_secs, greedy_predictions, predictions_len=logits_len
)
hyp_words, word_ts = text[0].split(), word_ts[0]
word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
words_dict[uniq_id] = hyp_words
word_ts_dict[uniq_id] = word_ts
return words_dict, word_ts_dict
def set_buffered_infer_params(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[float, float, float]:
"""
Prepare the parameters for the buffered inference.
"""
cfg = copy.deepcopy(asr_model._cfg)
OmegaConf.set_struct(cfg.preprocessor, False)
# some changes for streaming scenario
cfg.preprocessor.dither = 0.0
cfg.preprocessor.pad_to = 0
cfg.preprocessor.normalize = "None"
preprocessor = nemo_asr.models.EncDecCTCModelBPE.from_config_dict(cfg.preprocessor)
preprocessor.to(asr_model.device)
# Disable config overwriting
OmegaConf.set_struct(cfg.preprocessor, True)
onset_delay = (
math.ceil(((self.total_buffer_in_secs - self.chunk_len_in_sec) / 2) / self.model_stride_in_secs) + 1
)
mid_delay = math.ceil(
(self.chunk_len_in_sec + (self.total_buffer_in_secs - self.chunk_len_in_sec) / 2)
/ self.model_stride_in_secs
)
tokens_per_chunk = math.ceil(self.chunk_len_in_sec / self.model_stride_in_secs)
return onset_delay, mid_delay, tokens_per_chunk
def run_ASR_BPE_CTC(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[Dict, Dict]:
"""
Launch CTC-BPE based ASR model and collect logit, timestamps and text output.
Args:
asr_model (class):
The loaded NeMo ASR model.
Returns:
words_dict (dict):
Dictionary containing the sequence of words from hypothesis.
word_ts_dict (dict):
Dictionary containing the time-stamps of words.
"""
torch.manual_seed(0)
torch.set_grad_enabled(False)
words_dict, word_ts_dict = {}, {}
werbpe_ts = WERBPE_TS(
tokenizer=asr_model.tokenizer,
batch_dim_index=0,
use_cer=asr_model._cfg.get('use_cer', False),
ctc_decode=True,
dist_sync_on_step=True,
log_prediction=asr_model._cfg.get("log_prediction", False),
)
frame_asr = FrameBatchASRLogits(
asr_model=asr_model,
frame_len=self.chunk_len_in_sec,
total_buffer=self.total_buffer_in_secs,
batch_size=self.asr_batch_size,
)
onset_delay, mid_delay, tokens_per_chunk = self.set_buffered_infer_params(asr_model)
onset_delay_in_sec = round(onset_delay * self.model_stride_in_secs, 2)
with torch.cuda.amp.autocast():
logging.info(f"Running ASR model {self.ASR_model_name}")
for idx, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
logging.info(f"[{idx+1}/{len(self.audio_file_list)}] FrameBatchASR: {audio_file_path}")
frame_asr.clear_buffer()
hyp, greedy_predictions_list, log_prob = get_wer_feat_logit(
audio_file_path,
frame_asr,
self.chunk_len_in_sec,
tokens_per_chunk,
mid_delay,
self.model_stride_in_secs,
)
if self.beam_search_decoder:
logging.info(
f"Running beam-search decoder with LM {self.ctc_decoder_params['pretrained_language_model']}"
)
log_prob = log_prob.unsqueeze(0).cpu().numpy()[0]
hyp_words, word_ts = self.run_pyctcdecode(log_prob, onset_delay_in_sec=onset_delay_in_sec)
else:
logits_len = torch.from_numpy(np.array([len(greedy_predictions_list)]))
greedy_predictions_list = greedy_predictions_list[onset_delay:]
greedy_predictions = torch.from_numpy(np.array(greedy_predictions_list)).unsqueeze(0)
text, char_ts, word_ts = werbpe_ts.ctc_decoder_predictions_tensor_with_ts(
self.model_stride_in_secs, greedy_predictions, predictions_len=logits_len
)
hyp_words, word_ts = text[0].split(), word_ts[0]
word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
words_dict[uniq_id] = hyp_words
word_ts_dict[uniq_id] = word_ts
return words_dict, word_ts_dict
def get_word_ts_from_spaces(self, char_ts: List[float], spaces_in_sec: List[float], end_stamp: float) -> List[List[float]]:
"""
Take word timestamps from the spaces from the decoded prediction.
Args:
char_ts (list):
List containing the timestamp for each character.
spaces_in_sec (list):
List containing the start and the end time of each space token.
end_stamp (float):
The end time of the session in sec.
Returns:
word_timestamps (list):
List containing the timestamps for the resulting words.
"""
end_stamp = min(end_stamp, (char_ts[-1] + 2))
start_stamp_in_sec = round(char_ts[0] * self.model_stride_in_secs, 2)
end_stamp_in_sec = round(end_stamp * self.model_stride_in_secs, 2)
# In case of one word output with no space information.
if len(spaces_in_sec) == 0:
word_timestamps = [[start_stamp_in_sec, end_stamp_in_sec]]
elif len(spaces_in_sec) > 0:
# word_timestamps_middle should be an empty list if len(spaces_in_sec) == 1.
word_timestamps_middle = [
[round(spaces_in_sec[k][1], 2), round(spaces_in_sec[k + 1][0], 2),]
for k in range(len(spaces_in_sec) - 1)
]
word_timestamps = (
[[start_stamp_in_sec, round(spaces_in_sec[0][0], 2)]]
+ word_timestamps_middle
+ [[round(spaces_in_sec[-1][1], 2), end_stamp_in_sec]]
)
return word_timestamps
def run_pyctcdecode(
self, logprob: np.ndarray, onset_delay_in_sec: float = 0, beam_width: int = 32
) -> Tuple[List[str], List[str]]:
"""
Launch pyctcdecode with the loaded pretrained language model.
Args:
logprob (np.ndarray):
The log probability from the ASR model inference in numpy array format.
onset_delay_in_sec (float):
The amount of delay that needs to be compensated for the timestamp outputs from pyctcdecode.
beam_width (int):
The beam width parameter for beam search decoding.
Returns:
hyp_words (list):
List containing the words in the hypothesis.
word_ts (list):
List containing the word timestamps from the decoder.
"""
beams = self.beam_search_decoder.decode_beams(logprob, beam_width=self.ctc_decoder_params['beam_width'])
word_ts_beam, words_beam = [], []
for idx, (word, _) in enumerate(beams[0][2]):
ts = self.get_word_ts_from_wordframes(idx, beams[0][2], self.model_stride_in_secs, onset_delay_in_sec)
word_ts_beam.append(ts)
words_beam.append(word)
hyp_words, word_ts = words_beam, word_ts_beam
return hyp_words, word_ts
@staticmethod
def get_word_ts_from_wordframes(
idx, word_frames: List[List[float]], frame_duration: float, onset_delay: float, word_block_delay: float = 2.25
):
"""
Extract word timestamps from word frames generated from pyctcdecode.
"""
offset = -1 * word_block_delay * frame_duration - onset_delay
frame_begin = word_frames[idx][1][0]
if frame_begin == -1:
frame_begin = word_frames[idx - 1][1][1] if idx != 0 else 0
frame_end = word_frames[idx][1][1]
return [
round(max(frame_begin * frame_duration + offset, 0), 2),
round(max(frame_end * frame_duration + offset, 0), 2),
]
@staticmethod
def align_decoder_delay(word_ts, decoder_delay_in_sec: float):
"""
Subtract decoder_delay_in_sec from the word timestamp output.
"""
for k in range(len(word_ts)):
word_ts[k] = [
round(word_ts[k][0] - decoder_delay_in_sec, 2),
round(word_ts[k][1] - decoder_delay_in_sec, 2),
]
return word_ts
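# Illustrative sketch: every [start, end] pair is shifted back by the decoder delay:
#   ASRDecoderTimeStamps.align_decoder_delay([[1.0, 1.5], [2.0, 2.4]], 0.08)
#   -> [[0.92, 1.42], [1.92, 2.32]]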
|
NeMo-main
|
nemo/collections/asr/parts/utils/decoder_timestamps_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import shutil
from collections import defaultdict
from typing import IO, Dict, List, Optional, Tuple
import numpy as np
import torch
from scipy.stats import beta, gamma
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.perturb import AudioAugmentor
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_ctm, write_manifest, write_text
from nemo.collections.asr.parts.utils.speaker_utils import labels_to_rttmfile
from nemo.utils import logging
def get_cleaned_base_path(output_dir: str, overwrite_output: bool = True) -> str:
"""
Delete the output directory if it is non-empty and `overwrite_output` is True; raise an exception if it is non-empty and overwriting is not allowed.
Args:
output_dir (str): Path to output directory
overwrite_output (bool): If True, delete output directory if it exists
Returns:
basepath (str): Path to base-path directory for writing output files
"""
if os.path.isdir(output_dir) and os.listdir(output_dir):
if overwrite_output:
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.mkdir(output_dir)
else:
raise Exception("Output directory is nonempty and overwrite_output = false")
elif not os.path.isdir(output_dir):
os.makedirs(output_dir)
# only add root if paths are relative
if not os.path.isabs(output_dir):
ROOT = os.getcwd()
basepath = os.path.join(ROOT, output_dir)
else:
basepath = output_dir
return basepath
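# Minimal usage sketch (hypothetical directory name): calling
#   basepath = get_cleaned_base_path("sim_output", overwrite_output=True)
# removes an existing non-empty "sim_output", recreates it, and returns its absolute path.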
def binary_search_alignments(
inds: List[int], max_audio_read_sec: float, min_alignment_count: int, alignments: List[float],
) -> int:
"""
Binary search to find the index of the alignment that satisfies the maximum audio read duration,
`max_audio_read_sec`. This is used to avoid reading the short audio files.
NOTE: `offset_max` should be at least 1 to avoid feeding max=0 to random sampling function.
Args:
inds (list): List of indices to search from
max_audio_read_sec (float): Maximum audio read duration
min_alignment_count (int): Minimum number of alignments to read
alignments (list): List of alignment timestamps of the audio file
Returns:
offset_max (int): Index of the alignment that satisfies the maximum audio read duration
"""
# Start from the left end (at index 0) and -1 * min_alignment_count for the right end
left, right = 0, len(inds) - 1 - min_alignment_count
while left < right:
mid = left + (right - left) // 2
dur_left = alignments[-1 * min_alignment_count] - alignments[inds[mid]]
if dur_left < max_audio_read_sec:
right = mid - 1
elif dur_left > max_audio_read_sec:
left = mid + 1
else:
break
mid_out = left + (right - left) // 2
# If mid_out is on the boundary, move it to the left.
if alignments[-1 * min_alignment_count] - alignments[inds[mid_out]] < max_audio_read_sec:
mid_out -= 1
offset_max = max(mid_out, 1)
return offset_max
def get_subset_of_audio_manifest(
audio_manifest: dict, offset_index: int, max_audio_read_sec: float, min_alignment_count: int,
) -> dict:
"""
Get a subset of `audio_manifest` for faster audio-file reading.
Args:
audio_manifest (dict): Audio manifest dictionary.
keys: 'offset', 'duration', 'alignments', 'words'
offset_index (int): Index of the offset.
max_audio_read_sec (float): Maximum audio read duration.
min_alignment_count (int): Minimum number of alignments to read.
Returns:
audio_manifest (dict): Subset of `audio_manifest` is returned for `words` and `alignments` keys.
"""
alignment_array = np.array(audio_manifest['alignments'])
alignment_array_pr = np.array(alignment_array[offset_index:]) - alignment_array[offset_index]
subset_alignments = alignment_array_pr[alignment_array_pr < max_audio_read_sec]
if len(subset_alignments) < min_alignment_count:
# Cases where the word next to the offset is longer than the max_audio_read_sec.
logging.warning(
f"subset_alignments of {audio_manifest['audio_filepath']} \n"
f"has subset alignment length:{len(subset_alignments)} at offset_index:{offset_index}, "
f"word:{audio_manifest['words'][offset_index:offset_index+min_alignment_count]}, "
f"alignments:{alignment_array_pr[:min_alignment_count]} which is longer than _max_audio_read_sec:{[0, max_audio_read_sec]}."
" Truncating the alignements."
)
# Attach the `_max_audio_read_sec` to the `subset_alignments` to truncate the alignment timestamp.
subset_alignments = np.concatenate([subset_alignments, np.array([max_audio_read_sec])])
audio_manifest['offset'], audio_manifest['duration'] = (
alignment_array[offset_index],
subset_alignments[-1] - subset_alignments[0],
)
audio_manifest['alignments'] = subset_alignments.tolist()
audio_manifest['words'] = audio_manifest['words'][offset_index : offset_index + len(subset_alignments)]
return audio_manifest
def read_audio_from_buffer(
audio_manifest: dict,
buffer_dict: dict,
offset_index: int,
device: torch.device,
max_audio_read_sec: float = 2.5,
min_alignment_count: int = 2,
read_subset: bool = True,
) -> Tuple[torch.Tensor, int, dict]:
"""
Read from the provided file path while maintaining a hash-table that saves loading time.
Also, this function only reads a subset of the audio file if `read_subset` is True for faster audio-file reading.
Args:
audio_manifest (dict): Audio manifest dictionary.
keys: 'audio_filepath', 'duration', 'alignments', 'words'
buffer_dict (dict): Hash-table that saves loaded audio files.
offset_index (int): Index of the offset for the audio file.
device (torch.device): Device to load the audio file.
max_audio_read_sec (float): Maximum audio read duration.
min_alignment_count (int): Minimum number of alignments to read.
read_subset (bool): If True, read a subset of the audio file.
To control the length of the audio file, use data_simulator.session_params.max_audio_read_sec.
Note that using a large value (greater than 3~4 sec) for `max_audio_read_sec` will slow down the generation process.
If False, read the entire audio file.
Returns:
audio_file (torch.Tensor): Time-series audio data in a tensor.
sr (int): Sample rate of the audio file.
audio_manifest (dict): (modified) audio manifest dictionary.
"""
audio_file_id = f"{audio_manifest['audio_filepath']}#{offset_index}"
if audio_file_id in buffer_dict:
audio_file, sr, audio_manifest = buffer_dict[audio_file_id]
else:
if read_subset:
audio_manifest = get_subset_of_audio_manifest(
audio_manifest=audio_manifest,
offset_index=offset_index,
max_audio_read_sec=max_audio_read_sec,
min_alignment_count=min_alignment_count,
)
segment = AudioSegment.from_file(
audio_file=audio_manifest['audio_filepath'],
offset=audio_manifest['offset'],
duration=audio_manifest['duration'],
)
else:
segment = AudioSegment.from_file(audio_file=audio_manifest['audio_filepath'])
audio_file, sr = torch.from_numpy(segment.samples).to(device), segment.sample_rate
if read_subset and segment.duration < (audio_manifest['alignments'][-1] - audio_manifest['alignments'][0]):
audio_manifest['alignments'][-1] = min(segment.duration, audio_manifest['alignments'][-1])
if audio_file.ndim > 1:
audio_file = torch.mean(audio_file, 1, False).to(device)
buffer_dict[audio_file_id] = (audio_file, sr, audio_manifest)
return audio_file, sr, audio_manifest
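# Illustrative sketch (not part of the original module): the caching pattern used by
# `read_audio_from_buffer`, reduced to a toy loader. The file path and the zero tensor
# standing in for the decoded audio are hypothetical.
def _demo_buffered_read(buffer_dict: dict, audio_filepath: str = "/tmp/example.wav", offset_index: int = 0):
    import torch

    audio_file_id = f"{audio_filepath}#{offset_index}"  # one cache entry per (file, offset) pair
    if audio_file_id in buffer_dict:
        return buffer_dict[audio_file_id]  # cache hit: no disk access needed
    audio, sr = torch.zeros(16000), 16000  # stand-in for the real AudioSegment.from_file() load
    buffer_dict[audio_file_id] = (audio, sr)
    return buffer_dict[audio_file_id]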
def perturb_audio(
audio: torch.Tensor, sr: int, augmentor: Optional[AudioAugmentor] = None, device: Optional[torch.device] = None
) -> torch.Tensor:
"""
Perturb the audio (segment or session) using audio augmentor.
Args:
audio (torch.Tensor): Time-series signal of the segment
sr (int): Sample rate of the original audio file
augmentor (AudioAugmentor): Audio augmentor to use
device (torch.device): Device to load the audio file
Returns:
audio (torch.Tensor): Perturbed audio (time-series signal) of the segment
"""
if augmentor is None:
return audio
device = device if device is not None else torch.device('cpu')
if isinstance(audio, torch.Tensor):
audio = audio.cpu().numpy()
audio_segment = AudioSegment(audio, sample_rate=sr)
augmentor.perturb(audio_segment)
audio_segment = torch.from_numpy(audio_segment.samples).to(device)
return audio_segment
def normalize_audio(array: torch.Tensor) -> torch.Tensor:
"""
Normalize the audio signal to avoid clipping.
Args:
array (torch.Tensor): Time-series audio data in a tensor.
Returns:
(torch.Tensor): Normalized audio signal.
"""
return array / (1.0 * torch.max(torch.abs(array)))
def get_power_of_audio_file(audio_file: torch.Tensor, end_audio_file: int, running_len_samples: int, device: torch.device):
"""
Calculate the power of the audio signal.
Args:
audio_file (torch.Tensor): Time-series audio data in a tensor.
end_audio_file (int): End index of the audio file.
running_len_samples (int): Running length of the audio file.
device (torch.device): Device to use.
Returns:
(torch.Tensor): Power of the audio signal as a scalar tensor.
"""
return torch.mean(audio_file[: end_audio_file - running_len_samples] ** 2).to(device)
def get_scaled_audio_signal(
audio_file: torch.Tensor,
end_audio_file: int,
running_len_samples: int,
desired_avg_power_noise: float,
device: torch.device,
):
"""
Scale the audio signal to the desired average power.
Args:
audio_file (torch.Tensor): Time-series audio data in a tensor.
end_audio_file (int): End index of the audio file.
running_len_samples (int): Running length of the audio file.
desired_avg_power_noise (float): Desired average power of the audio file.
device (torch.device): Device to use.
Returns:
scaled_audio_file (torch.Tensor): Scaled audio signal.
"""
pow_audio_file = get_power_of_audio_file(
audio_file=audio_file, end_audio_file=end_audio_file, running_len_samples=running_len_samples, device=device
)
scaled_audio_file = audio_file[: end_audio_file - running_len_samples] * torch.sqrt(
desired_avg_power_noise / pow_audio_file
).to(device)
return scaled_audio_file
def get_desired_avg_power_noise(
power_array: float, snr_min: float, snr_max: float, background_noise_snr: float,
):
"""
Calculate the desired average power of the noise.
Args:
power_array (float): Power of the audio signal.
snr_min (float): Minimum SNR.
snr_max (float): Maximum SNR.
background_noise_snr (float): SNR of the background noise.
Returns:
desired_avg_power_noise (float): Desired average power of the noise.
desired_snr (float): The SNR value (in dB) used to derive the desired noise power.
"""
if (snr_min is not None) and (snr_max is not None) and (snr_min <= snr_max):
desired_snr = np.random.uniform(snr_min, snr_max)
else:
desired_snr = background_noise_snr
ratio = 10 ** (desired_snr / 20)
desired_avg_power_noise = power_array / ratio
return desired_avg_power_noise, desired_snr
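# Illustrative sketch (not part of the original module): how a sampled SNR value becomes a
# target noise power and then a per-sample scaling factor, following the same formulas as
# `get_desired_avg_power_noise` and `get_scaled_audio_signal`. All numbers are hypothetical.
def _demo_noise_scaling() -> float:
    import torch

    power_speech = 0.01  # mean squared amplitude of the speech signal
    desired_snr = 40.0  # dB, e.g. sampled uniformly from [snr_min, snr_max]
    desired_noise_power = power_speech / (10 ** (desired_snr / 20))  # same ratio as in the module
    noise = torch.randn(16000) * 0.05
    current_noise_power = torch.mean(noise ** 2)
    scaled_noise = noise * torch.sqrt(desired_noise_power / current_noise_power)
    return float(torch.mean(scaled_noise ** 2))  # approximately equals desired_noise_power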
def get_background_noise(
len_array: int,
power_array: float,
noise_samples: list,
audio_read_buffer_dict: dict,
snr_min: float,
snr_max: float,
background_noise_snr: float,
seed: int,
device: torch.device,
):
"""
Augment with background noise (inserting ambient background noise up to the desired SNR for the full clip).
Args:
len_array (int): Length of background noise required.
power_array (float): Power of the audio signal.
noise_samples (list): List of noise samples.
audio_read_buffer_dict (dict): Dictionary containing audio read buffer.
snr_min (float): Minimum SNR.
snr_max (float): Maximum SNR.
background_noise_snr (float): SNR of the background noise.
seed (int): Seed for random number generator.
device (torch.device): Device to use.
Returns:
bg_array (tensor): Tensor containing background noise.
desired_snr (float): Desired SNR for adding background noise.
"""
np.random.seed(seed)
bg_array = torch.zeros(len_array).to(device)
desired_avg_power_noise, desired_snr = get_desired_avg_power_noise(
power_array=power_array, snr_min=snr_min, snr_max=snr_max, background_noise_snr=background_noise_snr
)
running_len_samples = 0
while running_len_samples < len_array: # build background audio stream (the same length as the full file)
file_id = np.random.randint(len(noise_samples))
audio_file, sr, audio_manifest = read_audio_from_buffer(
audio_manifest=noise_samples[file_id],
buffer_dict=audio_read_buffer_dict,
offset_index=0,
device=device,
read_subset=False,
)
if running_len_samples + len(audio_file) < len_array:
end_audio_file = running_len_samples + len(audio_file)
else:
end_audio_file = len_array
scaled_audio_file = get_scaled_audio_signal(
audio_file=audio_file,
end_audio_file=end_audio_file,
running_len_samples=running_len_samples,
desired_avg_power_noise=desired_avg_power_noise,
device=device,
)
bg_array[running_len_samples:end_audio_file] = scaled_audio_file
running_len_samples = end_audio_file
return bg_array, desired_snr
def get_random_offset_index(
audio_manifest: dict,
audio_read_buffer_dict: dict,
offset_min: int = 0,
max_audio_read_sec: float = 2.5,
min_alignment_count: int = 2,
) -> int:
"""
Get an index for randomly accessing the silence in alignment timestamps.
Args:
audio_manifest (dict): Audio manifest dictionary.
keys: 'audio_filepath', 'duration', 'alignments', 'words'
audio_read_buffer_dict (dict): Dictionary containing audio read buffer.
offset_min (int): Minimum offset index. (Default: 0)
max_audio_read_sec (float): Maximum audio read duration in seconds. (Default: 2.5)
min_alignment_count (int): Minimum number of alignment timestamps. (Default: 2)
Returns:
(int): Random offset index smaller than `offset_count`.
"""
if len(audio_manifest['alignments']) <= min_alignment_count:
raise ValueError(
f"Audio file {audio_manifest['audio_filepath']} has less than {min_alignment_count} alignment timestamps."
)
index_file_id = f"{audio_manifest['audio_filepath']}#index"
# Avoid multiple indexings of the same audio file by using a hash-table.
if index_file_id in audio_read_buffer_dict:
(sil_inds, offset_max) = audio_read_buffer_dict[index_file_id]
else:
# Find all silence indices
sil_inds = np.where(np.array(audio_manifest['words']) == '')[0]
if audio_manifest['alignments'][-1] - audio_manifest['alignments'][0] < max_audio_read_sec:
# The total duration is already short, therefore skip range search.
offset_max = 1
else:
# Find the range that satisfies `max_audio_read_sec` duration.
offset_max = binary_search_alignments(
inds=sil_inds,
max_audio_read_sec=max_audio_read_sec,
min_alignment_count=min_alignment_count,
alignments=audio_manifest['alignments'],
)
audio_read_buffer_dict[index_file_id] = (sil_inds, offset_max)
# If the audio file is shorter than the max_audio_read_sec, then we don't need to read a subset of the audio file.
if (
len(sil_inds) <= min_alignment_count
or (audio_manifest['alignments'][-1] - audio_manifest['alignments'][0]) < max_audio_read_sec
):
return offset_min
else:
offset_index = np.random.randint(offset_min, offset_max)
return sil_inds[offset_index]
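# Illustrative sketch (not part of the original module): how silence positions in the word
# list become candidate offset indices, as in `get_random_offset_index`. The toy words and
# alignments below are hypothetical.
def _demo_silence_offset_candidates():
    import numpy as np

    words = ["", "hello", "", "world", "", "again"]
    alignments = [0.0, 0.5, 0.7, 1.4, 1.6, 2.3]
    sil_inds = np.where(np.array(words) == "")[0]  # -> [0, 2, 4]
    # In the module, the random draw is limited to `offset_max`, chosen so that enough
    # alignment time (`max_audio_read_sec`) remains after the selected offset.
    offset_index = int(np.random.choice(sil_inds))
    return sil_inds.tolist(), offset_index, alignments[offset_index]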
def get_speaker_ids(sess_idx: int, speaker_samples: dict, permutated_speaker_inds: list) -> List[str]:
"""
Randomly select speaker IDs from the loaded manifest file.
Args:
sess_idx (int): Session index in integer.
speaker_samples (dict): Dictionary mapping speaker ID to their list of samples.
permutated_speaker_inds (list): List of permutated speaker indices.
Returns:
speaker_ids (list): List of speaker IDs
"""
all_speaker_ids = list(speaker_samples.keys())
idx_list = permutated_speaker_inds[sess_idx, :]
speaker_ids = [all_speaker_ids[i] for i in idx_list]
return speaker_ids
def build_speaker_samples_map(manifest: dict) -> dict:
"""
Build a dictionary for mapping speaker ID to their list of samples
Returns:
speaker_samples (Dict[list]):
Dictionary mapping speaker ID to their list of samples
"""
speaker_samples = defaultdict(list)
logging.info("Building speaker to samples map...")
for sample in tqdm(manifest, total=len(manifest)):
speaker_id = sample['speaker_id']
speaker_samples[speaker_id].append(sample)
return speaker_samples
def read_noise_manifest(add_bg: bool, background_manifest: str):
"""
Read the noise manifest file and sample the noise manifest.
Args:
add_bg (bool): Whether to add background noise.
background_manifest (str): Path to the background noise manifest file.
Returns:
noise_manifest (list): List of the entire noise source samples.
"""
noise_manifest = []
if add_bg is True:
if background_manifest is not None:
background_manifest_list = background_manifest
if isinstance(background_manifest_list, str):
background_manifest_list = [background_manifest_list]
for background_manifest in background_manifest_list:
if os.path.exists(background_manifest):
noise_manifest += read_manifest(background_manifest)
else:
raise FileNotFoundError(f"Noise manifest file: {background_manifest} file not found.")
else:
raise FileNotFoundError(
f"Noise manifest file is {background_manifest}. Please provide a valid noise manifest file/list if add_bg=True."
)
return noise_manifest
def get_speaker_samples(speaker_ids: List[str], speaker_samples: dict) -> Dict[str, list]:
"""
Get a list of the samples for each of the specified speakers.
Args:
speaker_ids (list): LibriSpeech speaker IDs for each speaker in the current session.
speaker_samples (dict): Dictionary mapping speaker ID to their list of samples.
Returns:
speaker_wav_align_map (dict): Dictionary containing speaker IDs and their corresponding wav filepath and alignments.
"""
speaker_wav_align_map = defaultdict(list)
for sid in speaker_ids:
speaker_wav_align_map[sid] = speaker_samples[sid]
return speaker_wav_align_map
def add_silence_to_alignments(audio_manifest: dict):
"""
Add silence to the beginning of the alignments and words.
Args:
audio_manifest (dict): Audio manifest dictionary.
keys: 'audio_filepath', 'duration', 'alignments', 'words'
Returns:
audio_manifest (dict): Audio manifest dictionary with silence added to the beginning.
"""
if isinstance(audio_manifest['words'][0], str) and len(audio_manifest['words'][0]) > 0:
audio_manifest['words'].insert(0, "")
audio_manifest['alignments'].insert(0, 0.0)
return audio_manifest
def load_speaker_sample(
speaker_wav_align_map: List[dict], speaker_ids: List[str], speaker_turn: int, min_alignment_count: int,
) -> dict:
"""
Load a sample for the selected speaker ID.
The first alignment and word must be silence that determines the start of the alignments.
Args:
speaker_wav_align_map (dict): Dictionary containing speaker IDs and their corresponding wav filepath and alignments.
speaker_ids (list): LibriSpeech speaker IDs for each speaker in the current session.
speaker_turn (int): Current speaker turn.
min_alignment_count (int): Minimum number of alignments in the audio file.
Returns:
audio_manifest (dict): Audio manifest dictionary containing the wav filepath, words and alignments.
"""
speaker_id = speaker_ids[speaker_turn]
file_id = np.random.randint(0, max(len(speaker_wav_align_map[str(speaker_id)]) - 1, 1))
audio_manifest = speaker_wav_align_map[str(speaker_id)][file_id]
# Check that the alignment file has at least `min_alignment_count` alignments.
if len(audio_manifest['alignments']) < min_alignment_count:
raise ValueError(
f"Alignment file {audio_manifest['audio_filepath']} has too few alignments: {len(audio_manifest['alignments'])} < {min_alignment_count}."
)
# Check whether the first word is silence and insert a silence token if the first token is not silence.
if audio_manifest['words'][0] != "":
audio_manifest = add_silence_to_alignments(audio_manifest)
audio_manifest = copy.deepcopy(audio_manifest)
return audio_manifest
def get_split_points_in_alignments(
words: List[str],
alignments: List[float],
split_buffer: float,
sr: int,
sentence_audio_len: int,
new_start: float = 0,
):
"""
Collect split points in the alignment based on silence.
Silence is defined as a blank symbol between two words that is longer than 2 * split_buffer.
Args:
words (List[str]): List of words in the sentence.
alignments (List[float]): List of alignment timestamps in the sentence.
split_buffer (float): Buffer length in seconds.
sr (int): Sample rate of the audio.
sentence_audio_len (int): Length of the sentence audio in samples.
new_start (float): Start of the sentence audio in seconds.
Returns:
splits (List[List[int]]): List of integer split points in the sentence audio.
"""
splits = []
for i in range(len(words)):
if words[i] == "" and i != 0 and i != len(words) - 1:
silence_length = alignments[i] - alignments[i - 1]
if silence_length > 2 * split_buffer: # split utterance on silence
new_end = alignments[i - 1] + split_buffer
splits.append(
[int(new_start * sr), int(new_end * sr),]
)
new_start = alignments[i] - split_buffer
# The last split point should be added
splits.append([int(new_start * sr), sentence_audio_len])
return splits
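# Illustrative sketch (not part of the original module): splitting a sentence on long
# silences, mirroring the loop in `get_split_points_in_alignments`. The toy inputs and the
# `_demo_` name are hypothetical.
def _demo_split_points(sr: int = 16000, split_buffer: float = 0.1):
    words = ["", "hi", "", "there"]
    alignments = [0.0, 0.4, 1.2, 1.8]  # the 0.8 sec gap after "hi" exceeds 2 * split_buffer
    sentence_audio_len = int(alignments[-1] * sr)
    splits, new_start = [], 0.0
    for i in range(1, len(words) - 1):
        if words[i] == "":
            silence_length = alignments[i] - alignments[i - 1]
            if silence_length > 2 * split_buffer:  # end the current split just after the last word
                splits.append([int(new_start * sr), int((alignments[i - 1] + split_buffer) * sr)])
                new_start = alignments[i] - split_buffer  # next split starts just before the next word
    splits.append([int(new_start * sr), sentence_audio_len])
    return splits  # -> [[0, 8000], [17600, 28800]]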
def per_speaker_normalize(
sentence_audio: torch.Tensor, splits: List[List[int]], speaker_turn: int, volume: List[float], device: torch.device
) -> torch.Tensor:
"""
Normalize time-series audio signal per speaker.
Args:
sentence_audio (torch.Tensor): Time-series audio signal.
splits (List[List[int]]): List of integer split points in the sentence audio.
speaker_turn (int): Speaker ID of the current speaker.
volume (List[float]): List of volume levels for each speaker.
device (torch.device): Device to use for computations.
Returns:
sentence_audio (torch.Tensor): Normalized time-series audio signal.
"""
split_length = torch.tensor(0).to(device).double()
split_sum = torch.tensor(0).to(device).double()
for split in splits:
split_length += len(sentence_audio[split[0] : split[1]])
split_sum += torch.sum(sentence_audio[split[0] : split[1]] ** 2)
average_rms = torch.sqrt(split_sum * 1.0 / split_length)
sentence_audio = sentence_audio / (1.0 * average_rms) * volume[speaker_turn]
return sentence_audio
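# Illustrative sketch (not part of the original module): RMS-normalizing only the voiced
# regions of a sentence and applying a per-speaker volume, as in `per_speaker_normalize`.
# The toy tensor, split points and volume are hypothetical.
def _demo_per_speaker_normalize():
    import torch

    sentence_audio = torch.randn(32000) * 0.3
    splits = [[0, 8000], [17600, 28800]]  # voiced regions in samples
    volume = 0.7  # target per-speaker volume
    voiced = torch.cat([sentence_audio[s:e] for s, e in splits])
    average_rms = torch.sqrt(torch.mean(voiced ** 2))  # RMS computed over voiced samples only
    return sentence_audio / average_rms * volume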
class DataAnnotator(object):
"""
Class containing the functions that create RTTM, CTM, JSON files.
Arguments in config:
data_simulator:
session_config:
num_speakers (int): Number of unique speakers per multispeaker audio session
session_params:
split_buffer (float): Split RTTM labels if greater than twice this amount of silence (to avoid long gaps between
utterances as being labelled as speech)
outputs:
output_dir (str): Output directory for audio sessions and corresponding label files
output_filename (str): Output filename for the wav and RTTM files
overwrite_output (bool): If true, delete the output directory if it exists
output_precision (int): Number of decimal places in output files
"""
def __init__(self, cfg):
"""
Args:
cfg: OmegaConf configuration loaded from yaml file.
"""
self._params = cfg
self._files = {}
self._init_file_write()
self._init_filelist_lists()
def _init_file_write(self):
"""
Initialize file writing arguments
"""
self._file_base_str = "synthetic"
self._file_types = ["wav", "rttm", "json", "ctm", "txt", "meta"]
self._annotation_types = ["rttm", "json", "ctm"]
def _init_filelist_lists(self):
"""
Initialize lists to store the filelists for each file type
"""
self.annote_lists = {}
for file_type in self._file_types:
self.annote_lists[f"{file_type}_list"] = []
def init_annotation_lists(self):
"""
Initialize lists to store the annotations for each file type
"""
for file_type in self._file_types:
self.annote_lists[file_type] = []
def create_new_rttm_entry(
self, words: List[str], alignments: List[float], start: int, end: int, speaker_id: int
) -> List[str]:
"""
Create new RTTM entries (to write to output rttm file)
Args:
words (list): List of words in the current audio file.
alignments (list): List of alignments (timestamps) for the current audio file.
start (int): Current start of the audio file being inserted.
end (int): End of the audio file being inserted.
speaker_id (int): LibriSpeech speaker ID for the current entry.
Returns:
rttm_list (list): List of RTTM entries (an illustrative sketch of the entry layout follows this class).
"""
rttm_list = []
new_start = start
# look for split locations
for i in range(len(words)):
if words[i] == "" and i != 0 and i != len(words) - 1:
silence_length = alignments[i] - alignments[i - 1]
if (
silence_length > 2 * self._params.data_simulator.session_params.split_buffer
): # split utterance on silence
new_end = start + alignments[i - 1] + self._params.data_simulator.session_params.split_buffer
t_stt = round(float(new_start), self._params.data_simulator.outputs.output_precision)
t_end = round(float(new_end), self._params.data_simulator.outputs.output_precision)
rttm_list.append(f"{t_stt} {t_end} {speaker_id}")
new_start = start + alignments[i] - self._params.data_simulator.session_params.split_buffer
t_stt = round(float(new_start), self._params.data_simulator.outputs.output_precision)
t_end = round(float(end), self._params.data_simulator.outputs.output_precision)
rttm_list.append(f"{t_stt} {t_end} {speaker_id}")
return rttm_list
def create_new_json_entry(
self,
text: List[str],
wav_filename: str,
start: float,
length: float,
speaker_id: int,
rttm_filepath: str,
ctm_filepath: str,
) -> dict:
"""
Create new JSON entries (to write to output json file).
Args:
text (list): string of text for the current entry.
wav_filename (str): Filename of the wav file.
start (float): Start time of the current entry.
length (float): Length of the current entry.
speaker_id (int): speaker ID for the current entry.
rttm_filepath (str): Path to the RTTM file.
ctm_filepath (str): Path to the CTM file.
Returns:
meta (dict): JSON entry dictionary.
"""
start = round(float(start), self._params.data_simulator.outputs.output_precision)
length = round(float(length), self._params.data_simulator.outputs.output_precision)
meta = {
"audio_filepath": wav_filename,
"offset": start,
"duration": length,
"label": speaker_id,
"text": text,
"num_speakers": self._params.data_simulator.session_config.num_speakers,
"rttm_filepath": rttm_filepath,
"ctm_filepath": ctm_filepath,
"uem_filepath": None,
}
return meta
def create_new_ctm_entry(
self, words: List[str], alignments: List[float], session_name: str, speaker_id: int, start: int
) -> List[str]:
"""
Create new CTM entry (to write to output ctm file)
Args:
words (list): List of words in the current audio file.
alignments (list): List of alignments (timestamps) for the current audio file.
session_name (str): Current session name.
speaker_id (int): LibriSpeech speaker ID for the current entry.
start (int): Current start of the audio file being inserted.
Returns:
arr (list): List of ctm entries
"""
arr = []
start = float(round(start, self._params.data_simulator.outputs.output_precision))
for i in range(len(words)):
word = words[i]
if (
word != ""
): # note that using the current alignments the first word is always empty, so there is no error from indexing the array with i-1
prev_align = 0 if i == 0 else alignments[i - 1]
align1 = round(float(prev_align + start), self._params.data_simulator.outputs.output_precision)
align2 = round(float(alignments[i] - prev_align), self._params.data_simulator.outputs.output_precision)
text = f"{session_name} {speaker_id} {align1} {align2} {word} 0\n"
arr.append((align1, text))
return arr
def add_to_filename_lists(self, basepath: str, filename: str):
"""
Add the current filename to the list of filenames for each file type.
Args:
basepath (str): Basepath for output files.
filename (str): Base filename for all output files.
"""
full_base_filepath = os.path.join(basepath, filename)
for file_type in self._file_types:
self.annote_lists[f"{file_type}_list"].append(f"{full_base_filepath}.{file_type}")
def write_filelist_files(self, basepath):
"""
Write all filelist files.
Args:
basepath (str): Basepath for output files.
"""
for file_type in self._file_types:
with open(f"{basepath}/{self._file_base_str}_{file_type}.list", "w") as list_file:
list_file.write("\n".join(self.annote_lists[f"{file_type}_list"]))
def write_annotation_files(self, basepath: str, filename: str, meta_data: dict):
"""
Write all annotation files: RTTM, JSON, CTM, TXT, and META.
Args:
basepath (str): Basepath for output files.
filename (str): Base filename for all output files.
meta_data (dict): Metadata for the current session.
"""
labels_to_rttmfile(self.annote_lists['rttm'], filename, self._params.data_simulator.outputs.output_dir)
write_manifest(os.path.join(basepath, filename + '.json'), self.annote_lists['json'])
write_ctm(os.path.join(basepath, filename + '.ctm'), self.annote_lists['ctm'])
write_text(os.path.join(basepath, filename + '.txt'), self.annote_lists['ctm'])
write_manifest(os.path.join(basepath, filename + '.meta'), [meta_data])
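# Illustrative sketch (not part of the original class): the string layouts produced by
# `DataAnnotator.create_new_rttm_entry` ("start end speaker") and
# `DataAnnotator.create_new_ctm_entry` ("session speaker start duration word 0"), shown on
# hypothetical values.
def _demo_annotation_entries():
    start, end, speaker_id = 12.34, 15.78, "speaker_0"
    rttm_entry = f"{start} {end} {speaker_id}"  # later converted to a full RTTM line by labels_to_rttmfile
    session_name, word, word_start, word_dur = "synthetic_0", "hello", 12.5, 0.4
    ctm_text = f"{session_name} {speaker_id} {word_start} {word_dur} {word} 0\n"
    return rttm_entry, (word_start, ctm_text)  # CTM entries are (start, text) tuples, as in the class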
class SpeechSampler(object):
"""
Class for sampling speech samples for Multispeaker Audio Session Simulator
Args:
cfg: OmegaConf configuration loaded from yaml file.
Variables for sampling speech:
self.running_speech_len_samples (int): Running total of speech samples in the current audio session.
self.running_silence_len_samples (int): Running total of silence samples in the current audio session.
self.running_overlap_len_samples (int): Running total of overlap samples in the current audio session.
self.sess_silence_mean (float): Target mean silence ratio for the current audio session.
self.per_silence_min_len (int): Minimum number of silence samples in the silence segment.
self.per_silence_max_len (int): Maximum number of silence samples in the silence segment.
self.sess_overlap_mean (float): Target mean overlap ratio for the current audio session.
self.per_overlap_min_len (int): Minimum number of overlap samples in the overlap segment.
self.per_overlap_max_len (int): Maximum number of overlap samples in the overlap segment.
data_simulator:
session_params:
mean_silence (float): Mean proportion of silence to speaking time in the audio session. Should be in range [0, 1).
mean_silence_var (float): Variance for mean silence in all audio sessions.
This value should be 0 <= mean_silence_var < mean_silence * (1 - mean_silence).
per_silence_var (float): Variance for each silence in an audio session, set large values (e.g., 20) for de-correlation.
per_silence_min (float): Minimum duration for each silence, default to 0.
per_silence_max (float): Maximum duration for each silence, default to -1 for no maximum.
mean_overlap (float): Mean proportion of overlap in the overall non-silence duration. Should be in range [0, 1) and
recommend [0, 0.15] range for accurate results.
mean_overlap_var (float): Variance for mean overlap in all audio sessions.
This value should be 0 <= mean_overlap_var < mean_overlap * (1 - mean_overlap).
per_overlap_var (float): Variance for per overlap in each session, set large values to de-correlate silence lengths
with the latest speech segment lengths
per_overlap_min (float): Minimum per overlap duration in seconds
per_overlap_max (float): Maximum per overlap duration in seconds, set -1 for no maximum
"""
def __init__(self, cfg):
"""
Args:
cfg: OmegaConf configuration loaded from yaml file.
"""
self._params = cfg
self.running_speech_len_samples = 0
self.running_silence_len_samples = 0
self.running_overlap_len_samples = 0
self.sess_silence_mean = None
self.per_silence_min_len = 0
self.per_silence_max_len = 0
self.sess_overlap_mean = None
self.per_overlap_min_len = 0
self.per_overlap_max_len = 0
self.mean_overlap = float(self._params.data_simulator.session_params.mean_overlap)
self.mean_overlap_var = float(self._params.data_simulator.session_params.mean_overlap_var)
self.mean_silence = float(self._params.data_simulator.session_params.mean_silence)
self.mean_silence_var = float(self._params.data_simulator.session_params.mean_silence_var)
self.per_silence_var = float(self._params.data_simulator.session_params.per_silence_var)
self.per_overlap_var = float(self._params.data_simulator.session_params.per_overlap_var)
self.num_noise_files = int(self._params.data_simulator.background_noise.num_noise_files)
def _mean_var_to_a_and_b(self, mean: float, var: float) -> Tuple[float, float]:
"""
Convert mean and variance to a and b parameters for beta distribution.
Args:
mean (float): Mean of the beta distribution.
var (float): Variance of the beta distribution.
Returns:
Tuple[float, float]: a and b parameters for beta distribution.
"""
a = mean ** 2 * (1 - mean) / var - mean
b = mean * (1 - mean) ** 2 / var - (1 - mean)
return a, b
def _init_silence_params(self):
"""
Initialize parameters for silence insertion in the current session.
"""
self.running_speech_len_samples = 0
self.running_silence_len_samples = 0
self.per_silence_min_len = int(
max(0, self._params.data_simulator.session_params.per_silence_min) * self._params.data_simulator.sr
)
if self._params.data_simulator.session_params.per_silence_max > 0:
self.per_silence_max_len = int(
self._params.data_simulator.session_params.per_silence_max * self._params.data_simulator.sr
)
else:
self.per_silence_max_len = int(
self._params.data_simulator.session_config.session_length * self._params.data_simulator.sr
)
def _init_overlap_params(self):
"""
Initialize parameters for overlap insertion in the current session.
"""
self.running_overlap_len_samples = 0
self.per_overlap_min_len = int(
max(0, self._params.data_simulator.session_params.per_overlap_min) * self._params.data_simulator.sr
)
if self._params.data_simulator.session_params.per_overlap_max > 0:
self.per_overlap_max_len = int(
self._params.data_simulator.session_params.per_overlap_max * self._params.data_simulator.sr
)
else:
self.per_overlap_max_len = int(
self._params.data_simulator.session_config.session_length * self._params.data_simulator.sr
)
def silence_vs_overlap_selector(self, running_len_samples: int, non_silence_len_samples: int) -> bool:
"""
Compare the current silence ratio to the current overlap ratio. Switch to either silence or overlap mode according
to the amount of the gap between current ratio and session mean in config.
Args:
running_len_samples (int): Length of the current session in samples.
non_silence_len_samples (int): Length of the signal that is not silence in samples.
Returns:
add_overlap (bool): True if the current silence ratio is less than the current overlap ratio, False otherwise.
"""
if running_len_samples > 0:
self.current_silence_ratio = (running_len_samples - self.running_speech_len_samples) / running_len_samples
self.current_overlap_ratio = self.running_overlap_len_samples / non_silence_len_samples
else:
self.current_silence_ratio, self.current_overlap_ratio = 0, 0
# self.silence_discrepancy = max(0, self.sess_silence_mean - self.current_silence_ratio)
# self.overlap_discrepancy = max(0, self.sess_overlap_mean - self.current_overlap_ratio)
# threshold = self.silence_discrepancy / (self.overlap_discrepancy + self.silence_discrepancy + 1e-10)
# add_overlap = np.random.rand() > threshold
self.silence_discrepancy = self.current_silence_ratio - self.sess_silence_mean
self.overlap_discrepancy = self.current_overlap_ratio - self.sess_overlap_mean
add_overlap = bool(self.overlap_discrepancy < self.silence_discrepancy)
return add_overlap
def get_session_silence_mean(self):
"""
Get the target mean silence for current session using re-parameterized Beta distribution.
The following constraints are applied to make a > 0 and b > 0:
0 < mean_silence < 1
0 < mean_silence_var < mean_silence * (1 - mean_silence)
Returns:
sess_silence_mean (float):
Target mean silence for the current session (see the illustrative sketch after this class).
"""
self._init_silence_params()
mean, var = self.mean_silence, self.mean_silence_var
if var > 0:
a, b = self._mean_var_to_a_and_b(mean, var)
if a < 0 or b < 0:
raise ValueError(
f"Beta(a, b), a = {a:.3f} and b = {b:.3f} should be both greater than 0. "
f"Invalid `mean_silence_var` value {var} for sampling from Beta distribution. "
f"`mean_silence_var` should be less than `mean_silence * (1 - mean_silence)`. "
f"Please check `mean_silence_var` and try again."
)
self.sess_silence_mean = beta(a, b).rvs()
else:
self.sess_silence_mean = mean
return self.sess_silence_mean
def get_session_overlap_mean(self):
"""
Get the target mean overlap for current session using re-parameterized Beta distribution.
The following constraints are applied to make a > 0 and b > 0:
0 < mean_overlap < 1
0 < mean_overlap_var < mean_overlap * (1 - mean_overlap)
Returns:
overlap_mean (float):
Target mean overlap for the current session
"""
self._init_overlap_params()
mean, var = self.mean_overlap, self.mean_overlap_var
if var > 0:
a, b = self._mean_var_to_a_and_b(mean, var)
if a < 0 or b < 0:
raise ValueError(
f"Beta(a, b), a = {a:.3f} and b = {b:.3f} should be both greater than 0. "
f"Invalid `mean_overlap_var` value {var} for sampling from Beta distribution. "
f"`mean_overlap_var` should be less than `mean_overlap * (1 - mean_overlap)`. "
f"Please check `mean_overlap_var` and try again."
)
self.sess_overlap_mean = beta(a, b).rvs()
else:
self.sess_overlap_mean = mean
return self.sess_overlap_mean
def sample_from_silence_model(self, running_len_samples: int) -> int:
"""
Sample from the silence model to determine the amount of silence to add between sentences.
Gamma distribution is employed for modeling the highly skewed distribution of silence length distribution.
When we add silence between sentences, we want to ensure that the proportion of silence meets the `sess_silence_mean`.
Thus, [Session Silence Mean] = [Total Running Silence Time] / [Total Running Session Time] equation holds. We employ the following
formula to determine the amount of silence to add, which is `silence_mean`:
self.sess_silence_mean = (silence_mean + self.running_silence_len_samples) / (silence_mean + running_len_samples)
The above equation is setting `silence_mean` to yield the desired silence ratio `self.sess_silence_mean`.
We use the above `silence_mean` value to sample silence-length for each silence occurrence.
Args:
running_len_samples (int):
Running length of the session (in terms of number of samples).
Returns:
silence_amount (int): Amount of silence to add between sentences (in terms of number of samples).
"""
silence_mean = ((self.sess_silence_mean * running_len_samples) - self.running_silence_len_samples) / (
1 - self.sess_silence_mean
)
silence_mean = max(self.per_silence_min_len, min(silence_mean, self.per_silence_max_len))
if silence_mean > 0:
self.per_silence_var = self._params.data_simulator.session_params.per_silence_var
silence_amount = (
int(
gamma(
a=(silence_mean ** 2) / self.per_silence_var, scale=self.per_silence_var / silence_mean
).rvs()
)
if self.per_silence_var > 0
else int(silence_mean)
)
silence_amount = max(self.per_silence_min_len, min(silence_amount, self.per_silence_max_len))
else:
silence_amount = 0
return silence_amount
def sample_from_overlap_model(self, non_silence_len_samples: int):
"""
Sample from the overlap model to determine the amount of overlap between segments.
Gamma distribution is employed for modeling the highly skewed distribution of overlap length distribution.
When we add an overlap occurrence, we want to meet the desired overlap ratio defined by `self.sess_overlap_mean`.
Thus, [Session Overlap Mean] = [Total Running Overlap Speech Time] / [Total Running Non-Silence Speech Time].
Let `overlap_mean` be the desired overlap amount, then the mean and variance of the gamma distribution is given by:
self.sess_overlap_mean = (overlap_mean + self.running_overlap_len_samples) / (non_silence_len_samples - overlap_mean)
The above equation is setting `overlap_mean` to yield the desired overlap ratio `self.sess_overlap_mean`.
We use the above `overlap_mean` value to sample overlap-length for each overlap occurrence.
Args:
non_silence_len_samples (int):
The total amount of non-silence (speech) region regardless of overlap status
Returns:
desired_overlap_amount (int):
Amount of overlap between segments (in terms of number of samples).
"""
overlap_mean = ((self.sess_overlap_mean * non_silence_len_samples) - self.running_overlap_len_samples) / (
1 + self.sess_overlap_mean
)
overlap_mean = max(self.per_overlap_min_len, min(max(0, overlap_mean), self.per_overlap_max_len))
if overlap_mean > 0:
desired_overlap_amount = (
int(gamma(a=overlap_mean ** 2 / self.per_overlap_var, scale=self.per_overlap_var / overlap_mean).rvs())
if self.per_overlap_var > 0
else int(overlap_mean)
)
desired_overlap_amount = max(
self.per_overlap_min_len, min(desired_overlap_amount, self.per_overlap_max_len)
)
else:
desired_overlap_amount = 0
return desired_overlap_amount
def sample_noise_manifest(self, noise_manifest: list) -> list:
"""
Sample noise manifest to a specified count `num_noise_files` for the current simulated audio session.
Args:
noise_manifest (list):
List of noise source samples to be sampled from.
Returns:
sampled_noise_manifest (list):
List of noise samples to be used for the current session.
"""
num_noise_files = min(len(noise_manifest), self.num_noise_files)
sampled_noise_manifest = []
if num_noise_files > 0:
selected_noise_ids = np.random.choice(range(len(noise_manifest)), num_noise_files, replace=False)
for k in selected_noise_ids:
sampled_noise_manifest.append(noise_manifest[k])
return sampled_noise_manifest
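# Illustrative sketch (not part of the original class): the two statistical building blocks
# used by `SpeechSampler` -- re-parameterizing a Beta distribution by its mean and variance
# for the per-session silence/overlap targets, and drawing a per-occurrence length from a
# Gamma distribution with a given mean and variance. All numbers are hypothetical.
def _demo_speech_sampler_statistics():
    from scipy.stats import beta, gamma

    mean, var = 0.2, 0.01  # must satisfy 0 < var < mean * (1 - mean)
    a = mean ** 2 * (1 - mean) / var - mean  # Beta shape parameters recovered from
    b = mean * (1 - mean) ** 2 / var - (1 - mean)  # the target mean and variance
    sess_silence_mean = beta(a, b).rvs()  # per-session silence ratio target

    silence_mean, per_silence_var = 8000.0, 4000.0  # per-occurrence mean/variance in samples
    silence_amount = gamma(a=silence_mean ** 2 / per_silence_var, scale=per_silence_var / silence_mean).rvs()
    return sess_silence_mean, int(silence_amount)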
|
NeMo-main
|
nemo/collections/asr/parts/utils/data_simulation_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.asr.modules import rnnt_abstract
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceMeasureConfig, ConfidenceMeasureMixin
from nemo.collections.common.parts.rnn import label_collate
from nemo.core.classes import Typing, typecheck
from nemo.core.neural_types import AcousticEncodedRepresentation, ElementType, HypothesisType, LengthsType, NeuralType
from nemo.utils import logging
def pack_hypotheses(hypotheses: List[rnnt_utils.Hypothesis], logitlen: torch.Tensor,) -> List[rnnt_utils.Hypothesis]:
if hasattr(logitlen, 'cpu'):
logitlen_cpu = logitlen.to('cpu')
else:
logitlen_cpu = logitlen
for idx, hyp in enumerate(hypotheses): # type: rnnt_utils.Hypothesis
hyp.y_sequence = torch.tensor(hyp.y_sequence, dtype=torch.long)
hyp.length = logitlen_cpu[idx]
if hyp.dec_state is not None:
hyp.dec_state = _states_to_device(hyp.dec_state)
return hypotheses
def _states_to_device(dec_state, device='cpu'):
if torch.is_tensor(dec_state):
dec_state = dec_state.to(device)
elif isinstance(dec_state, (list, tuple)):
dec_state = tuple(_states_to_device(dec_i, device) for dec_i in dec_state)
return dec_state
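# Illustrative sketch (not part of the original module): `_states_to_device` recurses into
# nested tuples/lists, so an LSTM-style (h, c) decoder state is moved tensor-by-tensor. The
# tensor shapes below are hypothetical.
def _demo_states_to_device():
    import torch

    h = torch.zeros(1, 1, 320)
    c = torch.zeros(1, 1, 320)
    dec_state = (h, c)  # nested structure, as returned by RNNT prediction networks
    moved = _states_to_device(dec_state, device='cpu')  # recurses into the tuple
    return tuple(t.device for t in moved)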
class _GreedyRNNTInfer(Typing, ConfidenceMeasureMixin):
"""A greedy transducer decoder.
Provides a common abstraction for sample level and batch level greedy decoding.
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
blank_index: int index of the blank token. Can be 0 or len(vocabulary).
max_symbols_per_step: Optional int. The maximum number of symbols that can be added
to a sequence in a single time step; if set to None then there is
no limit.
preserve_alignments: Bool flag which preserves the history of alignments generated during
greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1), Tensor(scalar, label after argmax)).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores generated
during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
"partial_hypotheses": [NeuralType(elements_type=HypothesisType(), optional=True)], # must always be last
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"predictions": [NeuralType(elements_type=HypothesisType())]}
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
blank_index: int,
max_symbols_per_step: Optional[int] = None,
preserve_alignments: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__()
self.decoder = decoder_model
self.joint = joint_model
self._blank_index = blank_index
self._SOS = blank_index # Start of single index
self.max_symbols = max_symbols_per_step
self.preserve_alignments = preserve_alignments
self.preserve_frame_confidence = preserve_frame_confidence
# set confidence calculation measure
self._init_confidence_measure(confidence_measure_cfg)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
@torch.no_grad()
def _pred_step(
self,
label: Union[torch.Tensor, int],
hidden: Optional[torch.Tensor],
add_sos: bool = False,
batch_size: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Common prediction step based on the AbstractRNNTDecoder implementation.
Args:
label: (int/torch.Tensor): Label or "Start-of-Signal" token.
hidden: (Optional torch.Tensor): RNN State vector
add_sos (bool): Whether to add a zero vector at the beginning as the "start of sentence" token.
batch_size: Batch size of the output tensor.
Returns:
g: (B, U, H) if add_sos is false, else (B, U + 1, H)
hid: (h, c) where h is the final sequence hidden state and c is
the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
"""
if isinstance(label, torch.Tensor):
# label: [batch, 1]
if label.dtype != torch.long:
label = label.long()
else:
# Label is an integer
if label == self._SOS:
return self.decoder.predict(None, hidden, add_sos=add_sos, batch_size=batch_size)
label = label_collate([[label]])
# output: [B, 1, K]
return self.decoder.predict(label, hidden, add_sos=add_sos, batch_size=batch_size)
def _joint_step(self, enc, pred, log_normalize: Optional[bool] = None):
"""
Common joint step based on AbstractRNNTJoint implementation.
Args:
enc: Output of the Encoder model. A torch.Tensor of shape [B, 1, H1]
pred: Output of the Decoder model. A torch.Tensor of shape [B, 1, H2]
log_normalize: Whether to log normalize or not. None will log normalize only for CPU.
Returns:
logits of shape (B, T=1, U=1, V + 1)
"""
with torch.no_grad():
logits = self.joint.joint(enc, pred)
if log_normalize is None:
if not logits.is_cuda: # Use log softmax only if on CPU
logits = logits.log_softmax(dim=len(logits.shape) - 1)
else:
if log_normalize:
logits = logits.log_softmax(dim=len(logits.shape) - 1)
return logits
class GreedyRNNTInfer(_GreedyRNNTInfer):
"""A greedy transducer decoder.
Sequence level greedy decoding, performed auto-regressively. (A standalone toy sketch of this decoding loop follows this class.)
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
blank_index: int index of the blank token. Can be 0 or len(vocabulary).
max_symbols_per_step: Optional int. The maximum number of symbols that can be added
to a sequence in a single time step; if set to None then there is
no limit.
preserve_alignments: Bool flag which preserves the history of alignments generated during
greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1), Tensor(scalar, label after argmax)).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores generated
during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
blank_index: int,
max_symbols_per_step: Optional[int] = None,
preserve_alignments: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__(
decoder_model=decoder_model,
joint_model=joint_model,
blank_index=blank_index,
max_symbols_per_step=max_symbols_per_step,
preserve_alignments=preserve_alignments,
preserve_frame_confidence=preserve_frame_confidence,
confidence_measure_cfg=confidence_measure_cfg,
)
@typecheck()
def forward(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
Output token is generated auto-regressively.
Args:
encoder_output: A tensor of size (batch, features, timesteps).
encoded_lengths: list of int representing the length of each sequence in the output batch.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
# Preserve decoder and joint training state
decoder_training_state = self.decoder.training
joint_training_state = self.joint.training
with torch.inference_mode():
# Apply optional preprocessing
encoder_output = encoder_output.transpose(1, 2) # (B, T, D)
self.decoder.eval()
self.joint.eval()
hypotheses = []
# Process each sequence independently
with self.decoder.as_frozen(), self.joint.as_frozen():
for batch_idx in range(encoder_output.size(0)):
inseq = encoder_output[batch_idx, :, :].unsqueeze(1) # [T, 1, D]
logitlen = encoded_lengths[batch_idx]
partial_hypothesis = partial_hypotheses[batch_idx] if partial_hypotheses is not None else None
hypothesis = self._greedy_decode(inseq, logitlen, partial_hypotheses=partial_hypothesis)
hypotheses.append(hypothesis)
# Pack results into Hypotheses
packed_result = pack_hypotheses(hypotheses, encoded_lengths)
self.decoder.train(decoder_training_state)
self.joint.train(joint_training_state)
return (packed_result,)
@torch.no_grad()
def _greedy_decode(
self, x: torch.Tensor, out_len: torch.Tensor, partial_hypotheses: Optional[rnnt_utils.Hypothesis] = None
):
# x: [T, 1, D]
# out_len: [seq_len]
# Initialize blank state and empty label set in Hypothesis
hypothesis = rnnt_utils.Hypothesis(score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None)
if partial_hypotheses is not None:
hypothesis.last_token = partial_hypotheses.last_token
hypothesis.y_sequence = (
partial_hypotheses.y_sequence.cpu().tolist()
if isinstance(partial_hypotheses.y_sequence, torch.Tensor)
else partial_hypotheses.y_sequence
)
if partial_hypotheses.dec_state is not None:
hypothesis.dec_state = self.decoder.batch_concat_states([partial_hypotheses.dec_state])
hypothesis.dec_state = _states_to_device(hypothesis.dec_state, x.device)
if self.preserve_alignments:
# Alignments is a 2-dimensional dangling list representing T x U
hypothesis.alignments = [[]]
if self.preserve_frame_confidence:
hypothesis.frame_confidence = [[]]
# For timestep t in X_t
for time_idx in range(out_len):
# Extract encoder embedding at timestep t
# f = x[time_idx, :, :].unsqueeze(0) # [1, 1, D]
f = x.narrow(dim=0, start=time_idx, length=1)
# Setup exit flags and counter
not_blank = True
symbols_added = 0
# While blank is not predicted and we don't exceed the max symbols per timestep
while not_blank and (self.max_symbols is None or symbols_added < self.max_symbols):
# In the first timestep, we initialize the network with RNNT Blank
# In later timesteps, we provide previous predicted label as input.
if hypothesis.last_token is None and hypothesis.dec_state is None:
last_label = self._SOS
else:
last_label = label_collate([[hypothesis.last_token]])
# Perform prediction network and joint network steps.
g, hidden_prime = self._pred_step(last_label, hypothesis.dec_state)
# If preserving per-frame confidence, log_normalize must be true
logp = self._joint_step(f, g, log_normalize=True if self.preserve_frame_confidence else None)[
0, 0, 0, :
]
del g
# torch.max(0) op doesn't exist for FP16.
if logp.dtype != torch.float32:
logp = logp.float()
# get index k, of max prob
v, k = logp.max(0)
k = k.item() # K is the label at timestep t_s in inner loop, s >= 0.
if self.preserve_alignments:
# insert logprobs into last timestep
hypothesis.alignments[-1].append((logp.to('cpu'), torch.tensor(k, dtype=torch.int32)))
if self.preserve_frame_confidence:
# insert confidence into last timestep
hypothesis.frame_confidence[-1].append(self._get_confidence(logp))
del logp
# If blank token is predicted, exit inner loop, move onto next timestep t
if k == self._blank_index:
not_blank = False
if self.preserve_alignments:
# convert Ti-th logits into a torch array
hypothesis.alignments.append([]) # blank buffer for next timestep
if self.preserve_frame_confidence:
hypothesis.frame_confidence.append([]) # blank buffer for next timestep
else:
# Append token to label set, update RNN state.
hypothesis.y_sequence.append(k)
hypothesis.score += float(v)
hypothesis.timestep.append(time_idx)
hypothesis.dec_state = hidden_prime
hypothesis.last_token = k
# Increment token counter.
symbols_added += 1
# Remove trailing empty list of Alignments
if self.preserve_alignments:
if len(hypothesis.alignments[-1]) == 0:
del hypothesis.alignments[-1]
# Remove trailing empty list of per-frame confidence
if self.preserve_frame_confidence:
if len(hypothesis.frame_confidence[-1]) == 0:
del hypothesis.frame_confidence[-1]
# Unpack the hidden states
hypothesis.dec_state = self.decoder.batch_select_state(hypothesis.dec_state, 0)
return hypothesis
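# Illustrative sketch (not part of the original classes): the shape of the greedy transducer
# loop in `GreedyRNNTInfer._greedy_decode`, reduced to a toy example. The random "joint"
# output below is hypothetical and stands in for the prediction + joint networks.
def _demo_greedy_transducer_loop(num_frames: int = 5, vocab_size: int = 8, max_symbols: int = 3):
    import torch

    blank_index = vocab_size  # blank placed at the last index, as with len(vocabulary)
    y_sequence, timesteps = [], []
    for time_idx in range(num_frames):  # outer loop over acoustic frames T
        not_blank, symbols_added = True, 0
        while not_blank and symbols_added < max_symbols:  # inner loop over emitted symbols U
            logp = torch.randn(vocab_size + 1).log_softmax(dim=-1)  # stand-in joint output over V + 1
            k = int(logp.argmax())
            if k == blank_index:  # blank ends the inner loop and advances to the next frame
                not_blank = False
            else:  # otherwise emit the token and stay on the same frame
                y_sequence.append(k)
                timesteps.append(time_idx)
                symbols_added += 1
    return y_sequence, timesteps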
class GreedyBatchedRNNTInfer(_GreedyRNNTInfer):
"""A batch level greedy transducer decoder.
Batch level greedy decoding, performed auto-regressively.
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
blank_index: int index of the blank token. Can be 0 or len(vocabulary).
max_symbols_per_step: Optional int. The maximum number of symbols that can be added
to a sequence in a single time step; if set to None then there is
no limit.
preserve_alignments: Bool flag which preserves the history of alignments generated during
greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1), Tensor(scalar, label after argmax)).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores generated
during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
blank_index: int,
max_symbols_per_step: Optional[int] = None,
preserve_alignments: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__(
decoder_model=decoder_model,
joint_model=joint_model,
blank_index=blank_index,
max_symbols_per_step=max_symbols_per_step,
preserve_alignments=preserve_alignments,
preserve_frame_confidence=preserve_frame_confidence,
confidence_measure_cfg=confidence_measure_cfg,
)
# Depending on availability of `blank_as_pad` support
# switch between more efficient batch decoding technique
if self.decoder.blank_as_pad:
self._greedy_decode = self._greedy_decode_blank_as_pad
else:
self._greedy_decode = self._greedy_decode_masked
@typecheck()
def forward(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
Output token is generated auto-regressively.
Args:
encoder_output: A tensor of size (batch, features, timesteps).
encoded_lengths: list of int representing the length of each sequence in the output batch.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
# Preserve decoder and joint training state
decoder_training_state = self.decoder.training
joint_training_state = self.joint.training
with torch.inference_mode():
# Apply optional preprocessing
encoder_output = encoder_output.transpose(1, 2) # (B, T, D)
logitlen = encoded_lengths
self.decoder.eval()
self.joint.eval()
with self.decoder.as_frozen(), self.joint.as_frozen():
inseq = encoder_output # [B, T, D]
hypotheses = self._greedy_decode(
inseq, logitlen, device=inseq.device, partial_hypotheses=partial_hypotheses
)
# Pack the hypotheses results
packed_result = pack_hypotheses(hypotheses, logitlen)
self.decoder.train(decoder_training_state)
self.joint.train(joint_training_state)
return (packed_result,)
def _greedy_decode_blank_as_pad(
self,
x: torch.Tensor,
out_len: torch.Tensor,
device: torch.device,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
if partial_hypotheses is not None:
            raise NotImplementedError("`partial_hypotheses` support is not implemented")
with torch.inference_mode():
# x: [B, T, D]
# out_len: [B]
# device: torch.device
# Initialize list of Hypothesis
batchsize = x.shape[0]
hypotheses = [
rnnt_utils.Hypothesis(score=0.0, y_sequence=[], timestep=[], dec_state=None) for _ in range(batchsize)
]
# Initialize Hidden state matrix (shared by entire batch)
hidden = None
# If alignments need to be preserved, register a dangling list to hold the values
if self.preserve_alignments:
# alignments is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.alignments = [[]]
# If confidence scores need to be preserved, register a dangling list to hold the values
if self.preserve_frame_confidence:
# frame_confidence is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.frame_confidence = [[]]
hyp.y_3best = [[]]
hyp.frame_confidence_3best = [[[]]]
hyp.logp = [[]]
# Last Label buffer + Last Label without blank buffer
# batch level equivalent of the last_label
last_label = torch.full([batchsize, 1], fill_value=self._blank_index, dtype=torch.long, device=device)
# Mask buffers
blank_mask = torch.full([batchsize], fill_value=0, dtype=torch.bool, device=device)
# Get max sequence length
max_out_len = out_len.max()
for time_idx in range(max_out_len):
f = x.narrow(dim=1, start=time_idx, length=1) # [B, 1, D]
# Prepare t timestamp batch variables
not_blank = True
symbols_added = 0
# Reset blank mask
blank_mask.mul_(False)
# Update blank mask with time mask
# Batch: [B, T, D], but Bi may have seq len < max(seq_lens_in_batch)
                # Forcibly mask with "blank" tokens for all samples where the current time step T >= seq_len
blank_mask = time_idx >= out_len
# Start inner loop
while not_blank and (self.max_symbols is None or symbols_added < self.max_symbols):
# Batch prediction and joint network steps
# If very first prediction step, submit SOS tag (blank) to pred_step.
# This feeds a zero tensor as input to AbstractRNNTDecoder to prime the state
if time_idx == 0 and symbols_added == 0 and hidden is None:
g, hidden_prime = self._pred_step(self._SOS, hidden, batch_size=batchsize)
else:
# Perform batch step prediction of decoder, getting new states and scores ("g")
g, hidden_prime = self._pred_step(last_label, hidden, batch_size=batchsize)
# Batched joint step - Output = [B, V + 1]
# If preserving per-frame confidence, log_normalize must be true
logp = self._joint_step(f, g, log_normalize=True if self.preserve_frame_confidence else None)[
:, 0, 0, :
]
if logp.dtype != torch.float32:
logp = logp.float()
# Get index k, of max prob for batch
v, k = logp.max(1)
del g
# Update blank mask with current predicted blanks
# This is accumulating blanks over all time steps T and all target steps min(max_symbols, U)
k_is_blank = k == self._blank_index
blank_mask.bitwise_or_(k_is_blank)
all_blanks = torch.all(blank_mask)
del k_is_blank
# If preserving alignments, check if sequence length of sample has been reached
# before adding alignment
if self.preserve_alignments:
# Insert logprobs into last timestep per sample
logp_vals = logp.to('cpu')
logp_ids = logp_vals.max(1)[1]
for batch_idx, is_blank in enumerate(blank_mask):
# we only want to update non-blanks, unless we are at the last step in the loop where
# all elements produced blanks, otherwise there will be duplicate predictions
# saved in alignments
if time_idx < out_len[batch_idx] and (all_blanks or not is_blank):
hypotheses[batch_idx].alignments[-1].append(
(logp_vals[batch_idx], logp_ids[batch_idx])
)
del logp_vals
# If preserving per-frame confidence, check if sequence length of sample has been reached
# before adding confidence scores
if self.preserve_frame_confidence:
# Insert probabilities into last timestep per sample
confidence = self._get_confidence(logp)
for batch_idx, is_blank in enumerate(blank_mask):
if time_idx < out_len[batch_idx] and (all_blanks or not is_blank):
hypotheses[batch_idx].frame_confidence[-1].append(confidence[batch_idx])
del logp
# If all samples predict / have predicted prior blanks, exit loop early
# This is equivalent to if single sample predicted k
if all_blanks:
not_blank = False
# If preserving alignments, convert the current Uj alignments into a torch.Tensor
# Then preserve U at current timestep Ti
# Finally, forward the timestep history to Ti+1 for that sample
# All of this should only be done iff the current time index <= sample-level AM length.
# Otherwise ignore and move to next sample / next timestep.
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for batch_idx in range(batchsize):
# this checks if current timestep <= sample-level AM length
# If current timestep > sample-level AM length, no alignments will be added
# Therefore the list of Uj alignments is empty here.
if len(hypotheses[batch_idx].alignments[-1]) > 0:
hypotheses[batch_idx].alignments.append([]) # blank buffer for next timestep
# Do the same if preserving per-frame confidence
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) > 0:
hypotheses[batch_idx].frame_confidence.append([]) # blank buffer for next timestep
hypotheses[batch_idx].y_3best.append([])
hypotheses[batch_idx].frame_confidence_3best.append([])
hypotheses[batch_idx].logp.append([])
else:
# Collect batch indices where blanks occurred now/past
blank_indices = (blank_mask == 1).nonzero(as_tuple=False)
# Recover prior state for all samples which predicted blank now/past
if hidden is not None:
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, hidden, blank_indices)
elif len(blank_indices) > 0 and hidden is None:
# Reset state if there were some blank and other non-blank predictions in batch
# Original state is filled with zeros so we just multiply
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, None, blank_indices, value=0.0)
# Recover prior predicted label for all samples which predicted blank now/past
k[blank_indices] = last_label[blank_indices, 0]
# Update new label and hidden state for next iteration
last_label = k.clone().view(-1, 1)
hidden = hidden_prime
# Update predicted labels, accounting for time mask
# If blank was predicted even once, now or in the past,
# Force the current predicted label to also be blank
                        # This ensures that blanks propagate across all timesteps
                        # once they have occurred (normally the stopping condition of the sample-level loop).
for kidx, ki in enumerate(k):
if blank_mask[kidx] == 0:
hypotheses[kidx].y_sequence.append(ki)
hypotheses[kidx].timestep.append(time_idx)
hypotheses[kidx].score += float(v[kidx])
symbols_added += 1
# Remove trailing empty list of alignments at T_{am-len} x Uj
if self.preserve_alignments:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].alignments[-1]) == 0:
del hypotheses[batch_idx].alignments[-1]
# Remove trailing empty list of confidence scores at T_{am-len} x Uj
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) == 0:
del hypotheses[batch_idx].frame_confidence[-1]
del hypotheses[batch_idx].y_3best[-1]
del hypotheses[batch_idx].frame_confidence_3best[-1]
del hypotheses[batch_idx].logp[-1]
# Preserve states
for batch_idx in range(batchsize):
hypotheses[batch_idx].dec_state = self.decoder.batch_select_state(hidden, batch_idx)
return hypotheses
def _greedy_decode_masked(
self,
x: torch.Tensor,
out_len: torch.Tensor,
device: torch.device,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
if partial_hypotheses is not None:
            raise NotImplementedError("`partial_hypotheses` support is not implemented")
# x: [B, T, D]
# out_len: [B]
# device: torch.device
# Initialize state
batchsize = x.shape[0]
hypotheses = [
rnnt_utils.Hypothesis(score=0.0, y_sequence=[], timestep=[], dec_state=None) for _ in range(batchsize)
]
# Initialize Hidden state matrix (shared by entire batch)
hidden = None
        # If alignments need to be preserved, register a dangling list to hold the values
        if self.preserve_alignments:
            # alignments is a 3-dimensional dangling list representing B x T x U
            for hyp in hypotheses:
                hyp.alignments = [[]]
        else:
            for hyp in hypotheses:
                hyp.alignments = None
        # If confidence scores need to be preserved, register a dangling list to hold the values
if self.preserve_frame_confidence:
# frame_confidence is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.frame_confidence = [[]]
# Last Label buffer + Last Label without blank buffer
# batch level equivalent of the last_label
last_label = torch.full([batchsize, 1], fill_value=self._blank_index, dtype=torch.long, device=device)
last_label_without_blank = last_label.clone()
# Mask buffers
blank_mask = torch.full([batchsize], fill_value=0, dtype=torch.bool, device=device)
# Get max sequence length
max_out_len = out_len.max()
with torch.inference_mode():
for time_idx in range(max_out_len):
f = x.narrow(dim=1, start=time_idx, length=1) # [B, 1, D]
# Prepare t timestamp batch variables
not_blank = True
symbols_added = 0
# Reset blank mask
blank_mask.mul_(False)
# Update blank mask with time mask
# Batch: [B, T, D], but Bi may have seq len < max(seq_lens_in_batch)
                # Forcibly mask with "blank" tokens for all samples where the current time step T >= seq_len
blank_mask = time_idx >= out_len
# Start inner loop
while not_blank and (self.max_symbols is None or symbols_added < self.max_symbols):
# Batch prediction and joint network steps
# If very first prediction step, submit SOS tag (blank) to pred_step.
# This feeds a zero tensor as input to AbstractRNNTDecoder to prime the state
if time_idx == 0 and symbols_added == 0 and hidden is None:
g, hidden_prime = self._pred_step(self._SOS, hidden, batch_size=batchsize)
else:
# Set a dummy label for the blank value
                        # This value will be overwritten by "blank" again at the last label update below
# This is done as vocabulary of prediction network does not contain "blank" token of RNNT
last_label_without_blank_mask = last_label == self._blank_index
last_label_without_blank[last_label_without_blank_mask] = 0 # temp change of label
last_label_without_blank[~last_label_without_blank_mask] = last_label[
~last_label_without_blank_mask
]
# Perform batch step prediction of decoder, getting new states and scores ("g")
g, hidden_prime = self._pred_step(last_label_without_blank, hidden, batch_size=batchsize)
# Batched joint step - Output = [B, V + 1]
# If preserving per-frame confidence, log_normalize must be true
logp = self._joint_step(f, g, log_normalize=True if self.preserve_frame_confidence else None)[
:, 0, 0, :
]
if logp.dtype != torch.float32:
logp = logp.float()
# Get index k, of max prob for batch
v, k = logp.max(1)
del g
# Update blank mask with current predicted blanks
# This is accumulating blanks over all time steps T and all target steps min(max_symbols, U)
k_is_blank = k == self._blank_index
blank_mask.bitwise_or_(k_is_blank)
all_blanks = torch.all(blank_mask)
# If preserving alignments, check if sequence length of sample has been reached
# before adding alignment
if self.preserve_alignments:
# Insert logprobs into last timestep per sample
logp_vals = logp.to('cpu')
logp_ids = logp_vals.max(1)[1]
for batch_idx, is_blank in enumerate(blank_mask):
# we only want to update non-blanks, unless we are at the last step in the loop where
# all elements produced blanks, otherwise there will be duplicate predictions
# saved in alignments
if time_idx < out_len[batch_idx] and (all_blanks or not is_blank):
hypotheses[batch_idx].alignments[-1].append(
(logp_vals[batch_idx], logp_ids[batch_idx])
)
del logp_vals
# If preserving per-frame confidence, check if sequence length of sample has been reached
# before adding confidence scores
if self.preserve_frame_confidence:
# Insert probabilities into last timestep per sample
confidence = self._get_confidence(logp)
for batch_idx, is_blank in enumerate(blank_mask):
if time_idx < out_len[batch_idx] and (all_blanks or not is_blank):
hypotheses[batch_idx].frame_confidence[-1].append(confidence[batch_idx])
del logp
# If all samples predict / have predicted prior blanks, exit loop early
# This is equivalent to if single sample predicted k
if blank_mask.all():
not_blank = False
# If preserving alignments, convert the current Uj alignments into a torch.Tensor
# Then preserve U at current timestep Ti
# Finally, forward the timestep history to Ti+1 for that sample
# All of this should only be done iff the current time index <= sample-level AM length.
# Otherwise ignore and move to next sample / next timestep.
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for batch_idx in range(batchsize):
# this checks if current timestep <= sample-level AM length
# If current timestep > sample-level AM length, no alignments will be added
# Therefore the list of Uj alignments is empty here.
if len(hypotheses[batch_idx].alignments[-1]) > 0:
hypotheses[batch_idx].alignments.append([]) # blank buffer for next timestep
# Do the same if preserving per-frame confidence
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) > 0:
hypotheses[batch_idx].frame_confidence.append([]) # blank buffer for next timestep
else:
# Collect batch indices where blanks occurred now/past
blank_indices = (blank_mask == 1).nonzero(as_tuple=False)
# Recover prior state for all samples which predicted blank now/past
if hidden is not None:
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, hidden, blank_indices)
elif len(blank_indices) > 0 and hidden is None:
# Reset state if there were some blank and other non-blank predictions in batch
# Original state is filled with zeros so we just multiply
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, None, blank_indices, value=0.0)
# Recover prior predicted label for all samples which predicted blank now/past
k[blank_indices] = last_label[blank_indices, 0]
# Update new label and hidden state for next iteration
last_label = k.view(-1, 1)
hidden = hidden_prime
# Update predicted labels, accounting for time mask
# If blank was predicted even once, now or in the past,
# Force the current predicted label to also be blank
                        # This ensures that blanks propagate across all timesteps
                        # once they have occurred (normally the stopping condition of the sample-level loop).
for kidx, ki in enumerate(k):
if blank_mask[kidx] == 0:
hypotheses[kidx].y_sequence.append(ki)
hypotheses[kidx].timestep.append(time_idx)
hypotheses[kidx].score += float(v[kidx])
symbols_added += 1
# Remove trailing empty list of alignments at T_{am-len} x Uj
if self.preserve_alignments:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].alignments[-1]) == 0:
del hypotheses[batch_idx].alignments[-1]
# Remove trailing empty list of confidence scores at T_{am-len} x Uj
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) == 0:
del hypotheses[batch_idx].frame_confidence[-1]
# Preserve states
for batch_idx in range(batchsize):
hypotheses[batch_idx].dec_state = self.decoder.batch_select_state(hidden, batch_idx)
return hypotheses
class ExportedModelGreedyBatchedRNNTInfer:
def __init__(self, encoder_model: str, decoder_joint_model: str, max_symbols_per_step: Optional[int] = None):
self.encoder_model_path = encoder_model
self.decoder_joint_model_path = decoder_joint_model
self.max_symbols_per_step = max_symbols_per_step
# Will be populated at runtime
self._blank_index = None
def __call__(self, audio_signal: torch.Tensor, length: torch.Tensor):
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
Output token is generated auto-regressively.
Args:
            audio_signal: A tensor of size (batch, features, timesteps) passed to the exported encoder.
            length: A tensor of ints representing the length of each sequence in the batch.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
with torch.no_grad():
# Apply optional preprocessing
encoder_output, encoded_lengths = self.run_encoder(audio_signal=audio_signal, length=length)
if torch.is_tensor(encoder_output):
encoder_output = encoder_output.transpose(1, 2)
else:
encoder_output = encoder_output.transpose([0, 2, 1]) # (B, T, D)
logitlen = encoded_lengths
inseq = encoder_output # [B, T, D]
hypotheses, timestamps = self._greedy_decode(inseq, logitlen)
# Pack the hypotheses results
packed_result = [rnnt_utils.Hypothesis(score=-1.0, y_sequence=[]) for _ in range(len(hypotheses))]
for i in range(len(packed_result)):
packed_result[i].y_sequence = torch.tensor(hypotheses[i], dtype=torch.long)
packed_result[i].length = timestamps[i]
del hypotheses
return packed_result
def _greedy_decode(self, x, out_len):
# x: [B, T, D]
# out_len: [B]
# Initialize state
batchsize = x.shape[0]
hidden = self._get_initial_states(batchsize)
target_lengths = torch.ones(batchsize, dtype=torch.int32)
# Output string buffer
label = [[] for _ in range(batchsize)]
timesteps = [[] for _ in range(batchsize)]
# Last Label buffer + Last Label without blank buffer
# batch level equivalent of the last_label
last_label = torch.full([batchsize, 1], fill_value=self._blank_index, dtype=torch.long).numpy()
if torch.is_tensor(x):
last_label = torch.from_numpy(last_label).to(self.device)
# Mask buffers
blank_mask = torch.full([batchsize], fill_value=0, dtype=torch.bool).numpy()
# Get max sequence length
max_out_len = out_len.max()
for time_idx in range(max_out_len):
f = x[:, time_idx : time_idx + 1, :] # [B, 1, D]
if torch.is_tensor(f):
f = f.transpose(1, 2)
else:
f = f.transpose([0, 2, 1])
# Prepare t timestamp batch variables
not_blank = True
symbols_added = 0
# Reset blank mask
blank_mask *= False
# Update blank mask with time mask
# Batch: [B, T, D], but Bi may have seq len < max(seq_lens_in_batch)
            # Forcibly mask with "blank" tokens for all samples where the current time step T >= seq_len
blank_mask = time_idx >= out_len
# Start inner loop
while not_blank and (self.max_symbols_per_step is None or symbols_added < self.max_symbols_per_step):
# Batch prediction and joint network steps
# If very first prediction step, submit SOS tag (blank) to pred_step.
# This feeds a zero tensor as input to AbstractRNNTDecoder to prime the state
if time_idx == 0 and symbols_added == 0:
g = torch.tensor([self._blank_index] * batchsize, dtype=torch.int32).view(-1, 1)
else:
if torch.is_tensor(last_label):
g = last_label.type(torch.int32)
else:
g = last_label.astype(np.int32)
# Batched joint step - Output = [B, V + 1]
joint_out, hidden_prime = self.run_decoder_joint(f, g, target_lengths, *hidden)
logp, pred_lengths = joint_out
logp = logp[:, 0, 0, :]
# Get index k, of max prob for batch
if torch.is_tensor(logp):
v, k = logp.max(1)
else:
k = np.argmax(logp, axis=1).astype(np.int32)
# Update blank mask with current predicted blanks
# This is accumulating blanks over all time steps T and all target steps min(max_symbols, U)
k_is_blank = k == self._blank_index
blank_mask |= k_is_blank
del k_is_blank
del logp
# If all samples predict / have predicted prior blanks, exit loop early
# This is equivalent to if single sample predicted k
if blank_mask.all():
not_blank = False
else:
# Collect batch indices where blanks occurred now/past
if torch.is_tensor(blank_mask):
blank_indices = (blank_mask == 1).nonzero(as_tuple=False)
else:
blank_indices = blank_mask.astype(np.int32).nonzero()
if type(blank_indices) in (list, tuple):
blank_indices = blank_indices[0]
# Recover prior state for all samples which predicted blank now/past
if hidden is not None:
# LSTM has 2 states
for state_id in range(len(hidden)):
hidden_prime[state_id][:, blank_indices, :] = hidden[state_id][:, blank_indices, :]
elif len(blank_indices) > 0 and hidden is None:
# Reset state if there were some blank and other non-blank predictions in batch
# Original state is filled with zeros so we just multiply
# LSTM has 2 states
for state_id in range(len(hidden_prime)):
hidden_prime[state_id][:, blank_indices, :] *= 0.0
# Recover prior predicted label for all samples which predicted blank now/past
k[blank_indices] = last_label[blank_indices, 0]
# Update new label and hidden state for next iteration
if torch.is_tensor(k):
last_label = k.clone().reshape(-1, 1)
else:
last_label = k.copy().reshape(-1, 1)
hidden = hidden_prime
# Update predicted labels, accounting for time mask
# If blank was predicted even once, now or in the past,
# Force the current predicted label to also be blank
                    # This ensures that blanks propagate across all timesteps
                    # once they have occurred (normally the stopping condition of the sample-level loop).
for kidx, ki in enumerate(k):
if blank_mask[kidx] == 0:
label[kidx].append(ki)
timesteps[kidx].append(time_idx)
symbols_added += 1
return label, timesteps
def _setup_blank_index(self):
raise NotImplementedError()
def run_encoder(self, audio_signal, length):
raise NotImplementedError()
def run_decoder_joint(self, enc_logits, targets, target_length, *states):
raise NotImplementedError()
def _get_initial_states(self, batchsize):
raise NotImplementedError()
class ONNXGreedyBatchedRNNTInfer(ExportedModelGreedyBatchedRNNTInfer):
def __init__(self, encoder_model: str, decoder_joint_model: str, max_symbols_per_step: Optional[int] = 10):
super().__init__(
encoder_model=encoder_model,
decoder_joint_model=decoder_joint_model,
max_symbols_per_step=max_symbols_per_step,
)
try:
import onnx
import onnxruntime
except (ModuleNotFoundError, ImportError):
            raise ImportError("`onnx` or `onnxruntime` could not be imported, please install the libraries.")
if torch.cuda.is_available():
# Try to use onnxruntime-gpu
providers = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
else:
# Fall back to CPU and onnxruntime-cpu
providers = ['CPUExecutionProvider']
onnx_session_opt = onnxruntime.SessionOptions()
onnx_session_opt.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
onnx_model = onnx.load(self.encoder_model_path)
onnx.checker.check_model(onnx_model, full_check=True)
self.encoder_model = onnx_model
        self.encoder = onnxruntime.InferenceSession(
            onnx_model.SerializeToString(), sess_options=onnx_session_opt, providers=providers
        )
onnx_model = onnx.load(self.decoder_joint_model_path)
onnx.checker.check_model(onnx_model, full_check=True)
self.decoder_joint_model = onnx_model
        self.decoder_joint = onnxruntime.InferenceSession(
            onnx_model.SerializeToString(), sess_options=onnx_session_opt, providers=providers
        )
logging.info("Successfully loaded encoder, decoder and joint onnx models !")
# Will be populated at runtime
self._blank_index = None
self.max_symbols_per_step = max_symbols_per_step
self._setup_encoder_input_output_keys()
self._setup_decoder_joint_input_output_keys()
self._setup_blank_index()
def _setup_encoder_input_output_keys(self):
self.encoder_inputs = list(self.encoder_model.graph.input)
self.encoder_outputs = list(self.encoder_model.graph.output)
def _setup_decoder_joint_input_output_keys(self):
self.decoder_joint_inputs = list(self.decoder_joint_model.graph.input)
self.decoder_joint_outputs = list(self.decoder_joint_model.graph.output)
def _setup_blank_index(self):
# ASSUME: Single input with no time length information
dynamic_dim = 257
shapes = self.encoder_inputs[0].type.tensor_type.shape.dim
ip_shape = []
for shape in shapes:
if hasattr(shape, 'dim_param') and 'dynamic' in shape.dim_param:
ip_shape.append(dynamic_dim) # replace dynamic axes with constant
else:
ip_shape.append(int(shape.dim_value))
enc_logits, encoded_length = self.run_encoder(
audio_signal=torch.randn(*ip_shape), length=torch.randint(0, 1, size=(dynamic_dim,))
)
# prepare states
states = self._get_initial_states(batchsize=dynamic_dim)
# run decoder 1 step
joint_out, states = self.run_decoder_joint(enc_logits, None, None, *states)
log_probs, lengths = joint_out
self._blank_index = log_probs.shape[-1] - 1 # last token of vocab size is blank token
logging.info(
f"Enc-Dec-Joint step was evaluated, blank token id = {self._blank_index}; vocab size = {log_probs.shape[-1]}"
)
def run_encoder(self, audio_signal, length):
if hasattr(audio_signal, 'cpu'):
audio_signal = audio_signal.cpu().numpy()
if hasattr(length, 'cpu'):
length = length.cpu().numpy()
ip = {
self.encoder_inputs[0].name: audio_signal,
self.encoder_inputs[1].name: length,
}
enc_out = self.encoder.run(None, ip)
enc_out, encoded_length = enc_out # ASSUME: single output
return enc_out, encoded_length
def run_decoder_joint(self, enc_logits, targets, target_length, *states):
# ASSUME: Decoder is RNN Transducer
if targets is None:
targets = torch.zeros(enc_logits.shape[0], 1, dtype=torch.int32)
target_length = torch.ones(enc_logits.shape[0], dtype=torch.int32)
if hasattr(targets, 'cpu'):
targets = targets.cpu().numpy()
if hasattr(target_length, 'cpu'):
target_length = target_length.cpu().numpy()
ip = {
self.decoder_joint_inputs[0].name: enc_logits,
self.decoder_joint_inputs[1].name: targets,
self.decoder_joint_inputs[2].name: target_length,
}
num_states = 0
if states is not None and len(states) > 0:
num_states = len(states)
for idx, state in enumerate(states):
if hasattr(state, 'cpu'):
state = state.cpu().numpy()
ip[self.decoder_joint_inputs[len(ip)].name] = state
dec_out = self.decoder_joint.run(None, ip)
# unpack dec output
if num_states > 0:
new_states = dec_out[-num_states:]
dec_out = dec_out[:-num_states]
else:
new_states = None
return dec_out, new_states
def _get_initial_states(self, batchsize):
# ASSUME: LSTM STATES of shape (layers, batchsize, dim)
input_state_nodes = [ip for ip in self.decoder_joint_inputs if 'state' in ip.name]
num_states = len(input_state_nodes)
if num_states == 0:
return
input_states = []
for state_id in range(num_states):
node = input_state_nodes[state_id]
ip_shape = []
for shape_idx, shape in enumerate(node.type.tensor_type.shape.dim):
if hasattr(shape, 'dim_param') and 'dynamic' in shape.dim_param:
ip_shape.append(batchsize) # replace dynamic axes with constant
else:
ip_shape.append(int(shape.dim_value))
input_states.append(torch.zeros(*ip_shape))
return input_states
class TorchscriptGreedyBatchedRNNTInfer(ExportedModelGreedyBatchedRNNTInfer):
def __init__(
self,
encoder_model: str,
decoder_joint_model: str,
cfg: DictConfig,
device: str,
max_symbols_per_step: Optional[int] = 10,
):
super().__init__(
encoder_model=encoder_model,
decoder_joint_model=decoder_joint_model,
max_symbols_per_step=max_symbols_per_step,
)
self.cfg = cfg
self.device = device
self.encoder = torch.jit.load(self.encoder_model_path, map_location=self.device)
self.decoder_joint = torch.jit.load(self.decoder_joint_model_path, map_location=self.device)
logging.info("Successfully loaded encoder, decoder and joint torchscript models !")
# Will be populated at runtime
self._blank_index = None
self.max_symbols_per_step = max_symbols_per_step
self._setup_encoder_input_keys()
self._setup_decoder_joint_input_keys()
self._setup_blank_index()
def _setup_encoder_input_keys(self):
arguments = self.encoder.forward.schema.arguments[1:]
self.encoder_inputs = [arg for arg in arguments]
def _setup_decoder_joint_input_keys(self):
arguments = self.decoder_joint.forward.schema.arguments[1:]
self.decoder_joint_inputs = [arg for arg in arguments]
def _setup_blank_index(self):
self._blank_index = len(self.cfg.joint.vocabulary)
logging.info(f"Blank token id = {self._blank_index}; vocab size = {len(self.cfg.joint.vocabulary) + 1}")
def run_encoder(self, audio_signal, length):
enc_out = self.encoder(audio_signal, length)
enc_out, encoded_length = enc_out # ASSUME: single output
return enc_out, encoded_length
def run_decoder_joint(self, enc_logits, targets, target_length, *states):
# ASSUME: Decoder is RNN Transducer
if targets is None:
targets = torch.zeros(enc_logits.shape[0], 1, dtype=torch.int32, device=enc_logits.device)
target_length = torch.ones(enc_logits.shape[0], dtype=torch.int32, device=enc_logits.device)
num_states = 0
if states is not None and len(states) > 0:
num_states = len(states)
dec_out = self.decoder_joint(enc_logits, targets, target_length, *states)
# unpack dec output
if num_states > 0:
new_states = dec_out[-num_states:]
dec_out = dec_out[:-num_states]
else:
new_states = None
return dec_out, new_states
def _get_initial_states(self, batchsize):
# ASSUME: LSTM STATES of shape (layers, batchsize, dim)
input_state_nodes = [ip for ip in self.decoder_joint_inputs if 'state' in ip.name]
num_states = len(input_state_nodes)
if num_states == 0:
return
input_states = []
for state_id in range(num_states):
# Hardcode shape size for LSTM (1 is for num layers in LSTM, which is flattened for export)
ip_shape = [1, batchsize, self.cfg.model_defaults.pred_hidden]
input_states.append(torch.zeros(*ip_shape, device=self.device))
return input_states
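# A minimal usage sketch (hypothetical file names and shapes) for the exported-model decoders above;
# `features` stands for whatever input the exported encoder expects (e.g. preprocessed audio features of
# shape [B, D, T]) and `feature_lengths` for the corresponding lengths:
#
#   decoding = ONNXGreedyBatchedRNNTInfer(
#       encoder_model="encoder.onnx",              # hypothetical path
#       decoder_joint_model="decoder_joint.onnx",  # hypothetical path
#       max_symbols_per_step=10,
#   )
#   hyps = decoding(audio_signal=features, length=feature_lengths)
#   token_ids = hyps[0].y_sequence  # torch.LongTensor of predicted token ids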
class GreedyMultiblankRNNTInfer(GreedyRNNTInfer):
"""A greedy transducer decoder for multi-blank RNN-T.
Sequence level greedy decoding, performed auto-regressively.
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
blank_index: int index of the blank token. Must be len(vocabulary) for multi-blank RNNTs.
big_blank_durations: a list containing durations for big blanks the model supports.
max_symbols_per_step: Optional int. The maximum number of symbols that can be added
to a sequence in a single time step; if set to None then there is
no limit.
preserve_alignments: Bool flag which preserves the history of alignments generated during
greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1 + num-big-blanks), Tensor(scalar, label after argmax)).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores generated
during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
                        Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
blank_index: int,
big_blank_durations: list,
max_symbols_per_step: Optional[int] = None,
preserve_alignments: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__(
decoder_model=decoder_model,
joint_model=joint_model,
blank_index=blank_index,
max_symbols_per_step=max_symbols_per_step,
preserve_alignments=preserve_alignments,
preserve_frame_confidence=preserve_frame_confidence,
confidence_measure_cfg=confidence_measure_cfg,
)
self.big_blank_durations = big_blank_durations
self._SOS = blank_index - len(big_blank_durations)
@torch.no_grad()
def _greedy_decode(
self, x: torch.Tensor, out_len: torch.Tensor, partial_hypotheses: Optional[rnnt_utils.Hypothesis] = None
):
# x: [T, 1, D]
# out_len: [seq_len]
# Initialize blank state and empty label set in Hypothesis
hypothesis = rnnt_utils.Hypothesis(score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None)
if partial_hypotheses is not None:
hypothesis.last_token = partial_hypotheses.last_token
hypothesis.y_sequence = (
partial_hypotheses.y_sequence.cpu().tolist()
if isinstance(partial_hypotheses.y_sequence, torch.Tensor)
else partial_hypotheses.y_sequence
)
if partial_hypotheses.dec_state is not None:
hypothesis.dec_state = self.decoder.batch_concat_states([partial_hypotheses.dec_state])
hypothesis.dec_state = _states_to_device(hypothesis.dec_state, x.device)
if self.preserve_alignments:
# Alignments is a 2-dimensional dangling list representing T x U
hypothesis.alignments = [[]]
if self.preserve_frame_confidence:
hypothesis.frame_confidence = [[]]
# if this variable is > 1, it means the last emission was a big-blank and we need to skip frames.
big_blank_duration = 1
# For timestep t in X_t
for time_idx in range(out_len):
if big_blank_duration > 1:
# skip frames until big_blank_duration == 1.
big_blank_duration -= 1
continue
# Extract encoder embedding at timestep t
# f = x[time_idx, :, :].unsqueeze(0) # [1, 1, D]
f = x.narrow(dim=0, start=time_idx, length=1)
# Setup exit flags and counter
not_blank = True
symbols_added = 0
            # While a blank is not predicted and we don't run out of max symbols per timestep
while not_blank and (self.max_symbols is None or symbols_added < self.max_symbols):
# In the first timestep, we initialize the network with RNNT Blank
# In later timesteps, we provide previous predicted label as input.
if hypothesis.last_token is None and hypothesis.dec_state is None:
last_label = self._SOS
else:
last_label = label_collate([[hypothesis.last_token]])
# Perform prediction network and joint network steps.
g, hidden_prime = self._pred_step(last_label, hypothesis.dec_state)
# If preserving per-frame confidence, log_normalize must be true
logp = self._joint_step(f, g, log_normalize=True if self.preserve_frame_confidence else None)[
0, 0, 0, :
]
del g
                # torch.max(0) op doesn't exist for FP16.
if logp.dtype != torch.float32:
logp = logp.float()
# get index k, of max prob
v, k = logp.max(0)
k = k.item() # K is the label at timestep t_s in inner loop, s >= 0.
# Note, we have non-blanks in the vocab first, followed by big blanks, and standard blank at last.
# here we check if it's a big blank and if yes, set the duration variable.
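                # Worked example (hypothetical sizes): with big_blank_durations=[2, 4] and the standard blank
                # at self._blank_index, k == self._blank_index - 1 maps to duration 2 and
                # k == self._blank_index - 2 maps to duration 4 via the indexing below.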
if k >= self._blank_index - len(self.big_blank_durations) and k < self._blank_index:
big_blank_duration = self.big_blank_durations[self._blank_index - k - 1]
if self.preserve_alignments:
# insert logprobs into last timestep
hypothesis.alignments[-1].append((logp.to('cpu'), torch.tensor(k, dtype=torch.int32)))
if self.preserve_frame_confidence:
# insert confidence into last timestep
hypothesis.frame_confidence[-1].append(self._get_confidence(logp))
del logp
# If any type of blank token is predicted, exit inner loop, move onto next timestep t
if k >= self._blank_index - len(self.big_blank_durations):
not_blank = False
if self.preserve_alignments:
# convert Ti-th logits into a torch array
hypothesis.alignments.append([]) # blank buffer for next timestep
if self.preserve_frame_confidence:
hypothesis.frame_confidence.append([]) # blank buffer for next timestep
else:
# Append token to label set, update RNN state.
hypothesis.y_sequence.append(k)
hypothesis.score += float(v)
hypothesis.timestep.append(time_idx)
hypothesis.dec_state = hidden_prime
hypothesis.last_token = k
# Increment token counter.
symbols_added += 1
# Remove trailing empty list of Alignments
if self.preserve_alignments:
if len(hypothesis.alignments[-1]) == 0:
del hypothesis.alignments[-1]
# Remove trailing empty list of per-frame confidence
if self.preserve_frame_confidence:
if len(hypothesis.frame_confidence[-1]) == 0:
del hypothesis.frame_confidence[-1]
# Unpack the hidden states
hypothesis.dec_state = self.decoder.batch_select_state(hypothesis.dec_state, 0)
return hypothesis
class GreedyBatchedMultiblankRNNTInfer(GreedyBatchedRNNTInfer):
"""A batch level greedy transducer decoder.
Batch level greedy decoding, performed auto-regressively.
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
blank_index: int index of the blank token. Must be len(vocabulary) for multi-blank RNNTs.
big_blank_durations: a list containing durations for big blanks the model supports.
max_symbols_per_step: Optional int. The maximum number of symbols that can be added
to a sequence in a single time step; if set to None then there is
no limit.
preserve_alignments: Bool flag which preserves the history of alignments generated during
greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1 + num-big-blanks), Tensor(scalar, label after argmax)).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores generated
during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
                        Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
blank_index: int,
big_blank_durations: List[int],
max_symbols_per_step: Optional[int] = None,
preserve_alignments: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__(
decoder_model=decoder_model,
joint_model=joint_model,
blank_index=blank_index,
max_symbols_per_step=max_symbols_per_step,
preserve_alignments=preserve_alignments,
preserve_frame_confidence=preserve_frame_confidence,
confidence_measure_cfg=confidence_measure_cfg,
)
self.big_blank_durations = big_blank_durations
        # Depending on the availability of `blank_as_pad` support,
        # switch to the more efficient batched decoding technique when possible
if self.decoder.blank_as_pad:
self._greedy_decode = self._greedy_decode_blank_as_pad
else:
self._greedy_decode = self._greedy_decode_masked
self._SOS = blank_index - len(big_blank_durations)
def _greedy_decode_blank_as_pad(
self,
x: torch.Tensor,
out_len: torch.Tensor,
device: torch.device,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
if partial_hypotheses is not None:
            raise NotImplementedError("`partial_hypotheses` support is not implemented")
with torch.inference_mode():
# x: [B, T, D]
# out_len: [B]
# device: torch.device
# Initialize list of Hypothesis
batchsize = x.shape[0]
hypotheses = [
rnnt_utils.Hypothesis(score=0.0, y_sequence=[], timestep=[], dec_state=None) for _ in range(batchsize)
]
# Initialize Hidden state matrix (shared by entire batch)
hidden = None
            # If alignments need to be preserved, register a dangling list to hold the values
if self.preserve_alignments:
# alignments is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.alignments = [[]]
            # If confidence scores need to be preserved, register a dangling list to hold the values
if self.preserve_frame_confidence:
# frame_confidence is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.frame_confidence = [[]]
hyp.y_3best = [[]]
hyp.frame_confidence_3best = [[[]]]
hyp.logp = [[]]
# Last Label buffer + Last Label without blank buffer
# batch level equivalent of the last_label
last_label = torch.full([batchsize, 1], fill_value=self._SOS, dtype=torch.long, device=device)
# this mask is true for if the emission is *any type* of blank.
blank_mask = torch.full([batchsize], fill_value=0, dtype=torch.bool, device=device)
# Get max sequence length
max_out_len = out_len.max()
            # We have a mask for each big blank. A mask being "true" means the previous emission was exactly the
            # big blank with the corresponding duration, or one with a larger duration. E.g., the big_blank_mask
            # for duration 2 will be set true if the previous emission was a big blank with duration 4, 3 or 2,
            # but false if the previous emission was a standard blank (with duration = 1).
            big_blank_masks = [
                torch.full([batchsize], fill_value=0, dtype=torch.bool, device=device)
                for _ in range(len(self.big_blank_durations))
            ]
            # if this variable is > 1, it means the previous emission was a big blank and we need to skip frames.
big_blank_duration = 1
for time_idx in range(max_out_len):
if big_blank_duration > 1:
# skip frames until big_blank_duration == 1
big_blank_duration -= 1
continue
f = x.narrow(dim=1, start=time_idx, length=1) # [B, 1, D]
# Prepare t timestamp batch variables
not_blank = True
symbols_added = 0
# Reset all blank masks
blank_mask.mul_(False)
for i in range(len(big_blank_masks)):
big_blank_masks[i].mul_(False)
# Update blank mask with time mask
# Batch: [B, T, D], but Bi may have seq len < max(seq_lens_in_batch)
                # Forcibly mask with "blank" tokens for all samples where the current time step T >= seq_len
blank_mask = time_idx >= out_len
for i in range(len(big_blank_masks)):
big_blank_masks[i] = time_idx >= out_len
# Start inner loop
while not_blank and (self.max_symbols is None or symbols_added < self.max_symbols):
# Batch prediction and joint network steps
# If very first prediction step, submit SOS tag (blank) to pred_step.
# This feeds a zero tensor as input to AbstractRNNTDecoder to prime the state
if time_idx == 0 and symbols_added == 0 and hidden is None:
g, hidden_prime = self._pred_step(self._SOS, hidden, batch_size=batchsize)
else:
# Perform batch step prediction of decoder, getting new states and scores ("g")
g, hidden_prime = self._pred_step(last_label, hidden, batch_size=batchsize)
# Batched joint step - Output = [B, V + 1 + num-big-blanks]
# If preserving per-frame confidence, log_normalize must be true
logp = self._joint_step(f, g, log_normalize=True if self.preserve_frame_confidence else None)[
:, 0, 0, :
]
if logp.dtype != torch.float32:
logp = logp.float()
# Get index k, of max prob for batch
v, k = logp.max(1)
del g
# Update blank mask with current predicted blanks
# This is accumulating blanks over all time steps T and all target steps min(max_symbols, U)
k_is_blank = k >= self._blank_index - len(self.big_blank_durations)
blank_mask.bitwise_or_(k_is_blank)
for i in range(len(big_blank_masks)):
# using <= since as we mentioned before, the mask doesn't store exact matches.
# instead, it is True when the predicted blank's duration is >= the duration that the
# mask corresponds to.
k_is_big_blank = k <= self._blank_index - 1 - i
# need to do a bitwise_and since it could also be a non-blank.
k_is_big_blank.bitwise_and_(k_is_blank)
big_blank_masks[i].bitwise_or_(k_is_big_blank)
del k_is_blank
# If preserving alignments, check if sequence length of sample has been reached
# before adding alignment
if self.preserve_alignments:
# Insert logprobs into last timestep per sample
logp_vals = logp.to('cpu')
logp_ids = logp_vals.max(1)[1]
for batch_idx in range(batchsize):
if time_idx < out_len[batch_idx]:
hypotheses[batch_idx].alignments[-1].append(
(logp_vals[batch_idx], logp_ids[batch_idx])
)
del logp_vals
# If preserving per-frame confidence, check if sequence length of sample has been reached
# before adding confidence scores
if self.preserve_frame_confidence:
# Insert probabilities into last timestep per sample
confidence = self._get_confidence(logp)
for batch_idx in range(batchsize):
if time_idx < out_len[batch_idx]:
hypotheses[batch_idx].frame_confidence[-1].append(confidence[batch_idx])
del logp
# If all samples predict / have predicted prior blanks, exit loop early
# This is equivalent to if single sample predicted k
if blank_mask.all():
not_blank = False
for i in range(len(big_blank_masks) + 1):
                            # The task here is to find the shortest blank duration across all samples in the batch,
                            # so we start from the shortest blank duration and go up,
                            # and stop once we find the first duration whose corresponding mask isn't all True.
if i == len(big_blank_masks) or not big_blank_masks[i].all():
big_blank_duration = self.big_blank_durations[i - 1] if i > 0 else 1
break
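                        # Worked example (hypothetical durations, assumed sorted in increasing order): with
                        # big_blank_durations=[2, 4], if every sample emitted a blank of duration >= 2 but not
                        # all emitted the duration-4 blank, the whole batch advances 2 frames before the next
                        # prediction step.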
# If preserving alignments, convert the current Uj alignments into a torch.Tensor
# Then preserve U at current timestep Ti
# Finally, forward the timestep history to Ti+1 for that sample
# All of this should only be done iff the current time index <= sample-level AM length.
# Otherwise ignore and move to next sample / next timestep.
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for batch_idx in range(batchsize):
# this checks if current timestep <= sample-level AM length
# If current timestep > sample-level AM length, no alignments will be added
# Therefore the list of Uj alignments is empty here.
if len(hypotheses[batch_idx].alignments[-1]) > 0:
hypotheses[batch_idx].alignments.append([]) # blank buffer for next timestep
# Do the same if preserving per-frame confidence
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) > 0:
hypotheses[batch_idx].frame_confidence.append([]) # blank buffer for next timestep
hypotheses[batch_idx].y_3best.append([])
hypotheses[batch_idx].frame_confidence_3best.append([])
hypotheses[batch_idx].logp.append([])
else:
# Collect batch indices where blanks occurred now/past
blank_indices = (blank_mask == 1).nonzero(as_tuple=False)
# Recover prior state for all samples which predicted blank now/past
if hidden is not None:
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, hidden, blank_indices)
elif len(blank_indices) > 0 and hidden is None:
# Reset state if there were some blank and other non-blank predictions in batch
# Original state is filled with zeros so we just multiply
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, None, blank_indices, value=0.0)
# Recover prior predicted label for all samples which predicted blank now/past
k[blank_indices] = last_label[blank_indices, 0]
# Update new label and hidden state for next iteration
last_label = k.clone().view(-1, 1)
hidden = hidden_prime
# Update predicted labels, accounting for time mask
# If blank was predicted even once, now or in the past,
# Force the current predicted label to also be blank
                        # This ensures that blanks propagate across all timesteps
                        # once they have occurred (normally the stopping condition of the sample-level loop).
for kidx, ki in enumerate(k):
if blank_mask[kidx] == 0:
hypotheses[kidx].y_sequence.append(ki)
hypotheses[kidx].timestep.append(time_idx)
hypotheses[kidx].score += float(v[kidx])
symbols_added += 1
# Remove trailing empty list of alignments at T_{am-len} x Uj
if self.preserve_alignments:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].alignments[-1]) == 0:
del hypotheses[batch_idx].alignments[-1]
# Remove trailing empty list of confidence scores at T_{am-len} x Uj
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) == 0:
del hypotheses[batch_idx].frame_confidence[-1]
del hypotheses[batch_idx].y_3best[-1]
del hypotheses[batch_idx].frame_confidence_3best[-1]
del hypotheses[batch_idx].logp[-1]
# Preserve states
for batch_idx in range(batchsize):
hypotheses[batch_idx].dec_state = self.decoder.batch_select_state(hidden, batch_idx)
return hypotheses
def _greedy_decode_masked(
self,
x: torch.Tensor,
out_len: torch.Tensor,
device: torch.device,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
if partial_hypotheses is not None:
            raise NotImplementedError("`partial_hypotheses` support is not implemented")
if self.big_blank_durations != [1] * len(self.big_blank_durations):
raise NotImplementedError(
"Efficient frame-skipping version for multi-blank masked decoding is not supported."
)
# x: [B, T, D]
# out_len: [B]
# device: torch.device
# Initialize state
batchsize = x.shape[0]
hypotheses = [
rnnt_utils.Hypothesis(score=0.0, y_sequence=[], timestep=[], dec_state=None) for _ in range(batchsize)
]
# Initialize Hidden state matrix (shared by entire batch)
hidden = None
        # If alignments need to be preserved, register a dangling list to hold the values
if self.preserve_alignments:
# alignments is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.alignments = [[]]
        else:
            for hyp in hypotheses:
                hyp.alignments = None
        # If confidence scores need to be preserved, register a dangling list to hold the values
if self.preserve_frame_confidence:
# frame_confidence is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.frame_confidence = [[]]
# Last Label buffer + Last Label without blank buffer
# batch level equivalent of the last_label
last_label = torch.full([batchsize, 1], fill_value=self._blank_index, dtype=torch.long, device=device)
last_label_without_blank = last_label.clone()
# Mask buffers
blank_mask = torch.full([batchsize], fill_value=0, dtype=torch.bool, device=device)
# Get max sequence length
max_out_len = out_len.max()
with torch.inference_mode():
for time_idx in range(max_out_len):
f = x.narrow(dim=1, start=time_idx, length=1) # [B, 1, D]
# Prepare t timestamp batch variables
not_blank = True
symbols_added = 0
# Reset blank mask
blank_mask.mul_(False)
# Update blank mask with time mask
# Batch: [B, T, D], but Bi may have seq len < max(seq_lens_in_batch)
                # Forcibly mask with "blank" tokens for all samples where the current time step T >= seq_len
blank_mask = time_idx >= out_len
# Start inner loop
while not_blank and (self.max_symbols is None or symbols_added < self.max_symbols):
# Batch prediction and joint network steps
# If very first prediction step, submit SOS tag (blank) to pred_step.
# This feeds a zero tensor as input to AbstractRNNTDecoder to prime the state
if time_idx == 0 and symbols_added == 0 and hidden is None:
g, hidden_prime = self._pred_step(self._SOS, hidden, batch_size=batchsize)
else:
# Set a dummy label for the blank value
                        # This value will be overwritten by "blank" again at the last label update below
# This is done as vocabulary of prediction network does not contain "blank" token of RNNT
last_label_without_blank_mask = last_label >= self._blank_index
last_label_without_blank[last_label_without_blank_mask] = 0 # temp change of label
last_label_without_blank[~last_label_without_blank_mask] = last_label[
~last_label_without_blank_mask
]
# Perform batch step prediction of decoder, getting new states and scores ("g")
g, hidden_prime = self._pred_step(last_label_without_blank, hidden, batch_size=batchsize)
# Batched joint step - Output = [B, V + 1 + num-big-blanks]
# If preserving per-frame confidence, log_normalize must be true
logp = self._joint_step(f, g, log_normalize=True if self.preserve_frame_confidence else None)[
:, 0, 0, :
]
if logp.dtype != torch.float32:
logp = logp.float()
# Get index k, of max prob for batch
v, k = logp.max(1)
del g
# Update blank mask with current predicted blanks
# This is accumulating blanks over all time steps T and all target steps min(max_symbols, U)
k_is_blank = k == self._blank_index
blank_mask.bitwise_or_(k_is_blank)
# If preserving alignments, check if sequence length of sample has been reached
# before adding alignment
if self.preserve_alignments:
# Insert logprobs into last timestep per sample
logp_vals = logp.to('cpu')
logp_ids = logp_vals.max(1)[1]
for batch_idx in range(batchsize):
if time_idx < out_len[batch_idx]:
hypotheses[batch_idx].alignments[-1].append(
(logp_vals[batch_idx], logp_ids[batch_idx])
)
del logp_vals
# If preserving per-frame confidence, check if sequence length of sample has been reached
# before adding confidence scores
if self.preserve_frame_confidence:
# Insert probabilities into last timestep per sample
confidence = self._get_confidence(logp)
for batch_idx in range(batchsize):
if time_idx < out_len[batch_idx]:
hypotheses[batch_idx].frame_confidence[-1].append(confidence[batch_idx])
del logp
# If all samples predict / have predicted prior blanks, exit loop early
# This is equivalent to if single sample predicted k
if blank_mask.all():
not_blank = False
# If preserving alignments, convert the current Uj alignments into a torch.Tensor
# Then preserve U at current timestep Ti
# Finally, forward the timestep history to Ti+1 for that sample
# All of this should only be done iff the current time index <= sample-level AM length.
# Otherwise ignore and move to next sample / next timestep.
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for batch_idx in range(batchsize):
# this checks if current timestep <= sample-level AM length
# If current timestep > sample-level AM length, no alignments will be added
# Therefore the list of Uj alignments is empty here.
if len(hypotheses[batch_idx].alignments[-1]) > 0:
hypotheses[batch_idx].alignments.append([]) # blank buffer for next timestep
# Do the same if preserving per-frame confidence
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) > 0:
hypotheses[batch_idx].frame_confidence.append([]) # blank buffer for next timestep
else:
# Collect batch indices where blanks occurred now/past
blank_indices = (blank_mask == 1).nonzero(as_tuple=False)
# Recover prior state for all samples which predicted blank now/past
if hidden is not None:
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, hidden, blank_indices)
elif len(blank_indices) > 0 and hidden is None:
# Reset state if there were some blank and other non-blank predictions in batch
# Original state is filled with zeros so we just multiply
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, None, blank_indices, value=0.0)
# Recover prior predicted label for all samples which predicted blank now/past
k[blank_indices] = last_label[blank_indices, 0]
# Update new label and hidden state for next iteration
last_label = k.view(-1, 1)
hidden = hidden_prime
# Update predicted labels, accounting for time mask
# If blank was predicted even once, now or in the past,
# Force the current predicted label to also be blank
# This ensures that blanks propagate across all timesteps
# once they have occurred (normally the stopping condition of the sample-level loop).
for kidx, ki in enumerate(k):
if blank_mask[kidx] == 0:
hypotheses[kidx].y_sequence.append(ki)
hypotheses[kidx].timestep.append(time_idx)
hypotheses[kidx].score += float(v[kidx])
symbols_added += 1
# Remove trailing empty list of alignments at T_{am-len} x Uj
if self.preserve_alignments:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].alignments[-1]) == 0:
del hypotheses[batch_idx].alignments[-1]
# Remove trailing empty list of confidence scores at T_{am-len} x Uj
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) == 0:
del hypotheses[batch_idx].frame_confidence[-1]
# Preserve states
for batch_idx in range(batchsize):
hypotheses[batch_idx].dec_state = self.decoder.batch_select_state(hidden, batch_idx)
return hypotheses
@dataclass
class GreedyRNNTInferConfig:
max_symbols_per_step: Optional[int] = 10
preserve_alignments: bool = False
preserve_frame_confidence: bool = False
confidence_measure_cfg: Optional[ConfidenceMeasureConfig] = ConfidenceMeasureConfig()
confidence_method_cfg: str = "DEPRECATED"
def __post_init__(self):
# OmegaConf.structured ensures that post_init check is always executed
self.confidence_measure_cfg = OmegaConf.structured(
self.confidence_measure_cfg
if isinstance(self.confidence_measure_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.confidence_measure_cfg)
)
if self.confidence_method_cfg != "DEPRECATED":
logging.warning(
"`confidence_method_cfg` is deprecated and will be removed in the future. "
"Please use `confidence_measure_cfg` instead."
)
# TODO (alaptev): delete the following two lines sometime in the future
logging.warning("Re-writing `confidence_measure_cfg` with the value of `confidence_method_cfg`.")
# OmegaConf.structured ensures that post_init check is always executed
self.confidence_measure_cfg = OmegaConf.structured(
self.confidence_method_cfg
if isinstance(self.confidence_method_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.confidence_method_cfg)
)
self.confidence_method_cfg = "DEPRECATED"
@dataclass
class GreedyBatchedRNNTInferConfig:
max_symbols_per_step: Optional[int] = 10
preserve_alignments: bool = False
preserve_frame_confidence: bool = False
confidence_measure_cfg: Optional[ConfidenceMeasureConfig] = ConfidenceMeasureConfig()
confidence_method_cfg: str = "DEPRECATED"
def __post_init__(self):
# OmegaConf.structured ensures that post_init check is always executed
self.confidence_measure_cfg = OmegaConf.structured(
self.confidence_measure_cfg
if isinstance(self.confidence_measure_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.confidence_measure_cfg)
)
if self.confidence_method_cfg != "DEPRECATED":
logging.warning(
"`confidence_method_cfg` is deprecated and will be removed in the future. "
"Please use `confidence_measure_cfg` instead."
)
# TODO (alaptev): delete the following two lines sometime in the future
logging.warning("Re-writing `confidence_measure_cfg` with the value of `confidence_method_cfg`.")
# OmegaConf.structured ensures that post_init check is always executed
self.confidence_measure_cfg = OmegaConf.structured(
self.confidence_method_cfg
if isinstance(self.confidence_method_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.confidence_method_cfg)
)
self.confidence_method_cfg = "DEPRECATED"
class GreedyTDTInfer(_GreedyRNNTInfer):
"""A greedy TDT decoder.
Sequence level greedy decoding, performed auto-regressively.
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
blank_index: int index of the blank token. Must be len(vocabulary) for TDT models.
durations: a list containing durations for TDT.
max_symbols_per_step: Optional int. The maximum number of symbols that can be added
to a sequence in a single time step; if set to None then there is
no limit.
preserve_alignments: Bool flag which preserves the history of alignments generated during
greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1 + num-big-blanks), Tensor(scalar, label after argmax)).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores generated
during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
blank_index: int,
durations: list,
max_symbols_per_step: Optional[int] = None,
preserve_alignments: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__(
decoder_model=decoder_model,
joint_model=joint_model,
blank_index=blank_index,
max_symbols_per_step=max_symbols_per_step,
preserve_alignments=preserve_alignments,
preserve_frame_confidence=preserve_frame_confidence,
confidence_measure_cfg=confidence_measure_cfg,
)
self.durations = durations
@typecheck()
def forward(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
Output token is generated auto-regressively.
Args:
encoder_output: A tensor of size (batch, features, timesteps).
encoded_lengths: list of int representing the length of each output sequence.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
# Preserve decoder and joint training state
decoder_training_state = self.decoder.training
joint_training_state = self.joint.training
with torch.inference_mode():
# Apply optional preprocessing
encoder_output = encoder_output.transpose(1, 2) # (B, T, D)
self.decoder.eval()
self.joint.eval()
hypotheses = []
# Process each sequence independently
with self.decoder.as_frozen(), self.joint.as_frozen():
for batch_idx in range(encoder_output.size(0)):
inseq = encoder_output[batch_idx, :, :].unsqueeze(1) # [T, 1, D]
logitlen = encoded_lengths[batch_idx]
partial_hypothesis = partial_hypotheses[batch_idx] if partial_hypotheses is not None else None
hypothesis = self._greedy_decode(inseq, logitlen, partial_hypotheses=partial_hypothesis)
hypotheses.append(hypothesis)
# Pack results into Hypotheses
packed_result = pack_hypotheses(hypotheses, encoded_lengths)
self.decoder.train(decoder_training_state)
self.joint.train(joint_training_state)
return (packed_result,)
@torch.no_grad()
def _greedy_decode(
self, x: torch.Tensor, out_len: torch.Tensor, partial_hypotheses: Optional[rnnt_utils.Hypothesis] = None
):
# x: [T, 1, D]
# out_len: [seq_len]
# Initialize blank state and empty label set in Hypothesis
hypothesis = rnnt_utils.Hypothesis(score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None)
if partial_hypotheses is not None:
hypothesis.last_token = partial_hypotheses.last_token
hypothesis.y_sequence = (
partial_hypotheses.y_sequence.cpu().tolist()
if isinstance(partial_hypotheses.y_sequence, torch.Tensor)
else partial_hypotheses.y_sequence
)
if partial_hypotheses.dec_state is not None:
hypothesis.dec_state = self.decoder.batch_concat_states([partial_hypotheses.dec_state])
hypothesis.dec_state = _states_to_device(hypothesis.dec_state, x.device)
if self.preserve_alignments:
# Alignments is a 2-dimensional dangling list representing T x U
hypothesis.alignments = [[]]
if self.preserve_frame_confidence:
hypothesis.frame_confidence = [[]]
time_idx = 0
while time_idx < out_len:
# Extract encoder embedding at timestep t
# f = x[time_idx, :, :].unsqueeze(0) # [1, 1, D]
f = x.narrow(dim=0, start=time_idx, length=1)
# Setup exit flags and counter
not_blank = True
symbols_added = 0
need_loop = True
# Loop while blank has not been predicted and we have not run out of max symbols for this timestep
while need_loop and (self.max_symbols is None or symbols_added < self.max_symbols):
# In the first timestep, we initialize the network with RNNT Blank
# In later timesteps, we provide previous predicted label as input.
if hypothesis.last_token is None and hypothesis.dec_state is None:
last_label = self._SOS
else:
last_label = label_collate([[hypothesis.last_token]])
# Perform prediction network and joint network steps.
g, hidden_prime = self._pred_step(last_label, hypothesis.dec_state)
# log_normalize is False here: the joint output concatenates token and duration logits, which are normalized separately below
logits = self._joint_step(f, g, log_normalize=False)
logp = logits[0, 0, 0, : -len(self.durations)]
if self.preserve_frame_confidence:
logp = torch.log_softmax(logp, -1)
duration_logp = torch.log_softmax(logits[0, 0, 0, -len(self.durations) :], dim=-1)
del g
# torch.max(0) op doesn't exist for FP16.
if logp.dtype != torch.float32:
logp = logp.float()
# get index k, of max prob
v, k = logp.max(0)
k = k.item() # K is the label at timestep t_s in inner loop, s >= 0.
d_v, d_k = duration_logp.max(0)
d_k = d_k.item()
skip = self.durations[d_k]
if self.preserve_alignments:
# insert logprobs into last timestep
hypothesis.alignments[-1].append((logp.to('cpu'), torch.tensor(k, dtype=torch.int32)))
if self.preserve_frame_confidence:
# insert confidence into last timestep
hypothesis.frame_confidence[-1].append(self._get_confidence(logp))
del logp
# If blank token is predicted, exit inner loop, move onto next timestep t
if k == self._blank_index:
not_blank = False
# this rarely happens, but we manually increment the `skip` number
# if blank is emitted and duration=0 is predicted. This prevents possible
# infinite loops.
if skip == 0:
skip = 1
if self.preserve_alignments:
# convert Ti-th logits into a torch array
hypothesis.alignments.append([]) # blank buffer for next timestep
if self.preserve_frame_confidence:
hypothesis.frame_confidence.append([]) # blank buffer for next timestep
else:
# Append token to label set, update RNN state.
hypothesis.y_sequence.append(k)
hypothesis.score += float(v)
hypothesis.timestep.append(time_idx)
hypothesis.dec_state = hidden_prime
hypothesis.last_token = k
# Increment token counter.
symbols_added += 1
time_idx += skip
need_loop = skip == 0
if symbols_added == self.max_symbols:
time_idx += 1
# Remove trailing empty list of Alignments
if self.preserve_alignments:
if len(hypothesis.alignments[-1]) == 0:
del hypothesis.alignments[-1]
# Remove trailing empty list of per-frame confidence
if self.preserve_frame_confidence:
if len(hypothesis.frame_confidence[-1]) == 0:
del hypothesis.frame_confidence[-1]
# Unpack the hidden states
hypothesis.dec_state = self.decoder.batch_select_state(hypothesis.dec_state, 0)
return hypothesis
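# Editorial sketch (illustrative only, not part of the original file): the core TDT
# inner step above in isolation, on a made-up joint output. `durations`, the blank
# index and the logit values are arbitrary; only torch is required.
def _example_tdt_step():
    durations = [0, 1, 2, 3, 4]
    blank_index = 4  # token logits of length V + 1 = 5, blank is the last index
    # Dummy joint output: token logits followed by duration logits.
    joint_out = torch.tensor([0.1, 0.3, 0.2, 0.1, 2.0, 0.0, 0.5, 3.0, 0.0, 0.0])
    token_logits = joint_out[: -len(durations)]
    duration_logits = joint_out[-len(durations) :]
    v, k = token_logits.max(0)  # best token (here: blank, index 4)
    _, d_k = duration_logits.max(0)  # best duration index (here: 2 -> skip 2 frames)
    skip = durations[int(d_k)]
    # Same guard as in _greedy_decode: a blank with duration 0 must still advance time
    # by one frame, otherwise the loop would never leave the current frame.
    if int(k) == blank_index and skip == 0:
        skip = 1
    return int(k), skip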
class GreedyBatchedTDTInfer(_GreedyRNNTInfer):
"""A batch level greedy TDT decoder.
Batch level greedy decoding, performed auto-regressively.
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
blank_index: int index of the blank token. Must be len(vocabulary) for TDT models.
durations: a list containing durations.
max_symbols_per_step: Optional int. The maximum number of symbols that can be added
to a sequence in a single time step; if set to None then there is
no limit.
preserve_alignments: Bool flag which preserves the history of alignments generated during
greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1 + num-big-blanks), Tensor(scalar, label after argmax)).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores generated
during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
blank_index: int,
durations: List[int],
max_symbols_per_step: Optional[int] = None,
preserve_alignments: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__(
decoder_model=decoder_model,
joint_model=joint_model,
blank_index=blank_index,
max_symbols_per_step=max_symbols_per_step,
preserve_alignments=preserve_alignments,
preserve_frame_confidence=preserve_frame_confidence,
confidence_measure_cfg=confidence_measure_cfg,
)
self.durations = durations
# Depending on availability of `blank_as_pad` support
# switch between more efficient batch decoding technique
if self.decoder.blank_as_pad:
self._greedy_decode = self._greedy_decode_blank_as_pad
else:
self._greedy_decode = self._greedy_decode_masked
@typecheck()
def forward(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
Output token is generated auto-regressively.
Args:
encoder_output: A tensor of size (batch, features, timesteps).
encoded_lengths: list of int representing the length of each output sequence.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
# Preserve decoder and joint training state
decoder_training_state = self.decoder.training
joint_training_state = self.joint.training
with torch.inference_mode():
# Apply optional preprocessing
encoder_output = encoder_output.transpose(1, 2) # (B, T, D)
logitlen = encoded_lengths
self.decoder.eval()
self.joint.eval()
with self.decoder.as_frozen(), self.joint.as_frozen():
inseq = encoder_output # [B, T, D]
hypotheses = self._greedy_decode(
inseq, logitlen, device=inseq.device, partial_hypotheses=partial_hypotheses
)
# Pack the hypotheses results
packed_result = pack_hypotheses(hypotheses, logitlen)
self.decoder.train(decoder_training_state)
self.joint.train(joint_training_state)
return (packed_result,)
def _greedy_decode_blank_as_pad(
self,
x: torch.Tensor,
out_len: torch.Tensor,
device: torch.device,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
if partial_hypotheses is not None:
raise NotImplementedError("`partial_hypotheses` support is not supported")
with torch.inference_mode():
# x: [B, T, D]
# out_len: [B]
# device: torch.device
# Initialize list of Hypothesis
batchsize = x.shape[0]
hypotheses = [
rnnt_utils.Hypothesis(score=0.0, y_sequence=[], timestep=[], dec_state=None) for _ in range(batchsize)
]
# Initialize Hidden state matrix (shared by entire batch)
hidden = None
# If alignments need to be preserved, register a dangling list to hold the values
if self.preserve_alignments:
# alignments is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.alignments = [[]]
# If confidence scores need to be preserved, register a dangling list to hold the values
if self.preserve_frame_confidence:
# frame_confidence is a 3-dimensional dangling list representing B x T x U
for hyp in hypotheses:
hyp.frame_confidence = [[]]
hyp.y_3best = [[]]
hyp.frame_confidence_3best = [[[]]]
hyp.logp = [[]]
# Last Label buffer + Last Label without blank buffer
# batch level equivalent of the last_label
last_label = torch.full([batchsize, 1], fill_value=self._blank_index, dtype=torch.long, device=device)
# Mask buffers
blank_mask = torch.full([batchsize], fill_value=0, dtype=torch.bool, device=device)
# Get max sequence length
max_out_len = out_len.max()
# skip is the number of frames the next decoding step should "jump" ahead by. When skip == 1,
# the next decoding step simply uses the next input frame.
skip = 1
for time_idx in range(max_out_len):
if skip > 1: # if skip > 1 at the current step, we decrement it and skip the current frame.
skip -= 1
continue
f = x.narrow(dim=1, start=time_idx, length=1) # [B, 1, D]
# need_to_stay is a boolean indicating whether the next decoding step should remain in the same frame.
need_to_stay = True
symbols_added = 0
# Reset blank mask
blank_mask.mul_(False)
# Update blank mask with time mask
# Batch: [B, T, D], but Bi may have seq len < max(seq_lens_in_batch)
# Forcibly mask with "blank" tokens, for all sample where current time step T > seq_len
blank_mask = time_idx >= out_len
# Start inner loop
while need_to_stay and (self.max_symbols is None or symbols_added < self.max_symbols):
# Batch prediction and joint network steps
# If very first prediction step, submit SOS tag (blank) to pred_step.
# This feeds a zero tensor as input to AbstractRNNTDecoder to prime the state
if time_idx == 0 and symbols_added == 0 and hidden is None:
g, hidden_prime = self._pred_step(self._SOS, hidden, batch_size=batchsize)
else:
# Perform batch step prediction of decoder, getting new states and scores ("g")
g, hidden_prime = self._pred_step(last_label, hidden, batch_size=batchsize)
# Batched joint step - Output = [B, V + 1 + num-big-blanks]
# Note: log_normalize must not be True here since the joint output is a concatenation of token logits and duration logits,
# and they need to be normalized independently.
joined = self._joint_step(f, g, log_normalize=None)
logp = joined[:, 0, 0, : -len(self.durations)]
duration_logp = joined[:, 0, 0, -len(self.durations) :]
if logp.dtype != torch.float32:
logp = logp.float()
duration_logp = duration_logp.float()
# get the max for both token and duration predictions.
v, k = logp.max(1)
dv, dk = duration_logp.max(1)
# here we set the skip value to be the minimum of all predicted durations, hence the "torch.min(dk)" call.
# Please refer to Section 5.2 of our paper https://arxiv.org/pdf/2304.06795.pdf for an explanation.
skip = self.durations[int(torch.min(dk))]
# this is a special case: if all samples in the batch emit blanks, we require that skip be at least 1
# so we don't loop forever at the current frame.
if blank_mask.all():
if skip == 0:
skip = 1
need_to_stay = skip == 0
del g
# Update blank mask with current predicted blanks
# This is accumulating blanks over all time steps T and all target steps min(max_symbols, U)
k_is_blank = k == self._blank_index
blank_mask.bitwise_or_(k_is_blank)
del k_is_blank
del logp, duration_logp
# If all samples predict / have predicted prior blanks, exit loop early
# This is the batch equivalent of a single sample predicting blank
if not blank_mask.all():
# Collect batch indices where blanks occurred now/past
blank_indices = (blank_mask == 1).nonzero(as_tuple=False)
# Recover prior state for all samples which predicted blank now/past
if hidden is not None:
hidden_prime = self.decoder.batch_copy_states(hidden_prime, hidden, blank_indices)
elif len(blank_indices) > 0 and hidden is None:
# Reset state if there were some blank and other non-blank predictions in batch
# Original state is filled with zeros so we just multiply
# LSTM has 2 states
hidden_prime = self.decoder.batch_copy_states(hidden_prime, None, blank_indices, value=0.0)
# Recover prior predicted label for all samples which predicted blank now/past
k[blank_indices] = last_label[blank_indices, 0]
# Update new label and hidden state for next iteration
last_label = k.clone().view(-1, 1)
hidden = hidden_prime
# Update predicted labels, accounting for time mask
# If blank was predicted even once, now or in the past,
# Force the current predicted label to also be blank
# This ensures that blanks propagate across all timesteps
# once they have occurred (normally the stopping condition of the sample-level loop).
for kidx, ki in enumerate(k):
if blank_mask[kidx] == 0:
hypotheses[kidx].y_sequence.append(ki)
hypotheses[kidx].timestep.append(time_idx)
hypotheses[kidx].score += float(v[kidx])
symbols_added += 1
# Remove trailing empty list of alignments at T_{am-len} x Uj
if self.preserve_alignments:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].alignments[-1]) == 0:
del hypotheses[batch_idx].alignments[-1]
# Remove trailing empty list of confidence scores at T_{am-len} x Uj
if self.preserve_frame_confidence:
for batch_idx in range(batchsize):
if len(hypotheses[batch_idx].frame_confidence[-1]) == 0:
del hypotheses[batch_idx].frame_confidence[-1]
del hypotheses[batch_idx].y_3best[-1]
del hypotheses[batch_idx].frame_confidence_3best[-1]
del hypotheses[batch_idx].logp[-1]
# Preserve states
for batch_idx in range(batchsize):
hypotheses[batch_idx].dec_state = self.decoder.batch_select_state(hidden, batch_idx)
return hypotheses
def _greedy_decode_masked(
self,
x: torch.Tensor,
out_len: torch.Tensor,
device: torch.device,
partial_hypotheses: Optional[List[rnnt_utils.Hypothesis]] = None,
):
raise NotImplementedError("masked greedy-batched decode is not supported for TDT models.")
|
NeMo-main
|
nemo/collections/asr/parts/submodules/rnnt_greedy_decoding.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch import nn as nn
from torch.nn import LayerNorm
from nemo.collections.asr.parts.submodules.conformer_modules import ConformerConvolution, ConformerFeedForward
from nemo.collections.asr.parts.submodules.multi_head_attention import (
MultiHeadAttention,
RelPositionMultiHeadAttention,
)
from nemo.collections.common.parts import adapter_modules
from nemo.core.classes.mixins import AccessMixin
from nemo.core.classes.mixins.adapter_mixins import AdapterModuleMixin
__all__ = ['SqueezeformerLayer', 'ConformerFeedForward']
class ScaleBiasLayer(torch.nn.Module):
"""
Computes an affine transformation y = x * scale + bias, either learned via adaptive weights, or fixed.
Efficient alternative to LayerNorm where we can avoid computing the mean and variance of the input, and
just rescale the output of the previous layer.
Args:
d_model (int): input dimension of layer.
adaptive_scale (bool): whether to learn the affine transformation parameters or not. If set to False,
the scale is fixed to 1 and bias to 0, effectively performing a No-Op on the input.
This is done for export compatibility.
"""
def __init__(self, d_model: int, adaptive_scale: bool):
super().__init__()
self.adaptive_scale = adaptive_scale
if adaptive_scale:
self.scale = nn.Parameter(torch.ones(d_model))
self.bias = nn.Parameter(torch.zeros(d_model))
else:
self.register_buffer('scale', torch.ones(d_model), persistent=True)
self.register_buffer('bias', torch.zeros(d_model), persistent=True)
def forward(self, x):
scale = self.scale.view(1, 1, -1)
bias = self.bias.view(1, 1, -1)
return x * scale + bias
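# Editorial sketch (illustrative only, not part of the original file): with
# adaptive_scale=False the layer above is an exact no-op, which is what makes it
# export-friendly; with adaptive_scale=True it applies a learned per-feature affine.
def _example_scale_bias_layer():
    x = torch.randn(2, 7, 16)  # (B, T, d_model)
    fixed = ScaleBiasLayer(d_model=16, adaptive_scale=False)
    assert torch.equal(fixed(x), x)  # scale=1, bias=0 -> identity
    learned = ScaleBiasLayer(d_model=16, adaptive_scale=True)
    return learned(x).shape  # y = x * scale + bias, still (B, T, d_model)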
class SqueezeformerLayer(torch.nn.Module, AdapterModuleMixin, AccessMixin):
"""A single block of the Squeezeformer encoder.
Args:
d_model (int): input dimension of MultiheadAttentionMechanism and PositionwiseFeedForward
d_ff (int): hidden dimension of PositionwiseFeedForward
n_heads (int): number of heads for multi-head attention
conv_kernel_size (int): kernel size for depthwise convolution in convolution module
dropout (float): dropout probabilities for linear layers
dropout_att (float): dropout probabilities for attention distributions
adaptive_scale (bool): Whether to scale the inputs to each component by affine `scale` and `bias` layer.
Or use a fixed scale=1 and bias=0.
"""
def __init__(
self,
d_model,
d_ff,
self_attention_model='rel_pos',
n_heads=4,
conv_kernel_size=31,
conv_norm_type='batch_norm',
dropout=0.1,
dropout_att=0.1,
pos_bias_u=None,
pos_bias_v=None,
adaptive_scale: bool = True,
):
super().__init__()
self.self_attention_model = self_attention_model
self.n_heads = n_heads
self.fc_factor = 1.0
self.adaptive_scale = adaptive_scale
# first feed forward module
self.norm_feed_forward1 = LayerNorm(d_model)
self.feed_forward1 = ConformerFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
self.feed_forward1_scale = ScaleBiasLayer(d_model=d_model, adaptive_scale=adaptive_scale)
# convolution module
self.norm_conv = LayerNorm(d_model)
self.conv = ConformerConvolution(
d_model=d_model, kernel_size=conv_kernel_size, norm_type=conv_norm_type, pointwise_activation='swish'
)
self.conv_scale = ScaleBiasLayer(d_model=d_model, adaptive_scale=adaptive_scale)
# multi-headed self-attention module
self.norm_self_att = LayerNorm(d_model)
if self_attention_model == 'rel_pos':
self.self_attn = RelPositionMultiHeadAttention(
n_head=n_heads, n_feat=d_model, dropout_rate=dropout_att, pos_bias_u=pos_bias_u, pos_bias_v=pos_bias_v
)
elif self_attention_model == 'abs_pos':
self.self_attn = MultiHeadAttention(n_head=n_heads, n_feat=d_model, dropout_rate=dropout_att)
else:
raise ValueError(
f"'{self_attention_model}' is not not a valid value for 'self_attention_model', "
f"valid values can be from ['rel_pos', 'abs_pos']"
)
self.self_attn_scale = ScaleBiasLayer(d_model=d_model, adaptive_scale=adaptive_scale)
# second feed forward module
self.norm_feed_forward2 = LayerNorm(d_model)
self.feed_forward2 = ConformerFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
self.feed_forward2_scale = ScaleBiasLayer(d_model=d_model, adaptive_scale=adaptive_scale)
self.dropout = nn.Dropout(dropout)
# self.norm_out = LayerNorm(d_model)
# initialize parameters properly
self.reset_parameters()
def forward(self, x, att_mask=None, pos_emb=None, pad_mask=None):
"""
Args:
x (torch.Tensor): input signals (B, T, d_model)
att_mask (torch.Tensor): attention masks(B, T, T)
pos_emb (torch.Tensor): (L, 1, d_model)
pad_mask (torch.tensor): padding mask
Returns:
x (torch.Tensor): (B, T, d_model)
"""
residual = x
x = self.self_attn_scale(x)
if self.self_attention_model == 'rel_pos':
x = self.self_attn(query=x, key=x, value=x, mask=att_mask, pos_emb=pos_emb)
elif self.self_attention_model == 'abs_pos':
x = self.self_attn(query=x, key=x, value=x, mask=att_mask)
else:
x = None
x = residual + self.dropout(x)
x = self.norm_self_att(x)
residual = x
if self.is_adapter_available():
# Call the MHA adapters
pack_ip = {
'x': residual,
'loc': 'mha',
'att_mask': att_mask,
'pos_emb': pos_emb,
}
pack_ip = self.forward_enabled_adapters(pack_ip)
x = pack_ip['x']
x = self.feed_forward1_scale(x)
x = self.feed_forward1(x)
x = residual + self.dropout(x) * self.fc_factor
x = self.norm_feed_forward1(x)
residual = x
x = self.conv_scale(x)
x = self.conv(x, pad_mask)
x = residual + self.dropout(x)
x = self.norm_conv(x)
residual = x
x = self.feed_forward2_scale(x)
x = self.feed_forward2(x)
x = residual + self.dropout(x) * self.fc_factor
x = self.norm_feed_forward2(x)
if self.is_adapter_available():
# Call the adapters
pack_ip = {
'x': x,
'loc': 'post',
}
pack_ip = self.forward_enabled_adapters(pack_ip)
x = pack_ip['x']
if self.is_access_enabled() and self.access_cfg.get('save_encoder_tensors', False):
self.register_accessible_tensor(name='encoder', tensor=x)
return x
def forward_single_enabled_adapter_(
self,
input: dict,
adapter_module: torch.nn.Module,
*,
adapter_name: str,
adapter_strategy: 'nemo.core.classes.mixins.adapter_mixin_strategies.AbstractAdapterStrategy',
):
"""
Perform the forward step of a single adapter module on some input data.
**Note**: Subclasses can override this method to accommodate more complicated adapter forward steps.
Args:
input: Dictionary of packed tensors. The dict should contain at least
`x`: output tensor
`loc`: Semantic location in module where this adapter was called
`att_mask`: Optional, Attention mask
`pos_emb`: Optional, Positional Embedding for Relative Positional Encoding.
The output tensor of the calling module is the input to the first adapter, whose output
is then chained to the next adapter until all adapters are consumed.
adapter_module: The adapter module that is currently required to perform the forward pass.
adapter_name: The resolved name of the adapter that is undergoing the current forward pass.
adapter_strategy: A subclass of `AbstractAdapterStrategy`, that determines how the
output of the adapter should be merged with the input, or if it should be merged at all.
Returns:
The result tensor, after the current active adapter has finished its forward pass.
"""
# (input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin')
x = input['x']
loc = input['loc']
att_mask = input.get('att_mask', None)
pos_emb = input.get('pos_emb', None)
if isinstance(adapter_module, adapter_modules.LinearAdapter) and loc == 'post':
output = adapter_strategy(x, adapter_module, module=self)
elif isinstance(adapter_module, MultiHeadAttention) and loc == 'mha':
if self.self_attention_model == 'rel_pos':
x = dict(query=x, key=x, value=x, mask=att_mask, pos_emb=pos_emb)
output = adapter_strategy(x, adapter_module, module=self)
elif self.self_attention_model == 'abs_pos':
x = dict(query=x, key=x, value=x, mask=att_mask)
output = adapter_strategy(x, adapter_module, module=self)
else:
raise ValueError(f"Unsupported value of self_attention_model , provided {self.self_attention_model}!")
else:
# No adapter compatible, skip
output = x
input['x'] = output
return input
def reset_parameters(self):
# Used for Squeezeformer initialization only
self.feed_forward1.reset_parameters_ff()
self.feed_forward2.reset_parameters_ff()
self.conv.reset_parameters_conv()
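# Editorial sketch (illustrative only, not part of the original file): one forward pass
# through a SqueezeformerLayer on random tensors. Shapes are arbitrary; 'abs_pos'
# attention is used so no relative positional embedding needs to be built, and the
# attention/convolution modules are assumed to accept the None masks passed here.
def _example_squeezeformer_layer():
    layer = SqueezeformerLayer(d_model=144, d_ff=576, self_attention_model='abs_pos', n_heads=4)
    layer.eval()
    x = torch.randn(2, 50, 144)  # (B, T, d_model)
    with torch.no_grad():
        y = layer(x, att_mask=None, pos_emb=None, pad_mask=None)
    return y.shape  # stays (B, T, d_model)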
|
NeMo-main
|
nemo/collections/asr/parts/submodules/squeezeformer_modules.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceMeasureConfig, ConfidenceMeasureMixin
from nemo.core.classes import Typing, typecheck
from nemo.core.neural_types import HypothesisType, LengthsType, LogprobsType, NeuralType
from nemo.utils import logging
def pack_hypotheses(hypotheses: List[rnnt_utils.Hypothesis], logitlen: torch.Tensor,) -> List[rnnt_utils.Hypothesis]:
if logitlen is not None:
if hasattr(logitlen, 'cpu'):
logitlen_cpu = logitlen.to('cpu')
else:
logitlen_cpu = logitlen
for idx, hyp in enumerate(hypotheses): # type: rnnt_utils.Hypothesis
hyp.y_sequence = torch.tensor(hyp.y_sequence, dtype=torch.long)
if logitlen is not None:
hyp.length = logitlen_cpu[idx]
if hyp.dec_state is not None:
hyp.dec_state = _states_to_device(hyp.dec_state)
return hypotheses
def _states_to_device(dec_state, device='cpu'):
if torch.is_tensor(dec_state):
dec_state = dec_state.to(device)
elif isinstance(dec_state, (list, tuple)):
dec_state = tuple(_states_to_device(dec_i, device) for dec_i in dec_state)
return dec_state
class GreedyCTCInfer(Typing, ConfidenceMeasureMixin):
"""A greedy CTC decoder.
Provides a common abstraction for sample level and batch level greedy decoding.
Args:
blank_index: int index of the blank token. Can be 0 or len(vocabulary).
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `logprobs` in it. Here, `logprobs` is a torch.Tensors.
compute_timestamps: A bool flag which determines whether to compute character/subword or
word-based timestamps, mapping the output log-probabilities to discrete intervals of timestamps.
The timestamps will be available in the returned Hypothesis.timestep as a dictionary.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding. When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of floats.
confidence_measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str). Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
# Input can be of dimension -
# ('B', 'T', 'D') [Log probs] or ('B', 'T') [Labels]
return {
"decoder_output": NeuralType(None, LogprobsType()),
"decoder_lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"predictions": [NeuralType(elements_type=HypothesisType())]}
def __init__(
self,
blank_id: int,
preserve_alignments: bool = False,
compute_timestamps: bool = False,
preserve_frame_confidence: bool = False,
confidence_measure_cfg: Optional[DictConfig] = None,
):
super().__init__()
self.blank_id = blank_id
self.preserve_alignments = preserve_alignments
# we need timestamps to extract non-blank per-frame confidence
self.compute_timestamps = compute_timestamps | preserve_frame_confidence
self.preserve_frame_confidence = preserve_frame_confidence
# set confidence calculation measure
self._init_confidence_measure(confidence_measure_cfg)
@typecheck()
def forward(
self, decoder_output: torch.Tensor, decoder_lengths: torch.Tensor,
):
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
Output token is generated auto-repressively.
Args:
decoder_output: A tensor of size (batch, timesteps, features) or (batch, timesteps) (each timestep is a label).
decoder_lengths: list of int representing the length of each output sequence.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
with torch.inference_mode():
hypotheses = []
# Process each sequence independently
prediction_cpu_tensor = decoder_output.cpu()
if prediction_cpu_tensor.ndim < 2 or prediction_cpu_tensor.ndim > 3:
raise ValueError(
f"`decoder_output` must be a tensor of shape [B, T] (labels, int) or "
f"[B, T, V] (log probs, float). Provided shape = {prediction_cpu_tensor.shape}"
)
# determine type of input - logprobs or labels
if prediction_cpu_tensor.ndim == 2: # labels
greedy_decode = self._greedy_decode_labels
else:
greedy_decode = self._greedy_decode_logprobs
for ind in range(prediction_cpu_tensor.shape[0]):
out_len = decoder_lengths[ind] if decoder_lengths is not None else None
hypothesis = greedy_decode(prediction_cpu_tensor[ind], out_len)
hypotheses.append(hypothesis)
# Pack results into Hypotheses
packed_result = pack_hypotheses(hypotheses, decoder_lengths)
return (packed_result,)
@torch.no_grad()
def _greedy_decode_logprobs(self, x: torch.Tensor, out_len: torch.Tensor):
# x: [T, D]
# out_len: [seq_len]
# Initialize blank state and empty label set in Hypothesis
hypothesis = rnnt_utils.Hypothesis(score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None)
prediction = x.detach().cpu()
if out_len is not None:
prediction = prediction[:out_len]
prediction_logprobs, prediction_labels = prediction.max(dim=-1)
non_blank_ids = prediction_labels != self.blank_id
hypothesis.y_sequence = prediction_labels.numpy().tolist()
hypothesis.score = (prediction_logprobs[non_blank_ids]).sum()
if self.preserve_alignments:
# Preserve the logprobs, as well as labels after argmax
hypothesis.alignments = (prediction.clone(), prediction_labels.clone())
if self.compute_timestamps:
hypothesis.timestep = torch.nonzero(non_blank_ids, as_tuple=False)[:, 0].numpy().tolist()
if self.preserve_frame_confidence:
hypothesis.frame_confidence = self._get_confidence(prediction)
return hypothesis
@torch.no_grad()
def _greedy_decode_labels(self, x: torch.Tensor, out_len: torch.Tensor):
# x: [T]
# out_len: [seq_len]
# Initialize blank state and empty label set in Hypothesis
hypothesis = rnnt_utils.Hypothesis(score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None)
prediction_labels = x.detach().cpu()
if out_len is not None:
prediction_labels = prediction_labels[:out_len]
non_blank_ids = prediction_labels != self.blank_id
hypothesis.y_sequence = prediction_labels.numpy().tolist()
hypothesis.score = -1.0
if self.preserve_alignments:
raise ValueError("Requested for alignments, but predictions provided were labels, not log probabilities.")
if self.compute_timestamps:
hypothesis.timestep = torch.nonzero(non_blank_ids, as_tuple=False)[:, 0].numpy().tolist()
if self.preserve_frame_confidence:
raise ValueError(
"Requested for per-frame confidence, but predictions provided were labels, not log probabilities."
)
return hypothesis
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
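# Editorial sketch (illustrative only, not part of the original file): the essence of
# _greedy_decode_logprobs on a tiny made-up log-probability tensor.
def _example_greedy_ctc_decode():
    blank_id = 3  # vocabulary of size 3 -> blank is the last index
    logprobs = torch.log_softmax(torch.randn(6, 4), dim=-1)  # (T, V + 1)
    scores, labels = logprobs.max(dim=-1)  # per-frame argmax
    non_blank = labels != blank_id
    y_sequence = labels.tolist()  # raw per-frame labels (repeats/blanks not collapsed here)
    score = float(scores[non_blank].sum())  # sum of non-blank log-probabilities
    timesteps = torch.nonzero(non_blank, as_tuple=False)[:, 0].tolist()
    return y_sequence, score, timesteps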
@dataclass
class GreedyCTCInferConfig:
preserve_alignments: bool = False
compute_timestamps: bool = False
preserve_frame_confidence: bool = False
confidence_measure_cfg: Optional[ConfidenceMeasureConfig] = ConfidenceMeasureConfig()
confidence_method_cfg: str = "DEPRECATED"
def __post_init__(self):
# OmegaConf.structured ensures that post_init check is always executed
self.confidence_measure_cfg = OmegaConf.structured(
self.confidence_measure_cfg
if isinstance(self.confidence_measure_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.confidence_measure_cfg)
)
if self.confidence_method_cfg != "DEPRECATED":
logging.warning(
"`confidence_method_cfg` is deprecated and will be removed in the future. "
"Please use `confidence_measure_cfg` instead."
)
# TODO (alaptev): delete the following two lines sometime in the future
logging.warning("Re-writing `confidence_measure_cfg` with the value of `confidence_method_cfg`.")
# OmegaConf.structured ensures that post_init check is always executed
self.confidence_measure_cfg = OmegaConf.structured(
self.confidence_method_cfg
if isinstance(self.confidence_method_cfg, ConfidenceMeasureConfig)
else ConfidenceMeasureConfig(**self.confidence_method_cfg)
)
|
NeMo-main
|
nemo/collections/asr/parts/submodules/ctc_greedy_decoding.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
from torch.nn import LayerNorm
from nemo.collections.asr.parts.submodules.causal_convs import CausalConv1D, CausalConv2D
from nemo.utils import logging
class StackingSubsampling(torch.nn.Module):
"""Stacking subsampling which simply stacks consecutive frames to reduce the sampling rate
Args:
subsampling_factor (int): The subsampling factor
feat_in (int): size of the input features
feat_out (int): size of the output features
norm (bool): whether to apply a LayerNorm to the (padded) input features before stacking and projection. Default is False.
"""
def __init__(self, subsampling_factor, feat_in, feat_out, norm=False):
super(StackingSubsampling, self).__init__()
self.subsampling_factor = subsampling_factor
self.proj_out = torch.nn.Linear(subsampling_factor * feat_in, feat_out)
if norm:
self.pre_norm = LayerNorm(feat_in)
else:
self.pre_norm = None
def get_sampling_frames(self):
return self.subsampling_factor
def get_streaming_cache_size(self):
return 0
def forward(self, x, lengths):
b, t, h = x.size()
pad_size = (self.subsampling_factor - (t % self.subsampling_factor)) % self.subsampling_factor
x = torch.nn.functional.pad(x, (0, 0, 0, pad_size))
if self.pre_norm is not None:
x = self.pre_norm(x)
_, t, _ = x.size()
x = torch.reshape(x, (b, t // self.subsampling_factor, h * self.subsampling_factor))
x = self.proj_out(x)
lengths = torch.div(lengths + pad_size, self.subsampling_factor, rounding_mode='floor')
return x, lengths
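# Editorial sketch (illustrative only, not part of the original file): the padding and
# reshaping arithmetic of StackingSubsampling.forward, on made-up sizes.
def _example_stacking_subsampling_shapes():
    sub = StackingSubsampling(subsampling_factor=4, feat_in=80, feat_out=256)
    x = torch.randn(2, 10, 80)  # (B, T=10, feat_in)
    lengths = torch.tensor([10, 7])
    y, new_lengths = sub(x, lengths)
    # T=10 is padded to 12 (next multiple of 4), stacked into 12 / 4 = 3 frames of size
    # 4 * 80 = 320 each, then projected down to feat_out=256.
    assert y.shape == (2, 3, 256)
    assert new_lengths.tolist() == [3, 2]  # (length + pad_size) // 4
    return y.shape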
class ConvSubsampling(torch.nn.Module):
"""Convolutional subsampling which supports VGGNet and striding approach introduced in:
VGGNet Subsampling: Transformer-transducer: end-to-end speech recognition with self-attention (https://arxiv.org/pdf/1910.12977.pdf)
Striding Subsampling: "Speech-Transformer: A No-Recurrence Sequence-to-Sequence Model for Speech Recognition" by Linhao Dong et al. (https://ieeexplore.ieee.org/document/8462506)
Args:
subsampling (str): The subsampling technique from {"vggnet", "striding", "dw-striding"}
subsampling_factor (int): The subsampling factor which should be a power of 2
subsampling_conv_chunking_factor (int): Input chunking factor which can be -1 (no chunking)
1 (auto) or a power of 2. Default is 1
feat_in (int): size of the input features
feat_out (int): size of the output features
conv_channels (int): Number of channels for the convolution layers.
activation (Module): activation function, default is nn.ReLU()
"""
def __init__(
self,
subsampling,
subsampling_factor,
feat_in,
feat_out,
conv_channels,
subsampling_conv_chunking_factor=1,
activation=nn.ReLU(),
is_causal=False,
):
super(ConvSubsampling, self).__init__()
self._subsampling = subsampling
self._conv_channels = conv_channels
self._feat_in = feat_in
self._feat_out = feat_out
if subsampling_factor % 2 != 0:
raise ValueError("Sampling factor should be a multiply of 2!")
self._sampling_num = int(math.log(subsampling_factor, 2))
self.subsampling_factor = subsampling_factor
self.is_causal = is_causal
if (
subsampling_conv_chunking_factor != -1
and subsampling_conv_chunking_factor != 1
and subsampling_conv_chunking_factor % 2 != 0
):
raise ValueError("subsampling_conv_chunking_factor should be -1, 1, or a power of 2")
self.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor
in_channels = 1
layers = []
if subsampling == 'vggnet':
self._stride = 2
self._kernel_size = 2
self._ceil_mode = True
self._left_padding = 0
self._right_padding = 0
for i in range(self._sampling_num):
layers.append(
torch.nn.Conv2d(
in_channels=in_channels, out_channels=conv_channels, kernel_size=3, stride=1, padding=1
)
)
layers.append(activation)
layers.append(
torch.nn.Conv2d(
in_channels=conv_channels, out_channels=conv_channels, kernel_size=3, stride=1, padding=1
)
)
layers.append(activation)
layers.append(
torch.nn.MaxPool2d(
kernel_size=self._kernel_size,
stride=self._stride,
padding=self._left_padding,
ceil_mode=self._ceil_mode,
)
)
in_channels = conv_channels
elif subsampling == 'dw_striding':
self._stride = 2
self._kernel_size = 3
self._ceil_mode = False
if self.is_causal:
self._left_padding = self._kernel_size - 1
self._right_padding = self._stride - 1
self._max_cache_len = subsampling_factor + 1
else:
self._left_padding = (self._kernel_size - 1) // 2
self._right_padding = (self._kernel_size - 1) // 2
self._max_cache_len = 0
# Layer 1
if self.is_causal:
layers.append(
CausalConv2D(
in_channels=in_channels,
out_channels=conv_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=None,
)
)
else:
layers.append(
torch.nn.Conv2d(
in_channels=in_channels,
out_channels=conv_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=self._left_padding,
)
)
in_channels = conv_channels
layers.append(activation)
for i in range(self._sampling_num - 1):
if self.is_causal:
layers.append(
CausalConv2D(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=None,
groups=in_channels,
)
)
else:
layers.append(
torch.nn.Conv2d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=self._left_padding,
groups=in_channels,
)
)
layers.append(
torch.nn.Conv2d(
in_channels=in_channels,
out_channels=conv_channels,
kernel_size=1,
stride=1,
padding=0,
groups=1,
)
)
layers.append(activation)
in_channels = conv_channels
elif subsampling == 'striding':
self._stride = 2
self._kernel_size = 3
self._ceil_mode = False
if self.is_causal:
self._left_padding = self._kernel_size - 1
self._right_padding = self._stride - 1
self._max_cache_len = subsampling_factor + 1
else:
self._left_padding = (self._kernel_size - 1) // 2
self._right_padding = (self._kernel_size - 1) // 2
self._max_cache_len = 0
for i in range(self._sampling_num):
if self.is_causal:
layers.append(
CausalConv2D(
in_channels=in_channels,
out_channels=conv_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=None,
)
)
else:
layers.append(
torch.nn.Conv2d(
in_channels=in_channels,
out_channels=conv_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=self._left_padding,
)
)
layers.append(activation)
in_channels = conv_channels
elif subsampling == 'striding_conv1d':
in_channels = feat_in
self._stride = 2
self._kernel_size = 5
self._ceil_mode = False
if self.is_causal:
self._left_padding = self._kernel_size - 1
self._right_padding = self._stride - 1
self._max_cache_len = subsampling_factor + 1
else:
self._left_padding = (self._kernel_size - 1) // 2
self._right_padding = (self._kernel_size - 1) // 2
self._max_cache_len = 0
for i in range(self._sampling_num):
if self.is_causal:
layers.append(
CausalConv1D(
in_channels=in_channels,
out_channels=feat_out if self._sampling_num == i + 1 else conv_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=None,
)
)
else:
layers.append(
torch.nn.Conv1d(
in_channels=in_channels,
out_channels=feat_out if self._sampling_num == i + 1 else conv_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=self._left_padding,
)
)
layers.append(activation)
in_channels = conv_channels
elif subsampling == 'dw_striding_conv1d':
in_channels = feat_in
self._stride = 2
self._kernel_size = 5
self._ceil_mode = False
self._left_padding = (self._kernel_size - 1) // 2
self._right_padding = (self._kernel_size - 1) // 2
# Layer 1
layers.extend(
[
torch.nn.Conv1d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=self._left_padding,
groups=in_channels,
),
torch.nn.Conv1d(
in_channels=in_channels,
out_channels=feat_out if self._sampling_num == 1 else conv_channels,
kernel_size=1,
stride=1,
padding=0,
groups=1,
),
]
)
in_channels = conv_channels
layers.append(activation)
for i in range(self._sampling_num - 1):
layers.extend(
[
torch.nn.Conv1d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=self._kernel_size,
stride=self._stride,
padding=self._left_padding,
groups=in_channels,
),
torch.nn.Conv1d(
in_channels=in_channels,
out_channels=feat_out if self._sampling_num == i + 2 else conv_channels,
kernel_size=1,
stride=1,
padding=0,
groups=1,
),
]
)
layers.append(activation)
in_channels = conv_channels
else:
raise ValueError(f"Not valid sub-sampling: {subsampling}!")
if subsampling in ["vggnet", "dw_striding", "striding"]:
in_length = torch.tensor(feat_in, dtype=torch.float)
out_length = calc_length(
lengths=in_length,
all_paddings=self._left_padding + self._right_padding,
kernel_size=self._kernel_size,
stride=self._stride,
ceil_mode=self._ceil_mode,
repeat_num=self._sampling_num,
)
self.out = torch.nn.Linear(conv_channels * int(out_length), feat_out)
self.conv2d_subsampling = True
elif subsampling in ["striding_conv1d", "dw_striding_conv1d"]:
self.conv2d_subsampling = False
else:
raise ValueError(f"Not valid sub-sampling: {subsampling}!")
self.conv = torch.nn.Sequential(*layers)
def get_sampling_frames(self):
return [1, self.subsampling_factor]
def get_streaming_cache_size(self):
return [0, self.subsampling_factor + 1]
def forward(self, x, lengths):
lengths = calc_length(
lengths,
all_paddings=self._left_padding + self._right_padding,
kernel_size=self._kernel_size,
stride=self._stride,
ceil_mode=self._ceil_mode,
repeat_num=self._sampling_num,
)
# Unsqueeze Channel Axis
if self.conv2d_subsampling:
x = x.unsqueeze(1)
# Transpose to Channel First mode
else:
x = x.transpose(1, 2)
# split inputs if chunking_factor is set
if self.subsampling_conv_chunking_factor != -1 and self.conv2d_subsampling:
if self.subsampling_conv_chunking_factor == 1:
# if subsampling_conv_chunking_factor is 1, we split only if needed
# avoiding a bug / feature limiting indexing of tensors to 2**31
# see https://github.com/pytorch/pytorch/issues/80020
x_ceil = 2 ** 31 / self._conv_channels * self._stride * self._stride
if torch.numel(x) > x_ceil:
need_to_split = True
else:
need_to_split = False
else:
# if subsampling_conv_chunking_factor > 1 we always split
need_to_split = True
if need_to_split:
x, success = self.conv_split_by_batch(x)
if not success: # if unable to split by batch, try by channel
if self._subsampling == 'dw_striding':
x = self.conv_split_by_channel(x)
else:
x = self.conv(x) # try anyway
else:
x = self.conv(x)
else:
x = self.conv(x)
# Flatten Channel and Frequency Axes
if self.conv2d_subsampling:
b, c, t, f = x.size()
x = self.out(x.transpose(1, 2).reshape(b, t, -1))
# Transpose to Channel Last mode
else:
x = x.transpose(1, 2)
return x, lengths
def reset_parameters(self):
# initialize weights
if self._subsampling == 'dw_striding':
with torch.no_grad():
# init conv
scale = 1.0 / self._kernel_size
dw_max = (self._kernel_size ** 2) ** -0.5
pw_max = self._conv_channels ** -0.5
torch.nn.init.uniform_(self.conv[0].weight, -scale, scale)
torch.nn.init.uniform_(self.conv[0].bias, -scale, scale)
for idx in range(2, len(self.conv), 3):
torch.nn.init.uniform_(self.conv[idx].weight, -dw_max, dw_max)
torch.nn.init.uniform_(self.conv[idx].bias, -dw_max, dw_max)
torch.nn.init.uniform_(self.conv[idx + 1].weight, -pw_max, pw_max)
torch.nn.init.uniform_(self.conv[idx + 1].bias, -pw_max, pw_max)
                # init fc (80 * 64 = 5120 from https://github.com/kssteven418/Squeezeformer/blob/13c97d6cf92f2844d2cb3142b4c5bfa9ad1a8951/src/models/conformer_encoder.py#L487)
fc_scale = (self._feat_out * self._feat_in / self._sampling_num) ** -0.5
torch.nn.init.uniform_(self.out.weight, -fc_scale, fc_scale)
torch.nn.init.uniform_(self.out.bias, -fc_scale, fc_scale)
def conv_split_by_batch(self, x):
""" Tries to split input by batch, run conv and concat results """
b, _, _, _ = x.size()
if b == 1: # can't split if batch size is 1
return x, False
if self.subsampling_conv_chunking_factor > 1:
cf = self.subsampling_conv_chunking_factor
logging.debug(f'using manually set chunking factor: {cf}')
else:
# avoiding a bug / feature limiting indexing of tensors to 2**31
# see https://github.com/pytorch/pytorch/issues/80020
x_ceil = 2 ** 31 / self._conv_channels * self._stride * self._stride
p = math.ceil(math.log(torch.numel(x) / x_ceil, 2))
cf = 2 ** p
logging.debug(f'using auto set chunking factor: {cf}')
new_batch_size = b // cf
if new_batch_size == 0: # input is too big
return x, False
logging.debug(f'conv subsampling: using split batch size {new_batch_size}')
return torch.cat([self.conv(chunk) for chunk in torch.split(x, new_batch_size, 0)]), True
def conv_split_by_channel(self, x):
""" For dw convs, tries to split input by time, run conv and concat results """
x = self.conv[0](x) # full conv2D
x = self.conv[1](x) # activation
for i in range(self._sampling_num - 1):
_, c, t, _ = x.size()
if self.subsampling_conv_chunking_factor > 1:
cf = self.subsampling_conv_chunking_factor
logging.debug(f'using manually set chunking factor: {cf}')
else:
# avoiding a bug / feature limiting indexing of tensors to 2**31
# see https://github.com/pytorch/pytorch/issues/80020
p = math.ceil(math.log(torch.numel(x) / 2 ** 31, 2))
cf = 2 ** p
logging.debug(f'using auto set chunking factor: {cf}')
new_c = int(c // cf)
if new_c == 0:
logging.warning(f'chunking factor {cf} is too high; splitting down to one channel.')
new_c = 1
new_t = int(t // cf)
if new_t == 0:
logging.warning(f'chunking factor {cf} is too high; splitting down to one timestep.')
new_t = 1
logging.debug(f'conv dw subsampling: using split C size {new_c} and split T size {new_t}')
x = self.channel_chunked_conv(self.conv[i * 3 + 2], new_c, x) # conv2D, depthwise
# splitting pointwise convs by time
x = torch.cat([self.conv[i * 3 + 3](chunk) for chunk in torch.split(x, new_t, 2)], 2) # conv2D, pointwise
x = self.conv[i * 3 + 4](x) # activation
return x
def channel_chunked_conv(self, conv, chunk_size, x):
""" Performs channel chunked convolution"""
ind = 0
out_chunks = []
for chunk in torch.split(x, chunk_size, 1):
step = chunk.size()[1]
if self.is_causal:
chunk = nn.functional.pad(
chunk, pad=(self._kernel_size - 1, self._stride - 1, self._kernel_size - 1, self._stride - 1)
)
ch_out = nn.functional.conv2d(
chunk,
conv.weight[ind : ind + step, :, :, :],
bias=conv.bias[ind : ind + step],
stride=self._stride,
padding=0,
groups=step,
)
else:
ch_out = nn.functional.conv2d(
chunk,
conv.weight[ind : ind + step, :, :, :],
bias=conv.bias[ind : ind + step],
stride=self._stride,
padding=self._left_padding,
groups=step,
)
out_chunks.append(ch_out)
ind += step
return torch.cat(out_chunks, 1)
def change_subsampling_conv_chunking_factor(self, subsampling_conv_chunking_factor: int):
if (
subsampling_conv_chunking_factor != -1
and subsampling_conv_chunking_factor != 1
and subsampling_conv_chunking_factor % 2 != 0
):
raise ValueError("subsampling_conv_chunking_factor should be -1, 1, or a power of 2")
self.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor
def calc_length(lengths, all_paddings, kernel_size, stride, ceil_mode, repeat_num=1):
""" Calculates the output length of a Tensor passed through a convolution or max pooling layer"""
add_pad: float = all_paddings - kernel_size
one: float = 1.0
for i in range(repeat_num):
lengths = torch.div(lengths.to(dtype=torch.float) + add_pad, stride) + one
if ceil_mode:
lengths = torch.ceil(lengths)
else:
lengths = torch.floor(lengths)
return lengths.to(dtype=torch.int)
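# Editor-added illustrative sketch (not part of the original NeMo source): a worked example of
# calc_length. With an assumed input length of 100 frames, total padding of 2 (1 on each side),
# kernel size 3, stride 2 and two subsampling layers, the length becomes
# floor((100 + 2 - 3) / 2 + 1) = 50 after the first layer and 25 after the second.
def _example_calc_length():
    lengths = torch.tensor([100], dtype=torch.float)
    out_length = calc_length(
        lengths=lengths,
        all_paddings=2,  # assumed: padding of 1 on both sides
        kernel_size=3,
        stride=2,
        ceil_mode=False,
        repeat_num=2,  # two striding layers, i.e. roughly 4x time reduction
    )
    return int(out_length.item())  # 25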
class TimeReductionModule(nn.Module):
"""
Squeezeformer Time Reduction procedure. Downsamples the audio by `stride` in the time dimension.
Args:
d_model (int): input dimension of MultiheadAttentionMechanism and PositionwiseFeedForward
out_dim (int): Output dimension of the module.
kernel_size (int): Conv kernel size for depthwise convolution in convolution module
stride (int): Downsampling factor in time dimension.
"""
def __init__(self, d_model: int, out_dim: int, kernel_size: int = 5, stride: int = 2):
super().__init__()
self.d_model = d_model
self.out_dim = out_dim
self.kernel_size = kernel_size
self.stride = stride
self.padding = max(0, self.kernel_size - self.stride)
self.dw_conv = nn.Conv1d(
in_channels=d_model,
out_channels=d_model,
kernel_size=kernel_size,
stride=stride,
padding=self.padding,
groups=d_model,
)
self.pw_conv = nn.Conv1d(
in_channels=d_model, out_channels=out_dim, kernel_size=1, stride=1, padding=0, groups=1,
)
self.reset_parameters()
def forward(self, x, att_mask=None, pad_mask=None):
x = x.transpose(1, 2) # [B, C, T]
if pad_mask is not None:
x = x.float().masked_fill(pad_mask.unsqueeze(1), 0.0)
x = self.dw_conv(x)
x = self.pw_conv(x)
x = x.transpose(1, 2) # [B, T, C]
B, T, D = x.size()
if att_mask is not None and pad_mask is not None:
att_mask = att_mask[:, :: self.stride, :: self.stride]
pad_mask = pad_mask[:, :: self.stride]
L = pad_mask.size(-1)
x = torch.nn.functional.pad(x, (0, 0, 0, L - T))
return x, att_mask, pad_mask
def reset_parameters(self):
dw_max = self.kernel_size ** -0.5
pw_max = self.d_model ** -0.5
with torch.no_grad():
torch.nn.init.uniform_(self.dw_conv.weight, -dw_max, dw_max)
torch.nn.init.uniform_(self.dw_conv.bias, -dw_max, dw_max)
torch.nn.init.uniform_(self.pw_conv.weight, -pw_max, pw_max)
torch.nn.init.uniform_(self.pw_conv.bias, -pw_max, pw_max)
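# Editor-added illustrative sketch (not part of the original source): running a random [B, T, C]
# tensor through TimeReductionModule with stride=2 roughly halves the time axis. The concrete
# batch, time and feature sizes below are assumed example values only.
def _example_time_reduction_module():
    module = TimeReductionModule(d_model=64, out_dim=64, kernel_size=5, stride=2)
    x = torch.randn(2, 100, 64)  # [B, T, C]
    att_mask = torch.ones(2, 100, 100, dtype=torch.bool)
    pad_mask = torch.zeros(2, 100, dtype=torch.bool)  # assume no padded frames
    y, att_mask, pad_mask = module(x, att_mask=att_mask, pad_mask=pad_mask)
    return y.shape  # approximately [2, 50, 64] after downsampling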
class SubsamplingReductionModule(nn.Module):
"""Downsamples the audio signal in time dimension."""
def __init__(self, reduction: str, d_model: int, reduction_factor: int = 2):
super().__init__()
assert reduction in ['pooling', 'striding']
self.reduction = reduction
self.d_model = d_model
self._sampling_num = int(math.log(reduction_factor, 2))
if reduction == 'pooling':
self.reduction_enc = nn.MaxPool1d(kernel_size=reduction_factor)
self.padding = 0
self.kernel_size = self.reduction_enc.kernel_size
self.stride = self.reduction_enc.stride
elif reduction == 'striding':
self.reduction_enc = ConvSubsampling(
subsampling='striding',
subsampling_factor=reduction_factor,
feat_in=d_model,
feat_out=d_model,
conv_channels=d_model,
activation=nn.ReLU(),
is_causal=False,
)
def forward(self, x, lengths):
"""Shapes:
- x: [B, T, C]
- lengths: [B]
"""
if self.reduction == 'striding':
x, lengths = self.reduction_enc(x=x, lengths=lengths)
else:
x = torch.transpose(x, 1, 2) # [B, C, T]
lengths = calc_length(
lengths=lengths,
all_paddings=self.padding,
kernel_size=self.kernel_size,
stride=self.stride,
ceil_mode=False,
repeat_num=self._sampling_num,
)
x = self.reduction_enc(x)
x = torch.transpose(x, 1, 2) # [B, T, C]
return x, lengths
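# Editor-added illustrative sketch (not from the original file): the 'pooling' mode of
# SubsamplingReductionModule halves the time dimension with MaxPool1d and recomputes the valid
# lengths via calc_length. Batch size, time and feature sizes are assumed example values.
def _example_subsampling_reduction_module():
    module = SubsamplingReductionModule(reduction='pooling', d_model=64, reduction_factor=2)
    x = torch.randn(2, 100, 64)  # [B, T, C]
    lengths = torch.tensor([100, 80])  # valid frames per batch element
    y, new_lengths = module(x, lengths)
    return y.shape, new_lengths  # roughly [2, 50, 64] and lengths [50, 40]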
|
NeMo-main
|
nemo/collections/asr/parts/submodules/subsampling.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
class StatelessNet(torch.nn.Module):
"""
    Helper class used in transducer models with stateless decoders. This stateless net
    simply outputs the embedding or concatenated embeddings for the input label(s),
    depending on the configured context size.
Args:
context_size: history context size for the stateless decoder network. Could be any positive integer. We recommend setting this as 2.
vocab_size: total vocabulary size.
emb_dim: total embedding size of the stateless net output.
blank_idx: index for the blank symbol for the transducer model.
normalization_mode: normalization run on the output embeddings. Could be either 'layer' or None. We recommend using 'layer' to stabilize training.
dropout: dropout rate on the embedding outputs.
"""
def __init__(self, context_size, vocab_size, emb_dim, blank_idx, normalization_mode, dropout):
super().__init__()
assert context_size > 0
self.context_size = context_size
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.dropout = torch.nn.Dropout(dropout)
self.norm = torch.nn.Identity()
if normalization_mode == 'layer':
self.norm = torch.nn.LayerNorm(emb_dim, elementwise_affine=False)
embeds = []
for i in range(self.context_size):
# We use different embedding matrices for different context positions.
# In this list, a smaller index means more recent history word.
# We assign more dimensions for the most recent word in the history.
# The detailed method is, we first allocate half the embedding-size
# to the most recent history word, and then allocate the remaining
# dimensions evenly among all history contexts. E.g. if total embedding
# size is 200, and context_size is 2, then we allocate 150 dimensions
# to the last word, and 50 dimensions to the second-to-last word.
if i != 0:
embed_size = emb_dim // 2 // self.context_size
else:
embed_size = emb_dim - (emb_dim // 2 // self.context_size) * (self.context_size - 1)
embed = torch.nn.Embedding(vocab_size + 1, embed_size, padding_idx=blank_idx)
embeds.append(embed)
self.embeds = torch.nn.ModuleList(embeds)
self.blank_idx = blank_idx
def forward(
self, y: Optional[torch.Tensor] = None, state: Optional[List[torch.Tensor]] = None,
):
"""
Although this is a *stateless* net, we use the "state" parameter to
pass in the previous labels, unlike LSTMs where state would represent
hidden activations of the network.
Args:
            y: an integer tensor of shape B x U.
            state: a list containing a single tensor, kept for consistency with the stateful
                decoder interface; the element is a tensor of shape [B x context-length].
Returns:
The return dimension of this function's output is B x U x D, with D being the total embedding dim.
"""
outs = []
[B, U] = y.shape
appended_y = y
        if state is not None:
appended_y = torch.concat([state[0], y], axis=1)
context_size = appended_y.shape[1]
if context_size < self.context_size:
# This is the case at the beginning of an utterance where we have
                # seen fewer words than context_size. In this case, we need to pad
# it to the right length.
padded_state = torch.ones([B, self.context_size], dtype=torch.long, device=y.device) * self.blank_idx
padded_state[:, self.context_size - context_size :] = appended_y
elif context_size == self.context_size + 1:
padded_state = appended_y[:, 1:]
# This is the case where the previous state already has reached context_size.
# We need to truncate the history by omitting the 0'th token.
else:
# Context has just the right size. Copy directly.
padded_state = appended_y
for i in range(self.context_size):
out = self.embeds[i](padded_state[:, self.context_size - 1 - i : self.context_size - i])
outs.append(out)
else:
for i in range(self.context_size):
out = self.embeds[i](y)
if i != 0:
out[:, i:, :] = out[
:, :-i, :
                    ].clone()  # clone() is needed here, otherwise the copy may complain that source and destination memory locations overlap.
out[:, :i, :] *= 0.0
outs.append(out)
out = self.dropout(torch.concat(outs, axis=-1))
out = self.norm(out)
state = None
if y is not None:
state = [appended_y[:, appended_y.shape[1] - self.context_size + 1 :]]
return out, state
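# Editor-added illustrative sketch (not part of the original source): with context_size=2 the
# first call returns [B, U, emb_dim] embeddings plus a "state" holding the most recent
# context_size - 1 label(s). vocab_size, blank_idx and the shapes are assumed example values.
def _example_stateless_net():
    net = StatelessNet(
        context_size=2, vocab_size=28, emb_dim=8, blank_idx=28, normalization_mode='layer', dropout=0.0,
    )
    y = torch.randint(low=0, high=28, size=(3, 5))  # [B, U] label history
    out, state = net(y=y)
    return out.shape, state[0].shape  # [3, 5, 8] and [3, 1]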
|
NeMo-main
|
nemo/collections/asr/parts/submodules/stateless_net.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from functools import reduce
from typing import List
import torch
import torch.nn as nn
class FusedBatchNorm1d(nn.Module):
"""
    Fused BatchNorm for use in Conformer, intended to improve accuracy when fine-tuning in the TTS scenario.
    Drop-in replacement for BatchNorm1d that applies a simple affine projection.
"""
def __init__(self, num_features: int):
"""
Args:
num_features: number of channels, see original BatchNorm1d documentation
"""
super().__init__()
self.num_features = num_features
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
@classmethod
def from_batchnorm(cls, bn: nn.BatchNorm1d) -> FusedBatchNorm1d:
"""
Construct FusedBatchNorm1d module from BatchNorm1d
Args:
bn: original BatchNorm module
Returns:
            FusedBatchNorm1d module with initialized params; in eval mode the result is equivalent to the original BatchNorm
"""
assert isinstance(bn, nn.BatchNorm1d)
fused_bn = FusedBatchNorm1d(bn.num_features)
# init projection params from original batch norm
# so, for inference mode output is the same
std = torch.sqrt(bn.running_var.data + bn.eps)
fused_bn.weight.data = bn.weight.data / std
fused_bn.bias.data = bn.bias.data - bn.running_mean.data * fused_bn.weight.data
return fused_bn
def forward(self, x: torch.Tensor):
if x.dim() == 3:
return x * self.weight.unsqueeze(-1) + self.bias.unsqueeze(-1)
assert x.dim() == 2
return x * self.weight + self.bias
def _get_module_by_name(module: nn.Module, full_layer_name: str) -> nn.Module:
names = full_layer_name.split(sep='.')
return reduce(getattr, names, module)
def replace_bn_with_fused_bn(module: nn.Module, full_layer_name: str):
"""
Replace BatchNorm1d named `full_layer_name` in nn.Module with FusedBatchNorm1d
Args:
module: nn.Module instance, modified inplace
full_layer_name: name of BatchNorm1d submodule in module to replace
"""
bn = _get_module_by_name(module, full_layer_name)
assert isinstance(bn, nn.BatchNorm1d)
fused_bn = FusedBatchNorm1d.from_batchnorm(bn)
try:
parent_name, norm_name = full_layer_name.rsplit(".", maxsplit=1)
setattr(_get_module_by_name(module, parent_name), norm_name, fused_bn)
except ValueError:
norm_name = full_layer_name
setattr(module, norm_name, fused_bn)
def replace_bn_with_fused_bn_all(model: nn.Module) -> List[str]:
"""
Replace BatchNorm1d with FusedBatchNorm1d in model
Args:
model: nn.Module instance, modified inplace
Returns:
list of replaced module names
"""
replaced_module_names = []
for name, module in model.named_modules():
if isinstance(module, nn.BatchNorm1d):
replace_bn_with_fused_bn(model, name)
replaced_module_names.append(name)
return replaced_module_names
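# Editor-added illustrative sketch (not from the original file): after replacing every
# BatchNorm1d with FusedBatchNorm1d, an eval-mode model produces the same outputs up to
# numerical precision. The tiny Conv1d + BatchNorm1d model below is an assumed example.
def _example_replace_bn_with_fused_bn():
    model = nn.Sequential(nn.Conv1d(8, 8, kernel_size=3, padding=1), nn.BatchNorm1d(8))
    model.eval()
    x = torch.randn(2, 8, 16)
    with torch.no_grad():
        reference = model(x)
        replaced_names = replace_bn_with_fused_bn_all(model)
        fused_output = model(x)
    return replaced_names, torch.allclose(reference, fused_output, atol=1e-5)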
|
NeMo-main
|
nemo/collections/asr/parts/submodules/batchnorm.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from torch import nn
from nemo.collections.asr.parts.submodules.jasper import jasper_activations
from nemo.core import NeuralModule
from nemo.core.neural_types import EncodedRepresentation, LossType, NeuralType
class GumbelVectorQuantizer(NeuralModule):
def __init__(
self,
dim,
num_vars,
temp,
groups,
combine_groups,
vq_dim,
time_first,
activation="gelu",
weight_proj_depth=1,
weight_proj_factor=1,
):
"""Vector quantization using gumbel softmax
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
weight_proj_depth: number of layers (with activation in between) to project input before computing logits
weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
projections by this factor
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.time_first = time_first
assert vq_dim % groups == 0, f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
nn.init.uniform_(self.vars)
if weight_proj_depth > 1:
activation = jasper_activations["gelu"]
def block(input_dim, output_dim):
return nn.Sequential(nn.Linear(input_dim, output_dim), activation)
inner_dim = self.input_dim * weight_proj_factor
self.weight_proj = nn.Sequential(
*[block(self.input_dim if i == 0 else inner_dim, inner_dim) for i in range(weight_proj_depth - 1)],
nn.Linear(inner_dim, groups * num_vars),
)
else:
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
assert len(temp) == 3, "Quantize temperature should be a tuple of 3 elements: (start, stop, decay factor)"
self.max_temp, self.min_temp, self.temp_decay = temp
self.curr_temp = self.max_temp
self.codebook_indices = None
def set_num_updates(self, num_updates):
self.curr_temp = max(self.max_temp * self.temp_decay ** num_updates, self.min_temp)
def get_codebook_indices(self):
if self.codebook_indices is None:
from itertools import product
p = [range(self.num_vars)] * self.groups
inds = list(product(*p))
self.codebook_indices = torch.tensor(inds, dtype=torch.long, device=self.vars.device).flatten()
if not self.combine_groups:
self.codebook_indices = self.codebook_indices.view(self.num_vars ** self.groups, -1)
for b in range(1, self.groups):
self.codebook_indices[:, b] += self.num_vars * b
self.codebook_indices = self.codebook_indices.flatten()
return self.codebook_indices
def sample_from_codebook(self, b, n):
indices = self.get_codebook_indices()
indices = indices.view(-1, self.groups)
cb_size = indices.size(0)
assert n < cb_size, f"sample size {n} is greater than size of codebook {cb_size}"
sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
indices = indices[sample_idx]
z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
return z
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
if self.time_first:
return {"x": NeuralType(('B', 'T', 'D'), EncodedRepresentation())}
return {"x": NeuralType(('B', 'D', 'T'), EncodedRepresentation())}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
if self.time_first:
return {
"x": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"quantize_prob_ppl": NeuralType(elements_type=LossType()),
}
return {
"x": NeuralType(('B', 'D', 'T'), EncodedRepresentation()),
"quantize_prob_ppl": NeuralType(elements_type=LossType()),
}
def forward(self, x, return_ids=False):
if not self.time_first:
x = x.transpose(1, 2)
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
_, k = x.max(-1)
hard_x = x.new_zeros(*x.shape).scatter_(-1, k.view(-1, 1), 1.0).view(bsz * tsz, self.groups, -1)
# Calculate quantize prob perplexity
num_vars = self.num_vars * self.groups
avg_probs = torch.softmax(x.view(bsz * tsz, self.groups, -1).float(), dim=-1).mean(dim=0)
quantize_prob_ppl = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)).sum()
quantize_prob_ppl = (num_vars - quantize_prob_ppl) / num_vars
if self.training:
x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
if self.combine_groups:
vars = vars.repeat(1, self.groups, 1)
x = x.unsqueeze(-1) * vars
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
cur_codebook_temp = self.curr_temp
if not self.time_first:
x = x.transpose(1, 2) # BTC -> BCT
if return_ids:
hard_x_max = hard_x.argmax(-1).reshape(bsz, tsz, -1)
# BxTxG
# create single id from multiple group ids
target_ids = hard_x.new_zeros(bsz, tsz).long()
for i in range(self.groups):
target_ids *= self.num_vars
target_ids += hard_x_max[:, :, i]
return x, quantize_prob_ppl, cur_codebook_temp, target_ids
else:
return x, quantize_prob_ppl, cur_codebook_temp
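# Editor-added illustrative sketch (not from the original file): quantizing a [B, T, C] tensor
# with two groups of 8 codebook entries each returns the quantized features, a codebook
# perplexity penalty and the current Gumbel temperature. All sizes are assumed example values.
def _example_gumbel_vector_quantizer():
    quantizer = GumbelVectorQuantizer(
        dim=16, num_vars=8, temp=(2.0, 0.5, 0.999), groups=2, combine_groups=False, vq_dim=16, time_first=True,
    )
    x = torch.randn(3, 10, 16)  # [B, T, C]
    quantized, quantize_prob_ppl, cur_temp = quantizer(x)
    return quantized.shape, float(quantize_prob_ppl), cur_temp  # [3, 10, 16], scalar penalty, 2.0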
|
NeMo-main
|
nemo/collections/asr/parts/submodules/ssl_quantizers.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import torch
import torch.nn.functional as F
from torch import nn
__all__ = ['CausalConv2D', 'CausalConv1D']
class CausalConv2D(nn.Conv2d):
"""
A causal version of nn.Conv2d where each location in the 2D matrix would have no access to locations on its right or down
    All arguments are the same as nn.Conv2d except padding, which should be set to None.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: Union[str, int] = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None,
) -> None:
if padding is not None:
raise ValueError("Argument padding should be set to None for CausalConv2D.")
self._left_padding = kernel_size - 1
self._right_padding = stride - 1
padding = 0
super(CausalConv2D, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
device,
dtype,
)
def forward(
self, x,
):
x = F.pad(x, pad=(self._left_padding, self._right_padding, self._left_padding, self._right_padding))
x = super().forward(x)
return x
class CausalConv1D(nn.Conv1d):
"""
A causal version of nn.Conv1d where each step would have limited access to locations on its right or left
All arguments are the same as nn.Conv1d except padding.
    If padding is set to None, the paddings are set automatically to make it a causal convolution where each location does not see any steps on its right.
    If padding is set as a list of size 2, then padding[0] is used as the left padding and padding[1] as the right padding.
    This makes it possible to control how many steps are accessible on the right and on the left.
    This mode is not supported when stride > 1; padding[0] + padding[1] should be equal to (kernel_size - 1).
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: Union[str, int] = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None,
) -> None:
self.cache_drop_size = None
if padding is None:
self._left_padding = kernel_size - 1
self._right_padding = stride - 1
else:
if stride != 1 and padding != kernel_size - 1:
raise ValueError("No striding allowed for non-symmetric convolutions!")
if isinstance(padding, int):
self._left_padding = padding
self._right_padding = padding
elif isinstance(padding, list) and len(padding) == 2 and padding[0] + padding[1] == kernel_size - 1:
self._left_padding = padding[0]
self._right_padding = padding[1]
else:
raise ValueError(f"Invalid padding param: {padding}!")
self._max_cache_len = self._left_padding
super(CausalConv1D, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode,
device=device,
dtype=dtype,
)
def update_cache(self, x, cache=None):
if cache is None:
new_x = F.pad(x, pad=(self._left_padding, self._right_padding))
next_cache = cache
else:
new_x = F.pad(x, pad=(0, self._right_padding))
new_x = torch.cat([cache, new_x], dim=-1)
if self.cache_drop_size > 0:
next_cache = new_x[:, :, : -self.cache_drop_size]
else:
next_cache = new_x
next_cache = next_cache[:, :, -cache.size(-1) :]
return new_x, next_cache
def forward(self, x, cache=None):
x, cache = self.update_cache(x, cache=cache)
x = super().forward(x)
if cache is None:
return x
else:
return x, cache
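# Editor-added illustrative sketch (not from the original file): with padding=None the layer pads
# only on the left, so the output keeps the input length and frame t never sees frames on its
# right. Channel and length values are assumed examples.
def _example_causal_conv1d():
    conv = CausalConv1D(in_channels=8, out_channels=8, kernel_size=3, padding=None)
    x = torch.randn(2, 8, 20)  # [B, C, T]
    y = conv(x)  # cache=None, so only the convolved output is returned
    return y.shape  # [2, 8, 20]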
|
NeMo-main
|
nemo/collections/asr/parts/submodules/causal_convs.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/parts/submodules/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch import nn as nn
from torch.nn import LayerNorm
from nemo.collections.asr.parts.submodules.batchnorm import FusedBatchNorm1d
from nemo.collections.asr.parts.submodules.causal_convs import CausalConv1D
from nemo.collections.asr.parts.submodules.multi_head_attention import (
MultiHeadAttention,
RelPositionMultiHeadAttention,
RelPositionMultiHeadAttentionLongformer,
)
from nemo.collections.asr.parts.utils.activations import Swish
from nemo.collections.common.parts import adapter_modules
from nemo.collections.common.parts.utils import activation_registry
from nemo.core.classes.mixins import AccessMixin
from nemo.core.classes.mixins.adapter_mixins import AdapterModuleMixin
__all__ = ['ConformerConvolution', 'ConformerFeedForward', 'ConformerLayer']
class ConformerLayer(torch.nn.Module, AdapterModuleMixin, AccessMixin):
"""A single block of the Conformer encoder.
Args:
d_model (int): input dimension of MultiheadAttentionMechanism and PositionwiseFeedForward
d_ff (int): hidden dimension of PositionwiseFeedForward
self_attention_model (str): type of the attention layer and positional encoding
'rel_pos': relative positional embedding and Transformer-XL
'rel_pos_local_attn': relative positional embedding and Transformer-XL with local attention using
overlapping chunks. Attention context is determined by att_context_size parameter.
'abs_pos': absolute positional embedding and Transformer
Default is rel_pos.
global_tokens (int): number of tokens to be used for global attention.
Only relevant if self_attention_model is 'rel_pos_local_attn'.
Defaults to 0.
global_tokens_spacing (int): how far apart the global tokens are
Defaults to 1.
global_attn_separate (bool): whether the q, k, v layers used for global tokens should be separate.
Defaults to False.
n_heads (int): number of heads for multi-head attention
conv_kernel_size (int): kernel size for depthwise convolution in convolution module
dropout (float): dropout probabilities for linear layers
dropout_att (float): dropout probabilities for attention distributions
"""
def __init__(
self,
d_model,
d_ff,
self_attention_model='rel_pos',
global_tokens=0,
global_tokens_spacing=1,
global_attn_separate=False,
n_heads=4,
conv_kernel_size=31,
conv_norm_type='batch_norm',
conv_context_size=None,
dropout=0.1,
dropout_att=0.1,
pos_bias_u=None,
pos_bias_v=None,
att_context_size=[-1, -1],
):
super(ConformerLayer, self).__init__()
self.self_attention_model = self_attention_model
self.n_heads = n_heads
self.fc_factor = 0.5
# first feed forward module
self.norm_feed_forward1 = LayerNorm(d_model)
self.feed_forward1 = ConformerFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
# convolution module
self.norm_conv = LayerNorm(d_model)
self.conv = ConformerConvolution(
d_model=d_model,
kernel_size=conv_kernel_size,
norm_type=conv_norm_type,
conv_context_size=conv_context_size,
)
# multi-headed self-attention module
self.norm_self_att = LayerNorm(d_model)
MHA_max_cache_len = att_context_size[0]
if self_attention_model == 'rel_pos':
self.self_attn = RelPositionMultiHeadAttention(
n_head=n_heads,
n_feat=d_model,
dropout_rate=dropout_att,
pos_bias_u=pos_bias_u,
pos_bias_v=pos_bias_v,
max_cache_len=MHA_max_cache_len,
)
elif self_attention_model == 'rel_pos_local_attn':
self.self_attn = RelPositionMultiHeadAttentionLongformer(
n_head=n_heads,
n_feat=d_model,
dropout_rate=dropout_att,
pos_bias_u=pos_bias_u,
pos_bias_v=pos_bias_v,
max_cache_len=MHA_max_cache_len,
att_context_size=att_context_size,
global_tokens=global_tokens,
global_tokens_spacing=global_tokens_spacing,
global_attn_separate=global_attn_separate,
)
elif self_attention_model == 'abs_pos':
self.self_attn = MultiHeadAttention(
n_head=n_heads, n_feat=d_model, dropout_rate=dropout_att, max_cache_len=MHA_max_cache_len
)
else:
raise ValueError(
f"'{self_attention_model}' is not not a valid value for 'self_attention_model', "
f"valid values can be from ['rel_pos', 'rel_pos_local_attn', 'abs_pos']"
)
# second feed forward module
self.norm_feed_forward2 = LayerNorm(d_model)
self.feed_forward2 = ConformerFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.norm_out = LayerNorm(d_model)
def forward(self, x, att_mask=None, pos_emb=None, pad_mask=None, cache_last_channel=None, cache_last_time=None):
"""
Args:
x (torch.Tensor): input signals (B, T, d_model)
            att_mask (torch.Tensor): attention masks (B, T, T)
pos_emb (torch.Tensor): (L, 1, d_model)
pad_mask (torch.tensor): padding mask
cache_last_channel (torch.tensor) : cache for MHA layers (B, T_cache, d_model)
cache_last_time (torch.tensor) : cache for convolutional layers (B, d_model, T_cache)
Returns:
x (torch.Tensor): (B, T, d_model)
cache_last_channel (torch.tensor) : next cache for MHA layers (B, T_cache, d_model)
cache_last_time (torch.tensor) : next cache for convolutional layers (B, d_model, T_cache)
"""
residual = x
x = self.norm_feed_forward1(x)
x = self.feed_forward1(x)
residual = residual + self.dropout(x) * self.fc_factor
x = self.norm_self_att(residual)
if self.self_attention_model == 'rel_pos':
x = self.self_attn(query=x, key=x, value=x, mask=att_mask, pos_emb=pos_emb, cache=cache_last_channel)
elif self.self_attention_model == 'rel_pos_local_attn':
x = self.self_attn(query=x, key=x, value=x, pad_mask=pad_mask, pos_emb=pos_emb, cache=cache_last_channel)
elif self.self_attention_model == 'abs_pos':
x = self.self_attn(query=x, key=x, value=x, mask=att_mask, cache=cache_last_channel)
else:
x = None
if x is not None and cache_last_channel is not None:
(x, cache_last_channel) = x
residual = residual + self.dropout(x)
if self.is_adapter_available():
# Call the MHA adapters
pack_ip = {
'x': residual,
'loc': 'mha',
'att_mask': att_mask,
'pos_emb': pos_emb,
}
pack_ip = self.forward_enabled_adapters(pack_ip)
residual = pack_ip['x']
x = self.norm_conv(residual)
x = self.conv(x, pad_mask=pad_mask, cache=cache_last_time)
if cache_last_time is not None:
(x, cache_last_time) = x
residual = residual + self.dropout(x)
x = self.norm_feed_forward2(residual)
x = self.feed_forward2(x)
residual = residual + self.dropout(x) * self.fc_factor
x = self.norm_out(residual)
if self.is_adapter_available():
# Call the adapters
pack_ip = {
'x': x,
'loc': 'post',
}
pack_ip = self.forward_enabled_adapters(pack_ip)
x = pack_ip['x']
if self.is_access_enabled() and self.access_cfg.get('save_encoder_tensors', False):
self.register_accessible_tensor(name='encoder', tensor=x)
if cache_last_channel is None:
return x
else:
return x, cache_last_channel, cache_last_time
def forward_single_enabled_adapter_(
self,
input: dict,
adapter_module: torch.nn.Module,
*,
adapter_name: str,
adapter_strategy: 'nemo.core.classes.mixins.adapter_mixin_strategies.AbstractAdapterStrategy',
):
"""
Perform the forward step of a single adapter module on some input data.
        **Note**: Subclasses can override this method to accommodate more complicated adapter forward steps.
Args:
input: Dictionary of packed tensors. The dict should contain at least
`x`: output tensor
`loc`: Semantic location in module where this adapter was called
`att_mask`: Optional, Attention mask
`pos_emb`: Optional, Positional Embedding for Relative Positional Encoding.
The output tensor of the calling module is the input to the first adapter, whose output
is then chained to the next adapter until all adapters are consumed.
adapter_module: The adapter module that is currently required to perform the forward pass.
adapter_name: The resolved name of the adapter that is undergoing the current forward pass.
adapter_strategy: A subclass of `AbstractAdapterStrategy`, that determines how the
output of the adapter should be merged with the input, or if it should be merged at all.
Returns:
The result tensor, after the current active adapter has finished its forward pass.
"""
# (input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin')
x = input['x']
loc = input['loc']
att_mask = input.get('att_mask', None)
pos_emb = input.get('pos_emb', None)
if isinstance(adapter_module, adapter_modules.LinearAdapter) and loc == 'post':
output = adapter_strategy(x, adapter_module, module=self)
elif isinstance(adapter_module, MultiHeadAttention) and loc == 'mha':
if self.self_attention_model == 'rel_pos':
x = dict(query=x, key=x, value=x, mask=att_mask, pos_emb=pos_emb)
output = adapter_strategy(x, adapter_module, module=self)
elif self.self_attention_model == 'abs_pos':
x = dict(query=x, key=x, value=x, mask=att_mask)
output = adapter_strategy(x, adapter_module, module=self)
else:
raise ValueError(f"Unsupported value of self_attention_model , provided {self.self_attention_model}!")
else:
# No adapter compatible, skip
output = x
input['x'] = output
return input
class ConformerConvolution(nn.Module):
"""The convolution module for the Conformer model.
Args:
d_model (int): hidden dimension
kernel_size (int): kernel size for depthwise convolution
pointwise_activation (str): name of the activation function to be used for the pointwise conv.
Note that Conformer uses a special key `glu_` which is treated as the original default from
the paper.
"""
def __init__(
self, d_model, kernel_size, norm_type='batch_norm', conv_context_size=None, pointwise_activation='glu_'
):
super(ConformerConvolution, self).__init__()
assert (kernel_size - 1) % 2 == 0
self.d_model = d_model
self.kernel_size = kernel_size
self.norm_type = norm_type
if conv_context_size is None:
conv_context_size = (kernel_size - 1) // 2
if pointwise_activation in activation_registry:
self.pointwise_activation = activation_registry[pointwise_activation]()
dw_conv_input_dim = d_model * 2
if hasattr(self.pointwise_activation, 'inplace'):
self.pointwise_activation.inplace = True
else:
self.pointwise_activation = pointwise_activation
dw_conv_input_dim = d_model
self.pointwise_conv1 = nn.Conv1d(
in_channels=d_model, out_channels=d_model * 2, kernel_size=1, stride=1, padding=0, bias=True
)
self.depthwise_conv = CausalConv1D(
in_channels=dw_conv_input_dim,
out_channels=dw_conv_input_dim,
kernel_size=kernel_size,
stride=1,
padding=conv_context_size,
groups=dw_conv_input_dim,
bias=True,
)
if norm_type == 'batch_norm':
self.batch_norm = nn.BatchNorm1d(dw_conv_input_dim)
elif norm_type == 'instance_norm':
self.batch_norm = nn.InstanceNorm1d(dw_conv_input_dim)
elif norm_type == 'layer_norm':
self.batch_norm = nn.LayerNorm(dw_conv_input_dim)
elif norm_type == 'fused_batch_norm':
self.batch_norm = FusedBatchNorm1d(dw_conv_input_dim)
elif norm_type.startswith('group_norm'):
num_groups = int(norm_type.replace("group_norm", ""))
self.batch_norm = nn.GroupNorm(num_groups=num_groups, num_channels=d_model)
else:
raise ValueError(f"conv_norm_type={norm_type} is not valid!")
self.activation = Swish()
self.pointwise_conv2 = nn.Conv1d(
in_channels=dw_conv_input_dim, out_channels=d_model, kernel_size=1, stride=1, padding=0, bias=True
)
def forward(self, x, pad_mask=None, cache=None):
x = x.transpose(1, 2)
x = self.pointwise_conv1(x)
# Compute the activation function or use GLU for original Conformer
if self.pointwise_activation == 'glu_':
x = nn.functional.glu(x, dim=1)
else:
x = self.pointwise_activation(x)
if pad_mask is not None:
x = x.float().masked_fill(pad_mask.unsqueeze(1), 0.0)
x = self.depthwise_conv(x, cache=cache)
if cache is not None:
x, cache = x
if self.norm_type == "layer_norm":
x = x.transpose(1, 2)
x = self.batch_norm(x)
x = x.transpose(1, 2)
else:
x = self.batch_norm(x)
x = self.activation(x)
x = self.pointwise_conv2(x)
x = x.transpose(1, 2)
if cache is None:
return x
else:
return x, cache
def reset_parameters_conv(self):
pw1_max = pw2_max = self.d_model ** -0.5
dw_max = self.kernel_size ** -0.5
with torch.no_grad():
nn.init.uniform_(self.pointwise_conv1.weight, -pw1_max, pw1_max)
nn.init.uniform_(self.pointwise_conv1.bias, -pw1_max, pw1_max)
nn.init.uniform_(self.pointwise_conv2.weight, -pw2_max, pw2_max)
nn.init.uniform_(self.pointwise_conv2.bias, -pw2_max, pw2_max)
nn.init.uniform_(self.depthwise_conv.weight, -dw_max, dw_max)
nn.init.uniform_(self.depthwise_conv.bias, -dw_max, dw_max)
class ConformerFeedForward(nn.Module):
"""
    Feed-forward module of the Conformer model.
"""
def __init__(self, d_model, d_ff, dropout, activation=Swish()):
super(ConformerFeedForward, self).__init__()
self.d_model = d_model
self.d_ff = d_ff
self.linear1 = nn.Linear(d_model, d_ff)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.linear1(x)
x = self.activation(x)
x = self.dropout(x)
x = self.linear2(x)
return x
def reset_parameters_ff(self):
ffn1_max = self.d_model ** -0.5
ffn2_max = self.d_ff ** -0.5
with torch.no_grad():
nn.init.uniform_(self.linear1.weight, -ffn1_max, ffn1_max)
nn.init.uniform_(self.linear1.bias, -ffn1_max, ffn1_max)
nn.init.uniform_(self.linear2.weight, -ffn2_max, ffn2_max)
nn.init.uniform_(self.linear2.bias, -ffn2_max, ffn2_max)
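# Editor-added illustrative sketch (not from the original file): the convolution and feed-forward
# modules both preserve the [B, T, d_model] shape, so they can be chained directly. The sizes
# below are assumed example values; masks and streaming caches are omitted in this sketch.
def _example_conformer_modules():
    conv_module = ConformerConvolution(d_model=64, kernel_size=31, norm_type='batch_norm')
    ffn = ConformerFeedForward(d_model=64, d_ff=256, dropout=0.1)
    x = torch.randn(2, 50, 64)  # [B, T, d_model]
    x = conv_module(x)          # pad_mask / cache omitted in this sketch
    x = ffn(x)
    return x.shape              # [2, 50, 64]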
|
NeMo-main
|
nemo/collections/asr/parts/submodules/conformer_modules.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import torch
import torch.nn as nn
from nemo.core.classes import Typing, typecheck
from nemo.core.neural_types import LengthsType, NeuralType, SpectrogramType
class SpecAugment(nn.Module, Typing):
"""
    Zeroes out (cuts) random continuous horizontal or
vertical segments of the spectrogram as described in
SpecAugment (https://arxiv.org/abs/1904.08779).
params:
freq_masks - how many frequency segments should be cut
time_masks - how many time segments should be cut
freq_width - maximum number of frequencies to be cut in one segment
time_width - maximum number of time steps to be cut in one segment.
Can be a positive integer or a float value in the range [0, 1].
If positive integer value, defines maximum number of time steps
to be cut in one segment.
If a float value, defines maximum percentage of timesteps that
are cut adaptively.
"""
@property
def input_types(self):
"""Returns definitions of module input types
"""
return {
"input_spec": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output types
"""
return {"augmented_spec": NeuralType(('B', 'D', 'T'), SpectrogramType())}
def __init__(
self, freq_masks=0, time_masks=0, freq_width=10, time_width=10, rng=None, mask_value=0.0,
):
super().__init__()
self._rng = random.Random() if rng is None else rng
self.freq_masks = freq_masks
self.time_masks = time_masks
self.freq_width = freq_width
self.time_width = time_width
self.mask_value = mask_value
if isinstance(time_width, int):
self.adaptive_temporal_width = False
else:
if time_width > 1.0 or time_width < 0.0:
raise ValueError("If `time_width` is a float value, must be in range [0, 1]")
self.adaptive_temporal_width = True
@typecheck()
@torch.no_grad()
def forward(self, input_spec, length):
batch_size, num_freq_bins, _ = input_spec.shape
# Move lengths to CPU before repeated indexing
lengths_cpu = length.cpu().numpy()
# Generate a numpy boolean mask. `True` elements represent where the input spec will be augmented.
fill_mask: np.array = np.full(shape=input_spec.shape, fill_value=False)
freq_start_upper_bound = num_freq_bins - self.freq_width
# Choose different mask ranges for each element of the batch
for idx in range(batch_size):
# Set freq masking
for _ in range(self.freq_masks):
start = self._rng.randint(0, freq_start_upper_bound)
width = self._rng.randint(0, self.freq_width)
fill_mask[idx, start : start + width, :] = True
            # Derive time width, sometimes based on a percentage of the input length.
if self.adaptive_temporal_width:
time_max_width = max(1, int(lengths_cpu[idx] * self.time_width))
else:
time_max_width = self.time_width
time_start_upper_bound = max(1, lengths_cpu[idx] - time_max_width)
# Set time masking
for _ in range(self.time_masks):
start = self._rng.randint(0, time_start_upper_bound)
width = self._rng.randint(0, time_max_width)
fill_mask[idx, :, start : start + width] = True
# Bring the mask to device and fill spec
fill_mask = torch.from_numpy(fill_mask).to(input_spec.device)
masked_spec = input_spec.masked_fill(mask=fill_mask, value=self.mask_value)
return masked_spec
class SpecCutout(nn.Module, Typing):
"""
    Zeroes out (cuts) random rectangles in the spectrogram
as described in (https://arxiv.org/abs/1708.04552).
params:
rect_masks - how many rectangular masks should be cut
rect_freq - maximum size of cut rectangles along the frequency dimension
rect_time - maximum size of cut rectangles along the time dimension
"""
@property
def input_types(self):
"""Returns definitions of module input types
"""
return {"input_spec": NeuralType(('B', 'D', 'T'), SpectrogramType())}
@property
def output_types(self):
"""Returns definitions of module output types
"""
return {"augmented_spec": NeuralType(('B', 'D', 'T'), SpectrogramType())}
def __init__(self, rect_masks=0, rect_time=5, rect_freq=20, rng=None):
super(SpecCutout, self).__init__()
self._rng = random.Random() if rng is None else rng
self.rect_masks = rect_masks
self.rect_time = rect_time
self.rect_freq = rect_freq
@typecheck()
@torch.no_grad()
def forward(self, input_spec):
sh = input_spec.shape
for idx in range(sh[0]):
for i in range(self.rect_masks):
rect_x = self._rng.randint(0, sh[1] - self.rect_freq)
rect_y = self._rng.randint(0, sh[2] - self.rect_time)
w_x = self._rng.randint(0, self.rect_freq)
w_y = self._rng.randint(0, self.rect_time)
input_spec[idx, rect_x : rect_x + w_x, rect_y : rect_y + w_y] = 0.0
return input_spec
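# Editor-added illustrative sketch (not from the original file): SpecAugment masks a few random
# frequency and time bands of a [B, D, T] spectrogram; the masked tensor keeps its shape. The
# mask counts and widths below are assumed example values, not tuned defaults.
def _example_spec_augment():
    augment = SpecAugment(freq_masks=2, time_masks=2, freq_width=6, time_width=10)
    spec = torch.randn(2, 80, 120)  # [B, D, T] log-mel features
    lengths = torch.tensor([120, 100])
    masked = augment(input_spec=spec, length=lengths)
    return masked.shape  # [2, 80, 120]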
|
NeMo-main
|
nemo/collections/asr/parts/submodules/spectr_augment.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.core.classes import Typing, typecheck
from nemo.core.neural_types import HypothesisType, LengthsType, LogprobsType, NeuralType
from nemo.utils import logging
DEFAULT_TOKEN_OFFSET = 100
def pack_hypotheses(
hypotheses: List[rnnt_utils.NBestHypotheses], logitlen: torch.Tensor,
) -> List[rnnt_utils.NBestHypotheses]:
if logitlen is not None:
if hasattr(logitlen, 'cpu'):
logitlen_cpu = logitlen.to('cpu')
else:
logitlen_cpu = logitlen
for idx, hyp in enumerate(hypotheses): # type: rnnt_utils.NBestHypotheses
for candidate_idx, cand in enumerate(hyp.n_best_hypotheses):
cand.y_sequence = torch.tensor(cand.y_sequence, dtype=torch.long)
if logitlen is not None:
cand.length = logitlen_cpu[idx]
if cand.dec_state is not None:
cand.dec_state = _states_to_device(cand.dec_state)
return hypotheses
def _states_to_device(dec_state, device='cpu'):
if torch.is_tensor(dec_state):
dec_state = dec_state.to(device)
elif isinstance(dec_state, (list, tuple)):
dec_state = tuple(_states_to_device(dec_i, device) for dec_i in dec_state)
return dec_state
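# Editor-added illustrative sketch (not from the original file): pack_hypotheses converts the
# y_sequence of every n-best candidate to a LongTensor and attaches the corresponding encoded
# length. The single dummy hypothesis below is an assumed example.
def _example_pack_hypotheses():
    hyp = rnnt_utils.Hypothesis(score=0.0, y_sequence=[1, 2, 3], dec_state=None, timestep=[], last_token=None)
    nbest = rnnt_utils.NBestHypotheses([hyp])
    packed = pack_hypotheses([nbest], logitlen=torch.tensor([3]))
    best = packed[0].n_best_hypotheses[0]
    return best.y_sequence, best.length  # tensor([1, 2, 3]) and tensor(3)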
class AbstractBeamCTCInfer(Typing):
"""A beam CTC decoder.
Provides a common abstraction for sample level beam decoding.
Args:
blank_id: int, index of the blank token. Can be 0 or len(vocabulary).
beam_size: int, size of the beam used in the underlying beam search engine.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"decoder_output": NeuralType(('B', 'T', 'D'), LogprobsType()),
"decoder_lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"predictions": [NeuralType(elements_type=HypothesisType())]}
def __init__(self, blank_id: int, beam_size: int):
self.blank_id = blank_id
if beam_size < 1:
raise ValueError("Beam search size cannot be less than 1!")
self.beam_size = beam_size
# Variables set by corresponding setter methods
self.vocab = None
self.decoding_type = None
self.tokenizer = None
# Utility maps for vocabulary
self.vocab_index_map = None
self.index_vocab_map = None
# Internal variable, used to prevent double reduction of consecutive tokens (ctc collapse)
self.override_fold_consecutive_value = None
def set_vocabulary(self, vocab: List[str]):
"""
Set the vocabulary of the decoding framework.
Args:
vocab: List of str. Each token corresponds to its location in the vocabulary emitted by the model.
Note that this vocabulary must NOT contain the "BLANK" token.
"""
self.vocab = vocab
self.vocab_index_map = {v: i for i, v in enumerate(vocab)}
self.index_vocab_map = {i: v for i, v in enumerate(vocab)}
def set_decoding_type(self, decoding_type: str):
"""
Sets the decoding type of the framework. Can support either char or subword models.
Args:
decoding_type: Str corresponding to decoding type. Only supports "char" and "subword".
"""
decoding_type = decoding_type.lower()
supported_types = ['char', 'subword']
if decoding_type not in supported_types:
raise ValueError(
f"Unsupported decoding type. Supported types = {supported_types}.\n" f"Given = {decoding_type}"
)
self.decoding_type = decoding_type
def set_tokenizer(self, tokenizer: TokenizerSpec):
"""
Set the tokenizer of the decoding framework.
Args:
tokenizer: NeMo tokenizer object, which inherits from TokenizerSpec.
"""
self.tokenizer = tokenizer
@typecheck()
def forward(
self, decoder_output: torch.Tensor, decoder_lengths: torch.Tensor,
) -> Tuple[List[Union[rnnt_utils.Hypothesis, rnnt_utils.NBestHypotheses]]]:
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
        Output tokens are generated auto-regressively.
Args:
decoder_output: A tensor of size (batch, timesteps, features) or (batch, timesteps) (each timestep is a label).
            decoder_lengths: list of int representing the length of each output sequence.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
raise NotImplementedError()
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
class BeamCTCInfer(AbstractBeamCTCInfer):
"""A greedy CTC decoder.
Provides a common abstraction for sample level and batch level greedy decoding.
Args:
        blank_id: int, index of the blank token. Can be 0 or len(vocabulary).
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
            the non-null value for `logprobs` in it. Here, `logprobs` is a torch.Tensor.
compute_timestamps: A bool flag, which determines whether to compute the character/subword, or
            word based timestamps, mapping the output log-probabilities to discrete intervals of timestamps.
The timestamps will be available in the returned Hypothesis.timestep as a dictionary.
"""
def __init__(
self,
blank_id: int,
beam_size: int,
search_type: str = "default",
return_best_hypothesis: bool = True,
preserve_alignments: bool = False,
compute_timestamps: bool = False,
beam_alpha: float = 1.0,
beam_beta: float = 0.0,
kenlm_path: str = None,
flashlight_cfg: Optional['FlashlightConfig'] = None,
pyctcdecode_cfg: Optional['PyCTCDecodeConfig'] = None,
):
super().__init__(blank_id=blank_id, beam_size=beam_size)
self.search_type = search_type
self.return_best_hypothesis = return_best_hypothesis
self.preserve_alignments = preserve_alignments
self.compute_timestamps = compute_timestamps
if self.compute_timestamps:
raise ValueError(f"Currently this flag is not supported for beam search algorithms.")
        self.vocab = None  # This must be set by the user via `set_vocabulary()` before calling forward()!
if search_type == "default" or search_type == "nemo":
self.search_algorithm = self.default_beam_search
elif search_type == "pyctcdecode":
self.search_algorithm = self._pyctcdecode_beam_search
elif search_type == "flashlight":
self.search_algorithm = self.flashlight_beam_search
else:
raise NotImplementedError(
f"The search type ({search_type}) supplied is not supported!\n"
f"Please use one of : (default, nemo, pyctcdecode)"
)
# Log the beam search algorithm
logging.info(f"Beam search algorithm: {search_type}")
self.beam_alpha = beam_alpha
self.beam_beta = beam_beta
# Default beam search args
self.kenlm_path = kenlm_path
# PyCTCDecode params
if pyctcdecode_cfg is None:
pyctcdecode_cfg = PyCTCDecodeConfig()
self.pyctcdecode_cfg = pyctcdecode_cfg # type: PyCTCDecodeConfig
if flashlight_cfg is None:
flashlight_cfg = FlashlightConfig()
self.flashlight_cfg = flashlight_cfg
# Default beam search scorer functions
self.default_beam_scorer = None
self.pyctcdecode_beam_scorer = None
self.flashlight_beam_scorer = None
self.token_offset = 0
@typecheck()
def forward(
self, decoder_output: torch.Tensor, decoder_lengths: torch.Tensor,
) -> Tuple[List[Union[rnnt_utils.Hypothesis, rnnt_utils.NBestHypotheses]]]:
"""Returns a list of hypotheses given an input batch of the encoder hidden embedding.
        Output tokens are generated auto-regressively.
Args:
decoder_output: A tensor of size (batch, timesteps, features).
            decoder_lengths: list of int representing the length of each output sequence.
Returns:
packed list containing batch number of sentences (Hypotheses).
"""
if self.vocab is None:
raise RuntimeError("Please set the vocabulary with `set_vocabulary()` before calling this function.")
if self.decoding_type is None:
raise ValueError("Please set the decoding type with `set_decoding_type()` before calling this function.")
with torch.no_grad(), torch.inference_mode():
# Process each sequence independently
prediction_tensor = decoder_output
if prediction_tensor.ndim != 3:
raise ValueError(
f"`decoder_output` must be a tensor of shape [B, T, V] (log probs, float). "
f"Provided shape = {prediction_tensor.shape}"
)
# determine type of input - logprobs or labels
out_len = decoder_lengths if decoder_lengths is not None else None
hypotheses = self.search_algorithm(prediction_tensor, out_len)
# Pack results into Hypotheses
packed_result = pack_hypotheses(hypotheses, decoder_lengths)
# Pack the result
if self.return_best_hypothesis and isinstance(packed_result[0], rnnt_utils.NBestHypotheses):
packed_result = [res.n_best_hypotheses[0] for res in packed_result] # type: Hypothesis
return (packed_result,)
@torch.no_grad()
def default_beam_search(
self, x: torch.Tensor, out_len: torch.Tensor
) -> List[Union[rnnt_utils.Hypothesis, rnnt_utils.NBestHypotheses]]:
"""
Open Seq2Seq Beam Search Algorithm (DeepSpeed)
Args:
x: Tensor of shape [B, T, V+1], where B is the batch size, T is the maximum sequence length,
and V is the vocabulary size. The tensor contains log-probabilities.
out_len: Tensor of shape [B], contains lengths of each sequence in the batch.
Returns:
A list of NBestHypotheses objects, one for each sequence in the batch.
"""
if self.compute_timestamps:
raise ValueError(
f"Beam Search with strategy `{self.search_type}` does not support time stamp calculation!"
)
if self.default_beam_scorer is None:
# Check for filepath
if self.kenlm_path is None or not os.path.exists(self.kenlm_path):
raise FileNotFoundError(
f"KenLM binary file not found at : {self.kenlm_path}. "
f"Please set a valid path in the decoding config."
)
# perform token offset for subword models
if self.decoding_type == 'subword':
vocab = [chr(idx + self.token_offset) for idx in range(len(self.vocab))]
else:
# char models
vocab = self.vocab
# Must import at runtime to avoid circular dependency due to module level import.
from nemo.collections.asr.modules.beam_search_decoder import BeamSearchDecoderWithLM
self.default_beam_scorer = BeamSearchDecoderWithLM(
vocab=vocab,
lm_path=self.kenlm_path,
beam_width=self.beam_size,
alpha=self.beam_alpha,
beta=self.beam_beta,
num_cpus=max(1, os.cpu_count()),
input_tensor=False,
)
x = x.to('cpu')
with typecheck.disable_checks():
data = [x[sample_id, : out_len[sample_id], :].softmax(dim=-1) for sample_id in range(len(x))]
beams_batch = self.default_beam_scorer.forward(log_probs=data, log_probs_length=None)
# For each sample in the batch
nbest_hypotheses = []
for beams_idx, beams in enumerate(beams_batch):
# For each beam candidate / hypothesis in each sample
hypotheses = []
for candidate_idx, candidate in enumerate(beams):
hypothesis = rnnt_utils.Hypothesis(
score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None
)
# For subword encoding, NeMo will double encode the subword (multiple tokens) into a
                # singular unicode id. In doing so, we preserve the semantics of the unicode token, and
# compress the size of the final KenLM ARPA / Binary file.
# In order to do double encoding, we shift the subword by some token offset.
# This step is ignored for character based models.
if self.decoding_type == 'subword':
pred_token_ids = [ord(c) - self.token_offset for c in candidate[1]]
else:
# Char models
pred_token_ids = [self.vocab_index_map[c] for c in candidate[1]]
# We preserve the token ids and the score for this hypothesis
hypothesis.y_sequence = pred_token_ids
hypothesis.score = candidate[0]
# If alignment must be preserved, we preserve a view of the output logprobs.
# Note this view is shared amongst all beams within the sample, be sure to clone it if you
# require specific processing for each sample in the beam.
# This is done to preserve memory.
if self.preserve_alignments:
hypothesis.alignments = x[beams_idx][: out_len[beams_idx]]
hypotheses.append(hypothesis)
# Wrap the result in NBestHypothesis.
hypotheses = rnnt_utils.NBestHypotheses(hypotheses)
nbest_hypotheses.append(hypotheses)
return nbest_hypotheses
@torch.no_grad()
def _pyctcdecode_beam_search(
self, x: torch.Tensor, out_len: torch.Tensor
) -> List[Union[rnnt_utils.Hypothesis, rnnt_utils.NBestHypotheses]]:
"""
PyCTCDecode Beam Search Algorithm. Should support Char and Subword models.
Args:
x: Tensor of shape [B, T, V+1], where B is the batch size, T is the maximum sequence length,
and V is the vocabulary size. The tensor contains log-probabilities.
out_len: Tensor of shape [B], contains lengths of each sequence in the batch.
Returns:
A list of NBestHypotheses objects, one for each sequence in the batch.
"""
if self.compute_timestamps:
raise ValueError(
f"Beam Search with strategy `{self.search_type}` does not support time stamp calculation!"
)
try:
import pyctcdecode
except (ImportError, ModuleNotFoundError):
raise ImportError(
f"Could not load `pyctcdecode` library. Please install it from pip using :\n"
f"pip install --upgrade pyctcdecode"
)
if self.pyctcdecode_beam_scorer is None:
self.pyctcdecode_beam_scorer = pyctcdecode.build_ctcdecoder(
labels=self.vocab, kenlm_model_path=self.kenlm_path, alpha=self.beam_alpha, beta=self.beam_beta
) # type: pyctcdecode.BeamSearchDecoderCTC
x = x.to('cpu').numpy()
with typecheck.disable_checks():
beams_batch = []
for sample_id in range(len(x)):
logprobs = x[sample_id, : out_len[sample_id], :]
result = self.pyctcdecode_beam_scorer.decode_beams(
logprobs,
beam_width=self.beam_size,
beam_prune_logp=self.pyctcdecode_cfg.beam_prune_logp,
token_min_logp=self.pyctcdecode_cfg.token_min_logp,
prune_history=self.pyctcdecode_cfg.prune_history,
hotwords=self.pyctcdecode_cfg.hotwords,
hotword_weight=self.pyctcdecode_cfg.hotword_weight,
lm_start_state=None,
) # Output format: text, last_lm_state, text_frames, logit_score, lm_score
beams_batch.append(result)
nbest_hypotheses = []
for beams_idx, beams in enumerate(beams_batch):
hypotheses = []
for candidate_idx, candidate in enumerate(beams):
# Candidate = (text, last_lm_state, text_frames, logit_score, lm_score)
hypothesis = rnnt_utils.Hypothesis(
score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None
)
# TODO: Requires token ids to be returned rather than text.
if self.decoding_type == 'subword':
if self.tokenizer is None:
raise ValueError("Tokenizer must be provided for subword decoding. Use set_tokenizer().")
pred_token_ids = self.tokenizer.text_to_ids(candidate[0])
else:
if self.vocab is None:
raise ValueError("Vocab must be provided for character decoding. Use set_vocab().")
chars = list(candidate[0])
pred_token_ids = [self.vocab_index_map[c] for c in chars]
hypothesis.y_sequence = pred_token_ids
hypothesis.text = candidate[0] # text
hypothesis.score = candidate[4] # score
# Inject word level timestamps
hypothesis.timestep = candidate[2] # text_frames
if self.preserve_alignments:
hypothesis.alignments = torch.from_numpy(x[beams_idx][: out_len[beams_idx]])
hypotheses.append(hypothesis)
hypotheses = rnnt_utils.NBestHypotheses(hypotheses)
nbest_hypotheses.append(hypotheses)
return nbest_hypotheses
@torch.no_grad()
def flashlight_beam_search(
self, x: torch.Tensor, out_len: torch.Tensor
) -> List[Union[rnnt_utils.Hypothesis, rnnt_utils.NBestHypotheses]]:
"""
Flashlight Beam Search Algorithm. Should support Char and Subword models.
Args:
x: Tensor of shape [B, T, V+1], where B is the batch size, T is the maximum sequence length,
and V is the vocabulary size. The tensor contains log-probabilities.
out_len: Tensor of shape [B], contains lengths of each sequence in the batch.
Returns:
A list of NBestHypotheses objects, one for each sequence in the batch.
"""
if self.compute_timestamps:
raise ValueError(
f"Beam Search with strategy `{self.search_type}` does not support time stamp calculation!"
)
if self.flashlight_beam_scorer is None:
# Check for filepath
if self.kenlm_path is None or not os.path.exists(self.kenlm_path):
raise FileNotFoundError(
f"KenLM binary file not found at : {self.kenlm_path}. "
f"Please set a valid path in the decoding config."
)
# perform token offset for subword models
# if self.decoding_type == 'subword':
# vocab = [chr(idx + self.token_offset) for idx in range(len(self.vocab))]
# else:
# # char models
# vocab = self.vocab
# Must import at runtime to avoid circular dependency due to module level import.
from nemo.collections.asr.modules.flashlight_decoder import FlashLightKenLMBeamSearchDecoder
self.flashlight_beam_scorer = FlashLightKenLMBeamSearchDecoder(
lm_path=self.kenlm_path,
vocabulary=self.vocab,
tokenizer=self.tokenizer,
lexicon_path=self.flashlight_cfg.lexicon_path,
boost_path=self.flashlight_cfg.boost_path,
beam_size=self.beam_size,
beam_size_token=self.flashlight_cfg.beam_size_token,
beam_threshold=self.flashlight_cfg.beam_threshold,
lm_weight=self.beam_alpha,
word_score=self.beam_beta,
unk_weight=self.flashlight_cfg.unk_weight,
sil_weight=self.flashlight_cfg.sil_weight,
)
x = x.to('cpu')
with typecheck.disable_checks():
beams_batch = self.flashlight_beam_scorer.forward(log_probs=x)
# For each sample in the batch
nbest_hypotheses = []
for beams_idx, beams in enumerate(beams_batch):
# For each beam candidate / hypothesis in each sample
hypotheses = []
for candidate_idx, candidate in enumerate(beams):
hypothesis = rnnt_utils.Hypothesis(
score=0.0, y_sequence=[], dec_state=None, timestep=[], last_token=None
)
# We preserve the token ids and the score for this hypothesis
hypothesis.y_sequence = candidate['tokens'].tolist()
hypothesis.score = candidate['score']
# If alignment must be preserved, we preserve a view of the output logprobs.
# Note this view is shared amongst all beams within the sample, be sure to clone it if you
# require specific processing for each sample in the beam.
# This is done to preserve memory.
if self.preserve_alignments:
hypothesis.alignments = x[beams_idx][: out_len[beams_idx]]
hypotheses.append(hypothesis)
# Wrap the result in NBestHypothesis.
hypotheses = rnnt_utils.NBestHypotheses(hypotheses)
nbest_hypotheses.append(hypotheses)
return nbest_hypotheses
def set_decoding_type(self, decoding_type: str):
super().set_decoding_type(decoding_type)
# Please check train_kenlm.py in scripts/asr_language_modeling/ to find out why we need
# TOKEN_OFFSET for BPE-based models
if self.decoding_type == 'subword':
self.token_offset = DEFAULT_TOKEN_OFFSET
@dataclass
class PyCTCDecodeConfig:
# These arguments cannot be imported from pyctcdecode (optional dependency)
# Therefore we copy the values explicitly
# Taken from pyctcdecode.constant
beam_prune_logp: float = -10.0
token_min_logp: float = -5.0
prune_history: bool = False
hotwords: Optional[List[str]] = None
hotword_weight: float = 10.0
@dataclass
class FlashlightConfig:
lexicon_path: Optional[str] = None
boost_path: Optional[str] = None
beam_size_token: int = 16
beam_threshold: float = 20.0
unk_weight: float = -math.inf
sil_weight: float = 0.0
@dataclass
class BeamCTCInferConfig:
beam_size: int
search_type: str = 'default'
preserve_alignments: bool = False
compute_timestamps: bool = False
return_best_hypothesis: bool = True
beam_alpha: float = 1.0
beam_beta: float = 0.0
kenlm_path: Optional[str] = None
flashlight_cfg: Optional[FlashlightConfig] = FlashlightConfig()
pyctcdecode_cfg: Optional[PyCTCDecodeConfig] = PyCTCDecodeConfig()
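# Illustrative sketch (hedged): one way the dataclasses above might be combined to describe a
# pyctcdecode-backed beam search. The KenLM path below is a placeholder, and the 'pyctcdecode'
# search_type string is assumed to match the strategy dispatch used by the decoder class in this
# module; only the dataclasses defined above are exercised here.
def _example_build_beam_ctc_config() -> BeamCTCInferConfig:
    # LM fusion weights: beam_alpha scales the LM score, beam_beta is a token insertion bonus.
    return BeamCTCInferConfig(
        beam_size=16,
        search_type='pyctcdecode',  # assumed strategy name
        return_best_hypothesis=True,
        preserve_alignments=False,
        compute_timestamps=False,
        beam_alpha=1.0,
        beam_beta=0.0,
        kenlm_path='/path/to/lm.binary',  # placeholder path
        pyctcdecode_cfg=PyCTCDecodeConfig(beam_prune_logp=-10.0, token_min_logp=-5.0),
    )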
|
NeMo-main
|
nemo/collections/asr/parts/submodules/ctc_beam_decoding.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from tqdm import tqdm
from nemo.collections.asr.modules import rnnt_abstract
from nemo.collections.asr.parts.utils.rnnt_utils import (
HATJointOutput,
Hypothesis,
NBestHypotheses,
is_prefix,
select_k_expansions,
)
from nemo.core.classes import Typing, typecheck
from nemo.core.neural_types import AcousticEncodedRepresentation, HypothesisType, LengthsType, NeuralType
from nemo.utils import logging
try:
import kenlm
KENLM_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
KENLM_AVAILABLE = False
def pack_hypotheses(hypotheses: List[Hypothesis]) -> List[Hypothesis]:
for idx, hyp in enumerate(hypotheses): # type: rnnt_utils.Hypothesis
hyp.y_sequence = torch.tensor(hyp.y_sequence, dtype=torch.long)
if hyp.dec_state is not None:
hyp.dec_state = _states_to_device(hyp.dec_state)
# Remove -1 from timestep
if hyp.timestep is not None and len(hyp.timestep) > 0 and hyp.timestep[0] == -1:
hyp.timestep = hyp.timestep[1:]
return hypotheses
def _states_to_device(dec_state, device='cpu'):
if torch.is_tensor(dec_state):
dec_state = dec_state.to(device)
elif isinstance(dec_state, (list, tuple)):
dec_state = tuple(_states_to_device(dec_i, device) for dec_i in dec_state)
return dec_state
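# Illustrative sketch (hedged): what the two module-level helpers above do. `_states_to_device`
# recursively moves (possibly nested) decoder-state tensors to a device, and `pack_hypotheses`
# converts the token list of each hypothesis to a LongTensor and drops the leading -1 timestep
# marker inserted by the search loops below. The toy hypothesis here is purely hypothetical.
def _example_pack_and_move_states() -> List[Hypothesis]:
    # Toy nested decoder state, e.g. an LSTM (h, c) pair.
    toy_state = (torch.zeros(1, 1, 8), torch.zeros(1, 1, 8))
    moved_state = _states_to_device(toy_state, device='cpu')
    # Toy hypothesis with the leading -1 timestep used as a start-of-sequence marker.
    hyp = Hypothesis(score=1.23, y_sequence=[5, 7, 7], dec_state=moved_state, timestep=[-1, 0, 1, 2])
    packed = pack_hypotheses([hyp])
    assert packed[0].timestep == [0, 1, 2]
    assert packed[0].y_sequence.dtype == torch.long
    return packed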
class BeamRNNTInfer(Typing):
"""
Beam Search implementation ported from ESPNet implementation -
https://github.com/espnet/espnet/blob/master/espnet/nets/beam_search_transducer.py
    Sequence level beam decoding or batched-beam decoding, performed auto-regressively
depending on the search type chosen.
Args:
decoder_model: rnnt_utils.AbstractRNNTDecoder implementation.
joint_model: rnnt_utils.AbstractRNNTJoint implementation.
beam_size: number of beams for beam search. Must be a positive integer >= 1.
If beam size is 1, defaults to stateful greedy search.
This greedy search might result in slightly different results than
the greedy results obtained by GreedyRNNTInfer due to implementation differences.
For accurate greedy results, please use GreedyRNNTInfer or GreedyBatchedRNNTInfer.
search_type: str representing the type of beam search to perform.
            Must be one of ['default', 'tsd', 'alsd', 'maes']. 'nsc' is currently not supported.
            Algorithm used:
            `default` - basic beam search strategy. Larger beams generally result in better decoding,
however the time required for the search also grows steadily.
`tsd` - time synchronous decoding. Please refer to the paper:
[Alignment-Length Synchronous Decoding for RNN Transducer](https://ieeexplore.ieee.org/document/9053040)
for details on the algorithm implemented.
Time synchronous decoding (TSD) execution time grows by the factor T * max_symmetric_expansions.
For longer sequences, T is greater, and can therefore take a long time for beams to obtain
good results. This also requires greater memory to execute.
`alsd` - alignment-length synchronous decoding. Please refer to the paper:
[Alignment-Length Synchronous Decoding for RNN Transducer](https://ieeexplore.ieee.org/document/9053040)
for details on the algorithm implemented.
Alignment-length synchronous decoding (ALSD) execution time is faster than TSD, with growth
factor of T + U_max, where U_max is the maximum target length expected during execution.
Generally, T + U_max < T * max_symmetric_expansions. However, ALSD beams are non-unique,
therefore it is required to use larger beam sizes to achieve the same (or close to the same)
decoding accuracy as TSD.
For a given decoding accuracy, it is possible to attain faster decoding via ALSD than TSD.
            `maes` - modified adaptive expansion search. Please refer to the paper:
                [Accelerating RNN Transducer Inference via Adaptive Expansion Search](https://ieeexplore.ieee.org/document/9250505)
                Modified adaptive expansion search (mAES) execution time is adaptive w.r.t the
number of expansions (for tokens) required per timestep. The number of expansions can usually
be constrained to 1 or 2, and in most cases 2 is sufficient.
This beam search technique can possibly obtain superior WER while sacrificing some evaluation time.
score_norm: bool, whether to normalize the scores of the log probabilities.
return_best_hypothesis: bool, decides whether to return a single hypothesis (the best out of N),
            or return all N hypotheses (sorted with best score first). The container class changes based on
            this flag -
When set to True (default), returns a single Hypothesis.
When set to False, returns a NBestHypotheses container, which contains a list of Hypothesis.
# The following arguments are specific to the chosen `search_type`
tsd_max_sym_exp_per_step: Used for `search_type=tsd`. The maximum symmetric expansions allowed
per timestep during beam search. Larger values should be used to attempt decoding of longer
sequences, but this in turn increases execution time and memory usage.
alsd_max_target_len: Used for `search_type=alsd`. The maximum expected target sequence length
during beam search. Larger values allow decoding of longer sequences at the expense of
execution time and memory.
# The following two flags are placeholders and unused until `nsc` implementation is stabilized.
nsc_max_timesteps_expansion: Unused int.
nsc_prefix_alpha: Unused int.
# mAES flags
maes_num_steps: Number of adaptive steps to take. From the paper, 2 steps is generally sufficient. int > 1.
        maes_prefix_alpha: Maximum prefix length in prefix search. Must be an integer, and it is advised to keep this as 1
            in order to reduce expensive beam search cost later. int >= 0.
maes_expansion_beta: Maximum number of prefix expansions allowed, in addition to the beam size.
Effectively, the number of hypothesis = beam_size + maes_expansion_beta. Must be an int >= 0,
and affects the speed of inference since large values will perform large beam search in the next step.
maes_expansion_gamma: Float pruning threshold used in the prune-by-value step when computing the expansions.
The default (2.3) is selected from the paper. It performs a comparison (max_log_prob - gamma <= log_prob[v])
where v is all vocabulary indices in the Vocab set and max_log_prob is the "most" likely token to be
predicted. Gamma therefore provides a margin of additional tokens which can be potential candidates for
expansion apart from the "most likely" candidate.
Lower values will reduce the number of expansions (by increasing pruning-by-value, thereby improving speed
but hurting accuracy). Higher values will increase the number of expansions (by reducing pruning-by-value,
thereby reducing speed but potentially improving accuracy). This is a hyper parameter to be experimentally
tuned on a validation set.
softmax_temperature: Scales the logits of the joint prior to computing log_softmax.
preserve_alignments: Bool flag which preserves the history of alignments generated during
beam decoding (sample). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of Tensor (of length V + 1).
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
NOTE: `preserve_alignments` is an invalid argument for any `search_type`
other than basic beam search.
ngram_lm_model: str
The path to the N-gram LM
ngram_lm_alpha: float
Alpha weight of N-gram LM
        hat_subtract_ilm: bool
            Whether to subtract the internal LM score of a HAT joint model during decoding (mAES only)
        hat_ilm_weight: float
            Weight applied to the internal LM score that is subtracted when `hat_subtract_ilm` is enabled
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
"partial_hypotheses": [NeuralType(elements_type=HypothesisType(), optional=True)], # must always be last
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"predictions": [NeuralType(elements_type=HypothesisType())]}
def __init__(
self,
decoder_model: rnnt_abstract.AbstractRNNTDecoder,
joint_model: rnnt_abstract.AbstractRNNTJoint,
beam_size: int,
search_type: str = 'default',
score_norm: bool = True,
return_best_hypothesis: bool = True,
tsd_max_sym_exp_per_step: Optional[int] = 50,
alsd_max_target_len: Union[int, float] = 1.0,
nsc_max_timesteps_expansion: int = 1,
nsc_prefix_alpha: int = 1,
maes_num_steps: int = 2,
maes_prefix_alpha: int = 1,
maes_expansion_gamma: float = 2.3,
maes_expansion_beta: int = 2,
language_model: Optional[Dict[str, Any]] = None,
softmax_temperature: float = 1.0,
preserve_alignments: bool = False,
ngram_lm_model: Optional[str] = None,
ngram_lm_alpha: float = 0.0,
hat_subtract_ilm: bool = False,
hat_ilm_weight: float = 0.0,
):
self.decoder = decoder_model
self.joint = joint_model
self.blank = decoder_model.blank_idx
self.vocab_size = decoder_model.vocab_size
self.search_type = search_type
self.return_best_hypothesis = return_best_hypothesis
if beam_size < 1:
raise ValueError("Beam search size cannot be less than 1!")
self.beam_size = beam_size
self.score_norm = score_norm
self.max_candidates = beam_size
if self.beam_size == 1:
logging.info("Beam size of 1 was used, switching to sample level `greedy_search`")
self.search_algorithm = self.greedy_search
elif search_type == "default":
self.search_algorithm = self.default_beam_search
elif search_type == "tsd":
self.search_algorithm = self.time_sync_decoding
elif search_type == "alsd":
self.search_algorithm = self.align_length_sync_decoding
elif search_type == "nsc":
raise NotImplementedError("`nsc` (Constrained Beam Search) has not been implemented.")
# self.search_algorithm = self.nsc_beam_search
elif search_type == "maes":
self.search_algorithm = self.modified_adaptive_expansion_search
else:
raise NotImplementedError(
f"The search type ({search_type}) supplied is not supported!\n"
f"Please use one of : (default, tsd, alsd, nsc)"
)
if tsd_max_sym_exp_per_step is None:
tsd_max_sym_exp_per_step = -1
if search_type in ['tsd', 'alsd', 'nsc'] and not self.decoder.blank_as_pad:
raise ValueError(
f"Search type was chosen as '{search_type}', however the decoder module provided "
f"does not support the `blank` token as a pad value. {search_type} requires "
f"the blank token as pad value support in order to perform batched beam search."
f"Please chose one of the other beam search methods, or re-train your model "
f"with this support."
)
self.tsd_max_symmetric_expansion_per_step = tsd_max_sym_exp_per_step
self.alsd_max_target_length = alsd_max_target_len
self.nsc_max_timesteps_expansion = nsc_max_timesteps_expansion
self.nsc_prefix_alpha = int(nsc_prefix_alpha)
self.maes_prefix_alpha = int(maes_prefix_alpha)
self.maes_num_steps = int(maes_num_steps)
self.maes_expansion_gamma = float(maes_expansion_gamma)
self.maes_expansion_beta = int(maes_expansion_beta)
if self.search_type == 'maes' and self.maes_prefix_alpha < 0:
raise ValueError("`maes_prefix_alpha` must be a positive integer.")
if self.search_type == 'maes' and self.vocab_size < beam_size + maes_expansion_beta:
raise ValueError(
f"beam_size ({beam_size}) + expansion_beta ({maes_expansion_beta}) "
f"should be smaller or equal to vocabulary size ({self.vocab_size})."
)
if search_type == 'maes':
self.max_candidates += maes_expansion_beta
if self.search_type == 'maes' and self.maes_num_steps < 2:
raise ValueError("`maes_num_steps` must be greater than 1.")
if softmax_temperature != 1.0 and language_model is not None:
logging.warning(
"Softmax temperature is not supported with LM decoding." "Setting softmax-temperature value to 1.0."
)
self.softmax_temperature = 1.0
else:
self.softmax_temperature = softmax_temperature
self.language_model = language_model
self.preserve_alignments = preserve_alignments
self.token_offset = 0
if ngram_lm_model:
if KENLM_AVAILABLE:
self.ngram_lm = kenlm.Model(ngram_lm_model)
self.ngram_lm_alpha = ngram_lm_alpha
else:
raise ImportError(
"KenLM package (https://github.com/kpu/kenlm) is not installed. " "Use ngram_lm_model=None."
)
else:
self.ngram_lm = None
if hat_subtract_ilm:
assert hasattr(self.joint, "return_hat_ilm")
assert search_type == "maes"
self.hat_subtract_ilm = hat_subtract_ilm
self.hat_ilm_weight = hat_ilm_weight
@typecheck()
def __call__(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
partial_hypotheses: Optional[List[Hypothesis]] = None,
) -> Union[Hypothesis, NBestHypotheses]:
"""Perform general beam search.
Args:
encoder_output: Encoded speech features (B, D_enc, T_max)
encoded_lengths: Lengths of the encoder outputs
Returns:
            Either a list containing a single Hypothesis (when `return_best_hypothesis=True`),
otherwise a list containing a single NBestHypotheses, which itself contains a list of
Hypothesis. This list is sorted such that the best hypothesis is the first element.
"""
# Preserve decoder and joint training state
decoder_training_state = self.decoder.training
joint_training_state = self.joint.training
# setup hat outputs mode
return_hat_ilm_default = False
if self.hat_subtract_ilm:
assert hasattr(self.joint, "return_hat_ilm")
return_hat_ilm_default = self.joint.return_hat_ilm
self.joint.return_hat_ilm = self.hat_subtract_ilm
with torch.no_grad():
# Apply optional preprocessing
encoder_output = encoder_output.transpose(1, 2) # (B, T, D)
self.decoder.eval()
self.joint.eval()
hypotheses = []
with tqdm(
range(encoder_output.size(0)),
desc='Beam search progress:',
total=encoder_output.size(0),
unit='sample',
) as idx_gen:
# Freeze the decoder and joint to prevent recording of gradients
# during the beam loop.
with self.decoder.as_frozen(), self.joint.as_frozen():
_p = next(self.joint.parameters())
dtype = _p.dtype
# Decode every sample in the batch independently.
for batch_idx in idx_gen:
inseq = encoder_output[batch_idx : batch_idx + 1, : encoded_lengths[batch_idx], :] # [1, T, D]
logitlen = encoded_lengths[batch_idx]
if inseq.dtype != dtype:
inseq = inseq.to(dtype=dtype)
# Extract partial hypothesis if exists
partial_hypothesis = partial_hypotheses[batch_idx] if partial_hypotheses is not None else None
# Execute the specific search strategy
nbest_hyps = self.search_algorithm(
inseq, logitlen, partial_hypotheses=partial_hypothesis
) # sorted list of hypothesis
# Prepare the list of hypotheses
nbest_hyps = pack_hypotheses(nbest_hyps)
# Pack the result
if self.return_best_hypothesis:
best_hypothesis = nbest_hyps[0] # type: Hypothesis
else:
best_hypothesis = NBestHypotheses(nbest_hyps) # type: NBestHypotheses
hypotheses.append(best_hypothesis)
self.decoder.train(decoder_training_state)
self.joint.train(joint_training_state)
if self.hat_subtract_ilm:
self.joint.return_hat_ilm = return_hat_ilm_default
return (hypotheses,)
def sort_nbest(self, hyps: List[Hypothesis]) -> List[Hypothesis]:
"""Sort hypotheses by score or score given sequence length.
Args:
hyps: list of hypotheses
Return:
hyps: sorted list of hypotheses
"""
if self.score_norm:
return sorted(hyps, key=lambda x: x.score / len(x.y_sequence), reverse=True)
else:
return sorted(hyps, key=lambda x: x.score, reverse=True)
def greedy_search(
self, h: torch.Tensor, encoded_lengths: torch.Tensor, partial_hypotheses: Optional[Hypothesis] = None
) -> List[Hypothesis]:
"""Greedy search implementation for transducer.
Generic case when beam size = 1. Results might differ slightly due to implementation details
        as compared to `GreedyRNNTInfer` and `GreedyBatchedRNNTInfer`.
Args:
h: Encoded speech features (1, T_max, D_enc)
Returns:
hyp: 1-best decoding results
"""
if self.preserve_alignments:
# Alignments is a 2-dimensional dangling list representing T x U
alignments = [[]]
else:
alignments = None
# Initialize zero state vectors
dec_state = self.decoder.initialize_state(h)
# Construct initial hypothesis
hyp = Hypothesis(
score=0.0, y_sequence=[self.blank], dec_state=dec_state, timestep=[-1], length=encoded_lengths
)
if partial_hypotheses is not None:
if len(partial_hypotheses.y_sequence) > 0:
hyp.y_sequence = [int(partial_hypotheses.y_sequence[-1].cpu().numpy())]
hyp.dec_state = partial_hypotheses.dec_state
hyp.dec_state = _states_to_device(hyp.dec_state, h.device)
cache = {}
# Initialize state and first token
y, state, _ = self.decoder.score_hypothesis(hyp, cache)
for i in range(int(encoded_lengths)):
hi = h[:, i : i + 1, :] # [1, 1, D]
not_blank = True
symbols_added = 0
            # TODO: Figure out how to remove this hard coding afterwards
while not_blank and (symbols_added < 5):
ytu = torch.log_softmax(self.joint.joint(hi, y) / self.softmax_temperature, dim=-1) # [1, 1, 1, V + 1]
ytu = ytu[0, 0, 0, :] # [V + 1]
# max() requires float
if ytu.dtype != torch.float32:
ytu = ytu.float()
logp, pred = torch.max(ytu, dim=-1) # [1, 1]
pred = pred.item()
if self.preserve_alignments:
# insert logprobs into last timestep
alignments[-1].append((ytu.to('cpu'), torch.tensor(pred, dtype=torch.int32)))
if pred == self.blank:
not_blank = False
if self.preserve_alignments:
# convert Ti-th logits into a torch array
alignments.append([]) # blank buffer for next timestep
else:
# Update state and current sequence
hyp.y_sequence.append(int(pred))
hyp.score += float(logp)
hyp.dec_state = state
hyp.timestep.append(i)
# Compute next state and token
y, state, _ = self.decoder.score_hypothesis(hyp, cache)
symbols_added += 1
# Remove trailing empty list of alignments
if self.preserve_alignments:
if len(alignments[-1]) == 0:
del alignments[-1]
# attach alignments to hypothesis
hyp.alignments = alignments
# Remove the original input label if partial hypothesis was provided
if partial_hypotheses is not None:
hyp.y_sequence = hyp.y_sequence[1:]
return [hyp]
def default_beam_search(
self, h: torch.Tensor, encoded_lengths: torch.Tensor, partial_hypotheses: Optional[Hypothesis] = None
) -> List[Hypothesis]:
"""Beam search implementation.
Args:
x: Encoded speech features (1, T_max, D_enc)
Returns:
nbest_hyps: N-best decoding results
"""
# Initialize states
beam = min(self.beam_size, self.vocab_size)
beam_k = min(beam, (self.vocab_size - 1))
blank_tensor = torch.tensor([self.blank], device=h.device, dtype=torch.long)
# Precompute some constants for blank position
ids = list(range(self.vocab_size + 1))
ids.remove(self.blank)
# Used when blank token is first vs last token
if self.blank == 0:
index_incr = 1
else:
index_incr = 0
# Initialize zero vector states
dec_state = self.decoder.initialize_state(h)
# Initialize first hypothesis for the beam (blank)
kept_hyps = [Hypothesis(score=0.0, y_sequence=[self.blank], dec_state=dec_state, timestep=[-1], length=0)]
cache = {}
if partial_hypotheses is not None:
if len(partial_hypotheses.y_sequence) > 0:
kept_hyps[0].y_sequence = [int(partial_hypotheses.y_sequence[-1].cpu().numpy())]
kept_hyps[0].dec_state = partial_hypotheses.dec_state
kept_hyps[0].dec_state = _states_to_device(kept_hyps[0].dec_state, h.device)
if self.preserve_alignments:
kept_hyps[0].alignments = [[]]
for i in range(int(encoded_lengths)):
hi = h[:, i : i + 1, :] # [1, 1, D]
hyps = kept_hyps
kept_hyps = []
while True:
max_hyp = max(hyps, key=lambda x: x.score)
hyps.remove(max_hyp)
# update decoder state and get next score
y, state, lm_tokens = self.decoder.score_hypothesis(max_hyp, cache) # [1, 1, D]
# get next token
ytu = torch.log_softmax(self.joint.joint(hi, y) / self.softmax_temperature, dim=-1) # [1, 1, 1, V + 1]
ytu = ytu[0, 0, 0, :] # [V + 1]
# preserve alignments
if self.preserve_alignments:
logprobs = ytu.cpu().clone()
# remove blank token before top k
top_k = ytu[ids].topk(beam_k, dim=-1)
# Two possible steps - blank token or non-blank token predicted
ytu = (
torch.cat((top_k[0], ytu[self.blank].unsqueeze(0))),
torch.cat((top_k[1] + index_incr, blank_tensor)),
)
# for each possible step
for logp, k in zip(*ytu):
# construct hypothesis for step
new_hyp = Hypothesis(
score=(max_hyp.score + float(logp)),
y_sequence=max_hyp.y_sequence[:],
dec_state=max_hyp.dec_state,
lm_state=max_hyp.lm_state,
timestep=max_hyp.timestep[:],
length=encoded_lengths,
)
if self.preserve_alignments:
new_hyp.alignments = copy.deepcopy(max_hyp.alignments)
# if current token is blank, dont update sequence, just store the current hypothesis
if k == self.blank:
kept_hyps.append(new_hyp)
else:
# if non-blank token was predicted, update state and sequence and then search more hypothesis
new_hyp.dec_state = state
new_hyp.y_sequence.append(int(k))
new_hyp.timestep.append(i)
hyps.append(new_hyp)
# Determine whether the alignment should be blank or token
if self.preserve_alignments:
if k == self.blank:
new_hyp.alignments[-1].append(
(logprobs.clone(), torch.tensor(self.blank, dtype=torch.int32))
)
else:
new_hyp.alignments[-1].append(
(logprobs.clone(), torch.tensor(new_hyp.y_sequence[-1], dtype=torch.int32))
)
# keep those hypothesis that have scores greater than next search generation
hyps_max = float(max(hyps, key=lambda x: x.score).score)
kept_most_prob = sorted([hyp for hyp in kept_hyps if hyp.score > hyps_max], key=lambda x: x.score,)
# If enough hypothesis have scores greater than next search generation,
# stop beam search.
if len(kept_most_prob) >= beam:
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for kept_h in kept_most_prob:
kept_h.alignments.append([]) # blank buffer for next timestep
kept_hyps = kept_most_prob
break
# Remove trailing empty list of alignments
if self.preserve_alignments:
for h in kept_hyps:
if len(h.alignments[-1]) == 0:
del h.alignments[-1]
# Remove the original input label if partial hypothesis was provided
if partial_hypotheses is not None:
for hyp in kept_hyps:
if hyp.y_sequence[0] == partial_hypotheses.y_sequence[-1] and len(hyp.y_sequence) > 1:
hyp.y_sequence = hyp.y_sequence[1:]
return self.sort_nbest(kept_hyps)
def time_sync_decoding(
self, h: torch.Tensor, encoded_lengths: torch.Tensor, partial_hypotheses: Optional[Hypothesis] = None
) -> List[Hypothesis]:
"""Time synchronous beam search implementation.
Based on https://ieeexplore.ieee.org/document/9053040
Args:
h: Encoded speech features (1, T_max, D_enc)
Returns:
nbest_hyps: N-best decoding results
"""
if partial_hypotheses is not None:
raise NotImplementedError("`partial_hypotheses` support is not supported")
# Precompute some constants for blank position
ids = list(range(self.vocab_size + 1))
ids.remove(self.blank)
# Used when blank token is first vs last token
if self.blank == 0:
index_incr = 1
else:
index_incr = 0
# prepare the batched beam states
beam = min(self.beam_size, self.vocab_size)
beam_state = self.decoder.initialize_state(
torch.zeros(beam, device=h.device, dtype=h.dtype)
) # [L, B, H], [L, B, H] (for LSTMs)
# Initialize first hypothesis for the beam (blank)
B = [
Hypothesis(
y_sequence=[self.blank],
score=0.0,
dec_state=self.decoder.batch_select_state(beam_state, 0),
timestep=[-1],
length=0,
)
]
cache = {}
# Initialize alignments
if self.preserve_alignments:
for hyp in B:
hyp.alignments = [[]]
for i in range(int(encoded_lengths)):
hi = h[:, i : i + 1, :]
# Update caches
A = []
C = B
h_enc = hi
# For a limited number of symmetric expansions per timestep "i"
for v in range(self.tsd_max_symmetric_expansion_per_step):
D = []
# Decode a batch of beam states and scores
beam_y, beam_state, beam_lm_tokens = self.decoder.batch_score_hypothesis(C, cache, beam_state)
# Extract the log probabilities and the predicted tokens
beam_logp = torch.log_softmax(
self.joint.joint(h_enc, beam_y) / self.softmax_temperature, dim=-1
) # [B, 1, 1, V + 1]
beam_logp = beam_logp[:, 0, 0, :] # [B, V + 1]
beam_topk = beam_logp[:, ids].topk(beam, dim=-1)
seq_A = [h.y_sequence for h in A]
for j, hyp in enumerate(C):
# create a new hypothesis in A
if hyp.y_sequence not in seq_A:
# If the sequence is not in seq_A, add it as the blank token
# In this step, we dont add a token but simply update score
_temp_hyp = Hypothesis(
score=(hyp.score + float(beam_logp[j, self.blank])),
y_sequence=hyp.y_sequence[:],
dec_state=hyp.dec_state,
lm_state=hyp.lm_state,
timestep=hyp.timestep[:],
length=encoded_lengths,
)
# Preserve the blank token alignment
if self.preserve_alignments:
_temp_hyp.alignments = copy.deepcopy(hyp.alignments)
_temp_hyp.alignments[-1].append(
(beam_logp[j].clone(), torch.tensor(self.blank, dtype=torch.int32)),
)
A.append(_temp_hyp)
else:
# merge the existing blank hypothesis score with current score.
dict_pos = seq_A.index(hyp.y_sequence)
A[dict_pos].score = np.logaddexp(
A[dict_pos].score, (hyp.score + float(beam_logp[j, self.blank]))
)
if v < self.tsd_max_symmetric_expansion_per_step:
for j, hyp in enumerate(C):
# for each current hypothesis j
# extract the top token score and top token id for the jth hypothesis
for logp, k in zip(beam_topk[0][j], beam_topk[1][j] + index_incr):
# create new hypothesis and store in D
# Note: This loop does *not* include the blank token!
new_hyp = Hypothesis(
score=(hyp.score + float(logp)),
y_sequence=(hyp.y_sequence + [int(k)]),
dec_state=self.decoder.batch_select_state(beam_state, j),
lm_state=hyp.lm_state,
timestep=hyp.timestep[:] + [i],
length=encoded_lengths,
)
# Preserve token alignment
if self.preserve_alignments:
new_hyp.alignments = copy.deepcopy(hyp.alignments)
new_hyp.alignments[-1].append(
(beam_topk[0].clone().cpu(), torch.tensor(k, dtype=torch.int32)),
)
D.append(new_hyp)
# Prune beam
C = sorted(D, key=lambda x: x.score, reverse=True)[:beam]
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for C_i in C:
# Check if the last token emitted at last timestep was a blank
# If so, move to next timestep
logp, label = C_i.alignments[-1][-1] # The last alignment of this step
if int(label) == self.blank:
C_i.alignments.append([]) # blank buffer for next timestep
# Prune beam
B = sorted(A, key=lambda x: x.score, reverse=True)[:beam]
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for B_i in B:
# Check if the last token emitted at last timestep was a blank
# If so, move to next timestep
logp, label = B_i.alignments[-1][-1] # The last alignment of this step
if int(label) == self.blank:
B_i.alignments.append([]) # blank buffer for next timestep
# Remove trailing empty list of alignments
if self.preserve_alignments:
for h in B:
if len(h.alignments[-1]) == 0:
del h.alignments[-1]
return self.sort_nbest(B)
def align_length_sync_decoding(
self, h: torch.Tensor, encoded_lengths: torch.Tensor, partial_hypotheses: Optional[Hypothesis] = None
) -> List[Hypothesis]:
"""Alignment-length synchronous beam search implementation.
Based on https://ieeexplore.ieee.org/document/9053040
Args:
h: Encoded speech features (1, T_max, D_enc)
Returns:
nbest_hyps: N-best decoding results
"""
# delay this import here instead of at the beginning to avoid circular imports.
from nemo.collections.asr.modules.rnnt import RNNTDecoder, StatelessTransducerDecoder
if partial_hypotheses is not None:
raise NotImplementedError("`partial_hypotheses` support is not supported")
# Precompute some constants for blank position
ids = list(range(self.vocab_size + 1))
ids.remove(self.blank)
# Used when blank token is first vs last token
if self.blank == 0:
index_incr = 1
else:
index_incr = 0
# prepare the batched beam states
beam = min(self.beam_size, self.vocab_size)
h = h[0] # [T, D]
h_length = int(encoded_lengths)
beam_state = self.decoder.initialize_state(
torch.zeros(beam, device=h.device, dtype=h.dtype)
        ) # [L, B, H], [L, B, H] for LSTMs
# compute u_max as either a specific static limit,
# or a multiple of current `h_length` dynamically.
if type(self.alsd_max_target_length) == float:
u_max = int(self.alsd_max_target_length * h_length)
else:
u_max = int(self.alsd_max_target_length)
# Initialize first hypothesis for the beam (blank)
B = [
Hypothesis(
y_sequence=[self.blank],
score=0.0,
dec_state=self.decoder.batch_select_state(beam_state, 0),
timestep=[-1],
length=0,
)
]
# Initialize alignments
if self.preserve_alignments:
B[0].alignments = [[]]
final = []
cache = {}
# ALSD runs for T + U_max steps
for i in range(h_length + u_max):
# Update caches
A = []
B_ = []
h_states = []
# preserve the list of batch indices which are added into the list
# and those which are removed from the list
# This is necessary to perform state updates in the correct batch indices later
batch_ids = list(range(len(B))) # initialize as a list of all batch ids
batch_removal_ids = [] # update with sample ids which are removed
for bid, hyp in enumerate(B):
u = len(hyp.y_sequence) - 1
t = i - u
if t > (h_length - 1):
batch_removal_ids.append(bid)
continue
B_.append(hyp)
h_states.append((t, h[t]))
if B_:
# Compute the subset of batch ids which were *not* removed from the list above
sub_batch_ids = None
if len(B_) != beam:
sub_batch_ids = batch_ids
for id in batch_removal_ids:
# sub_batch_ids contains list of ids *that were not removed*
sub_batch_ids.remove(id)
# extract the states of the sub batch only.
if isinstance(self.decoder, RNNTDecoder):
# LSTM decoder, state is [layer x batch x hidden]
beam_state_ = [
beam_state[state_id][:, sub_batch_ids, :] for state_id in range(len(beam_state))
]
elif isinstance(self.decoder, StatelessTransducerDecoder):
# stateless decoder, state is [batch x hidden]
beam_state_ = [beam_state[state_id][sub_batch_ids, :] for state_id in range(len(beam_state))]
else:
raise NotImplementedError("Unknown decoder type.")
else:
# If entire batch was used (none were removed), simply take all the states
beam_state_ = beam_state
# Decode a batch/sub-batch of beam states and scores
beam_y, beam_state_, beam_lm_tokens = self.decoder.batch_score_hypothesis(B_, cache, beam_state_)
# If only a subset of batch ids were updated (some were removed)
if sub_batch_ids is not None:
# For each state in the RNN (2 for LSTM)
for state_id in range(len(beam_state)):
# Update the current batch states with the sub-batch states (in the correct indices)
# These indices are specified by sub_batch_ids, the ids of samples which were updated.
if isinstance(self.decoder, RNNTDecoder):
# LSTM decoder, state is [layer x batch x hidden]
beam_state[state_id][:, sub_batch_ids, :] = beam_state_[state_id][...]
elif isinstance(self.decoder, StatelessTransducerDecoder):
# stateless decoder, state is [batch x hidden]
beam_state[state_id][sub_batch_ids, :] = beam_state_[state_id][...]
else:
raise NotImplementedError("Unknown decoder type.")
else:
# If entire batch was updated, simply update all the states
beam_state = beam_state_
# h_states = list of [t, h[t]]
# so h[1] here is a h[t] of shape [D]
# Simply stack all of the h[t] within the sub_batch/batch (T <= beam)
h_enc = torch.stack([h[1] for h in h_states]) # [T=beam, D]
h_enc = h_enc.unsqueeze(1) # [B=beam, T=1, D]; batch over the beams
# Extract the log probabilities and the predicted tokens
beam_logp = torch.log_softmax(
self.joint.joint(h_enc, beam_y) / self.softmax_temperature, dim=-1
) # [B=beam, 1, 1, V + 1]
beam_logp = beam_logp[:, 0, 0, :] # [B=beam, V + 1]
beam_topk = beam_logp[:, ids].topk(beam, dim=-1)
for j, hyp in enumerate(B_):
# For all updated samples in the batch, add it as the blank token
# In this step, we dont add a token but simply update score
new_hyp = Hypothesis(
score=(hyp.score + float(beam_logp[j, self.blank])),
y_sequence=hyp.y_sequence[:],
dec_state=hyp.dec_state,
lm_state=hyp.lm_state,
timestep=hyp.timestep[:],
length=i,
)
if self.preserve_alignments:
new_hyp.alignments = copy.deepcopy(hyp.alignments)
# Add the alignment of blank at this step
new_hyp.alignments[-1].append(
(beam_logp[j].clone().cpu(), torch.tensor(self.blank, dtype=torch.int32))
)
# Add blank prediction to A
A.append(new_hyp)
# If the prediction "timestep" t has reached the length of the input sequence
# we can add it to the "finished" hypothesis list.
if h_states[j][0] == (h_length - 1):
final.append(new_hyp)
# Here, we carefully select the indices of the states that we want to preserve
# for the next token (non-blank) update.
if sub_batch_ids is not None:
h_states_idx = sub_batch_ids[j]
else:
h_states_idx = j
# for each current hypothesis j
# extract the top token score and top token id for the jth hypothesis
for logp, k in zip(beam_topk[0][j], beam_topk[1][j] + index_incr):
# create new hypothesis and store in A
# Note: This loop does *not* include the blank token!
new_hyp = Hypothesis(
score=(hyp.score + float(logp)),
y_sequence=(hyp.y_sequence[:] + [int(k)]),
dec_state=self.decoder.batch_select_state(beam_state, h_states_idx),
lm_state=hyp.lm_state,
timestep=hyp.timestep[:] + [i],
length=i,
)
if self.preserve_alignments:
new_hyp.alignments = copy.deepcopy(hyp.alignments)
# Add the alignment of Uj for this beam candidate at this step
new_hyp.alignments[-1].append(
(beam_logp[j].clone().cpu(), torch.tensor(new_hyp.y_sequence[-1], dtype=torch.int32))
)
A.append(new_hyp)
# Prune and recombine same hypothesis
# This may cause next beam to be smaller than max beam size
# Therefore larger beam sizes may be required for better decoding.
B = sorted(A, key=lambda x: x.score, reverse=True)[:beam]
B = self.recombine_hypotheses(B)
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for B_i in B:
# Check if the last token emitted at last timestep was a blank
# If so, move to next timestep
logp, label = B_i.alignments[-1][-1] # The last alignment of this step
if int(label) == self.blank:
B_i.alignments.append([]) # blank buffer for next timestep
# If B_ is empty list, then we may be able to early exit
elif len(batch_ids) == len(batch_removal_ids):
# break early
break
if final:
# Remove trailing empty list of alignments
if self.preserve_alignments:
for h in final:
if len(h.alignments[-1]) == 0:
del h.alignments[-1]
return self.sort_nbest(final)
else:
# Remove trailing empty list of alignments
if self.preserve_alignments:
for h in B:
if len(h.alignments[-1]) == 0:
del h.alignments[-1]
return B
def modified_adaptive_expansion_search(
self, h: torch.Tensor, encoded_lengths: torch.Tensor, partial_hypotheses: Optional[Hypothesis] = None
) -> List[Hypothesis]:
"""
Based on/modified from https://ieeexplore.ieee.org/document/9250505
Args:
h: Encoded speech features (1, T_max, D_enc)
Returns:
nbest_hyps: N-best decoding results
"""
if partial_hypotheses is not None:
raise NotImplementedError("`partial_hypotheses` support is not supported")
h = h[0] # [T, D]
# prepare the batched beam states
beam = min(self.beam_size, self.vocab_size)
beam_state = self.decoder.initialize_state(
torch.zeros(beam, device=h.device, dtype=h.dtype)
        ) # [L, B, H], [L, B, H] for LSTMs
# Initialize first hypothesis for the beam (blank)
init_tokens = [
Hypothesis(
y_sequence=[self.blank],
score=0.0,
dec_state=self.decoder.batch_select_state(beam_state, 0),
timestep=[-1],
length=0,
)
]
cache = {}
# Initialize alignment buffer
if self.preserve_alignments:
for hyp in init_tokens:
hyp.alignments = [[]]
# Decode a batch of beam states and scores
beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score_hypothesis(init_tokens, cache, beam_state)
state = self.decoder.batch_select_state(beam_state, 0)
# Setup ngram LM:
if self.ngram_lm:
init_lm_state = kenlm.State()
self.ngram_lm.BeginSentenceWrite(init_lm_state)
# TODO: Setup LM
if self.language_model is not None:
# beam_lm_states, beam_lm_scores = self.lm.buff_predict(
# None, beam_lm_tokens, 1
# )
# lm_state = select_lm_state(
# beam_lm_states, 0, self.lm_layers, self.is_wordlm
# )
# lm_scores = beam_lm_scores[0]
raise NotImplementedError()
else:
lm_state = None
lm_scores = None
# Initialize first hypothesis for the beam (blank) for kept hypotheses
kept_hyps = [
Hypothesis(
y_sequence=[self.blank],
score=0.0,
dec_state=state,
dec_out=[beam_dec_out[0]],
lm_state=lm_state,
lm_scores=lm_scores,
timestep=[-1],
length=0,
)
]
if self.ngram_lm:
kept_hyps[0].ngram_lm_state = init_lm_state
# Initialize alignment buffer
if self.preserve_alignments:
for hyp in kept_hyps:
hyp.alignments = [[]]
for t in range(encoded_lengths):
enc_out_t = h[t : t + 1].unsqueeze(0) # [1, 1, D]
# Perform prefix search to obtain hypothesis
hyps = self.prefix_search(
sorted(kept_hyps, key=lambda x: len(x.y_sequence), reverse=True),
enc_out_t,
prefix_alpha=self.maes_prefix_alpha,
) # type: List[Hypothesis]
kept_hyps = []
# Prepare output tensor
beam_enc_out = enc_out_t
# List that contains the blank token emisions
list_b = []
duplication_check = [hyp.y_sequence for hyp in hyps]
# Repeat for number of mAES steps
for n in range(self.maes_num_steps):
# Pack the decoder logits for all current hypothesis
beam_dec_out = torch.stack([h.dec_out[-1] for h in hyps]) # [H, 1, D]
# Extract the log probabilities
ytm, ilm_ytm = self.resolve_joint_output(beam_enc_out, beam_dec_out)
beam_logp, beam_idx = ytm.topk(self.max_candidates, dim=-1)
beam_logp = beam_logp[:, 0, 0, :] # [B, V + 1]
beam_idx = beam_idx[:, 0, 0, :] # [B, max_candidates]
# Compute k expansions for all the current hypotheses
k_expansions = select_k_expansions(
hyps, beam_idx, beam_logp, self.maes_expansion_gamma, self.maes_expansion_beta
)
# List that contains the hypothesis after prefix expansion
list_exp = []
for i, hyp in enumerate(hyps): # For all hypothesis
for k, new_score in k_expansions[i]: # for all expansion within these hypothesis
new_hyp = Hypothesis(
y_sequence=hyp.y_sequence[:],
score=new_score,
dec_out=hyp.dec_out[:],
dec_state=hyp.dec_state,
lm_state=hyp.lm_state,
lm_scores=hyp.lm_scores,
timestep=hyp.timestep[:],
length=t,
)
if self.ngram_lm:
new_hyp.ngram_lm_state = hyp.ngram_lm_state
# If the expansion was for blank
if k == self.blank:
list_b.append(new_hyp)
else:
# If the expansion was a token
# new_hyp.y_sequence.append(int(k))
if (new_hyp.y_sequence + [int(k)]) not in duplication_check:
new_hyp.y_sequence.append(int(k))
new_hyp.timestep.append(t)
# Setup ngram LM:
if self.ngram_lm:
lm_score, new_hyp.ngram_lm_state = self.compute_ngram_score(
hyp.ngram_lm_state, int(k)
)
if self.hat_subtract_ilm:
new_hyp.score += self.ngram_lm_alpha * lm_score - float(
self.hat_ilm_weight * ilm_ytm[i, 0, 0, k]
)
else:
new_hyp.score += self.ngram_lm_alpha * lm_score
# TODO: Setup LM
if self.language_model is not None:
# new_hyp.score += self.lm_weight * float(
# hyp.lm_scores[k]
# )
pass
list_exp.append(new_hyp)
# Preserve alignments
if self.preserve_alignments:
new_hyp.alignments = copy.deepcopy(hyp.alignments)
if k == self.blank:
new_hyp.alignments[-1].append(
(beam_logp[i].clone().cpu(), torch.tensor(self.blank, dtype=torch.int32)),
)
else:
new_hyp.alignments[-1].append(
(
beam_logp[i].clone().cpu(),
torch.tensor(new_hyp.y_sequence[-1], dtype=torch.int32),
),
)
# If there were no token expansions in any of the hypotheses,
# Early exit
if not list_exp:
kept_hyps = sorted(list_b, key=lambda x: x.score, reverse=True)[:beam]
                    # Update alignments with next step
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for h_i in kept_hyps:
# Check if the last token emitted at last timestep was a blank
# If so, move to next timestep
logp, label = h_i.alignments[-1][-1] # The last alignment of this step
if int(label) == self.blank:
h_i.alignments.append([]) # blank buffer for next timestep
# Early exit
break
else:
                    # Initialize the beam states for the hypotheses in the expansion list
beam_state = self.decoder.batch_initialize_states(
beam_state,
[hyp.dec_state for hyp in list_exp],
# [hyp.y_sequence for hyp in list_exp], # <look into when this is necessary>
)
# Decode a batch of beam states and scores
beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score_hypothesis(
list_exp,
cache,
beam_state,
# self.language_model is not None,
)
# TODO: Setup LM
if self.language_model is not None:
# beam_lm_states = create_lm_batch_states(
# [hyp.lm_state for hyp in list_exp],
# self.lm_layers,
# self.is_wordlm,
# )
# beam_lm_states, beam_lm_scores = self.lm.buff_predict(
# beam_lm_states, beam_lm_tokens, len(list_exp)
# )
pass
                    # If this isn't the last mAES step
if n < (self.maes_num_steps - 1):
# For all expanded hypothesis
for i, hyp in enumerate(list_exp):
# Preserve the decoder logits for the current beam
hyp.dec_out.append(beam_dec_out[i])
hyp.dec_state = self.decoder.batch_select_state(beam_state, i)
# TODO: Setup LM
if self.language_model is not None:
# hyp.lm_state = select_lm_state(
# beam_lm_states, i, self.lm_layers, self.is_wordlm
# )
# hyp.lm_scores = beam_lm_scores[i]
pass
# Copy the expanded hypothesis
hyps = list_exp[:]
                        # Update alignments with next step
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for h_i in hyps:
# Check if the last token emitted at last timestep was a blank
# If so, move to next timestep
logp, label = h_i.alignments[-1][-1] # The last alignment of this step
if int(label) == self.blank:
h_i.alignments.append([]) # blank buffer for next timestep
else:
# Extract the log probabilities
beam_logp, _ = self.resolve_joint_output(beam_enc_out, beam_dec_out)
beam_logp = beam_logp[:, 0, 0, :]
# For all expansions, add the score for the blank label
for i, hyp in enumerate(list_exp):
hyp.score += float(beam_logp[i, self.blank])
# Preserve the decoder's output and state
hyp.dec_out.append(beam_dec_out[i])
hyp.dec_state = self.decoder.batch_select_state(beam_state, i)
# TODO: Setup LM
if self.language_model is not None:
# hyp.lm_state = select_lm_state(
# beam_lm_states, i, self.lm_layers, self.is_wordlm
# )
# hyp.lm_scores = beam_lm_scores[i]
pass
# Finally, update the kept hypothesis of sorted top Beam candidates
kept_hyps = sorted(list_b + list_exp, key=lambda x: x.score, reverse=True)[:beam]
                        # Update alignments with next step
if self.preserve_alignments:
# convert Ti-th logits into a torch array
for h_i in kept_hyps:
# Check if the last token emitted at last timestep was a blank
# If so, move to next timestep
logp, label = h_i.alignments[-1][-1] # The last alignment of this step
if int(label) == self.blank:
h_i.alignments.append([]) # blank buffer for next timestep
# Remove trailing empty list of alignments
if self.preserve_alignments:
for h in kept_hyps:
if len(h.alignments[-1]) == 0:
del h.alignments[-1]
# Sort the hypothesis with best scores
return self.sort_nbest(kept_hyps)
def recombine_hypotheses(self, hypotheses: List[Hypothesis]) -> List[Hypothesis]:
"""Recombine hypotheses with equivalent output sequence.
Args:
hypotheses (list): list of hypotheses
Returns:
final (list): list of recombined hypotheses
"""
final = []
for hyp in hypotheses:
seq_final = [f.y_sequence for f in final if f.y_sequence]
if hyp.y_sequence in seq_final:
seq_pos = seq_final.index(hyp.y_sequence)
final[seq_pos].score = np.logaddexp(final[seq_pos].score, hyp.score)
else:
final.append(hyp)
        return final
def resolve_joint_output(self, enc_out: torch.Tensor, dec_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Resolve output types for RNNT and HAT joint models
"""
joint_output = self.joint.joint(enc_out, dec_out)
if torch.is_tensor(joint_output):
ytm = torch.log_softmax(joint_output / self.softmax_temperature, dim=-1)
ilm_ytm = None
elif self.hat_subtract_ilm and isinstance(joint_output, HATJointOutput):
ytm, ilm_ytm = joint_output.hat_logprobs, joint_output.ilm_logprobs
else:
raise TypeError(
f"Joint output ({type(joint_output)}) must be torch.Tensor or HATJointOutput in case of HAT joint"
)
return ytm, ilm_ytm
def prefix_search(
self, hypotheses: List[Hypothesis], enc_out: torch.Tensor, prefix_alpha: int
) -> List[Hypothesis]:
"""
Prefix search for NSC and mAES strategies.
Based on https://arxiv.org/pdf/1211.3711.pdf
"""
for j, hyp_j in enumerate(hypotheses[:-1]):
for hyp_i in hypotheses[(j + 1) :]:
curr_id = len(hyp_j.y_sequence)
pref_id = len(hyp_i.y_sequence)
if is_prefix(hyp_j.y_sequence, hyp_i.y_sequence) and (curr_id - pref_id) <= prefix_alpha:
logp, ilm_logp = self.resolve_joint_output(enc_out, hyp_i.dec_out[-1])
logp = logp[0, 0, 0, :]
curr_score = hyp_i.score + float(logp[hyp_j.y_sequence[pref_id]])
# Setup ngram LM:
if self.ngram_lm:
lm_score, next_state = self.compute_ngram_score(
hyp_i.ngram_lm_state, int(hyp_j.y_sequence[pref_id])
)
if self.hat_subtract_ilm:
curr_score += self.ngram_lm_alpha * lm_score - self.hat_ilm_weight * float(
ilm_logp[0, 0, hyp_j.y_sequence[pref_id]]
)
else:
curr_score += self.ngram_lm_alpha * lm_score
for k in range(pref_id, (curr_id - 1)):
logp, ilm_logp = self.resolve_joint_output(enc_out, hyp_j.dec_out[k])
logp = logp[0, 0, 0, :]
curr_score += float(logp[hyp_j.y_sequence[k + 1]])
# Setup ngram LM:
if self.ngram_lm:
lm_score, next_state = self.compute_ngram_score(next_state, int(hyp_j.y_sequence[k + 1]))
if self.hat_subtract_ilm:
curr_score += self.ngram_lm_alpha * lm_score - self.hat_ilm_weight * float(
ilm_logp[0, 0, hyp_j.y_sequence[k + 1]]
)
else:
curr_score += self.ngram_lm_alpha * lm_score
hyp_j.score = np.logaddexp(hyp_j.score, curr_score)
return hypotheses
def compute_ngram_score(self, current_lm_state: "kenlm.State", label: int) -> Tuple[float, "kenlm.State"]:
"""
Score computation for kenlm ngram language model.
"""
if self.token_offset:
label = chr(label + self.token_offset)
else:
label = str(label)
next_state = kenlm.State()
lm_score = self.ngram_lm.BaseScore(current_lm_state, label, next_state)
lm_score *= 1.0 / np.log10(np.e)
return lm_score, next_state
def set_decoding_type(self, decoding_type: str):
# Please check train_kenlm.py in scripts/asr_language_modeling/ to find out why we need
# TOKEN_OFFSET for BPE-based models
if decoding_type == 'subword':
from nemo.collections.asr.parts.submodules.ctc_beam_decoding import DEFAULT_TOKEN_OFFSET
self.token_offset = DEFAULT_TOKEN_OFFSET
@dataclass
class BeamRNNTInferConfig:
beam_size: int
search_type: str = 'default'
score_norm: bool = True
return_best_hypothesis: bool = True
tsd_max_sym_exp_per_step: Optional[int] = 50
alsd_max_target_len: float = 1.0
nsc_max_timesteps_expansion: int = 1
nsc_prefix_alpha: int = 1
maes_num_steps: int = 2
maes_prefix_alpha: int = 1
maes_expansion_gamma: float = 2.3
maes_expansion_beta: int = 2
language_model: Optional[Dict[str, Any]] = None
softmax_temperature: float = 1.0
preserve_alignments: bool = False
ngram_lm_model: Optional[str] = None
ngram_lm_alpha: Optional[float] = 0.0
hat_subtract_ilm: bool = False
hat_ilm_weight: float = 0.0
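# Illustrative sketch (hedged): a config describing a mAES search that satisfies the constraints
# checked in BeamRNNTInfer.__init__ (maes_num_steps >= 2 and beam_size + maes_expansion_beta not
# exceeding the decoder vocabulary size, which is read from the decoder model rather than from
# this dataclass). Values follow the defaults documented above and are not tuned recommendations.
def _example_build_maes_config() -> BeamRNNTInferConfig:
    return BeamRNNTInferConfig(
        beam_size=4,
        search_type='maes',
        score_norm=True,
        return_best_hypothesis=True,
        maes_num_steps=2,          # two adaptive expansion steps, as suggested in the docstring
        maes_prefix_alpha=1,       # keep the prefix search cheap
        maes_expansion_beta=2,     # candidates per step = beam_size + maes_expansion_beta
        maes_expansion_gamma=2.3,  # prune-by-value margin from the paper
        softmax_temperature=1.0,
        preserve_alignments=False,
    )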
|
NeMo-main
|
nemo/collections/asr/parts/submodules/rnnt_beam_decoding.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Callable, Iterable, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.init import _calculate_correct_fan
from torch.nn.modules.utils import _single
from nemo.collections.common.parts.utils import activation_registry
from nemo.core.classes.mixins import AccessMixin
from nemo.core.classes.mixins.adapter_mixins import AdapterModuleMixin
from nemo.utils import logging
try:
from pytorch_quantization import calib
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import quant_modules
from pytorch_quantization.tensor_quant import QuantDescriptor
PYTORCH_QUANTIZATION_AVAILABLE = True
except ImportError:
PYTORCH_QUANTIZATION_AVAILABLE = False
jasper_activations = activation_registry
def tds_uniform_(tensor, mode='fan_in'):
"""
Uniform Initialization from the paper [Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions](https://www.isca-speech.org/archive/Interspeech_2019/pdfs/2460.pdf)
Normalized to -
.. math::
\\text{bound} = \\text{2} \\times \\sqrt{\\frac{1}{\\text{fan\\_mode}}}
Args:
tensor: an n-dimensional `torch.Tensor`
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
"""
fan = _calculate_correct_fan(tensor, mode)
gain = 2.0 # sqrt(4.0) = 2
std = gain / math.sqrt(fan) # sqrt(4.0 / fan_in)
bound = std # Calculate uniform bounds from standard deviation
with torch.no_grad():
return tensor.uniform_(-bound, bound)
def tds_normal_(tensor, mode='fan_in'):
"""
Normal Initialization from the paper [Sequence-to-Sequence Speech Recognition with Time-Depth Separable Convolutions](https://www.isca-speech.org/archive/Interspeech_2019/pdfs/2460.pdf)
Normalized to -
.. math::
\\text{bound} = \\text{2} \\times \\sqrt{\\frac{1}{\\text{fan\\_mode}}}
Args:
tensor: an n-dimensional `torch.Tensor`
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
"""
fan = _calculate_correct_fan(tensor, mode)
gain = 2.0
std = gain / math.sqrt(fan) # sqrt(4.0 / fan_in)
bound = std # Calculate uniform bounds from standard deviation
with torch.no_grad():
return tensor.normal_(0.0, bound)
def init_weights(m, mode: Optional[str] = 'xavier_uniform'):
if isinstance(m, MaskedConv1d):
init_weights(m.conv, mode)
if isinstance(m, (nn.Conv1d, nn.Linear)):
if mode is not None:
if mode == 'xavier_uniform':
nn.init.xavier_uniform_(m.weight, gain=1.0)
elif mode == 'xavier_normal':
nn.init.xavier_normal_(m.weight, gain=1.0)
elif mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
elif mode == 'tds_uniform':
tds_uniform_(m.weight)
elif mode == 'tds_normal':
tds_normal_(m.weight)
else:
raise ValueError("Unknown Initialization mode: {0}".format(mode))
elif isinstance(m, nn.BatchNorm1d):
if m.track_running_stats:
m.running_mean.zero_()
m.running_var.fill_(1)
m.num_batches_tracked.zero_()
if m.affine:
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
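# Illustrative sketch (assumption: a plain Conv1d layer is a fair stand-in for any Jasper
# sub-module): init_weights is meant to be applied recursively through nn.Module.apply.
def _example_init_weights() -> nn.Conv1d:
    layer = nn.Conv1d(64, 64, kernel_size=3)
    layer.apply(lambda m: init_weights(m, mode='tds_uniform'))
    return layer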
def compute_new_kernel_size(kernel_size, kernel_width):
new_kernel_size = max(int(kernel_size * kernel_width), 1)
# If kernel is even shape, round up to make it odd
if new_kernel_size % 2 == 0:
new_kernel_size += 1
return new_kernel_size
def get_same_padding(kernel_size, stride, dilation) -> int:
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
return (dilation * (kernel_size - 1)) // 2
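# Worked example (illustrative): for stride 1 and dilation 1, "same" padding keeps the output
# length equal to the input length, e.g. kernel_size=11 gives (1 * (11 - 1)) // 2 = 5 per side.
def _example_same_padding() -> int:
    pad = get_same_padding(kernel_size=11, stride=1, dilation=1)
    assert pad == 5
    return pad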
def get_asymtric_padding(kernel_size, stride, dilation, future_context):
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
left_context = kernel_size - 1 - future_context
right_context = future_context
symmetric_padding = get_same_padding(kernel_size, stride, dilation)
if kernel_size <= future_context:
# kernel size is smaller than future context, equivalent to using entire context of kernel
# simply return symmetric padding for this scenario
logging.warning(
f"Future context window is larger than the kernel size!\n"
f"Left context = {left_context} | Right context = greater than {right_context} | "
f"Kernel size = {kernel_size}\n"
f"Switching to symmetric padding (left context = right context = {symmetric_padding})"
)
return symmetric_padding
if left_context < symmetric_padding:
logging.warning(
f"Future context window is larger than half the kernel size!\n"
f"Conv layer therefore uses more future information than past to compute its output!\n"
f"Left context = {left_context} | Right context = {right_context} | "
f"Kernel size = {kernel_size}"
)
if dilation > 1:
left_context = dilation * kernel_size - 1 - dilation * future_context
right_context = dilation * future_context
return (left_context, right_context)
return (left_context, right_context)
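# Worked example (illustrative): with kernel_size=15 and future_context=4 the padding becomes
# (K - 1 - future_context, future_context) = (10, 4), i.e. 10 past frames and 4 future frames.
def _example_asymmetric_padding() -> Tuple[int, int]:
    left, right = get_asymtric_padding(kernel_size=15, stride=1, dilation=1, future_context=4)
    assert (left, right) == (10, 4)
    return left, right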
@torch.jit.script
def _se_pool_step_script_infer(x: torch.Tensor, context_window: int, mask: torch.Tensor):
"""
Calculates the masked average over padded limited context segment during inference mode.
Args:
x: Input tensor. Shape = [B, C, T]
context_window: Integer context window, must be 0 or greater.
mask: Mask tensor, 1 represents value index, 0 represents padded index. Shape = [B, 1, T].
Returns:
A tensor reduced via masked average pool over some limited context. Shape = [B, C, 1]
"""
timesteps = x.shape[-1]
if timesteps < context_window:
y = torch.sum(x, dim=-1, keepdim=True) / mask.sum(dim=-1, keepdim=True).to(x.dtype)
else:
# << During inference prefer to use entire context >>
# x = x[:, :, :context_window] # [B, C, context_window]
# mask = mask[:, :, :context_window] # [B, 1, context_window]
#
# mask = mask.sum(dim=-1, keepdim=True).to(x.dtype) # [B, C, 1]
# y = x.sum(dim=-1, keepdim=True) # [B, 1, 1]
# y = y / (mask + 1e-8) # [B, C, 1]
y = torch.sum(x, dim=-1, keepdim=True) / mask.sum(dim=-1, keepdim=True).to(x.dtype)
return y
@torch.jit.script
def _se_pool_step_script_train(x: torch.Tensor, context_window: int, mask: torch.Tensor):
"""
Calculates the masked average over padded limited context segment during training mode.
Randomly slices a segment of length `context_window` from signal+padded input tensor across all channels and
uses it for computing masked limited context.
Args:
x: Input tensor. Shape = [B, C, T]
context_window: Integer context window, must be 0 or greater.
mask: Mask tensor, 1 represents value index, 0 represents padded index. Shape = [B, 1, T].
Returns:
A tensor reduced via masked average pool over some limited context. Shape = [B, C, 1]
"""
timesteps = x.shape[-1]
if timesteps < context_window:
y = torch.sum(x, dim=-1, keepdim=True) / mask.sum(dim=-1, keepdim=True).to(x.dtype)
else:
start_idx = torch.randint(0, timesteps - context_window, size=[1], dtype=torch.int32)[0]
x = x[:, :, start_idx : (start_idx + context_window)] # [B, C, context_window]
mask = mask[:, :, start_idx : (start_idx + context_window)] # [B, 1, context_window]
        mask = mask.sum(dim=-1, keepdim=True).to(x.dtype)  # [B, 1, 1]
        y = x.sum(dim=-1, keepdim=True)  # [B, C, 1]
        y = y / (mask + 1e-8)  # [B, C, 1]
return y
@torch.jit.script
def _masked_conv_init_lens(lens: torch.Tensor, current_maxlen: int, original_maxlen: torch.Tensor):
if current_maxlen > original_maxlen:
new_lens = torch.arange(current_maxlen)
new_max_lens = torch.tensor(current_maxlen)
else:
new_lens = lens
new_max_lens = original_maxlen
return new_lens, new_max_lens
class MaskedConv1d(nn.Module):
__constants__ = ["use_conv_mask", "real_out_channels", "heads"]
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
heads=-1,
bias=False,
use_mask=True,
quantize=False,
):
super(MaskedConv1d, self).__init__()
if not (heads == -1 or groups == in_channels):
raise ValueError("Only use heads for depthwise convolutions")
self.real_out_channels = out_channels
if heads != -1:
in_channels = heads
out_channels = heads
groups = heads
# preserve original padding
self._padding = padding
# if padding is a tuple/list, it is considered as asymmetric padding
if type(padding) in (tuple, list):
self.pad_layer = nn.ConstantPad1d(padding, value=0.0)
# reset padding for conv since pad_layer will handle this
padding = 0
else:
self.pad_layer = None
if PYTORCH_QUANTIZATION_AVAILABLE and quantize:
self.conv = quant_nn.QuantConv1d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
elif not PYTORCH_QUANTIZATION_AVAILABLE and quantize:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
else:
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
self.use_mask = use_mask
self.heads = heads
# Calculations for "same" padding cache
self.same_padding = (self.conv.stride[0] == 1) and (
2 * self.conv.padding[0] == self.conv.dilation[0] * (self.conv.kernel_size[0] - 1)
)
if self.pad_layer is None:
self.same_padding_asymmetric = False
else:
self.same_padding_asymmetric = (self.conv.stride[0] == 1) and (
sum(self._padding) == self.conv.dilation[0] * (self.conv.kernel_size[0] - 1)
)
# `self.lens` caches consecutive integers from 0 to `self.max_len` that are used to compute the mask for a
# batch. Recomputed to bigger size as needed. Stored on a device of the latest batch lens.
if self.use_mask:
self.max_len = torch.tensor(0)
self.lens = torch.tensor(0)
def get_seq_len(self, lens):
if self.same_padding or self.same_padding_asymmetric:
return lens
if self.pad_layer is None:
return (
torch.div(
lens + 2 * self.conv.padding[0] - self.conv.dilation[0] * (self.conv.kernel_size[0] - 1) - 1,
self.conv.stride[0],
rounding_mode='trunc',
)
+ 1
)
else:
return (
torch.div(
lens + sum(self._padding) - self.conv.dilation[0] * (self.conv.kernel_size[0] - 1) - 1,
self.conv.stride[0],
rounding_mode='trunc',
)
+ 1
)
def forward(self, x, lens):
if self.use_mask:
# Generally will be called by ConvASREncoder, but kept as single gpu backup.
if x.size(2) > self.max_len:
self.update_masked_length(x.size(2), device=lens.device)
x = self.mask_input(x, lens)
# Update lengths
lens = self.get_seq_len(lens)
        # asymmetric pad if necessary
if self.pad_layer is not None:
x = self.pad_layer(x)
sh = x.shape
if self.heads != -1:
x = x.view(-1, self.heads, sh[-1])
out = self.conv(x)
if self.heads != -1:
out = out.view(sh[0], self.real_out_channels, -1)
return out, lens
def update_masked_length(self, max_len, seq_range=None, device=None):
if seq_range is None:
self.lens, self.max_len = _masked_conv_init_lens(self.lens, max_len, self.max_len)
self.lens = self.lens.to(device)
else:
self.lens = seq_range
self.max_len = max_len
def mask_input(self, x, lens):
max_len = x.size(2)
mask = self.lens[:max_len].unsqueeze(0).to(lens.device) < lens.unsqueeze(1)
x = x * mask.unsqueeze(1).to(device=x.device)
return x
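# Illustrative usage sketch (shapes and hyper-parameters below are arbitrary, not NeMo defaults):
# the layer zeroes padded frames before the convolution and returns updated sequence lengths.
def _example_masked_conv1d() -> Tuple[torch.Tensor, torch.Tensor]:
    conv = MaskedConv1d(in_channels=64, out_channels=128, kernel_size=11, stride=2, padding=5, use_mask=True)
    x = torch.randn(4, 64, 200)               # [B, C, T]
    lens = torch.tensor([200, 180, 150, 90])  # valid frames per example
    out, out_lens = conv(x, lens)             # out: [4, 128, 100]; out_lens accounts for stride 2
    return out, out_lens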
class GroupShuffle(nn.Module):
def __init__(self, groups, channels):
super(GroupShuffle, self).__init__()
self.groups = groups
self.channels_per_group = channels // groups
def forward(self, x):
sh = x.shape
x = x.view(-1, self.groups, self.channels_per_group, sh[-1])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.groups * self.channels_per_group, sh[-1])
return x
class SqueezeExcite(nn.Module):
def __init__(
self,
channels: int,
reduction_ratio: int,
context_window: int = -1,
interpolation_mode: str = 'nearest',
activation: Optional[Callable] = None,
quantize: bool = False,
):
"""
Squeeze-and-Excitation sub-module.
Args:
channels: Input number of channels.
reduction_ratio: Reduction ratio for "squeeze" layer.
context_window: Integer number of timesteps that the context
should be computed over, using stride 1 average pooling.
If value < 1, then global context is computed.
interpolation_mode: Interpolation mode of timestep dimension.
Used only if context window is > 1.
The modes available for resizing are: `nearest`, `linear` (3D-only),
`bilinear`, `area`
activation: Intermediate activation function used. Must be a
callable activation function.
"""
super(SqueezeExcite, self).__init__()
self.interpolation_mode = interpolation_mode
self._quantize = quantize
self.pool = None # prepare a placeholder which will be updated
if activation is None:
activation = nn.ReLU(inplace=True)
if PYTORCH_QUANTIZATION_AVAILABLE and quantize:
self.fc = nn.Sequential(
quant_nn.QuantLinear(channels, channels // reduction_ratio, bias=False),
activation,
quant_nn.QuantLinear(channels // reduction_ratio, channels, bias=False),
)
elif not PYTORCH_QUANTIZATION_AVAILABLE and quantize:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
else:
self.fc = nn.Sequential(
nn.Linear(channels, channels // reduction_ratio, bias=False),
activation,
nn.Linear(channels // reduction_ratio, channels, bias=False),
)
self.gap = nn.AdaptiveAvgPool1d(1)
# Set default context window
self.change_context_window(context_window=context_window)
# Set default max sequence length
self.set_max_len(16)
def forward(self, x, lengths):
return self.forward_for_export(x, lengths)
def forward_for_export(self, x, lengths):
        # The use of negative indices on the transpose allows for expanded SqueezeExcite
max_len = x.shape[-1]
if max_len > self.max_len:
self.set_max_len(max_len)
dtype = x.dtype
# Computes in float32 to avoid instabilities during training with AMP.
with torch.cuda.amp.autocast(enabled=False):
# Create sample mask - 1 represents value, 0 represents pad
mask = self.make_pad_mask(lengths, max_audio_length=max_len, device=x.device)
mask = ~mask # 0 represents value, 1 represents pad
x = x.float() # For stable AMP, SE must be computed at fp32.
x.masked_fill_(mask, 0.0) # mask padded values explicitly to 0
y = self._se_pool_step(x, mask) # [B, C, 1]
y = y.transpose(1, -1) # [B, 1, C]
y = self.fc(y) # [B, 1, C]
y = y.transpose(1, -1) # [B, C, 1]
# Note: Keep for future, in case we improve WER from doing so.
# if self.context_window >= 0:
# y = F.interpolate(y, size=x.shape[-1], mode=self.interpolation_mode)
y = torch.sigmoid(y)
y = x * y
return y, lengths
def _se_pool_step(self, x, mask):
# Negate mask back to represent 1 for signal and 0 for padded timestep.
mask = ~mask
if self.context_window < 0:
# [B, C, 1] - Masked Average over value + padding.
y = torch.sum(x, dim=-1, keepdim=True) / mask.sum(dim=-1, keepdim=True).type(x.dtype)
else:
# [B, C, 1] - Masked Average over value + padding with limited context.
# During training randomly subsegments a context_window chunk of timesteps.
# During inference selects only the first context_window chunk of timesteps.
if self.training:
y = _se_pool_step_script_train(x, self.context_window, mask)
else:
y = _se_pool_step_script_infer(x, self.context_window, mask)
return y
def set_max_len(self, max_len, seq_range=None):
""" Sets maximum input length.
Pre-calculates internal seq_range mask.
"""
self.max_len = max_len
if seq_range is None:
device = next(self.parameters()).device
seq_range = torch.arange(0, self.max_len, device=device)
if hasattr(self, 'seq_range'):
self.seq_range = seq_range
else:
self.register_buffer('seq_range', seq_range, persistent=False)
def make_pad_mask(self, seq_lens, max_audio_length, device=None):
"""Make masking for padding."""
if device and self.seq_range.device != device:
self.seq_range = self.seq_range.to(device)
if self.seq_range.device != seq_lens.device:
seq_lens = seq_lens.to(self.seq_range.device)
mask = self.seq_range[:max_audio_length].expand(seq_lens.size(0), -1) < seq_lens.unsqueeze(-1) # [B, T]; bool
mask = mask.unsqueeze(1) # [B, 1, T]
return mask
def change_context_window(self, context_window: int):
"""
Update the context window of the SqueezeExcitation module, in-place if possible.
Will update the pooling layer to either nn.AdaptiveAvgPool1d() (for global SE) or nn.AvgPool1d()
(for limited context SE).
If only the context window is changing but still a limited SE context block - then
the earlier instance of nn.AvgPool1d() will be updated.
Args:
context_window: An integer representing the number of input timeframes that will be used
to compute the context. Each timeframe corresponds to a single window stride of the
STFT features.
Say the window_stride = 0.01s, then a context window of 128 represents 128 * 0.01 s
of context to compute the Squeeze step.
"""
if hasattr(self, 'context_window'):
logging.info(f"Changing Squeeze-Excitation context window from {self.context_window} to {context_window}")
self.context_window = context_window
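# Illustrative usage sketch (arbitrary shapes): with context_window=-1 the squeeze step averages
# over only the unpadded timesteps of each example before the excitation gating is applied.
def _example_squeeze_excite() -> Tuple[torch.Tensor, torch.Tensor]:
    se = SqueezeExcite(channels=128, reduction_ratio=8, context_window=-1)
    x = torch.randn(2, 128, 50)       # [B, C, T]
    lengths = torch.tensor([50, 32])  # second example has 18 padded frames
    y, y_lens = se(x, lengths)        # y keeps shape [2, 128, 50]
    return y, y_lens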
class JasperBlock(nn.Module, AdapterModuleMixin, AccessMixin):
"""
Constructs a single "Jasper" block. With modified parameters, also constructs other blocks for models
such as `QuartzNet` and `Citrinet`.
- For `Jasper` : `separable` flag should be False
- For `QuartzNet` : `separable` flag should be True
- For `Citrinet` : `separable` flag and `se` flag should be True
Note that above are general distinctions, each model has intricate differences that expand over
multiple such blocks.
For further information about the differences between models which use JasperBlock, please review
the configs for ASR models found in the ASR examples directory.
Args:
inplanes: Number of input channels.
planes: Number of output channels.
repeat: Number of repeated sub-blocks (R) for this block.
kernel_size: Convolution kernel size across all repeated sub-blocks.
        kernel_size_factor: Floating point scale value that is multiplied with kernel size,
            then truncated and rounded up to the nearest odd integer to compose the kernel size. Defaults to 1.0.
stride: Stride of the convolutional layers.
        dilation: Integer which defines the dilation factor of the kernel. Note that when dilation > 1, stride must
be equal to 1.
padding: String representing type of padding. Currently only supports "same" padding,
which symmetrically pads the input tensor with zeros.
        dropout: Floating point value, determines percentage of output that is zeroed out.
activation: String representing activation functions. Valid activation functions are :
{"hardtanh": nn.Hardtanh, "relu": nn.ReLU, "selu": nn.SELU, "swish": Swish}.
Defaults to "relu".
        residual: Bool that determines whether a residual branch should be added or not.
All residual branches are constructed using a pointwise convolution kernel, that may or may not
perform strided convolution depending on the parameter `residual_mode`.
groups: Number of groups for Grouped Convolutions. Defaults to 1.
separable: Bool flag that describes whether Time-Channel depthwise separable convolution should be
constructed, or ordinary convolution should be constructed.
heads: Number of "heads" for the masked convolution. Defaults to -1, which disables it.
normalization: String that represents type of normalization performed. Can be one of
"batch", "group", "instance" or "layer" to compute BatchNorm1D, GroupNorm1D, InstanceNorm or
LayerNorm (which are special cases of GroupNorm1D).
norm_groups: Number of groups used for GroupNorm (if `normalization` == "group").
residual_mode: String argument which describes whether the residual branch should be simply
added ("add") or should first stride, then add ("stride_add"). Required when performing stride on
parallel branch as well as utilizing residual add.
residual_panes: Number of residual panes, used for Jasper-DR models. Please refer to the paper.
conv_mask: Bool flag which determines whether to utilize masked convolutions or not. In general,
it should be set to True.
se: Bool flag that determines whether Squeeze-and-Excitation layer should be used.
        se_reduction_ratio: Integer value, which determines to what extent the hidden dimension of the SE
            intermediate step should be reduced. Larger values reduce the number of parameters, but also limit
the effectiveness of SE layers.
se_context_window: Integer value determining the number of timesteps that should be utilized in order
to compute the averaged context window. Defaults to -1, which means it uses global context - such
that all timesteps are averaged. If any positive integer is used, it will utilize limited context
window of that size.
se_interpolation_mode: String used for interpolation mode of timestep dimension for SE blocks.
Used only if context window is > 1.
The modes available for resizing are: `nearest`, `linear` (3D-only),
`bilinear`, `area`.
stride_last: Bool flag that determines whether all repeated blocks should stride at once,
(stride of S^R when this flag is False) or just the last repeated block should stride
(stride of S when this flag is True).
        future_context: Int value that determines how many "right" / "future" context frames will be utilized
when calculating the output of the conv kernel. All calculations are done for odd kernel sizes only.
By default, this is -1, which is recomputed as the symmetric padding case.
When future_context >= 0, will compute the asymmetric padding as follows :
(left context, right context) = [K - 1 - future_context, future_context]
Determining an exact formula to limit future context is dependent on global layout of the model.
As such, we provide both "local" and "global" guidelines below.
Local context limit (should always be enforced)
- future context should be <= half the kernel size for any given layer
- future context > kernel size defaults to symmetric kernel
- future context of layer = number of future frames * width of each frame (dependent on stride)
Global context limit (should be carefully considered)
            - future context should be laid out in an ever-reducing pattern. Initial layers should restrict
            future context less than later layers, since shallow depth (and reduced stride) means each frame uses
            smaller amounts of future context.
- Beyond a certain point, future context should remain static for a given stride level. This is
the upper bound of the amount of future context that can be provided to the model on a global scale.
- future context is calculated (roughly) as - (2 ^ stride) * (K // 2) number of future frames.
This resultant value should be bound to some global maximum number of future seconds of audio (in ms).
Note: In the special case where K < future_context, it is assumed that the kernel is too small to limit
its future context, so symmetric padding is used instead.
Note: There is no explicit limitation on the amount of future context used, as long as
K > future_context constraint is maintained. This might lead to cases where future_context is
more than half the actual kernel size K! In such cases, the conv layer is utilizing more of the future
context than its current and past context to compute the output. While this is possible to do,
it is not recommended and the layer will raise a warning to notify the user of such cases.
It is advised to simply use symmetric padding for such cases.
Example:
Say we have a model that performs 8x stride and receives spectrogram frames with stride of 0.01s.
Say we wish to upper bound future context to 80 ms.
Layer ID, Kernel Size, Stride, Future Context, Global Context
0, K=5, S=1, FC=8, GC= 2 * (2^0) = 2 * 0.01 ms (special case, K < FC so use symmetric pad)
1, K=7, S=1, FC=3, GC= 3 * (2^0) = 3 * 0.01 ms (note that symmetric pad here uses 3 FC frames!)
2, K=11, S=2, FC=4, GC= 4 * (2^1) = 8 * 0.01 ms (note that symmetric pad here uses 5 FC frames!)
3, K=15, S=1, FC=4, GC= 4 * (2^1) = 8 * 0.01 ms (note that symmetric pad here uses 7 FC frames!)
4, K=21, S=2, FC=2, GC= 2 * (2^2) = 8 * 0.01 ms (note that symmetric pad here uses 10 FC frames!)
5, K=25, S=2, FC=1, GC= 1 * (2^3) = 8 * 0.01 ms (note that symmetric pad here uses 14 FC frames!)
6, K=29, S=1, FC=1, GC= 1 * (2^3) = 8 * 0.01 ms ...
quantize: Bool flag whether to quantize the Convolutional blocks.
layer_idx (int, optional): can be specified to allow layer output capture for InterCTC loss. Defaults to -1.
"""
__constants__ = ["conv_mask", "separable", "residual_mode", "res", "mconv"]
def __init__(
self,
inplanes,
planes,
repeat=3,
kernel_size=11,
kernel_size_factor=1,
stride=1,
dilation=1,
padding='same',
dropout=0.2,
activation=None,
residual=True,
groups=1,
separable=False,
heads=-1,
normalization="batch",
norm_groups=1,
residual_mode='add',
residual_panes=[],
conv_mask=False,
se=False,
se_reduction_ratio=16,
se_context_window=-1,
se_interpolation_mode='nearest',
stride_last=False,
future_context: int = -1,
quantize=False,
layer_idx: int = -1, # only used for capturing tensors for interctc loss
):
super(JasperBlock, self).__init__()
if padding != "same":
raise ValueError("currently only 'same' padding is supported")
kernel_size_factor = float(kernel_size_factor)
if isinstance(kernel_size, Iterable):
kernel_size = [compute_new_kernel_size(k, kernel_size_factor) for k in kernel_size]
else:
kernel_size = [compute_new_kernel_size(kernel_size, kernel_size_factor)]
if future_context < 0:
padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0])
else:
padding_val = get_asymtric_padding(kernel_size[0], stride[0], dilation[0], future_context)
self.inplanes = inplanes
self.planes = planes
self.conv_mask = conv_mask
self.separable = separable
self.residual_mode = residual_mode
self.se = se
self.quantize = quantize
self.layer_idx = layer_idx
# will be set in self.forward() if defined in AccessMixin config
self.interctc_should_capture = None
inplanes_loop = inplanes
conv = nn.ModuleList()
for _ in range(repeat - 1):
# Stride last means only the last convolution in block will have stride
if stride_last:
stride_val = [1]
else:
stride_val = stride
conv.extend(
self._get_conv_bn_layer(
inplanes_loop,
planes,
kernel_size=kernel_size,
stride=stride_val,
dilation=dilation,
padding=padding_val,
groups=groups,
heads=heads,
separable=separable,
normalization=normalization,
norm_groups=norm_groups,
quantize=quantize,
)
)
conv.extend(self._get_act_dropout_layer(drop_prob=dropout, activation=activation))
inplanes_loop = planes
conv.extend(
self._get_conv_bn_layer(
inplanes_loop,
planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding_val,
groups=groups,
heads=heads,
separable=separable,
normalization=normalization,
norm_groups=norm_groups,
quantize=quantize,
)
)
if se:
conv.append(
SqueezeExcite(
planes,
reduction_ratio=se_reduction_ratio,
context_window=se_context_window,
interpolation_mode=se_interpolation_mode,
activation=activation,
quantize=quantize,
)
)
self.mconv = conv
res_panes = residual_panes.copy()
self.dense_residual = residual
if residual:
res_list = nn.ModuleList()
if residual_mode == 'stride_add':
stride_val = stride
else:
stride_val = [1]
if len(residual_panes) == 0:
res_panes = [inplanes]
self.dense_residual = False
for ip in res_panes:
res = nn.ModuleList(
self._get_conv_bn_layer(
ip,
planes,
kernel_size=1,
normalization=normalization,
norm_groups=norm_groups,
stride=stride_val,
quantize=quantize,
)
)
res_list.append(res)
self.res = res_list
if PYTORCH_QUANTIZATION_AVAILABLE and self.quantize:
self.residual_quantizer = quant_nn.TensorQuantizer(quant_nn.QuantConv2d.default_quant_desc_input)
elif not PYTORCH_QUANTIZATION_AVAILABLE and quantize:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
else:
self.res = None
self.mout = nn.Sequential(*self._get_act_dropout_layer(drop_prob=dropout, activation=activation))
def _get_conv(
self,
in_channels,
out_channels,
kernel_size=11,
stride=1,
dilation=1,
padding=0,
bias=False,
groups=1,
heads=-1,
separable=False,
quantize=False,
):
use_mask = self.conv_mask
if use_mask:
return MaskedConv1d(
in_channels,
out_channels,
kernel_size,
stride=stride,
dilation=dilation,
padding=padding,
bias=bias,
groups=groups,
heads=heads,
use_mask=use_mask,
quantize=quantize,
)
else:
if PYTORCH_QUANTIZATION_AVAILABLE and quantize:
return quant_nn.QuantConv1d(
in_channels,
out_channels,
kernel_size,
stride=stride,
dilation=dilation,
padding=padding,
bias=bias,
groups=groups,
)
elif not PYTORCH_QUANTIZATION_AVAILABLE and quantize:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
else:
return nn.Conv1d(
in_channels,
out_channels,
kernel_size,
stride=stride,
dilation=dilation,
padding=padding,
bias=bias,
groups=groups,
)
def _get_conv_bn_layer(
self,
in_channels,
out_channels,
kernel_size=11,
stride=1,
dilation=1,
padding=0,
bias=False,
groups=1,
heads=-1,
separable=False,
normalization="batch",
norm_groups=1,
quantize=False,
):
if norm_groups == -1:
norm_groups = out_channels
if separable:
layers = [
self._get_conv(
in_channels,
in_channels,
kernel_size,
stride=stride,
dilation=dilation,
padding=padding,
bias=bias,
groups=in_channels,
heads=heads,
quantize=quantize,
),
self._get_conv(
in_channels,
out_channels,
kernel_size=1,
stride=1,
dilation=1,
padding=0,
bias=bias,
groups=groups,
quantize=quantize,
),
]
else:
layers = [
self._get_conv(
in_channels,
out_channels,
kernel_size,
stride=stride,
dilation=dilation,
padding=padding,
bias=bias,
groups=groups,
quantize=quantize,
)
]
if normalization == "group":
layers.append(nn.GroupNorm(num_groups=norm_groups, num_channels=out_channels))
elif normalization == "instance":
layers.append(nn.GroupNorm(num_groups=out_channels, num_channels=out_channels))
elif normalization == "layer":
layers.append(nn.GroupNorm(num_groups=1, num_channels=out_channels))
elif normalization == "batch":
layers.append(nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1))
else:
raise ValueError(
f"Normalization method ({normalization}) does not match" f" one of [batch, layer, group, instance]."
)
if groups > 1:
layers.append(GroupShuffle(groups, out_channels))
return layers
def _get_act_dropout_layer(self, drop_prob=0.2, activation=None):
if activation is None:
activation = nn.Hardtanh(min_val=0.0, max_val=20.0)
layers = [activation, nn.Dropout(p=drop_prob)]
return layers
def forward(self, input_: Tuple[List[Tensor], Optional[Tensor]]) -> Tuple[List[Tensor], Optional[Tensor]]:
"""
Forward pass of the module.
Args:
input_: The input is a tuple of two values - the preprocessed audio signal as well as the lengths
of the audio signal. The audio signal is padded to the shape [B, D, T] and the lengths are
a torch vector of length B.
Returns:
The output of the block after processing the input through `repeat` number of sub-blocks,
as well as the lengths of the encoded audio after padding/striding.
"""
lens_orig = None
xs = input_[0]
if len(input_) == 2:
xs, lens_orig = input_
# compute forward convolutions
out = xs[-1]
lens = lens_orig
for i, l in enumerate(self.mconv):
# if we're doing masked convolutions, we need to pass in and
# possibly update the sequence lengths
# if (i % 4) == 0 and self.conv_mask:
if isinstance(l, (MaskedConv1d, SqueezeExcite)):
out, lens = l(out, lens)
else:
out = l(out)
# compute the residuals
if self.res is not None:
for i, layer in enumerate(self.res):
res_out = xs[i]
for j, res_layer in enumerate(layer):
if isinstance(res_layer, MaskedConv1d):
res_out, _ = res_layer(res_out, lens_orig)
else:
res_out = res_layer(res_out)
if self.residual_mode == 'add' or self.residual_mode == 'stride_add':
if PYTORCH_QUANTIZATION_AVAILABLE and self.quantize:
out = self.residual_quantizer(out) + res_out
elif not PYTORCH_QUANTIZATION_AVAILABLE and self.quantize:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
else:
out = out + res_out
else:
out = torch.max(out, res_out)
# compute the output
out = self.mout(out)
# Support ASR Adapters
if self.is_adapter_available():
# Check for all available and enabled adapters
adapter_names = self.get_enabled_adapters()
if len(adapter_names) > 0:
out = out.transpose(1, 2) # (B, T, C)
# Call the adapters
out = self.forward_enabled_adapters(out)
out = out.transpose(1, 2) # (B, C, T)
if self.is_access_enabled():
# for adapters
if self.access_cfg.get('save_encoder_tensors', False):
self.register_accessible_tensor(name='encoder', tensor=out)
# for interctc - even though in some cases it's the same, we
# want to register separate key to be able to modify it later
# during interctc processing, if required
if self.interctc_should_capture is None:
capture_layers = self.access_cfg.get('interctc', {}).get('capture_layers', [])
self.interctc_should_capture = self.layer_idx in capture_layers
if self.interctc_should_capture:
# shape is the same as the shape of audio_signal output, i.e. [B, D, T]
self.register_accessible_tensor(name=f'interctc/layer_output_{self.layer_idx}', tensor=out)
self.register_accessible_tensor(name=f'interctc/layer_length_{self.layer_idx}', tensor=lens)
if self.res is not None and self.dense_residual:
return xs + [out], lens
return [out], lens
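# Illustrative usage sketch (hyper-parameters are arbitrary, not taken from a NeMo config): NeMo
# configs pass stride/dilation as one-element lists, which is why they are written that way here.
def _example_jasper_block() -> Tuple[torch.Tensor, torch.Tensor]:
    block = JasperBlock(
        inplanes=64, planes=128, repeat=3, kernel_size=[11], stride=[1], dilation=[1],
        residual=True, separable=True, conv_mask=True, se=True, se_reduction_ratio=8,
    )
    x = torch.randn(2, 64, 120)            # [B, D, T]
    lens = torch.tensor([120, 96])
    (out,), out_lens = block(([x], lens))  # out: [2, 128, 120]; lens unchanged with stride [1]
    return out, out_lens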
class ParallelBlock(nn.Module):
"""
Computational module that computes several `blocks` independently from each other and aggregates the outputs.
It expects audio inputs to be passed together with lengths, just like Jasper blocks, and all outputs to have
the same dimensions but it does not impose any additional requirements on the structure of the blocks themselves.
Args:
        blocks: List of Jasper blocks that will be computed concurrently. It is expected that they accept the same
input and return outputs with the same number of channels.
aggregation_mode: an optional string, indicating how the outputs will be aggregated. Supported values are
['sum', 'dropout']. "sum" value forces outputs to be summed together. "dropout" value enables tower
dropout training with different blocks being dropped out during training.
block_dropout_prob: a probability of dropping any individual block during training with "dropout" aggregation
mode. Acts as a regularization technique.
residual_mode: an optional string indicating how residuals will be applied. Supported values are
['sum', 'conv']. In 'sum' mode input features are summed together with the output. This will fail if the
number of channels in the input is different from the number of channels in an output tensor. In 'conv' mode
inputs are passed through pointwise convolution to make input channel dimension match output channel
dimension. In this mode `in_filters` and `out_filters` params are required.
in_filters: number of filters (channels) in the input tensor of each block.
out_filters: number of filters (channels) in the output tensor of each block.
"""
def __init__(
self,
blocks,
aggregation_mode: str = "sum",
        block_dropout_prob: float = 0.0,
residual_mode: str = "sum",
in_filters: int = None,
out_filters: int = None,
):
super().__init__()
self.blocks = nn.ModuleList(blocks)
self.supported_aggregations = ["sum", "dropout"]
if aggregation_mode not in self.supported_aggregations:
raise ValueError(
f"Got non-supported aggregation mode: {aggregation_mode}. Supported values are {self.supported_aggregations}."
)
self.aggregation_mode = aggregation_mode
if aggregation_mode == "dropout":
self.weights = nn.Parameter(torch.ones(len(blocks)), requires_grad=False)
self.dropout = nn.Dropout(block_dropout_prob)
self.supported_residuals = ["sum", "conv"]
if residual_mode not in self.supported_residuals:
raise ValueError(
f"Got non-supported residual mode: {residual_mode}. Supported values are {self.supported_residuals}."
)
self.residual_mode = residual_mode
if residual_mode == "conv":
if in_filters is None or out_filters is None:
raise ValueError("in_filters and out_filters have to be specified when using 'conv' residual mode.")
self.res_conv = MaskedConv1d(in_filters, out_filters, kernel_size=1, bias=False, use_mask=True)
def get_dropout_mask(self):
weights = self.dropout(self.weights)
while torch.sum(weights) == 0 and self.dropout.p < 1.0:
weights = self.dropout(self.weights)
return weights
def forward(self, x: Tuple[List[Tensor], Optional[Tensor]]):
"""
Forward pass computing aggregated output.
Args:
            x: tuple of the padded signal and the lengths of the signal. The shape of the signal is [B, D, T].
                The lengths are a 1D torch tensor of length B.
        Returns:
            torch tensor after passing the input through each block and aggregating these outputs according to the
aggregation mode.
"""
if len(self.blocks) == 1:
return self.blocks[0](x)
result = None
max_mask = None
scaling_weights = None
if self.aggregation_mode == "dropout":
scaling_weights = self.get_dropout_mask()
for i, block in enumerate(self.blocks):
output, mask = block(x)
weighted_output = output[-1]
if self.aggregation_mode == "dropout":
weighted_output = scaling_weights[i] * output[-1]
if result is None:
result = weighted_output
else:
result = result + weighted_output
if max_mask is None:
max_mask = mask
else:
max_mask = torch.max(torch.stack([mask, max_mask]), dim=0)[0]
input_feat = x[0][-1]
lens = x[1]
if self.residual_mode == "sum":
result = result + input_feat
elif self.residual_mode == "conv":
result = result + self.res_conv(input_feat, lens)[0]
return [result], max_mask
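# Illustrative usage sketch (arbitrary shapes and settings): two Jasper blocks with matching
# output channels are run in parallel and summed; the 'conv' residual mode projects the 64-channel
# input with a pointwise MaskedConv1d so it can be added to the 128-channel aggregate.
def _example_parallel_block() -> Tuple[torch.Tensor, torch.Tensor]:
    blocks = [
        JasperBlock(64, 128, repeat=2, kernel_size=[11], stride=[1], dilation=[1],
                    residual=False, conv_mask=True),
        JasperBlock(64, 128, repeat=2, kernel_size=[13], stride=[1], dilation=[1],
                    residual=False, conv_mask=True),
    ]
    parallel = ParallelBlock(blocks, aggregation_mode="sum", residual_mode="conv",
                             in_filters=64, out_filters=128)
    x = torch.randn(2, 64, 120)    # [B, D, T]
    lens = torch.tensor([120, 90])
    (out,), out_lens = parallel(([x], lens))
    return out, out_lens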
|
NeMo-main
|
nemo/collections/asr/parts/submodules/jasper.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List
import torch
from numpy import inf
from torch import nn as nn
from torch.nn import functional as F
from nemo.collections.asr.parts.submodules.jasper import get_same_padding, init_weights
class StatsPoolLayer(nn.Module):
"""Statistics and time average pooling (TAP) layer
This computes mean and, optionally, standard deviation statistics across the time dimension.
Args:
feat_in: Input features with shape [B, D, T]
pool_mode: Type of pool mode. Supported modes are 'xvector' (mean and standard deviation) and 'tap' (time
average pooling, i.e., mean)
eps: Epsilon, minimum value before taking the square root, when using 'xvector' mode.
biased: Whether to use the biased estimator for the standard deviation when using 'xvector' mode. The default
for torch.Tensor.std() is True.
Returns:
Pooled statistics with shape [B, D].
Raises:
ValueError if an unsupported pooling mode is specified.
"""
def __init__(self, feat_in: int, pool_mode: str = 'xvector', eps: float = 1e-10, biased: bool = True):
super().__init__()
supported_modes = {"xvector", "tap"}
if pool_mode not in supported_modes:
raise ValueError(f"Pool mode must be one of {supported_modes}; got '{pool_mode}'")
self.pool_mode = pool_mode
self.feat_in = feat_in
self.eps = eps
self.biased = biased
if self.pool_mode == 'xvector':
# Mean + std
self.feat_in *= 2
def forward(self, encoder_output, length=None):
if length is None:
mean = encoder_output.mean(dim=-1) # Time Axis
if self.pool_mode == 'xvector':
std = encoder_output.std(dim=-1)
pooled = torch.cat([mean, std], dim=-1)
else:
pooled = mean
else:
mask = make_seq_mask_like(like=encoder_output, lengths=length, valid_ones=False)
encoder_output = encoder_output.masked_fill(mask, 0.0)
# [B, D, T] -> [B, D]
means = encoder_output.mean(dim=-1)
            # Re-scale to undo the effect of zero padding on the mean
means = means * (encoder_output.shape[-1] / length).unsqueeze(-1)
if self.pool_mode == "xvector":
stds = (
encoder_output.sub(means.unsqueeze(-1))
.masked_fill(mask, 0.0)
.pow(2.0)
.sum(-1) # [B, D, T] -> [B, D]
.div(length.view(-1, 1).sub(1 if self.biased else 0))
.clamp(min=self.eps)
.sqrt()
)
pooled = torch.cat((means, stds), dim=-1)
else:
pooled = means
return pooled
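# Illustrative usage sketch (arbitrary shapes): in 'xvector' mode the layer concatenates the
# per-channel mean and standard deviation, so the pooled dimension is 2 * feat_in.
def _example_stats_pool() -> torch.Tensor:
    pool = StatsPoolLayer(feat_in=192, pool_mode='xvector')
    x = torch.randn(3, 192, 80)          # [B, D, T]
    lengths = torch.tensor([80, 64, 40])
    return pool(x, length=lengths)       # [3, 384]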
@torch.jit.script_if_tracing
def make_seq_mask_like(
like: torch.Tensor, lengths: torch.Tensor, valid_ones: bool = True, time_dim: int = -1
) -> torch.Tensor:
mask = torch.arange(like.shape[time_dim], device=like.device).repeat(lengths.shape[0], 1).lt(lengths.unsqueeze(-1))
# Match number of dims in `like` tensor
for _ in range(like.dim() - mask.dim()):
mask = mask.unsqueeze(1)
# If time dim != -1, transpose to proper dim.
if time_dim != -1:
mask = mask.transpose(time_dim, -1)
if not valid_ones:
mask = ~mask
return mask
def lens_to_mask(lens: List[int], max_len: int, device: str = None):
"""
    outputs masking labels for a batch of audio feature lengths, with the maximum length of any
    mask given by max_len
    input:
        lens: tensor of sequence lengths
max_len: max length of any audio feature
output:
mask: masked labels
num_values: sum of mask values for each feature (useful for computing statistics later)
"""
lens_mat = torch.arange(max_len).to(device)
mask = lens_mat[:max_len].unsqueeze(0) < lens.unsqueeze(1)
mask = mask.unsqueeze(1)
num_values = torch.sum(mask, dim=2, keepdim=True)
return mask, num_values
def get_statistics_with_mask(x: torch.Tensor, m: torch.Tensor, dim: int = 2, eps: float = 1e-10):
"""
    compute mean and standard deviation of input (x) provided with its masking labels (m)
input:
x: feature input
m: averaged mask labels
output:
mean: mean of input features
        std: standard deviation of input features
"""
mean = torch.sum((m * x), dim=dim)
std = torch.sqrt((m * (x - mean.unsqueeze(dim)).pow(2)).sum(dim).clamp(eps))
return mean, std
class TDNNModule(nn.Module):
"""
Time Delayed Neural Module (TDNN) - 1D
input:
inp_filters: input filter channels for conv layer
out_filters: output filter channels for conv layer
kernel_size: kernel weight size for conv layer
dilation: dilation for conv layer
stride: stride for conv layer
        padding: padding for conv layer (default None: chooses padding value such that input and output feature shapes match)
output:
tdnn layer output
"""
def __init__(
self,
inp_filters: int,
out_filters: int,
kernel_size: int = 1,
dilation: int = 1,
stride: int = 1,
padding: int = None,
):
super().__init__()
if padding is None:
padding = get_same_padding(kernel_size, stride=stride, dilation=dilation)
self.conv_layer = nn.Conv1d(
in_channels=inp_filters,
out_channels=out_filters,
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
)
self.activation = nn.ReLU()
self.bn = nn.BatchNorm1d(out_filters)
def forward(self, x, length=None):
x = self.conv_layer(x)
x = self.activation(x)
return self.bn(x)
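# Illustrative usage sketch (arbitrary shapes): with the default padding the TDNN layer preserves
# the time dimension, applying conv -> ReLU -> BatchNorm1d.
def _example_tdnn_module() -> torch.Tensor:
    tdnn = TDNNModule(inp_filters=80, out_filters=512, kernel_size=5, dilation=1)
    x = torch.randn(4, 80, 300)  # [B, D, T]
    return tdnn(x)               # [4, 512, 300]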
class MaskedSEModule(nn.Module):
"""
Squeeze and Excite module implementation with conv1d layers
input:
inp_filters: input filter channel size
se_filters: intermediate squeeze and excite channel output and input size
out_filters: output filter channel size
kernel_size: kernel_size for both conv1d layers
dilation: dilation size for both conv1d layers
output:
squeeze and excite layer output
"""
def __init__(self, inp_filters: int, se_filters: int, out_filters: int, kernel_size: int = 1, dilation: int = 1):
super().__init__()
self.se_layer = nn.Sequential(
nn.Conv1d(inp_filters, se_filters, kernel_size=kernel_size, dilation=dilation,),
nn.ReLU(),
nn.BatchNorm1d(se_filters),
nn.Conv1d(se_filters, out_filters, kernel_size=kernel_size, dilation=dilation,),
nn.Sigmoid(),
)
def forward(self, input, length=None):
if length is None:
            x = torch.mean(input, dim=2, keepdim=True)
else:
max_len = input.size(2)
mask, num_values = lens_to_mask(length, max_len=max_len, device=input.device)
x = torch.sum((input * mask), dim=2, keepdim=True) / (num_values)
out = self.se_layer(x)
return out * input
class TDNNSEModule(nn.Module):
"""
    Modified SE_TDNN group building block from the ECAPA implementation, for faster training and inference
Reference: ECAPA-TDNN Embeddings for Speaker Diarization (https://arxiv.org/pdf/2104.01466.pdf)
inputs:
inp_filters: input filter channel size
out_filters: output filter channel size
        group_scale: scale value to group wider conv channels (default: 8)
        se_channels: squeeze and excite output channel size (default: 1024/8 = 128)
kernel_size: kernel_size for group conv1d layers (default: 1)
dilation: dilation size for group conv1d layers (default: 1)
"""
def __init__(
self,
inp_filters: int,
out_filters: int,
group_scale: int = 8,
se_channels: int = 128,
kernel_size: int = 1,
dilation: int = 1,
init_mode: str = 'xavier_uniform',
):
super().__init__()
self.out_filters = out_filters
padding_val = get_same_padding(kernel_size=kernel_size, dilation=dilation, stride=1)
group_conv = nn.Conv1d(
out_filters,
out_filters,
kernel_size=kernel_size,
dilation=dilation,
padding=padding_val,
groups=group_scale,
)
self.group_tdnn_block = nn.Sequential(
TDNNModule(inp_filters, out_filters, kernel_size=1, dilation=1),
group_conv,
nn.ReLU(),
nn.BatchNorm1d(out_filters),
TDNNModule(out_filters, out_filters, kernel_size=1, dilation=1),
)
self.se_layer = MaskedSEModule(out_filters, se_channels, out_filters)
self.apply(lambda x: init_weights(x, mode=init_mode))
def forward(self, input, length=None):
x = self.group_tdnn_block(input)
x = self.se_layer(x, length)
return x + input
class AttentivePoolLayer(nn.Module):
"""
Attention pooling layer for pooling speaker embeddings
Reference: ECAPA-TDNN Embeddings for Speaker Diarization (https://arxiv.org/pdf/2104.01466.pdf)
inputs:
inp_filters: input feature channel length from encoder
attention_channels: intermediate attention channel size
kernel_size: kernel_size for TDNN and attention conv1d layers (default: 1)
dilation: dilation size for TDNN and attention conv1d layers (default: 1)
"""
def __init__(
self,
inp_filters: int,
attention_channels: int = 128,
kernel_size: int = 1,
dilation: int = 1,
eps: float = 1e-10,
):
super().__init__()
self.feat_in = 2 * inp_filters
self.attention_layer = nn.Sequential(
TDNNModule(inp_filters * 3, attention_channels, kernel_size=kernel_size, dilation=dilation),
nn.Tanh(),
nn.Conv1d(
in_channels=attention_channels, out_channels=inp_filters, kernel_size=kernel_size, dilation=dilation,
),
)
self.eps = eps
def forward(self, x, length=None):
max_len = x.size(2)
if length is None:
length = torch.ones(x.shape[0], device=x.device)
mask, num_values = lens_to_mask(length, max_len=max_len, device=x.device)
# encoder statistics
mean, std = get_statistics_with_mask(x, mask / num_values)
mean = mean.unsqueeze(2).repeat(1, 1, max_len)
std = std.unsqueeze(2).repeat(1, 1, max_len)
attn = torch.cat([x, mean, std], dim=1)
# attention statistics
attn = self.attention_layer(attn) # attention pass
attn = attn.masked_fill(mask == 0, -inf)
alpha = F.softmax(attn, dim=2) # attention values, α
mu, sg = get_statistics_with_mask(x, alpha) # µ and ∑
# gather
return torch.cat((mu, sg), dim=1).unsqueeze(2)
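# Illustrative usage sketch (arbitrary shapes): the layer returns the channel-wise attentive mean
# and standard deviation stacked along dim=1, with a trailing singleton time axis.
def _example_attentive_pool() -> torch.Tensor:
    pool = AttentivePoolLayer(inp_filters=512, attention_channels=128)
    x = torch.randn(2, 512, 100)       # [B, D, T]
    lengths = torch.tensor([100, 73])
    return pool(x, length=lengths)     # [2, 1024, 1]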
|
NeMo-main
|
nemo/collections/asr/parts/submodules/tdnn_attention.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Part of this code is adapted from https://github.com/espnet/espnet
"""
import math
from functools import lru_cache
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from nemo.utils import avoid_float16_autocast_context
__all__ = [
'RelPositionMultiHeadAttention',
'RelPositionalEncoding',
'PositionalEncoding',
]
class MultiHeadAttention(nn.Module):
"""Multi-Head Attention layer of Transformer.
Args:
n_head (int): number of heads
n_feat (int): size of the features
dropout_rate (float): dropout rate
"""
def __init__(self, n_head, n_feat, dropout_rate, max_cache_len=0):
"""Construct an MultiHeadedAttention object."""
super(MultiHeadAttention, self).__init__()
self.cache_drop_size = None
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.s_d_k = math.sqrt(self.d_k)
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
self._max_cache_len = max_cache_len
def forward_qkv(self, query, key, value):
"""Transforms query, key and value.
Args:
query (torch.Tensor): (batch, time1, size)
key (torch.Tensor): (batch, time2, size)
value (torch.Tensor): (batch, time2, size)
returns:
q (torch.Tensor): (batch, head, time1, size)
k (torch.Tensor): (batch, head, time2, size)
v (torch.Tensor): (batch, head, time2, size)
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value, scores, mask):
"""Compute attention context vector.
Args:
value (torch.Tensor): (batch, time2, size)
scores(torch.Tensor): (batch, time1, time2)
mask(torch.Tensor): (batch, time1, time2)
returns:
value (torch.Tensor): transformed `value` (batch, time2, d_model) weighted by the attention scores
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1) # (batch, 1, time1, time2)
scores = scores.masked_fill(mask, -10000.0)
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) # (batch, head, time1, time2)
else:
attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = x.transpose(1, 2).reshape(n_batch, -1, self.h * self.d_k) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self, query, key, value, mask, pos_emb=None, cache=None):
"""Compute 'Scaled Dot Product Attention'.
Args:
query (torch.Tensor): (batch, time1, size)
key (torch.Tensor): (batch, time2, size)
value(torch.Tensor): (batch, time2, size)
mask (torch.Tensor): (batch, time1, time2)
cache (torch.Tensor) : (batch, time_cache, size)
returns:
output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention
cache (torch.Tensor) : (batch, time_cache_next, size)
"""
key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)
if torch.is_autocast_enabled():
query, key, value = query.to(torch.float32), key.to(torch.float32), value.to(torch.float32)
# temporary until we solve this more gracefully
with avoid_float16_autocast_context():
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / self.s_d_k
out = self.forward_attention(v, scores, mask)
if cache is None:
return out
else:
return out, cache
def update_cache(self, key, value, query, cache):
if cache is not None:
key = value = torch.cat([cache, key], dim=1)
q_keep_size = query.shape[1] - self.cache_drop_size
cache = torch.cat([cache[:, q_keep_size:, :], query[:, :q_keep_size, :]], dim=1)
return key, value, query, cache
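# Illustrative usage sketch (arbitrary shapes): `mask` follows this module's convention, where
# True marks positions that must NOT be attended to (they are filled with -10000 before softmax).
def _example_multi_head_attention() -> torch.Tensor:
    mha = MultiHeadAttention(n_head=4, n_feat=256, dropout_rate=0.1)
    x = torch.randn(2, 20, 256)                         # (batch, time, size)
    pad_mask = torch.zeros(2, 20, 20, dtype=torch.bool)
    pad_mask[1, :, 15:] = True                          # second example has only 15 valid frames
    return mha(query=x, key=x, value=x, mask=pad_mask)  # (2, 20, 256)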
class RelPositionMultiHeadAttention(MultiHeadAttention):
"""Multi-Head Attention layer of Transformer-XL with support of relative positional encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): number of heads
n_feat (int): size of the features
dropout_rate (float): dropout rate
"""
def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, max_cache_len=max_cache_len)
# linear transformation for positional encoding
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
# these two learnable biases are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
if pos_bias_u is None or pos_bias_v is None:
self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
# nn.init.normal_(self.pos_bias_u, 0.0, 0.02)
# nn.init.normal_(self.pos_bias_v, 0.0, 0.02)
nn.init.zeros_(self.pos_bias_u)
nn.init.zeros_(self.pos_bias_v)
else:
self.pos_bias_u = pos_bias_u
self.pos_bias_v = pos_bias_v
def rel_shift(self, x):
"""Compute relative positional encoding.
Args:
x (torch.Tensor): (batch, nheads, time, 2*time-1)
"""
b, h, qlen, pos_len = x.size() # (b, h, t1, t2)
# need to add a column of zeros on the left side of last dimension to perform the relative shifting
x = torch.nn.functional.pad(x, pad=(1, 0)) # (b, h, t1, t2+1)
x = x.view(b, h, -1, qlen) # (b, h, t2+1, t1)
# need to drop the first row
x = x[:, :, 1:].view(b, h, qlen, pos_len) # (b, h, t1, t2)
return x
def forward(self, query, key, value, mask, pos_emb, cache=None):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): (batch, time1, size)
key (torch.Tensor): (batch, time2, size)
value(torch.Tensor): (batch, time2, size)
mask (torch.Tensor): (batch, time1, time2)
pos_emb (torch.Tensor) : (batch, time1, size)
cache (torch.Tensor) : (batch, time_cache, size)
Returns:
output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention
cache (torch.Tensor) : (batch, time_cache_next, size)
"""
key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)
if torch.is_autocast_enabled():
query, key, value = query.to(torch.float32), key.to(torch.float32), value.to(torch.float32)
# temporary until we solve this more gracefully
with avoid_float16_autocast_context():
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
# compute matrix b and matrix d
# (batch, head, time1, time2)
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
matrix_bd = self.rel_shift(matrix_bd)
# drops extra elements in the matrix_bd to match the matrix_ac's size
matrix_bd = matrix_bd[:, :, :, : matrix_ac.size(-1)]
scores = (matrix_ac + matrix_bd) / self.s_d_k # (batch, head, time1, time2)
out = self.forward_attention(v, scores, mask)
if cache is None:
return out
else:
return out, cache
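# Illustrative usage sketch (arbitrary shapes): pos_emb stands in for a relative positional
# encoding of length 2*T - 1, such as the RelPositionalEncoding module declared in __all__ above
# would produce; here a random tensor is used only so the sketch stays self-contained.
def _example_rel_pos_mha() -> torch.Tensor:
    mha = RelPositionMultiHeadAttention(n_head=4, n_feat=256, dropout_rate=0.0,
                                        pos_bias_u=None, pos_bias_v=None)
    x = torch.randn(2, 10, 256)                      # (batch, time, size)
    pos_emb = torch.randn(1, 2 * 10 - 1, 256)        # (1, 2*time - 1, size)
    mask = torch.zeros(2, 10, 10, dtype=torch.bool)  # nothing masked
    return mha(query=x, key=x, value=x, mask=mask, pos_emb=pos_emb)  # (2, 10, 256)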
class RelPositionMultiHeadAttentionLongformer(RelPositionMultiHeadAttention):
"""Multi-Head Attention layer of Transformer-XL with sliding window local+global attention from Longformer.
Partially adapted from allenai (https://github.com/allenai/longformer/blob/master/longformer/sliding_chunks.py)
and huggingface (https://github.com/huggingface/transformers/blob/main/src/transformers/models/longformer/modeling_longformer.py)
Paper: https://arxiv.org/abs/1901.02860 (Transformer-XL),
https://arxiv.org/abs/2004.05150 (Longformer)
Args:
n_head (int): number of heads
n_feat (int): size of the features
dropout_rate (float): dropout rate
pos_bias_u (Tensor): the positional bias matrix U
pos_bias_v (Tensor): the positional bias matrix V
att_context_size (List[int]): List of 2 ints corresponding to left and right attention context sizes.
max_cache_len (int): the maximum size of cache
global_tokens (int): number of tokens to be used for global attention
global_tokens_spacing (int): how far apart the global tokens are
global_attn_separate (bool): whether the q, k, v layers used for global tokens should be separate
"""
def __init__(
self,
n_head,
n_feat,
dropout_rate,
pos_bias_u,
pos_bias_v,
att_context_size,
max_cache_len=0,
global_tokens=0,
global_tokens_spacing=1,
global_attn_separate=False,
):
"""Construct an RelPositionMultiHeadAttentionLongformer object."""
super().__init__(
n_head=n_head,
n_feat=n_feat,
dropout_rate=dropout_rate,
pos_bias_u=pos_bias_u,
pos_bias_v=pos_bias_v,
max_cache_len=max_cache_len,
)
self.att_context_size = att_context_size
self.global_tokens = global_tokens
self.global_tokens_spacing = global_tokens_spacing
self.global_attn_separate = global_attn_separate
if self.global_attn_separate:
self.global_q = nn.Linear(n_feat, n_feat)
self.global_k = nn.Linear(n_feat, n_feat)
self.global_v = nn.Linear(n_feat, n_feat)
def forward(self, query, key, value, pad_mask, pos_emb, cache=None):
"""Compute Scaled Dot Product Local Attention with rel. positional encoding. using overlapping chunks
Args:
query (torch.Tensor): (batch, time, size)
key (torch.Tensor): (batch, time, size)
value(torch.Tensor): (batch, time, size)
pad_mask (torch.Tensor): (batch, time)
pos_emb (torch.Tensor) : (batch, 2w + 1, size)
cache (torch.Tensor) : (batch, time_cache, size)
Returns:
output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention
cache (torch.Tensor) : (batch, time_cache_next, size)
"""
key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)
if torch.is_autocast_enabled():
query, key, value = query.to(torch.float32), key.to(torch.float32), value.to(torch.float32)
# temporary until we solve this more gracefully
with avoid_float16_autocast_context():
q, k, v = self.forward_qkv(query, key, value)
n_batch, _, T, _ = q.size()
w = max(self.att_context_size[0], self.att_context_size[1])
if w <= 0:
raise ValueError("When using local attention, context size must be set > 0")
pad_len = (2 * w - T % (2 * w)) % (2 * w) # pad time to 2w
q = F.pad(q, (0, 0, 0, pad_len)) # (batch, head, time, size)
k = F.pad(k, (0, 0, 0, pad_len)) # (batch, head, time, size)
v = F.pad(v, (0, 0, 0, pad_len)) # (batch, head, time, size)
mask = F.pad(pad_mask, (0, pad_len), value=1.0)
q_with_bias_u = q + self.pos_bias_u.unsqueeze(1) # (batch, head, time, size)
q_with_bias_v = q + self.pos_bias_v.unsqueeze(1) # (batch, head, time, size)
diagonal_matrix_ac = self.sliding_chunks_matmul_qk(
q_with_bias_u, k, w, padding_value=0.0
) # (batch, head, time, 2w + 1)
# add relative positional embedding
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k).transpose(1, 2)
# (batch, head, 2w, size)
diagonal_matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
# (batch, head, time, 2w + 1)
start_pos = w - self.att_context_size[0]
end_pos = w + self.att_context_size[1]
diagonal_matrix_ac[:, :, :, : self.att_context_size[0]] += diagonal_matrix_bd[
:, :, :, : self.att_context_size[0]
]
diagonal_matrix_ac[:, :, :, -(self.att_context_size[1] + 1) :] += diagonal_matrix_bd[
:, :, :, self.att_context_size[0] :
]
scores = diagonal_matrix_ac / self.s_d_k
# (batch, head, time, 2w + 1)
# mask invalid positions
scores[:, :, :, :start_pos] = -10000.0
scores[:, :, :, end_pos + 1 :] = -10000.0
            # This implementation is fast and takes very little memory because num_heads x hidden_size = 1 for the mask tensors below
# from (bsz x seq_len) to (bsz x num_heads x seqlen x hidden_size)
mask = mask.unsqueeze(dim=1).unsqueeze(dim=-1)
# cast to float/half then replace 1's with -inf
float_mask = mask.type_as(scores).masked_fill(mask, -10000.0)
ones = float_mask.new_ones(size=float_mask.size()) # tensor of ones
# diagonal mask with zeros everywhere and -inf inplace of padding
d_mask = self.sliding_chunks_matmul_qk(ones, float_mask, w, padding_value=0.0)
# (batch, head, time, 2w + 1)
scores += d_mask
if self.global_tokens > 0:
# create q, k, v for global attn
if self.global_attn_separate:
global_q = self.global_q(query).view(n_batch, -1, self.h, self.d_k)
global_k = self.global_k(key).view(n_batch, -1, self.h, self.d_k)
global_v = self.global_v(value).view(n_batch, -1, self.h, self.d_k)
global_q = global_q.transpose(1, 2)
global_k = global_k.transpose(1, 2)
global_v = global_v.transpose(1, 2)
global_q = F.pad(global_q, (0, 0, 0, pad_len)) # (batch, head, time, size)
global_k = F.pad(global_k, (0, 0, 0, pad_len)) # (batch, head, time, size)
global_v = F.pad(global_v, (0, 0, 0, pad_len)) # (batch, head, time, size)
else:
global_q, global_k, global_v = q, k, v
global_q /= self.s_d_k
# assign which tokens are global
is_index_global_attn = torch.zeros_like(pad_mask)
is_index_global_attn[
:, : self.global_tokens * self.global_tokens_spacing : self.global_tokens_spacing
] = 1.0
# compute global attn indices
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn=is_index_global_attn)
# calculate global attn probs with global keys
# (batch, time, head, max_num_global_attn_indices)
global_key_attn = self._compute_global_key_attn(
query=global_q.transpose(1, 2),
key=global_k.transpose(1, 2),
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
).transpose(1, 2)
# concat to local_attn_probs
# (batch, time, head, max_num_global_attn_indices + 2*w)
scores = torch.cat((global_key_attn, scores), dim=-1)
# free memory
del global_key_attn
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
p_attn = self.dropout(attn)
# (batch, head, time, 2w + 1)
if self.global_tokens > 0:
# compute sum of global and local attn
out = self._compute_attn_output_with_global_indices(
value=v,
attn_probs=p_attn,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
w=w,
)
else:
# compute local attn only
out = self.sliding_chunks_matmul_pv(p_attn, v, w)
out = out.reshape(n_batch, -1, self.h * self.d_k)[:, :T]
if self.global_tokens > 0:
out_global_to_all = self._compute_out_global_to_all(
query=global_q,
key=global_k,
value=global_v,
max_num_global_attn_indices=max_num_global_attn_indices,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=mask,
)
# overwrite values with global attention
out[is_index_global_attn_nonzero] = out_global_to_all
ret = self.linear_out(out)
if cache is None:
return ret
else:
return ret, cache
def _get_global_attn_indices(self, is_index_global_attn: torch.Tensor) -> Tuple:
"""
Compute global attention indices.
Args:
is_index_global_attn (torch.Tensor): (batch, time) A boolean tensor indicating if an index is a global attention index.
Returns:
max_num_global_attn_indices (int): Maximum number of global attention indices in the batch.
is_index_global_attn_nonzero (tuple): Indices of global attention (non-zero elements).
is_local_index_global_attn_nonzero (tuple): Indices of non-padding values within global attention indices.
is_local_index_no_global_attn_nonzero (tuple): Indices of padding values within global attention indices.
"""
# Calculate the number of global attention indices in the batch
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# Find the maximum number of global attention indices in the batch
max_num_global_attn_indices = num_global_attn_indices.max()
# Get the indices of global attention (non-zero elements)
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
# Create a helper tensor to find the local indices of global attention
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# Find the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
# Find the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
def _compute_global_key_attn(
self,
key: torch.Tensor,
query: torch.Tensor,
max_num_global_attn_indices: int,
is_index_global_attn_nonzero: tuple,
is_local_index_global_attn_nonzero: tuple,
is_local_index_no_global_attn_nonzero: tuple,
) -> torch.Tensor:
"""
Compute the attention probabilities using only global key vectors.
Args:
key (torch.Tensor): (batch, time, head, head_dim) The key vectors.
query (torch.Tensor): (batch, time, head, head_dim) The query vectors.
max_num_global_attn_indices (int): Maximum number of global attention indices in the batch.
is_index_global_attn_nonzero (tuple): Indices of global attention (non-zero elements).
is_local_index_global_attn_nonzero (tuple): Non-padding values within global attention indices.
is_local_index_no_global_attn_nonzero (tuple): Padding values within global attention indices.
Returns:
attn_probs_from_global_key (torch.Tensor): (batch, time, head, max_num_global_attn_indices) The computed attention probabilities using only global key vectors.
"""
batch_size = key.shape[0]
# create only global key vectors
key_only_global = key.new_zeros(batch_size, max_num_global_attn_indices, self.h, self.d_k)
key_only_global[is_local_index_global_attn_nonzero] = key[is_index_global_attn_nonzero]
# (batch_size, seq_len, head, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query, key_only_global))
# need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
] = torch.finfo(attn_probs_from_global_key.dtype).min
attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(
self,
value: torch.Tensor,
attn_probs: torch.Tensor,
max_num_global_attn_indices: int,
is_index_global_attn_nonzero: tuple,
is_local_index_global_attn_nonzero: tuple,
w: int,
) -> torch.Tensor:
"""
Compute the attention output with global indices.
Args:
value (torch.Tensor): (batch, head, time, head_dim) The value vectors for global attention.
attn_probs (torch.Tensor): (batch, time, head, 2w) The attention probabilities.
max_num_global_attn_indices (int): Maximum number of global attention indices in the batch.
is_index_global_attn_nonzero (tuple): Indices of global attention (non-zero elements).
is_local_index_global_attn_nonzero (tuple): Non-padding values within global attention indices.
w (int): Local context size
Returns:
torch.Tensor: (batch, time, head x head_dim) The attention output of all tokens attending to global.
"""
batch_size, time = attn_probs.shape[0], attn_probs.shape[2]
value = value.transpose(1, 2)
# get value vectors for global only
value_vectors_only_global = value.new_zeros(batch_size, max_num_global_attn_indices, self.h, self.d_k)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value[is_index_global_attn_nonzero]
# cut local attn probs to global only
attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
# compute attn output only global
attn_output_only_global = torch.matmul(
attn_probs_only_global.clone(), value_vectors_only_global.transpose(1, 2).clone()
).transpose(1, 2)
# reshape attn probs
attn_probs_without_global = attn_probs.narrow(
-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
).contiguous()
# compute attn output with global
attn_output_without_global = self.sliding_chunks_matmul_pv(attn_probs_without_global, value.transpose(1, 2), w)
return attn_output_only_global + attn_output_without_global
def _compute_out_global_to_all(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
max_num_global_attn_indices: int,
is_local_index_global_attn_nonzero: tuple,
is_index_global_attn_nonzero: tuple,
is_local_index_no_global_attn_nonzero: tuple,
is_index_masked: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Compute the attention output of global tokens attending to all.
Args:
query (torch.Tensor): (batch, head, time, head_dim) The queries for global attention.
key (torch.Tensor): (batch, head, time, head_dim) The keys for global attention.
value (torch.Tensor): (batch, head, time, head_dim) The values for global attention.
max_num_global_attn_indices (int): Maximum number of global attention indices in the batch.
is_local_index_global_attn_nonzero (tuple): Non-padding values within global attention indices.
is_index_global_attn_nonzero (tuple): Indices of global attention (non-zero elements).
is_local_index_no_global_attn_nonzero (tuple): Padding values within global attention indices.
is_index_masked (torch.Tensor): (batch, time) A boolean tensor indicating if an index is masked.
Returns:
global_attn_output (torch.Tensor): (batch, max_num_global_attn_indices, head x head_dim)
The attention output of global tokens attending to all.
"""
batch_size = key.shape[0]
seq_len = key.shape[2]
global_k = key.reshape(batch_size * self.h, -1, self.d_k)
global_v = value.reshape(batch_size * self.h, -1, self.d_k)
global_q = query.transpose(1, 2)
global_q_from_global = global_q.new_zeros(batch_size, max_num_global_attn_indices, self.h, self.d_k)
global_q_from_global[is_local_index_global_attn_nonzero] = global_q[is_index_global_attn_nonzero]
global_q_from_global = global_q_from_global.transpose(0, 1).reshape(batch_size * self.h, -1, self.d_k)
# compute attn scores
global_attn_scores = torch.bmm(global_q_from_global, global_k.transpose(1, 2))
global_attn_scores = global_attn_scores.view(batch_size, self.h, max_num_global_attn_indices, seq_len)
# need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
global_attn_scores = global_attn_scores.transpose(1, 2)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
] = torch.finfo(global_attn_scores.dtype).min
global_attn_scores = global_attn_scores.transpose(1, 2)
global_attn_scores = global_attn_scores.masked_fill(
is_index_masked.transpose(2, 3), torch.finfo(global_attn_scores.dtype).min,
)
global_attn_scores = global_attn_scores.view(batch_size * self.h, max_num_global_attn_indices, seq_len)
# compute global attn probs
global_attn_probs_float = nn.functional.softmax(global_attn_scores, dim=-1, dtype=torch.float32)
global_attn_probs = self.dropout(global_attn_probs_float)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_v)
global_attn_output = global_attn_output.view(batch_size, self.h, max_num_global_attn_indices, self.d_k)
global_attn_output = global_attn_output[
is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
]
global_attn_output = global_attn_output.reshape(global_attn_output.shape[0], -1)
return global_attn_output
# Longformer implementation for overlap case
#
def _skew(self, x: torch.Tensor, direction: List[int], padding_value: float) -> torch.Tensor:
"""Convert diagonals into columns (or columns into diagonals depending on `direction`
Args:
x (torch.Tensor): (batch x head, chunk_count, 2w, 2w)
direction (List[int]): padding directions
padding_value (float): value to pad with
Returns:
output (torch.Tensor): (batch x head, chunk_count, 2w, 2w + 1)
"""
x_padded = F.pad(x, direction, value=padding_value)
x_padded = x_padded.view(*x_padded.size()[:-2], x_padded.size(-1), x_padded.size(-2))
return x_padded
def _skew2(self, x: torch.Tensor, padding_value: float) -> torch.Tensor:
"""Shift every row 1 step to right converting columns into diagonals
Args:
x (torch.Tensor): (batch x head, chunks_count + 1, w, 2w + 1)
padding_value (float): value to pad with
Returns:
output (torch.Tensor): (batch x head, chunks_count + 1, w, 3w)
"""
# X = B x C x M x L
B, C, M, L = x.size()
x = F.pad(x, (0, M + 1), value=padding_value) # B x C x M x (L+M+1)
x = x.view(B, C, -1) # B x C x ML+MM+M
x = x[:, :, :-M] # B x C x ML+MM
        x = x.view(B, C, M, M + L)  # B x C x M x (M+L)
x = x[:, :, :, :-1]
return x
def _chunk_overlap(self, x: torch.Tensor, w: int) -> torch.Tensor:
"""Convert into overlapping chunks.
Args:
x (torch.Tensor): # (batch x head, time, size)
w (int): Chunk overlap size
Returns:
output (torch.Tensor): # (batch x head, chunk_count, 2w, size)
"""
# non-overlapping chunks of size = 2w
x = x.view(x.size(0), x.size(1) // (w * 2), w * 2, x.size(2))
# use `as_strided` to make the chunks overlap with an overlap size = w
chunk_size = list(x.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(x.stride())
chunk_stride[1] = chunk_stride[1] // 2
return x.as_strided(size=chunk_size, stride=chunk_stride)
@lru_cache()
def _get_invalid_locations_mask(self, w: int, device: str):
diagonals_list = []
for j in range(-w, 1):
diagonal_mask = torch.zeros(w, device='cpu', dtype=torch.uint8)
diagonal_mask[:-j] = 1
diagonals_list.append(diagonal_mask)
mask = torch.stack(diagonals_list, dim=-1)
mask = mask[None, None, :, :]
ending_mask = mask.flip(dims=(2, 3)).bool().to(device)
return mask.bool().to(device), ending_mask
def mask_invalid_locations(
self, input_tensor: torch.Tensor, w: int,
):
"""
Mask locations invalid for the sliding window attention
Args:
input_tensor (torch.Tensor): # (batch x head, time, size)
w (int): Chunk overlap size
"""
beginning_mask, ending_mask = self._get_invalid_locations_mask(w, input_tensor.device)
seq_len = input_tensor.size(2)
beginning_input = input_tensor[:, :, :w, : w + 1]
beginning_mask = beginning_mask[:, :, :seq_len].expand(beginning_input.size())
beginning_input.masked_fill_(beginning_mask, -float('inf'))
ending_input = input_tensor[:, :, -w:, -(w + 1) :]
ending_mask = ending_mask[:, :, -seq_len:].expand(ending_input.size())
ending_input.masked_fill_(ending_mask, -float('inf'))
def sliding_chunks_matmul_qk(self, q: torch.Tensor, k: torch.Tensor, w: int, padding_value: float) -> torch.Tensor:
"""Matrix multiplication of query x key tensors using with a sliding window attention pattern.
This implementation splits the input into overlapping chunks of size 2w
with an overlap of size w
Args:
q (torch.Tensor): (batch, head, time, size)
k (torch.Tensor): (batch, head, time, size)
w (int): Chunk overlap size
padding_value (float): Value to pad with
Returns:
output (torch.Tensor): (batch, head, time, 2w + 1)
"""
bsz, num_heads, seqlen, head_dim = q.size()
assert seqlen % (w * 2) == 0
assert q.size() == k.size()
chunks_count = seqlen // w - 1
# group bsz and num_heads dimensions into one, then chunk seqlen into chunks of size w * 2
q = q.reshape(bsz * num_heads, seqlen, head_dim)
k = k.reshape(bsz * num_heads, seqlen, head_dim)
chunk_q = self._chunk_overlap(q, w) # (batch x head, chunk_count, 2w, size)
chunk_k = self._chunk_overlap(k, w) # (batch x head, chunk_count, 2w, size)
        # matrix multiplication
# bcxd: bsz*num_heads x chunks x 2w x head_dim
# bcyd: bsz*num_heads x chunks x 2w x head_dim
# bcxy: bsz*num_heads x chunks x 2w x 2w
chunk_attn = torch.einsum('bcxd,bcyd->bcxy', (chunk_q, chunk_k)) # multiply
# (batch x head, chunk_count, 2w, 2w)
# convert diagonals into columns
diagonal_chunk_attn = self._skew(chunk_attn, direction=(0, 0, 0, 1), padding_value=padding_value)
# (batch x head, chunk_count, 2w, 2w + 1)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (w * 2 + 1) columns. The first (w) columns are the w lower triangles (attention from a word to
# w previous words). The following column is attention score from each word to itself, then
# followed by w columns for the upper triangle.
diagonal_attn = diagonal_chunk_attn.new_empty((bsz * num_heads, chunks_count + 1, w, w * 2 + 1))
# (batch x head, chunk_count + 1, w, 2w + 1)
        # copy parts from diagonal_chunk_attn into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attn[:, :-1, :, w:] = diagonal_chunk_attn[:, :, :w, : w + 1]
diagonal_attn[:, -1, :, w:] = diagonal_chunk_attn[:, -1, w:, : w + 1]
# - copying the lower triangle
diagonal_attn[:, 1:, :, :w] = diagonal_chunk_attn[:, :, -(w + 1) : -1, w + 1 :]
diagonal_attn[:, 0, 1:w, 1:w] = diagonal_chunk_attn[:, 0, : w - 1, 1 - w :]
# separate bsz and num_heads dimensions again
diagonal_attn = diagonal_attn.view(bsz, num_heads, seqlen, 2 * w + 1)
# (batch, head, time, 2w + 1)
self.mask_invalid_locations(diagonal_attn, w)
return diagonal_attn
def sliding_chunks_matmul_pv(self, prob: torch.Tensor, v: torch.Tensor, w: int):
"""Same as sliding_chunks_matmul_qk but for prob and value tensors.
Args:
            prob (torch.Tensor): (batch, head, time, 2w + 1)
v (torch.Tensor): (batch, head, time, size)
w (int): Chunk overlap size
Returns:
output (torch.Tensor): (batch, time, head, size)
"""
bsz, num_heads, seqlen, head_dim = v.size()
chunks_count = seqlen // w - 1
# group bsz and num_heads dimensions into one, then chunk seqlen into chunks of size 2w
chunk_prob = prob.reshape(bsz * num_heads, seqlen // w, w, 2 * w + 1)
# (batch x head, chunks_count + 1, w, 2w + 1)
# group bsz and num_heads dimensions into one
v = v.reshape(bsz * num_heads, seqlen, head_dim)
# (batch x head, time, size)
# pad seqlen with w at the beginning of the sequence and another w at the end
padded_v = F.pad(v, (0, 0, w, w), value=-1)
# (batch x head, time + 2w, size)
# chunk padded_v into chunks of size 3w and an overlap of size w
chunk_v_size = (bsz * num_heads, chunks_count + 1, 3 * w, head_dim)
chunk_v_stride = padded_v.stride()
chunk_v_stride = chunk_v_stride[0], w * chunk_v_stride[1], chunk_v_stride[1], chunk_v_stride[2]
chunk_v = padded_v.as_strided(size=chunk_v_size, stride=chunk_v_stride)
# (batch x head, chunks_count + 1, 3w, size)
skewed_prob = self._skew2(chunk_prob, padding_value=0)
# (batch x head, chunks_count + 1, w, 3w)
context = torch.einsum('bcwd,bcdh->bcwh', (skewed_prob, chunk_v))
# (batch x head, chunks_count + 1, w, size)
return context.view(bsz, num_heads, seqlen, head_dim).transpose(1, 2)
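# Illustrative sketches (not part of the original module).  The first shows the `as_strided`
# layout built by `_chunk_overlap` above on a toy tensor; the second shows the index
# bookkeeping done by `_get_global_attn_indices` for a hypothetical batch in which the two
# samples mark a different number of global tokens.
def _chunk_overlap_sketch() -> None:
    w = 2
    x = torch.arange(8.0).view(1, 8, 1)                      # (batch * head, time, size)
    chunks = x.view(1, 8 // (2 * w), 2 * w, 1)               # two non-overlapping chunks of size 2w
    size = list(chunks.size())
    size[1] = size[1] * 2 - 1                                # three chunks once the overlap is added
    stride = list(chunks.stride())
    stride[1] = stride[1] // 2                               # advance by w frames instead of 2w
    overlapped = chunks.as_strided(size=size, stride=stride)
    # chunk 0 covers frames 0..3, chunk 1 covers frames 2..5, chunk 2 covers frames 4..7
    assert overlapped[0, 1, :, 0].tolist() == [2.0, 3.0, 4.0, 5.0]


def _global_attn_indices_sketch() -> None:
    is_index_global_attn = torch.tensor([[1, 0, 1, 0], [1, 0, 0, 0]], dtype=torch.bool)
    num_global = is_index_global_attn.long().sum(dim=1)      # tensor([2, 1])
    max_num_global = num_global.max()                        # two padded global slots per sample
    is_local = torch.arange(max_num_global) < num_global.unsqueeze(-1)
    # sample 1 only has one global token, so its second local slot is padding
    assert is_local.tolist() == [[True, True], [True, False]]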
class PositionalEncoding(torch.nn.Module):
"""Fixed sinusoidal positional encoding.
Args:
d_model (int): embedding dim
dropout_rate (float): dropout rate
max_len (int): maximum input length
xscale (bool): whether to scale the input by sqrt(d_model)
dropout_rate_emb (float): dropout rate for the positional embeddings
"""
def __init__(self, d_model, dropout_rate, max_len=5000, xscale=None, dropout_rate_emb=0.0):
"""Construct an PositionalEncoding object."""
super(PositionalEncoding, self).__init__()
self.d_model = d_model
self.xscale = xscale
self.dropout = torch.nn.Dropout(p=dropout_rate)
self.max_len = max_len
if dropout_rate_emb > 0:
self.dropout_emb = nn.Dropout(dropout_rate_emb)
else:
self.dropout_emb = None
def create_pe(self, positions):
pos_length = positions.size(0)
pe = torch.zeros(pos_length, self.d_model, device=positions.device)
div_term = torch.exp(
torch.arange(0, self.d_model, 2, dtype=torch.float32, device=positions.device)
* -(math.log(10000.0) / self.d_model)
)
pe[:, 0::2] = torch.sin(positions * div_term)
pe[:, 1::2] = torch.cos(positions * div_term)
pe = pe.unsqueeze(0)
if hasattr(self, 'pe'):
self.pe = pe
else:
self.register_buffer('pe', pe, persistent=False)
def extend_pe(self, length, device):
"""Reset and extend the positional encodings if needed."""
if hasattr(self, 'pe') and self.pe.size(1) >= length:
return
positions = torch.arange(0, length, dtype=torch.float32, device=device).unsqueeze(1)
self.create_pe(positions=positions)
def forward(self, x: torch.Tensor, cache_len=0):
"""Adds positional encoding.
Args:
x (torch.Tensor): Input. Its shape is (batch, time, feature_size)
cache_len (int): the size of the cache which is used to shift positions
Returns:
x+pos_emb (torch.Tensor): Its shape is (batch, time, feature_size)
pos_emb (torch.Tensor): Its shape is (1, time, feature_size)
"""
input_len = x.size(1) + cache_len
if self.xscale:
x = x * self.xscale
pos_emb = self.pe[:, :input_len]
if self.dropout_emb:
pos_emb = self.dropout_emb(pos_emb)
x = x + pos_emb
return self.dropout(x), pos_emb
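# Illustrative sketch (not part of the original module): the sinusoidal table that
# `create_pe` above builds, reproduced standalone for a toy size so the layout of
# `self.pe` (1, length, d_model) is easy to inspect.
def _sinusoidal_pe_sketch(length: int = 4, d_model: int = 6) -> torch.Tensor:
    positions = torch.arange(0, length, dtype=torch.float32).unsqueeze(1)      # (length, 1)
    div_term = torch.exp(
        torch.arange(0, d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / d_model)
    )
    pe = torch.zeros(length, d_model)
    pe[:, 0::2] = torch.sin(positions * div_term)            # even dims: sine
    pe[:, 1::2] = torch.cos(positions * div_term)            # odd dims: cosine
    return pe.unsqueeze(0)                                   # (1, length, d_model)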
class RelPositionalEncoding(PositionalEncoding):
"""Relative positional encoding for TransformerXL's layers
See : Appendix B in https://arxiv.org/abs/1901.02860
Args:
d_model (int): embedding dim
dropout_rate (float): dropout rate
max_len (int): maximum input length
xscale (bool): whether to scale the input by sqrt(d_model)
dropout_rate_emb (float): dropout rate for the positional embeddings
"""
def extend_pe(self, length, device):
"""Reset and extend the positional encodings if needed."""
needed_size = 2 * length - 1
if hasattr(self, 'pe') and self.pe.size(1) >= needed_size:
return
# positions would be from negative numbers to positive
# positive positions would be used for left positions and negative for right positions
positions = torch.arange(length - 1, -length, -1, dtype=torch.float32, device=device).unsqueeze(1)
self.create_pe(positions=positions)
def forward(self, x, cache_len=0):
"""Compute positional encoding.
Args:
x (torch.Tensor): Input. Its shape is (batch, time, feature_size)
cache_len (int): the size of the cache which is used to shift positions
Returns:
x (torch.Tensor): Its shape is (batch, time, feature_size)
pos_emb (torch.Tensor): Its shape is (1, time, feature_size)
"""
if self.xscale:
x = x * self.xscale
# center_pos would be the index of position 0
# negative positions would be used for right and positive for left tokens
# for input of length L, 2*L-1 positions are needed, positions from (L-1) to -(L-1)
input_len = x.size(1) + cache_len
center_pos = self.pe.size(1) // 2 + 1
start_pos = center_pos - input_len
end_pos = center_pos + input_len - 1
pos_emb = self.pe[:, start_pos:end_pos]
if self.dropout_emb:
pos_emb = self.dropout_emb(pos_emb)
return self.dropout(x), pos_emb
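# Illustrative sketch (not part of the original module): the slicing arithmetic used in
# `RelPositionalEncoding.forward` above, written out on plain Python integers.  For an
# input of length L, the table holds 2*max_len - 1 positions from +(max_len - 1) down to
# -(max_len - 1), and forward() selects the 2*L - 1 positions centred on 0.
def _rel_pe_slice_sketch(max_len: int = 5, input_len: int = 3) -> list:
    positions = list(range(max_len - 1, -max_len, -1))       # [4, 3, 2, 1, 0, -1, -2, -3, -4]
    center_pos = len(positions) // 2 + 1
    start_pos = center_pos - input_len
    end_pos = center_pos + input_len - 1
    selected = positions[start_pos:end_pos]                  # [2, 1, 0, -1, -2]
    assert len(selected) == 2 * input_len - 1
    return selected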
class LocalAttRelPositionalEncoding(PositionalEncoding):
"""Relative positional encoding for sliding window attention or chunked attention.
See above for relative positional encoding based on Transformer-XL paper
Args:
        att_context_size (List[int]): List of 2 ints corresponding to left and right attention context sizes.
d_model (int): embedding dim
dropout_rate (float): dropout rate
max_len (int): maximum input length
xscale (bool): whether to scale the input by sqrt(d_model)
dropout_rate_emb (float): dropout rate for the positional embeddings
"""
def __init__(self, att_context_size, **kwargs):
super(LocalAttRelPositionalEncoding, self).__init__(**kwargs)
self.left_context = att_context_size[0]
self.right_context = att_context_size[1]
def extend_pe(self, length, device):
"""Reset and extend the positional encodings only at the beginning"""
if hasattr(self, 'pe'):
return
positions = torch.arange(
self.left_context, -self.right_context - 1, -1, dtype=torch.float32, device=device
).unsqueeze(1)
self.create_pe(positions=positions)
def forward(self, x, cache_len=0):
"""Compute positional encoding.
Args:
x (torch.Tensor): Input. Its shape is (batch, time, feature_size)
Returns:
x (torch.Tensor): Its shape is (batch, time, feature_size)
pos_emb (torch.Tensor): Its shape is (1, time, feature_size)
"""
if self.xscale:
x = x * self.xscale
end_pos = self.left_context + self.right_context + 1
pos_emb = self.pe[:, :end_pos]
if self.dropout_emb:
pos_emb = self.dropout_emb(pos_emb)
return self.dropout(x), pos_emb
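# Illustrative sketch (not part of the original module): with att_context_size = [l, r],
# `extend_pe` above only needs l + r + 1 relative positions, running from +l (the furthest
# frame in the past) down to -r (the furthest frame in the future).
def _local_rel_positions_sketch(left_context: int = 2, right_context: int = 1) -> list:
    positions = list(range(left_context, -right_context - 1, -1))   # [2, 1, 0, -1]
    assert len(positions) == left_context + right_context + 1
    return positions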
|
NeMo-main
|
nemo/collections/asr/parts/submodules/multi_head_attention.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Any, Optional
import torch
from torch import nn as nn
from nemo.collections.asr.parts.submodules import multi_head_attention as mha
from nemo.collections.common.parts import adapter_modules
from nemo.core.classes.mixins import adapter_mixin_strategies
class MHAResidualAddAdapterStrategy(adapter_mixin_strategies.ResidualAddAdapterStrategy):
"""
An implementation of residual addition of an adapter module with its input for the MHA Adapters.
"""
def forward(self, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'):
"""
        A basic strategy, comprising a residual connection over the input after the forward pass of
        the underlying adapter. Additional work is done to pack and unpack the dictionary of inputs and outputs.
Note: The `value` tensor is added to the output of the attention adapter as the residual connection.
Args:
input: A dictionary of multiple input arguments for the adapter module.
`query`, `key`, `value`: Original output tensor of the module, or the output of the
previous adapter (if more than one adapters are enabled).
`mask`: Attention mask.
`pos_emb`: Optional positional embedding for relative encoding.
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after one of the active adapters has finished its forward passes.
"""
out = self.compute_output(input, adapter, module=module)
# If not in training mode, or probability of stochastic depth is 0, skip step.
p = self.stochastic_depth
if not module.training or p == 0.0:
pass
else:
out = self.apply_stochastic_depth(out, input['value'], adapter, module=module)
# Return the residual connection output = input + adapter(input)
result = input['value'] + out
# If l2_lambda is activated, register the loss value
self.compute_auxiliary_losses(result, input['value'], adapter, module=module)
return result
def compute_output(
self, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'
) -> torch.Tensor:
"""
Compute the output of a single adapter to some input.
Args:
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after one of the active adapters has finished its forward passes.
"""
if isinstance(input, (list, tuple)):
out = adapter(*input)
elif isinstance(input, dict):
out = adapter(**input)
else:
out = adapter(input)
return out
@dataclass
class MHAResidualAddAdapterStrategyConfig(adapter_mixin_strategies.ResidualAddAdapterStrategyConfig):
_target_: str = "{0}.{1}".format(
MHAResidualAddAdapterStrategy.__module__, MHAResidualAddAdapterStrategy.__name__
) # mandatory field
class MultiHeadAttentionAdapter(mha.MultiHeadAttention, adapter_modules.AdapterModuleUtil):
"""Multi-Head Attention layer of Transformer.
Args:
n_head (int): number of heads
n_feat (int): size of the features
dropout_rate (float): dropout rate
proj_dim (int, optional): Optional integer value for projection before computing attention.
If None, then there is no projection (equivalent to proj_dim = n_feat).
If > 0, then will project the n_feat to proj_dim before calculating attention.
If <0, then will equal n_head, so that each head has a projected dimension of 1.
adapter_strategy: By default, MHAResidualAddAdapterStrategyConfig. An adapter composition function object.
"""
def __init__(
self,
n_head: int,
n_feat: int,
dropout_rate: float,
proj_dim: Optional[int] = None,
adapter_strategy: MHAResidualAddAdapterStrategy = None,
):
super().__init__(n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, max_cache_len=0)
self.pre_norm = nn.LayerNorm(n_feat)
# Set the projection dim to number of heads automatically
if proj_dim is not None and proj_dim < 1:
proj_dim = n_head
self.proj_dim = proj_dim
# Recompute weights for projection dim
if self.proj_dim is not None:
if self.proj_dim % n_head != 0:
raise ValueError(f"proj_dim ({proj_dim}) is not divisible by n_head ({n_head})")
self.d_k = self.proj_dim // n_head
self.s_d_k = math.sqrt(self.d_k)
self.linear_q = nn.Linear(n_feat, self.proj_dim)
self.linear_k = nn.Linear(n_feat, self.proj_dim)
self.linear_v = nn.Linear(n_feat, self.proj_dim)
self.linear_out = nn.Linear(self.proj_dim, n_feat)
# Setup adapter strategy
self.setup_adapter_strategy(adapter_strategy)
# reset parameters for Q to be identity operation
self.reset_parameters()
def forward(self, query, key, value, mask, pos_emb=None, cache=None):
"""Compute 'Scaled Dot Product Attention'.
Args:
query (torch.Tensor): (batch, time1, size)
key (torch.Tensor): (batch, time2, size)
value(torch.Tensor): (batch, time2, size)
mask (torch.Tensor): (batch, time1, time2)
cache (torch.Tensor) : (batch, time_cache, size)
returns:
output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention
cache (torch.Tensor) : (batch, time_cache_next, size)
"""
# Need to perform duplicate computations as at this point the tensors have been
# separated by the adapter forward
query = self.pre_norm(query)
key = self.pre_norm(key)
value = self.pre_norm(value)
return super().forward(query, key, value, mask, pos_emb, cache=cache)
def reset_parameters(self):
with torch.no_grad():
nn.init.zeros_(self.linear_out.weight)
nn.init.zeros_(self.linear_out.bias)
def get_default_strategy_config(self) -> 'dataclass':
return MHAResidualAddAdapterStrategyConfig()
@dataclass
class MultiHeadAttentionAdapterConfig:
n_head: int
n_feat: int
dropout_rate: float = 0.0
proj_dim: Optional[int] = None
adapter_strategy: Optional[Any] = MHAResidualAddAdapterStrategyConfig()
_target_: str = "{0}.{1}".format(MultiHeadAttentionAdapter.__module__, MultiHeadAttentionAdapter.__name__)
class RelPositionMultiHeadAttentionAdapter(mha.RelPositionMultiHeadAttention, adapter_modules.AdapterModuleUtil):
"""Multi-Head Attention layer of Transformer-XL with support of relative positional encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): number of heads
n_feat (int): size of the features
dropout_rate (float): dropout rate
proj_dim (int, optional): Optional integer value for projection before computing attention.
If None, then there is no projection (equivalent to proj_dim = n_feat).
If > 0, then will project the n_feat to proj_dim before calculating attention.
If <0, then will equal n_head, so that each head has a projected dimension of 1.
adapter_strategy: By default, MHAResidualAddAdapterStrategyConfig. An adapter composition function object.
"""
def __init__(
self,
n_head: int,
n_feat: int,
dropout_rate: float,
proj_dim: Optional[int] = None,
adapter_strategy: MHAResidualAddAdapterStrategyConfig = None,
):
super().__init__(
n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, pos_bias_u=None, pos_bias_v=None, max_cache_len=0
)
self.pre_norm = nn.LayerNorm(n_feat)
# Set the projection dim to number of heads automatically
if proj_dim is not None and proj_dim < 1:
proj_dim = n_head
self.proj_dim = proj_dim
# Recompute weights for projection dim
if self.proj_dim is not None:
if self.proj_dim % n_head != 0:
raise ValueError(f"proj_dim ({proj_dim}) is not divisible by n_head ({n_head})")
self.d_k = self.proj_dim // n_head
self.s_d_k = math.sqrt(self.d_k)
self.linear_q = nn.Linear(n_feat, self.proj_dim)
self.linear_k = nn.Linear(n_feat, self.proj_dim)
self.linear_v = nn.Linear(n_feat, self.proj_dim)
self.linear_out = nn.Linear(self.proj_dim, n_feat)
self.linear_pos = nn.Linear(n_feat, self.proj_dim, bias=False)
self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
# Setup adapter strategy
self.setup_adapter_strategy(adapter_strategy)
# reset parameters for Q to be identity operation
self.reset_parameters()
def forward(self, query, key, value, mask, pos_emb, cache=None):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): (batch, time1, size)
key (torch.Tensor): (batch, time2, size)
value(torch.Tensor): (batch, time2, size)
mask (torch.Tensor): (batch, time1, time2)
pos_emb (torch.Tensor) : (batch, time1, size)
cache (torch.Tensor) : (batch, time_cache, size)
Returns:
output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention
cache_next (torch.Tensor) : (batch, time_cache_next, size)
"""
# Need to perform duplicate computations as at this point the tensors have been
# separated by the adapter forward
query = self.pre_norm(query)
key = self.pre_norm(key)
value = self.pre_norm(value)
return super().forward(query, key, value, mask, pos_emb, cache=cache)
def reset_parameters(self):
with torch.no_grad():
nn.init.zeros_(self.linear_out.weight)
nn.init.zeros_(self.linear_out.bias)
            # NOTE: This exact procedure is apparently highly important.
# Above operation is safe to do as self.linear_out.weight *= 0.0 (similar for bias)
# However:
# DO NOT REPLACE BELOW WITH self.pos_bias_u *= 0.0 OR self.pos_bias_v *= 0.0
# For some reason at init sometimes it will cause the value of the tensor to become NaN
# All operations to compute matrix_ac and matrix_bd will then fail.
nn.init.zeros_(self.pos_bias_u)
nn.init.zeros_(self.pos_bias_v)
def get_default_strategy_config(self) -> 'dataclass':
return MHAResidualAddAdapterStrategyConfig()
@dataclass
class RelPositionMultiHeadAttentionAdapterConfig:
n_head: int
n_feat: int
dropout_rate: float = 0.0
proj_dim: Optional[int] = None
adapter_strategy: Optional[Any] = MHAResidualAddAdapterStrategyConfig()
_target_: str = "{0}.{1}".format(
RelPositionMultiHeadAttentionAdapter.__module__, RelPositionMultiHeadAttentionAdapter.__name__
)
class PositionalEncodingAdapter(mha.PositionalEncoding, adapter_modules.AdapterModuleUtil):
"""
Absolute positional embedding adapter.
.. note::
Absolute positional embedding value is added to the input tensor *without residual connection* !
        Therefore, the input is changed; if you only require the positional embedding, drop the returned `x` !
Args:
d_model (int): The input dimension of x.
max_len (int): The max sequence length.
xscale (float): The input scaling factor. Defaults to 1.0.
adapter_strategy (AbstractAdapterStrategy): By default, ReturnResultAdapterStrategyConfig.
An adapter composition function object.
NOTE: Since this is a positional encoding, it will not add a residual !
"""
def __init__(
self,
d_model: int,
max_len: int = 5000,
xscale=1.0,
adapter_strategy: adapter_mixin_strategies.ReturnResultAdapterStrategyConfig = None,
):
super().__init__(
d_model=d_model, dropout_rate=0.0, max_len=max_len, xscale=xscale, dropout_rate_emb=0.0,
)
# Setup adapter strategy
self.setup_adapter_strategy(adapter_strategy)
def get_default_strategy_config(self) -> 'dataclass':
return adapter_mixin_strategies.ReturnResultAdapterStrategyConfig()
@dataclass
class PositionalEncodingAdapterConfig:
d_model: int
max_len: int = 5000
xscale: float = 1.0
adapter_strategy: Optional[Any] = adapter_mixin_strategies.ResidualAddAdapterStrategyConfig()
_target_: str = "{0}.{1}".format(PositionalEncodingAdapter.__module__, PositionalEncodingAdapter.__name__)
class RelPositionalEncodingAdapter(mha.RelPositionalEncoding, adapter_modules.AdapterModuleUtil):
"""
Relative positional encoding for TransformerXL's layers
See : Appendix B in https://arxiv.org/abs/1901.02860
.. note::
Relative positional embedding value is **not** added to the input tensor !
        Therefore, the input is not modified by the positional embedding; if you only require the positional embedding, drop the returned `x` !
Args:
d_model (int): embedding dim
max_len (int): maximum input length
xscale (bool): whether to scale the input by sqrt(d_model)
adapter_strategy: By default, ReturnResultAdapterStrategyConfig. An adapter composition function object.
"""
def __init__(
self,
d_model: int,
max_len: int = 5000,
xscale=1.0,
adapter_strategy: adapter_mixin_strategies.ReturnResultAdapterStrategyConfig = None,
):
super().__init__(d_model=d_model, dropout_rate=0.0, max_len=max_len, xscale=xscale, dropout_rate_emb=0.0)
# Setup adapter strategy
self.setup_adapter_strategy(adapter_strategy)
def get_default_strategy_config(self) -> 'dataclass':
return adapter_mixin_strategies.ReturnResultAdapterStrategyConfig()
@dataclass
class RelPositionalEncodingAdapterConfig:
d_model: int
max_len: int = 5000
xscale: float = 1.0
adapter_strategy: Optional[Any] = adapter_mixin_strategies.ResidualAddAdapterStrategyConfig()
_target_: str = "{0}.{1}".format(RelPositionalEncodingAdapter.__module__, RelPositionalEncodingAdapter.__name__)
|
NeMo-main
|
nemo/collections/asr/parts/submodules/adapters/multi_head_attention_adapter_module.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.parts.submodules.adapters.multi_head_attention_adapter_module import (
MHAResidualAddAdapterStrategy,
MHAResidualAddAdapterStrategyConfig,
MultiHeadAttentionAdapter,
MultiHeadAttentionAdapterConfig,
PositionalEncodingAdapter,
PositionalEncodingAdapterConfig,
RelPositionalEncodingAdapter,
RelPositionalEncodingAdapterConfig,
RelPositionMultiHeadAttentionAdapter,
RelPositionMultiHeadAttentionAdapterConfig,
)
|
NeMo-main
|
nemo/collections/asr/parts/submodules/adapters/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_pytorch import RNNTLossNumba
|
NeMo-main
|
nemo/collections/asr/parts/numba/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.parts.numba.spec_augment.spec_aug_numba import (
SpecAugmentNumba,
spec_augment_launch_heuristics,
)
|
NeMo-main
|
nemo/collections/asr/parts/numba/spec_augment/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from numba import cuda
from nemo.core.classes import Typing, typecheck
from nemo.core.neural_types import LengthsType, NeuralType, SpectrogramType
from nemo.utils import logging
MAX_THREAD_BUFFER = 512
@cuda.jit()
def spec_augment_kernel(
x: torch.Tensor,
x_len: torch.Tensor,
freq_starts: torch.Tensor,
freq_widths: torch.Tensor,
time_starts: torch.Tensor,
time_widths: torch.Tensor,
mask_value: float,
):
"""
Numba CUDA kernel to perform SpecAugment in-place on the GPU.
Parallelize over freq and time axis, parallel threads over batch.
Sequential over masks (adaptive in time).
Args:
x: Pytorch tensor of shape [B, F, T] with the acoustic features.
x_len: Pytorch tensor of shape [B] with the lengths of the padded sequence.
freq_starts: Pytorch tensor of shape [B, M_f] with the start indices of freq masks.
freq_widths: Pytorch tensor of shape [B, M_f] with the width of freq masks.
time_starts: Pytorch tensor of shape [B, M_t] with the start indices of time masks.
time_widths: Pytorch tensor of shape [B, M_t] with the width of time masks.
mask_value: Float value that will be used as mask value.
"""
f = cuda.blockIdx.x # indexes the Freq dim
t = cuda.blockIdx.y # indexes the Time dim
tid = cuda.threadIdx.x # index of the current mask
threads_per_block = cuda.blockDim.x
# Compute the number of masks over freq axis
len_f = freq_starts.shape[1]
# For all samples in the batch, apply the freq mask
for bidx in range(0, x.shape[0], threads_per_block):
# Resolve the index of the batch (case where more masks than MAX_THREAD_BUFFER)
bm_idx = bidx + tid
# Access mask only if valid sample id in batch
if bm_idx < x.shape[0]:
# For `len_f` number of freq masks that must be applied
for fidx in range(0, len_f):
# Access the start index and width of this freq mask
f_start = freq_starts[bm_idx, fidx]
f_width = freq_widths[bm_idx, fidx]
# If block idx `f` >= start and < (start + width) of this freq mask
if f >= f_start and f < (f_start + f_width):
x[bm_idx, f, t] = mask_value
# Compute the number of masks over time axis
len_t = time_starts.shape[1]
# For all samples in the batch, apply the time mask
for b_idx in range(0, x.shape[0], threads_per_block):
# Resolve the index of the batch (case where more masks than MAX_THREAD_BUFFER)
bm_idx = b_idx + tid
# Access mask only if valid sample id in batch
if bm_idx < x.shape[0]:
            # For `len_t` number of time masks that must be applied
for tidx in range(0, len_t):
# Access the start index and width of this time mask
t_start = time_starts[bm_idx, tidx]
t_width = time_widths[bm_idx, tidx]
# If block idx `t` >= start and < (start + width) of this time mask
if t >= t_start and t < (t_start + t_width):
# Current block idx `t` < current seq length x_len[b]
                    # This ensures that we mask only up to the length of that sample
# Everything after that index is padded value so unnecessary to mask
if t < x_len[bm_idx]:
x[bm_idx, f, t] = mask_value
def spec_augment_launch_heuristics(x: torch.Tensor, length: torch.Tensor):
"""
    Heuristics to determine whether the PyTorch or Numba implementation is selected.
Assumes numba cuda is supported.
Args:
x: Torch tensor of shape [B, F, T]
        length: Optional torch tensor of shape [B] containing the lengths of the sequences in the batch.
Returns:
True if numba kernel should be selected, else False
"""
if not x.is_cuda:
return False
if length is None:
return False
if x.shape[0] < 8:
return False
return True
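# Illustrative usage sketch (not part of the original module): the heuristic above only
# selects the Numba kernel for CUDA inputs with known lengths and a batch of at least 8.
def _spec_augment_heuristics_sketch() -> None:
    x_cpu = torch.zeros(16, 80, 100)                          # CPU input -> PyTorch path
    lengths = torch.full((16,), 100, dtype=torch.int64)
    assert spec_augment_launch_heuristics(x_cpu, lengths) is False
    assert spec_augment_launch_heuristics(x_cpu, None) is False   # no lengths -> PyTorch path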
def launch_spec_augment_kernel(
x: torch.Tensor,
x_len: torch.Tensor,
freq_starts: torch.Tensor,
freq_lengths: torch.Tensor,
time_starts: torch.Tensor,
time_lengths: torch.Tensor,
freq_masks: int,
time_masks: int,
mask_value: float,
):
"""
Helper method to launch the SpecAugment kernel
Args:
x: Pytorch tensor of shape [B, F, T] with the acoustic features.
x_len: Pytorch tensor of shape [B] with the lengths of the padded sequence.
freq_starts: Pytorch tensor of shape [B, M_f] with the start indices of freq masks.
        freq_lengths: Pytorch tensor of shape [B, M_f] with the width of freq masks.
        time_starts: Pytorch tensor of shape [B, M_t] with the start indices of time masks.
        time_lengths: Pytorch tensor of shape [B, M_t] with the width of time masks.
        freq_masks: Int value that determines the number of freq masks.
        time_masks: Int value that determines the number of time masks.
mask_value: Float value that will be used as mask value.
Returns:
The spec augmented tensor 'x'
"""
# Setup CUDA stream
sh = x.shape
stream = cuda.external_stream(torch.cuda.current_stream(x.device).cuda_stream)
if time_masks > 0 or freq_masks > 0:
# Parallelize over freq and time axis, parallel threads over batch
# Sequential over masks (adaptive in time).
blocks_per_grid = tuple([sh[1], sh[2]])
# threads_per_block = min(MAX_THREAD_BUFFER, max(freq_masks, time_masks))
threads_per_block = min(MAX_THREAD_BUFFER, x.shape[0])
# Numba does not support fp16, force cast to fp32 temporarily at the expense of memory
original_dtype = x.dtype
cast_x = False
if x.dtype == torch.float16:
x = x.float()
cast_x = True
# Launch CUDA kernel
spec_augment_kernel[blocks_per_grid, threads_per_block, stream, 0](
x, x_len, freq_starts, freq_lengths, time_starts, time_lengths, mask_value
)
torch.cuda.synchronize()
# Recast back to original dtype if earlier cast was performed
if cast_x:
x = x.to(dtype=original_dtype)
return x
class SpecAugmentNumba(nn.Module, Typing):
"""
    Zeroes out (cuts) random contiguous horizontal or
    vertical segments of the spectrogram as described in
SpecAugment (https://arxiv.org/abs/1904.08779).
Utilizes a Numba CUDA kernel to perform inplace edit of the input without loops.
Parallelize over freq and time axis, parallel threads over batch.
Sequential over masks (adaptive in time).
Args:
freq_masks - how many frequency segments should be cut
time_masks - how many time segments should be cut
freq_width - maximum number of frequencies to be cut in one segment
time_width - maximum number of time steps to be cut in one segment.
Can be a positive integer or a float value in the range [0, 1].
If positive integer value, defines maximum number of time steps
to be cut in one segment.
If a float value, defines maximum percentage of timesteps that
are cut adaptively.
rng: Ignored.
"""
@property
def input_types(self):
"""Returns definitions of module input types
"""
return {
"input_spec": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output types
"""
return {"augmented_spec": NeuralType(('B', 'D', 'T'), SpectrogramType())}
def __init__(
self, freq_masks=0, time_masks=0, freq_width=10, time_width=0.1, rng=None, mask_value=0.0,
):
super().__init__()
# Message to mention that numba specaugment kernel will be available
# if input device is CUDA and lengths are provided
logging.debug("Numba SpecAugment kernel is available")
self.freq_masks = freq_masks
self.time_masks = time_masks
self.freq_width = freq_width
self.time_width = time_width
self.mask_value = mask_value
# Unused
self.rng = rng
if self.rng is not None:
logging.warning("`rng` was supplied to SpecAugmentNumba, but it is not used.")
if isinstance(time_width, int):
self.adaptive_temporal_width = False
else:
if time_width > 1.0 or time_width < 0.0:
raise ValueError('If `time_width` is a float value, must be in range [0, 1]')
self.adaptive_temporal_width = True
@typecheck()
@torch.no_grad()
def forward(self, input_spec, length):
sh = input_spec.shape
bs = sh[0]
# Construct the freq and time masks as well as start positions
if self.freq_masks > 0:
freq_starts = torch.randint(
0, sh[1] - self.freq_width + 1, size=[bs, self.freq_masks], device=input_spec.device
)
freq_lengths = torch.randint(0, self.freq_width + 1, size=[bs, self.freq_masks], device=input_spec.device)
else:
freq_starts = torch.zeros([bs, 1], dtype=torch.int64, device=input_spec.device)
freq_lengths = torch.zeros([bs, 1], dtype=torch.int64, device=input_spec.device)
if self.time_masks > 0:
if self.adaptive_temporal_width:
time_width = (length * self.time_width).int().clamp(min=1)
else:
time_width = (
torch.tensor(self.time_width, dtype=torch.int32, device=input_spec.device)
.unsqueeze(0)
.repeat(sh[0])
)
time_starts = []
time_lengths = []
for idx in range(sh[0]):
time_starts.append(
torch.randint(
0, max(1, length[idx] - time_width[idx]), size=[1, self.time_masks], device=input_spec.device
)
)
time_lengths.append(
torch.randint(0, time_width[idx] + 1, size=[1, self.time_masks], device=input_spec.device)
)
time_starts = torch.cat(time_starts, 0)
time_lengths = torch.cat(time_lengths, 0)
else:
time_starts = torch.zeros([bs, 1], dtype=torch.int64, device=input_spec.device)
time_lengths = torch.zeros([bs, 1], dtype=torch.int64, device=input_spec.device)
x = launch_spec_augment_kernel(
input_spec,
length,
freq_starts=freq_starts,
freq_lengths=freq_lengths,
time_starts=time_starts,
time_lengths=time_lengths,
freq_masks=self.freq_masks,
time_masks=self.time_masks,
mask_value=self.mask_value,
)
return x
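# Hypothetical usage sketch (not part of the original module): construction is device
# agnostic; the Numba kernel itself is only useful on CUDA inputs with provided lengths,
# and callers are assumed to gate on spec_augment_launch_heuristics before calling forward().
def _spec_augment_numba_sketch() -> SpecAugmentNumba:
    aug = SpecAugmentNumba(freq_masks=2, time_masks=2, freq_width=27, time_width=0.05)
    assert aug.adaptive_temporal_width is True        # float time_width -> adaptive masking
    return aug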
|
NeMo-main
|
nemo/collections/asr/parts/numba/spec_augment/spec_aug_numba.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt import rnnt_loss_cpu, rnnt_loss_gpu
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_pytorch import (
MultiblankRNNTLossNumba,
RNNTLossNumba,
TDTLossNumba,
)
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.autograd import Function, Variable
from torch.nn import Module
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int64, "labels")
check_type(label_lengths, torch.int64, "label_lengths")
check_type(lengths, torch.int64, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
check_dim(lengths, 1, "lenghts")
check_dim(label_lengths, 1, "label_lenghts")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
def _assert_no_grad(tensor):
assert not tensor.requires_grad, (
"gradients only computed for log_probs - please " "mark other tensors as not requiring gradients"
)
class LogSoftmaxGradModification(Function):
@staticmethod
def forward(ctx, acts, clamp):
if clamp < 0:
raise ValueError("`clamp` must be 0.0 or positive float.")
res = acts.new(acts)
ctx.clamp = clamp
return res
@staticmethod
def backward(ctx, grad_output):
grad_output = torch.clamp(grad_output, -ctx.clamp, ctx.clamp)
return (
grad_output,
None,
)
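# Illustrative sketch (not part of the original file; the helper name is hypothetical):
# LogSoftmaxGradModification is an identity in the forward pass and clamps incoming
# gradients to [-clamp, clamp] in the backward pass.
def _demo_grad_clamp():
    x = torch.randn(3, requires_grad=True)
    y = LogSoftmaxGradModification.apply(x, 0.1)
    y.backward(torch.tensor([5.0, -5.0, 0.05]))
    # gradients larger than `clamp` in magnitude are clipped, small ones pass through
    assert torch.allclose(x.grad, torch.tensor([0.1, -0.1, 0.05]))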
def forward_pass(log_probs, labels, blank):
"""
Computes probability of the forward variable alpha.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the forward variable probabilities - alpha of shape [T, U]
and the log likelihood of this forward step.
"""
T, U, _ = log_probs.shape
alphas = np.zeros((T, U), dtype='f')
for t in range(1, T):
alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank]
for u in range(1, U):
alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]]
for t in range(1, T):
for u in range(1, U):
no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank]
emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]]
alphas[t, u] = np.logaddexp(emit, no_emit)
loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]
return alphas, loglike
def backward_pass(log_probs, labels, blank):
"""
Computes probability of the backward variable beta.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the backward variable probabilities - beta of shape [T, U]
and the log likelihood of this backward step.
"""
T, U, _ = log_probs.shape
betas = np.zeros((T, U), dtype='f')
betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank]
for t in reversed(range(T - 1)):
betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank]
for u in reversed(range(U - 1)):
betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]]
for t in reversed(range(T - 1)):
for u in reversed(range(U - 1)):
no_emit = betas[t + 1, u] + log_probs[t, u, blank]
emit = betas[t, u + 1] + log_probs[t, u, labels[u]]
betas[t, u] = np.logaddexp(emit, no_emit)
return betas, betas[0, 0]
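# Illustrative consistency check (not part of the original file; the helper name is
# hypothetical): the forward and backward recursions above should produce the same total
# log-likelihood when run on log-softmax-normalized activations.
def _demo_forward_backward_agreement():
    rng = np.random.RandomState(0)
    T, U, V = 4, 3, 5  # U is the target length + 1
    logits = rng.randn(T, U, V + 1).astype('f')
    log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
    labels = rng.randint(1, V + 1, size=U - 1)
    _, ll_forward = forward_pass(log_probs, labels, blank=0)
    _, ll_backward = backward_pass(log_probs, labels, blank=0)
    assert np.allclose(ll_forward, ll_backward, atol=1e-3)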
def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda):
    """
    Computes the gradients of the log_probs with respect to the log probability of this step occurring.
    Args:
        log_probs: Tensor of shape [T, U, V+1]
        alphas: Tensor of shape [T, U] which represents the forward variable.
        betas: Tensor of shape [T, U] which represents the backward variable.
        labels: Labels of shape [B, U]
        blank: Index of the blank token.
        fastemit_lambda: Float scaling factor for FastEmit regularization.
    Returns:
        Gradients of shape [T, U, V+1] with respect to the forward log probability.
    """
T, U, _ = log_probs.shape
grads = np.full(log_probs.shape, -float("inf"))
log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1]
# // grad to last blank transition
grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1]
grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :]
# // grad to label transition
for u, l in enumerate(labels):
grads[:, u, l] = alphas[:, u] + betas[:, u + 1]
grads = -np.exp(grads + log_probs - log_like)
if fastemit_lambda > 0.0:
for u, l in enumerate(labels):
grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l]
return grads
def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda):
"""
Describes the computation of FastEmit regularization from the paper -
[FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148)
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Unused. Labels of shape [B, U]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Unused. Tensor of shape [T, U] which represents the backward variable.
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
The regularized negative log likelihood - lambda * P˜(At, u|x)
"""
# General calculation of the fastemit regularization alignments
T, U, _ = log_probs.shape
# alignment = np.zeros((T, U), dtype='float32')
#
# for t in range(0, T):
# alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1]
#
# for t in range(0, T):
# for u in range(0, U - 1):
# emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1]
# alignment[t, u] = emit
# reg = fastemit_lambda * (alignment[T - 1, U - 1])
# The above is equivalent to below, without need of computing above
# reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1])
# The above is also equivalent to below, without need of computing the betas alignment matrix
reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank])
return -reg
def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0):
"""
Args:
log_probs: 3D array with shape
[input len, output len + 1, vocab size]
labels: 1D array with shape [output time steps]
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
float: The negative log-likelihood
        3D array: Gradients with respect to the
            unnormalized input activations
        2D array: Alphas matrix (TxU)
        2D array: Betas matrix (TxU)
"""
alphas, ll_forward = forward_pass(log_probs, labels, blank)
betas, ll_backward = backward_pass(log_probs, labels, blank)
grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda)
return -ll_forward, grads, alphas, betas
def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0):
"""
Compute the transducer loss of the batch.
Args:
log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax.
labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning.
flen: Length vector of the acoustic sequence.
glen: Length vector of the target sequence.
blank: Id of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix.
"""
grads = np.zeros_like(log_probs)
costs = []
for b in range(log_probs.shape[0]):
t = int(flen[b])
u = int(glen[b]) + 1
ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda)
grads[b, :t, :u, :] = g
reg = fastemit_regularization(
log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda
)
ll += reg
costs.append(ll)
return costs, grads
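# Illustrative usage sketch (not part of the original file; the helper name is hypothetical):
# batched reference loss on log-softmax-normalized activations of shape [B, T, U, V+1],
# with per-example acoustic and target lengths.
def _demo_transduce_batch():
    rng = np.random.RandomState(0)
    B, T, U, V = 2, 4, 3, 5
    logits = rng.randn(B, T, U, V + 1).astype('f')
    log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
    labels = rng.randint(1, V + 1, size=(B, U - 1))
    flen = np.array([T, T - 1])      # acoustic lengths per example
    glen = np.array([U - 1, U - 2])  # target lengths per example
    costs, grads = transduce_batch(log_probs, labels, flen, glen, blank=0)
    # one negative log-likelihood per example; gradients match the activation shape
    assert len(costs) == B and grads.shape == log_probs.shape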
class _RNNT(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda):
costs, grads = transduce_batch(
acts.detach().cpu().numpy(),
labels.cpu().numpy(),
act_lens.cpu().numpy(),
label_lens.cpu().numpy(),
blank,
fastemit_lambda,
)
costs = torch.FloatTensor([sum(costs)])
grads = torch.Tensor(grads).to(acts)
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
return ctx.grads.mul(grad_output), None, None, None, None, None
class RNNTLoss(Module):
"""
Parameters:
`blank_label` (int): default 0 - label index of blank token
fastemit_lambda: Float scaling factor for FastEmit regularization.
"""
def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0, clamp: float = -1.0):
super(RNNTLoss, self).__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.clamp = float(clamp) if clamp > 0 else 0.0
self.rnnt = _RNNT.apply
def forward(self, acts, labels, act_lens, label_lens):
assert len(labels.size()) == 2
_assert_no_grad(labels)
_assert_no_grad(act_lens)
_assert_no_grad(label_lens)
certify_inputs(acts, labels, act_lens, label_lens)
# CPU Patch for fp16 - force cast to fp32
if not acts.is_cuda and acts.dtype == torch.float16:
acts = acts.float()
if self.clamp > 0.0:
acts = LogSoftmaxGradModification.apply(acts, self.clamp)
acts = torch.nn.functional.log_softmax(acts, -1)
return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda)
if __name__ == '__main__':
loss = RNNTLoss(fastemit_lambda=0.01)
torch.manual_seed(0)
acts = torch.randn(1, 2, 5, 3)
labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int64)
act_lens = torch.tensor([2], dtype=torch.int64)
label_lens = torch.tensor([len(labels[0])], dtype=torch.int64)
loss_val = loss(acts, labels, act_lens, label_lens)
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_numpy.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
from torch.nn import Module
from nemo.collections.asr.parts.numba.rnnt_loss import rnnt
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cpu_utils import cpu_rnnt
__all__ = ['rnnt_loss', 'RNNTLossNumba', 'MultiblankRNNTLossNumba', 'TDTLossNumba']
class _RNNTNumba(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, reduction, fastemit_lambda, clamp):
"""
log_probs: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
"""
is_cuda = acts.is_cuda
certify_inputs(acts, labels, act_lens, label_lens)
if clamp < 0:
raise ValueError("`clamp` must be 0.0 or positive float value.")
loss_func = rnnt.rnnt_loss_gpu if is_cuda else rnnt.rnnt_loss_cpu
grads = torch.zeros_like(acts) if acts.requires_grad else None
minibatch_size = acts.size(0)
costs = torch.zeros(minibatch_size, device=acts.device, dtype=torch.float32)
loss_func(
acts,
labels=labels,
input_lengths=act_lens,
label_lengths=label_lens,
costs=costs,
grads=grads,
blank_label=blank,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
num_threads=0,
)
if reduction in ['sum', 'mean']:
costs = costs.sum().unsqueeze_(-1)
if reduction == 'mean':
costs /= minibatch_size
if grads is not None:
grads /= minibatch_size
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
if grad_output is not None and ctx.grads is not None:
grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
return ctx.grads.mul_(grad_output), None, None, None, None, None, None, None
class _TDTNumba(Function):
"""
Numba class for Token-and-Duration Transducer (TDT) loss (https://arxiv.org/abs/2304.06795)
"""
@staticmethod
def forward(
ctx,
label_acts,
duration_acts,
labels,
act_lens,
label_lens,
blank,
durations,
reduction,
fastemit_lambda,
clamp,
sigma,
omega,
):
"""
log_probs: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
durations: list of durations for TDT model, must include 0 and 1, e.g.
[0, 1, 2, 3, 4].
sigma: hyper-parameter for logit under-normalization method for training
TDT models. Recommended value 0.05.
omega: probability for sampling the standard RNN-T loss.
Refer to https://arxiv.org/abs/2304.06795 for detailed explanations for
the above parameters;
"""
is_cuda = label_acts.is_cuda
certify_inputs(label_acts, labels, act_lens, label_lens)
if clamp < 0:
raise ValueError("`clamp` must be 0.0 or positive float value.")
if is_cuda:
loss_func = rnnt.tdt_loss_gpu
else:
raise ValueError("TDT is not yet implemented for non CUDA computation.")
label_grads = torch.zeros_like(label_acts) if label_acts.requires_grad else None
duration_grads = torch.zeros_like(duration_acts) if duration_acts.requires_grad else None
minibatch_size = label_acts.size(0)
costs = torch.zeros(minibatch_size, device=label_acts.device, dtype=label_acts.dtype)
loss_func(
label_acts,
duration_acts,
labels=labels,
input_lengths=act_lens,
label_lengths=label_lens,
costs=costs,
label_grads=label_grads,
duration_grads=duration_grads,
blank_label=blank,
durations=durations,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
sigma=sigma,
omega=omega,
num_threads=0,
)
if reduction in ['sum', 'mean']:
costs = costs.sum().unsqueeze_(-1)
if reduction == 'mean':
costs /= minibatch_size
if label_grads is not None:
label_grads /= minibatch_size
duration_grads /= minibatch_size
ctx.label_grads = label_grads
ctx.duration_grads = duration_grads
return costs
@staticmethod
def backward(ctx, grad_output):
if grad_output is not None and ctx.label_grads is not None:
grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.label_grads)
return (
ctx.label_grads.mul_(grad_output),
ctx.duration_grads.mul_(grad_output),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
class _MultiblankRNNTNumba(Function):
"""
Numba class for multi-blank transducer loss (https://arxiv.org/pdf/2211.03541.pdf)
"""
@staticmethod
def forward(
ctx, acts, labels, act_lens, label_lens, blank, big_blank_durations, reduction, fastemit_lambda, clamp, sigma
):
"""
big_blank_durations: list of durations for multi-blank transducer, e.g.
[2, 4, 8].
sigma: hyper-parameter for logit under-normalization method for training
multi-blank transducers. Recommended value 0.05.
Refer to https://arxiv.org/pdf/2211.03541 for detailed explanations for
the above parameters;
For other parameters for this class, refer to comment for class _RNNTNumba
"""
is_cuda = acts.is_cuda
certify_inputs(acts, labels, act_lens, label_lens)
if clamp < 0:
raise ValueError("`clamp` must be 0.0 or positive float value.")
if is_cuda:
loss_func = rnnt.multiblank_rnnt_loss_gpu
else:
raise NotImplementedError()
grads = torch.zeros_like(acts) if acts.requires_grad else None
minibatch_size = acts.size(0)
costs = torch.zeros(minibatch_size, device=acts.device, dtype=acts.dtype)
loss_func(
acts,
labels=labels,
input_lengths=act_lens,
label_lengths=label_lens,
costs=costs,
grads=grads,
blank_label=blank,
big_blank_durations=big_blank_durations,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
sigma=sigma,
num_threads=0,
)
if reduction in ['sum', 'mean']:
costs = costs.sum().unsqueeze_(-1)
if reduction == 'mean':
costs /= minibatch_size
if grads is not None:
grads /= minibatch_size
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
if grad_output is not None and ctx.grads is not None:
grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
return ctx.grads.mul_(grad_output), None, None, None, None, None, None, None, None, None, None
def rnnt_loss(
acts, labels, act_lens, label_lens, blank=0, reduction='mean', fastemit_lambda: float = 0.0, clamp: float = 0.0
):
"""RNN Transducer Loss (functional form)
Args:
acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
blank (int, optional): blank label. Default: 0.
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
"""
if not acts.is_cuda:
# Since CPU requires log_softmax to be computed explicitly, we need to perform grad clipping
# *after* we have obtained the gradients of loss(logsoftmax()).
# This is highly wasteful since it requires a copy of the entire joint tensor which is expensive.
# CUDA version is much more efficient since it performs an inplace logsoftmax, and therefore
# can inplace clamp the gradient.
if clamp > 0.0:
acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, clamp)
# NOTE manually done log_softmax for CPU version,
# log_softmax is computed within GPU version.
acts = torch.nn.functional.log_softmax(acts, -1)
return _RNNTNumba.apply(acts, labels, act_lens, label_lens, blank, reduction, fastemit_lambda, clamp)
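# Hedged usage sketch (not part of the original file; the helper name is hypothetical,
# and a working Numba installation is assumed): functional CPU call on random activations
# of shape [B, T, U, V+1], following the shape conventions documented above.
def _demo_rnnt_loss_functional():
    torch.manual_seed(0)
    B, T, U, V = 2, 4, 3, 5  # U is the maximum target length + 1
    acts = torch.randn(B, T, U, V + 1, requires_grad=True)
    labels = torch.randint(1, V + 1, (B, U - 1), dtype=torch.int64)
    act_lens = torch.tensor([T, T], dtype=torch.int64)
    label_lens = torch.tensor([U - 1, U - 1], dtype=torch.int64)
    loss = rnnt_loss(acts, labels, act_lens, label_lens, blank=0, reduction='mean')
    loss.backward()  # gradients flow back into `acts`
    return loss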
def multiblank_rnnt_loss(
acts,
labels,
act_lens,
label_lens,
blank,
big_blank_durations=[],
reduction='mean',
fastemit_lambda: float = 0.0,
clamp: float = 0.0,
):
"""
Multi-blank RNN Transducer (https://arxiv.org/pdf/2211.03541.pdf) Loss (functional form)
Args:
acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
blank (int): standard blank label.
big_blank_durations: list of durations for multi-blank transducer, e.g.
[2, 4, 8].
sigma: hyper-parameter for logit under-normalization method for training
multi-blank transducers. Recommended value 0.05.
Refer to https://arxiv.org/pdf/2211.03541 for detailed explanations for
the last two params.
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
"""
if not acts.is_cuda:
# Since CPU requires log_softmax to be computed explicitly, we need to perform grad clipping
# *after* we have obtained the gradients of loss(logsoftmax()).
# This is highly wasteful since it requires a copy of the entire joint tensor which is expensive.
# CUDA version is much more efficient since it performs an inplace logsoftmax, and therefore
# can inplace clamp the gradient.
if clamp > 0.0:
acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, clamp)
# NOTE manually done log_softmax for CPU version,
# log_softmax is computed within GPU version.
acts = torch.nn.functional.log_softmax(acts, -1)
return _MultiblankRNNTNumba.apply(
acts, labels, act_lens, label_lens, blank, big_blank_durations, reduction, fastemit_lambda, clamp
)
def tdt_loss(
    acts,
    labels,
    act_lens,
    label_lens,
    blank,
    durations=[],
    reduction='mean',
    fastemit_lambda: float = 0.0,
    clamp: float = 0.0,
    sigma: float = 0.0,
    omega: float = 0.0,
):
    """
    TDT RNN Transducer (https://arxiv.org/abs/2304.06795) Loss (functional form)
    Args:
        acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network.
            The last dimension stacks the label logits (including the blank) followed by one logit
            per duration in `durations`.
        labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
        act_lens: Tensor of size (batch) containing size of each output sequence from the network
        label_lens: Tensor of (batch) containing label length of each example
        blank (int): standard blank label.
        durations: list of durations for the TDT model, e.g. [0, 1, 2, 3, 4].
        sigma: hyper-parameter for the logit under-normalization method used when training
            TDT models. Recommended value 0.05.
        omega: probability of sampling the standard RNN-T loss during training.
            Refer to https://arxiv.org/abs/2304.06795 for detailed explanations of
            durations, sigma and omega.
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
            'mean': the output losses will be divided by the target lengths and
            then the mean over the batch is taken. Default: 'mean'
    """
    # TDT is currently implemented only for CUDA. Split the joint output into label logits and
    # duration logits, mirroring TDTLossNumba.forward, so that _TDTNumba receives the arguments
    # it expects; only the duration logits are log-softmax normalized here, while the label
    # logits are normalized inside the GPU kernels.
    label_acts, duration_acts = torch.split(acts, [acts.shape[-1] - len(durations), len(durations)], dim=-1)
    label_acts = label_acts.contiguous()
    duration_acts = torch.nn.functional.log_softmax(duration_acts, dim=-1).contiguous()
    return _TDTNumba.apply(
        label_acts, duration_acts, labels, act_lens, label_lens, blank, durations, reduction, fastemit_lambda, clamp, sigma, omega
    )
class RNNTLossNumba(Module):
"""
Parameters:
blank (int, optional): blank label. Default: 0.
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
"""
def __init__(self, blank=0, reduction='mean', fastemit_lambda: float = 0.0, clamp: float = -1):
super(RNNTLossNumba, self).__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.clamp = float(clamp) if clamp > 0 else 0.0
self.reduction = reduction
self.loss = _RNNTNumba.apply
def forward(self, acts, labels, act_lens, label_lens):
"""
log_probs: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
"""
if not acts.is_cuda:
# Force FP32 until log_softmax() is implemented for fp16 on CPU
if acts.dtype == torch.float16:
acts = acts.float()
# Since CPU requires log_softmax to be computed explicitly, we need to perform grad clipping
# *after* we have obtained the gradients of loss(logsoftmax()).
# This is highly wasteful since it requires a copy of the entire joint tensor which is expensive.
# CUDA version is much more efficient since it performs an inplace logsoftmax, and therefore
# can inplace clamp the gradient.
if self.clamp > 0.0:
acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, self.clamp)
# NOTE manually done log_softmax for CPU version,
# log_softmax is computed within GPU version.
acts = torch.nn.functional.log_softmax(acts, -1)
return self.loss(
acts, labels, act_lens, label_lens, self.blank, self.reduction, self.fastemit_lambda, self.clamp
)
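# Hedged usage sketch (not part of the original file; the helper name is hypothetical):
# module-style usage of RNNTLossNumba on CPU, with FastEmit regularization and gradient
# clamping enabled as described in the class docstring above.
def _demo_rnnt_loss_module():
    loss_fn = RNNTLossNumba(blank=0, reduction='sum', fastemit_lambda=0.001, clamp=1.0)
    B, T, U, V = 2, 4, 3, 5
    acts = torch.randn(B, T, U, V + 1, requires_grad=True)
    labels = torch.randint(1, V + 1, (B, U - 1), dtype=torch.int64)
    act_lens = torch.full((B,), T, dtype=torch.int64)
    label_lens = torch.full((B,), U - 1, dtype=torch.int64)
    return loss_fn(acts, labels, act_lens, label_lens)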
class MultiblankRNNTLossNumba(Module):
"""
Parameters:
blank (int): standard blank label.
big_blank_durations: list of durations for multi-blank transducer, e.g.
[2, 4, 8].
sigma: hyper-parameter for logit under-normalization method for training
multi-blank transducers. Recommended value 0.05.
Refer to https://arxiv.org/pdf/2211.03541 for detailed explanations for
the above parameters;
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
"""
def __init__(
self,
blank,
big_blank_durations,
reduction='mean',
fastemit_lambda: float = 0.0,
clamp: float = -1,
sigma: float = 0.0,
):
super(MultiblankRNNTLossNumba, self).__init__()
self.blank = blank
self.big_blank_durations = big_blank_durations
self.fastemit_lambda = fastemit_lambda
self.clamp = float(clamp) if clamp > 0 else 0.0
self.reduction = reduction
self.loss = _MultiblankRNNTNumba.apply
self.sigma = sigma
def forward(self, acts, labels, act_lens, label_lens):
"""
log_probs: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
"""
if not acts.is_cuda:
# Since CPU requires log_softmax to be computed explicitly, we need to perform grad clipping
# *after* we have obtained the gradients of loss(logsoftmax()).
# This is highly wasteful since it requires a copy of the entire joint tensor which is expensive.
# CUDA version is much more efficient since it performs an inplace logsoftmax, and therefore
# can inplace clamp the gradient.
if self.clamp > 0.0:
acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, self.clamp)
# NOTE manually done log_softmax for CPU version,
# log_softmax is computed within GPU version.
acts = torch.nn.functional.log_softmax(acts, -1)
return self.loss(
acts,
labels,
act_lens,
label_lens,
self.blank,
self.big_blank_durations,
self.reduction,
self.fastemit_lambda,
self.clamp,
self.sigma,
)
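# Hedged usage sketch (not part of the original file; the helper name and index layout are
# assumptions, and a CUDA device is required since the multi-blank loss is GPU-only here):
# the joint output appends one logit per big blank after the regular tokens and the standard blank.
def _demo_multiblank_loss():
    device = torch.device('cuda')
    big_blank_durations = [2, 4, 8]
    B, T, U, V = 2, 8, 4, 16
    acts = torch.randn(B, T, U, V + 1 + len(big_blank_durations), device=device, requires_grad=True)
    labels = torch.randint(0, V, (B, U - 1), dtype=torch.int64, device=device)
    act_lens = torch.full((B,), T, dtype=torch.int64, device=device)
    label_lens = torch.full((B,), U - 1, dtype=torch.int64, device=device)
    # standard blank is assumed to sit right after the V real tokens, big blanks after it
    loss_fn = MultiblankRNNTLossNumba(blank=V, big_blank_durations=big_blank_durations, sigma=0.05)
    return loss_fn(acts, labels, act_lens, label_lens)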
class TDTLossNumba(Module):
"""
Parameters:
blank (int): standard blank label.
durations: list of durations for TDT model, e.g.
[0, 1, 2, 3, 4].
sigma: hyper-parameter for logit under-normalization method for training
TDT. Recommended value 0.05.
omega: hyper-parameter for RNN-T loss for loss combination.
Refer to https://arxiv.org/abs/2304.06795 for detailed explanations for
the above parameters;
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
"""
def __init__(
self,
blank,
durations=None,
reduction='mean',
fastemit_lambda: float = 0.0,
clamp: float = -1,
sigma: float = 0.0,
omega: float = 0.0,
):
super(TDTLossNumba, self).__init__()
self.blank = blank
self.durations = durations if durations is not None else []
self.fastemit_lambda = fastemit_lambda
self.clamp = float(clamp) if clamp > 0 else 0.0
self.reduction = reduction
self.loss = _TDTNumba.apply
self.sigma = sigma
self.omega = omega
def forward(self, acts, labels, act_lens, label_lens):
"""
log_probs: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
"""
# TODO(hainan): in the future, we could further optimize this so that we don't need to
# make contiguous copies of the acts tensor.
label_acts, duration_acts = torch.split(
acts, [acts.shape[-1] - len(self.durations), len(self.durations)], dim=-1
)
label_acts = label_acts.contiguous()
duration_acts = torch.nn.functional.log_softmax(duration_acts, dim=-1).contiguous()
return self.loss(
label_acts,
duration_acts,
labels,
act_lens,
label_lens,
self.blank,
self.durations,
self.reduction,
self.fastemit_lambda,
self.clamp,
self.sigma,
self.omega,
)
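# Hedged usage sketch (not part of the original file; the helper name and blank placement are
# assumptions, and a CUDA device is required since the TDT loss is GPU-only here): the joint
# output stacks the label logits (blank included) followed by one logit per duration.
def _demo_tdt_loss():
    device = torch.device('cuda')
    durations = [0, 1, 2, 3, 4]
    B, T, U, V = 2, 8, 4, 16
    acts = torch.randn(B, T, U, V + 1 + len(durations), device=device, requires_grad=True)
    labels = torch.randint(0, V, (B, U - 1), dtype=torch.int64, device=device)
    act_lens = torch.full((B,), T, dtype=torch.int64, device=device)
    label_lens = torch.full((B,), U - 1, dtype=torch.int64, device=device)
    # the standard blank is assumed to sit right after the V real tokens
    loss_fn = TDTLossNumba(blank=V, durations=durations, sigma=0.05)
    return loss_fn(acts, labels, act_lens, label_lens)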
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int64, "labels")
check_type(label_lengths, torch.int64, "label_lengths")
check_type(lengths, torch.int64, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_pytorch.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import torch
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants, rnnt_helper
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cpu_utils import cpu_rnnt
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cuda_utils import gpu_rnnt
def rnnt_loss_cpu(
acts: torch.Tensor,
labels: torch.Tensor,
input_lengths: torch.Tensor,
label_lengths: torch.Tensor,
costs: torch.Tensor,
grads: torch.Tensor,
blank_label: int,
fastemit_lambda: float,
clamp: float,
num_threads: int,
):
"""
Wrapper method for accessing CPU RNNT loss.
CPU implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).
Args:
acts: Activation tensor of shape [B, T, U, V+1].
labels: Ground truth labels of shape [B, U].
input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
label_lengths: Lengths of the target sequence as a vector of ints [B].
costs: Zero vector of length [B] in which costs will be set.
grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be set.
blank_label: Index of the blank token in the vocabulary.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of threads for OpenMP.
"""
# aliases
log_probs = acts
flat_labels = labels
minibatch_size = log_probs.shape[0]
maxT = log_probs.shape[1]
maxU = log_probs.shape[2]
alphabet_size = log_probs.shape[3]
if num_threads < 0:
num_threads = multiprocessing.cpu_count()
num_threads = max(1, num_threads) # have to use at least 1 thread
gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=False)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Invalid parameter passed when calculating working space memory")
cpu_workspace = torch.zeros(gpu_size, device=log_probs.device, dtype=log_probs.dtype, requires_grad=False)
### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###
log_probs, acts_shape = rnnt_helper.flatten_tensor(log_probs)
flat_labels, labels_shape = rnnt_helper.flatten_tensor(flat_labels)
wrapper = cpu_rnnt.CPURNNT(
minibatch=minibatch_size,
maxT=maxT,
maxU=maxU,
alphabet_size=alphabet_size,
workspace=cpu_workspace,
blank=blank_label,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
num_threads=num_threads,
batch_first=True,
)
if grads is None:
status = wrapper.score_forward(
log_probs=log_probs.data,
costs=costs,
flat_labels=flat_labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
else:
### FLATTEN GRAD TENSOR ###
grads, grads_shape = rnnt_helper.flatten_tensor(grads)
status = wrapper.cost_and_grad(
log_probs=log_probs.data,
grads=grads.data,
costs=costs,
flat_labels=flat_labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
del cpu_workspace, wrapper
return True
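# Hedged usage sketch (not part of the original file; the helper name is hypothetical):
# the wrapper above is called by the autograd Functions in rnnt_pytorch.py with
# log-softmax-normalized activations and preallocated `costs`/`grads` buffers that it
# fills in place.
def _demo_rnnt_loss_cpu():
    B, T, U, V = 2, 4, 3, 5
    acts = torch.randn(B, T, U, V + 1).log_softmax(dim=-1)
    labels = torch.randint(1, V + 1, (B, U - 1), dtype=torch.int64)
    input_lengths = torch.tensor([T, T], dtype=torch.int64)
    label_lengths = torch.tensor([U - 1, U - 1], dtype=torch.int64)
    costs = torch.zeros(B, dtype=torch.float32)
    grads = torch.zeros_like(acts)
    rnnt_loss_cpu(
        acts,
        labels,
        input_lengths,
        label_lengths,
        costs,
        grads,
        blank_label=0,
        fastemit_lambda=0.0,
        clamp=0.0,
        num_threads=-1,
    )
    return costs, grads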
def rnnt_loss_gpu(
acts: torch.Tensor,
labels: torch.Tensor,
input_lengths: torch.Tensor,
label_lengths: torch.Tensor,
costs: torch.Tensor,
grads: torch.Tensor,
blank_label: int,
fastemit_lambda: float,
clamp: float,
num_threads: int,
):
"""
Wrapper method for accessing GPU RNNT loss.
CUDA implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).
Args:
acts: Activation tensor of shape [B, T, U, V+1].
labels: Ground truth labels of shape [B, U].
input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
label_lengths: Lengths of the target sequence as a vector of ints [B].
costs: Zero vector of length [B] in which costs will be set.
grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be set.
blank_label: Index of the blank token in the vocabulary.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of threads for OpenMP.
"""
minibatch_size = acts.shape[0]
maxT = acts.shape[1]
maxU = acts.shape[2]
alphabet_size = acts.shape[3]
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(acts.device).cuda_stream)
else:
stream = cuda.default_stream()
if num_threads < 0:
num_threads = multiprocessing.cpu_count()
num_threads = max(1, num_threads) # have to use at least 1 thread
gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=True)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Invalid parameter passed when calculating working space memory")
# Select GPU index
cuda.select_device(acts.device.index)
gpu_workspace = torch.zeros(gpu_size, device=acts.device, dtype=torch.float32, requires_grad=False)
### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###
acts, acts_shape = rnnt_helper.flatten_tensor(acts)
wrapper = gpu_rnnt.GPURNNT(
minibatch=minibatch_size,
maxT=maxT,
maxU=maxU,
alphabet_size=alphabet_size,
workspace=gpu_workspace,
blank=blank_label,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
num_threads=num_threads,
stream=stream,
)
if grads is None:
status = wrapper.score_forward(
acts=acts.data,
costs=costs.data,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
else:
### FLATTEN GRAD TENSOR ###
grads, grads_shape = rnnt_helper.flatten_tensor(grads)
status = wrapper.cost_and_grad(
acts=acts.data,
grads=grads.data,
costs=costs.data,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
del gpu_workspace, wrapper
return True
def tdt_loss_gpu(
label_acts: torch.Tensor,
duration_acts: torch.Tensor,
labels: torch.Tensor,
input_lengths: torch.Tensor,
label_lengths: torch.Tensor,
costs: torch.Tensor,
label_grads: torch.Tensor,
duration_grads: torch.Tensor,
blank_label: int,
durations: list,
fastemit_lambda: float,
clamp: float,
num_threads: int,
sigma: float,
omega: float,
):
"""
Wrapper method for accessing GPU TDT loss (https://arxiv.org/abs/2304.06795).
CUDA implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).
Args:
label_acts: Activation tensor of shape [B, T, U, V], where V includes the blank symbol.
duration_acts: Activation tensor of shape [B, T, U, D], where D is the number of durations.
labels: Ground truth labels of shape [B, U].
input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
label_lengths: Lengths of the target sequence as a vector of ints [B].
costs: Zero vector of length [B] in which costs will be set.
label_grads: Zero tensor of shape [B, T, U, V] where the gradient to label_acts will be set.
duration_grads: Zero tensor of shape [B, T, U, D] where the gradient to duration_acts will be set.
blank_label: Index of the standard blank token in the vocabulary.
durations: A list of supported durations for TDT. Must include 0 and 1.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of threads for OpenMP.
        sigma: logit under-normalization weight used in the TDT model. Refer to
            the TDT paper https://arxiv.org/abs/2304.06795 for detailed explanations.
        omega: probability of sampling the standard RNN-T loss during training.
"""
minibatch_size = label_acts.shape[0]
maxT = label_acts.shape[1]
maxU = label_acts.shape[2]
alphabet_size = label_acts.shape[3]
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(label_acts.device).cuda_stream)
else:
stream = cuda.default_stream()
if num_threads < 0:
num_threads = multiprocessing.cpu_count()
num_threads = max(1, num_threads) # have to use at least 1 thread
gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=True)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Invalid parameter passed when calculating working space memory")
# Select GPU index
cuda.select_device(label_acts.device.index)
gpu_workspace = torch.zeros(gpu_size, device=label_acts.device, dtype=label_acts.dtype, requires_grad=False)
tdt_workspace = torch.zeros(len(durations), device=label_acts.device, dtype=torch.long, requires_grad=False)
for i in range(0, len(durations)):
tdt_workspace[i] = durations[i]
### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###
label_acts, label_acts_shape = rnnt_helper.flatten_tensor(label_acts)
duration_acts, duration_acts_shape = rnnt_helper.flatten_tensor(duration_acts)
wrapper = gpu_rnnt.GPUTDT(
minibatch=minibatch_size,
maxT=maxT,
maxU=maxU,
alphabet_size=alphabet_size,
workspace=gpu_workspace,
tdt_workspace=tdt_workspace,
num_durations=len(durations),
blank=blank_label,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
num_threads=num_threads,
stream=stream,
sigma=sigma,
omega=omega,
)
if label_grads is None:
status = wrapper.score_forward(
label_acts=label_acts.data,
duration_acts=duration_acts.data,
costs=costs.data,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
else:
### FLATTEN GRAD TENSOR ###
label_grads, label_grads_shape = rnnt_helper.flatten_tensor(label_grads)
duration_grads, duration_grads_shape = rnnt_helper.flatten_tensor(duration_grads)
status = wrapper.cost_and_grad(
label_acts=label_acts.data,
duration_acts=duration_acts.data,
label_grads=label_grads.data,
duration_grads=duration_grads.data,
costs=costs.data,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
del gpu_workspace, tdt_workspace, wrapper
return True
def multiblank_rnnt_loss_gpu(
acts: torch.Tensor,
labels: torch.Tensor,
input_lengths: torch.Tensor,
label_lengths: torch.Tensor,
costs: torch.Tensor,
grads: torch.Tensor,
blank_label: int,
big_blank_durations: list,
fastemit_lambda: float,
clamp: float,
num_threads: int,
sigma: float,
):
"""
Wrapper method for accessing GPU Multi-blank RNNT loss (https://arxiv.org/pdf/2211.03541.pdf).
CUDA implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).
Args:
acts: Activation tensor of shape [B, T, U, V + num_big_blanks + 1].
labels: Ground truth labels of shape [B, U].
input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
label_lengths: Lengths of the target sequence as a vector of ints [B].
costs: Zero vector of length [B] in which costs will be set.
grads: Zero tensor of shape [B, T, U, V + num_big_blanks + 1] where the gradient will be set.
blank_label: Index of the standard blank token in the vocabulary.
big_blank_durations: A list of supported durations for big blank symbols
in the model, e.g. [2, 4, 8]. Note we only include durations for ``big
blanks'' here and it should not include 1 for the standard blank.
Those big blanks have vocabulary indices after the standard blank index.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of threads for OpenMP.
sigma: logit-undernormalization weight used in the multi-blank model. Refer to
the multi-blank paper https://arxiv.org/pdf/2211.03541 for detailed explanations.
"""
minibatch_size = acts.shape[0]
maxT = acts.shape[1]
maxU = acts.shape[2]
alphabet_size = acts.shape[3]
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(acts.device).cuda_stream)
else:
stream = cuda.default_stream()
if num_threads < 0:
num_threads = multiprocessing.cpu_count()
num_threads = max(1, num_threads) # have to use at least 1 thread
gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=True)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Invalid parameter passed when calculating working space memory")
# Select GPU index
cuda.select_device(acts.device.index)
gpu_workspace = torch.zeros(gpu_size, device=acts.device, dtype=acts.dtype, requires_grad=False)
big_blank_workspace = torch.zeros(
len(big_blank_durations), device=acts.device, dtype=torch.long, requires_grad=False
)
for i in range(0, len(big_blank_durations)):
big_blank_workspace[i] = big_blank_durations[i]
### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###
acts, acts_shape = rnnt_helper.flatten_tensor(acts)
wrapper = gpu_rnnt.MultiblankGPURNNT(
minibatch=minibatch_size,
maxT=maxT,
maxU=maxU,
alphabet_size=alphabet_size,
workspace=gpu_workspace,
big_blank_workspace=big_blank_workspace,
num_big_blanks=len(big_blank_durations),
blank=blank_label,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
num_threads=num_threads,
stream=stream,
sigma=sigma,
)
if grads is None:
status = wrapper.score_forward(
acts=acts.data,
costs=costs.data,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
else:
### FLATTEN GRAD TENSOR ###
grads, grads_shape = rnnt_helper.flatten_tensor(grads)
status = wrapper.cost_and_grad(
acts=acts.data,
grads=grads.data,
costs=costs.data,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
del gpu_workspace, big_blank_workspace, wrapper
return True
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/rnnt.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Tuple
import numba
import torch
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants
threshold = global_constants.THRESHOLD
@cuda.jit(device=True, inline=True)
def log_sum_exp(a: float, b: float):
if a == global_constants.FP32_NEG_INF:
return b
if b == global_constants.FP32_NEG_INF:
return a
if a > b:
return math.log1p(math.exp(b - a)) + a
else:
return math.log1p(math.exp(a - b)) + b
@cuda.jit(device=True, inline=True)
def div_up(x: int, y: int):
return (x + y - 1) // y
@cuda.jit(device=True)
def maximum(x, y):
if x < y:
return y
else:
return x
@cuda.jit(device=True)
def add(x, y):
return x + y
@cuda.jit(device=True)
def identity(x):
return x
@cuda.jit(device=True)
def negate(x):
return -x
@cuda.jit(device=True)
def exponential(x):
return math.exp(x)
@cuda.jit(device=True)
def log_plus(p1: float, p2: float):
if p1 == global_constants.FP32_NEG_INF:
return p2
if p2 == global_constants.FP32_NEG_INF:
return p1
result = math.log1p(math.exp(-math.fabs(p1 - p2))) + maximum(p1, p2)
return result
@cuda.jit(device=True, inline=True)
def copy_data_1d(source: torch.Tensor, dest: torch.Tensor, idx: int):
dest[idx] = source[idx]
@cuda.jit()
def compute_costs_data(source: torch.Tensor, dest: torch.Tensor, fastemit_lambda: float):
block = cuda.blockIdx.x
tid = cuda.threadIdx.x
idx = block * cuda.blockDim.x + tid
length = source.shape[0]
if idx < length:
copy_data_1d(source, dest, idx)
dest[idx] *= -1.0
dest[idx] *= numba.float32(1.0 + fastemit_lambda)
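# Hedged launch sketch (not part of the original file; the helper name is hypothetical and
# a CUDA device is assumed): the kernel above copies the per-example costs, negates them,
# and applies the FastEmit scaling, one thread per element.
def _demo_compute_costs_data(fastemit_lambda: float = 0.0):
    source = torch.tensor([1.5, 2.0, 0.25], device='cuda')
    dest = torch.zeros_like(source)
    threads = global_constants.threads_per_block()
    blocks = (source.numel() + threads - 1) // threads
    compute_costs_data[blocks, threads](source, dest, fastemit_lambda)
    return dest  # with fastemit_lambda == 0.0: [-1.5, -2.0, -0.25]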
def get_workspace_size(
maxT: int, maxU: int, minibatch: int, gpu: bool
) -> Tuple[Optional[int], global_constants.RNNTStatus]:
if minibatch <= 0 or maxT <= 0 or maxU <= 0:
return (None, global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE)
# per minibatch memory
per_minibatch_size = 0
# alphas & betas
per_minibatch_size += maxT * maxU * 2
if not gpu:
# // blank & label log probability cache
per_minibatch_size += maxT * maxU * 2
else:
# // softmax denominator
per_minibatch_size += maxT * maxU
# // forward - backward loglikelihood
per_minibatch_size += 2
size = per_minibatch_size * minibatch
return (size, global_constants.RNNTStatus.RNNT_STATUS_SUCCESS)
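# Illustrative check (not part of the original file; the helper name is hypothetical):
# for gpu=True the workspace holds alphas and betas, the softmax denominator, and the
# forward/backward log-likelihoods for every example in the minibatch.
def _demo_workspace_size():
    maxT, maxU, minibatch = 10, 5, 2
    size, status = get_workspace_size(maxT, maxU, minibatch, gpu=True)
    # (10 * 5 * 2 for alphas + betas) + (10 * 5 denominator) + 2 log-likelihoods = 152 per example
    assert status == global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
    assert size == 152 * minibatch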
def flatten_tensor(x: torch.Tensor):
original_shape = x.shape
x = x.view([-1])
return x, original_shape
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/rnnt_helper.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import numpy as np
from numba import float32
# Internal globals
_THREADS_PER_BLOCK = 32
_WARP_SIZE = 32
_DTYPE = float32
# Constants
FP32_INF = np.inf
FP32_NEG_INF = -np.inf
THRESHOLD = 1e-1
"""
Getters
"""
def threads_per_block():
global _THREADS_PER_BLOCK
return _THREADS_PER_BLOCK
def warp_size():
global _WARP_SIZE
return _WARP_SIZE
def dtype():
global _DTYPE
return _DTYPE
# RNNT STATUS
class RNNTStatus(enum.Enum):
RNNT_STATUS_SUCCESS = 0
RNNT_STATUS_INVALID_VALUE = 1
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/global_constants.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
import torch
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants, rnnt_helper
warp_size = global_constants.warp_size()
dtype = global_constants.dtype()
CTA_REDUCE_SIZE = 128
class I_Op(enum.Enum):
"""
Represents an operation that is performed on the input tensor
"""
EXPONENTIAL = 0
IDENTITY = 1
class R_Op(enum.Enum):
"""
Represents a reduction operation performed on the input tensor
"""
ADD = 0
MAXIMUM = 1
@cuda.jit(device=True)
def CTAReduce(tid: int, x, storage, count: int, R_opid: int):
"""
CUDA Warp reduction kernel.
It is a device kernel to be called by other kernels.
    The data will be read from the right segment recursively, and reduced (via the R_Op) onto the left half.
Operation continues while warp size is larger than a given offset.
Beyond this offset, warp reduction is performed via `shfl_down_sync`, which halves the reduction
space and sums the two halves at each call.
Note:
Efficient warp occurs at input shapes of 2 ^ K.
References:
- Warp Primitives [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
Args:
tid: CUDA thread index
x: activation. Single float.
storage: shared memory of size CTA_REDUCE_SIZE used for reduction in parallel threads.
count: equivalent to num_rows, which is equivalent to alphabet_size (V+1)
R_opid: Operator ID for reduction. See R_Op for more information.
"""
storage[tid] = x
cuda.syncthreads()
# Fold the data in half with each pass
offset = CTA_REDUCE_SIZE // 2
while offset >= warp_size:
if (tid + offset) < count and tid < offset:
# Read from the right half and store to the left half.
if R_opid == 0:
x = rnnt_helper.add(x, storage[offset + tid])
else:
x = rnnt_helper.maximum(x, storage[offset + tid])
storage[tid] = x
cuda.syncthreads()
offset = offset // 2
offset = warp_size // 2
while offset > 0:
# warp reduction and sync
shuff = cuda.shfl_down_sync(0xFFFFFFFF, x, offset)
if (tid + offset < count) and (tid < offset):
if R_opid == 0:
x = rnnt_helper.add(x, shuff)
else:
x = rnnt_helper.maximum(x, shuff)
offset = offset // 2
return x
@cuda.jit()
def _reduce_rows(I_opid: int, R_opid: int, acts, output, num_rows: int):
"""
CUDA Warp reduction kernel which reduces via the R_Op.Maximum
Reduces the input data such that I_Op = Identity and R_op = Maximum.
The result is stored in the blockIdx, and is stored as an identity op.
Note:
Efficient warp occurs at input shapes of 2 ^ K.
References:
- Warp Primitives [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
Args:
I_opid: Operator ID for input. See I_Op for more information. For this kernel,
the Identity op is chosen in general, and therefore the input is reduced in place
without scaling.
R_opid: Operator ID for reduction. See R_Op for more information.
For this kernel, generally Maximum op is chosen. It reduces the kernel via max.
        acts: Flattened activation matrix of shape [B * T * U * (V+1)].
        output: Flattened output matrix of shape [B * T * U * (V+1)]. Data will be overwritten.
num_rows: Vocabulary size (including blank token) - V+1.
"""
tid = cuda.threadIdx.x
idx = tid
col = cuda.blockIdx.x
# allocate shared thread memory
storage = cuda.shared.array(shape=(CTA_REDUCE_SIZE,), dtype=acts.dtype)
max = output[col]
# // Each block works on a column
if idx < num_rows:
curr = acts[col * num_rows + idx] - max
if I_opid == 0:
curr = rnnt_helper.exponential(curr)
else:
curr = rnnt_helper.identity(curr)
idx += CTA_REDUCE_SIZE
while idx < num_rows:
activation_ = acts[col * num_rows + idx] - max
if I_opid == 0 and R_opid == 0:
curr = rnnt_helper.add(curr, rnnt_helper.exponential(activation_))
elif I_opid == 0 and R_opid == 1:
curr = rnnt_helper.maximum(curr, rnnt_helper.exponential(activation_))
elif I_opid == 1 and R_opid == 0:
curr = rnnt_helper.add(curr, rnnt_helper.identity(activation_))
else:
curr = rnnt_helper.maximum(curr, rnnt_helper.identity(activation_))
idx += CTA_REDUCE_SIZE
# // Sum thread-totals over the CTA.
curr = CTAReduce(tid, curr, storage, num_rows, R_opid)
# // Store result in out (inplace, I_op: identity)
if tid == 0:
output[col] = curr
@cuda.jit()
def _reduce_minus(I_opid: int, R_opid: int, acts, output, num_rows: int):
"""
CUDA warp reduction kernel which, in practice, reduces with I_Op = Exponential and R_Op = Add.
The result is written to `output` at the column given by the block index as
-max - log(sum), i.e. the negative log-sum-exp of the column.
Note:
Efficient warp reduction occurs when the input size is a power of 2 (2^K).
References:
- Warp Primitives [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
Args:
I_opid: Operator ID for input. See I_Op for more information. For this kernel,
the Exponential op is chosen in general, and therefore the input is reduced in place
with scaling.
R_opid: Operator ID for reduction. See R_Op for more information.
For this kernel, generally Add op is chosen. It reduces the kernel via summation.
acts: Flattened activation matrix of shape [B * T * U * (V+1)].
output: Flattened output matrix of shape [B * T * U]. Data will be overwritten.
num_rows: Vocabulary size (including blank token) - V+1.
"""
tid = cuda.threadIdx.x
idx = tid
col = cuda.blockIdx.x
# allocate shared thread memory
storage = cuda.shared.array(shape=(CTA_REDUCE_SIZE,), dtype=acts.dtype)
max = output[col]
# // Each block works on a column
if idx < num_rows:
curr = acts[col * num_rows + idx] - max
if I_opid == 0:
curr = rnnt_helper.exponential(curr)
else:
curr = rnnt_helper.identity(curr)
idx += CTA_REDUCE_SIZE
while idx < num_rows:
activation_ = acts[col * num_rows + idx] - max
if I_opid == 0 and R_opid == 0:
curr = rnnt_helper.add(curr, rnnt_helper.exponential(activation_))
elif I_opid == 0 and R_opid == 1:
curr = rnnt_helper.maximum(curr, rnnt_helper.exponential(activation_))
elif I_opid == 1 and R_opid == 0:
curr = rnnt_helper.add(curr, rnnt_helper.identity(activation_))
else:
curr = rnnt_helper.maximum(curr, rnnt_helper.identity(activation_))
idx += CTA_REDUCE_SIZE
# // Sum thread-totals over the CTA.
curr = CTAReduce(tid, curr, storage, num_rows, R_opid)
# // Store result in out (inplace, I_op: exponential)
if tid == 0:
output[col] = -max - math.log(curr)
def ReduceHelper(
I_opid: int,
R_opid: int,
acts: torch.Tensor,
output: torch.Tensor,
num_rows: int,
num_cols: int,
minus: bool,
stream,
):
"""
Helper that launches the CUDA warp reduction kernels, reducing via the given R_Op and writing
the result to `output` according to the I_Op id.
One block is launched per column, and each block stores its result at its column index.
Note:
Efficient warp reduction occurs when the input size is a power of 2 (2^K).
References:
- Warp Primitives [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
Args:
I_opid: Operator ID for input. See I_Op for more information.
R_opid: Operator ID for reduction. See R_Op for more information.
acts: Flattened activation matrix of shape [B * T * U * (V+1)].
output: Flattened output matrix of shape [B * T * U]. Data will be overwritten.
num_rows: Vocabulary size (including blank token) - V+1.
Each block reduces over a column of this many elements.
num_cols: Flattened shape of activation matrix, without vocabulary dimension (B * T * U).
Represents the number of blocks per grid.
minus: Bool flag whether to add or subtract as reduction.
If minus is set, calls the _reduce_minus kernel, else calls the _reduce_rows kernel.
stream: CUDA Stream.
"""
if minus:
grid_size = num_cols
# call kernel
_reduce_minus[grid_size, CTA_REDUCE_SIZE, stream, 0](I_opid, R_opid, acts, output, num_rows)
else:
grid_size = num_cols
# call kernel
_reduce_rows[grid_size, CTA_REDUCE_SIZE, stream, 0](I_opid, R_opid, acts, output, num_rows)
return True
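# The bracket syntax above is Numba's CUDA launch configuration,
# kernel[blocks_per_grid, threads_per_block, stream, dynamic_shared_mem_bytes](...).
# Each of the `num_cols` (= B * T * U) columns is handled by one block of CTA_REDUCE_SIZE
# threads, for example (illustrative only):
#
#   _reduce_rows[num_cols, CTA_REDUCE_SIZE, stream, 0](I_opid, R_opid, acts, output, num_rows)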
def reduce_exp(acts: torch.Tensor, denom, rows: int, cols: int, minus: bool, stream):
"""
Helper method to call the Warp Reduction Kernel to perform `exp` reduction.
Note:
Efficient warp reduction occurs when the input size is a power of 2 (2^K).
References:
- Warp Primitives [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
Args:
acts: Flattened activation matrix of shape [B * T * U * (V+1)].
denom: Flattened output matrix of shape [B * T * U]. Data will be overwritten.
rows: Vocabulary size (including blank token) - V+1.
Each block reduces over a column of this many elements.
cols: Flattened shape of activation matrix, without vocabulary dimension (B * T * U).
Represents the number of blocks per grid.
minus: Bool flag whether to add or subtract as reduction.
If minus is set, calls the _reduce_minus kernel, else calls the _reduce_rows kernel.
stream: CUDA Stream.
"""
return ReduceHelper(
I_opid=I_Op.EXPONENTIAL.value,
R_opid=R_Op.ADD.value,
acts=acts,
output=denom,
num_rows=rows,
num_cols=cols,
minus=minus,
stream=stream,
)
def reduce_max(acts: torch.Tensor, denom, rows: int, cols: int, minus: bool, stream):
"""
Helper method to call the Warp Reduction Kernel to perform `max` reduction.
Note:
Efficient warp reduction occurs when the input size is a power of 2 (2^K).
References:
- Warp Primitives [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
Args:
acts: Flattened activation matrix of shape [B * T * U * (V+1)].
denom: Flattened output matrix of shape [B * T * U]. Data will be overwritten.
rows: Vocabulary size (including blank token) - V+1.
Each block reduces over a column of this many elements.
cols: Flattened shape of activation matrix, without vocabulary dimension (B * T * U).
Represents the number of blocks per grid.
minus: Bool flag whether to add or subtract as reduction.
If minus is set, calls the _reduce_minus kernel, else calls the _reduce_rows kernel.
stream: CUDA Stream.
"""
return ReduceHelper(
I_opid=I_Op.IDENTITY.value,
R_opid=R_Op.MAXIMUM.value,
acts=acts,
output=denom,
num_rows=rows,
num_cols=cols,
minus=minus,
stream=stream,
)
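# A minimal NumPy reference (hypothetical helper, not part of NeMo) for what the reduce_max /
# reduce_exp pair computes per column: reduce_max first writes max(acts[col, :]) into denom,
# then reduce_exp (minus=True) overwrites it with -max - log(sum(exp(acts[col, :] - max))),
# i.e. the negative log-sum-exp used as the log softmax denominator.
def _denominator_reference(acts_2d):
    """acts_2d: array of shape [num_cols, num_rows] viewing the flattened activations."""
    import numpy as np

    col_max = acts_2d.max(axis=1)                              # reduce_max: identity / maximum
    exp_sum = np.exp(acts_2d - col_max[:, None]).sum(axis=1)   # reduce_exp: exponential / add
    return -col_max - np.log(exp_sum)                          # matches output[col] = -max - log(curr)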
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/cuda_utils/reduce.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/cuda_utils/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import random
from typing import Optional, Tuple
import numba
import torch
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants, rnnt_helper
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce
class GPURNNT:
def __init__(
self,
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
workspace,
blank: int,
fastemit_lambda: float,
clamp: float,
num_threads: int,
stream,
):
"""
Helper class to launch the CUDA Kernels to compute the Transducer Loss.
Args:
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
workspace: An allocated chunk of memory that will be sliced off and reshaped into required
blocks used as working memory.
blank: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of OMP threads to launch.
stream: Numba Cuda Stream.
"""
self.minibatch_ = minibatch
self.maxT_ = maxT
self.maxU_ = maxU
self.alphabet_size_ = alphabet_size
self.gpu_workspace = cuda.as_cuda_array(
workspace
) # a flat vector of floatX numbers that represents allocated memory slices
self.blank_ = blank
self.fastemit_lambda_ = fastemit_lambda
self.clamp_ = abs(clamp)
self.num_threads_ = num_threads
self.stream_ = stream # type: cuda.cudadrv.driver.Stream
if num_threads > 0:
numba.set_num_threads(min(multiprocessing.cpu_count(), num_threads))
self.num_threads_ = numba.get_num_threads()
else:
self.num_threads_ = numba.get_num_threads()
def log_softmax(self, acts: torch.Tensor, denom: torch.Tensor):
"""
Computes the log softmax denominator of the input activation tensor
and stores the result in denom.
Args:
acts: Activation tensor of shape [B, T, U, V+1]. The input must be represented as a flat tensor
of shape [B * T * U * (V+1)] to allow pointer indexing.
denom: A flattened zero tensor of shape [B * T * U] which will be updated inplace with the log softmax denominator.
Updates:
This kernel inplace updates the `denom` tensor
"""
# // trans_acts + pred_acts -> log_softmax denominator
reduce.reduce_max(
acts,
denom,
rows=self.alphabet_size_,
cols=self.minibatch_ * self.maxT_ * self.maxU_,
minus=False,
stream=self.stream_,
)
reduce.reduce_exp(
acts,
denom,
rows=self.alphabet_size_,
cols=self.minibatch_ * self.maxT_ * self.maxU_,
minus=True,
stream=self.stream_,
)
def compute_cost_and_score(
self,
acts: torch.Tensor,
grads: Optional[torch.Tensor],
costs: torch.Tensor,
labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
) -> global_constants.RNNTStatus:
"""
Compute both the loss and the gradients.
Args:
acts: A flattened tensor of shape [B, T, U, V+1] representing the activation matrix.
grads: A flattened zero tensor of same shape as acts.
costs: A zero vector of length B which will be updated inplace with the log probability costs.
labels: A flattened matrix of labels of shape [B, U].
label_lengths: A vector of length B that contains the original lengths of the target sequence.
input_lengths: A vector of length B that contains the original lengths of the acoustic sequence.
Updates:
This will launch kernels that will update inline the following variables:
- grads: Gradients of the activation matrix wrt the costs vector.
- costs: Negative log likelihood of the forward variable.
Returns:
An enum that either represents a successful RNNT operation or failure.
"""
training = grads is not None
if training:
grads *= 0.0 # zero grads
used_offset, (denom, alphas, betas, llForward, llBackward) = self._prepare_workspace()
######## START EXECUTION ########
self.log_softmax(acts, denom)
# Compute alphas
gpu_rnnt_kernel.compute_alphas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
acts,
denom,
alphas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
)
if training:
# Compute betas
gpu_rnnt_kernel.compute_betas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
acts,
denom,
betas,
llBackward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
)
# Compute gradient
grad_blocks_per_grid = self.minibatch_ * self.maxT_ * self.maxU_
grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
gpu_rnnt_kernel.compute_grad_kernel[grad_blocks_per_grid, grad_threads_per_block, self.stream_, 0](
grads,
acts,
denom,
alphas,
betas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
self.fastemit_lambda_,
self.clamp_,
)
# // cost copy, negate (for log likelihood) and update with additional regularizers
# This needs to be done via CUDA, because we used temporary memory llForward
# passed to alpha, which was updated with log likelihoods.
# But copying this data into a pytorch pointer is more difficult (numba api is one way)
# Therefore launch a pointwise CUDA kernel to update the costs inplace from data of llForward
# Then negate to compute the loglikelihood.
threadsperblock = min(costs.shape[0], 32)
blockspergrid = (costs.shape[0] + (threadsperblock - 1)) // threadsperblock
rnnt_helper.compute_costs_data[blockspergrid, threadsperblock, self.stream_, 0](
llForward, costs, self.fastemit_lambda_
)
self.stream_.synchronize()
return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
def cost_and_grad(
self,
acts: torch.Tensor,
grads: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if (
acts is None
or grads is None
or costs is None
or pad_labels is None
or label_lengths is None
or input_lengths is None
):
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(acts, grads, costs, pad_labels, label_lengths, input_lengths)
def score_forward(
self,
acts: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if acts is None or costs is None or pad_labels is None or label_lengths is None or input_lengths is None:
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(acts, None, costs, pad_labels, label_lengths, input_lengths)
def _prepare_workspace(self) -> Tuple[int, Tuple[torch.Tensor, ...]]:
"""
Helper method that uses the workspace and constructs slices of it that can be used.
Returns:
An int, representing the offset of the used workspace (practically, the slice of the workspace consumed)
A tuple of tensors representing the shared workspace.
"""
used_offset = 0
# // denom
denom = self.gpu_workspace[used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_]
used_offset += self.maxT_ * self.maxU_ * self.minibatch_
# // alphas & betas
alphas = self.gpu_workspace[used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_]
used_offset += self.maxT_ * self.maxU_ * self.minibatch_
betas = self.gpu_workspace[used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_]
used_offset += self.maxT_ * self.maxU_ * self.minibatch_
# // logllh
llForward = self.gpu_workspace[used_offset : used_offset + self.minibatch_]
used_offset += self.minibatch_
llBackward = self.gpu_workspace[used_offset : used_offset + self.minibatch_]
used_offset += self.minibatch_
return used_offset, (denom, alphas, betas, llForward, llBackward)
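# Hypothetical helper (not part of NeMo) that only documents the slicing performed by
# GPURNNT._prepare_workspace above: denom, alphas and betas each consume B * T * U elements
# of the flat workspace, while llForward and llBackward consume B elements each.
def _required_workspace_elements(minibatch, maxT, maxU):
    per_lattice = maxT * maxU * minibatch
    return 3 * per_lattice + 2 * minibatch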
class MultiblankGPURNNT(GPURNNT):
def __init__(
self,
sigma: float,
num_big_blanks: int,
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
workspace,
big_blank_workspace,
blank: int,
fastemit_lambda: float,
clamp: float,
num_threads: int,
stream,
):
"""
Helper class to launch the CUDA Kernels to compute Multi-blank Transducer Loss (https://arxiv.org/pdf/2211.03541).
Args:
sigma: Hyper-parameter related to the logit-normalization method in training multi-blank transducers.
num_big_blanks: Number of big blank symbols the model has. This should not include the standard blank symbol.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V + 1 + num-big-blanks
workspace: An allocated chunk of memory that will be sliced off and reshaped into required
blocks used as working memory.
big_blank_workspace: An allocated chunk of memory that will be sliced off and reshaped into required
blocks used as working memory specifically for the multi-blank related computations.
blank: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of OMP threads to launch.
stream: Numba Cuda Stream.
"""
super().__init__(
minibatch, maxT, maxU, alphabet_size, workspace, blank, fastemit_lambda, clamp, num_threads, stream
)
self.big_blank_workspace = cuda.as_cuda_array(
big_blank_workspace
) # a flat vector of integer numbers that represents allocated memory slices
self.num_big_blanks = num_big_blanks
self.sigma = sigma
def compute_cost_and_score(
self,
acts: torch.Tensor,
grads: Optional[torch.Tensor],
costs: torch.Tensor,
labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
) -> global_constants.RNNTStatus:
"""
Compute both the loss and the gradients.
Args:
acts: A flattened tensor of shape [B, T, U, V+1] representing the activation matrix.
grads: A flattened zero tensor of same shape as acts.
costs: A zero vector of length B which will be updated inplace with the log probability costs.
labels: A flattened matrix of labels of shape [B, U].
label_lengths: A vector of length B that contains the original lengths of the target sequence.
input_lengths: A vector of length B that contains the original lengths of the acoustic sequence.
Updates:
This will launch kernels that will update inline the following variables:
- grads: Gradients of the activation matrix wrt the costs vector.
- costs: Negative log likelihood of the forward variable.
Returns:
An enum that either represents a successful RNNT operation or failure.
"""
training = grads is not None
if training:
grads *= 0.0 # zero grads
_, (denom, alphas, betas, llForward, llBackward, bigblank_durations) = self._prepare_workspace()
######## START EXECUTION ########
self.log_softmax(acts, denom)
# Compute alphas
gpu_rnnt_kernel.compute_multiblank_alphas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
acts,
denom,
self.sigma,
alphas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
bigblank_durations,
self.num_big_blanks,
)
if training:
# Compute betas
gpu_rnnt_kernel.compute_multiblank_betas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
acts,
denom,
self.sigma,
betas,
llBackward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
bigblank_durations,
self.num_big_blanks,
)
# Compute gradient
grad_blocks_per_grid = self.minibatch_ * self.maxT_ * self.maxU_
grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
gpu_rnnt_kernel.compute_multiblank_grad_kernel[
grad_blocks_per_grid, grad_threads_per_block, self.stream_, 0
](
grads,
acts,
denom,
self.sigma,
alphas,
betas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
bigblank_durations,
self.num_big_blanks,
self.fastemit_lambda_,
self.clamp_,
)
# // cost copy, negate (for log likelihood) and update with additional regularizers
# This needs to be done via CUDA, because we used temporary memory llForward
# passed to alpha, which was updated with log likelihoods.
# But copying this data into a pytorch pointer is more difficult (numba api is one way)
# Therefore launch a pointwise CUDA kernel to update the costs inplace from data of llForward
# Then negate to compute the loglikelihood.
threadsperblock = min(costs.shape[0], 32)
blockspergrid = (costs.shape[0] + (threadsperblock - 1)) // threadsperblock
rnnt_helper.compute_costs_data[blockspergrid, threadsperblock, self.stream_, 0](
llForward, costs, self.fastemit_lambda_
)
self.stream_.synchronize()
return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
def cost_and_grad(
self,
acts: torch.Tensor,
grads: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if (
acts is None
or grads is None
or costs is None
or pad_labels is None
or label_lengths is None
or input_lengths is None
):
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(acts, grads, costs, pad_labels, label_lengths, input_lengths)
def score_forward(
self,
acts: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if acts is None or costs is None or pad_labels is None or label_lengths is None or input_lengths is None:
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(acts, None, costs, pad_labels, label_lengths, input_lengths)
def _prepare_workspace(self) -> Tuple[int, Tuple[torch.Tensor, ...]]:
"""
Helper method that uses the workspace and constructs slices of it that can be used.
Returns:
An int, representing the offset of the used workspace (practically, the slice of the workspace consumed)
A tuple of tensors representing the shared workspace.
"""
used_offset, (denom, alphas, betas, llForward, llBackward) = super()._prepare_workspace()
bigblank_durations = self.big_blank_workspace[: self.num_big_blanks]
return used_offset, (denom, alphas, betas, llForward, llBackward, bigblank_durations)
class GPUTDT(GPURNNT):
def __init__(
self,
sigma: float,
omega: float,
num_durations: int,
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
workspace,
tdt_workspace,
blank: int,
fastemit_lambda: float,
clamp: float,
num_threads: int,
stream,
):
"""
Helper class to launch the CUDA Kernels to compute TDT Loss (https://arxiv.org/pdf/2211.03541).
Args:
sigma: Hyper-parameter related to the logit-normalization method in training tdt transducers.
omega: Hyper-parameter for sampled training; the probability with which a call is scored with the
standard RNNT recursion instead of the TDT recursion.
num_durations: Number of durations the model supports.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V + 1 + num-big-blanks
workspace: An allocated chunk of memory that will be sliced off and reshaped into required
blocks used as working memory.
tdt_workspace: An allocated chunk of memory that will be sliced off and reshaped into required
blocks used as working memory specifically for the tdt related computations.
blank: Index of the blank token in the vocabulary. Must be the last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of OMP threads to launch.
stream: Numba Cuda Stream.
"""
super().__init__(
minibatch, maxT, maxU, alphabet_size, workspace, blank, fastemit_lambda, clamp, num_threads, stream
)
self.tdt_workspace = cuda.as_cuda_array(
tdt_workspace
) # a flat vector of integer numbers that represents allocated memory slices
self.num_durations = num_durations
self.sigma = sigma
self.omega = omega
def compute_cost_and_score(
self,
label_acts: torch.Tensor,
duration_acts: torch.Tensor,
label_grads: Optional[torch.Tensor],
duration_grads: Optional[torch.Tensor],
costs: torch.Tensor,
labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
) -> global_constants.RNNTStatus:
"""
Compute both the loss and the gradients.
Args:
label_acts: A flattened tensor of shape [B, T, U, V] representing the activation matrix for tokens.
duration_acts: A flattened tensor of shape [B, T, U, D] representing the activation matrix for durations.
label_grads: A flattened zero tensor of same shape as label_acts.
duration_grads: A flattened zero tensor of same shape as duration_acts.
costs: A zero vector of length B which will be updated inplace with the log probability costs.
labels: A flattened matrix of labels of shape [B, U].
label_lengths: A vector of length B that contains the original lengths of the target sequence.
input_lengths: A vector of length B that contains the original lengths of the acoustic sequence.
Updates:
This will launch kernels that will update inline the following variables:
- *_grads: Gradients of the activation matrix wrt the costs vector.
- costs: Negative log likelihood of the forward variable.
Returns:
An enum that either represents a successful RNNT operation or failure.
"""
training = label_grads is not None
if training:
label_grads *= 0.0 # zero grads
duration_grads *= 0.0 # zero grads
_, (denom, alphas, betas, llForward, llBackward, durations) = self._prepare_workspace()
######## START EXECUTION ########
self.log_softmax(label_acts, denom)
r = random.uniform(0, 1)
if r < self.omega:
# Compute alphas
gpu_rnnt_kernel.compute_alphas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
label_acts,
denom,
alphas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
)
else:
# Compute alphas
gpu_rnnt_kernel.compute_tdt_alphas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
label_acts,
duration_acts,
denom,
self.sigma,
alphas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
durations,
self.num_durations,
)
if training:
# Compute betas
if r < self.omega:
gpu_rnnt_kernel.compute_betas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
label_acts,
denom,
betas,
llBackward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
)
# Compute gradient
grad_blocks_per_grid = self.minibatch_ * self.maxT_ * self.maxU_
grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
gpu_rnnt_kernel.compute_grad_kernel[grad_blocks_per_grid, grad_threads_per_block, self.stream_, 0](
label_grads,
label_acts,
denom,
alphas,
betas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
self.fastemit_lambda_,
self.clamp_,
)
else:
gpu_rnnt_kernel.compute_tdt_betas_kernel[self.minibatch_, self.maxU_, self.stream_, 0](
label_acts,
duration_acts,
denom,
self.sigma,
betas,
llBackward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
durations,
self.num_durations,
)
# Compute gradient
grad_blocks_per_grid = self.minibatch_ * self.maxT_ * self.maxU_
grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
gpu_rnnt_kernel.compute_tdt_grad_kernel[grad_blocks_per_grid, grad_threads_per_block, self.stream_, 0](
label_grads,
duration_grads,
label_acts,
duration_acts,
denom,
self.sigma,
alphas,
betas,
llForward,
input_lengths,
label_lengths,
labels,
self.minibatch_,
self.maxT_,
self.maxU_,
self.alphabet_size_,
self.blank_,
durations,
self.num_durations,
self.fastemit_lambda_,
self.clamp_,
)
# // cost copy, negate (for log likelihood) and update with additional regularizers
# This needs to be done via CUDA, because we used temporary memory llForward
# passed to alpha, which was updated with log likelihoods.
# But copying this data into a pytorch pointer is more difficult (numba api is one way)
# Therefore launch a pointwise CUDA kernel to update the costs inplace from data of llForward
# Then negate to compute the loglikelihood.
threadsperblock = min(costs.shape[0], 32)
blockspergrid = (costs.shape[0] + (threadsperblock - 1)) // threadsperblock
rnnt_helper.compute_costs_data[blockspergrid, threadsperblock, self.stream_, 0](
llForward, costs, self.fastemit_lambda_
)
self.stream_.synchronize()
return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
def cost_and_grad(
self,
label_acts: torch.Tensor,
duration_acts: torch.Tensor,
label_grads: torch.Tensor,
duration_grads: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if (
duration_acts is None
or label_acts is None
or label_grads is None
or duration_grads is None
or costs is None
or pad_labels is None
or label_lengths is None
or input_lengths is None
):
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(
label_acts, duration_acts, label_grads, duration_grads, costs, pad_labels, label_lengths, input_lengths
)
def score_forward(
self,
label_acts: torch.Tensor,
duration_acts: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if (
label_acts is None
or duration_acts is None
or costs is None
or pad_labels is None
or label_lengths is None
or input_lengths is None
):
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(
label_acts, duration_acts, None, None, costs, pad_labels, label_lengths, input_lengths
)
def _prepare_workspace(self) -> Tuple[int, Tuple[torch.Tensor, ...]]:
"""
Helper method that uses the workspace and constructs slices of it that can be used.
Returns:
An int, representing the offset of the used workspace (practically, the slice of the workspace consumed)
A tuple of tensors representing the shared workspace.
"""
used_offset, (denom, alphas, betas, llForward, llBackward) = super()._prepare_workspace()
durations = self.tdt_workspace[: self.num_durations]
return used_offset, (denom, alphas, betas, llForward, llBackward, durations)
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/cuda_utils/gpu_rnnt.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import rnnt_helper
GPU_RNNT_THREAD_SIZE = 256
INF = 10000.0
@cuda.jit(device=True, inline=True)
def logp(
denom: torch.Tensor, acts: torch.Tensor, maxT: int, maxU: int, alphabet_size: int, mb: int, t: int, u: int, v: int
):
"""
Compute the sum of log probability from the activation tensor and its denominator.
Args:
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
mb: Batch indexer.
t: Acoustic sequence timestep indexer.
u: Target sequence timestep indexer.
v: Vocabulary token indexer.
Returns:
The sum of logprobs[mb, t, u, v] + denom[mb, t, u]
"""
col = (mb * maxT + t) * maxU + u
return denom[col] + acts[col * alphabet_size + v]
@cuda.jit(device=True, inline=True)
def logp_duration(acts: torch.Tensor, maxT: int, maxU: int, num_durations: int, mb: int, t: int, u: int, v: int):
col = (mb * maxT + t) * maxU + u
return acts[col * num_durations + v]
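# An illustrative CPU check (hypothetical helper, not part of NeMo) of the flat indexing used
# by logp() above: with `acts` viewed as [B, T, U, V+1] and `denom` as [B, T, U], the device
# function returns denom[mb, t, u] + acts[mb, t, u, v].
def _logp_reference(denom_flat, acts_flat, maxT, maxU, alphabet_size, mb, t, u, v):
    import numpy as np

    denom_3d = np.asarray(denom_flat).reshape(-1, maxT, maxU)
    acts_4d = np.asarray(acts_flat).reshape(-1, maxT, maxU, alphabet_size)
    return denom_3d[mb, t, u] + acts_4d[mb, t, u, v]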
@cuda.jit()
def compute_alphas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
alphas: torch.Tensor,
llForward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
):
"""
Compute alpha (forward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# alphas += offset # pointer offset, ignored since we explicitly add offset
# Initialize alpha[b, t=0, u=0] for all b in B
if u == 0:
alphas[offset] = 0
# sync until all alphas are initialized
cuda.syncthreads()
# Ordinary alpha calculations, broadcast across B=b and U=u
# Look up forward variable calculation from rnnt_numpy.forward_pass()
for n in range(1, T + U - 1):
t = n - u
if u == 0:
# for t in range(1, T) step to initialize alphas[b, t, 0]
if t > 0 and t < T:
alphas[offset + t * maxU + u] = alphas[offset + (t - 1) * maxU + u] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t - 1, 0, blank_
)
elif u < U:
# for u in range(1, U) step to initialize alphas[b, 0, u]
if t == 0:
alphas[offset + u] = alphas[offset + u - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, 0, u - 1, labels[u - 1]
)
# for t in range(1, T) for u in range(1, U) step to compute alphas[b, t, u]
elif t > 0 and t < T:
no_emit = alphas[offset + (t - 1) * maxU + u] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t - 1, u, blank_
)
emit = alphas[offset + t * maxU + u - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u - 1, labels[u - 1]
)
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, alphas[b, T-1, U - 1] + logprobs[b, T-1, U-1, blank] + denom[b, T-1, U-1] gives
# log-likelihood of forward pass.
if u == 0:
loglike = alphas[offset + (T - 1) * maxU + U - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_
)
llForward[b] = loglike
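# A single-sample CPU sketch (hypothetical helper, not part of NeMo) of the recursion the
# kernel above evaluates one anti-diagonal (t + u = const) at a time. `log_probs[t, u, v]` is
# assumed to already be a normalized log-probability (acts + denom).
def _alpha_reference(log_probs, labels, blank):
    import numpy as np

    T, U, _ = log_probs.shape  # U = len(labels) + 1
    alphas = np.full((T, U), -np.inf)
    alphas[0, 0] = 0.0
    for t in range(T):
        for u in range(U):
            if t > 0:  # arrive by emitting blank from (t - 1, u)
                alphas[t, u] = np.logaddexp(alphas[t, u], alphas[t - 1, u] + log_probs[t - 1, u, blank])
            if u > 0:  # arrive by emitting labels[u - 1] from (t, u - 1)
                alphas[t, u] = np.logaddexp(alphas[t, u], alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]])
    # Forward log-likelihood: final blank emission from the last lattice node.
    return alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]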
@cuda.jit()
def compute_betas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
betas: torch.Tensor,
llBackward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
):
"""
Compute beta (backward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the backward variable
probabilities.
llBackward: Zero tensor of shape [B]. Represents the log-likelihood of the backward pass.
Returned as the backward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- betas: backward variable scores.
- llBackward: log-likelihood of backward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# betas += offset # pointer offset, ignored since we explicitly add offset
# Initialize beta[b, t=T-1, u=U-1] for all b in B with log_probs[b, t=T-1, u=U-1, blank]
if u == 0:
betas[offset + (T - 1) * maxU + U - 1] = logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
# sync until all betas are initialized
cuda.syncthreads()
# Ordinary beta calculations, broadcast across B=b and U=u
# Look up backward variable calculation from rnnt_numpy.backward_pass()
for n in range(T + U - 2, -1, -1):
t = n - u
if u == (U - 1):
# for t in reversed(range(T - 1)) step to initialize betas[b, t, U-1]
if t >= 0 and t < (T - 1):
betas[offset + t * maxU + U - 1] = betas[offset + (t + 1) * maxU + U - 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_
)
elif u < U:
if t == T - 1:
# for u in reversed(range(U - 1)) step to initialize betas[b, T-1, u]
betas[offset + (T - 1) * maxU + u] = betas[offset + (T - 1) * maxU + u + 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u]
)
elif (t >= 0) and (t < T - 1):
# for t in reversed(range(T - 1)) for u in reversed(range(U - 1)) step to compute betas[b, t, u]
no_emit = betas[offset + (t + 1) * maxU + u] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_
)
emit = betas[offset + t * maxU + u + 1] + logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u]
)
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, betas[b, 0, 0] gives
# log-likelihood of backward pass.
if u == 0:
llBackward[b] = betas[offset]
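# The matching single-sample CPU sketch (hypothetical helper, not part of NeMo) of the backward
# recursion computed by the kernel above, using the same normalized `log_probs` convention as
# `_alpha_reference`.
def _beta_reference(log_probs, labels, blank):
    import numpy as np

    T, U, _ = log_probs.shape  # U = len(labels) + 1
    betas = np.full((T, U), -np.inf)
    betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank]
    for t in range(T - 1, -1, -1):
        for u in range(U - 1, -1, -1):
            if t < T - 1:  # leave by emitting blank towards (t + 1, u)
                betas[t, u] = np.logaddexp(betas[t, u], betas[t + 1, u] + log_probs[t, u, blank])
            if u < U - 1:  # leave by emitting labels[u] towards (t, u + 1)
                betas[t, u] = np.logaddexp(betas[t, u], betas[t, u + 1] + log_probs[t, u, labels[u]])
    return betas[0, 0]  # backward log-likelihood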
@cuda.jit()
def compute_grad_kernel(
grads: torch.Tensor,
acts: torch.Tensor,
denom: torch.Tensor,
alphas: torch.Tensor,
betas: torch.Tensor,
logll: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
fastemit_lambda: float,
clamp: float,
):
"""
Compute gradients over the transduction step.
Args:
grads: Zero Tensor of shape [B, T, U, V+1]. Is updated by this kernel to contain the gradients
of this batch of samples.
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Alpha variable, contains forward probabilities. A tensor of shape [B, T, U].
betas: Beta variable, contains backward probabilities. A tensor of shape [B, T, U].
logll: Log-likelihood of the forward variable, represented as a vector of shape [B].
Represents the log-likelihood of the forward pass.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
Updates:
Kernel inplace updates the following inputs:
- grads: Gradients with respect to the log likelihood (logll).
"""
# Kernel call:
# blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
# threads_per_block = constant buffer size of parallel threads (v :: Constant)
tid = cuda.threadIdx.x # represents v, taking steps of some constant size
idx = tid # index of v < V+1; in steps of constant buffer size
col = cuda.blockIdx.x # represents a fused index of b * t * u
# Decompose original indices from fused `col`
u = col % maxU # (b * t * u) % u = u
bt = (col - u) // maxU # (b * t * u - u) // U = b * t
t = bt % maxT # (b * t) % t = t
mb = (bt - t) // maxT # (b * t - t) // T = b
# constants
T = xlen[mb] # select AM length of current sample
U = ylen[mb] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[mb] # labels = mlabels + mb * (maxU - 1);
# Buffered gradient calculations, broadcast across B=b, T=t and U=u, looped over V with some constant stride.
# Look up gradient calculation from rnnt_numpy.compute_gradient()
if t < T and u < U:
# For cuda kernels, maximum number of threads per block is limited to some value.
# However, it may be the case that vocabulary size is larger than this limit
# To work around this, an arbitrary thread buffer size is chosen such that,
# 1) each element within the thread pool operates independently of the other
# 2) An inner while loop moves the index of each buffer element by the size of the buffer itself,
# such that all elements of the vocabulary are covered in ceil((V + 1) / thread_buffer) steps.
# As such, each thread will perform the while loop ceil((V + 1) / thread_buffer) times
while idx < alphabet_size:
# remember, `col` represents the tri-index [b, t, u]
# therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
logpk = denom[col] + acts[col * alphabet_size + idx]
# initialize the grad of the sample acts[b, t, u, v]
grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])
# If FastEmit regularization is enabled, calculate the gradient of the probability of predicting the next label
# at the current timestep.
# The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148, multiplied by the log probability
# of the current step (t, u), normalized by the total log likelihood.
# Once the gradient has been calculated, scale it by `fastemit_lambda`, as in Equation 10.
if fastemit_lambda > 0.0 and u < U - 1:
fastemit_grad = fastemit_lambda * math.exp(
alphas[col] # alphas(t, u)
+ (denom[col] + acts[col * alphabet_size + labels[u]]) # y_hat(t, u)
+ betas[col + 1] # betas(t, u+1)
+ logpk # log Pr(k|t, u)
- logll[mb] # total log likelihood for normalization
)
else:
fastemit_grad = 0.0
# Update the gradient of act[b, t, u, v] with the gradient from FastEmit regularization
grad = grad + fastemit_grad
# // grad to last blank transition
# grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u] + logpk - logll[b])
if (idx == blank_) and (t == T - 1) and (u == U - 1):
grad -= math.exp(alphas[col] + logpk - logll[mb])
# grad of blank across t < T;
# grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u] + logpk - logll[b] + betas[b, t + 1, u])
if (idx == blank_) and (t < T - 1):
grad -= math.exp(alphas[col] + logpk - logll[mb] + betas[col + maxU])
# grad of correct token across u < U;
# grad[b, t, u<U-1, v=label[u]] -= exp(alphas[b, t, u] + logpk - logll[b] + betas[b, t, u+1])
# Scale the gradient by (1.0 + FastEmit_lambda) in log space, then exponentiate
if (u < U - 1) and (idx == labels[u]):
# exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
# multiplying (1.0 + fastemit_lambda) with result.
grad -= math.exp(math.log1p(fastemit_lambda) + alphas[col] + logpk - logll[mb] + betas[col + 1])
# update grads[b, t, u, v] = grad
grads[col * alphabet_size + idx] = grad
# clamp gradient (if needed)
if clamp > 0.0:
g = grads[col * alphabet_size + idx]
g = min(g, clamp)
g = max(g, -clamp)
grads[col * alphabet_size + idx] = g
# update internal index through the thread_buffer;
# until idx < V + 1, such that entire vocabulary has been updated.
idx += GPU_RNNT_THREAD_SIZE
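# Summary of the kernel above (not additional NeMo API): for a lattice node (b, t, u) and
# vocabulary entry v, the gradient written to grads[b, t, u, v] is, ignoring FastEmit and
# clamping,
#
#     exp(alphas[b, t, u] + betas[b, t, u] + logpk - logll[b])
#     - exp(alphas[b, t, u] + logpk - logll[b] + beta_next)
#
# where logpk = denom[b, t, u] + acts[b, t, u, v], and the subtracted term only appears on
# paths that actually consume v at (t, u): beta_next = betas[b, t + 1, u] for v == blank_ with
# t < T - 1, beta_next = betas[b, t, u + 1] for v == labels[u] with u < U - 1, and
# beta_next = 0 for the final blank at (T - 1, U - 1). FastEmit adds a scaled emission term
# and `clamp`, when positive, limits the result to [-clamp, clamp].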
@cuda.jit()
def compute_multiblank_alphas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
llForward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor,
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
big_blank_duration: torch.Tensor,
num_big_blanks: int,
):
"""
Compute alpha (forward variable) probabilities for the multi-blank transducer loss (https://arxiv.org/pdf/2211.03541).
Args:
acts: Tensor of shape [B, T, U, V + 1 + num_big_blanks] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
sigma: Hyper-parameter for logit-undernormalization technique for training multi-blank transducers.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT standard blank token in the vocabulary.
big_blank_durations: Vector of supported big blank durations of the model.
num_big_blanks: Number of big blanks of the model.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# Initialize alpha[b, t=0, u=0] for all b in B
if u == 0:
alphas[offset] = 0
# sync until all alphas are initialized
cuda.syncthreads()
# Ordinary alpha calculations, broadcast across B=b and U=u
# Look up forward variable calculation from rnnt_numpy.forward_pass()
# Note: because of the logit under-normalization, every time logp() is called,
# it is always followed by a `-sigma` term.
for n in range(1, T + U - 1):
t = n - u
if u == 0:
# for t in range(1, T) step to initialize alphas[b, t, 0]
if t > 0 and t < T:
alphas[offset + t * maxU + u] = (
alphas[offset + (t - 1) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t - 1, 0, blank_)
- sigma
)
# Now add the weights for big blanks.
for i in range(num_big_blanks):
if t >= big_blank_duration[i]:
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
alphas[offset + t * maxU + u],
alphas[offset + (t - big_blank_duration[i]) * maxU + u]
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - big_blank_duration[i], 0, blank_ - 1 - i
)
- sigma,
)
elif u < U:
# for u in range(1, U) step to initialize alphas[b, 0, u]
if t == 0:
alphas[offset + u] = (
alphas[offset + u - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, 0, u - 1, labels[u - 1])
- sigma
)
# for t in range(1, T) for u in range(1, U) step to compute alphas[b, t, u]
elif t > 0 and t < T:
no_emit = (
alphas[offset + (t - 1) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t - 1, u, blank_)
- sigma
)
emit = (
alphas[offset + t * maxU + u - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u - 1, labels[u - 1])
- sigma
)
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# Now add the weights for big blanks.
for i in range(num_big_blanks):
if t >= big_blank_duration[i]:
# big-blank weight here is
# alpha(t - duration, u) * p(big-blank | t - duration, u) / exp(sigma), in log domain
# do this for all big-blanks if the above condition is met
big_blank_no_emit = (
alphas[offset + (t - big_blank_duration[i]) * maxU + u]
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - big_blank_duration[i], u, blank_ - 1 - i
)
- sigma
)
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
alphas[offset + t * maxU + u], big_blank_no_emit
)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, alphas[b, T-1, U - 1] + logprobs[b, T-1, U-1, blank] + denom[b, T-1, U-1] gives
# log-likelihood of forward pass.
if u == 0:
loglike = (
alphas[offset + (T - 1) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
- sigma
)
# Now add the weights for big blanks for the final weight computation.
for i in range(num_big_blanks):
if T >= big_blank_duration[i]:
big_blank_loglike = (
alphas[offset + (T - big_blank_duration[i]) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - big_blank_duration[i], U - 1, blank_ - 1 - i)
- sigma
)
loglike = rnnt_helper.log_sum_exp(loglike, big_blank_loglike)
llForward[b] = loglike
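# A minimal single-sample CPU sketch (hypothetical helper, not part of NeMo) of the extra
# big-blank transitions the kernel above adds on top of the standard alpha recursion.
# `log_probs[t, u, v]` is assumed to be a normalized log-probability, `big_blank_ids[i]` the
# vocabulary index of the i-th big blank and `durations[i]` its duration.
def _multiblank_alpha_update(alphas, log_probs, t, u, sigma, big_blank_ids, durations):
    import numpy as np

    for blank_id, d in zip(big_blank_ids, durations):
        if t >= d:
            # A big blank jumps `d` time steps at once; sigma implements logit under-normalization.
            alphas[t, u] = np.logaddexp(
                alphas[t, u],
                alphas[t - d, u] + log_probs[t - d, u, blank_id] - sigma,
            )
    return alphas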
@cuda.jit()
def compute_multiblank_betas_kernel(
acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
betas: torch.Tensor,
llBackward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
big_blank_duration: torch.Tensor,
num_big_blanks: int,
):
"""
Compute beta (backward variable) probabilities for multi-blank transducer loss (https://arxiv.org/pdf/2211.03541).
Args:
acts: Tensor of shape [B, T, U, V + 1 + num-big-blanks] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
sigma: Hyper-parameter for logit-undernormalization technique for training multi-blank transducers.
betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the backward variable
probabilities.
llBackward: Zero tensor of shape [B]. Represents the log-likelihood of the backward pass.
Returned as the backward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT standard blank token in the vocabulary.
big_blank_durations: Vector of supported big blank durations of the model.
num_big_blanks: Number of big blanks of the model.
Updates:
Kernel inplace updates the following inputs:
- betas: backward variable scores.
- llBackward: log-likelihood of backward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# Note: just like the alphas, because of the logit under-normalization, every time
# logp() is called, it is always followed by a `-sigma` term.
# Initialize beta[b, t=T-1, u=U-1] for all b in B with log_probs[b, t=T-1, u=U-1, blank]
if u == 0:
betas[offset + (T - 1) * maxU + U - 1] = (
logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_) - sigma
)
# sync until all betas are initialized
cuda.syncthreads()
# Ordinary beta calculations, broadcast across B=b and U=u
# Look up backward variable calculation from rnnt_numpy.backward_pass()
for n in range(T + U - 2, -1, -1):
t = n - u
if u == (U - 1):
# for t in reversed(range(T - 1)) step to initialize betas[b, t, U-1]
if t >= 0 and t < (T - 1):
# beta[t, U - 1] = beta[t + 1, U - 1] * p(blank | t, U - 1) / exp(sigma)
# this part is the same as regular RNN-T.
betas[offset + t * maxU + U - 1] = (
betas[offset + (t + 1) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_)
- sigma
)
# now add the weights from big blanks
for i in range(num_big_blanks):
if t + big_blank_duration[i] < T:
# adding to beta[t, U - 1] of weight (in log domain),
# beta[t + duration, U - 1] * p(big-blank | t, U - 1) / exp(sigma)
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
betas[offset + (t + big_blank_duration[i]) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_ - 1 - i)
- sigma,
)
elif t + big_blank_duration[i] == T and big_blank_duration[i] != 1:
# adding to beta[T - duration, U - 1] of weight (in log domain),
# p(big-blank | T - duration, U - 1) / exp(sigma)
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_ - 1 - i) - sigma,
)
elif u < U:
if t == T - 1:
# for u in reversed(range(U - 1)) step to initialize betas[b, T-1, u]
betas[offset + (T - 1) * maxU + u] = (
betas[offset + (T - 1) * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u])
- sigma
)
elif (t >= 0) and (t < T - 1):
# for t in reversed(range(T - 1)) for u in reversed(range(U - 1)) step to compute betas[b, t, u]
no_emit = (
betas[offset + (t + 1) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_)
- sigma
)
emit = (
betas[offset + t * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u])
- sigma
)
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# now add the weights from big blanks
for i in range(num_big_blanks):
if t < T - big_blank_duration[i]:
# added weight for the big-blank,
# beta[t + duration, u] * p(big-blank | t, u) / exp(sigma)
big_blank_no_emit = (
betas[offset + (t + big_blank_duration[i]) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_ - 1 - i)
- sigma
)
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + u], big_blank_no_emit
)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, betas[b, 0, 0] gives
# log-likelihood of backward pass.
if u == 0:
llBackward[b] = betas[offset]
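# The pure-Python sketch below is not part of the original file; it restates the
# multi-blank backward recursion implemented by compute_multiblank_betas_kernel for a
# single utterance, which can help when reading the flattened indexing above.
# `logp_fn(t, u, v)` is a hypothetical callable standing in for the normalized
# logp(denom, acts, ...) lookup; big blank k is assumed to live at index blank - 1 - k.
def _multiblank_betas_reference(logp_fn, labels, T, U, big_blank_durations, blank, sigma):
    import math

    def logaddexp(a, b):
        if a == -math.inf:
            return b
        if b == -math.inf:
            return a
        m = max(a, b)
        return m + math.log(math.exp(a - m) + math.exp(b - m))

    beta = [[-math.inf] * U for _ in range(T)]
    beta[T - 1][U - 1] = logp_fn(T - 1, U - 1, blank) - sigma
    for t in range(T - 1, -1, -1):
        for u in range(U - 1, -1, -1):
            if t == T - 1 and u == U - 1:
                continue
            score = -math.inf
            if t < T - 1:
                # standard blank: advance t by exactly one frame
                score = logaddexp(score, beta[t + 1][u] + logp_fn(t, u, blank) - sigma)
            if u < U - 1:
                # label emission: advance u by one
                score = logaddexp(score, beta[t][u + 1] + logp_fn(t, u, labels[u]) - sigma)
            for k, d in enumerate(big_blank_durations):
                if t + d < T:
                    # big blank k: advance t by its duration d
                    score = logaddexp(score, beta[t + d][u] + logp_fn(t, u, blank - 1 - k) - sigma)
                elif t + d == T and u == U - 1 and d != 1:
                    # big blank that exactly exhausts the remaining frames
                    score = logaddexp(score, logp_fn(t, u, blank - 1 - k) - sigma)
            beta[t][u] = score
    return beta[0][0]  # log-likelihood of the backward pass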
@cuda.jit()
def compute_multiblank_grad_kernel(
grads: torch.Tensor,
acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
betas: torch.Tensor,
logll: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
big_blank_duration: torch.Tensor,
num_big_blanks: int,
fastemit_lambda: float,
clamp: float,
):
"""
Compute gradients for multi-blank transducer loss (https://arxiv.org/pdf/2211.03541).
Args:
grads: Zero Tensor of shape [B, T, U, V + 1 + num_big_blanks]. Is updated by this kernel to contain the gradients
of this batch of samples.
acts: Tensor of shape [B, T, U, V + 1 + num_big_blanks] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
sigma: Hyper-parameter for logit-undernormalization technique for training multi-blank transducers.
alphas: Alpha variable, contains forward probabilities. A tensor of shape [B, T, U].
        betas: Beta variable, contains backward probabilities. A tensor of shape [B, T, U].
logll: Log-likelihood of the forward variable, represented as a vector of shape [B].
Represents the log-likelihood of the forward pass.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
        big_blank_duration: Vector of supported big blank durations of the model.
num_big_blanks: Number of big blanks of the model.
Updates:
Kernel inplace updates the following inputs:
- grads: Gradients with respect to the log likelihood (logll).
"""
# Kernel call:
# blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
# threads_per_block = constant buffer size of parallel threads (v :: Constant)
tid = cuda.threadIdx.x # represents v, taking steps of some constant size
idx = tid # index of v < V+1; in steps of constant buffer size
col = cuda.blockIdx.x # represents a fused index of b * t * u
# Decompose original indices from fused `col`
u = col % maxU # (b * t * u) % u = u
bt = (col - u) // maxU # (b * t * u - u) // U = b * t
t = bt % maxT # (b * t) % t = t
mb = (bt - t) // maxT # (b * t - t) // T = b
# constants
T = xlen[mb] # select AM length of current sample
U = ylen[mb] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[mb] # labels = mlabels + mb * (maxU - 1);
# Buffered gradient calculations, broadcast across B=b, T=t and U=u, looped over V with some constant stride.
# Look up gradient calculation from rnnt_numpy.compute_gradient()
if t < T and u < U:
# For cuda kernels, maximum number of threads per block is limited to some value.
# However, it may be the case that vocabulary size is larger than this limit
# To work around this, an arbitrary thread buffer size is chosen such that,
# 1) each element within the thread pool operates independently of the other
# 2) An inner while loop moves the index of each buffer element by the size of the buffer itself,
# such that all elements of the vocabulary size are covered in (V + 1 // thread_buffer) number of steps.
# As such, each thread will perform the while loop at least (V + 1 // thread_buffer) number of times
while idx < alphabet_size:
# remember, `col` represents the tri-index [b, t, u]
# therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
logpk = denom[col] + acts[col * alphabet_size + idx]
# initialize the grad of the sample acts[b, t, u, v]
grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])
# In all of the following computation, whenever logpk is used, we
# need to subtract sigma based on our derivation of the gradient of
# the logit under-normalization method.
            # If FastEmit regularization is enabled, calculate the gradient of probability of predicting the next label
# at the current timestep.
# The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148, multiplied by the log probability
# of the current step (t, u), normalized by the total log likelihood.
# Once the gradient has been calculated, scale it by `fastemit_lambda`, as in Equation 10.
if fastemit_lambda > 0.0 and u < U - 1:
fastemit_grad = fastemit_lambda * math.exp(
alphas[col] # alphas(t, u)
+ (denom[col] + acts[col * alphabet_size + labels[u]])
+ betas[col + 1] # betas(t, u+1)
+ logpk # log Pr(k|t, u)
- sigma
- logll[mb] # total log likelihood for normalization
)
else:
fastemit_grad = 0.0
# Update the gradient of act[b, t, u, v] with the gradient from FastEmit regularization
grad = grad + fastemit_grad
# grad to last blank transition
            # grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b])
if (idx == blank_) and (t == T - 1) and (u == U - 1):
grad -= math.exp(alphas[col] + logpk - sigma - logll[mb])
else:
# this is one difference of the multi-blank gradient from standard RNN-T
# gradient -- basically, wherever the blank_ symbol is addressed in the
# original code, we need to do similar things to big blanks, and we need
# to change the if conditions to match the duration of the big-blank.
                # grad[b, T-duration, U-1, v=big-blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b])
for i in range(num_big_blanks):
if (idx == blank_ - 1 - i) and (t == T - big_blank_duration[i]) and (u == U - 1):
grad -= math.exp(alphas[col] + logpk - sigma - logll[mb])
# grad of blank across t < T;
            # grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + betas[b, t + 1, u])
if (idx == blank_) and (t < T - 1):
grad -= math.exp(alphas[col] + logpk - sigma - logll[mb] + betas[col + maxU])
else:
# This is another difference between multi-blank and RNN-T gradients.
# Now we consider gradients for big-blanks.
# grad[b, t<T-duration, u, v=big-blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + betas[b, t + duration, u])
for i in range(num_big_blanks):
if (idx == blank_ - 1 - i) and (t < T - big_blank_duration[i]):
grad -= math.exp(
alphas[col] + logpk - sigma - logll[mb] + betas[col + big_blank_duration[i] * maxU]
)
# grad of correct token across u < U;
# grad[b, t, u<U-1, v=label[u]] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + betas[b, t, u+1])
# Scale the gradient by (1.0 + FastEmit_lambda) in log space, then exponentiate
if (u < U - 1) and (idx == labels[u]):
# exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
# multiplying (1.0 + fastemit_lambda) with result.
grad -= math.exp(
math.log1p(fastemit_lambda) + alphas[col] + logpk - sigma - logll[mb] + betas[col + 1]
)
# update grads[b, t, u, v] = grad
grads[col * alphabet_size + idx] = grad
# clamp gradient (if needed)
if clamp > 0.0:
g = grads[col * alphabet_size + idx]
g = min(g, clamp)
g = max(g, -clamp)
grads[col * alphabet_size + idx] = g
# update internal index through the thread_buffer;
# until idx < V + 1, such that entire vocabulary has been updated.
idx += GPU_RNNT_THREAD_SIZE
@cuda.jit()
def compute_tdt_alphas_kernel(
acts: torch.Tensor,
duration_acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
llForward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
durations: torch.Tensor,
num_durations: int,
):
"""
Compute alpha (forward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V] flattened. Represents the logprobs activation tensor for tokens.
duration_acts: Tensor of shape [B, T, U, D] flattened. Represents the logprobs activation tensor for duration.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor for tokens.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the TDT blank token in the vocabulary. Must be the last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# alphas += offset # pointer offset, ignored since we explicitly add offset
    # Initialize alpha[b, t=0, u=0] for all b in B
if u == 0:
alphas[offset] = 0
# sync until all alphas are initialized
cuda.syncthreads()
# Ordinary alpha calculations, broadcast across B=b and U=u
# Look up forward variable calculation from rnnt_numpy.forward_pass()
for n in range(1, T + U - 1):
t = n - u
if u == 0:
# when u == 0, we only consider blank emissions.
if t > 0 and t < T:
alphas[offset + t * maxU + u] = -INF
for i in range(1, num_durations): # skip 0 since blank emission has to advance by at least one
if t >= durations[i]:
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
alphas[offset + t * maxU + u], # the current alpha value
alphas[offset + (t - durations[i]) * maxU + u] # alpha(t - duration, u)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - durations[i], u, blank_
) # logp of blank emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t - durations[i], u, i
), # logp of duration
)
else:
break # since durations are in ascending order, when we encounter a duration that is too large, then
# there is no need to check larger durations after that.
elif u < U:
# when t == 0, we only consider the non-blank emission.
if t == 0:
alphas[offset + u] = (
alphas[offset + u - 1] # alpha(t, u - 1)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t, u - 1, labels[u - 1]
) # logp of token emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t, u - 1, 0
) # t = 0, so it must be duration = 0. Therefore the last argument passed to logp_duration() is 0.
)
# now we have t != 0 and u != 0, and we need to consider both non-blank and blank emissions.
elif t > 0 and t < T:
no_emit = -INF # no_emit stores the score for all blank emissions.
for i in range(1, num_durations):
if t >= durations[i]:
no_emit = rnnt_helper.log_sum_exp(
no_emit, # current score
alphas[offset + (t - durations[i]) * maxU + u] # alpha(t - duration, u)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - durations[i], u, blank_
) # logp of blank emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t - durations[i], u, i
), # logp of duration
)
else:
break # we can exit the loop early here, same as the case for u == 0 above.
emit = -INF # emit stores the score for non-blank emissions.
for i in range(0, num_durations):
if t >= durations[i]:
emit = rnnt_helper.log_sum_exp(
emit, # current score
alphas[offset + (t - durations[i]) * maxU + u - 1] # alpha(t - duration, u - 1)
+ logp(
denom, acts, maxT, maxU, alphabet_size, b, t - durations[i], u - 1, labels[u - 1]
) # logp of non-blank emission
- sigma # logit under-normalization
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t - durations[i], u - 1, i
), # logp of duration
)
else:
break # we can exit the loop early here, same as the case for u == 0 above.
# combining blank and non-blank emissions.
alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
    # After final sync, the forward log-likelihood can be computed as the summation of
# alpha(T - duration, U - 1) + logp(blank, duration | t - duration, U - 1), over different durations.
if u == 0:
# first we consider duration = 1
loglike = (
alphas[offset + (T - 1) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
- sigma
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - 1, U - 1, 1)
)
# then we add the scores for duration > 1, if such durations are possible given the audio lengths.
for i in range(2, num_durations):
if T >= durations[i]:
big_blank_loglike = (
alphas[offset + (T - durations[i]) * maxU + U - 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - durations[i], U - 1, blank_)
- sigma
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - durations[i], U - 1, i)
)
loglike = rnnt_helper.log_sum_exp(loglike, big_blank_loglike)
else:
break
llForward[b] = loglike
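# The pure-Python sketch below is not part of the original file; it restates the TDT
# forward recursion implemented by compute_tdt_alphas_kernel for one utterance.
# `token_logp(t, u, v)` and `dur_logp(t, u, i)` are hypothetical callables standing in
# for the logp()/logp_duration() lookups; `durations` is assumed ascending with
# durations[0] == 0, as in TDT models.
def _tdt_alphas_reference(token_logp, dur_logp, labels, T, U, durations, blank, sigma):
    import math

    def logaddexp(a, b):
        if a == -math.inf:
            return b
        if b == -math.inf:
            return a
        m = max(a, b)
        return m + math.log(math.exp(a - m) + math.exp(b - m))

    alpha = [[-math.inf] * U for _ in range(T)]
    alpha[0][0] = 0.0
    for t in range(T):
        for u in range(U):
            if t == 0 and u == 0:
                continue
            score = -math.inf
            # blank transitions keep u fixed and advance t by a non-zero duration
            for i, d in enumerate(durations):
                if i == 0 or t < d:
                    continue
                score = logaddexp(
                    score, alpha[t - d][u] + token_logp(t - d, u, blank) - sigma + dur_logp(t - d, u, i)
                )
            # label transitions advance u by one and t by any duration (including 0)
            if u > 0:
                for i, d in enumerate(durations):
                    if t < d:
                        continue
                    score = logaddexp(
                        score,
                        alpha[t - d][u - 1]
                        + token_logp(t - d, u - 1, labels[u - 1])
                        - sigma
                        + dur_logp(t - d, u - 1, i),
                    )
            alpha[t][u] = score
    # terminate with a blank that consumes the remaining frames
    loglike = -math.inf
    for i, d in enumerate(durations):
        if i == 0 or T < d:
            continue
        loglike = logaddexp(
            loglike, alpha[T - d][U - 1] + token_logp(T - d, U - 1, blank) - sigma + dur_logp(T - d, U - 1, i)
        )
    return loglike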
@cuda.jit()
def compute_tdt_betas_kernel(
acts: torch.Tensor,
duration_acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
betas: torch.Tensor,
llBackward: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
durations: torch.Tensor,
num_durations: int,
):
"""
Compute beta (backward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V] flattened. Represents the logprobs activation tensor for tokens.
        duration_acts: Tensor of shape [B, T, U, D] flattened. Represents the logprobs activation tensor for durations.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the backward variable
probabilities.
llBackward: Zero tensor of shape [B]. Represents the log-likelihood of the backward pass.
Returned as the backward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- betas: backward variable scores.
- llBackward: log-likelihood of backward variable.
"""
# // launch B blocks, each block has U threads
b = cuda.blockIdx.x # // batch id
u = cuda.threadIdx.x # label id, u
T = xlen[b] # select AM length of current sample
U = ylen[b] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[b] # mb label start point, equivalent to mlabels + b * (maxU - 1)
offset = b * maxT * maxU # pointer indexing offset
# betas += offset # pointer offset, ignored since we explicitly add offset
    # Initialize beta[b, t=T-1, u=U-1] for all b in B with log_probs[b, t=T-1, u=U-1, blank]
if u == 0:
betas[offset + (T - 1) * maxU + U - 1] = (
logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
- sigma
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - 1, U - 1, 1)
)
# sync until all betas are initialized
cuda.syncthreads()
# Ordinary beta calculations, broadcast across B=b and U=u
# Look up backward variable calculation from rnnt_numpy.backward_pass()
for n in range(T + U - 2, -1, -1):
t = n - u
if u == U - 1:
# u == U - 1, we only consider blank emissions.
if t >= 0 and t + 1 < T:
betas[offset + t * maxU + U - 1] = -INF
for i in range(1, num_durations):
                    # although similar, the computation for betas is slightly more complex for boundary cases.
# the following two cases correspond to whether t is exactly certain duration away from T.
# and they have slightly different update rules.
if t + durations[i] < T:
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
betas[
offset + (t + durations[i]) * maxU + U - 1
] # beta[t, U - 1] depends on the value beta[t + duration, U - 1] here.
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_) # log prob of blank
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t, U - 1, i
) # log prob of duration (durations[i])
- sigma, # for logit undernormalization
)
elif t + durations[i] == T:
betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
betas[offset + t * maxU + U - 1],
# here we have one fewer term than the "if" block above. This could be seen as having "0" here since
# beta[t + duration, U - 1] isn't defined because t + duration is out of bound.
logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_) # log prob of blank
+ logp_duration(
duration_acts, maxT, maxU, num_durations, b, t, U - 1, i
) # log prob of duration (durations[i])
- sigma, # for logit undernormalization. Basically every time sigma shows up is because of logit undernormalization.
)
elif u < U - 1:
if t == T - 1:
# t == T - 1, so we only consider non-blank with duration 0. (Note, we can't have blank emissions with duration = 0)
betas[offset + (T - 1) * maxU + u] = (
betas[offset + (T - 1) * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u]) # non-blank log prob
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, T - 1, u, 0) # log prob of duration 0
- sigma
)
elif t >= 0 and t < T - 1:
                # now we need to consider both blank and non-blank emissions. Similar to alphas, we first compute them separately with no_emit and emit.
no_emit = -INF
for i in range(1, num_durations):
if t + durations[i] < T:
no_emit = rnnt_helper.log_sum_exp(
no_emit,
betas[offset + (t + durations[i]) * maxU + u]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_)
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, t, u, i)
- sigma,
)
emit = -INF
for i in range(0, num_durations):
if t + durations[i] < T:
emit = rnnt_helper.log_sum_exp(
emit,
betas[offset + (t + durations[i]) * maxU + u + 1]
+ logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u])
+ logp_duration(duration_acts, maxT, maxU, num_durations, b, t, u, i)
- sigma,
)
# combining all blank emissions and all non-blank emissions.
betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
# sync across all B=b and U=u
cuda.syncthreads()
# After final sync, betas[b, 0, 0] gives log-likelihood of backward pass, same with conventional Transducers.
if u == 0:
llBackward[b] = betas[offset]
@cuda.jit()
def compute_tdt_grad_kernel(
label_grads: torch.Tensor,
duration_grads: torch.Tensor,
acts: torch.Tensor,
duration_acts: torch.Tensor,
denom: torch.Tensor,
sigma: float,
alphas: torch.Tensor,
betas: torch.Tensor,
logll: torch.Tensor,
xlen: torch.Tensor,
ylen: torch.Tensor,
mlabels: torch.Tensor, # [B, U]
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
blank_: int,
durations: torch.Tensor,
num_durations: int,
fastemit_lambda: float,
clamp: float,
):
"""
Compute gradients over the transduction step.
Args:
        label_grads: Zero Tensor of shape [B, T, U, V] to store gradients for tokens.
duration_grads: Zero Tensor of shape [B, T, U, D] to store gradients for durations.
acts: Tensor of shape [B, T, U, V] flattened. Represents the logprobs activation tensor for tokens.
duration_acts: Tensor of shape [B, T, U, D] flattened. Represents the logprobs activation tensor for durations.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Alpha variable, contains forward probabilities. A tensor of shape [B, T, U].
        betas: Beta variable, contains backward probabilities. A tensor of shape [B, T, U].
logll: Log-likelihood of the forward variable, represented as a vector of shape [B].
Represents the log-likelihood of the forward pass.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
Updates:
Kernel inplace updates the following inputs:
- grads: Gradients with respect to the log likelihood (logll).
"""
# Kernel call:
# blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
# threads_per_block = constant buffer size of parallel threads (v :: Constant)
tid = cuda.threadIdx.x # represents v, taking steps of some constant size
idx = tid # index of v < V+1; in steps of constant buffer size
col = cuda.blockIdx.x # represents a fused index of b * t * u
# Decompose original indices from fused `col`
u = col % maxU # (b * t * u) % u = u
bt = (col - u) // maxU # (b * t * u - u) // U = b * t
t = bt % maxT # (b * t) % t = t
mb = (bt - t) // maxT # (b * t - t) // T = b
# constants
T = xlen[mb] # select AM length of current sample
U = ylen[mb] + 1 # select target length of current sample, +1 for the blank token
labels: torch.Tensor = mlabels[mb] # labels = mlabels + mb * (maxU - 1);
# Buffered gradient calculations, broadcast across B=b, T=t and U=u, looped over V with some constant stride.
# Look up gradient calculation from rnnt_numpy.compute_gradient()
if t < T and u < U:
logpk_blank = (
denom[col] + acts[col * alphabet_size + blank_] - sigma
) # whenever sigma is used, it is for logit under-normalization.
if idx < num_durations:
grad = 0.0
if t + durations[idx] < T and u < U - 1: # for label
logpk_label = denom[col] + acts[col * alphabet_size + labels[u]] - sigma
grad -= math.exp(alphas[col] + betas[col + 1 + durations[idx] * maxU] + logpk_label - logll[mb])
if t + durations[idx] < T and idx > 0: # for blank in the middle
grad -= math.exp(alphas[col] + betas[col + durations[idx] * maxU] + logpk_blank - logll[mb])
if t + durations[idx] == T and idx >= 1 and u == U - 1: # for blank as the last symbol
grad -= math.exp(alphas[col] + logpk_blank - logll[mb])
grad = grad * math.exp(duration_acts[col * num_durations + idx])
duration_grads[col * num_durations + idx] = grad
# For cuda kernels, maximum number of threads per block is limited to some value.
# However, it may be the case that vocabulary size is larger than this limit
# To work around this, an arbitrary thread buffer size is chosen such that,
# 1) each element within the thread pool operates independently of the other
# 2) An inner while loop moves the index of each buffer element by the size of the buffer itself,
# such that all elements of the vocabulary size are covered in (V + 1 // thread_buffer) number of steps.
# As such, each thread will perform the while loop at least (V + 1 // thread_buffer) number of times
while idx < alphabet_size:
# remember, `col` represents the tri-index [b, t, u]
# therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
logpk = denom[col] + acts[col * alphabet_size + idx]
# initialize the grad of the sample acts[b, t, u, v]
grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])
            # If FastEmit regularization is enabled, calculate the gradient of probability of predicting the next label
# at the current timestep.
# The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148, multiplied by the log probability
# of the current step (t, u), normalized by the total log likelihood.
# Once the gradient has been calculated, scale it by `fastemit_lambda`, as in Equation 10.
if fastemit_lambda > 0.0 and u < U - 1:
fastemit_grad = 0.0
for i in range(0, num_durations):
if t + durations[i] < T:
fastemit_grad += fastemit_lambda * math.exp(
alphas[col] # alphas(t, u)
+ (denom[col] + acts[col * alphabet_size + labels[u]]) # log prob of token emission
+ duration_acts[col * num_durations + i] # duration log-prob
+ betas[col + 1 + durations[i] * maxU] # betas(t, u+1)
+ logpk # log Pr(k|t, u)
- sigma # for logit under-normalization
- logll[mb] # total log likelihood for normalization
)
else:
fastemit_grad = 0.0
# Update the gradient of act[b, t, u, v] with the gradient from FastEmit regularization
grad = grad + fastemit_grad
# grad to last blank transition
# grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u] + logpk - sigma - logll[b] + logp(duration) for all possible non-zero durations.
if idx == blank_ and u == U - 1:
for i in range(1, num_durations):
if t == T - durations[i]:
grad -= math.exp(
alphas[col] + logpk - sigma - logll[mb] + duration_acts[col * num_durations + i]
)
# grad of blank across t < T;
# grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u] + logpk - sigma + logp_duration - logll[b] + betas[b, t + duration, u]) for all non-zero durations
if idx == blank_:
for i in range(1, num_durations):
if t < T - durations[i]:
grad -= math.exp(
alphas[col]
+ logpk
- sigma
- logll[mb]
+ betas[col + maxU * durations[i]]
+ duration_acts[col * num_durations + i]
)
# grad of correct token across u < U;
# grad[b, t, u<U-1, v=label[u]] -= exp(alphas[b, t, u] + logpk - sigma + logp_duration - logll[b] + betas[b, t + duration, u + 1]) for all blank durations.
# Scale the gradient by (1.0 + FastEmit_lambda) in log space, then exponentiate
if u < U - 1 and idx == labels[u]:
# exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
# multiplying (1.0 + fastemit_lambda) with result.
for i in range(num_durations):
if t + durations[i] < T:
grad -= math.exp(
math.log1p(fastemit_lambda)
+ alphas[col]
+ logpk
- sigma
- logll[mb]
+ betas[col + 1 + maxU * durations[i]]
+ duration_acts[col * num_durations + i]
)
# update grads[b, t, u, v] = grad
label_grads[col * alphabet_size + idx] = grad
# clamp gradient (if needed)
if clamp > 0.0:
g = label_grads[col * alphabet_size + idx]
g = min(g, clamp)
g = max(g, -clamp)
label_grads[col * alphabet_size + idx] = g
# update internal index through the thread_buffer;
# until idx < V + 1, such that entire vocabulary has been updated.
idx += GPU_RNNT_THREAD_SIZE
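# Illustrative launch sketch (not part of the original file). The comments inside the
# gradient kernels describe the intended configuration: one CUDA block per (b, t, u)
# triple and GPU_RNNT_THREAD_SIZE threads stepping over the vocabulary. A host-side
# call would therefore look roughly as follows, where `stream` is a numba CUDA stream
# and all tensors are flattened, contiguous CUDA tensors:
#
#     blocks_per_grid = minibatch * maxT * maxU
#     compute_tdt_grad_kernel[blocks_per_grid, GPU_RNNT_THREAD_SIZE, stream, 0](
#         label_grads, duration_grads, acts, duration_acts, denom, sigma,
#         alphas, betas, logll, xlen, ylen, mlabels, minibatch, maxT, maxU,
#         alphabet_size, blank_, durations, num_durations, fastemit_lambda, clamp,
#     )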
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/cuda_utils/gpu_rnnt_kernel.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/cpu_utils/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import multiprocessing
from typing import Optional
import numba
import torch
from torch.autograd import Function
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants
def log_sum_exp(a: torch.Tensor, b: torch.Tensor):
"""
Logsumexp with safety checks for infs.
"""
if torch.isinf(a):
return b
if torch.isinf(b):
return a
if a > b:
return math.log1p(math.exp(b - a)) + a
else:
return math.log1p(math.exp(a - b)) + b
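def _log_sum_exp_example() -> float:
    """Minimal sketch, not part of the original file: combining two log-domain
    probabilities that sum to one yields log(1.0) == 0, and combining with an
    infinite operand simply returns the other score, which is how the recursions
    below accumulate terms without underflow."""
    a = torch.tensor(0.25).log()
    b = torch.tensor(0.75).log()
    return float(log_sum_exp(a, b))  # ~0.0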
class CpuRNNT_index:
def __init__(self, U: int, maxU: int, minibatch: int, alphabet_size: int, batch_first: bool):
"""
A placeholder Index computation class that emits the resolved index in a flattened tensor,
        mimicking pointer indexing in CUDA kernels on the CPU.
Args:
U: Length of the current target sample (without padding).
maxU: Max Length of the padded target samples.
            minibatch: Size of the minibatch.
alphabet_size: Size of the vocabulary including RNNT blank - V+1.
batch_first: Bool flag determining if batch index is first or third.
"""
super(CpuRNNT_index, self).__init__()
self.U = U
self.maxU = maxU
self.minibatch = minibatch
self.alphabet_size = alphabet_size
self.batch_first = batch_first
def __call__(self, t: int, u: int, v: Optional[int] = None):
# if indexing all the values of the vocabulary, then only t, u are provided
if v is None:
return t * self.U + u
else:
# otherwise, t, u, v are provided to index particular value in the vocabulary.
if self.batch_first:
return (t * self.maxU + u) * self.alphabet_size + v
else:
return (t * self.maxU + u) * self.minibatch * self.alphabet_size + v
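def _cpu_rnnt_index_example() -> int:
    """Worked example, not part of the original file: resolve the flattened position
    of acts[t=2, u=3, v=7] for batch-first activations with maxU=5 and a vocabulary of
    size 29 (all values hypothetical). Returns (2 * 5 + 3) * 29 + 7 == 384, matching
    the pointer arithmetic used by the CUDA kernels."""
    idx = CpuRNNT_index(U=4, maxU=5, minibatch=1, alphabet_size=29, batch_first=True)
    return idx(2, 3, 7)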
class CpuRNNT_metadata:
def __init__(
self,
T: int,
U: int,
workspace: torch.Tensor,
bytes_used: int,
blank: int,
labels: torch.Tensor,
log_probs: torch.Tensor,
idx: CpuRNNT_index,
):
"""
Metadata for CPU based RNNT loss calculation. Holds the working space memory.
Args:
T: Length of the acoustic sequence (without padding).
U: Length of the target sequence (without padding).
workspace: Working space memory for the CPU.
bytes_used: Number of bytes currently used for indexing the working space memory. Generally 0.
blank: Index of the blank token in the vocabulary.
labels: Ground truth padded labels matrix of shape [B, U]
            log_probs: Log probs / activation matrix of flattened shape [B, T, U, V+1]
            idx: CpuRNNT_index helper used to resolve positions in the flattened activation tensor.
"""
super(CpuRNNT_metadata, self).__init__()
self.alphas = workspace[bytes_used : bytes_used + T * U]
bytes_used += T * U
self.betas = workspace[bytes_used : bytes_used + T * U]
bytes_used += T * U
self.log_probs2 = workspace[bytes_used : bytes_used + T * U * 2] # // only store blank & label
bytes_used += T * U * 2
self.bytes_used = bytes_used
self.setup_probs(T, U, labels, blank, log_probs, idx)
def setup_probs(
self, T: int, U: int, labels: torch.Tensor, blank: int, log_probs: torch.Tensor, idx: CpuRNNT_index
):
# initialize the log probs memory for blank and label token.
for t in range(T):
for u in range(U):
                offset = (t * U + u) * 2  # mult with 2 is for selecting either blank or label token. Even idx is blank, odd idx is the label.
self.log_probs2[offset] = log_probs[idx(t, u, blank)]
# // labels do not have first blank
if u < U - 1:
self.log_probs2[offset + 1] = log_probs[idx(t, u, labels[u])]
class LogSoftmaxGradModification(Function):
@staticmethod
def forward(ctx, acts, clamp):
if clamp < 0:
raise ValueError("`clamp` must be 0.0 or positive float.")
# This is needed for correctness (inplace is problematic),
        # but it wastes a lot of memory.
res = acts.new(acts)
ctx.clamp = clamp
return res
@staticmethod
def backward(ctx, grad_output):
# Clamp the gradients of loss(logsoftmax(...))
        # CPU computes logsoftmax explicitly, so we need to override the gradient here.
grad_output = torch.clamp(grad_output, -ctx.clamp, ctx.clamp)
return (
grad_output,
None,
)
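def _clamp_grad_example() -> torch.Tensor:
    """Minimal sketch, not part of the original file: upstream gradients of 1.0
    coming from .sum() are clipped to +/- 0.25 on their way back to `acts` by
    LogSoftmaxGradModification.backward."""
    acts = torch.randn(2, 3, requires_grad=True)
    out = LogSoftmaxGradModification.apply(acts, 0.25)
    out.sum().backward()
    return acts.grad  # every entry equals 0.25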
class CPURNNT:
def __init__(
self,
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
workspace: torch.Tensor,
blank: int,
fastemit_lambda: float,
clamp: float,
num_threads: int,
batch_first: bool,
):
"""
Helper class to compute the Transducer Loss on CPU.
Args:
minibatch: Size of the minibatch b.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
workspace: An allocated chunk of memory that will be sliced off and reshaped into required
blocks used as working memory.
blank: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
num_threads: Number of OMP threads to launch.
batch_first: Bool that decides if batch dimension is first or third.
"""
self.minibatch_ = minibatch
self.maxT_ = maxT
self.maxU_ = maxU
self.alphabet_size_ = alphabet_size
self.workspace = workspace # a flat vector of floatX numbers that represents allocated memory slices
self.blank_ = blank
self.fastemit_lambda_ = fastemit_lambda
self.clamp_ = abs(clamp)
self.num_threads_ = num_threads
self.batch_first = batch_first
if num_threads > 0:
numba.set_num_threads(min(multiprocessing.cpu_count(), num_threads))
else:
self.num_threads_ = numba.get_num_threads()
def cost_and_grad_kernel(
self,
log_probs: torch.Tensor,
grad: torch.Tensor,
labels: torch.Tensor,
mb: int,
T: int,
U: int,
bytes_used: int,
):
idx = CpuRNNT_index(U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first)
rnntm = CpuRNNT_metadata(T, U, self.workspace, bytes_used, self.blank_, labels, log_probs, idx)
if self.batch_first:
# zero grads
grad *= 0.0
llForward = self.compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas)
llBackward = self.compute_betas_and_grads(
grad, rnntm.log_probs2, T, U, rnntm.alphas, rnntm.betas, labels, llForward
)
# Scale llForward by FastEmit lambda
llForward += llForward * self.fastemit_lambda_
llBackward += llBackward * self.fastemit_lambda_
diff = (llForward - llBackward).abs()
if diff > 0.1:
print(f"WARNING: Forward backward likelihood mismatch : {diff}")
return -llForward
def compute_alphas(self, log_probs: torch.Tensor, T: int, U: int, alphas: torch.Tensor):
"""
Compute the probability of the forward variable alpha.
Args:
log_probs: Flattened tensor [B, T, U, V+1]
T: Length of the acoustic sequence T (not padded).
U: Length of the target sequence U (not padded).
alphas: Working space memory for alpha of shape [B, T, U].
Returns:
Loglikelihood of the forward variable alpha.
"""
idx = CpuRNNT_index(U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first)
alphas[0] = 0
for t in range(T):
for u in range(U):
if u == 0 and t > 0:
alphas[idx(t, 0)] = alphas[idx(t - 1, 0)] + log_probs[idx(t - 1, 0) * 2]
if t == 0 and u > 0:
alphas[idx(0, u)] = alphas[idx(0, u - 1)] + log_probs[idx(0, u - 1) * 2 + 1]
if t > 0 and u > 0:
no_emit = alphas[idx(t - 1, u)] + log_probs[idx(t - 1, u) * 2]
emit = alphas[idx(t, u - 1)] + log_probs[idx(t, u - 1) * 2 + 1]
alphas[idx(t, u)] = log_sum_exp(emit, no_emit)
loglike = alphas[idx(T - 1, U - 1)] + log_probs[idx(T - 1, U - 1) * 2]
return loglike
def compute_betas_and_grads(
self,
grad: torch.Tensor,
log_probs: torch.Tensor,
T: int,
U: int,
alphas: torch.Tensor,
betas: torch.Tensor,
labels: torch.Tensor,
logll: torch.Tensor,
):
"""
Compute backward variable beta as well as gradients of the activation matrix wrt loglikelihood
of forward variable.
Args:
grad: Working space memory of flattened shape [B, T, U, V+1]
            log_probs: Activation tensor of flattened shape [B, T, U, V+1]
T: Length of the acoustic sequence T (not padded).
U: Length of the target sequence U (not padded).
alphas: Working space memory for alpha of shape [B, T, U].
            betas: Working space memory for beta of shape [B, T, U].
labels: Ground truth label of shape [B, U]
logll: Loglikelihood of the forward variable.
Returns:
Loglikelihood of the forward variable and inplace updates the grad tensor.
"""
# Patch for CPU + fp16
if log_probs.dtype == torch.float16 and not log_probs.is_cuda:
log_probs = log_probs.float()
idx = CpuRNNT_index(U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first)
betas[idx(T - 1, U - 1)] = log_probs[idx(T - 1, U - 1) * 2]
for t in range(T - 1, -1, -1):
for u in range(U - 1, -1, -1):
if (u == U - 1) and (t < T - 1):
betas[idx(t, U - 1)] = betas[idx(t + 1, U - 1)] + log_probs[idx(t, U - 1) * 2]
if (t == T - 1) and (u < U - 1):
betas[idx(T - 1, u)] = betas[idx(T - 1, u + 1)] + log_probs[idx(T - 1, u) * 2 + 1]
if (t < T - 1) and (u < U - 1):
no_emit = betas[idx(t + 1, u)] + log_probs[idx(t, u) * 2]
emit = betas[idx(t, u + 1)] + log_probs[idx(t, u) * 2 + 1]
betas[idx(t, u)] = log_sum_exp(emit, no_emit)
loglike = betas[0]
# // Gradients w.r.t. log probabilities
for t in range(T):
for u in range(U):
if t < T - 1:
g = alphas[idx(t, u)] + betas[idx(t + 1, u)]
grad[idx(t, u, self.blank_)] = -torch.exp(log_probs[idx(t, u) * 2] + g - loglike)
if u < U - 1:
g = alphas[idx(t, u)] + betas[idx(t, u + 1)]
grad[idx(t, u, labels[u])] = -torch.exp(
math.log1p(self.fastemit_lambda_) + log_probs[idx(t, u) * 2 + 1] + g - loglike
)
# // gradient to the last blank transition
grad[idx(T - 1, U - 1, self.blank_)] = -torch.exp(
log_probs[idx(T - 1, U - 1) * 2] + alphas[idx(T - 1, U - 1)] - loglike
)
return loglike
def cost_and_grad(
self,
log_probs: torch.Tensor,
grads: torch.Tensor,
costs: torch.Tensor,
flat_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
) -> global_constants.RNNTStatus:
# // per minibatch memory
per_minibatch_bytes = 0
# // alphas & betas
per_minibatch_bytes += self.maxT_ * self.maxU_ * 2
# // blank & label log probability cache
per_minibatch_bytes += self.maxT_ * self.maxU_ * 2
for mb in range(self.minibatch_):
T = input_lengths[mb] # // Length of utterance (time)
U = label_lengths[mb] + 1 # // Number of labels in transcription
batch_size = self.alphabet_size_
if self.batch_first:
batch_size = self.maxT_ * self.maxU_ * self.alphabet_size_
costs[mb] = self.cost_and_grad_kernel(
log_probs[(mb * batch_size) :],
grads[(mb * batch_size) :],
flat_labels[(mb * (self.maxU_ - 1)) :],
mb,
T,
U,
mb * per_minibatch_bytes,
)
return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
def score_forward(
self,
log_probs: torch.Tensor,
costs: torch.Tensor,
flat_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
# // per minibatch memory
per_minibatch_bytes = 0
# // alphas & betas
per_minibatch_bytes += self.maxT_ * self.maxU_ * 2
# // blank & label log probability cache
per_minibatch_bytes += self.maxT_ * self.maxU_ * 2
for mb in range(self.minibatch_):
T = input_lengths[mb] # // Length of utterance (time)
U = label_lengths[mb] + 1 # // Number of labels in transcription
batch_size = self.alphabet_size_
if self.batch_first:
batch_size = self.maxT_ * self.maxU_ * self.alphabet_size_
idx = CpuRNNT_index(U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first)
rnntm = CpuRNNT_metadata(
T,
U,
self.workspace,
mb * per_minibatch_bytes,
self.blank_,
flat_labels[(mb * (self.maxU_ - 1)) :],
log_probs[(mb * batch_size) :],
idx,
)
costs[mb] = -self.compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas)
return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
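# Sizing sketch (illustrative, not part of the original file): despite the `bytes`
# naming above, the offsets index workspace *elements*, and CpuRNNT_metadata slices
# 2 * T * U floats for alphas/betas plus 2 * T * U floats for the blank/label
# log-prob cache, i.e. 4 * maxT * maxU elements per utterance.
def _example_workspace(minibatch: int, maxT: int, maxU: int) -> torch.Tensor:
    per_minibatch_elements = 4 * maxT * maxU
    return torch.zeros(minibatch * per_minibatch_elements, dtype=torch.float32)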
|
NeMo-main
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/cpu_utils/cpu_rnnt.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import torch
class ExternalFeatureLoader(object):
"""Feature loader that load external features store in certain format.
Currently support pickle, npy and npz format.
"""
def __init__(
self, augmentor: Optional["nemo.collections.asr.parts.perturb.FeatureAugmentor"] = None,
):
"""
Feature loader
"""
self.augmentor = augmentor
def load_feature_from_file(self, file_path: str):
"""Load samples from file_path and convert it to be of type float32
file_path (str) is the path of the file that stores feature/sample.
"""
if file_path.endswith(".pt") or file_path.endswith(".pth"):
samples = torch.load(file_path, map_location="cpu").float().numpy()
return samples
else:
# load pickle/npy/npz file
samples = np.load(file_path, allow_pickle=True)
return self._convert_samples_to_float32(samples)
# TODO load other type of files such as kaldi io ark
@staticmethod
def _convert_samples_to_float32(samples: np.ndarray) -> np.ndarray:
"""Convert sample type to float32.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= 1.0 / 2 ** (bits - 1)
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
def process(self, file_path: str) -> torch.Tensor:
features = self.load_feature_from_file(file_path)
features = self.process_segment(features)
return features
def process_segment(self, feature_segment):
if self.augmentor:
# augmentor for external features. Here possible augmentor for external embedding feature is Diaconis Augmentation and might be implemented later
self.augmentor.perturb(feature_segment)
return torch.tensor(feature_segment, dtype=torch.float)
return torch.tensor(feature_segment, dtype=torch.float)
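# Usage sketch (illustrative only; the file path is hypothetical):
#     loader = ExternalFeatureLoader(augmentor=None)
#     feats = loader.process("utterance_feats.npy")  # float32 torch.Tensor of features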
|
NeMo-main
|
nemo/collections/asr/parts/preprocessing/feature_loader.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.parts.preprocessing.feature_loader import ExternalFeatureLoader
from nemo.collections.asr.parts.preprocessing.features import FeaturizerFactory, FilterbankFeatures, WaveformFeaturizer
from nemo.collections.asr.parts.preprocessing.perturb import (
AudioAugmentor,
AugmentationDataset,
GainPerturbation,
ImpulsePerturbation,
NoisePerturbation,
NoisePerturbationWithNormalization,
Perturbation,
RirAndNoisePerturbation,
ShiftPerturbation,
SilencePerturbation,
SpeedPerturbation,
TimeStretchPerturbation,
TranscodePerturbation,
WhiteNoisePerturbation,
perturbation_types,
process_augmentations,
register_perturbation,
)
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
|
NeMo-main
|
nemo/collections/asr/parts/preprocessing/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 Ryan Leary
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file contains code artifacts adapted from https://github.com/ryanleary/patter
import math
import random
from typing import Optional, Tuple, Union
import librosa
import numpy as np
import torch
import torch.nn as nn
from nemo.collections.asr.parts.preprocessing.perturb import AudioAugmentor
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.utils import logging
try:
import torchaudio
HAVE_TORCHAUDIO = True
except ModuleNotFoundError:
HAVE_TORCHAUDIO = False
CONSTANT = 1e-5
def normalize_batch(x, seq_len, normalize_type):
x_mean = None
x_std = None
if normalize_type == "per_feature":
x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype, device=x.device)
x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
if x[i, :, : seq_len[i]].shape[1] == 1:
raise ValueError(
"normalize_batch with `per_feature` normalize_type received a tensor of length 1. This will result "
"in torch.std() returning nan. Make sure your audio length has enough samples for a single "
"feature (ex. at least `hop_length` for Mel Spectrograms)."
)
x_mean[i, :] = x[i, :, : seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, : seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += CONSTANT
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2), x_mean, x_std
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, : seq_len[i].item()].mean()
x_std[i] = x[i, :, : seq_len[i].item()].std()
# make sure x_std is not zero
x_std += CONSTANT
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1), x_mean, x_std
elif "fixed_mean" in normalize_type and "fixed_std" in normalize_type:
x_mean = torch.tensor(normalize_type["fixed_mean"], device=x.device)
x_std = torch.tensor(normalize_type["fixed_std"], device=x.device)
return (
(x - x_mean.view(x.shape[0], x.shape[1]).unsqueeze(2)) / x_std.view(x.shape[0], x.shape[1]).unsqueeze(2),
x_mean,
x_std,
)
else:
return x, x_mean, x_std
def clean_spectrogram_batch(spectrogram: torch.Tensor, spectrogram_len: torch.Tensor, fill_value=0.0) -> torch.Tensor:
"""
Fill spectrogram values outside the length with `fill_value`
Args:
spectrogram: Tensor with shape [B, C, L] containing batched spectrograms
spectrogram_len: Tensor with shape [B] containing the sequence length of each batch element
fill_value: value to fill with, 0.0 by default
Returns:
cleaned spectrogram, tensor with shape equal to `spectrogram`
"""
device = spectrogram.device
batch_size, _, max_len = spectrogram.shape
mask = torch.arange(max_len, device=device)[None, :] >= spectrogram_len[:, None]
mask = mask.unsqueeze(1).expand_as(spectrogram)
return spectrogram.masked_fill(mask, fill_value)
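# Illustrative example (not part of the original file): with a batch of two
# spectrograms padded to 100 frames and valid lengths [100, 60],
#     cleaned = clean_spectrogram_batch(torch.randn(2, 80, 100), torch.tensor([100, 60]))
# leaves the first element untouched and fills frames 60..99 of the second with 0.0.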
def splice_frames(x, frame_splicing):
""" Stacks frames together across feature dim
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim*frame_splicing, num_frames
"""
seq = [x]
for n in range(1, frame_splicing):
seq.append(torch.cat([x[:, :, :n], x[:, :, n:]], dim=2))
return torch.cat(seq, dim=1)
@torch.jit.script_if_tracing
def make_seq_mask_like(
lengths: torch.Tensor, like: torch.Tensor, time_dim: int = -1, valid_ones: bool = True
) -> torch.Tensor:
"""
Args:
lengths: Tensor with shape [B] containing the sequence length of each batch element
like: The mask will contain the same number of dimensions as this Tensor, and will have the same max
length in the time dimension of this Tensor.
time_dim: Time dimension of the `shape_tensor` and the resulting mask. Zero-based.
valid_ones: If True, valid tokens will contain value `1` and padding will be `0`. Else, invert.
Returns:
A :class:`torch.Tensor` containing 1's and 0's for valid and invalid tokens, respectively, if `valid_ones`, else
vice-versa. Mask will have the same number of dimensions as `like`. Batch and time dimensions will match
the `like`. All other dimensions will be singletons. E.g., if `like.shape == [3, 4, 5]` and
        `time_dim == -1`, mask will have shape `[3, 1, 5]`.
"""
# Mask with shape [B, T]
mask = torch.arange(like.shape[time_dim], device=like.device).repeat(lengths.shape[0], 1).lt(lengths.view(-1, 1))
# [B, T] -> [B, *, T] where * is any number of singleton dimensions to expand to like tensor
for _ in range(like.dim() - mask.dim()):
mask = mask.unsqueeze(1)
# If needed, transpose time dim
if time_dim != -1 and time_dim != mask.dim() - 1:
mask = mask.transpose(-1, time_dim)
# Maybe invert the padded vs. valid token values
if not valid_ones:
mask = ~mask
return mask
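# Illustrative example (not part of the original file): mask out padded time steps of
# a [B, D, T] feature tensor given per-utterance lengths,
#     feats = torch.randn(3, 4, 5)
#     mask = make_seq_mask_like(torch.tensor([5, 3, 1]), feats)  # bool mask of shape [3, 1, 5]
#     feats = feats.masked_fill(~mask, 0.0)                      # zero the padding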
class WaveformFeaturizer(object):
def __init__(self, sample_rate=16000, int_values=False, augmentor=None):
self.augmentor = augmentor if augmentor is not None else AudioAugmentor()
self.sample_rate = sample_rate
self.int_values = int_values
def max_augmentation_length(self, length):
return self.augmentor.max_augmentation_length(length)
def process(
self,
file_path,
offset=0,
duration=0,
trim=False,
trim_ref=np.max,
trim_top_db=60,
trim_frame_length=2048,
trim_hop_length=512,
orig_sr=None,
channel_selector=None,
normalize_db=None,
):
audio = AudioSegment.from_file(
file_path,
target_sr=self.sample_rate,
int_values=self.int_values,
offset=offset,
duration=duration,
trim=trim,
trim_ref=trim_ref,
trim_top_db=trim_top_db,
trim_frame_length=trim_frame_length,
trim_hop_length=trim_hop_length,
orig_sr=orig_sr,
channel_selector=channel_selector,
normalize_db=normalize_db,
)
return self.process_segment(audio)
def process_segment(self, audio_segment):
self.augmentor.perturb(audio_segment)
return torch.tensor(audio_segment.samples, dtype=torch.float)
@classmethod
def from_config(cls, input_config, perturbation_configs=None):
if perturbation_configs is not None:
aa = AudioAugmentor.from_config(perturbation_configs)
else:
aa = None
sample_rate = input_config.get("sample_rate", 16000)
int_values = input_config.get("int_values", False)
return cls(sample_rate=sample_rate, int_values=int_values, augmentor=aa)
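# Usage sketch (illustrative only; config values and path are hypothetical):
#     featurizer = WaveformFeaturizer.from_config({"sample_rate": 16000, "int_values": False})
#     samples = featurizer.process("utterance.wav")  # 1-D float tensor of (augmented) samples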
class FeaturizerFactory(object):
def __init__(self):
pass
@classmethod
def from_config(cls, input_cfg, perturbation_configs=None):
return WaveformFeaturizer.from_config(input_cfg, perturbation_configs=perturbation_configs)
class FilterbankFeatures(nn.Module):
"""Featurizer that converts wavs to Mel Spectrograms.
See AudioToMelSpectrogramPreprocessor for args.
"""
def __init__(
self,
sample_rate=16000,
n_window_size=320,
n_window_stride=160,
window="hann",
normalize="per_feature",
n_fft=None,
preemph=0.97,
nfilt=64,
lowfreq=0,
highfreq=None,
log=True,
log_zero_guard_type="add",
log_zero_guard_value=2 ** -24,
dither=CONSTANT,
pad_to=16,
max_duration=16.7,
frame_splicing=1,
exact_pad=False,
pad_value=0,
mag_power=2.0,
use_grads=False,
rng=None,
nb_augmentation_prob=0.0,
nb_max_freq=4000,
mel_norm="slaney",
stft_exact_pad=False, # Deprecated arguments; kept for config compatibility
stft_conv=False, # Deprecated arguments; kept for config compatibility
):
super().__init__()
if stft_conv or stft_exact_pad:
logging.warning(
"Using torch_stft is deprecated and has been removed. The values have been forcibly set to False "
"for FilterbankFeatures and AudioToMelSpectrogramPreprocessor. Please set exact_pad to True "
"as needed."
)
if exact_pad and n_window_stride % 2 == 1:
raise NotImplementedError(
f"{self} received exact_pad == True, but hop_size was odd. If audio_length % hop_size == 0. Then the "
"returned spectrogram would not be of length audio_length // hop_size. Please use an even hop_size."
)
self.log_zero_guard_value = log_zero_guard_value
if (
n_window_size is None
or n_window_stride is None
or not isinstance(n_window_size, int)
or not isinstance(n_window_stride, int)
or n_window_size <= 0
or n_window_stride <= 0
):
raise ValueError(
f"{self} got an invalid value for either n_window_size or "
f"n_window_stride. Both must be positive ints."
)
logging.info(f"PADDING: {pad_to}")
self.win_length = n_window_size
self.hop_length = n_window_stride
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.stft_pad_amount = (self.n_fft - self.hop_length) // 2 if exact_pad else None
if exact_pad:
logging.info("STFT using exact pad")
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length, periodic=False) if window_fn else None
self.register_buffer("window", window_tensor)
self.stft = lambda x: torch.stft(
x,
n_fft=self.n_fft,
hop_length=self.hop_length,
win_length=self.win_length,
center=False if exact_pad else True,
window=self.window.to(dtype=torch.float),
return_complex=True,
)
self.normalize = normalize
self.log = log
self.dither = dither
self.frame_splicing = frame_splicing
self.nfilt = nfilt
self.preemph = preemph
self.pad_to = pad_to
highfreq = highfreq or sample_rate / 2
filterbanks = torch.tensor(
librosa.filters.mel(
sr=sample_rate, n_fft=self.n_fft, n_mels=nfilt, fmin=lowfreq, fmax=highfreq, norm=mel_norm
),
dtype=torch.float,
).unsqueeze(0)
self.register_buffer("fb", filterbanks)
# Calculate maximum sequence length
max_length = self.get_seq_len(torch.tensor(max_duration * sample_rate, dtype=torch.float))
max_pad = pad_to - (max_length % pad_to) if pad_to > 0 else 0
self.max_length = max_length + max_pad
self.pad_value = pad_value
self.mag_power = mag_power
# We want to avoid taking the log of zero
# There are two options: either adding or clamping to a small value
if log_zero_guard_type not in ["add", "clamp"]:
raise ValueError(
f"{self} received {log_zero_guard_type} for the "
f"log_zero_guard_type parameter. It must be either 'add' or "
f"'clamp'."
)
self.use_grads = use_grads
if not use_grads:
self.forward = torch.no_grad()(self.forward)
self._rng = random.Random() if rng is None else rng
self.nb_augmentation_prob = nb_augmentation_prob
if self.nb_augmentation_prob > 0.0:
if nb_max_freq >= sample_rate / 2:
self.nb_augmentation_prob = 0.0
else:
self._nb_max_fft_bin = int((nb_max_freq / sample_rate) * n_fft)
        # log_zero_guard_value is the small value we want to use as the guard: we support
        # an actual number, or "tiny", or "eps"
self.log_zero_guard_type = log_zero_guard_type
logging.debug(f"sr: {sample_rate}")
logging.debug(f"n_fft: {self.n_fft}")
logging.debug(f"win_length: {self.win_length}")
logging.debug(f"hop_length: {self.hop_length}")
logging.debug(f"n_mels: {nfilt}")
logging.debug(f"fmin: {lowfreq}")
logging.debug(f"fmax: {highfreq}")
logging.debug(f"using grads: {use_grads}")
logging.debug(f"nb_augmentation_prob: {nb_augmentation_prob}")
def log_zero_guard_value_fn(self, x):
if isinstance(self.log_zero_guard_value, str):
if self.log_zero_guard_value == "tiny":
return torch.finfo(x.dtype).tiny
elif self.log_zero_guard_value == "eps":
return torch.finfo(x.dtype).eps
else:
raise ValueError(
f"{self} received {self.log_zero_guard_value} for the "
f"log_zero_guard_type parameter. It must be either a "
f"number, 'tiny', or 'eps'"
)
else:
return self.log_zero_guard_value
def get_seq_len(self, seq_len):
        # When stft_pad_amount is None, torch.stft uses center=True, which pads n_fft // 2 samples on each side
pad_amount = self.stft_pad_amount * 2 if self.stft_pad_amount is not None else self.n_fft // 2 * 2
seq_len = torch.floor_divide((seq_len + pad_amount - self.n_fft), self.hop_length) + 1
return seq_len.to(dtype=torch.long)
@property
def filter_banks(self):
return self.fb
def forward(self, x, seq_len, linear_spec=False):
seq_len = self.get_seq_len(seq_len)
if self.stft_pad_amount is not None:
x = torch.nn.functional.pad(
x.unsqueeze(1), (self.stft_pad_amount, self.stft_pad_amount), "reflect"
).squeeze(1)
# dither (only in training mode for eval determinism)
if self.training and self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
if self.preemph is not None:
x = torch.cat((x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]), dim=1)
# disable autocast to get full range of stft values
with torch.cuda.amp.autocast(enabled=False):
x = self.stft(x)
# torch stft returns complex tensor (of shape [B,N,T]); so convert to magnitude
# guard is needed for sqrt if grads are passed through
guard = 0 if not self.use_grads else CONSTANT
x = torch.view_as_real(x)
x = torch.sqrt(x.pow(2).sum(-1) + guard)
if self.training and self.nb_augmentation_prob > 0.0:
for idx in range(x.shape[0]):
if self._rng.random() < self.nb_augmentation_prob:
x[idx, self._nb_max_fft_bin :, :] = 0.0
# get power spectrum
if self.mag_power != 1.0:
x = x.pow(self.mag_power)
# return plain spectrogram if required
if linear_spec:
return x, seq_len
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
if self.log_zero_guard_type == "add":
x = torch.log(x + self.log_zero_guard_value_fn(x))
elif self.log_zero_guard_type == "clamp":
x = torch.log(torch.clamp(x, min=self.log_zero_guard_value_fn(x)))
else:
raise ValueError("log_zero_guard_type was not understood")
# frame splicing if required
if self.frame_splicing > 1:
x = splice_frames(x, self.frame_splicing)
# normalize if required
if self.normalize:
x, _, _ = normalize_batch(x, seq_len, normalize_type=self.normalize)
# mask to zero any values beyond seq_len in batch, pad to multiple of `pad_to` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len).to(x.device)
mask = mask.repeat(x.size(0), 1) >= seq_len.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1).type(torch.bool).to(device=x.device), self.pad_value)
del mask
pad_to = self.pad_to
if pad_to == "max":
x = nn.functional.pad(x, (0, self.max_length - x.size(-1)), value=self.pad_value)
elif pad_to > 0:
pad_amt = x.size(-1) % pad_to
if pad_amt != 0:
x = nn.functional.pad(x, (0, pad_to - pad_amt), value=self.pad_value)
return x, seq_len
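# Hedged usage sketch (not part of the original module): computes log-Mel features for a batch
# of random waveforms with the defaults above; the random input is purely illustrative.
def _example_filterbank_features():
    featurizer = FilterbankFeatures(sample_rate=16000, n_window_size=320, n_window_stride=160, nfilt=64)
    batch = torch.randn(2, 16000)                 # [B, num_samples], ~1 s at 16 kHz
    lengths = torch.tensor([16000, 12000])        # per-utterance sample counts
    feats, feat_len = featurizer(batch, lengths)  # feats: [B, nfilt, T padded to a multiple of pad_to]
    return feats.shape, feat_len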
class FilterbankFeaturesTA(nn.Module):
"""
Exportable, `torchaudio`-based implementation of Mel Spectrogram extraction.
See `AudioToMelSpectrogramPreprocessor` for args.
"""
def __init__(
self,
sample_rate: int = 16000,
n_window_size: int = 320,
n_window_stride: int = 160,
normalize: Optional[str] = "per_feature",
nfilt: int = 64,
n_fft: Optional[int] = None,
preemph: float = 0.97,
lowfreq: float = 0,
highfreq: Optional[float] = None,
log: bool = True,
log_zero_guard_type: str = "add",
log_zero_guard_value: Union[float, str] = 2 ** -24,
dither: float = 1e-5,
window: str = "hann",
pad_to: int = 0,
pad_value: float = 0.0,
mel_norm="slaney",
        # These options appear to be unused; avoid complicating the code by supporting them.
use_grads: bool = False, # Deprecated arguments; kept for config compatibility
max_duration: float = 16.7, # Deprecated arguments; kept for config compatibility
frame_splicing: int = 1, # Deprecated arguments; kept for config compatibility
exact_pad: bool = False, # Deprecated arguments; kept for config compatibility
nb_augmentation_prob: float = 0.0, # Deprecated arguments; kept for config compatibility
nb_max_freq: int = 4000, # Deprecated arguments; kept for config compatibility
mag_power: float = 2.0, # Deprecated arguments; kept for config compatibility
rng: Optional[random.Random] = None, # Deprecated arguments; kept for config compatibility
stft_exact_pad: bool = False, # Deprecated arguments; kept for config compatibility
stft_conv: bool = False, # Deprecated arguments; kept for config compatibility
):
super().__init__()
if not HAVE_TORCHAUDIO:
raise ValueError(f"Need to install torchaudio to instantiate a {self.__class__.__name__}")
# Make sure log zero guard is supported, if given as a string
supported_log_zero_guard_strings = {"eps", "tiny"}
if isinstance(log_zero_guard_value, str) and log_zero_guard_value not in supported_log_zero_guard_strings:
raise ValueError(
f"Log zero guard value must either be a float or a member of {supported_log_zero_guard_strings}"
)
# Copied from `AudioPreprocessor` due to the ad-hoc structuring of the Mel Spec extractor class
self.torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'ones': torch.ones,
None: torch.ones,
}
# Ensure we can look up the window function
if window not in self.torch_windows:
raise ValueError(f"Got window value '{window}' but expected a member of {self.torch_windows.keys()}")
self.win_length = n_window_size
self.hop_length = n_window_stride
self._sample_rate = sample_rate
self._normalize_strategy = normalize
self._use_log = log
self._preemphasis_value = preemph
self.log_zero_guard_type = log_zero_guard_type
self.log_zero_guard_value: Union[str, float] = log_zero_guard_value
self.dither = dither
self.pad_to = pad_to
self.pad_value = pad_value
self.n_fft = n_fft
self._mel_spec_extractor: torchaudio.transforms.MelSpectrogram = torchaudio.transforms.MelSpectrogram(
sample_rate=self._sample_rate,
win_length=self.win_length,
hop_length=self.hop_length,
n_mels=nfilt,
window_fn=self.torch_windows[window],
mel_scale="slaney",
norm=mel_norm,
n_fft=n_fft,
f_max=highfreq,
f_min=lowfreq,
wkwargs={"periodic": False},
)
@property
def filter_banks(self):
""" Matches the analogous class """
return self._mel_spec_extractor.mel_scale.fb
def _resolve_log_zero_guard_value(self, dtype: torch.dtype) -> float:
if isinstance(self.log_zero_guard_value, float):
return self.log_zero_guard_value
return getattr(torch.finfo(dtype), self.log_zero_guard_value)
def _apply_dithering(self, signals: torch.Tensor) -> torch.Tensor:
if self.training and self.dither > 0.0:
noise = torch.randn_like(signals) * self.dither
signals = signals + noise
return signals
def _apply_preemphasis(self, signals: torch.Tensor) -> torch.Tensor:
if self._preemphasis_value is not None:
padded = torch.nn.functional.pad(signals, (1, 0))
signals = signals - self._preemphasis_value * padded[:, :-1]
return signals
def _compute_output_lengths(self, input_lengths: torch.Tensor) -> torch.Tensor:
out_lengths = input_lengths.div(self.hop_length, rounding_mode="floor").add(1).long()
return out_lengths
def _apply_pad_to(self, features: torch.Tensor) -> torch.Tensor:
# Only apply during training; else need to capture dynamic shape for exported models
if not self.training or self.pad_to == 0 or features.shape[-1] % self.pad_to == 0:
return features
pad_length = self.pad_to - (features.shape[-1] % self.pad_to)
return torch.nn.functional.pad(features, pad=(0, pad_length), value=self.pad_value)
def _apply_log(self, features: torch.Tensor) -> torch.Tensor:
if self._use_log:
zero_guard = self._resolve_log_zero_guard_value(features.dtype)
if self.log_zero_guard_type == "add":
features = features + zero_guard
elif self.log_zero_guard_type == "clamp":
features = features.clamp(min=zero_guard)
else:
raise ValueError(f"Unsupported log zero guard type: '{self.log_zero_guard_type}'")
features = features.log()
return features
def _extract_spectrograms(self, signals: torch.Tensor) -> torch.Tensor:
# Complex FFT needs to be done in single precision
with torch.cuda.amp.autocast(enabled=False):
features = self._mel_spec_extractor(waveform=signals)
return features
def _apply_normalization(self, features: torch.Tensor, lengths: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
# For consistency, this function always does a masked fill even if not normalizing.
mask: torch.Tensor = make_seq_mask_like(lengths=lengths, like=features, time_dim=-1, valid_ones=False)
features = features.masked_fill(mask, 0.0)
# Maybe don't normalize
if self._normalize_strategy is None:
return features
# Use the log zero guard for the sqrt zero guard
guard_value = self._resolve_log_zero_guard_value(features.dtype)
if self._normalize_strategy == "per_feature" or self._normalize_strategy == "all_features":
# 'all_features' reduces over each sample; 'per_feature' reduces over each channel
reduce_dim = 2
if self._normalize_strategy == "all_features":
reduce_dim = [1, 2]
# [B, D, T] -> [B, D, 1] or [B, 1, 1]
means = features.sum(dim=reduce_dim, keepdim=True).div(lengths.view(-1, 1, 1))
stds = (
features.sub(means)
.masked_fill(mask, 0.0)
.pow(2.0)
.sum(dim=reduce_dim, keepdim=True) # [B, D, T] -> [B, D, 1] or [B, 1, 1]
            .div(lengths.view(-1, 1, 1) - 1)  # unbiased (Bessel-corrected) estimator
.clamp(min=guard_value) # avoid sqrt(0)
.sqrt()
)
features = (features - means) / (stds + eps)
else:
# Deprecating constant std/mean
raise ValueError(f"Unsupported norm type: '{self._normalize_strategy}")
features = features.masked_fill(mask, 0.0)
return features
def forward(self, input_signal: torch.Tensor, length: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
feature_lengths = self._compute_output_lengths(input_lengths=length)
signals = self._apply_dithering(signals=input_signal)
signals = self._apply_preemphasis(signals=signals)
features = self._extract_spectrograms(signals=signals)
features = self._apply_log(features=features)
features = self._apply_normalization(features=features, lengths=feature_lengths)
features = self._apply_pad_to(features=features)
return features, feature_lengths
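# Hedged usage sketch (not part of the original module, assumes torchaudio is installed since
# the constructor above raises otherwise). Extracts log-Mel features from a random waveform.
def _example_filterbank_features_ta():
    featurizer = FilterbankFeaturesTA(sample_rate=16000, n_window_size=320, n_window_stride=160, nfilt=64, n_fft=512)
    waveform = torch.randn(1, 16000)                 # [B, num_samples]
    lengths = torch.tensor([16000])
    feats, feat_len = featurizer(waveform, lengths)  # feats: [B, nfilt, T]
    return feats.shape, feat_len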
|
NeMo-main
|
nemo/collections/asr/parts/preprocessing/features.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 Ryan Leary
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file contains code artifacts adapted from https://github.com/ryanleary/patter
import math
import os
import random
from typing import Optional
import librosa
import numpy as np
import soundfile as sf
from nemo.collections.asr.parts.utils.audio_utils import select_channels
from nemo.utils import logging
# TODO @blisc: Perhaps refactor instead of import guarding
HAVE_PYDUB = True
try:
from pydub import AudioSegment as Audio
from pydub.exceptions import CouldntDecodeError
except ModuleNotFoundError:
HAVE_PYDUB = False
available_formats = sf.available_formats()
sf_supported_formats = ["." + i.lower() for i in available_formats.keys()]
class AudioSegment(object):
"""Audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(
self,
samples,
sample_rate,
target_sr=None,
trim=False,
trim_ref=np.max,
trim_top_db=60,
trim_frame_length=2048,
trim_hop_length=512,
orig_sr=None,
channel_selector=None,
normalize_db: Optional[float] = None,
ref_channel: Optional[int] = None,
):
"""Create audio segment from samples.
        Samples are converted to float32 internally, with integer types scaled to [-1, 1].
"""
samples = self._convert_samples_to_float32(samples)
# Check if channel selector is necessary
if samples.ndim == 1 and channel_selector not in [None, 0, 'average']:
raise ValueError(
                f'Input signal is one-dimensional, channel selector ({channel_selector}) cannot be used.'
)
elif samples.ndim == 2:
samples = select_channels(samples, channel_selector)
elif samples.ndim >= 3:
raise NotImplementedError(
'Signals with more than two dimensions (sample, channel) are currently not supported.'
)
if target_sr is not None and target_sr != sample_rate:
            # resampling along the temporal dimension (axis=0) will be supported in librosa 0.10.0 (#1561)
samples = samples.transpose()
samples = librosa.core.resample(samples, orig_sr=sample_rate, target_sr=target_sr)
samples = samples.transpose()
sample_rate = target_sr
if trim:
            # librosa uses a channels-first layout (num_channels, num_samples), which is the transpose of AudioSegment's layout
samples = samples.transpose()
samples, _ = librosa.effects.trim(
samples, top_db=trim_top_db, ref=trim_ref, frame_length=trim_frame_length, hop_length=trim_hop_length
)
samples = samples.transpose()
self._samples = samples
self._sample_rate = sample_rate
self._orig_sr = orig_sr if orig_sr is not None else sample_rate
self._ref_channel = ref_channel
self._normalize_db = normalize_db
if normalize_db is not None:
self.normalize_db(normalize_db, ref_channel)
def __eq__(self, other):
"""Return whether two objects are equal."""
if type(other) is not type(self):
return False
if self._sample_rate != other._sample_rate:
return False
if self._samples.shape != other._samples.shape:
return False
if np.any(self.samples != other._samples):
return False
return True
def __ne__(self, other):
"""Return whether two objects are unequal."""
return not self.__eq__(other)
def __str__(self):
"""Return human-readable representation of segment."""
if self.num_channels == 1:
return "%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, rms=%.2fdB" % (
type(self),
self.num_samples,
self.sample_rate,
self.duration,
self.rms_db,
)
else:
rms_db_str = ', '.join([f'{rms:.2f}dB' for rms in self.rms_db])
return "%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, num_channels=%d, rms=[%s]" % (
type(self),
self.num_samples,
self.sample_rate,
self.duration,
self.num_channels,
rms_db_str,
)
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= 1.0 / 2 ** (bits - 1)
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
@classmethod
def from_file(
cls,
audio_file,
target_sr=None,
int_values=False,
offset=0,
duration=0,
trim=False,
trim_ref=np.max,
trim_top_db=60,
trim_frame_length=2048,
trim_hop_length=512,
orig_sr=None,
channel_selector=None,
normalize_db=None,
ref_channel=None,
):
"""
Load a file supported by librosa and return as an AudioSegment.
:param audio_file: path of file to load.
Alternatively, a list of paths of single-channel files can be provided
to form a multichannel signal.
:param target_sr: the desired sample rate
:param int_values: if true, load samples as 32-bit integers
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:param trim: if true, trim leading and trailing silence from an audio signal
:param trim_ref: the reference amplitude. By default, it uses `np.max` and compares to the peak amplitude in
the signal
:param trim_top_db: the threshold (in decibels) below reference to consider as silence
:param trim_frame_length: the number of samples per analysis frame
:param trim_hop_length: the number of samples between analysis frames
:param orig_sr: the original sample rate
        :param channel_selector: string denoting the downmix mode, an integer denoting the channel to be selected, or an iterable
of integers denoting a subset of channels. Channel selector is using zero-based indexing.
If set to `None`, the original signal will be used.
:param normalize_db (Optional[float]): if not None, normalize the audio signal to a target RMS value
:param ref_channel (Optional[int]): channel to use as reference for normalizing multi-channel audio, set None to use max RMS across channels
:return: AudioSegment instance
"""
samples = None
if isinstance(audio_file, list):
return cls.from_file_list(
audio_file_list=audio_file,
target_sr=target_sr,
int_values=int_values,
offset=offset,
duration=duration,
trim=trim,
trim_ref=trim_ref,
trim_top_db=trim_top_db,
trim_frame_length=trim_frame_length,
trim_hop_length=trim_hop_length,
orig_sr=orig_sr,
channel_selector=channel_selector,
normalize_db=normalize_db,
ref_channel=ref_channel,
)
if not isinstance(audio_file, str) or os.path.splitext(audio_file)[-1] in sf_supported_formats:
try:
with sf.SoundFile(audio_file, 'r') as f:
dtype = 'int32' if int_values else 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
except RuntimeError as e:
logging.error(
f"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`. "
f"NeMo will fallback to loading via pydub."
)
if hasattr(audio_file, "seek"):
audio_file.seek(0)
if HAVE_PYDUB and samples is None:
try:
samples = Audio.from_file(audio_file)
sample_rate = samples.frame_rate
num_channels = samples.channels
if offset > 0:
# pydub does things in milliseconds
seconds = offset * 1000
samples = samples[int(seconds) :]
if duration > 0:
seconds = duration * 1000
samples = samples[: int(seconds)]
samples = np.array(samples.get_array_of_samples())
# For multi-channel signals, channels are stacked in a one-dimensional vector
if num_channels > 1:
samples = np.reshape(samples, (-1, num_channels))
except CouldntDecodeError as err:
logging.error(f"Loading {audio_file} via pydub raised CouldntDecodeError: `{err}`.")
if samples is None:
libs = "soundfile, and pydub" if HAVE_PYDUB else "soundfile"
raise Exception(f"Your audio file {audio_file} could not be decoded. We tried using {libs}.")
return cls(
samples,
sample_rate,
target_sr=target_sr,
trim=trim,
trim_ref=trim_ref,
trim_top_db=trim_top_db,
trim_frame_length=trim_frame_length,
trim_hop_length=trim_hop_length,
orig_sr=orig_sr,
channel_selector=channel_selector,
normalize_db=normalize_db,
ref_channel=ref_channel,
)
@classmethod
def from_file_list(
cls,
audio_file_list,
target_sr=None,
int_values=False,
offset=0,
duration=0,
trim=False,
channel_selector=None,
*args,
**kwargs,
):
"""
Function wrapper for `from_file` method. Load a list of files from `audio_file_list`.
The length of each audio file is unified with the duration item in the input manifest file.
See `from_file` method for arguments.
If a list of files is provided, load samples from individual single-channel files and
concatenate them along the channel dimension.
"""
if isinstance(channel_selector, int):
# Shortcut when selecting a single channel
if channel_selector >= len(audio_file_list):
raise RuntimeError(
f'Channel cannot be selected: channel_selector={channel_selector}, num_audio_files={len(audio_file_list)}'
)
# Select only a single file
audio_file_list = [audio_file_list[channel_selector]]
# Reset the channel selector since we applied it here
channel_selector = None
samples = None
for a_file in audio_file_list:
# Load audio from the current file
a_segment = cls.from_file(
a_file,
target_sr=target_sr,
int_values=int_values,
offset=offset,
duration=duration,
channel_selector=None,
trim=False, # Do not apply trim to individual files, it will be applied to the concatenated signal
*args,
**kwargs,
)
# Only single-channel individual files are supported for now
if a_segment.num_channels != 1:
raise RuntimeError(
f'Expecting a single-channel audio signal, but loaded {a_segment.num_channels} channels from file {a_file}'
)
if target_sr is None:
# All files need to be loaded with the same sample rate
target_sr = a_segment.sample_rate
# Concatenate samples
a_samples = a_segment.samples[:, None]
if samples is None:
samples = a_samples
else:
# Check the dimensions match
if len(a_samples) != len(samples):
raise RuntimeError(
f'Loaded samples need to have identical length: {a_samples.shape} != {samples.shape}'
)
# Concatenate along channel dimension
samples = np.concatenate([samples, a_samples], axis=1)
# Final setup for class initialization
samples = np.squeeze(samples)
sample_rate = target_sr
return cls(
samples, sample_rate, target_sr=target_sr, trim=trim, channel_selector=channel_selector, *args, **kwargs,
)
@classmethod
def segment_from_file(
cls, audio_file, target_sr=None, n_segments=0, trim=False, orig_sr=None, channel_selector=None, offset=None
):
"""Grabs n_segments number of samples from audio_file.
        If offset is not provided, the starting position is selected randomly.
If offset is provided, it is used to calculate the starting sample.
Note that audio_file can be either the file path, or a file-like object.
:param audio_file: path to a file or a file-like object
:param target_sr: sample rate for the output samples
:param n_segments: desired number of samples
:param trim: if true, trim leading and trailing silence from an audio signal
:param orig_sr: the original sample rate
        :param channel_selector: select a subset of channels. If set to `None`, the original signal will be used.
:param offset: fixed offset in seconds
:return: numpy array of samples
"""
is_segmented = False
try:
with sf.SoundFile(audio_file, 'r') as f:
sample_rate = f.samplerate
if target_sr is not None:
n_segments_at_original_sr = math.ceil(n_segments * sample_rate / target_sr)
else:
n_segments_at_original_sr = n_segments
if 0 < n_segments_at_original_sr < len(f):
max_audio_start = len(f) - n_segments_at_original_sr
if offset is None:
audio_start = random.randint(0, max_audio_start)
else:
audio_start = math.floor(offset * sample_rate)
if audio_start > max_audio_start:
raise RuntimeError(
f'Provided audio start ({audio_start}) is larger than the maximum possible ({max_audio_start})'
)
f.seek(audio_start)
samples = f.read(n_segments_at_original_sr, dtype='float32')
is_segmented = True
elif n_segments_at_original_sr > len(f):
logging.warning(
f"Number of segments ({n_segments_at_original_sr}) is greater than the length ({len(f)}) of the audio file {audio_file}. This may lead to shape mismatch errors."
)
samples = f.read(dtype='float32')
else:
samples = f.read(dtype='float32')
except RuntimeError as e:
logging.error(f"Loading {audio_file} via SoundFile raised RuntimeError: `{e}`.")
raise e
features = cls(
samples, sample_rate, target_sr=target_sr, trim=trim, orig_sr=orig_sr, channel_selector=channel_selector
)
if is_segmented:
features._samples = features._samples[:n_segments]
return features
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
@property
def num_channels(self):
if self._samples.ndim == 1:
return 1
else:
return self._samples.shape[-1]
@property
def num_samples(self):
return self._samples.shape[0]
@property
def duration(self):
return self.num_samples / float(self._sample_rate)
@property
def rms_db(self):
"""Return per-channel RMS value.
"""
mean_square = np.mean(self._samples ** 2, axis=0)
return 10 * np.log10(mean_square)
@property
def orig_sr(self):
return self._orig_sr
def gain_db(self, gain):
self._samples *= 10.0 ** (gain / 20.0)
def normalize_db(self, target_db=-20, ref_channel=None):
"""Normalize the signal to a target RMS value in decibels.
For multi-channel audio, the RMS value is determined by the reference channel (if not None),
otherwise it will be the maximum RMS across all channels.
"""
rms_db = self.rms_db
if self.num_channels > 1:
rms_db = max(rms_db) if ref_channel is None else rms_db[ref_channel]
gain = target_db - rms_db
self.gain_db(gain)
def pad(self, pad_size, symmetric=False):
"""Add zero padding to the sample. The pad size is given in number
of samples.
        If symmetric=True, `pad_size` will be added to both sides. If false,
        `pad_size` zeros will be added only to the end.
"""
samples_ndim = self._samples.ndim
if samples_ndim == 1:
pad_width = pad_size if symmetric else (0, pad_size)
elif samples_ndim == 2:
# pad samples, keep channels
pad_width = ((pad_size, pad_size), (0, 0)) if symmetric else ((0, pad_size), (0, 0))
else:
raise NotImplementedError(
f"Padding not implemented for signals with more that 2 dimensions. Current samples dimension: {samples_ndim}."
)
# apply padding
self._samples = np.pad(self._samples, pad_width, mode='constant',)
def subsegment(self, start_time=None, end_time=None):
"""Cut the AudioSegment between given boundaries.
Note that this is an in-place transformation.
:param start_time: Beginning of subsegment in seconds.
:type start_time: float
:param end_time: End of subsegment in seconds.
:type end_time: float
:raise ValueError: If start_time or end_time is incorrectly set,
e.g. out of bounds in time.
"""
start_time = 0.0 if start_time is None else start_time
end_time = self.duration if end_time is None else end_time
if start_time < 0.0:
start_time = self.duration + start_time
if end_time < 0.0:
end_time = self.duration + end_time
if start_time < 0.0:
raise ValueError("The slice start position (%f s) is out of bounds." % start_time)
if end_time < 0.0:
raise ValueError("The slice end position (%f s) is out of bounds." % end_time)
if start_time > end_time:
raise ValueError(
"The slice start position (%f s) is later than the end position (%f s)." % (start_time, end_time)
)
if end_time > self.duration:
raise ValueError("The slice end position (%f s) is out of bounds (> %f s)" % (end_time, self.duration))
start_sample = int(round(start_time * self._sample_rate))
end_sample = int(round(end_time * self._sample_rate))
self._samples = self._samples[start_sample:end_sample]
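# Hedged usage sketch (not part of the original module): loads a placeholder file "example.wav"
# at 16 kHz, keeps at most the first half second, and normalizes it to -20 dB RMS.
def _example_audio_segment(path="example.wav"):
    segment = AudioSegment.from_file(path, target_sr=16000)
    segment.subsegment(start_time=0.0, end_time=min(0.5, segment.duration))
    segment.normalize_db(target_db=-20)
    return segment.num_samples, segment.duration, segment.rms_db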
|
NeMo-main
|
nemo/collections/asr/parts/preprocessing/segment.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 Ryan Leary
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file contains code artifacts adapted from https://github.com/ryanleary/patter
import copy
import inspect
import io
import os
import random
import subprocess
from tempfile import NamedTemporaryFile
from typing import Any, List, Optional, Union
import librosa
import numpy as np
import soundfile as sf
from scipy import signal
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.common.parts.preprocessing import collections, parsers
from nemo.core.classes import IterableDataset
from nemo.utils import logging
# TODO @blisc: Perhaps refactor instead of import guarding
HAVE_OMEGACONG_WEBDATASET = True
try:
import webdataset as wd
from omegaconf import DictConfig, OmegaConf
except ModuleNotFoundError:
from nemo.utils.exceptions import LightningNotInstalledException
HAVE_OMEGACONG_WEBDATASET = False
try:
from nemo.collections.asr.parts.utils import numba_utils
HAVE_NUMBA = True
except (ImportError, ModuleNotFoundError):
HAVE_NUMBA = False
def read_one_audiosegment(manifest, target_sr, tarred_audio=False, audio_dataset=None):
if tarred_audio:
if audio_dataset is None:
raise TypeError("Expected augmentation dataset but got None")
audio_file, file_id, manifest_entry = next(audio_dataset)
offset = 0 if manifest_entry.offset is None else manifest_entry.offset
duration = 0 if manifest_entry.duration is None else manifest_entry.duration
else:
audio_record = random.sample(manifest.data, 1)[0]
audio_file = audio_record.audio_file
offset = 0 if audio_record.offset is None else audio_record.offset
duration = 0 if audio_record.duration is None else audio_record.duration
return AudioSegment.from_file(audio_file, target_sr=target_sr, offset=offset, duration=duration)
class Perturbation(object):
def max_augmentation_length(self, length):
return length
def perturb(self, data):
raise NotImplementedError
class SpeedPerturbation(Perturbation):
"""
Performs Speed Augmentation by re-sampling the data to a different sampling rate,
which does not preserve pitch.
Note: This is a very slow operation for online augmentation. If space allows,
it is preferable to pre-compute and save the files to augment the dataset.
Args:
sr: Original sampling rate.
resample_type: Type of resampling operation that will be performed.
For better speed using `resampy`'s fast resampling method, use `resample_type='kaiser_fast'`.
For high-quality resampling, set `resample_type='kaiser_best'`.
To use `scipy.signal.resample`, set `resample_type='fft'` or `resample_type='scipy'`
min_speed_rate: Minimum sampling rate modifier.
max_speed_rate: Maximum sampling rate modifier.
num_rates: Number of discrete rates to allow. Can be a positive or negative
integer.
If a positive integer greater than 0 is provided, the range of
speed rates will be discretized into `num_rates` values.
If a negative integer or 0 is provided, the full range of speed rates
will be sampled uniformly.
Note: If a positive integer is provided and the resultant discretized
range of rates contains the value '1.0', then those samples with rate=1.0,
            will not be augmented at all and simply skipped. This is to avoid unnecessary
            augmentation and reduce computation time. The effective augmentation chance
            in such a case is `prob * ((num_rates - 1) / num_rates) * 100`%,
where `prob` is the global probability of a sample being augmented.
rng: Random seed. Default is None
"""
def __init__(self, sr, resample_type, min_speed_rate=0.9, max_speed_rate=1.1, num_rates=5, rng=None):
min_rate = min(min_speed_rate, max_speed_rate)
if min_rate < 0.0:
raise ValueError("Minimum sampling rate modifier must be > 0.")
if resample_type not in ('kaiser_best', 'kaiser_fast', 'fft', 'scipy'):
raise ValueError("Supported `resample_type` values are ('kaiser_best', 'kaiser_fast', 'fft', 'scipy')")
self._sr = sr
self._min_rate = min_speed_rate
self._max_rate = max_speed_rate
self._num_rates = num_rates
if num_rates > 0:
self._rates = np.linspace(self._min_rate, self._max_rate, self._num_rates, endpoint=True)
self._res_type = resample_type
random.seed(rng) if rng else None
def max_augmentation_length(self, length):
return length * self._max_rate
def perturb(self, data):
# Select speed rate either from choice or random sample
if self._num_rates < 0:
speed_rate = random.uniform(self._min_rate, self._max_rate)
else:
speed_rate = random.choice(self._rates)
# Skip perturbation in case of identity speed rate
if speed_rate == 1.0:
return
new_sr = int(self._sr * speed_rate)
data._samples = librosa.core.resample(
data._samples, orig_sr=self._sr, target_sr=new_sr, res_type=self._res_type
)
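# Hedged usage sketch (not part of the original module): applies speed perturbation in place to
# an AudioSegment loaded from a placeholder file. With five discrete rates, a drawn rate of
# exactly 1.0 leaves the audio untouched.
def _example_speed_perturbation(path="example.wav"):
    segment = AudioSegment.from_file(path, target_sr=16000)
    perturber = SpeedPerturbation(sr=16000, resample_type='kaiser_fast', min_speed_rate=0.9, max_speed_rate=1.1)
    perturber.perturb(segment)
    return segment.num_samples  # scales roughly with the drawn speed rate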
class TimeStretchPerturbation(Perturbation):
"""
Time-stretch an audio series by a fixed rate while preserving pitch, based on [1, 2].
Note:
This is a simplified implementation, intended primarily for reference and pedagogical purposes.
It makes no attempt to handle transients, and is likely to produce audible artifacts.
Reference
[1] [Ellis, D. P. W. “A phase vocoder in Matlab.” Columbia University, 2002.]
(http://www.ee.columbia.edu/~dpwe/resources/matlab/pvoc/)
[2] [librosa.effects.time_stretch]
(https://librosa.github.io/librosa/generated/librosa.effects.time_stretch.html)
Args:
min_speed_rate: Minimum sampling rate modifier.
max_speed_rate: Maximum sampling rate modifier.
num_rates: Number of discrete rates to allow. Can be a positive or negative
integer.
If a positive integer greater than 0 is provided, the range of
speed rates will be discretized into `num_rates` values.
If a negative integer or 0 is provided, the full range of speed rates
will be sampled uniformly.
Note: If a positive integer is provided and the resultant discretized
range of rates contains the value '1.0', then those samples with rate=1.0,
            will not be augmented at all and simply skipped. This is to avoid unnecessary
            augmentation and reduce computation time. The effective augmentation chance
            in such a case is `prob * ((num_rates - 1) / num_rates) * 100`%,
where `prob` is the global probability of a sample being augmented.
n_fft: Number of fft filters to be computed.
rng: Random seed. Default is None
"""
def __init__(self, min_speed_rate=0.9, max_speed_rate=1.1, num_rates=5, n_fft=512, rng=None):
min_rate = min(min_speed_rate, max_speed_rate)
if min_rate < 0.0:
raise ValueError("Minimum sampling rate modifier must be > 0.")
self._min_rate = min_speed_rate
self._max_rate = max_speed_rate
self._num_rates = num_rates
if num_rates > 0:
self._rates = np.linspace(self._min_rate, self._max_rate, self._num_rates, endpoint=True)
random.seed(rng) if rng else None
# Pre-compute constants
self._n_fft = int(n_fft)
self._hop_length = int(n_fft // 2)
# Pre-allocate buffers
self._phi_advance_fast = np.linspace(0, np.pi * self._hop_length, self._hop_length + 1)
self._scale_buffer_fast = np.empty(self._hop_length + 1, dtype=np.float32)
self._phi_advance_slow = np.linspace(0, np.pi * self._n_fft, self._n_fft + 1)
self._scale_buffer_slow = np.empty(self._n_fft + 1, dtype=np.float32)
def max_augmentation_length(self, length):
return length * self._max_rate
def perturb(self, data):
# Select speed rate either from choice or random sample
if self._num_rates < 0:
speed_rate = random.uniform(self._min_rate, self._max_rate)
else:
speed_rate = random.choice(self._rates)
# Skip perturbation in case of identity speed rate
if speed_rate == 1.0:
return
# Increase `n_fft` based on task (speed up or slow down audio)
# This greatly reduces upper bound of maximum time taken
# to compute slowed down audio segments.
if speed_rate >= 1.0: # Speed up audio
fft_multiplier = 1
phi_advance = self._phi_advance_fast
scale_buffer = self._scale_buffer_fast
else: # Slow down audio
fft_multiplier = 2
phi_advance = self._phi_advance_slow
scale_buffer = self._scale_buffer_slow
n_fft = int(self._n_fft * fft_multiplier)
hop_length = int(self._hop_length * fft_multiplier)
# Perform short-term Fourier transform (STFT)
stft = librosa.core.stft(data._samples, n_fft=n_fft, hop_length=hop_length)
# Stretch by phase vocoding
if HAVE_NUMBA:
stft_stretch = numba_utils.phase_vocoder(stft, speed_rate, phi_advance, scale_buffer)
else:
stft_stretch = librosa.core.phase_vocoder(stft, speed_rate, hop_length)
# Predict the length of y_stretch
len_stretch = int(round(len(data._samples) / speed_rate))
# Invert the STFT
y_stretch = librosa.core.istft(
stft_stretch, dtype=data._samples.dtype, hop_length=hop_length, length=len_stretch
)
data._samples = y_stretch
class SilencePerturbation(Perturbation):
"""
Applies random silence at the start and/or end of the audio.
Args:
min_start_silence_secs (float): Min start silence level in secs
max_start_silence_secs (float): Max start silence level in secs
min_end_silence_secs (float): Min end silence level in secs
max_end_silence_secs (float): Max end silence level in secs
rng (int): Random seed. Default is None
        value (float): value representing silence to be added to the audio array.
"""
def __init__(
self,
min_start_silence_secs: float = 0,
max_start_silence_secs: float = 0,
min_end_silence_secs: float = 0,
max_end_silence_secs: float = 0,
rng: int = None,
value: float = 0,
):
self._min_start_silence_secs = min_start_silence_secs
self._max_start_silence_secs = max_start_silence_secs
self._min_end_silence_secs = min_end_silence_secs
self._max_end_silence_secs = max_end_silence_secs
random.seed(rng) if rng else None
self._value = value
def perturb(self, data):
start_silence_len = random.uniform(self._min_start_silence_secs, self._max_start_silence_secs)
end_silence_len = random.uniform(self._min_end_silence_secs, self._max_end_silence_secs)
start = np.full((int(start_silence_len * data.sample_rate),), self._value)
end = np.full((int(end_silence_len * data.sample_rate),), self._value)
data._samples = np.concatenate([start, data._samples, end])
class GainPerturbation(Perturbation):
"""
Applies random gain to the audio.
Args:
min_gain_dbfs (float): Min gain level in dB
max_gain_dbfs (float): Max gain level in dB
rng (int): Random seed. Default is None
"""
def __init__(self, min_gain_dbfs=-10, max_gain_dbfs=10, rng=None):
self._min_gain_dbfs = min_gain_dbfs
self._max_gain_dbfs = max_gain_dbfs
random.seed(rng) if rng else None
def perturb(self, data):
gain = random.uniform(self._min_gain_dbfs, self._max_gain_dbfs)
data._samples = data._samples * (10.0 ** (gain / 20.0))
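# Hedged sketch (not part of the original module) of the dB-to-linear conversion used above:
# a gain of g dB scales samples by 10 ** (g / 20), so +6 dB is roughly a factor of 2 and -20 dB is 0.1.
def _example_gain_db_to_linear(gain_db):
    return 10.0 ** (gain_db / 20.0)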
class ImpulsePerturbation(Perturbation):
"""
Convolves audio with a Room Impulse Response.
Args:
manifest_path (list): Manifest file for RIRs
audio_tar_filepaths (list): Tar files, if RIR audio files are tarred
shuffle_n (int): Shuffle parameter for shuffling buffered files from the tar files
normalize_impulse (bool): Normalize impulse response to zero mean and amplitude 1
shift_impulse (bool): Shift impulse response to adjust for delay at the beginning
rng (int): Random seed. Default is None
"""
def __init__(
self,
manifest_path=None,
audio_tar_filepaths=None,
shuffle_n=128,
normalize_impulse=False,
shift_impulse=False,
rng=None,
):
self._manifest = collections.ASRAudioText(manifest_path, parser=parsers.make_parser([]), index_by_file_id=True)
self._audiodataset = None
self._tarred_audio = False
self._normalize_impulse = normalize_impulse
self._shift_impulse = shift_impulse
self._data_iterator = None
if audio_tar_filepaths:
self._tarred_audio = True
self._audiodataset = AugmentationDataset(manifest_path, audio_tar_filepaths, shuffle_n)
self._data_iterator = iter(self._audiodataset)
self._rng = rng
random.seed(self._rng) if rng else None
def perturb(self, data):
impulse = read_one_audiosegment(
self._manifest, data.sample_rate, tarred_audio=self._tarred_audio, audio_dataset=self._data_iterator,
)
# normalize if necessary
if self._normalize_impulse:
# normalize the impulse response to zero mean and amplitude 1
impulse_norm = impulse.samples - np.mean(impulse.samples)
impulse_norm /= max(abs(impulse_norm))
else:
impulse_norm = impulse.samples
# len of input data samples
len_data = len(data._samples)
# convolve with the full impulse response
data._samples = signal.fftconvolve(data._samples, impulse_norm, "full")
# compensate the dominant path propagation delay
if self._shift_impulse:
# Find the peak of the IR and shift the output to the left
max_ind = np.argmax(np.abs(impulse_norm))
data._samples = data._samples[max_ind:]
# trim to match the input data length
data._samples = data._samples[:len_data]
# normalize data samples to [-1,1] after rir convolution to avoid nans with fp16 training
data._samples = data._samples / max(abs(data._samples))
class ShiftPerturbation(Perturbation):
"""
Perturbs audio by shifting the audio in time by a random amount between min_shift_ms and max_shift_ms.
The final length of the audio is kept unaltered by padding the audio with zeros.
Args:
min_shift_ms (float): Minimum time in milliseconds by which audio will be shifted
max_shift_ms (float): Maximum time in milliseconds by which audio will be shifted
rng (int): Random seed. Default is None
"""
def __init__(self, min_shift_ms=-5.0, max_shift_ms=5.0, rng=None):
self._min_shift_ms = min_shift_ms
self._max_shift_ms = max_shift_ms
random.seed(rng) if rng else None
def perturb(self, data):
shift_ms = random.uniform(self._min_shift_ms, self._max_shift_ms)
if abs(shift_ms) / 1000 > data.duration:
# TODO: do something smarter than just ignore this condition
return
shift_samples = int(shift_ms * data.sample_rate // 1000)
# logging.debug("shift: %s", shift_samples)
if shift_samples < 0:
data._samples[-shift_samples:] = data._samples[:shift_samples]
data._samples[:-shift_samples] = 0
elif shift_samples > 0:
data._samples[:-shift_samples] = data._samples[shift_samples:]
data._samples[-shift_samples:] = 0
class NoisePerturbation(Perturbation):
"""
Perturbation that adds noise to input audio.
Args:
manifest_path (str): Manifest file with paths to noise files
min_snr_db (float): Minimum SNR of audio after noise is added
max_snr_db (float): Maximum SNR of audio after noise is added
max_gain_db (float): Maximum gain that can be applied on the noise sample
audio_tar_filepaths (list) : Tar files, if noise audio files are tarred
shuffle_n (int): Shuffle parameter for shuffling buffered files from the tar files
orig_sr (int): Original sampling rate of the noise files
rng (int): Random seed. Default is None
"""
def __init__(
self,
manifest_path=None,
min_snr_db=10,
max_snr_db=50,
max_gain_db=300.0,
rng=None,
audio_tar_filepaths=None,
shuffle_n=100,
orig_sr=16000,
):
self._manifest = collections.ASRAudioText(manifest_path, parser=parsers.make_parser([]), index_by_file_id=True)
self._audiodataset = None
self._tarred_audio = False
self._orig_sr = orig_sr
self._data_iterator = None
if audio_tar_filepaths:
self._tarred_audio = True
self._audiodataset = AugmentationDataset(manifest_path, audio_tar_filepaths, shuffle_n)
self._data_iterator = iter(self._audiodataset)
random.seed(rng) if rng else None
self._rng = rng
self._min_snr_db = min_snr_db
self._max_snr_db = max_snr_db
self._max_gain_db = max_gain_db
@property
def orig_sr(self):
return self._orig_sr
def get_one_noise_sample(self, target_sr):
return read_one_audiosegment(
self._manifest, target_sr, tarred_audio=self._tarred_audio, audio_dataset=self._data_iterator
)
def perturb(self, data, ref_mic=0):
"""
Args:
data (AudioSegment): audio data
ref_mic (int): reference mic index for scaling multi-channel audios
"""
noise = read_one_audiosegment(
self._manifest, data.sample_rate, tarred_audio=self._tarred_audio, audio_dataset=self._data_iterator,
)
self.perturb_with_input_noise(data, noise, ref_mic=ref_mic)
def perturb_with_input_noise(self, data, noise, data_rms=None, ref_mic=0):
"""
Args:
data (AudioSegment): audio data
noise (AudioSegment): noise data
data_rms (Union[float, List[float]): rms_db for data input
ref_mic (int): reference mic index for scaling multi-channel audios
"""
if data.num_channels != noise.num_channels:
raise ValueError(
f"Found mismatched channels for data ({data.num_channels}) and noise ({noise.num_channels})."
)
if not (0 <= ref_mic < data.num_channels):
raise ValueError(
f" reference mic ID must be an integer in [0, {data.num_channels}), got {ref_mic} instead."
)
snr_db = random.uniform(self._min_snr_db, self._max_snr_db)
if data_rms is None:
data_rms = data.rms_db
if data.num_channels > 1:
noise_gain_db = data_rms[ref_mic] - noise.rms_db[ref_mic] - snr_db
else:
noise_gain_db = data_rms - noise.rms_db - snr_db
noise_gain_db = min(noise_gain_db, self._max_gain_db)
# calculate noise segment to use
start_time = random.uniform(0.0, noise.duration - data.duration)
if noise.duration > (start_time + data.duration):
noise.subsegment(start_time=start_time, end_time=start_time + data.duration)
# adjust gain for snr purposes and superimpose
noise.gain_db(noise_gain_db)
if noise._samples.shape[0] < data._samples.shape[0]:
noise_idx = random.randint(0, data._samples.shape[0] - noise._samples.shape[0])
data._samples[noise_idx : noise_idx + noise._samples.shape[0]] += noise._samples
else:
data._samples += noise._samples
def perturb_with_foreground_noise(self, data, noise, data_rms=None, max_noise_dur=2, max_additions=1, ref_mic=0):
"""
Args:
data (AudioSegment): audio data
noise (AudioSegment): noise data
data_rms (Union[float, List[float]): rms_db for data input
max_noise_dur: (float): max noise duration
max_additions (int): number of times for adding noise
ref_mic (int): reference mic index for scaling multi-channel audios
"""
if data.num_channels != noise.num_channels:
raise ValueError(
f"Found mismatched channels for data ({data.num_channels}) and noise ({noise.num_channels})."
)
if not (0 <= ref_mic < data.num_channels):
raise ValueError(
f" reference mic ID must be an integer in [0, {data.num_channels}), got {ref_mic} instead."
)
snr_db = random.uniform(self._min_snr_db, self._max_snr_db)
if not data_rms:
data_rms = data.rms_db
if data.num_channels > 1:
noise_gain_db = data_rms[ref_mic] - noise.rms_db[ref_mic] - snr_db
else:
noise_gain_db = data_rms - noise.rms_db - snr_db
noise_gain_db = min(noise_gain_db, self._max_gain_db)
n_additions = random.randint(1, max_additions)
for i in range(n_additions):
noise_dur = random.uniform(0.0, max_noise_dur)
start_time = random.uniform(0.0, noise.duration)
start_sample = int(round(start_time * noise.sample_rate))
end_sample = int(round(min(noise.duration, (start_time + noise_dur)) * noise.sample_rate))
noise_samples = np.copy(noise._samples[start_sample:end_sample])
# adjust gain for snr purposes and superimpose
noise_samples *= 10.0 ** (noise_gain_db / 20.0)
if noise_samples.shape[0] > data._samples.shape[0]:
noise_samples = noise_samples[0 : data._samples.shape[0]]
noise_idx = random.randint(0, data._samples.shape[0] - noise_samples.shape[0])
data._samples[noise_idx : noise_idx + noise_samples.shape[0]] += noise_samples
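# Hedged sketch (not part of the original module) of the SNR bookkeeping used by NoisePerturbation
# above: to reach a target SNR, the noise is gained by (signal RMS dB - noise RMS dB - target SNR dB),
# capped at max_gain_db.
def _example_noise_gain_db(data_rms_db, noise_rms_db, snr_db, max_gain_db=300.0):
    return min(data_rms_db - noise_rms_db - snr_db, max_gain_db)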
class NoisePerturbationWithNormalization(Perturbation):
"""
    Perturbation that adds noise to input audio, with normalisation to a specific decibel level.
Also tiles shorter noise samples up to their corresponding clean audio length.
Args:
manifest_path (str or list): Manifest file with paths to noise files, can be list if using multiple noise sources
min_snr_db (float): Minimum SNR of audio after noise is added
max_snr_db (float): Maximum SNR of audio after noise is added
        snr_samples (list): A discrete list of SNR values in dB to sample from when mixing; used instead of [min_snr_db, max_snr_db]
norm_to_db (float): Will normalise clean, noise, and mixed samples to this DB
audio_tar_filepaths (str or list) : Tar files, if noise audio files are tarred, can be list for multiple sources
shuffle_n (int): Shuffle parameter for shuffling buffered files from the tar files
orig_sr (int): Original sampling rate of the noise files
rng (int): Random seed. Default is None
shard_strategy (str): if you're using tarred audio and wish to scatter instead of replicate, set this to 'scatter'
epsilon (float): minimum value for RMS DB normalisation to avoid divide by zero
"""
def __init__(
self,
manifest_path=None,
min_snr_db=10,
max_snr_db=50,
snr_samples=None,
norm_to_db=None,
rng=None,
audio_tar_filepaths=None,
shuffle_n=128,
orig_sr=16000,
global_rank=0,
world_size=1,
shard_strategy='replicate',
epsilon=0.01,
):
# import here to avoid circular import error
from nemo.collections.asr.data.audio_to_text import RandomizedChainDataset
self._manifest = collections.ASRAudioText(manifest_path, parser=parsers.make_parser([]), index_by_file_id=True)
self._audiodataset = None
self._tarred_audio = False
self._orig_sr = orig_sr
self._data_iterator = None
random.seed(rng) if rng else None
self._rng = rng
if audio_tar_filepaths:
self._tarred_audio = True
if isinstance(manifest_path, str):
manifest_path = [manifest_path]
if isinstance(audio_tar_filepaths, str):
audio_tar_filepaths = [audio_tar_filepaths]
datasets = []
for tarred_audio_filepath, manifest_filepath in zip(audio_tar_filepaths, manifest_path):
dataset = AugmentationDataset(
manifest_filepath,
tarred_audio_filepath,
shuffle_n,
rank=global_rank,
world_size=world_size,
shard_strategy=shard_strategy,
)
datasets.append(dataset)
self._audiodataset = RandomizedChainDataset(
datasets, rnd_seed=(rng if rng else random.randint(0, 30000)) + global_rank
)
if len(self._audiodataset) == 0:
raise RuntimeError(
"NoisePerturbationWithNormalization detected a zero length RandomizedChainDataset, should never happen"
)
self._data_iterator = iter(self._audiodataset)
self._min_snr_db = min_snr_db
self._max_snr_db = max_snr_db
self._norm_to_db = norm_to_db
self._snr_samples = snr_samples if isinstance(snr_samples, list) and len(snr_samples) > 0 else None
self._epsilon = epsilon
@property
def orig_sr(self):
return self._orig_sr
def read_one_audiosegment(self, target_sr):
if self._tarred_audio:
if self._data_iterator is None:
raise TypeError("Expected valid iterator but got None")
try:
audio_file, file_id, manifest_entry = next(self._data_iterator)
except StopIteration:
self._data_iterator = iter(self._audiodataset)
audio_file, file_id, manifest_entry = next(self._data_iterator)
offset = 0 if manifest_entry.offset is None else manifest_entry.offset
duration = 0 if manifest_entry.duration is None else manifest_entry.duration
else:
audio_record = random.sample(self._manifest.data, 1)[0]
audio_file = audio_record.audio_file
offset = 0 if audio_record.offset is None else audio_record.offset
duration = 0 if audio_record.duration is None else audio_record.duration
return AudioSegment.from_file(audio_file, target_sr=target_sr, offset=offset, duration=duration)
def perturb(self, data, ref_mic=0):
"""
Args:
data (AudioSegment): audio data
ref_mic (int): reference mic index for scaling multi-channel audios
"""
noise = self.read_one_audiosegment(data.sample_rate)
# noise samples need to be at least 1 second long to avoid strange oddities
# in the RMS SNR mixing, so we have a fail-safe here to ensure at least 1 sec duration
while noise.duration < 1:
noise = self.read_one_audiosegment(data.sample_rate)
self.perturb_with_input_noise(data, noise, ref_mic=ref_mic, norm_to_db=self._norm_to_db)
def snr_mixer(self, clean, noise, snr, norm_to_db=-25.0):
"""
Mixes the clean audio with the noise
Args:
clean (numpy array): the clean audio data
noise (numpy array): the noise audio data
snr (float): the SNR value for the mixing
norm_to_db (float): the DB value to normalise to before mixing
"""
clean = self.norm_audio_to_db(clean, norm_to_db)
noise = self.norm_audio_to_db(noise, norm_to_db)
# Set the noise level for a given SNR
# note that if your noise doesn't overlap with your audio then your target SNR
# may not be achievable. Consider using an rms-threshold in the future
noisescalar = 10 ** (-snr / 20.0)
noisenewlevel = noise * noisescalar
noisyspeech = clean + noisenewlevel
return clean, noisenewlevel, noisyspeech
def norm_audio_to_db(self, x, norm_to_db):
"""
Normalises audio signal to particular db, with some epsilon in-case of divide by zero
Args:
x (numpy array): input audio signal
norm_to_db (float): the db to normalise to
"""
rms = (x ** 2).mean(axis=0) ** 0.5
rms = np.where(np.isclose(rms, 0), self._epsilon, rms)
scalar = 10 ** (norm_to_db / 20.0) / rms
return x * scalar
def concatenate_noise_sample(self, clean, noise, fs, silence_length=0.25):
"""
Tiles the noise array to match the clean audio array, with small silence between the joins
Args:
clean (numpy array): clean audio data
noise (numpy array): noise audio data
fs (int): sample rate used by both clean and noise audio data
silence_length (float): the amount of silence (in secs) to insert before tiling
"""
while len(noise) < len(clean):
if noise.ndim > 1:
zeros = np.zeros((int(fs * silence_length), noise.shape[-1]))
else:
zeros = np.zeros((int(fs * silence_length),))
noiseconcat = np.append(noise, zeros, axis=0)
noise = np.append(noiseconcat, noise, axis=0)
return noise
def perturb_with_input_noise(self, data, noise, data_rms=None, ref_mic=0, norm_to_db=-25.0):
"""
Args:
data (AudioSegment): audio data
noise (AudioSegment): noise data
data_rms (Union[float, List[float]): rms_db for data input
ref_mic (int): reference mic index for scaling multi-channel audio, if set to None then
each channel will be scaled independently
norm_to_db (float): will normalise all audio to this DB
"""
if data.num_channels != noise.num_channels:
raise ValueError(
f"Found mismatched channels for data ({data.num_channels}) and noise ({noise.num_channels})."
)
if not (0 <= ref_mic < data.num_channels):
raise ValueError(
f" reference mic ID must be an integer in [0, {data.num_channels}), got {ref_mic} instead."
)
if self._snr_samples:
snr_db = random.sample(self._snr_samples, 1)[0]
else:
snr_db = random.uniform(self._min_snr_db, self._max_snr_db)
if data_rms is None:
data_rms = data.rms_db[ref_mic] if isinstance(data.rms_db, (list, np.ndarray)) else data.rms_db
if norm_to_db is None:
norm_to_db = data_rms
data_norm = data._samples
noise_norm = noise._samples
if len(data_norm) == 0:
return
if len(noise_norm) < len(data_norm):
noise_norm = self.concatenate_noise_sample(data_norm, noise_norm, data.sample_rate)
noise_norm = noise_norm[0 : len(data_norm)]
_, _, noisy_snr = self.snr_mixer(clean=data_norm, noise=noise_norm, snr=snr_db, norm_to_db=norm_to_db)
data._samples = noisy_snr
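# Illustrative sketch (not part of the original file): a self-contained numpy walkthrough
# of the dB-normalisation and SNR-mixing math implemented by `norm_audio_to_db` and
# `snr_mixer` above. The signal lengths and SNR/level values below are arbitrary placeholders.
def _example_snr_mixing_sketch(snr_db=10.0, norm_to_db=-25.0):
    import numpy as np

    clean = np.random.randn(16000)
    noise = np.random.randn(16000)

    def to_level(x):
        # scale x so that its RMS corresponds to norm_to_db
        rms = np.sqrt(np.mean(x ** 2))
        return x * (10 ** (norm_to_db / 20.0) / max(rms, 1e-12))

    clean, noise = to_level(clean), to_level(noise)
    # with both signals at the same RMS level, attenuating the noise by
    # 10 ** (-snr / 20) makes the clean-to-noise power ratio equal snr_db
    noisy = clean + noise * 10 ** (-snr_db / 20.0)
    return noisy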
class WhiteNoisePerturbation(Perturbation):
"""
Perturbation that adds white noise to an audio file in the training dataset.
Args:
min_level (int): Minimum level in dB at which white noise should be added
max_level (int): Maximum level in dB at which white noise should be added
rng (int): Random seed. Default is None
"""
def __init__(self, min_level=-90, max_level=-46, rng=None):
self.min_level = int(min_level)
self.max_level = int(max_level)
np.random.seed(rng) if rng else None
def perturb(self, data):
noise_level_db = np.random.randint(self.min_level, self.max_level, dtype='int32')
noise_signal = np.random.randn(data._samples.shape[0]) * (10.0 ** (noise_level_db / 20.0))
data._samples += noise_signal
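# Illustrative sketch (not part of the original file): the gain applied by
# WhiteNoisePerturbation for a given noise level in dB. On samples normalised to
# [-1, 1], a level of -46 dB corresponds to a standard deviation of roughly 0.005.
def _example_white_noise_gain(noise_level_db=-46):
    gain = 10.0 ** (noise_level_db / 20.0)
    return gain  # ~0.005 for -46 dB, ~3.2e-5 for -90 dB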
class RirAndNoisePerturbation(Perturbation):
"""
RIR augmentation with additive foreground and background noise.
In this implementation audio data is augmented by first convolving the audio with a Room Impulse Response
and then adding foreground noise and background noise at various SNRs. RIR, foreground and background noises
should either be supplied with a manifest file or as tarred audio files (faster).
Different sets of noise audio files can be provided based on the original sampling rate of the noise. This is useful when
training a mixed sample rate model. For example, when training a mixed model with 8 kHz and 16 kHz audio with a
target sampling rate of 16 kHz, one would want to augment 8 kHz data with 8 kHz noise rather than 16 kHz noise.
Args:
rir_manifest_path: Manifest file for RIRs
rir_tar_filepaths: Tar files, if RIR audio files are tarred
rir_prob: Probability of applying a RIR
noise_manifest_paths: Foreground noise manifest path
min_snr_db: Min SNR for foreground noise
max_snr_db: Max SNR for foreground noise
noise_tar_filepaths: Tar files, if noise files are tarred
apply_noise_rir: Whether to convolve foreground noise with a random RIR
orig_sample_rate: Original sampling rate of foreground noise audio
max_additions: Max number of times foreground noise is added to an utterance
max_duration: Max duration of foreground noise
bg_noise_manifest_paths: Background noise manifest path
bg_min_snr_db: Min SNR for background noise
bg_max_snr_db: Max SNR for background noise
bg_noise_tar_filepaths: Tar files, if noise files are tarred
bg_orig_sample_rate: Original sampling rate of background noise audio
rng: Random seed. Default is None
"""
def __init__(
self,
rir_manifest_path=None,
rir_prob=0.5,
noise_manifest_paths=None,
noise_prob=1.0,
min_snr_db=0,
max_snr_db=50,
rir_tar_filepaths=None,
rir_shuffle_n=100,
noise_tar_filepaths=None,
apply_noise_rir=False,
orig_sample_rate=None,
max_additions=5,
max_duration=2.0,
bg_noise_manifest_paths=None,
bg_noise_prob=1.0,
bg_min_snr_db=10,
bg_max_snr_db=50,
bg_noise_tar_filepaths=None,
bg_orig_sample_rate=None,
rng=None,
):
self._rir_prob = rir_prob
self._noise_prob = noise_prob
self._bg_noise_prob = bg_noise_prob
random.seed(rng) if rng else None
self._rir_perturber = ImpulsePerturbation(
manifest_path=rir_manifest_path,
audio_tar_filepaths=rir_tar_filepaths,
shuffle_n=rir_shuffle_n,
shift_impulse=True,
)
self._fg_noise_perturbers = None
self._bg_noise_perturbers = None
if noise_manifest_paths:
self._fg_noise_perturbers = {}
for i in range(len(noise_manifest_paths)):
if orig_sample_rate is None:
orig_sr = 16000
else:
orig_sr = orig_sample_rate[i]
self._fg_noise_perturbers[orig_sr] = NoisePerturbation(
manifest_path=noise_manifest_paths[i],
min_snr_db=min_snr_db[i],
max_snr_db=max_snr_db[i],
audio_tar_filepaths=noise_tar_filepaths[i],
orig_sr=orig_sr,
)
self._max_additions = max_additions
self._max_duration = max_duration
if bg_noise_manifest_paths:
self._bg_noise_perturbers = {}
for i in range(len(bg_noise_manifest_paths)):
if bg_orig_sample_rate is None:
orig_sr = 16000
else:
orig_sr = bg_orig_sample_rate[i]
self._bg_noise_perturbers[orig_sr] = NoisePerturbation(
manifest_path=bg_noise_manifest_paths[i],
min_snr_db=bg_min_snr_db[i],
max_snr_db=bg_max_snr_db[i],
audio_tar_filepaths=bg_noise_tar_filepaths[i],
orig_sr=orig_sr,
)
self._apply_noise_rir = apply_noise_rir
def perturb(self, data):
prob = random.uniform(0.0, 1.0)
if prob < self._rir_prob:
self._rir_perturber.perturb(data)
data_rms = data.rms_db
if self._fg_noise_perturbers is not None and random.uniform(0.0, 1.0) < self._noise_prob:
orig_sr = data.orig_sr
if orig_sr not in self._fg_noise_perturbers:
orig_sr = max(self._fg_noise_perturbers.keys())
fg_perturber = self._fg_noise_perturbers[orig_sr]
noise = fg_perturber.get_one_noise_sample(data.sample_rate)
if self._apply_noise_rir:
self._rir_perturber.perturb(noise)
fg_perturber.perturb_with_foreground_noise(
data, noise, data_rms=data_rms, max_noise_dur=self._max_duration, max_additions=self._max_additions
)
if self._bg_noise_perturbers is not None and random.uniform(0.0, 1.0) < self._bg_noise_prob:
orig_sr = data.orig_sr
if orig_sr not in self._bg_noise_perturbers:
orig_sr = max(self._bg_noise_perturbers.keys())
bg_perturber = self._bg_noise_perturbers[orig_sr]
noise = bg_perturber.get_one_noise_sample(data.sample_rate)
bg_perturber.perturb_with_input_noise(data, noise, data_rms=data_rms)
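# Illustrative sketch (not part of the original file): one way RirAndNoisePerturbation
# could be instantiated for a single 16 kHz noise set. The manifest paths below are
# hypothetical placeholders; the per-sample-rate arguments are lists indexed in
# parallel with `noise_manifest_paths` / `bg_noise_manifest_paths`.
def _example_rir_noise_perturbation():
    return RirAndNoisePerturbation(
        rir_manifest_path="/data/rir/manifest.json",  # placeholder path
        rir_prob=0.5,
        noise_manifest_paths=["/data/noise/fg_manifest.json"],  # placeholder path
        min_snr_db=[0],
        max_snr_db=[50],
        noise_tar_filepaths=[None],
        orig_sample_rate=[16000],
        bg_noise_manifest_paths=["/data/noise/bg_manifest.json"],  # placeholder path
        bg_min_snr_db=[10],
        bg_max_snr_db=[50],
        bg_noise_tar_filepaths=[None],
        bg_orig_sample_rate=[16000],
    )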
class TranscodePerturbation(Perturbation):
"""
Audio codec augmentation. This implementation uses sox to transcode audio with low rate audio codecs,
so users need to make sure that the installed sox version supports the codecs used here (g711, amr-nb and ogg).
Args:
codecs (List[str]): A list of codecs to transcode to. Default is None.
rng (int): Random seed. Default is None.
"""
def __init__(self, codecs=None, rng=None):
random.seed(rng) if rng else None
self._codecs = codecs if codecs is not None else ["g711", "amr-nb", "ogg"]
self.att_factor = 0.8 # to avoid saturation while writing to wav
if codecs is not None:
for codec in codecs:
if codec not in ["g711", "amr-nb", "ogg"]:
raise ValueError(
f"TranscodePerturbation with {codec} isnot supported. Only {codecs} are supported"
)
def perturb(self, data):
max_level = np.max(np.abs(data._samples))
if max_level > 0.8:
norm_factor = self.att_factor / max_level
norm_samples = norm_factor * data._samples
else:
norm_samples = data._samples
orig_f = NamedTemporaryFile(suffix=".wav")
sf.write(orig_f.name, norm_samples.transpose(), 16000)
codec_ind = random.randint(0, len(self._codecs) - 1)
if self._codecs[codec_ind] == "amr-nb":
transcoded_f = NamedTemporaryFile(suffix="_amr.wav")
rates = list(range(0, 4))
rate = rates[random.randint(0, len(rates) - 1)]
_ = subprocess.check_output(
f"sox {orig_f.name} -V0 -C {rate} -t amr-nb - | sox -t amr-nb - -V0 -b 16 -r 16000 {transcoded_f.name}",
shell=True,
)
elif self._codecs[codec_ind] == "ogg":
transcoded_f = NamedTemporaryFile(suffix="_ogg.wav")
rates = list(range(-1, 8))
rate = rates[random.randint(0, len(rates) - 1)]
_ = subprocess.check_output(
f"sox {orig_f.name} -V0 -C {rate} -t ogg - | sox -t ogg - -V0 -b 16 -r 16000 {transcoded_f.name}",
shell=True,
)
elif self._codecs[codec_ind] == "g711":
transcoded_f = NamedTemporaryFile(suffix="_g711.wav")
_ = subprocess.check_output(
f"sox {orig_f.name} -V0 -r 8000 -c 1 -e a-law {transcoded_f.name} lowpass 3400 highpass 300",
shell=True,
)
new_data = AudioSegment.from_file(transcoded_f.name, target_sr=16000)
data._samples = new_data._samples[0 : data._samples.shape[0]]
return
class RandomSegmentPerturbation(Perturbation):
"""
Returns a random segment from input of duration "duration_sec".
If duration_sec > input audio length, pad_to_duration determines the outcome.
RandomSegmentPerturbation is intended for self-supervised learning.
It is not intended for supervised training, since the corresponding transcript segment is not extracted.
Args:
duration_sec (float): duration of the segment to be extracted
pad_to_duration (bool): zero pad if length of input audio < duration_sec
rng: Random seed. Default is None
"""
def __init__(self, duration_sec=32.0, pad_to_duration=False, rng=None):
if duration_sec <= 0:
raise ValueError("duration_sec should be > 0")
self._duration_sec = duration_sec
self._pad_to_duration = pad_to_duration
random.seed(rng) if rng else None
def perturb(self, data):
if self._duration_sec > data.duration:
if not self._pad_to_duration:
raise ValueError(f"audio length < {self._duration_sec} sec and pad_to_duration is set to False")
start_time = 0.0
pad_size = self._duration_sec * data.sample_rate - data.num_samples
data.pad(pad_size=pad_size)
else:
start_time = random.uniform(0.0, data.duration - self._duration_sec)
end_time = start_time + self._duration_sec
data.subsegment(start_time=start_time, end_time=end_time)
perturbation_types = {
"speed": SpeedPerturbation,
"time_stretch": TimeStretchPerturbation,
"gain": GainPerturbation,
"silence": SilencePerturbation,
"impulse": ImpulsePerturbation,
"shift": ShiftPerturbation,
"noise": NoisePerturbation,
"noise_norm": NoisePerturbationWithNormalization,
"white_noise": WhiteNoisePerturbation,
"rir_noise_aug": RirAndNoisePerturbation,
"transcode_aug": TranscodePerturbation,
"random_segment": RandomSegmentPerturbation,
}
def register_perturbation(name: str, perturbation: Perturbation):
if name in perturbation_types.keys():
raise KeyError(
f"Perturbation with the name {name} exists. " f"Type of perturbation : {perturbation_types[name]}."
)
perturbation_types[name] = perturbation
class AudioAugmentor(object):
def __init__(self, perturbations=None, rng=None):
random.seed(rng) if rng else None
self._pipeline = perturbations if perturbations is not None else []
def perturb(self, segment):
for (prob, p) in self._pipeline:
if random.random() < prob:
p.perturb(segment)
return
def max_augmentation_length(self, length):
newlen = length
for (prob, p) in self._pipeline:
newlen = p.max_augmentation_length(newlen)
return newlen
@classmethod
def from_config(cls, config):
ptbs = []
for p in config:
if p['aug_type'] not in perturbation_types:
logging.warning("%s perturbation not known. Skipping.", p['aug_type'])
continue
perturbation = perturbation_types[p['aug_type']]
ptbs.append((p['prob'], perturbation(**p['cfg'])))
return cls(perturbations=ptbs)
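# Illustrative sketch (not part of the original file): the list-of-dicts layout that
# AudioAugmentor.from_config expects. Each entry names a registered perturbation via
# `aug_type`, an application probability `prob`, and constructor kwargs in `cfg`.
# The probabilities and parameter values below are arbitrary placeholders.
_EXAMPLE_AUGMENTOR_CONFIG = [
    {"aug_type": "white_noise", "prob": 1.0, "cfg": {"min_level": -90, "max_level": -46}},
    {"aug_type": "shift", "prob": 0.5, "cfg": {"min_shift_ms": -5.0, "max_shift_ms": 5.0}},
]
# augmentor = AudioAugmentor.from_config(_EXAMPLE_AUGMENTOR_CONFIG)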
def process_augmentations(augmenter, global_rank=0, world_size=1) -> Optional[AudioAugmentor]:
"""Process list of online data augmentations.
Accepts either an AudioAugmentor object with pre-defined augmentations,
or a dictionary that points to augmentations that have been defined.
If a dictionary is passed, must follow the below structure:
Dict[str, Dict[str, Any]]: Which refers to a dictionary of string
names for augmentations, defined in `asr/parts/perturb.py`.
The inner dictionary may contain key-value arguments of the specific
augmentation, along with an essential key `prob`. `prob` declares the
probability of the augmentation being applied, and must be a float
value in the range [0, 1].
# Example in YAML config file
Augmentations are generally applied only during training, so we can add
these augmentations to our yaml config file, and modify the behaviour
for training and evaluation.
```yaml
AudioToSpeechLabelDataLayer:
... # Parameters shared between train and evaluation time
train:
augmentor:
shift:
prob: 0.5
min_shift_ms: -5.0
max_shift_ms: 5.0
white_noise:
prob: 1.0
min_level: -90
max_level: -46
...
eval:
...
```
Then in the training script,
```python
import copy
from ruamel.yaml import YAML
yaml = YAML(typ="safe")
with open(model_config) as f:
params = yaml.load(f)
# Train Config for Data Loader
train_dl_params = copy.deepcopy(params["AudioToTextDataLayer"])
train_dl_params.update(params["AudioToTextDataLayer"]["train"])
del train_dl_params["train"]
del train_dl_params["eval"]
data_layer_train = nemo_asr.AudioToTextDataLayer(
...,
**train_dl_params,
)
# Evaluation Config for Data Loader
eval_dl_params = copy.deepcopy(params["AudioToTextDataLayer"])
eval_dl_params.update(params["AudioToTextDataLayer"]["eval"])
del eval_dl_params["train"]
del eval_dl_params["eval"]
data_layer_eval = nemo_asr.AudioToTextDataLayer(
...,
**eval_dl_params,
)
```
# Registering your own Augmentations
To register custom augmentations to obtain the above convenience of
declaring the augmentations in YAML, you can put additional keys in
the `perturbation_types` dictionary as follows.
```python
from nemo.collections.asr.parts import perturb
# Define your own perturbation here
class CustomPerturbation(perturb.Perturbation):
...
perturb.register_perturbation(name_of_perturbation, CustomPerturbation)
```
Args:
augmenter: AudioAugmentor object or
dictionary of str -> kwargs (dict) which is parsed and used
to initialize an AudioAugmentor.
Note: It is crucial that each individual augmentation has
a keyword `prob`, that defines a float probability in the
range [0, 1] of this augmentation being applied.
If this keyword is not present, a KeyError is raised.
Returns: AudioAugmentor object
"""
if augmenter is None:
return None
if isinstance(augmenter, AudioAugmentor):
return augmenter
augmenter_types = {dict}
if HAVE_OMEGACONG_WEBDATASET:
augmenter_types = {dict, DictConfig}
if type(augmenter) not in augmenter_types:
raise ValueError("Cannot parse augmenter. Must be a dict or an AudioAugmentor object ")
if HAVE_OMEGACONG_WEBDATASET and isinstance(augmenter, DictConfig):
augmenter = OmegaConf.to_container(augmenter, resolve=True)
augmenter = copy.deepcopy(augmenter)
augmentations = []
for augment_name, augment_kwargs in augmenter.items():
prob = augment_kwargs.get('prob', None)
if prob is None:
raise KeyError(
f'Augmentation "{augment_name}" will not be applied as '
f'keyword argument "prob" was not defined for this augmentation.'
)
else:
_ = augment_kwargs.pop('prob')
if prob < 0.0 or prob > 1.0:
raise ValueError("`prob` must be a float value between 0 and 1.")
try:
augmentation_class = perturbation_types[augment_name]
if 'global_rank' in inspect.signature(augmentation_class).parameters:
augment_kwargs['global_rank'] = global_rank
if 'world_size' in inspect.signature(augmentation_class).parameters:
augment_kwargs['world_size'] = world_size
augmentation = augmentation_class(**augment_kwargs)
augmentations.append([prob, augmentation])
except KeyError:
raise KeyError(f"Invalid perturbation name. Allowed values : {perturbation_types.keys()}")
augmenter = AudioAugmentor(perturbations=augmentations)
return augmenter
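# Illustrative sketch (not part of the original file): calling process_augmentations with
# the nested-dict form described in its docstring. Keys are perturbation names, values are
# constructor kwargs plus the mandatory `prob`; the values below mirror the YAML example above.
def _example_process_augmentations():
    augmentor_cfg = {
        "shift": {"prob": 0.5, "min_shift_ms": -5.0, "max_shift_ms": 5.0},
        "white_noise": {"prob": 1.0, "min_level": -90, "max_level": -46},
    }
    return process_augmentations(augmentor_cfg)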
class AugmentationDataset(IterableDataset):
"""
A class that loads tarred audio files and cycles over the files in the dataset.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset/AudioToBPEDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the WebDataset documentation for more information about accepted data and input formats.
"""
def __init__(
self,
manifest_path: str,
tar_filepaths: Union[str, List[str]],
shuffle_n: int = 128,
rank: int = 0,
world_size: int = 1,
shard_strategy: str = "replicate",
):
# import here to avoid circular import error
from nemo.collections.asr.data.audio_to_text import expand_sharded_filepaths
self._manifest = collections.ASRAudioText(manifest_path, parser=parsers.make_parser([]), index_by_file_id=True)
tar_filepaths = expand_sharded_filepaths(
tar_filepaths, shard_strategy=shard_strategy, world_size=world_size, global_rank=rank
)
if not HAVE_OMEGACONG_WEBDATASET:
raise LightningNotInstalledException(self)
self.audio_dataset = wd.WebDataset(urls=tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self.audio_dataset = self.audio_dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self.audio_dataset = (
self.audio_dataset.rename(audio='wav;ogg;flac', key='__key__')
.to_tuple('audio', 'key')
.pipe(self._loop_offsets)
)
def __len__(self):
return len(self._manifest)
def _loop_offsets(self, iterator):
"""This function is used to iterate through utterances with different offsets for each file.
"""
class TarredAudioLoopOffsets:
def __init__(self, collection):
self.iterator = iterator
self.collection = collection
self.current_fn = None
self.current_bytes = None
self.offset_id = 0
def __iter__(self):
return self
def __next__(self):
if self.current_fn is None:
self.current_bytes, self.current_fn = next(self.iterator)
self.offset_id = 0
else:
offset_list = self.collection.mapping[self.current_fn]
if len(offset_list) == self.offset_id + 1:
self.current_bytes, self.current_fn = next(self.iterator)
self.offset_id = 0
else:
self.offset_id += 1
return self.current_bytes, self.current_fn, self.offset_id
return TarredAudioLoopOffsets(self._manifest)
def __iter__(self):
audio_iter = iter(self.audio_dataset)
while True:
try:
audio_bytes, audio_filename, offset_id = next(audio_iter)
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
manifest_idx = self._manifest.mapping[file_id][offset_id]
manifest_entry = self._manifest[manifest_idx]
# Convert audio bytes to IO stream for processing (for SoundFile to read)
audio_file = io.BytesIO(audio_bytes)
yield audio_file, file_id, manifest_entry
except StopIteration:
audio_iter = iter(self.audio_dataset)
|
NeMo-main
|
nemo/collections/asr/parts/preprocessing/perturb.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
import torch
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.core import NeuralModule
class AbstractRNNTJoint(NeuralModule, ABC):
"""
An abstract RNNT Joint framework, which can possibly integrate with GreedyRNNTInfer and BeamRNNTInfer classes.
Represents the abstract RNNT Joint network, which accepts the acoustic model and prediction network
embeddings in order to compute the joint of the two prior to decoding the output sequence.
"""
@abstractmethod
def joint(self, f: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
"""
Compute the joint step of the network.
Here,
B = Batch size
T = Acoustic model timesteps
U = Target sequence length
H1, H2 = Hidden dimensions of the Encoder / Decoder respectively
H = Hidden dimension of the Joint hidden step.
V = Vocabulary size of the Decoder (excluding the RNNT blank token).
NOTE:
The implementation of this model is slightly modified from the original paper.
The original paper proposes the following steps :
(enc, dec) -> Expand + Concat + Sum [B, T, U, H1+H2] -> Forward through joint hidden [B, T, U, H] -- *1
*1 -> Forward through joint final [B, T, U, V + 1].
We instead split the joint hidden into joint_hidden_enc and joint_hidden_dec and act as follows:
enc -> Forward through joint_hidden_enc -> Expand [B, T, 1, H] -- *1
dec -> Forward through joint_hidden_dec -> Expand [B, 1, U, H] -- *2
(*1, *2) -> Sum [B, T, U, H] -> Forward through joint final [B, T, U, V + 1].
Args:
f: Output of the Encoder model. A torch.Tensor of shape [B, T, H1]
g: Output of the Decoder model. A torch.Tensor of shape [B, U, H2]
Returns:
Logits / log softmaxed tensor of shape (B, T, U, V + 1).
"""
raise NotImplementedError()
@property
def num_classes_with_blank(self):
raise NotImplementedError()
@property
def num_extra_outputs(self):
raise NotImplementedError()
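# Illustrative sketch (not part of the original file): the broadcasting pattern described in
# the AbstractRNNTJoint.joint docstring above. The projections are stand-in nn.Linear layers
# with arbitrary sizes, not the actual joint network of any NeMo model.
def _example_factored_joint_shapes():
    import torch
    import torch.nn as nn

    B, T, U, H1, H2, H, V = 2, 7, 5, 16, 12, 8, 10
    f = torch.randn(B, T, H1)          # encoder output
    g = torch.randn(B, U, H2)          # prediction network output
    enc_proj, dec_proj = nn.Linear(H1, H), nn.Linear(H2, H)
    joint_final = nn.Linear(H, V + 1)  # +1 for the RNNT blank token

    f = enc_proj(f).unsqueeze(2)       # [B, T, 1, H]
    g = dec_proj(g).unsqueeze(1)       # [B, 1, U, H]
    logits = joint_final(torch.relu(f + g))  # broadcast sum -> [B, T, U, V + 1]
    return logits.shape                # torch.Size([2, 7, 5, 11])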
class AbstractRNNTDecoder(NeuralModule, ABC):
"""
An abstract RNNT Decoder framework, which can possibly integrate with GreedyRNNTInfer and BeamRNNTInfer classes.
Represents the abstract RNNT Prediction/Decoder stateful network, which performs autoregressive decoding
in order to construct the output sequence.
Args:
vocab_size: Size of the vocabulary, excluding the RNNT blank token.
blank_idx: Index of the blank token. Can be 0 or size(vocabulary).
blank_as_pad: Bool flag, whether to allocate an additional token in the Embedding layer
of this module in order to treat all RNNT `blank` tokens as pad tokens, thereby letting
the Embedding layer batch tokens more efficiently.
It is mandatory to use this for certain Beam RNNT Infer methods - such as TSD, ALSD.
It is also more efficient to use greedy batch decoding with this flag.
"""
def __init__(self, vocab_size, blank_idx, blank_as_pad):
super().__init__()
self.vocab_size = vocab_size
self.blank_idx = blank_idx # first or last index of vocabulary
self.blank_as_pad = blank_as_pad
if blank_idx not in [0, vocab_size]:
raise ValueError("`blank_idx` must be either 0 or the final token of the vocabulary")
@abstractmethod
def predict(
self,
y: Optional[torch.Tensor] = None,
state: Optional[torch.Tensor] = None,
add_sos: bool = False,
batch_size: Optional[int] = None,
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Stateful prediction of scores and state for a (possibly null) tokenset.
This method takes various cases into consideration :
- No token, no state - used for priming the RNN
- No token, state provided - used for blank token scoring
- Given token, states - used for scores + new states
Here:
B - batch size
U - label length
H - Hidden dimension size of RNN
L - Number of RNN layers
Args:
y: Optional torch tensor of shape [B, U] of dtype long which will be passed to the Embedding.
If None, creates a zero tensor of shape [B, 1, H] which mimics output of pad-token on Embedding.
state: An optional list of states for the RNN. E.g., for an LSTM the state list has length 2 (hidden and cell states).
Each state must be a tensor of shape [L, B, H].
If None, and during training mode and `random_state_sampling` is set, will sample a
normal distribution tensor of the above shape. Otherwise, None will be passed to the RNN.
add_sos: bool flag, whether a zero vector describing a "start of signal" token should be
prepended to the above "y" tensor. When set, output size is (B, U + 1, H).
batch_size: An optional int, specifying the batch size of the `y` tensor.
Can be inferred from `y` or `state` when either is provided, but if both are None then batch_size cannot be None.
Returns:
A tuple (g, hid) such that -
If add_sos is False:
g: (B, U, H)
hid: (h, c) where h is the final sequence hidden state and c is the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
If add_sos is True:
g: (B, U + 1, H)
hid: (h, c) where h is the final sequence hidden state and c is the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
"""
raise NotImplementedError()
@abstractmethod
def initialize_state(self, y: torch.Tensor) -> List[torch.Tensor]:
"""
Initialize the state of the RNN layers, with same dtype and device as input `y`.
Args:
y: A torch.Tensor whose device the generated states will be placed on.
Returns:
List of torch.Tensor, each of shape [L, B, H], where
L = Number of RNN layers
B = Batch size
H = Hidden size of RNN.
"""
raise NotImplementedError()
@abstractmethod
def score_hypothesis(
self, hypothesis: Hypothesis, cache: Dict[Tuple[int], Any]
) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
"""
Similar to the predict() method, except this method scores a Hypothesis during beam search.
Hypothesis is a dataclass representing one hypothesis in a Beam Search.
Args:
hypothesis: Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
Returns:
Returns a tuple (y, states, lm_token) such that:
y is a torch.Tensor of shape [1, 1, H] representing the score of the last token in the Hypothesis.
state is a list of RNN states, each of shape [L, 1, H].
lm_token is the final integer token of the hypothesis.
"""
raise NotImplementedError()
def batch_score_hypothesis(
self, hypotheses: List[Hypothesis], cache: Dict[Tuple[int], Any], batch_states: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
"""
Used for batched beam search algorithms. Similar to score_hypothesis method.
Args:
hypotheses: List of Hypotheses. Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
batch_states: List of torch.Tensor which represent the states of the RNN for this batch.
Each state is of shape [L, B, H]
Returns:
Returns a tuple (b_y, b_states, lm_tokens) such that:
b_y is a torch.Tensor of shape [B, 1, H] representing the scores of the last tokens in the Hypotheses.
b_state is a list of list of RNN states, each of shape [L, B, H].
Represented as B x List[states].
lm_token is a list of the final integer tokens of the hypotheses in the batch.
"""
raise NotImplementedError()
def batch_initialize_states(self, batch_states: List[torch.Tensor], decoder_states: List[List[torch.Tensor]]):
"""
Create batch of decoder states.
Args:
batch_states (list): batch of decoder states
([L x (B, H)], [L x (B, H)])
decoder_states (list of list): list of decoder states
[B x ([L x (1, H)], [L x (1, H)])]
Returns:
batch_states (tuple): batch of decoder states
([L x (B, H)], [L x (B, H)])
"""
raise NotImplementedError()
def batch_select_state(self, batch_states: List[torch.Tensor], idx: int) -> List[List[torch.Tensor]]:
"""Get decoder state from batch of states, for given id.
Args:
batch_states (list): batch of decoder states
([L x (B, H)], [L x (B, H)])
idx (int): index to extract state from batch of states
Returns:
(tuple): decoder states for given id
([L x (1, H)], [L x (1, H)])
"""
raise NotImplementedError()
def batch_concat_states(self, batch_states: List[List[torch.Tensor]]) -> List[torch.Tensor]:
"""Concatenate a batch of decoder state to a packed state.
Args:
batch_states (list): batch of decoder states
B x ([L x (H)], [L x (H)])
Returns:
(tuple): decoder states
(L x B x H, L x B x H)
"""
raise NotImplementedError()
def batch_copy_states(
self,
old_states: List[torch.Tensor],
new_states: List[torch.Tensor],
ids: List[int],
value: Optional[float] = None,
) -> List[torch.Tensor]:
"""Copy states from new state to old state at certain indices.
Args:
old_states(list): packed decoder states
(L x B x H, L x B x H)
new_states: packed decoder states
(L x B x H, L x B x H)
ids (list): List of indices to copy states at.
value (optional float): If a value should be copied instead of a state slice, a float should be provided
Returns:
batch of decoder states with partial copy at ids (or a specific value).
(L x B x H, L x B x H)
"""
raise NotImplementedError()
|
NeMo-main
|
nemo/collections/asr/modules/rnnt_abstract.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import EncodedRepresentation, LengthsType, NeuralType, SpectrogramType
from nemo.core.neural_types.elements import ProbsType
__all__ = ['MSDD_module']
class ConvLayer(nn.Module):
def __init__(self, in_channels=1, out_channels=1, kernel_size=(3, 1), stride=(1, 1)):
super(ConvLayer, self).__init__()
self.cnn = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride),
nn.ReLU(),
nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.99),
)
def forward(self, feature):
feature = self.cnn(feature)
return feature
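# Illustrative sketch (not part of the original file): how a ConvLayer with a (K, 1) kernel
# collapses the stacked-embedding axis. With an input of shape (N, 1, K, D), convolving over
# the full height K leaves a single row, which is how the first MSDD conv layer mixes the
# scale_n * (num_spks + 1) stacked vectors per frame. Sizes below are arbitrary placeholders.
def _example_convlayer_shapes():
    import torch

    K, D, ch = 15, 192, 16             # e.g. scale_n*(num_spks+1) = 15 for scale_n=5, num_spks=2
    layer = ConvLayer(in_channels=1, out_channels=ch, kernel_size=(K, 1))
    x = torch.randn(4, 1, K, D)        # (batch*length, 1, stacked embeddings, emb_dim)
    y = layer(x)
    return y.shape                     # torch.Size([4, 16, 1, 192])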
class MSDD_module(NeuralModule, Exportable):
"""
Multi-scale Diarization Decoder (MSDD) for overlap-aware diarization and improved diarization accuracy from clustering diarizer.
Based on the paper: Taejin Park et. al, "Multi-scale Speaker Diarization with Dynamic Scale Weighting", Interspeech 2022.
Arxiv version: https://arxiv.org/pdf/2203.15974.pdf
Args:
num_spks (int):
Max number of speakers that are processed by the model. In `MSDD_module`, `num_spks=2` for pairwise inference.
hidden_size (int):
Number of hidden units in sequence models and intermediate layers.
num_lstm_layers (int):
Number of the stacked LSTM layers.
dropout_rate (float):
Dropout rate for linear layers, CNN and LSTM.
cnn_output_ch (int):
Number of channels per each CNN layer.
emb_dim (int):
Dimension of the embedding vectors.
scale_n (int):
Number of scales in multi-scale system.
clamp_max (float):
Maximum value for limiting the scale weight values.
conv_repeat (int):
Number of CNN layers after the first CNN layer.
weighting_scheme (str):
Name of the methods for estimating the scale weights.
context_vector_type (str):
If 'cos_sim', cosine similarity values are used for the input of the sequence models.
If 'elem_prod', element-wise product values are used for the input of the sequence models.
"""
@property
def output_types(self):
"""
Return definitions of module output ports.
"""
return OrderedDict(
{
"probs": NeuralType(('B', 'T', 'C'), ProbsType()),
"scale_weights": NeuralType(('B', 'T', 'C', 'D'), ProbsType()),
}
)
@property
def input_types(self):
"""
Return definitions of module input ports.
"""
return OrderedDict(
{
"ms_emb_seq": NeuralType(('B', 'T', 'C', 'D'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
"ms_avg_embs": NeuralType(('B', 'C', 'D', 'C'), EncodedRepresentation()),
"targets": NeuralType(('B', 'T', 'C'), ProbsType()),
}
)
def init_weights(self, m):
if type(m) == nn.Linear:
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
elif type(m) in [nn.GRU, nn.LSTM, nn.RNN]:
for name, param in m.named_parameters():
if 'weight_ih' in name:
torch.nn.init.xavier_uniform_(param.data)
elif 'weight_hh' in name:
torch.nn.init.orthogonal_(param.data)
elif 'bias' in name:
param.data.fill_(0.01)
def __init__(
self,
num_spks: int = 2,
hidden_size: int = 256,
num_lstm_layers: int = 2,
dropout_rate: float = 0.5,
cnn_output_ch: int = 16,
emb_dim: int = 192,
scale_n: int = 5,
clamp_max: float = 1.0,
conv_repeat: int = 1,
weighting_scheme: str = 'conv_scale_weight',
context_vector_type: str = 'cos_sim',
):
super().__init__()
self._speaker_model = None
self.batch_size: int = 1
self.length: int = 50
self.emb_dim: int = emb_dim
self.num_spks: int = num_spks
self.scale_n: int = scale_n
self.cnn_output_ch: int = cnn_output_ch
self.conv_repeat: int = conv_repeat
self.chan: int = 2
self.eps: float = 1e-6
self.num_lstm_layers: int = num_lstm_layers
self.weighting_scheme: str = weighting_scheme
self.context_vector_type: str = context_vector_type
self.softmax = torch.nn.Softmax(dim=2)
self.cos_dist = torch.nn.CosineSimilarity(dim=3, eps=self.eps)
self.lstm = nn.LSTM(
hidden_size,
hidden_size,
num_layers=self.num_lstm_layers,
batch_first=True,
bidirectional=True,
dropout=dropout_rate,
)
if self.weighting_scheme == 'conv_scale_weight':
self.conv = nn.ModuleList(
[
ConvLayer(
in_channels=1,
out_channels=cnn_output_ch,
kernel_size=(self.scale_n + self.scale_n * num_spks, 1),
stride=(1, 1),
)
]
)
for conv_idx in range(1, conv_repeat + 1):
self.conv.append(
ConvLayer(
in_channels=1, out_channels=cnn_output_ch, kernel_size=(self.cnn_output_ch, 1), stride=(1, 1)
)
)
self.conv_bn = nn.ModuleList()
for conv_idx in range(self.conv_repeat + 1):
self.conv_bn.append(nn.BatchNorm2d(self.emb_dim, affine=False))
self.conv_to_linear = nn.Linear(emb_dim * cnn_output_ch, hidden_size)
self.linear_to_weights = nn.Linear(hidden_size, self.scale_n)
elif self.weighting_scheme == 'attn_scale_weight':
self.W_a = nn.Linear(emb_dim, emb_dim, bias=False)
nn.init.eye_(self.W_a.weight)
else:
raise ValueError(f"No such weighting scheme as {self.weighting_scheme}")
self.hidden_to_spks = nn.Linear(2 * hidden_size, self.num_spks)
if self.context_vector_type == "cos_sim":
self.dist_to_emb = nn.Linear(self.scale_n * self.num_spks, hidden_size)
self.dist_to_emb.apply(self.init_weights)
elif self.context_vector_type == "elem_prod":
self.product_to_emb = nn.Linear(self.emb_dim * self.num_spks, hidden_size)
else:
raise ValueError(f"No such context vector type as {self.context_vector_type}")
self.dropout = nn.Dropout(dropout_rate)
self.hidden_to_spks.apply(self.init_weights)
self.lstm.apply(self.init_weights)
self.clamp_max = clamp_max
def core_model(self, ms_emb_seq, length, ms_avg_embs, targets):
"""
Core model that accepts multi-scale cosine similarity values and estimates per-speaker binary label.
Args:
ms_emb_seq (Tensor):
Multiscale input embedding sequence
Shape: (batch_size, length, scale_n, emb_dim)
length (Tensor):
The actual length of embedding sequences without zero padding
Shape: (batch_size,)
ms_avg_embs (Tensor):
Cluster-average speaker embedding vectors.
Shape: (batch_size, scale_n, self.emb_dim, max_spks)
targets (Tensor):
Ground-truth labels for the finest segment.
Shape: (batch_size, feats_len, max_spks)
Returns:
preds (Tensor):
Predicted binary speaker label for each speaker.
Shape: (batch_size, feats_len, max_spks)
scale_weights (Tensor):
Multiscale weights per each base-scale segment.
Shape: (batch_size, length, scale_n, max_spks)
"""
self.batch_size = ms_emb_seq.shape[0]
self.length = ms_emb_seq.shape[1]
self.emb_dim = ms_emb_seq.shape[-1]
_ms_emb_seq = ms_emb_seq.unsqueeze(4).expand(-1, -1, -1, -1, self.num_spks)
ms_emb_seq_single = ms_emb_seq
ms_avg_embs = ms_avg_embs.unsqueeze(1).expand(-1, self.length, -1, -1, -1)
ms_avg_embs_perm = ms_avg_embs.permute(0, 1, 2, 4, 3).reshape(self.batch_size, self.length, -1, self.emb_dim)
if self.weighting_scheme == "conv_scale_weight":
scale_weights = self.conv_scale_weights(ms_avg_embs_perm, ms_emb_seq_single)
elif self.weighting_scheme == "attn_scale_weight":
scale_weights = self.attention_scale_weights(ms_avg_embs_perm, ms_emb_seq_single)
else:
raise ValueError(f"No such weighting scheme as {self.weighting_scheme}")
scale_weights = scale_weights.to(ms_emb_seq.device)
if self.context_vector_type == "cos_sim":
context_emb = self.cosine_similarity(scale_weights, ms_avg_embs, _ms_emb_seq)
elif self.context_vector_type == "elem_prod":
context_emb = self.element_wise_product(scale_weights, ms_avg_embs, _ms_emb_seq)
else:
raise ValueError(f"No such context vector type as {self.context_vector_type}")
context_emb = self.dropout(F.relu(context_emb))
lstm_output = self.lstm(context_emb)
lstm_hidden_out = self.dropout(F.relu(lstm_output[0]))
spk_preds = self.hidden_to_spks(lstm_hidden_out)
preds = nn.Sigmoid()(spk_preds)
return preds, scale_weights
def element_wise_product(self, scale_weights, ms_avg_embs, ms_emb_seq):
"""
Calculate element wise product values among cluster-average embedding vectors and input embedding vector sequences.
This function is selected by assigning `self.context_vector_type = "elem_prod"`. `elem_prod` method usually takes more
time to converge compared to `cos_sim` method.
Args:
scale_weights (Tensor):
Multiscale weight vector.
Shape: (batch_size, feats_len, scale_n, max_spks)
ms_avg_embs (Tensor):
Tensor containing cluster-average speaker embeddings for each scale.
Shape: (batch_size, length, scale_n, emb_dim)
ms_emb_seq (Tensor):
Tensor containing multi-scale speaker embedding sequences. `ms_emb_seq` is a single channel input from the
given audio stream input.
Shape: (batch_size, length, num_spks, emb_dim)
Returns:
context_emb (Tensor):
Output of `dist_to_emb` linear layer containing context for speaker label estimation.
"""
scale_weight_flatten = scale_weights.reshape(self.batch_size * self.length, self.num_spks, self.scale_n)
ms_avg_embs_flatten = ms_avg_embs.reshape(
self.batch_size * self.length, self.scale_n, self.emb_dim, self.num_spks
)
ms_emb_seq_flatten = ms_emb_seq.reshape(-1, self.scale_n, self.emb_dim)
ms_emb_seq_flatten_rep = ms_emb_seq_flatten.unsqueeze(3).reshape(-1, self.scale_n, self.emb_dim, self.num_spks)
elemwise_product = ms_avg_embs_flatten * ms_emb_seq_flatten_rep
context_vectors = torch.bmm(
scale_weight_flatten.reshape(self.batch_size * self.num_spks * self.length, 1, self.scale_n),
elemwise_product.reshape(self.batch_size * self.num_spks * self.length, self.scale_n, self.emb_dim),
)
context_vectors = context_vectors.reshape(self.batch_size, self.length, self.emb_dim * self.num_spks)
context_emb = self.product_to_emb(context_vectors)
return context_emb
def cosine_similarity(self, scale_weights, ms_avg_embs, _ms_emb_seq):
"""
Calculate cosine similarity values among cluster-average embedding vectors and input embedding vector sequences.
This function is selected by assigning self.context_vector_type = "cos_sim".
Args:
scale_weights (Tensor):
Multiscale weight vector.
Shape: (batch_size, feats_len, scale_n, max_spks)
ms_avg_embs (Tensor):
Tensor containing cluster-average speaker embeddings for each scale.
Shape: (batch_size, length, scale_n, emb_dim)
_ms_emb_seq (Tensor):
Tensor containing multi-scale speaker embedding sequences. `ms_emb_seq` is a single channel input from the
given audio stream input.
Shape: (batch_size, length, num_spks, emb_dim)
Returns:
context_emb (Tensor):
Output of `dist_to_emb` linear layer containing context for speaker label estimation.
"""
cos_dist_seq = self.cos_dist(_ms_emb_seq, ms_avg_embs)
context_vectors = torch.mul(scale_weights, cos_dist_seq)
context_vectors = context_vectors.view(self.batch_size, self.length, -1)
context_emb = self.dist_to_emb(context_vectors)
return context_emb
def attention_scale_weights(self, ms_avg_embs_perm, ms_emb_seq):
"""
Use weighted inner product for calculating each scale weight. W_a matrix has (emb_dim * emb_dim) learnable parameters
and W_a matrix is initialized with an identity matrix. Compared to "conv_scale_weight" method, this method shows more evenly
distributed scale weights.
Args:
ms_avg_embs_perm (Tensor):
Tensor containing cluster-average speaker embeddings for each scale.
Shape: (batch_size, length, scale_n, emb_dim)
ms_emb_seq (Tensor):
Tensor containing multi-scale speaker embedding sequences. `ms_emb_seq` is input from the
given audio stream input.
Shape: (batch_size, length, scale_n, emb_dim)
Returns:
scale_weights (Tensor):
Weight vectors that determine the weight of each scale.
Shape: (batch_size, length, scale_n, num_spks)
"""
mat_a = self.W_a(ms_emb_seq.flatten(0, 1))
mat_b = ms_avg_embs_perm.flatten(0, 1).permute(0, 2, 1)
weighted_corr = torch.matmul(mat_a, mat_b).reshape(-1, self.scale_n, self.scale_n, self.num_spks)
scale_weights = torch.sigmoid(torch.diagonal(weighted_corr, dim1=1, dim2=2))
scale_weights = scale_weights.reshape(self.batch_size, self.length, self.scale_n, self.num_spks)
scale_weights = self.softmax(scale_weights)
return scale_weights
def conv_scale_weights(self, ms_avg_embs_perm, ms_emb_seq_single):
"""
Use multiple Convnet layers to estimate the scale weights based on the cluster-average embedding and
input embedding sequence.
Args:
ms_avg_embs_perm (Tensor):
Tensor containing cluster-average speaker embeddings for each scale.
Shape: (batch_size, length, scale_n, emb_dim)
ms_emb_seq_single (Tensor):
Tensor containing multi-scale speaker embedding sequences. ms_emb_seq_single is input from the
given audio stream input.
Shape: (batch_size, length, scale_n, emb_dim)
Returns:
scale_weights (Tensor):
Weight vectors that determine the weight of each scale.
Shape: (batch_size, length, scale_n, num_spks)
"""
ms_cnn_input_seq = torch.cat([ms_avg_embs_perm, ms_emb_seq_single], dim=2)
ms_cnn_input_seq = ms_cnn_input_seq.unsqueeze(2).flatten(0, 1)
conv_out = self.conv_forward(
ms_cnn_input_seq, conv_module=self.conv[0], bn_module=self.conv_bn[0], first_layer=True
)
for conv_idx in range(1, self.conv_repeat + 1):
conv_out = self.conv_forward(
conv_input=conv_out,
conv_module=self.conv[conv_idx],
bn_module=self.conv_bn[conv_idx],
first_layer=False,
)
lin_input_seq = conv_out.view(self.batch_size, self.length, self.cnn_output_ch * self.emb_dim)
hidden_seq = self.conv_to_linear(lin_input_seq)
hidden_seq = self.dropout(F.leaky_relu(hidden_seq))
scale_weights = self.softmax(self.linear_to_weights(hidden_seq))
scale_weights = scale_weights.unsqueeze(3).expand(-1, -1, -1, self.num_spks)
return scale_weights
def conv_forward(self, conv_input, conv_module, bn_module, first_layer=False):
"""
A forward pass through a convolutional layer with 1-D filters. Batch normalization, a non-linear activation
and dropout modules are applied together as a unit.
Note:
If `first_layer=True`, the input shape is set for processing embedding input.
If `first_layer=False`, then the input shape is set for processing the output from another `conv_forward` module.
Args:
conv_input (Tensor):
Reshaped tensor containing cluster-average embeddings and multi-scale embedding sequences.
Shape: (batch_size*length, 1, scale_n*(num_spks+1), emb_dim)
conv_module (ConvLayer):
ConvLayer instance containing torch.nn.modules.conv modules.
bn_module (torch.nn.modules.batchnorm.BatchNorm2d):
Predefined Batchnorm module.
first_layer (bool):
Boolean for switching between the first layer and the others.
Default: `False`
Returns:
conv_out (Tensor):
Convnet output that can be fed to another ConvLayer module or linear layer.
Shape: (batch_size*length, 1, cnn_output_ch, emb_dim)
"""
conv_out = conv_module(conv_input)
conv_out = conv_out.permute(0, 2, 1, 3) if not first_layer else conv_out
conv_out = conv_out.reshape(self.batch_size, self.length, self.cnn_output_ch, self.emb_dim)
conv_out = conv_out.unsqueeze(2).flatten(0, 1)
conv_out = bn_module(conv_out.permute(0, 3, 2, 1)).permute(0, 3, 2, 1)
conv_out = self.dropout(F.leaky_relu(conv_out))
return conv_out
@typecheck()
def forward(self, ms_emb_seq, length, ms_avg_embs, targets):
preds, scale_weights = self.core_model(ms_emb_seq, length, ms_avg_embs, targets)
return preds, scale_weights
def input_example(self):
"""
Generate input examples for tracing etc.
Returns (tuple):
A tuple of input examples.
"""
device = next(self.parameters()).device
input_example = torch.randn(1, 123, self.scale_n, self.emb_dim, device=device)
lens = torch.full(size=(input_example.shape[0],), fill_value=123, device=device)
avg_embs = torch.randn(1, self.scale_n, self.emb_dim, self.num_spks, device=device)
targets = torch.randn(1, 123, self.num_spks).round().float()
return tuple([input_example, lens, avg_embs, targets])
|
NeMo-main
|
nemo/collections/asr/modules/msdd_diarizer.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
from typing import Dict, List, Tuple
import torch
from omegaconf import DictConfig
from torch import nn
from torch.nn import functional as F
from nemo.collections.common.parts import form_attention_mask, transformer_weights_init
from nemo.collections.nlp.modules.common.transformer import TransformerEncoder
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import AcousticEncodedRepresentation, AudioSignal, LengthsType, NeuralType, SpectrogramType
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TransposeLast(torch.nn.Module):
"""
Transposes last dimension. Useful for adding to a sequential block.
"""
def forward(self, x):
return x.transpose(-2, -1)
class SamePad(torch.nn.Module):
def __init__(self, kernel_size):
super().__init__()
self.remove = kernel_size % 2 == 0
def forward(self, x):
if self.remove:
x = x[:, :, :-1]
return x
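# Illustrative sketch (not part of the original file): why SamePad is needed. A Conv1d with
# an even kernel and padding = kernel_size // 2 emits one extra time step, which SamePad
# trims so the output length matches the input length. Sizes below are placeholders.
def _example_same_pad():
    import torch
    import torch.nn as nn

    k, T = 128, 500
    conv = nn.Conv1d(8, 8, kernel_size=k, padding=k // 2)
    x = torch.randn(1, 8, T)
    y = conv(x)                 # length T + 1 for an even kernel
    y = SamePad(k)(y)           # back to length T
    return y.shape              # torch.Size([1, 8, 500])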
class ConvFeatureEncoder(NeuralModule):
"""
Encoder used to isolate features in raw audio for Wav2Vec style training.
Treated as a preprocessor module in NeMo ASR training. Default values are
for the base model found in Baevski et al. (https://arxiv.org/abs/2006.11477),
save for the use of layer normalization as the default scheme (chosen for stability).
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
input_signal:
0: AxisType(BatchTag)
1: AxisType(TimeTag)
input_signal_length:
0: AxisType(BatchTag)
Note: length is in number of samples, not seconds
"""
return {
"input_signal": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
For compatibility, processed features are treated as Spectrogram types
processed_signal:
0: AxisType(BatchTag)
1: AxisType(ChannelTag)
2: AxisType(ProcessedTimeTag)
processed_signal_length:
0: AxisType(BatchTag)
"""
return {
"processed_signal": NeuralType(('B', 'C', 'T'), SpectrogramType()),
"processed_signal_length": NeuralType(tuple('B'), LengthsType()),
}
def __init__(
self,
conv_layers: List[Dict[str, int]],
extractor_mode: str = "layer_norm",
conv_bias: bool = False,
feature_grad_mult=1.0,
normalize_audio=True,
embedding_dim=768,
):
super().__init__()
self.grad_mult = feature_grad_mult
self.normalize_input = normalize_audio
def block(
n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert (is_layer_norm and is_group_norm) is False, "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Sequential(TransposeLast(), nn.LayerNorm(dim, elementwise_affine=True), TransposeLast()),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(make_conv(), nn.GroupNorm(dim, dim, affine=True), nn.GELU(),)
else:
return nn.Sequential(make_conv(), nn.GELU())
in_d = 1
self.layer_cfg = conv_layers
self.conv_layers = nn.ModuleList()
self.mode = extractor_mode
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
dim, k, stride = cl["emb_dim"], cl["kernel_size"], cl["stride"]
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=self.mode == "layer_norm",
is_group_norm=self.mode == "group_norm" and i == 0, # applied to first layer only
conv_bias=conv_bias,
)
)
in_d = dim
# Model Layers
final_conv_dim = self.layer_cfg[-1]["emb_dim"] # Select last conv output layer dimension
self.post_extract_proj = ( # To project feature encodings to transformer
nn.Linear(final_conv_dim, embedding_dim) if final_conv_dim != embedding_dim else None
)
self.layer_norm = nn.LayerNorm(embedding_dim)
def apply_layers(self, x):
for conv in self.conv_layers:
x = conv(x)
return x
def normalize(self, source, lengths):
with torch.no_grad(): # Normalizes audio source
for i in range(lengths.size(0)):
orig = source[i, : lengths[i]]
norm = F.layer_norm(orig, orig.shape)
source[i, : lengths[i]] = norm
return source
def forward(self, input_signal, length):
if self.normalize_input:
input_signal = self.normalize(input_signal, length)
# BxT -> BxCxT
processed_signal = input_signal.unsqueeze(1)
# Applies grad mult scaling
if self.grad_mult > 0:
processed_signal = self.apply_layers(processed_signal)
if self.grad_mult != 1.0:
processed_signal = GradMultiply.apply(processed_signal, self.grad_mult)
else:
with torch.no_grad(): # 0 indicates frozen feature encoder
processed_signal = self.apply_layers(processed_signal)
processed_signal = processed_signal.transpose(1, 2) # B,T,C
# Project to embedding
if self.post_extract_proj is not None:
processed_signal = self.post_extract_proj(processed_signal)
# Adding normalization for output
if self.mode == "layer_norm":
processed_signal = self.layer_norm(processed_signal)
processed_signal = processed_signal.transpose(1, 2) # B,C,T
# Feature lengths will have been changed through convolutions
processed_signal_length = self.get_lengths(audio_lengths=length)
return processed_signal, processed_signal_length
def get_lengths(self, audio_lengths):
# converts audio lengths to timestep lengths
for conv in self.layer_cfg:
kernel = conv["kernel_size"]
stride = conv["stride"]
audio_lengths = (
torch.div(audio_lengths - kernel, stride, rounding_mode='floor') + 1
) # from pytorch documentation
return audio_lengths
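# Illustrative sketch (not part of the original file): the length arithmetic in get_lengths,
# applied to the widely used 7-layer wav2vec 2.0 base kernel/stride configuration (used here
# only as an example set of values). 16000 input samples reduce to 49 feature frames.
def _example_feature_lengths(num_samples=16000):
    layers = [(10, 5), (3, 2), (3, 2), (3, 2), (3, 2), (2, 2), (2, 2)]  # (kernel, stride)
    length = num_samples
    for kernel, stride in layers:
        length = (length - kernel) // stride + 1
    return length  # 49 for 16000 samples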
class Wav2VecTransformerEncoder(TransformerEncoder):
"""
Encoder module following Transformer encoder paradigm
as described in Vaswani et al. (https://arxiv.org/abs/1706.03762). Used for Wav2Vec
style encoding of context vectors as described in Baevski et al. (https://arxiv.org/abs/2006.11477).
Takes convolutional encodings of all time steps and adds to features before applying series
of self-attention layers.
Example configs may be found at: https://github.com/NVIDIA/NeMo/tree/main/examples/asr/conf/wav2vec
Args:
layer_drop: Floating point value specifying proportion of module for layer dropout (See Fan et al. https://arxiv.org/pdf/1909.11556.pdf).
If non-zero, each layer draws from a uniform distribution to determine whether it is applied in the current forward call.
Only occurs during the training step.
pos_embed: Config specifying parameters for contextual embedding convolutions. Module configures convolutional padding
to maintain number of time steps
Must contain the following:
embedding_dim: Depth/number of channels of each time step from feature encoding
conv_pos: Kernel size for convolution
conv_pos_groups: Number of groups for convolution
transformer: Config for transformer encoder. Uses self-attention layers found in: nemo.collections.nlp.modules.common.transformer
Must contain the following:
num_layers: Number of attention layers
hidden_size: Expected input depth (embedding size between model layers)
inner_size: Depth of embeddings within feed-forward sections of encoder layers
num_attention_heads: Number of attention heads
attn_score_dropout: Probability of dropout applied to attention scores
attn_layer_dropout: Probability of dropout applied to the output of the attention layers (prior to normalization)
ffn_dropout: Probability of dropout applied to feed-forward modules
hidden_act: Activation function for hidden layers
"""
def __init__(self, pos_embed: DictConfig, transformer: DictConfig, layer_drop: float = 0.0):
super().__init__(**transformer) # see nlp.collections
# positional convolutional embeddings
emb_dim = pos_embed.embedding_dim
self.pos_conv = nn.Conv1d(
emb_dim,
emb_dim,
kernel_size=pos_embed.conv_pos,
padding=pos_embed.conv_pos // 2, # Padding size preserves time step length
groups=pos_embed.conv_pos_groups,
)
self.layer_drop = layer_drop
self.dropout = transformer.attn_layer_dropout
# He initialization for the positional convolution weights
std = math.sqrt((4 * (1.0 - self.dropout)) / (pos_embed.conv_pos * pos_embed.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(pos_embed.conv_pos), nn.GELU())
self.layer_norm = nn.LayerNorm(emb_dim)
self.apply(lambda x: transformer_weights_init(x, xavier=False))
@property
def input_types(self):
"""Returns definitions of module output ports.
We treat features as SpectrogramType for Nemo compatibility
audio_signal:
0: AxisType(BatchTag)
1: AxisType(ChannelTag)
2: AxisType(ProcessedTimeTag)
length:
0: AxisType(BatchTag)
"""
return {
"audio_signal": NeuralType(('B', 'C', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
We're using SpectrogramType for now to keep things Nemo safe
processed_signal:
0: AxisType(BatchTag)
1: AxisType(ChannelTag)
2: AxisType(ProcessedTimeTag)
processed_length:
0: AxisType(BatchTag)
"""
return {
"processed_signal": NeuralType(('B', 'C', 'T'), AcousticEncodedRepresentation()),
"processed_length": NeuralType(tuple('B'), LengthsType()),
}
def forward(self, audio_signal, length):
# Padding mask needed for transformer
padding_mask = self.create_padding_mask(length)
# Applying padding before convolution
for idx, len in enumerate(length):
audio_signal[idx, :, len:] = 0.0
signal_conv = self.pos_conv(audio_signal) # B, C, T
audio_signal = audio_signal + signal_conv
audio_signal = audio_signal.transpose(1, 2) # B, C, T -> B, T, C
audio_signal = self.layer_norm(audio_signal)
context_emb = self.apply_transformer(audio_signal, padding_mask=padding_mask)
context_emb = context_emb.transpose(1, 2) # B, T, C -> B, C, T
return context_emb, length # Returning length for NeMo compatibility
def apply_transformer(self, x, padding_mask=None):
encoder_attn_mask = form_attention_mask(padding_mask)
if (
self.layer_drop and self.training
): # Stochastic layer drop as in: Huang et al. https://arxiv.org/pdf/1603.09382.pdf
for _, layer in enumerate(self.layers):
p = random.random()
if p > self.layer_drop:
x = layer(x, encoder_attn_mask, x)
else:
for _, layer in enumerate(self.layers):
x = layer(x, encoder_attn_mask, x)
return x
def create_padding_mask(self, length):
# Broadcast to vectorize creating the padding mask
max_len = max(length)
padding_mask = torch.arange(max_len, device=DEVICE)
# Switch to binary for transformer, 1 for valid tokens, 0 for padding
padding_mask = (padding_mask.expand(len(length), max_len) < length.unsqueeze(1)).type(torch.uint8)
return padding_mask
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
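# Illustrative sketch (not part of the original file): GradMultiply leaves the forward pass
# unchanged but scales the gradient flowing back into its input, which is how
# `feature_grad_mult` damps the feature-encoder gradients above. Values are placeholders.
def _example_grad_multiply(scale=0.1):
    import torch

    x = torch.randn(4, 8, requires_grad=True)
    y = GradMultiply.apply(x, scale)
    y.sum().backward()
    return x.grad  # equal to `scale` everywhere, instead of 1.0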
|
NeMo-main
|
nemo/collections/asr/modules/wav2vec_modules.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Tuple
import numpy as np
import torch
from nemo.collections.asr.parts.preprocessing.features import make_seq_mask_like
from nemo.collections.asr.parts.utils.audio_utils import db2mag, wrap_to_pi
from nemo.core.classes import NeuralModule, typecheck
from nemo.core.neural_types import FloatType, LengthsType, NeuralType, SpectrogramType
from nemo.utils import logging
from nemo.utils.decorators import experimental
try:
import torchaudio
HAVE_TORCHAUDIO = True
except ModuleNotFoundError:
HAVE_TORCHAUDIO = False
__all__ = [
'MaskEstimatorRNN',
'MaskReferenceChannel',
'MaskBasedBeamformer',
'MaskBasedDereverbWPE',
]
@experimental
class SpectrogramToMultichannelFeatures(NeuralModule):
"""Convert a complex-valued multi-channel spectrogram to
multichannel features.
Args:
num_subbands: Expected number of subbands in the input signal
num_input_channels: Optional, provides the number of channels
of the input signal. Used to infer the number
of output channels.
        mag_reduction: Reduction of magnitude across channels. Defaults to 'rms';
            if `None`, the magnitude of each channel is kept separately.
use_ipd: Use inter-channel phase difference (IPD).
mag_normalization: Normalization for magnitude features
ipd_normalization: Normalization for IPD features
"""
def __init__(
self,
num_subbands: int,
num_input_channels: Optional[int] = None,
mag_reduction: Optional[str] = 'rms',
use_ipd: bool = False,
mag_normalization: Optional[str] = None,
ipd_normalization: Optional[str] = None,
):
super().__init__()
self.mag_reduction = mag_reduction
self.use_ipd = use_ipd
# TODO: normalization
if mag_normalization is not None:
raise NotImplementedError(f'Unknown magnitude normalization {mag_normalization}')
self.mag_normalization = mag_normalization
if ipd_normalization is not None:
raise NotImplementedError(f'Unknown ipd normalization {ipd_normalization}')
self.ipd_normalization = ipd_normalization
if self.use_ipd:
self._num_features = 2 * num_subbands
self._num_channels = num_input_channels
else:
self._num_features = num_subbands
self._num_channels = num_input_channels if self.mag_reduction is None else 1
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"input_length": NeuralType(('B',), LengthsType()),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"output_length": NeuralType(('B',), LengthsType()),
}
@property
def num_features(self) -> int:
"""Configured number of features
"""
return self._num_features
@property
def num_channels(self) -> int:
"""Configured number of channels
"""
if self._num_channels is not None:
return self._num_channels
else:
raise ValueError(
'Num channels is not configured. To configure this, `num_input_channels` '
'must be provided when constructing the object.'
)
@typecheck()
    def forward(self, input: torch.Tensor, input_length: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Convert input batch of C-channel spectrograms into
a batch of time-frequency features with dimension num_feat.
The output number of channels may be the same as input, or
reduced to 1, e.g., if averaging over magnitude and not appending individual IPDs.
Args:
input: Spectrogram for C channels with F subbands and N time frames, (B, C, F, N)
input_length: Length of valid entries along the time dimension, shape (B,)
Returns:
num_feat_channels channels with num_feat features, shape (B, num_feat_channels, num_feat, N)
"""
# Magnitude spectrum
if self.mag_reduction is None:
mag = torch.abs(input)
elif self.mag_reduction == 'abs_mean':
mag = torch.abs(torch.mean(input, axis=1, keepdim=True))
elif self.mag_reduction == 'mean_abs':
mag = torch.mean(torch.abs(input), axis=1, keepdim=True)
elif self.mag_reduction == 'rms':
mag = torch.sqrt(torch.mean(torch.abs(input) ** 2, axis=1, keepdim=True))
else:
raise ValueError(f'Unexpected magnitude reduction {self.mag_reduction}')
if self.mag_normalization is not None:
mag = self.mag_normalization(mag)
features = mag
if self.use_ipd:
# Calculate IPD relative to average spec
spec_mean = torch.mean(input, axis=1, keepdim=True)
ipd = torch.angle(input) - torch.angle(spec_mean)
# Modulo to [-pi, pi]
ipd = wrap_to_pi(ipd)
if self.ipd_normalization is not None:
ipd = self.ipd_normalization(ipd)
# Concatenate to existing features
features = torch.cat([features.expand(ipd.shape), ipd], axis=2)
if self._num_channels is not None and features.size(1) != self._num_channels:
raise RuntimeError(
f'Number of channels in features {features.size(1)} is different than the configured number of channels {self._num_channels}'
)
return features, input_length
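# Illustrative usage sketch (not part of NeMo; names and shapes below are placeholders):
# with `use_ipd=True`, the magnitude features are concatenated with per-channel IPDs,
# so the feature dimension doubles while the channel dimension is preserved.
def _spectrogram_features_example() -> torch.Tensor:
    feats = SpectrogramToMultichannelFeatures(num_subbands=257, mag_reduction='rms', use_ipd=True)
    spec = torch.randn(2, 4, 257, 50, dtype=torch.cfloat)  # (B, C, F, N)
    spec_len = torch.tensor([50, 40])
    output, _ = feats(input=spec, input_length=spec_len)   # (B, C, 2 * F, N)
    return output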
class MaskEstimatorRNN(NeuralModule):
"""Estimate `num_outputs` masks from the input spectrogram
using stacked RNNs and projections.
The module is structured as follows:
input --> spatial features --> input projection -->
--> stacked RNNs --> output projection for each output --> sigmoid
Reference:
Multi-microphone neural speech separation for far-field multi-talker
speech recognition (https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8462081)
Args:
num_outputs: Number of output masks to estimate
num_subbands: Number of subbands of the input spectrogram
num_features: Number of features after the input projections
num_layers: Number of RNN layers
num_hidden_features: Number of hidden features in RNN layers
num_input_channels: Number of input channels
dropout: If non-zero, introduces dropout on the outputs of each RNN layer except the last layer, with dropout
probability equal to `dropout`. Default: 0
bidirectional: If `True`, use bidirectional RNN.
rnn_type: Type of RNN, either `lstm` or `gru`. Default: `lstm`
mag_reduction: Channel-wise reduction for magnitude features
use_ipd: Use inter-channel phase difference (IPD) features
"""
def __init__(
self,
num_outputs: int,
num_subbands: int,
num_features: int = 1024,
num_layers: int = 3,
num_hidden_features: Optional[int] = None,
num_input_channels: Optional[int] = None,
dropout: float = 0,
bidirectional=True,
rnn_type: str = 'lstm',
mag_reduction: str = 'rms',
        use_ipd: bool = False,
):
super().__init__()
if num_hidden_features is None:
num_hidden_features = num_features
self.features = SpectrogramToMultichannelFeatures(
num_subbands=num_subbands,
num_input_channels=num_input_channels,
mag_reduction=mag_reduction,
use_ipd=use_ipd,
)
self.input_projection = torch.nn.Linear(
in_features=self.features.num_features * self.features.num_channels, out_features=num_features
)
if rnn_type == 'lstm':
self.rnn = torch.nn.LSTM(
input_size=num_features,
hidden_size=num_hidden_features,
num_layers=num_layers,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
elif rnn_type == 'gru':
self.rnn = torch.nn.GRU(
input_size=num_features,
hidden_size=num_hidden_features,
num_layers=num_layers,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
else:
raise ValueError(f'Unknown rnn_type: {rnn_type}')
self.fc = torch.nn.Linear(
in_features=2 * num_features if bidirectional else num_features, out_features=num_features
)
self.norm = torch.nn.LayerNorm(num_features)
# Each output shares the RNN and has a separate projection
self.output_projections = torch.nn.ModuleList(
[torch.nn.Linear(in_features=num_features, out_features=num_subbands) for _ in range(num_outputs)]
)
self.output_nonlinearity = torch.nn.Sigmoid()
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"input_length": NeuralType(('B',), LengthsType()),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), FloatType()),
"output_length": NeuralType(('B',), LengthsType()),
}
@typecheck()
def forward(self, input: torch.Tensor, input_length: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Estimate `num_outputs` masks from the input spectrogram.
Args:
input: C-channel input, shape (B, C, F, N)
input_length: Length of valid entries along the time dimension, shape (B,)
Returns:
Returns `num_outputs` masks in a tensor, shape (B, num_outputs, F, N),
and output length with shape (B,)
"""
input, _ = self.features(input=input, input_length=input_length)
B, num_feature_channels, num_features, N = input.shape
# (B, num_feat_channels, num_feat, N) -> (B, N, num_feat_channels, num_feat)
input = input.permute(0, 3, 1, 2)
# (B, N, num_feat_channels, num_feat) -> (B, N, num_feat_channels * num_features)
input = input.view(B, N, -1)
# Apply projection on num_feat
input = self.input_projection(input)
# Apply RNN on the input sequence
input_packed = torch.nn.utils.rnn.pack_padded_sequence(
input, input_length.cpu(), batch_first=True, enforce_sorted=False
).to(input.device)
self.rnn.flatten_parameters()
input_packed, _ = self.rnn(input_packed)
output, output_length = torch.nn.utils.rnn.pad_packed_sequence(input_packed, batch_first=True)
output_length = output_length.to(input.device)
# Layer normalization and skip connection
output = self.norm(self.fc(output)) + input
# Create `num_outputs` masks
masks = []
for output_projection in self.output_projections:
# Output projection
mask = output_projection(output)
mask = self.output_nonlinearity(mask)
# Back to the original format
# (B, N, F) -> (B, F, N)
mask = mask.transpose(2, 1)
# Append to the output
masks.append(mask)
# Stack along channel dimension to get (B, M, F, N)
masks = torch.stack(masks, axis=1)
# Mask frames beyond output length
length_mask: torch.Tensor = make_seq_mask_like(
lengths=output_length, like=masks, time_dim=-1, valid_ones=False
)
masks = masks.masked_fill(length_mask, 0.0)
return masks, output_length
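# Illustrative usage sketch (not part of NeMo; sizes below are placeholders):
# estimate two time-frequency masks from a 4-channel complex spectrogram.
def _mask_estimator_rnn_example() -> torch.Tensor:
    estimator = MaskEstimatorRNN(num_outputs=2, num_subbands=257, num_features=256, num_layers=2)
    spec = torch.randn(2, 4, 257, 50, dtype=torch.cfloat)    # (B, C, F, N)
    spec_len = torch.tensor([50, 40])
    masks, _ = estimator(input=spec, input_length=spec_len)  # (B, num_outputs, F, N)
    return masks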
class MaskReferenceChannel(NeuralModule):
"""A simple mask processor which applies mask
on ref_channel of the input signal.
Args:
ref_channel: Index of the reference channel.
mask_min_db: Threshold mask to a minimal value before applying it, defaults to -200dB
mask_max_db: Threshold mask to a maximal value before applying it, defaults to 0dB
"""
def __init__(self, ref_channel: int = 0, mask_min_db: float = -200, mask_max_db: float = 0):
super().__init__()
self.ref_channel = ref_channel
# Mask thresholding
self.mask_min = db2mag(mask_min_db)
self.mask_max = db2mag(mask_max_db)
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"input_length": NeuralType(('B',), LengthsType()),
"mask": NeuralType(('B', 'C', 'D', 'T'), FloatType()),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"output_length": NeuralType(('B',), LengthsType()),
}
@typecheck()
def forward(
self, input: torch.Tensor, input_length: torch.Tensor, mask: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply mask on `ref_channel` of the input signal.
This can be used to generate multi-channel output.
If `mask` has `M` channels, the output will have `M` channels as well.
Args:
input: Input signal complex-valued spectrogram, shape (B, C, F, N)
input_length: Length of valid entries along the time dimension, shape (B,)
mask: Mask for M outputs, shape (B, M, F, N)
Returns:
            M-channel output complex-valued spectrogram with shape (B, M, F, N)
"""
# Apply thresholds
mask = torch.clamp(mask, min=self.mask_min, max=self.mask_max)
# Apply each output mask on the ref channel
output = mask * input[:, self.ref_channel : self.ref_channel + 1, ...]
return output, input_length
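# Illustrative usage sketch (not part of NeMo; sizes below are placeholders):
# apply M estimated masks to the reference channel, yielding an M-channel output.
def _mask_reference_channel_example() -> torch.Tensor:
    processor = MaskReferenceChannel(ref_channel=0)
    spec = torch.randn(2, 4, 257, 50, dtype=torch.cfloat)  # (B, C, F, N)
    spec_len = torch.tensor([50, 40])
    masks = torch.rand(2, 2, 257, 50)                      # (B, M, F, N)
    output, _ = processor(input=spec, input_length=spec_len, mask=masks)
    return output                                          # (B, M, F, N)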
class MaskBasedBeamformer(NeuralModule):
"""Multi-channel processor using masks to estimate signal statistics.
Args:
filter_type: string denoting the type of the filter. Defaults to `mvdr`
ref_channel: reference channel for processing
mask_min_db: Threshold mask to a minimal value before applying it, defaults to -200dB
mask_max_db: Threshold mask to a maximal value before applying it, defaults to 0dB
"""
def __init__(
self,
filter_type: str = 'mvdr_souden',
ref_channel: int = 0,
mask_min_db: float = -200,
mask_max_db: float = 0,
):
if not HAVE_TORCHAUDIO:
logging.error('Could not import torchaudio. Some features might not work.')
raise ModuleNotFoundError(
"torchaudio is not installed but is necessary to instantiate a {self.__class__.__name__}"
)
super().__init__()
self.ref_channel = ref_channel
self.filter_type = filter_type
if self.filter_type == 'mvdr_souden':
self.psd = torchaudio.transforms.PSD()
self.filter = torchaudio.transforms.SoudenMVDR()
else:
raise ValueError(f'Unknown filter type {filter_type}')
# Mask thresholding
self.mask_min = db2mag(mask_min_db)
self.mask_max = db2mag(mask_max_db)
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"input_length": NeuralType(('B',), LengthsType()),
"mask": NeuralType(('B', 'C', 'D', 'T'), FloatType()),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"output_length": NeuralType(('B',), LengthsType()),
}
@typecheck()
    def forward(self, input: torch.Tensor, input_length: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply a mask-based beamformer to the input spectrogram.
This can be used to generate multi-channel output.
If `mask` has `M` channels, the output will have `M` channels as well.
Args:
input: Input signal complex-valued spectrogram, shape (B, C, F, N)
input_length: Length of valid entries along the time dimension, shape (B,)
mask: Mask for M output signals, shape (B, M, F, N)
Returns:
M-channel output signal complex-valued spectrogram, shape (B, M, F, N)
"""
# Apply threshold on the mask
mask = torch.clamp(mask, min=self.mask_min, max=self.mask_max)
# Length mask
length_mask: torch.Tensor = make_seq_mask_like(
lengths=input_length, like=mask[:, 0, ...], time_dim=-1, valid_ones=False
)
# Use each mask to generate an output at ref_channel
output = []
for m in range(mask.size(1)):
# Prepare mask for the desired and the undesired signal
mask_desired = mask[:, m, ...].masked_fill(length_mask, 0.0)
mask_undesired = (1 - mask_desired).masked_fill(length_mask, 0.0)
# Calculate PSDs
psd_desired = self.psd(input, mask_desired)
psd_undesired = self.psd(input, mask_undesired)
# Apply filter
output_m = self.filter(input, psd_desired, psd_undesired, reference_channel=self.ref_channel)
output_m = output_m.masked_fill(length_mask, 0.0)
# Save the current output (B, F, N)
output.append(output_m)
output = torch.stack(output, axis=1)
return output, input_length
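# Illustrative usage sketch (not part of NeMo; requires torchaudio, sizes are placeholders):
# mask-based MVDR (Souden) beamforming with one mask per desired output signal.
def _mask_based_beamformer_example() -> torch.Tensor:
    beamformer = MaskBasedBeamformer(filter_type='mvdr_souden', ref_channel=0)
    spec = torch.randn(2, 4, 257, 50, dtype=torch.cfloat)  # (B, C, F, N)
    spec_len = torch.tensor([50, 40])
    masks = torch.rand(2, 1, 257, 50)                      # (B, M, F, N)
    output, _ = beamformer(input=spec, input_length=spec_len, mask=masks)
    return output                                          # (B, M, F, N)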
class WPEFilter(NeuralModule):
"""A weighted prediction error filter.
Given input signal, and expected power of the desired signal, this
class estimates a multiple-input multiple-output prediction filter
and returns the filtered signal. Currently, estimation of statistics
and processing is performed in batch mode.
Args:
filter_length: Length of the prediction filter in frames, per channel
prediction_delay: Prediction delay in frames
diag_reg: Diagonal regularization for the correlation matrix Q, applied as diag_reg * trace(Q) + eps
eps: Small positive constant for regularization
References:
- Yoshioka and Nakatani, Generalization of Multi-Channel Linear Prediction
Methods for Blind MIMO Impulse Response Shortening, 2012
- Jukić et al, Group sparsity for MIMO speech dereverberation, 2015
"""
def __init__(
self, filter_length: int, prediction_delay: int, diag_reg: Optional[float] = 1e-8, eps: float = 1e-10
):
super().__init__()
self.filter_length = filter_length
self.prediction_delay = prediction_delay
self.diag_reg = diag_reg
self.eps = eps
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"power": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"input_length": NeuralType(('B',), LengthsType(), optional=True),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"output_length": NeuralType(('B',), LengthsType(), optional=True),
}
@typecheck()
def forward(
self, input: torch.Tensor, power: torch.Tensor, input_length: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""Given input and the predicted power for the desired signal, estimate
the WPE filter and return the processed signal.
Args:
input: Input signal, shape (B, C, F, N)
power: Predicted power of the desired signal, shape (B, C, F, N)
input_length: Optional, length of valid frames in `input`. Defaults to `None`
Returns:
Tuple of (processed_signal, output_length). Processed signal has the same
shape as the input signal (B, C, F, N), and the output length is the same
as the input length.
"""
# Temporal weighting: average power over channels, shape (B, F, N)
weight = torch.mean(power, dim=1)
# Use inverse power as the weight
weight = 1 / (weight + self.eps)
# Multi-channel convolution matrix for each subband
tilde_input = self.convtensor(input, filter_length=self.filter_length, delay=self.prediction_delay)
# Estimate correlation matrices
Q, R = self.estimate_correlations(
input=input, weight=weight, tilde_input=tilde_input, input_length=input_length
)
# Estimate prediction filter
G = self.estimate_filter(Q=Q, R=R)
# Apply prediction filter
undesired_signal = self.apply_filter(filter=G, tilde_input=tilde_input)
# Dereverberation
desired_signal = input - undesired_signal
if input_length is not None:
# Mask padded frames
length_mask: torch.Tensor = make_seq_mask_like(
lengths=input_length, like=desired_signal, time_dim=-1, valid_ones=False
)
desired_signal = desired_signal.masked_fill(length_mask, 0.0)
return desired_signal, input_length
@classmethod
def convtensor(
cls, x: torch.Tensor, filter_length: int, delay: int = 0, n_steps: Optional[int] = None
) -> torch.Tensor:
"""Create a tensor equivalent of convmtx_mc for each example in the batch.
The input signal tensor `x` has shape (B, C, F, N).
Convtensor returns a view of the input signal `x`.
Note: We avoid reshaping the output to collapse channels and filter taps into
a single dimension, e.g., (B, F, N, -1). In this way, the output is a view of the input,
while an additional reshape would result in a contiguous array and more memory use.
Args:
x: input tensor, shape (B, C, F, N)
filter_length: length of the filter, determines the shape of the convolution tensor
delay: delay to add to the input signal `x` before constructing the convolution tensor
            n_steps: Optional, number of time steps to keep in the output. Defaults to the number of
time steps in the input tensor.
Returns:
Return a convolutional tensor with shape (B, C, F, n_steps, filter_length)
"""
if x.ndim != 4:
raise RuntimeError(f'Expecting a 4-D input. Received input with shape {x.shape}')
B, C, F, N = x.shape
if n_steps is None:
# Keep the same length as the input signal
n_steps = N
# Pad temporal dimension
x = torch.nn.functional.pad(x, (filter_length - 1 + delay, 0))
# Build Toeplitz-like matrix view by unfolding across time
tilde_X = x.unfold(-1, filter_length, 1)
# Trim to the set number of time steps
tilde_X = tilde_X[:, :, :, :n_steps, :]
return tilde_X
@classmethod
def permute_convtensor(cls, x: torch.Tensor) -> torch.Tensor:
"""Reshape and permute columns to convert the result of
convtensor to be equal to convmtx_mc. This is used for verification
purposes and it is not required to use the filter.
Args:
x: output of self.convtensor, shape (B, C, F, N, filter_length)
Returns:
Output has shape (B, F, N, C*filter_length) that corresponds to
the layout of convmtx_mc.
"""
B, C, F, N, filter_length = x.shape
# .view will not work, so a copy will have to be created with .reshape
# That will result in more memory use, since we don't use a view of the original
# multi-channel signal
x = x.permute(0, 2, 3, 1, 4)
x = x.reshape(B, F, N, C * filter_length)
permute = []
for m in range(C):
permute[m * filter_length : (m + 1) * filter_length] = m * filter_length + np.flip(
np.arange(filter_length)
)
return x[..., permute]
def estimate_correlations(
self,
input: torch.Tensor,
weight: torch.Tensor,
tilde_input: torch.Tensor,
input_length: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Input signal, shape (B, C, F, N)
weight: Time-frequency weight, shape (B, F, N)
tilde_input: Multi-channel convolution tensor, shape (B, C, F, N, filter_length)
input_length: Length of each input example, shape (B)
Returns:
Returns a tuple of correlation matrices for each batch.
Let `X` denote the input signal in a single subband,
`tilde{X}` the corresponding multi-channel correlation matrix,
and `w` the vector of weights.
The first output is
Q = tilde{X}^H * diag(w) * tilde{X} (1)
for each (b, f).
The matrix calculated in (1) has shape (C * filter_length, C * filter_length)
The output is returned in a tensor with shape (B, F, C, filter_length, C, filter_length).
The second output is
R = tilde{X}^H * diag(w) * X (2)
for each (b, f).
The matrix calculated in (2) has shape (C * filter_length, C)
The output is returned in a tensor with shape (B, F, C, filter_length, C). The last
dimension corresponds to output channels.
"""
if input_length is not None:
# Take only valid samples into account
length_mask: torch.Tensor = make_seq_mask_like(
lengths=input_length, like=weight, time_dim=-1, valid_ones=False
)
weight = weight.masked_fill(length_mask, 0.0)
# Calculate (1)
# result: (B, F, C, filter_length, C, filter_length)
Q = torch.einsum('bjfik,bmfin->bfjkmn', tilde_input.conj(), weight[:, None, :, :, None] * tilde_input)
# Calculate (2)
# result: (B, F, C, filter_length, C)
R = torch.einsum('bjfik,bmfi->bfjkm', tilde_input.conj(), weight[:, None, :, :] * input)
return Q, R
def estimate_filter(self, Q: torch.Tensor, R: torch.Tensor) -> torch.Tensor:
"""Estimate the MIMO prediction filter as
G(b,f) = Q(b,f) \ R(b,f)
for each subband in each example in the batch (b, f).
Args:
Q: shape (B, F, C, filter_length, C, filter_length)
R: shape (B, F, C, filter_length, C)
Returns:
Complex-valued prediction filter, shape (B, C, F, C, filter_length)
"""
B, F, C, filter_length, _, _ = Q.shape
assert (
filter_length == self.filter_length
), f'Shape of Q {Q.shape} is not matching filter length {self.filter_length}'
# Reshape to analytical dimensions for each (b, f)
Q = Q.reshape(B, F, C * self.filter_length, C * filter_length)
R = R.reshape(B, F, C * self.filter_length, C)
# Diagonal regularization
if self.diag_reg:
# Regularization: diag_reg * trace(Q) + eps
diag_reg = self.diag_reg * torch.diagonal(Q, dim1=-2, dim2=-1).sum(-1).real + self.eps
# Apply regularization on Q
Q = Q + torch.diag_embed(diag_reg.unsqueeze(-1) * torch.ones(Q.shape[-1], device=Q.device))
# Solve for the filter
G = torch.linalg.solve(Q, R)
# Reshape to desired representation: (B, F, input channels, filter_length, output channels)
G = G.reshape(B, F, C, filter_length, C)
# Move output channels to front: (B, output channels, F, input channels, filter_length)
G = G.permute(0, 4, 1, 2, 3)
return G
def apply_filter(
self, filter: torch.Tensor, input: Optional[torch.Tensor] = None, tilde_input: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Apply a prediction filter `filter` on the input `input` as
output(b,f) = tilde{input(b,f)} * filter(b,f)
If available, directly use the convolution matrix `tilde_input`.
Args:
input: Input signal, shape (B, C, F, N)
tilde_input: Convolution matrix for the input signal, shape (B, C, F, N, filter_length)
filter: Prediction filter, shape (B, C, F, C, filter_length)
Returns:
Multi-channel signal obtained by applying the prediction filter on
the input signal, same shape as input (B, C, F, N)
"""
if input is None and tilde_input is None:
raise RuntimeError(f'Both inputs cannot be None simultaneously.')
if input is not None and tilde_input is not None:
raise RuntimeError(f'Both inputs cannot be provided simultaneously.')
if tilde_input is None:
tilde_input = self.convtensor(input, filter_length=self.filter_length, delay=self.prediction_delay)
# For each (batch, output channel, f, time step), sum across (input channel, filter tap)
output = torch.einsum('bjfik,bmfjk->bmfi', tilde_input, filter)
return output
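# Illustrative usage sketch (not part of NeMo; sizes are placeholders): a single WPE
# filtering pass, using the magnitude-squared input as a crude desired-power estimate.
def _wpe_filter_example() -> torch.Tensor:
    wpe = WPEFilter(filter_length=10, prediction_delay=3)
    spec = torch.randn(2, 4, 257, 50, dtype=torch.cfloat)  # (B, C, F, N)
    spec_len = torch.tensor([50, 40])
    power = spec.abs() ** 2
    dereverberated, _ = wpe(input=spec, power=power, input_length=spec_len)
    return dereverberated                                  # (B, C, F, N)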
class MaskBasedDereverbWPE(NeuralModule):
"""Multi-channel linear prediction-based dereverberation using
weighted prediction error for filter estimation.
An optional mask to estimate the signal power can be provided.
If a time-frequency mask is not provided, the algorithm corresponds
to the conventional WPE algorithm.
Args:
filter_length: Length of the convolutional filter for each channel in frames.
prediction_delay: Delay of the input signal for multi-channel linear prediction in frames.
num_iterations: Number of iterations for reweighting
mask_min_db: Threshold mask to a minimal value before applying it, defaults to -200dB
        mask_max_db: Threshold mask to a maximal value before applying it, defaults to 0dB
diag_reg: Diagonal regularization for WPE
eps: Small regularization constant
References:
- Kinoshita et al, Neural network-based spectrum estimation for online WPE dereverberation, 2017
- Yoshioka and Nakatani, Generalization of Multi-Channel Linear Prediction Methods for Blind MIMO Impulse Response Shortening, 2012
"""
def __init__(
self,
filter_length: int,
prediction_delay: int,
num_iterations: int = 1,
mask_min_db: float = -200,
mask_max_db: float = 0,
diag_reg: Optional[float] = 1e-8,
eps: float = 1e-10,
):
super().__init__()
# Filter setup
self.filter = WPEFilter(
filter_length=filter_length, prediction_delay=prediction_delay, diag_reg=diag_reg, eps=eps
)
self.num_iterations = num_iterations
# Mask thresholding
self.mask_min = db2mag(mask_min_db)
self.mask_max = db2mag(mask_max_db)
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"input_length": NeuralType(('B',), LengthsType(), optional=True),
"mask": NeuralType(('B', 'C', 'D', 'T'), FloatType(), optional=True),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"output_length": NeuralType(('B',), LengthsType(), optional=True),
}
@typecheck()
def forward(
self, input: torch.Tensor, input_length: Optional[torch.Tensor] = None, mask: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""Given an input signal `input`, apply the WPE dereverberation algoritm.
Args:
input: C-channel complex-valued spectrogram, shape (B, C, F, N)
input_length: Optional length for each signal in the batch, shape (B,)
mask: Optional mask, shape (B, 1, F, N) or (B, C, F, N)
Returns:
Processed tensor with the same number of channels as the input,
shape (B, C, F, N).
"""
io_dtype = input.dtype
with torch.cuda.amp.autocast(enabled=False):
output = input.cdouble()
for i in range(self.num_iterations):
magnitude = torch.abs(output)
if i == 0 and mask is not None:
# Apply thresholds
mask = torch.clamp(mask, min=self.mask_min, max=self.mask_max)
# Mask magnitude
magnitude = mask * magnitude
# Calculate power
power = magnitude ** 2
# Apply filter
output, output_length = self.filter(input=output, input_length=input_length, power=power)
return output.to(io_dtype), output_length
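# Illustrative usage sketch (not part of NeMo; sizes are placeholders): conventional
# (mask-free) WPE dereverberation with two power re-estimation iterations.
def _mask_based_dereverb_example() -> torch.Tensor:
    dereverb = MaskBasedDereverbWPE(filter_length=10, prediction_delay=3, num_iterations=2)
    spec = torch.randn(2, 4, 257, 50, dtype=torch.cfloat)  # (B, C, F, N)
    spec_len = torch.tensor([50, 40])
    output, _ = dereverb(input=spec, input_length=spec_len)
    return output                                          # (B, C, F, N)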
class MixtureConsistencyProjection(NeuralModule):
"""Ensure estimated sources are consistent with the input mixture.
    Note that the input mixture is assumed to be a single-channel signal.
Args:
weighting: Optional weighting mode for the consistency constraint.
If `None`, use uniform weighting. If `power`, use the power of the
estimated source as the weight.
eps: Small positive value for regularization
Reference:
Wisdom et al., Differentiable consistency constraints for improved deep speech enhancement, 2018
"""
def __init__(self, weighting: Optional[str] = None, eps: float = 1e-8):
super().__init__()
self.weighting = weighting
self.eps = eps
if self.weighting not in [None, 'power']:
raise NotImplementedError(f'Weighting mode {self.weighting} not implemented')
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"mixture": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"estimate": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
}
@typecheck()
def forward(self, mixture: torch.Tensor, estimate: torch.Tensor) -> torch.Tensor:
"""Enforce mixture consistency on the estimated sources.
Args:
mixture: Single-channel mixture, shape (B, 1, F, N)
estimate: M estimated sources, shape (B, M, F, N)
Returns:
Source estimates consistent with the mixture, shape (B, M, F, N)
"""
# number of sources
M = estimate.size(-3)
# estimated mixture based on the estimated sources
estimated_mixture = torch.sum(estimate, dim=-3, keepdim=True)
# weighting
if self.weighting is None:
weight = 1 / M
elif self.weighting == 'power':
weight = estimate.abs().pow(2)
weight = weight / (weight.sum(dim=-3, keepdim=True) + self.eps)
else:
raise NotImplementedError(f'Weighting mode {self.weighting} not implemented')
# consistent estimate
consistent_estimate = estimate + weight * (mixture - estimated_mixture)
return consistent_estimate
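# Illustrative usage sketch (not part of NeMo; sizes are placeholders): project two
# source estimates so that, after weighting, they sum back to the input mixture.
def _mixture_consistency_example() -> torch.Tensor:
    projection = MixtureConsistencyProjection(weighting='power')
    mixture = torch.randn(2, 1, 257, 50, dtype=torch.cfloat)   # (B, 1, F, N)
    estimate = torch.randn(2, 2, 257, 50, dtype=torch.cfloat)  # (B, M, F, N)
    return projection(mixture=mixture, estimate=estimate)      # (B, M, F, N)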
|
NeMo-main
|
nemo/collections/asr/modules/audio_modules.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from omegaconf import DictConfig
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import LengthsType, LogprobsType, NeuralType, PredictionsType
class ViterbiDecoderWithGraph(NeuralModule):
"""Viterbi Decoder with WFSA (Weighted Finite State Automaton) graphs.
Note:
Requires k2 v1.14 or later to be installed to use this module.
Decoder can be set up via the config, and optionally be passed keyword arguments as follows.
Examples:
.. code-block:: yaml
model: # Model config
...
graph_module_cfg: # Config for graph modules, e.g. ViterbiDecoderWithGraph
split_batch_size: 0
backend_cfg:
topo_type: "default" # other options: "compact", "shared_blank", "minimal"
topo_with_self_loops: true
token_lm: <token_lm_path> # must be provided for criterion_type: "map"
Args:
num_classes: Number of target classes for the decoder network to predict.
(Excluding the blank token).
backend: Which backend to use for decoding. Currently only `k2` is supported.
dec_type: Type of decoding graph to use. Choices: `topo` and `token_lm`,
with `topo` standing for the loss topology graph only
and `token_lm` for the topology composed with a token_lm graph.
return_type: Type of output. Choices: `1best` and `lattice`.
`1best` is represented as a list of 1D tensors.
`lattice` can be of type corresponding to the backend (e.g. k2.Fsa).
return_ilabels: For return_type=`1best`.
Whether to return input labels of a lattice (otherwise output labels).
output_aligned: For return_type=`1best`.
            Whether the tensor lengths will correspond to log_probs_length
            and the labels will be aligned to the emission frames
            (otherwise only the necessary labels are returned).
split_batch_size: Local batch size. Used for memory consumption reduction at the cost of speed performance.
            Effective only if 0 < split_batch_size < batch_size.
graph_module_cfg: Optional Dict of (str, value) pairs that are passed to the backend graph decoder.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"log_probs": NeuralType(("B", "T", "D") if self._3d_input else ("B", "T", "T", "D"), LogprobsType()),
"input_lengths": NeuralType(tuple("B"), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"predictions": NeuralType(("B", "T"), PredictionsType())}
def __init__(
self,
num_classes,
backend: str = "k2",
dec_type: str = "topo",
return_type: str = "1best",
return_ilabels: bool = True,
output_aligned: bool = True,
split_batch_size: int = 0,
graph_module_cfg: Optional[DictConfig] = None,
):
self._blank = num_classes
self.return_ilabels = return_ilabels
self.output_aligned = output_aligned
self.split_batch_size = split_batch_size
self.dec_type = dec_type
if return_type == "1best":
self.return_lattices = False
elif return_type == "lattice":
self.return_lattices = True
elif return_type == "nbest":
raise NotImplementedError(f"return_type {return_type} is not supported at the moment")
else:
raise ValueError(f"Unsupported return_type: {return_type}")
# we assume that self._blank + 1 == num_classes
if backend == "k2":
if self.dec_type == "topo":
from nemo.collections.asr.parts.k2.graph_decoders import CtcDecoder as Decoder
elif self.dec_type == "topo_rnnt_ali":
from nemo.collections.asr.parts.k2.graph_decoders import RnntAligner as Decoder
elif self.dec_type == "token_lm":
from nemo.collections.asr.parts.k2.graph_decoders import TokenLMDecoder as Decoder
elif self.dec_type == "loose_ali":
raise NotImplementedError()
elif self.dec_type == "tlg":
raise NotImplementedError(f"dec_type {self.dec_type} is not supported at the moment")
else:
raise ValueError(f"Unsupported dec_type: {self.dec_type}")
self._decoder = Decoder(num_classes=self._blank + 1, blank=self._blank, cfg=graph_module_cfg)
elif backend == "gtn":
raise NotImplementedError("gtn-backed decoding is not implemented")
self._3d_input = self.dec_type != "topo_rnnt"
super().__init__()
def update_graph(self, graph):
"""Updates graph of the backend graph decoder.
"""
self._decoder.update_graph(graph)
def _forward_impl(self, log_probs, log_probs_length, targets=None, target_length=None):
if targets is None and target_length is not None or targets is not None and target_length is None:
raise RuntimeError(
f"Both targets and target_length have to be None or not None: {targets}, {target_length}"
)
# do not use self.return_lattices for now
if targets is None:
align = False
decode_func = lambda a, b: self._decoder.decode(
a, b, return_lattices=False, return_ilabels=self.return_ilabels, output_aligned=self.output_aligned
)
else:
align = True
decode_func = lambda a, b, c, d: self._decoder.align(
a, b, c, d, return_lattices=False, return_ilabels=False, output_aligned=True
)
batch_size = log_probs.shape[0]
if self.split_batch_size > 0 and self.split_batch_size <= batch_size:
predictions = []
probs = []
for batch_idx in range(0, batch_size, self.split_batch_size):
begin = batch_idx
end = min(begin + self.split_batch_size, batch_size)
log_probs_length_part = log_probs_length[begin:end]
log_probs_part = log_probs[begin:end, : log_probs_length_part.max()]
if align:
target_length_part = target_length[begin:end]
targets_part = targets[begin:end, : target_length_part.max()]
predictions_part, probs_part = decode_func(
log_probs_part, log_probs_length_part, targets_part, target_length_part
)
del targets_part, target_length_part
else:
predictions_part, probs_part = decode_func(log_probs_part, log_probs_length_part)
del log_probs_part, log_probs_length_part
predictions += predictions_part
probs += probs_part
else:
predictions, probs = (
decode_func(log_probs, log_probs_length, targets, target_length)
if align
else decode_func(log_probs, log_probs_length)
)
assert len(predictions) == len(probs)
return predictions, probs
@torch.no_grad()
def forward(self, log_probs, log_probs_length):
if self.dec_type == "looseali":
raise RuntimeError(f"Decoder with dec_type=`{self.dec_type}` is not intended for regular decoding.")
predictions, probs = self._forward_impl(log_probs, log_probs_length)
lengths = torch.tensor([len(pred) for pred in predictions], device=predictions[0].device)
predictions_tensor = torch.full((len(predictions), lengths.max()), self._blank).to(
device=predictions[0].device
)
probs_tensor = torch.full((len(probs), lengths.max()), 1.0).to(device=predictions[0].device)
for i, (pred, prob) in enumerate(zip(predictions, probs)):
predictions_tensor[i, : lengths[i]] = pred
probs_tensor[i, : lengths[i]] = prob
return predictions_tensor, lengths, probs_tensor
@torch.no_grad()
def align(self, log_probs, log_probs_length, targets, target_length):
len_enough = (log_probs_length >= target_length) & (target_length > 0)
if torch.all(len_enough) or self.dec_type == "looseali":
results = self._forward_impl(log_probs, log_probs_length, targets, target_length)
else:
results = self._forward_impl(
log_probs[len_enough], log_probs_length[len_enough], targets[len_enough], target_length[len_enough]
)
for i, computed in enumerate(len_enough):
if not computed:
results[0].insert(i, torch.empty(0, dtype=torch.int32))
results[1].insert(i, torch.empty(0, dtype=torch.float))
return results
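# Illustrative usage sketch (not part of NeMo; requires the k2 backend and uses
# placeholder values): decode a batch of CTC log-probabilities into label sequences.
def _viterbi_decoder_example(log_probs: torch.Tensor, log_probs_length: torch.Tensor):
    decoder = ViterbiDecoderWithGraph(num_classes=28, backend="k2", dec_type="topo")
    # Returns padded predictions, their lengths, and per-token probabilities.
    return decoder(log_probs=log_probs, log_probs_length=log_probs_length)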
|
NeMo-main
|
nemo/collections/asr/modules/graph_decoder.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple
import torch
from packaging import version
from nemo.collections.asr.parts.numba.spec_augment import SpecAugmentNumba, spec_augment_launch_heuristics
from nemo.collections.asr.parts.preprocessing.features import (
FilterbankFeatures,
FilterbankFeaturesTA,
make_seq_mask_like,
)
from nemo.collections.asr.parts.submodules.spectr_augment import SpecAugment, SpecCutout
from nemo.core.classes import Exportable, NeuralModule, typecheck
from nemo.core.neural_types import (
AudioSignal,
LengthsType,
MelSpectrogramType,
MFCCSpectrogramType,
NeuralType,
SpectrogramType,
)
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
from nemo.utils import logging
try:
import torchaudio
import torchaudio.functional
import torchaudio.transforms
TORCHAUDIO_VERSION = version.parse(torchaudio.__version__)
TORCHAUDIO_VERSION_MIN = version.parse('0.5')
HAVE_TORCHAUDIO = True
except ModuleNotFoundError:
HAVE_TORCHAUDIO = False
__all__ = [
'AudioToMelSpectrogramPreprocessor',
'AudioToSpectrogram',
'SpectrogramToAudio',
'AudioToMFCCPreprocessor',
'SpectrogramAugmentation',
'MaskedPatchAugmentation',
'CropOrPadSpectrogramAugmentation',
]
class AudioPreprocessor(NeuralModule, ABC):
"""
An interface for Neural Modules that performs audio pre-processing,
transforming the wav files to features.
"""
def __init__(self, win_length, hop_length):
super().__init__()
self.win_length = win_length
self.hop_length = hop_length
self.torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'ones': torch.ones,
None: torch.ones,
}
@typecheck()
@torch.no_grad()
def forward(self, input_signal, length):
processed_signal, processed_length = self.get_features(input_signal, length)
return processed_signal, processed_length
@abstractmethod
def get_features(self, input_signal, length):
# Called by forward(). Subclasses should implement this.
pass
class AudioToMelSpectrogramPreprocessor(AudioPreprocessor, Exportable):
"""Featurizer module that converts wavs to mel spectrograms.
Args:
sample_rate (int): Sample rate of the input audio data.
Defaults to 16000
window_size (float): Size of window for fft in seconds
Defaults to 0.02
window_stride (float): Stride of window for fft in seconds
Defaults to 0.01
n_window_size (int): Size of window for fft in samples
Defaults to None. Use one of window_size or n_window_size.
n_window_stride (int): Stride of window for fft in samples
Defaults to None. Use one of window_stride or n_window_stride.
window (str): Windowing function for fft. can be one of ['hann',
'hamming', 'blackman', 'bartlett']
Defaults to "hann"
normalize (str): Can be one of ['per_feature', 'all_features']; all
other options disable feature normalization. 'all_features'
normalizes the entire spectrogram to be mean 0 with std 1.
            'per_feature' normalizes per channel / freq instead.
Defaults to "per_feature"
n_fft (int): Length of FT window. If None, it uses the smallest power
of 2 that is larger than n_window_size.
Defaults to None
preemph (float): Amount of pre emphasis to add to audio. Can be
disabled by passing None.
Defaults to 0.97
features (int): Number of mel spectrogram freq bins to output.
Defaults to 64
lowfreq (int): Lower bound on mel basis in Hz.
Defaults to 0
        highfreq (int): Upper bound on mel basis in Hz.
Defaults to None
log (bool): Log features.
Defaults to True
log_zero_guard_type(str): Need to avoid taking the log of zero. There
are two options: "add" or "clamp".
Defaults to "add".
log_zero_guard_value(float, or str): Add or clamp requires the number
to add with or clamp to. log_zero_guard_value can either be a float
or "tiny" or "eps". torch.finfo is used if "tiny" or "eps" is
passed.
Defaults to 2**-24.
dither (float): Amount of white-noise dithering.
Defaults to 1e-5
pad_to (int): Ensures that the output size of the time dimension is
a multiple of pad_to.
Defaults to 16
frame_splicing (int): Defaults to 1
exact_pad (bool): If True, sets stft center to False and adds padding, such that num_frames = audio_length
// hop_length. Defaults to False.
pad_value (float): The value that shorter mels are padded with.
Defaults to 0
mag_power (float): The power that the linear spectrogram is raised to
prior to multiplication with mel basis.
Defaults to 2 for a power spec
rng : Random number generator
nb_augmentation_prob (float) : Probability with which narrowband augmentation would be applied to
samples in the batch.
Defaults to 0.0
nb_max_freq (int) : Frequency above which all frequencies will be masked for narrowband augmentation.
Defaults to 4000
use_torchaudio: Whether to use the `torchaudio` implementation.
mel_norm: Normalization used for mel filterbank weights.
Defaults to 'slaney' (area normalization)
stft_exact_pad: Deprecated argument, kept for compatibility with older checkpoints.
stft_conv: Deprecated argument, kept for compatibility with older checkpoints.
"""
def save_to(self, save_path: str):
pass
@classmethod
def restore_from(cls, restore_path: str):
pass
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"input_signal": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),
"length": NeuralType(
tuple('B'), LengthsType()
), # Please note that length should be in samples not seconds.
}
@property
def output_types(self):
"""Returns definitions of module output ports.
processed_signal:
0: AxisType(BatchTag)
1: AxisType(MelSpectrogramSignalTag)
2: AxisType(ProcessedTimeTag)
processed_length:
0: AxisType(BatchTag)
"""
return {
"processed_signal": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"processed_length": NeuralType(tuple('B'), LengthsType()),
}
def __init__(
self,
sample_rate=16000,
window_size=0.02,
window_stride=0.01,
n_window_size=None,
n_window_stride=None,
window="hann",
normalize="per_feature",
n_fft=None,
preemph=0.97,
features=64,
lowfreq=0,
highfreq=None,
log=True,
log_zero_guard_type="add",
log_zero_guard_value=2 ** -24,
dither=1e-5,
pad_to=16,
frame_splicing=1,
exact_pad=False,
pad_value=0,
mag_power=2.0,
rng=None,
nb_augmentation_prob=0.0,
nb_max_freq=4000,
use_torchaudio: bool = False,
mel_norm="slaney",
stft_exact_pad=False, # Deprecated arguments; kept for config compatibility
stft_conv=False, # Deprecated arguments; kept for config compatibility
):
super().__init__(n_window_size, n_window_stride)
self._sample_rate = sample_rate
if window_size and n_window_size:
raise ValueError(f"{self} received both window_size and " f"n_window_size. Only one should be specified.")
if window_stride and n_window_stride:
raise ValueError(
f"{self} received both window_stride and " f"n_window_stride. Only one should be specified."
)
if window_size:
n_window_size = int(window_size * self._sample_rate)
if window_stride:
n_window_stride = int(window_stride * self._sample_rate)
# Given the long and similar argument list, point to the class and instantiate it by reference
if not use_torchaudio:
featurizer_class = FilterbankFeatures
else:
featurizer_class = FilterbankFeaturesTA
self.featurizer = featurizer_class(
sample_rate=self._sample_rate,
n_window_size=n_window_size,
n_window_stride=n_window_stride,
window=window,
normalize=normalize,
n_fft=n_fft,
preemph=preemph,
nfilt=features,
lowfreq=lowfreq,
highfreq=highfreq,
log=log,
log_zero_guard_type=log_zero_guard_type,
log_zero_guard_value=log_zero_guard_value,
dither=dither,
pad_to=pad_to,
frame_splicing=frame_splicing,
exact_pad=exact_pad,
pad_value=pad_value,
mag_power=mag_power,
rng=rng,
nb_augmentation_prob=nb_augmentation_prob,
nb_max_freq=nb_max_freq,
mel_norm=mel_norm,
stft_exact_pad=stft_exact_pad, # Deprecated arguments; kept for config compatibility
stft_conv=stft_conv, # Deprecated arguments; kept for config compatibility
)
def input_example(self, max_batch: int = 8, max_dim: int = 32000, min_length: int = 200):
batch_size = torch.randint(low=1, high=max_batch, size=[1]).item()
max_length = torch.randint(low=min_length, high=max_dim, size=[1]).item()
signals = torch.rand(size=[batch_size, max_length]) * 2 - 1
lengths = torch.randint(low=min_length, high=max_dim, size=[batch_size])
lengths[0] = max_length
return signals, lengths
def get_features(self, input_signal, length):
return self.featurizer(input_signal, length)
@property
def filter_banks(self):
return self.featurizer.filter_banks
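# Illustrative usage sketch (not part of NeMo; sizes are placeholders): convert a
# batch of raw waveforms into log-mel spectrogram features.
def _mel_preprocessor_example() -> torch.Tensor:
    preprocessor = AudioToMelSpectrogramPreprocessor(sample_rate=16000, features=80)
    audio = torch.randn(2, 16000)                 # (B, T), one second at 16 kHz
    audio_len = torch.tensor([16000, 12000])
    mel, mel_len = preprocessor(input_signal=audio, length=audio_len)
    return mel                                    # (B, features, T_frames)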
class AudioToMFCCPreprocessor(AudioPreprocessor):
"""Preprocessor that converts wavs to MFCCs.
Uses torchaudio.transforms.MFCC.
Args:
sample_rate: The sample rate of the audio.
Defaults to 16000.
window_size: Size of window for fft in seconds. Used to calculate the
win_length arg for mel spectrogram.
Defaults to 0.02
        window_stride: Stride of window for fft in seconds. Used to calculate
the hop_length arg for mel spect.
Defaults to 0.01
n_window_size: Size of window for fft in samples
Defaults to None. Use one of window_size or n_window_size.
n_window_stride: Stride of window for fft in samples
Defaults to None. Use one of window_stride or n_window_stride.
window: Windowing function for fft. can be one of ['hann',
'hamming', 'blackman', 'bartlett', 'none', 'null'].
Defaults to 'hann'
n_fft: Length of FT window. If None, it uses the smallest power of 2
that is larger than n_window_size.
Defaults to None
lowfreq (int): Lower bound on mel basis in Hz.
Defaults to 0
        highfreq (int): Upper bound on mel basis in Hz.
Defaults to None
n_mels: Number of mel filterbanks.
Defaults to 64
n_mfcc: Number of coefficients to retain
Defaults to 64
dct_type: Type of discrete cosine transform to use
norm: Type of norm to use
log: Whether to use log-mel spectrograms instead of db-scaled.
Defaults to True.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"input_signal": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {
"processed_signal": NeuralType(('B', 'D', 'T'), MFCCSpectrogramType()),
"processed_length": NeuralType(tuple('B'), LengthsType()),
}
def save_to(self, save_path: str):
pass
@classmethod
def restore_from(cls, restore_path: str):
pass
def __init__(
self,
sample_rate=16000,
window_size=0.02,
window_stride=0.01,
n_window_size=None,
n_window_stride=None,
window='hann',
n_fft=None,
lowfreq=0.0,
highfreq=None,
n_mels=64,
n_mfcc=64,
dct_type=2,
norm='ortho',
log=True,
):
self._sample_rate = sample_rate
if not HAVE_TORCHAUDIO:
logging.error('Could not import torchaudio. Some features might not work.')
raise ModuleNotFoundError(
"torchaudio is not installed but is necessary for "
"AudioToMFCCPreprocessor. We recommend you try "
"building it from source for the PyTorch version you have."
)
if window_size and n_window_size:
raise ValueError(f"{self} received both window_size and " f"n_window_size. Only one should be specified.")
if window_stride and n_window_stride:
raise ValueError(
f"{self} received both window_stride and " f"n_window_stride. Only one should be specified."
)
# Get win_length (n_window_size) and hop_length (n_window_stride)
if window_size:
n_window_size = int(window_size * self._sample_rate)
if window_stride:
n_window_stride = int(window_stride * self._sample_rate)
super().__init__(n_window_size, n_window_stride)
mel_kwargs = {}
mel_kwargs['f_min'] = lowfreq
mel_kwargs['f_max'] = highfreq
mel_kwargs['n_mels'] = n_mels
mel_kwargs['n_fft'] = n_fft or 2 ** math.ceil(math.log2(n_window_size))
mel_kwargs['win_length'] = n_window_size
mel_kwargs['hop_length'] = n_window_stride
# Set window_fn. None defaults to torch.ones.
window_fn = self.torch_windows.get(window, None)
if window_fn is None:
raise ValueError(
f"Window argument for AudioProcessor is invalid: {window}."
f"For no window function, use 'ones' or None."
)
mel_kwargs['window_fn'] = window_fn
# Use torchaudio's implementation of MFCCs as featurizer
self.featurizer = torchaudio.transforms.MFCC(
sample_rate=self._sample_rate,
n_mfcc=n_mfcc,
dct_type=dct_type,
norm=norm,
log_mels=log,
melkwargs=mel_kwargs,
)
def get_features(self, input_signal, length):
features = self.featurizer(input_signal)
seq_len = torch.ceil(length.to(torch.float32) / self.hop_length).to(dtype=torch.long)
return features, seq_len
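# Illustrative usage sketch (not part of NeMo; requires torchaudio, sizes are
# placeholders): extract MFCC features from raw audio.
def _mfcc_preprocessor_example() -> torch.Tensor:
    preprocessor = AudioToMFCCPreprocessor(sample_rate=16000, n_mfcc=40)
    audio = torch.randn(2, 16000)                 # (B, T)
    audio_len = torch.tensor([16000, 12000])
    mfcc, mfcc_len = preprocessor(input_signal=audio, length=audio_len)
    return mfcc                                   # (B, n_mfcc, T_frames)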
class SpectrogramAugmentation(NeuralModule):
"""
Performs time and freq cuts in one of two ways.
SpecAugment zeroes out vertical and horizontal sections as described in
SpecAugment (https://arxiv.org/abs/1904.08779). Arguments for use with
SpecAugment are `freq_masks`, `time_masks`, `freq_width`, and `time_width`.
SpecCutout zeroes out rectangulars as described in Cutout
(https://arxiv.org/abs/1708.04552). Arguments for use with Cutout are
`rect_masks`, `rect_freq`, and `rect_time`.
Args:
freq_masks (int): how many frequency segments should be cut.
Defaults to 0.
time_masks (int): how many time segments should be cut
Defaults to 0.
freq_width (int): maximum number of frequencies to be cut in one
segment.
Defaults to 10.
time_width (int): maximum number of time steps to be cut in one
segment
Defaults to 10.
rect_masks (int): how many rectangular masks should be cut
Defaults to 0.
rect_freq (int): maximum size of cut rectangles along the frequency
dimension
            Defaults to 20.
rect_time (int): maximum size of cut rectangles along the time
dimension
            Defaults to 5.
"""
@property
def input_types(self):
"""Returns definitions of module input types
"""
return {
"input_spec": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output types
"""
return {"augmented_spec": NeuralType(('B', 'D', 'T'), SpectrogramType())}
def __init__(
self,
freq_masks=0,
time_masks=0,
freq_width=10,
time_width=10,
rect_masks=0,
rect_time=5,
rect_freq=20,
rng=None,
mask_value=0.0,
use_numba_spec_augment: bool = True,
):
super().__init__()
if rect_masks > 0:
self.spec_cutout = SpecCutout(rect_masks=rect_masks, rect_time=rect_time, rect_freq=rect_freq, rng=rng,)
# self.spec_cutout.to(self._device)
else:
self.spec_cutout = lambda input_spec: input_spec
if freq_masks + time_masks > 0:
self.spec_augment = SpecAugment(
freq_masks=freq_masks,
time_masks=time_masks,
freq_width=freq_width,
time_width=time_width,
rng=rng,
mask_value=mask_value,
)
else:
self.spec_augment = lambda input_spec, length: input_spec
# Check if numba is supported, and use a Numba kernel if it is
if use_numba_spec_augment and numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__):
logging.info('Numba CUDA SpecAugment kernel is being used')
self.spec_augment_numba = SpecAugmentNumba(
freq_masks=freq_masks,
time_masks=time_masks,
freq_width=freq_width,
time_width=time_width,
rng=rng,
mask_value=mask_value,
)
else:
self.spec_augment_numba = None
@typecheck()
def forward(self, input_spec, length):
augmented_spec = self.spec_cutout(input_spec=input_spec)
# To run the Numba kernel, correct numba version is required as well as
# tensor must be on GPU and length must be provided
if self.spec_augment_numba is not None and spec_augment_launch_heuristics(augmented_spec, length):
augmented_spec = self.spec_augment_numba(input_spec=augmented_spec, length=length)
else:
augmented_spec = self.spec_augment(input_spec=augmented_spec, length=length)
return augmented_spec
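# Illustrative usage sketch (not part of NeMo; parameter values are placeholders):
# apply SpecAugment-style frequency and time masking to a batch of spectrograms.
def _spectrogram_augmentation_example(spec: torch.Tensor, spec_len: torch.Tensor) -> torch.Tensor:
    augment = SpectrogramAugmentation(freq_masks=2, time_masks=2, freq_width=15, time_width=25)
    return augment(input_spec=spec, length=spec_len)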
class MaskedPatchAugmentation(NeuralModule):
"""
Zeroes out fixed size time patches of the spectrogram.
All samples in batch are guaranteed to have the same amount of masked time steps.
Optionally also performs frequency masking in the same way as SpecAugment.
Args:
patch_size (int): up to how many time steps does one patch consist of.
Defaults to 48.
mask_patches (float): how many patches should be masked in each sample.
if >= 1., interpreted as number of patches (after converting to int)
if <1., interpreted as fraction of total tokens to be masked (number of patches is rounded up)
Defaults to 10.
freq_masks (int): how many frequency segments should be cut.
Defaults to 0.
freq_width (int): maximum number of frequencies to be cut in a segment.
Defaults to 0.
"""
@property
def input_types(self):
"""Returns definitions of module input types
"""
return {
"input_spec": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output types
"""
return {"augmented_spec": NeuralType(('B', 'D', 'T'), SpectrogramType())}
def __init__(
self, patch_size: int = 48, mask_patches: float = 10.0, freq_masks: int = 0, freq_width: int = 0,
):
super().__init__()
self.patch_size = patch_size
if mask_patches >= 1:
self.mask_patches = int(mask_patches)
elif mask_patches >= 0:
self._mask_fraction = mask_patches
self.mask_patches = None
else:
raise ValueError('mask_patches cannot be negative')
if freq_masks > 0:
self.spec_augment = SpecAugment(freq_masks=freq_masks, time_masks=0, freq_width=freq_width, time_width=0,)
else:
self.spec_augment = None
@typecheck()
def forward(self, input_spec, length):
augmented_spec = input_spec
min_len = torch.min(length)
if self.mask_patches is None:
# masking specified as fraction
len_fraction = int(min_len * self._mask_fraction)
mask_patches = len_fraction // self.patch_size + int(len_fraction % self.patch_size != 0)
else:
mask_patches = self.mask_patches
if min_len < self.patch_size * mask_patches:
mask_patches = min_len // self.patch_size
for idx in range(input_spec.shape[0]):
cur_len = length[idx]
patches = range(cur_len // self.patch_size)
masked_patches = random.sample(patches, mask_patches)
for mp in masked_patches:
augmented_spec[idx, :, mp * self.patch_size : (mp + 1) * self.patch_size] = 0.0
if self.spec_augment is not None:
augmented_spec = self.spec_augment(input_spec=augmented_spec, length=length)
return augmented_spec
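# Illustrative usage sketch (not part of NeMo; parameter values are placeholders):
# zero out two 16-frame time patches per example (spectrograms should be at least
# a few patches long).
def _masked_patch_augmentation_example(spec: torch.Tensor, spec_len: torch.Tensor) -> torch.Tensor:
    augment = MaskedPatchAugmentation(patch_size=16, mask_patches=2.0)
    return augment(input_spec=spec, length=spec_len)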
class CropOrPadSpectrogramAugmentation(NeuralModule):
"""
Pad or Crop the incoming Spectrogram to a certain shape.
Args:
audio_length (int): the final number of timesteps that is required.
The signal will be either padded or cropped temporally to this
size.
"""
def __init__(self, audio_length):
super(CropOrPadSpectrogramAugmentation, self).__init__()
self.audio_length = audio_length
@typecheck()
@torch.no_grad()
def forward(self, input_signal, length):
image = input_signal
num_images = image.shape[0]
audio_length = self.audio_length
image_len = image.shape[-1]
# Crop long signal
if image_len > audio_length: # randomly slice
cutout_images = []
            offsets = torch.randint(low=0, high=image_len - audio_length + 1, size=[num_images])
            for idx, offset in enumerate(offsets):
cutout_images.append(image[idx : idx + 1, :, offset : offset + audio_length])
image = torch.cat(cutout_images, dim=0)
del cutout_images
else: # symmetrically pad short signal with zeros
pad_left = (audio_length - image_len) // 2
pad_right = (audio_length - image_len) // 2
if (audio_length - image_len) % 2 == 1:
pad_right += 1
image = torch.nn.functional.pad(image, [pad_left, pad_right], mode="constant", value=0)
# Replace dynamic length sequences with static number of timesteps
length = (length * 0) + audio_length
return image, length
@property
def input_types(self):
"""Returns definitions of module output ports.
"""
return {
"input_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"processed_length": NeuralType(tuple('B'), LengthsType()),
}
def save_to(self, save_path: str):
pass
@classmethod
def restore_from(cls, restore_path: str):
pass
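# --- Illustrative usage sketch (not part of the NeMo source): shows that
# CropOrPadSpectrogramAugmentation forces every spectrogram to a fixed number of time steps.
# The sizes used here are assumptions for the example only.
def _example_crop_or_pad():
    crop_or_pad = CropOrPadSpectrogramAugmentation(audio_length=128)
    long_spec = torch.randn(2, 64, 300)               # longer than 128 -> randomly cropped
    short_spec = torch.randn(2, 64, 100)              # shorter than 128 -> symmetrically zero-padded
    cropped, new_len = crop_or_pad(input_signal=long_spec, length=torch.tensor([300, 250]))
    padded, _ = crop_or_pad(input_signal=short_spec, length=torch.tensor([100, 90]))
    return cropped.shape, padded.shape, new_len       # both (2, 64, 128); lengths are all 128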
class AudioToSpectrogram(NeuralModule):
"""Transform a batch of input multi-channel signals into a batch of
STFT-based spectrograms.
Args:
fft_length: length of FFT
hop_length: length of hops/shifts of the sliding window
power: exponent for magnitude spectrogram. Default `None` will
return a complex-valued spectrogram
"""
def __init__(self, fft_length: int, hop_length: int, power: Optional[float] = None):
if not HAVE_TORCHAUDIO:
logging.error('Could not import torchaudio. Some features might not work.')
raise ModuleNotFoundError(
"torchaudio is not installed but is necessary to instantiate a {self.__class__.__name__}"
)
super().__init__()
# For now, assume FFT length is divisible by two
if fft_length % 2 != 0:
raise ValueError(f'fft_length = {fft_length} must be divisible by 2')
self.stft = torchaudio.transforms.Spectrogram(
n_fft=fft_length, hop_length=hop_length, power=power, pad_mode='constant'
)
# number of subbands
self.F = fft_length // 2 + 1
@property
def num_subbands(self) -> int:
return self.F
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'T'), AudioSignal()),
"input_length": NeuralType(('B',), LengthsType(), optional=True),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"output_length": NeuralType(('B',), LengthsType()),
}
@typecheck()
def forward(
self, input: torch.Tensor, input_length: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Convert a batch of C-channel input signals
into a batch of complex-valued spectrograms.
Args:
input: Time-domain input signal with C channels, shape (B, C, T)
input_length: Length of valid entries along the time dimension, shape (B,)
Returns:
Output spectrogram with F subbands and N time frames, shape (B, C, F, N)
and output length with shape (B,).
"""
B, T = input.size(0), input.size(-1)
input = input.view(B, -1, T)
# STFT output (B, C, F, N)
with torch.cuda.amp.autocast(enabled=False):
output = self.stft(input.float())
if input_length is not None:
# Mask padded frames
output_length = self.get_output_length(input_length=input_length)
length_mask: torch.Tensor = make_seq_mask_like(
lengths=output_length, like=output, time_dim=-1, valid_ones=False
)
output = output.masked_fill(length_mask, 0.0)
else:
# Assume all frames are valid for all examples in the batch
output_length = output.size(-1) * torch.ones(B, device=output.device).long()
return output, output_length
def get_output_length(self, input_length: torch.Tensor) -> torch.Tensor:
"""Get length of valid frames for the output.
Args:
input_length: number of valid samples, shape (B,)
Returns:
Number of valid frames, shape (B,)
"""
output_length = input_length.div(self.stft.hop_length, rounding_mode='floor').add(1).long()
return output_length
class SpectrogramToAudio(NeuralModule):
"""Transform a batch of input multi-channel spectrograms into a batch of
time-domain multi-channel signals.
Args:
fft_length: length of FFT
hop_length: length of hops/shifts of the sliding window
"""
def __init__(self, fft_length: int, hop_length: int):
if not HAVE_TORCHAUDIO:
logging.error('Could not import torchaudio. Some features might not work.')
raise ModuleNotFoundError(
"torchaudio is not installed but is necessary to instantiate a {self.__class__.__name__}"
)
super().__init__()
# For now, assume FFT length is divisible by two
if fft_length % 2 != 0:
raise ValueError(f'fft_length = {fft_length} must be divisible by 2')
self.istft = torchaudio.transforms.InverseSpectrogram(
n_fft=fft_length, hop_length=hop_length, pad_mode='constant'
)
self.F = fft_length // 2 + 1
@property
def num_subbands(self) -> int:
return self.F
@property
def input_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"input": NeuralType(('B', 'C', 'D', 'T'), SpectrogramType()),
"input_length": NeuralType(('B',), LengthsType(), optional=True),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
"""Returns definitions of module output ports.
"""
return {
"output": NeuralType(('B', 'C', 'T'), AudioSignal()),
"output_length": NeuralType(('B',), LengthsType()),
}
@typecheck()
def forward(self, input: torch.Tensor, input_length: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
"""Convert input complex-valued spectrogram to a time-domain
signal. Multi-channel IO is supported.
Args:
input: Input spectrogram for C channels, shape (B, C, F, N)
input_length: Length of valid entries along the time dimension, shape (B,)
Returns:
Time-domain signal with T time-domain samples and C channels, (B, C, T)
and output length with shape (B,).
"""
B, F, N = input.size(0), input.size(-2), input.size(-1)
assert F == self.F, f'Number of subbands F={F} not matching self.F={self.F}'
input = input.view(B, -1, F, N)
# iSTFT output (B, C, T)
with torch.cuda.amp.autocast(enabled=False):
output = self.istft(input.cfloat())
if input_length is not None:
# Mask padded samples
output_length = self.get_output_length(input_length=input_length)
length_mask: torch.Tensor = make_seq_mask_like(
lengths=output_length, like=output, time_dim=-1, valid_ones=False
)
output = output.masked_fill(length_mask, 0.0)
else:
# Assume all samples are valid for all examples in the batch
output_length = output.size(-1) * torch.ones(B, device=output.device).long()
return output, output_length
def get_output_length(self, input_length: torch.Tensor) -> torch.Tensor:
"""Get length of valid samples for the output.
Args:
input_length: number of valid frames, shape (B,)
Returns:
Number of valid samples, shape (B,)
"""
output_length = input_length.sub(1).mul(self.istft.hop_length).long()
return output_length
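# --- Illustrative usage sketch (not part of the NeMo source): an analysis/synthesis round trip
# with AudioToSpectrogram and SpectrogramToAudio. It assumes torchaudio is installed; the FFT
# size, hop size, and signal lengths are arbitrary choices for the example.
def _example_stft_round_trip():
    analysis = AudioToSpectrogram(fft_length=512, hop_length=128)      # power=None -> complex output
    synthesis = SpectrogramToAudio(fft_length=512, hop_length=128)
    audio = torch.randn(2, 1, 16000)                                   # (B, C, T) time-domain input
    audio_len = torch.tensor([16000, 12000])
    spec, spec_len = analysis(input=audio, input_length=audio_len)     # (B, C, F, N) complex spectrogram
    recon, recon_len = synthesis(input=spec, input_length=spec_len)    # back to (B, C, T') time domain
    return spec.shape, recon.shape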
@dataclass
class AudioToMelSpectrogramPreprocessorConfig:
_target_: str = "nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor"
sample_rate: int = 16000
window_size: float = 0.02
window_stride: float = 0.01
n_window_size: Optional[int] = None
n_window_stride: Optional[int] = None
window: str = "hann"
normalize: str = "per_feature"
n_fft: Optional[int] = None
preemph: float = 0.97
features: int = 64
lowfreq: int = 0
highfreq: Optional[int] = None
log: bool = True
log_zero_guard_type: str = "add"
log_zero_guard_value: float = 2 ** -24
dither: float = 1e-5
pad_to: int = 16
frame_splicing: int = 1
exact_pad: bool = False
pad_value: int = 0
mag_power: float = 2.0
rng: Optional[str] = None
nb_augmentation_prob: float = 0.0
nb_max_freq: int = 4000
use_torchaudio: bool = False
mel_norm: str = "slaney"
stft_exact_pad: bool = False # Deprecated argument, kept for compatibility with older checkpoints.
stft_conv: bool = False # Deprecated argument, kept for compatibility with older checkpoints.
@dataclass
class AudioToMFCCPreprocessorConfig:
_target_: str = 'nemo.collections.asr.modules.AudioToMFCCPreprocessor'
sample_rate: int = 16000
window_size: float = 0.02
window_stride: float = 0.01
n_window_size: Optional[int] = None
n_window_stride: Optional[int] = None
window: str = 'hann'
n_fft: Optional[int] = None
lowfreq: Optional[float] = 0.0
highfreq: Optional[float] = None
n_mels: int = 64
n_mfcc: int = 64
dct_type: int = 2
norm: str = 'ortho'
log: bool = True
@dataclass
class SpectrogramAugmentationConfig:
_target_: str = "nemo.collections.asr.modules.SpectrogramAugmentation"
freq_masks: int = 0
time_masks: int = 0
freq_width: int = 0
time_width: Optional[Any] = 0
rect_masks: int = 0
rect_time: int = 0
rect_freq: int = 0
mask_value: float = 0
rng: Optional[Any] = None # random.Random() type
use_numba_spec_augment: bool = True
@dataclass
class CropOrPadSpectrogramAugmentationConfig:
audio_length: int
_target_: str = "nemo.collections.asr.modules.CropOrPadSpectrogramAugmentation"
@dataclass
class MaskedPatchAugmentationConfig:
patch_size: int = 48
mask_patches: float = 10.0
freq_masks: int = 0
freq_width: int = 0
_target_: str = "nemo.collections.asr.modules.MaskedPatchAugmentation"
|
NeMo-main
|
nemo/collections/asr/modules/audio_preprocessing.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
import torch.nn as nn
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import AcousticEncodedRepresentation, LogprobsType, NeuralType
__all__ = ['LSTMDecoder']
class LSTMDecoder(NeuralModule, Exportable):
"""
Simple LSTM Decoder for ASR models
Args:
feat_in (int): size of the input features
num_classes (int): the size of the vocabulary
lstm_hidden_size (int): hidden size of the LSTM layers
vocabulary (vocab): The vocabulary
bidirectional (bool): default is False. Whether LSTMs are bidirectional or not
num_layers (int): default is 1. Number of LSTM layers stacked
"""
@property
def input_types(self):
return OrderedDict({"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation())})
@property
def output_types(self):
return OrderedDict({"logprobs": NeuralType(('B', 'T', 'D'), LogprobsType())})
def __init__(self, feat_in, num_classes, lstm_hidden_size, vocabulary=None, bidirectional=False, num_layers=1):
super().__init__()
if vocabulary is not None:
if num_classes != len(vocabulary):
raise ValueError(
f"If vocabulary is specified, it's length should be equal to the num_classes. "
f"Instead got: num_classes={num_classes} and len(vocabulary)={len(vocabulary)}"
)
self.__vocabulary = vocabulary
self._feat_in = feat_in
# Add 1 for blank char
self._num_classes = num_classes + 1
self.lstm_layer = nn.LSTM(
input_size=feat_in,
hidden_size=lstm_hidden_size,
num_layers=num_layers,
batch_first=True,
bidirectional=bidirectional,
)
lstm_hidden_size = 2 * lstm_hidden_size if bidirectional else lstm_hidden_size
self.linear_layer = torch.nn.Linear(in_features=lstm_hidden_size, out_features=self._num_classes)
@typecheck()
def forward(self, encoder_output):
output = encoder_output.transpose(1, 2)
output, _ = self.lstm_layer(output)
output = self.linear_layer(output)
return torch.nn.functional.log_softmax(output, dim=-1)
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(max_batch, self._feat_in, max_dim).to(next(self.parameters()).device)
return tuple([input_example])
@property
def vocabulary(self):
return self.__vocabulary
@property
def num_classes_with_blank(self):
return self._num_classes
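# --- Illustrative usage sketch (not part of the NeMo source): runs LSTMDecoder on a dummy
# encoder output. The feature size, vocabulary size, and sequence length are assumptions for
# the example only.
def _example_lstm_decoder():
    decoder = LSTMDecoder(feat_in=256, num_classes=28, lstm_hidden_size=128)
    encoder_output = torch.randn(4, 256, 50)            # (B, D, T) acoustic encoding
    log_probs = decoder(encoder_output=encoder_output)  # (B, T, num_classes + 1), includes the blank
    return log_probs.shape                              # expected: torch.Size([4, 50, 29])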
|
NeMo-main
|
nemo/collections/asr/modules/lstm_decoder.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.modules.audio_modules import MaskBasedBeamformer, MaskEstimatorRNN, MaskReferenceChannel
from nemo.collections.asr.modules.audio_preprocessing import (
AudioToMelSpectrogramPreprocessor,
AudioToMFCCPreprocessor,
AudioToSpectrogram,
CropOrPadSpectrogramAugmentation,
MaskedPatchAugmentation,
SpectrogramAugmentation,
SpectrogramToAudio,
)
from nemo.collections.asr.modules.beam_search_decoder import BeamSearchDecoderWithLM
from nemo.collections.asr.modules.conformer_encoder import ConformerEncoder, ConformerEncoderAdapter
from nemo.collections.asr.modules.conv_asr import (
ConvASRDecoder,
ConvASRDecoderClassification,
ConvASRDecoderReconstruction,
ConvASREncoder,
ConvASREncoderAdapter,
ECAPAEncoder,
ParallelConvASREncoder,
SpeakerDecoder,
)
from nemo.collections.asr.modules.graph_decoder import ViterbiDecoderWithGraph
from nemo.collections.asr.modules.hybrid_autoregressive_transducer import HATJoint
from nemo.collections.asr.modules.lstm_decoder import LSTMDecoder
from nemo.collections.asr.modules.msdd_diarizer import MSDD_module
from nemo.collections.asr.modules.rnn_encoder import RNNEncoder
from nemo.collections.asr.modules.rnnt import (
RNNTDecoder,
RNNTDecoderJointSSL,
RNNTJoint,
SampledRNNTJoint,
StatelessTransducerDecoder,
)
from nemo.collections.asr.modules.squeezeformer_encoder import SqueezeformerEncoder, SqueezeformerEncoderAdapter
|
NeMo-main
|
nemo/collections/asr/modules/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections import OrderedDict
from typing import List, Optional, Set
import torch
import torch.distributed
import torch.nn as nn
from omegaconf import DictConfig
from nemo.collections.asr.parts.submodules.multi_head_attention import PositionalEncoding, RelPositionalEncoding
from nemo.collections.asr.parts.submodules.squeezeformer_modules import SqueezeformerLayer
from nemo.collections.asr.parts.submodules.subsampling import ConvSubsampling, StackingSubsampling, TimeReductionModule
from nemo.collections.asr.parts.utils import adapter_utils
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.mixins import AccessMixin, adapter_mixins
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import AcousticEncodedRepresentation, LengthsType, NeuralType, SpectrogramType
__all__ = ['SqueezeformerEncoder']
class SqueezeformerEncoder(NeuralModule, Exportable, AccessMixin):
"""
The encoder for ASR model of Squeezeformer.
Based on this paper:
'Squeezeformer: An Efficient Transformer for Automatic Speech Recognition' by Sehoon Kim et al.
https://arxiv.org/abs/2206.00888
Args:
feat_in (int): the size of feature channels
n_layers (int): number of layers of ConformerBlock
d_model (int): the hidden size of the model
feat_out (int): the size of the output features
Defaults to -1 (means feat_out is d_model)
subsampling (str): the method of subsampling, choices=['vggnet', 'striding', 'dw_striding', 'stacking']
Defaults to dw_striding.
subsampling_factor (int): the subsampling factor which should be power of 2
Defaults to 4.
subsampling_conv_channels (int): the size of the convolutions in the subsampling module
Defaults to -1 which would set it to d_model.
ff_expansion_factor (int): the expansion factor in feed forward layers
Defaults to 4.
self_attention_model (str): type of the attention layer and positional encoding
'rel_pos': relative positional embedding and Transformer-XL
'abs_pos': absolute positional embedding and Transformer
default is rel_pos.
pos_emb_max_len (int): the maximum length of positional embeddings
Defaults to 5000.
n_heads (int): number of heads in multi-headed attention layers
Defaults to 4.
xscaling (bool): enables scaling the inputs to the multi-headed attention layers by sqrt(d_model)
Defaults to True.
untie_biases (bool): whether to not share (untie) the bias weights between layers of Transformer-XL
Defaults to True.
conv_kernel_size (int): the size of the convolutions in the convolutional modules
Defaults to 31.
conv_norm_type (str): the type of the normalization in the convolutional modules
Defaults to 'batch_norm'.
dropout (float): the dropout rate used in all layers except the attention layers
Defaults to 0.1.
dropout_emb (float): the dropout rate used for the positional embeddings
Defaults to 0.1.
dropout_att (float): the dropout rate used for the attention layer
Defaults to 0.0.
adaptive_scale (bool): Whether to scale the inputs to each component by affine `scale` and `bias` layer.
Or use a fixed scale=1 and bias=0.
time_reduce_idx (int): Optional integer index of a layer where a time reduction operation will occur.
All operations beyond this point will only occur at the reduced resolution.
time_recovery_idx (int): Optional integer index of a layer where the time recovery operation will occur.
All operations beyond this point will occur at the original resolution (resolution after
primary downsampling). If no value is provided, assumed to be the last layer.
"""
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
dev = next(self.parameters()).device
input_example = torch.randn(max_batch, self._feat_in, max_dim).to(dev)
input_example_length = torch.randint(1, max_dim, (max_batch,)).to(dev)
return tuple([input_example, input_example_length])
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
)
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
)
def __init__(
self,
feat_in: int,
n_layers: int,
d_model: int,
feat_out: int = -1,
subsampling: str = 'dw_striding',
subsampling_factor: int = 4,
subsampling_conv_channels: int = -1,
ff_expansion_factor: int = 4,
self_attention_model: str = 'rel_pos',
n_heads: int = 4,
att_context_size: Optional[List[int]] = None,
xscaling: bool = True,
untie_biases: bool = True,
pos_emb_max_len: int = 5000,
conv_kernel_size: int = 31,
conv_norm_type: str = 'batch_norm',
dropout: float = 0.1,
dropout_emb: float = 0.1,
dropout_att: float = 0.0,
adaptive_scale: bool = True,
time_reduce_idx: Optional[int] = None,
time_recovery_idx: Optional[int] = None,
):
super().__init__()
d_ff = d_model * ff_expansion_factor
self.d_model = d_model
self._feat_in = feat_in
if att_context_size:
self.att_context_size = att_context_size
else:
self.att_context_size = [-1, -1]
if xscaling:
self.xscale = math.sqrt(d_model)
else:
self.xscale = None
self.adaptive_scale = adaptive_scale
self.time_reduce_idx = time_reduce_idx
if time_reduce_idx is not None:
if time_recovery_idx is None:
self.time_recovery_idx = n_layers - 1 # recover at last layer
else:
self.time_recovery_idx = time_recovery_idx # recover at given layer
if self.time_reduce_idx is not None:
if self.time_reduce_idx < 0 or self.time_reduce_idx >= n_layers:
raise ValueError(f"Time reduce index must lie between [0, {n_layers})")
if self.time_recovery_idx < 0 or self.time_recovery_idx >= n_layers:
raise ValueError(f"Time recovery index must lie between [0, {n_layers})")
if subsampling_conv_channels == -1:
subsampling_conv_channels = d_model
if subsampling and subsampling_factor > 1:
if subsampling == 'stacking':
self.pre_encode = StackingSubsampling(
subsampling_factor=subsampling_factor, feat_in=feat_in, feat_out=d_model
)
else:
self.pre_encode = ConvSubsampling(
subsampling=subsampling,
subsampling_factor=subsampling_factor,
feat_in=feat_in,
feat_out=d_model,
conv_channels=subsampling_conv_channels,
activation=nn.ReLU(),
)
# For Squeezeformer, initialize the parameters as required.
self.pre_encode.reset_parameters()
else:
self.pre_encode = nn.Linear(feat_in, d_model)
self._feat_out = d_model
if not untie_biases and self_attention_model == "rel_pos":
d_head = d_model // n_heads
pos_bias_u = nn.Parameter(torch.Tensor(n_heads, d_head))
pos_bias_v = nn.Parameter(torch.Tensor(n_heads, d_head))
nn.init.zeros_(pos_bias_u)
nn.init.zeros_(pos_bias_v)
else:
pos_bias_u = None
pos_bias_v = None
self.pos_emb_max_len = pos_emb_max_len
if self_attention_model == "rel_pos":
self.pos_enc = RelPositionalEncoding(
d_model=d_model,
dropout_rate=dropout,
max_len=pos_emb_max_len,
xscale=self.xscale,
dropout_rate_emb=dropout_emb,
)
elif self_attention_model == "abs_pos":
pos_bias_u = None
pos_bias_v = None
self.pos_enc = PositionalEncoding(
d_model=d_model, dropout_rate=dropout, max_len=pos_emb_max_len, xscale=self.xscale
)
else:
raise ValueError(f"Not valid self_attention_model: '{self_attention_model}'!")
self.layers = nn.ModuleList()
for i in range(n_layers):
layer = SqueezeformerLayer(
d_model=d_model,
d_ff=d_ff,
self_attention_model=self_attention_model,
n_heads=n_heads,
conv_kernel_size=conv_kernel_size,
conv_norm_type=conv_norm_type,
dropout=dropout,
dropout_att=dropout_att,
pos_bias_u=pos_bias_u,
pos_bias_v=pos_bias_v,
adaptive_scale=adaptive_scale,
)
self.layers.append(layer)
# Time Reduction and Recovery layer setup
self.time_reduce_layer = None
self.time_recovery_layer = None
self.time_reduce_pos_enc = None
# Add time reduction layer
if self.time_reduce_idx is not None:
self.time_reduce_layer = TimeReductionModule(d_model, d_model, kernel_size=5, stride=2)
self.time_recovery_layer = nn.Linear(d_model, d_model)
# Choose the same type of positional encoding as determined above
if self_attention_model == "rel_pos":
self.time_reduce_pos_enc = RelPositionalEncoding(
d_model=d_model, dropout_rate=0.0, max_len=pos_emb_max_len, xscale=None, dropout_rate_emb=0.0,
)
else:
self.time_reduce_pos_enc = PositionalEncoding(
d_model=d_model, dropout_rate=0.0, max_len=pos_emb_max_len, xscale=None, dropout_rate_emb=0.0
)
self.pre_ln = nn.LayerNorm(d_model)
if feat_out > 0 and feat_out != self._feat_out:
self.out_proj = nn.Linear(self._feat_out, feat_out)
self._feat_out = feat_out
else:
self.out_proj = None
self._feat_out = d_model
self.set_max_audio_length(self.pos_emb_max_len)
self.use_pad_mask = True
# will be set in self.forward() if defined in AccessMixin config
self.interctc_capture_at_layers = None
def set_max_audio_length(self, max_audio_length):
""" Sets maximum input length.
Pre-calculates internal seq_range mask.
"""
self.max_audio_length = max_audio_length
device = next(self.parameters()).device
seq_range = torch.arange(0, self.max_audio_length, device=device)
if hasattr(self, 'seq_range'):
self.seq_range = seq_range
else:
self.register_buffer('seq_range', seq_range, persistent=False)
self.pos_enc.extend_pe(max_audio_length, device)
if self.time_reduce_pos_enc is not None:
self.time_reduce_pos_enc.extend_pe(max_audio_length, device)
@typecheck()
def forward(self, audio_signal, length=None):
self.update_max_seq_length(seq_length=audio_signal.size(2), device=audio_signal.device)
return self.forward_for_export(audio_signal=audio_signal, length=length)
@typecheck()
def forward_for_export(self, audio_signal, length):
max_audio_length: int = audio_signal.size(-1)
if max_audio_length > self.max_audio_length:
self.set_max_audio_length(max_audio_length)
if length is None:
length = audio_signal.new_full(
(audio_signal.size(0),), max_audio_length, dtype=torch.int32, device=self.seq_range.device
)
audio_signal = torch.transpose(audio_signal, 1, 2)
if isinstance(self.pre_encode, nn.Linear):
audio_signal = self.pre_encode(audio_signal)
else:
audio_signal, length = self.pre_encode(audio_signal, length)
audio_signal, pos_emb = self.pos_enc(audio_signal)
# adjust size
max_audio_length = audio_signal.size(1)
# Create the self-attention and padding masks
pad_mask = self.make_pad_mask(max_audio_length, length)
att_mask = pad_mask.unsqueeze(1).repeat([1, max_audio_length, 1])
att_mask = torch.logical_and(att_mask, att_mask.transpose(1, 2))
if self.att_context_size[0] >= 0:
att_mask = att_mask.triu(diagonal=-self.att_context_size[0])
if self.att_context_size[1] >= 0:
att_mask = att_mask.tril(diagonal=self.att_context_size[1])
att_mask = ~att_mask
if self.use_pad_mask:
pad_mask = ~pad_mask
else:
pad_mask = None
# Create cache of activations for the time reduction step
# Note: NeMo codebase allows only a single time reduction step to occur
recovery_activation_cache = []
audio_signal = self.pre_ln(audio_signal)
for lth, layer in enumerate(self.layers):
# Perform time reduction
if self.time_reduce_layer is not None and lth == self.time_reduce_idx:
# Perform time reduction
recovery_activation_cache.append((audio_signal, att_mask, pad_mask, pos_emb))
audio_signal, att_mask, pad_mask = self.time_reduce_layer(
x=audio_signal, att_mask=att_mask, pad_mask=pad_mask
)
# Only update PE, not the original audio_signal
_, pos_emb = self.time_reduce_pos_enc(audio_signal)
# Perform time recovery
if self.time_recovery_layer is not None and lth == self.time_recovery_idx:
recovery_audio_signal, att_mask, pad_mask, pos_emb = recovery_activation_cache.pop(0)
# repeat interleaved values for 2x seq length
audio_signal = torch.repeat_interleave(audio_signal, repeats=2, dim=1)
B, T, D = recovery_audio_signal.size()
audio_signal = audio_signal[:, :T, :] # Slice off the exact T timesteps as original cache value
audio_signal = self.time_recovery_layer(audio_signal) # learn non linear mapping
audio_signal = recovery_audio_signal + audio_signal # learn just the residual
audio_signal = layer(x=audio_signal, att_mask=att_mask, pos_emb=pos_emb, pad_mask=pad_mask)
# saving tensors if required for interctc loss
if self.is_access_enabled():
if self.interctc_capture_at_layers is None:
self.interctc_capture_at_layers = self.access_cfg.get('interctc', {}).get('capture_layers', [])
if lth in self.interctc_capture_at_layers:
lth_audio_signal = audio_signal
if self.out_proj is not None:
lth_audio_signal = self.out_proj(audio_signal)
# shape is the same as the shape of audio_signal output, i.e. [B, D, T]
self.register_accessible_tensor(
name=f'interctc/layer_output_{lth}', tensor=torch.transpose(lth_audio_signal, 1, 2)
)
self.register_accessible_tensor(name=f'interctc/layer_length_{lth}', tensor=length)
if self.out_proj is not None:
audio_signal = self.out_proj(audio_signal)
audio_signal = torch.transpose(audio_signal, 1, 2)
return audio_signal, length
def update_max_seq_length(self, seq_length: int, device):
# Find global max audio length across all nodes
if torch.distributed.is_initialized():
global_max_len = torch.tensor([seq_length], dtype=torch.float32, device=device)
# Update across all ranks in the distributed system
torch.distributed.all_reduce(global_max_len, op=torch.distributed.ReduceOp.MAX)
seq_length = global_max_len.int().item()
if seq_length > self.max_audio_length:
self.set_max_audio_length(seq_length)
def make_pad_mask(self, max_audio_length, seq_lens):
"""Make masking for padding."""
mask = self.seq_range[:max_audio_length].expand(seq_lens.size(0), -1) < seq_lens.unsqueeze(-1)
return mask
def enable_pad_mask(self, on=True):
# At inference, the user may choose to disable the pad mask
mask = self.use_pad_mask
self.use_pad_mask = on
return mask
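# --- Illustrative usage sketch (not part of the NeMo source): builds a tiny
# SqueezeformerEncoder and runs one forward pass. All hyperparameters below are arbitrary
# assumptions chosen to keep the example small, not a recommended configuration.
def _example_squeezeformer_encoder():
    encoder = SqueezeformerEncoder(feat_in=80, n_layers=2, d_model=128, n_heads=4)
    audio_signal = torch.randn(2, 80, 160)              # (B, D, T) mel-spectrogram features
    length = torch.tensor([160, 120])
    encoded, encoded_len = encoder(audio_signal=audio_signal, length=length)
    return encoded.shape, encoded_len                   # (B, d_model, T / subsampling_factor)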
class SqueezeformerEncoderAdapter(SqueezeformerEncoder, adapter_mixins.AdapterModuleMixin):
# Higher level forwarding
def add_adapter(self, name: str, cfg: dict):
cfg = self._update_adapter_cfg_input_dim(cfg)
for conformer_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
conformer_layer.add_adapter(name, cfg)
def is_adapter_available(self) -> bool:
return any([conformer_layer.is_adapter_available() for conformer_layer in self.layers])
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
for conformer_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
conformer_layer.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
names = set([])
for conformer_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
names.update(conformer_layer.get_enabled_adapters())
names = sorted(list(names))
return names
def _update_adapter_cfg_input_dim(self, cfg: DictConfig):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=self.d_model)
return cfg
def get_accepted_adapter_types(self,) -> Set[type]:
types = super().get_accepted_adapter_types()
if len(types) == 0:
self.set_accepted_adapter_types(
[
adapter_utils.LINEAR_ADAPTER_CLASSPATH,
adapter_utils.MHA_ADAPTER_CLASSPATH,
adapter_utils.RELMHA_ADAPTER_CLASSPATH,
]
)
types = self.get_accepted_adapter_types()
return types
"""
Register any additional information
"""
if adapter_mixins.get_registered_adapter(SqueezeformerEncoder) is None:
adapter_mixins.register_adapter(base_class=SqueezeformerEncoder, adapter_class=SqueezeformerEncoderAdapter)
|
NeMo-main
|
nemo/collections/asr/modules/squeezeformer_encoder.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import List, Optional, Set, Union
import torch
import torch.distributed
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import MISSING, DictConfig, ListConfig, OmegaConf
from nemo.collections.asr.parts.submodules.jasper import (
JasperBlock,
MaskedConv1d,
ParallelBlock,
SqueezeExcite,
init_weights,
jasper_activations,
)
from nemo.collections.asr.parts.submodules.tdnn_attention import (
AttentivePoolLayer,
StatsPoolLayer,
TDNNModule,
TDNNSEModule,
)
from nemo.collections.asr.parts.utils import adapter_utils
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.mixins import AccessMixin, adapter_mixins
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import (
AcousticEncodedRepresentation,
LengthsType,
LogitsType,
LogprobsType,
NeuralType,
SpectrogramType,
)
from nemo.utils import logging
__all__ = ['ConvASRDecoder', 'ConvASREncoder', 'ConvASRDecoderClassification']
class ConvASREncoder(NeuralModule, Exportable, AccessMixin):
"""
Convolutional encoder for ASR models. With this class you can implement JasperNet and QuartzNet models.
Based on these papers:
https://arxiv.org/pdf/1904.03288.pdf
https://arxiv.org/pdf/1910.10261.pdf
"""
def _prepare_for_export(self, **kwargs):
m_count = 0
for name, m in self.named_modules():
if isinstance(m, MaskedConv1d):
m.use_mask = False
m_count += 1
Exportable._prepare_for_export(self, **kwargs)
logging.warning(f"Turned off {m_count} masked convolutions")
def input_example(self, max_batch=1, max_dim=8192):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
device = next(self.parameters()).device
input_example = torch.randn(max_batch, self._feat_in, max_dim, device=device)
lens = torch.full(size=(input_example.shape[0],), fill_value=max_dim, device=device)
return tuple([input_example, lens])
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
)
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
)
def __init__(
self,
jasper,
activation: str,
feat_in: int,
normalization_mode: str = "batch",
residual_mode: str = "add",
norm_groups: int = -1,
conv_mask: bool = True,
frame_splicing: int = 1,
init_mode: Optional[str] = 'xavier_uniform',
quantize: bool = False,
):
super().__init__()
if isinstance(jasper, ListConfig):
jasper = OmegaConf.to_container(jasper)
activation = jasper_activations[activation]()
# If the activation can be executed in place, do so.
if hasattr(activation, 'inplace'):
activation.inplace = True
feat_in = feat_in * frame_splicing
self._feat_in = feat_in
residual_panes = []
encoder_layers = []
self.dense_residual = False
for layer_idx, lcfg in enumerate(jasper):
dense_res = []
if lcfg.get('residual_dense', False):
residual_panes.append(feat_in)
dense_res = residual_panes
self.dense_residual = True
groups = lcfg.get('groups', 1)
separable = lcfg.get('separable', False)
heads = lcfg.get('heads', -1)
residual_mode = lcfg.get('residual_mode', residual_mode)
se = lcfg.get('se', False)
se_reduction_ratio = lcfg.get('se_reduction_ratio', 8)
se_context_window = lcfg.get('se_context_size', -1)
se_interpolation_mode = lcfg.get('se_interpolation_mode', 'nearest')
kernel_size_factor = lcfg.get('kernel_size_factor', 1.0)
stride_last = lcfg.get('stride_last', False)
future_context = lcfg.get('future_context', -1)
encoder_layers.append(
JasperBlock(
feat_in,
lcfg['filters'],
repeat=lcfg['repeat'],
kernel_size=lcfg['kernel'],
stride=lcfg['stride'],
dilation=lcfg['dilation'],
dropout=lcfg['dropout'],
residual=lcfg['residual'],
groups=groups,
separable=separable,
heads=heads,
residual_mode=residual_mode,
normalization=normalization_mode,
norm_groups=norm_groups,
activation=activation,
residual_panes=dense_res,
conv_mask=conv_mask,
se=se,
se_reduction_ratio=se_reduction_ratio,
se_context_window=se_context_window,
se_interpolation_mode=se_interpolation_mode,
kernel_size_factor=kernel_size_factor,
stride_last=stride_last,
future_context=future_context,
quantize=quantize,
layer_idx=layer_idx,
)
)
feat_in = lcfg['filters']
self._feat_out = feat_in
self.encoder = torch.nn.Sequential(*encoder_layers)
self.apply(lambda x: init_weights(x, mode=init_mode))
self.max_audio_length = 0
@typecheck()
def forward(self, audio_signal, length):
self.update_max_sequence_length(seq_length=audio_signal.size(2), device=audio_signal.device)
s_input, length = self.encoder(([audio_signal], length))
if length is None:
return s_input[-1]
return s_input[-1], length
def update_max_sequence_length(self, seq_length: int, device):
# Find global max audio length across all nodes
if torch.distributed.is_initialized():
global_max_len = torch.tensor([seq_length], dtype=torch.float32, device=device)
# Update across all ranks in the distributed system
torch.distributed.all_reduce(global_max_len, op=torch.distributed.ReduceOp.MAX)
seq_length = global_max_len.int().item()
if seq_length > self.max_audio_length:
if seq_length < 5000:
seq_length = seq_length * 2
elif seq_length < 10000:
seq_length = seq_length * 1.5
self.max_audio_length = seq_length
device = next(self.parameters()).device
seq_range = torch.arange(0, self.max_audio_length, device=device)
if hasattr(self, 'seq_range'):
self.seq_range = seq_range
else:
self.register_buffer('seq_range', seq_range, persistent=False)
# Update all submodules
for name, m in self.named_modules():
if isinstance(m, MaskedConv1d):
m.update_masked_length(self.max_audio_length, seq_range=self.seq_range)
elif isinstance(m, SqueezeExcite):
m.set_max_len(self.max_audio_length, seq_range=self.seq_range)
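# --- Illustrative usage sketch (not part of the NeMo source): builds a single-block
# ConvASREncoder from a minimal Jasper-style config. The block parameters are assumptions for
# the example, not a recommended architecture.
def _example_conv_asr_encoder():
    jasper_cfg = [
        {
            'filters': 256,
            'repeat': 1,
            'kernel': [11],
            'stride': [1],
            'dilation': [1],
            'dropout': 0.0,
            'residual': False,
        }
    ]
    encoder = ConvASREncoder(jasper=jasper_cfg, activation='relu', feat_in=64)
    audio_signal = torch.randn(2, 64, 200)              # (B, D, T) input features
    length = torch.tensor([200, 150])
    encoded, encoded_len = encoder(audio_signal=audio_signal, length=length)
    return encoded.shape, encoded_len                   # (B, 256, T') and per-sample encoded lengths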
class ParallelConvASREncoder(NeuralModule, Exportable):
"""
Convolutional encoder for ASR models with parallel blocks. CarneliNet can be implemented with this class.
"""
def _prepare_for_export(self):
m_count = 0
for m in self.modules():
if isinstance(m, MaskedConv1d):
m.use_mask = False
m_count += 1
logging.warning(f"Turned off {m_count} masked convolutions")
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(max_batch, self._feat_in, max_dim).to(next(self.parameters()).device)
return tuple([input_example])
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
return set(["length"])
@property
def disabled_deployment_output_names(self):
"""Implement this method to return a set of output names disabled for export"""
return set(["encoded_lengths"])
def save_to(self, save_path: str):
pass
@classmethod
def restore_from(cls, restore_path: str):
pass
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
)
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
)
def __init__(
self,
jasper,
activation: str,
feat_in: int,
normalization_mode: str = "batch",
residual_mode: str = "add",
norm_groups: int = -1,
conv_mask: bool = True,
frame_splicing: int = 1,
init_mode: Optional[str] = 'xavier_uniform',
aggregation_mode: Optional[str] = None,
quantize: bool = False,
):
super().__init__()
if isinstance(jasper, ListConfig):
jasper = OmegaConf.to_container(jasper)
activation = jasper_activations[activation]()
feat_in = feat_in * frame_splicing
self._feat_in = feat_in
residual_panes = []
encoder_layers = []
self.dense_residual = False
for lcfg in jasper:
dense_res = []
if lcfg.get('residual_dense', False):
residual_panes.append(feat_in)
dense_res = residual_panes
self.dense_residual = True
groups = lcfg.get('groups', 1)
separable = lcfg.get('separable', False)
heads = lcfg.get('heads', -1)
residual_mode = lcfg.get('residual_mode', residual_mode)
se = lcfg.get('se', False)
se_reduction_ratio = lcfg.get('se_reduction_ratio', 8)
se_context_window = lcfg.get('se_context_size', -1)
se_interpolation_mode = lcfg.get('se_interpolation_mode', 'nearest')
kernel_size_factor = lcfg.get('kernel_size_factor', 1.0)
stride_last = lcfg.get('stride_last', False)
aggregation_mode = lcfg.get('aggregation_mode', 'sum')
block_dropout = lcfg.get('block_dropout', 0.0)
parallel_residual_mode = lcfg.get('parallel_residual_mode', 'sum')
parallel_blocks = []
for kernel_size in lcfg['kernel']:
parallel_blocks.append(
JasperBlock(
feat_in,
lcfg['filters'],
repeat=lcfg['repeat'],
kernel_size=[kernel_size],
stride=lcfg['stride'],
dilation=lcfg['dilation'],
dropout=lcfg['dropout'],
residual=lcfg['residual'],
groups=groups,
separable=separable,
heads=heads,
residual_mode=residual_mode,
normalization=normalization_mode,
norm_groups=norm_groups,
activation=activation,
residual_panes=dense_res,
conv_mask=conv_mask,
se=se,
se_reduction_ratio=se_reduction_ratio,
se_context_window=se_context_window,
se_interpolation_mode=se_interpolation_mode,
kernel_size_factor=kernel_size_factor,
stride_last=stride_last,
quantize=quantize,
)
)
if len(parallel_blocks) == 1:
encoder_layers.append(parallel_blocks[0])
else:
encoder_layers.append(
ParallelBlock(
parallel_blocks,
aggregation_mode=aggregation_mode,
block_dropout_prob=block_dropout,
residual_mode=parallel_residual_mode,
in_filters=feat_in,
out_filters=lcfg['filters'],
)
)
feat_in = lcfg['filters']
self._feat_out = feat_in
self.encoder = torch.nn.Sequential(*encoder_layers)
self.apply(lambda x: init_weights(x, mode=init_mode))
@typecheck()
def forward(self, audio_signal, length=None):
s_input, length = self.encoder(([audio_signal], length))
if length is None:
return s_input[-1]
return s_input[-1], length
class ConvASRDecoder(NeuralModule, Exportable, adapter_mixins.AdapterModuleMixin):
"""Simple ASR Decoder for use with CTC-based models such as JasperNet and QuartzNet
Based on these papers:
https://arxiv.org/pdf/1904.03288.pdf
https://arxiv.org/pdf/1910.10261.pdf
https://arxiv.org/pdf/2005.04290.pdf
"""
@property
def input_types(self):
return OrderedDict({"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation())})
@property
def output_types(self):
return OrderedDict({"logprobs": NeuralType(('B', 'T', 'D'), LogprobsType())})
def __init__(self, feat_in, num_classes, init_mode="xavier_uniform", vocabulary=None):
super().__init__()
if vocabulary is None and num_classes < 0:
raise ValueError(
f"Neither of the vocabulary and num_classes are set! At least one of them need to be set."
)
if num_classes <= 0:
num_classes = len(vocabulary)
logging.info(f"num_classes of ConvASRDecoder is set to the size of the vocabulary: {num_classes}.")
if vocabulary is not None:
if num_classes != len(vocabulary):
raise ValueError(
f"If vocabulary is specified, it's length should be equal to the num_classes. Instead got: num_classes={num_classes} and len(vocabulary)={len(vocabulary)}"
)
self.__vocabulary = vocabulary
self._feat_in = feat_in
# Add 1 for blank char
self._num_classes = num_classes + 1
self.decoder_layers = torch.nn.Sequential(
torch.nn.Conv1d(self._feat_in, self._num_classes, kernel_size=1, bias=True)
)
self.apply(lambda x: init_weights(x, mode=init_mode))
accepted_adapters = [adapter_utils.LINEAR_ADAPTER_CLASSPATH]
self.set_accepted_adapter_types(accepted_adapters)
# to change, requires running ``model.temperature = T`` explicitly
self.temperature = 1.0
@typecheck()
def forward(self, encoder_output):
# Adapter module forward step
if self.is_adapter_available():
encoder_output = encoder_output.transpose(1, 2) # [B, T, C]
encoder_output = self.forward_enabled_adapters(encoder_output)
encoder_output = encoder_output.transpose(1, 2) # [B, C, T]
if self.temperature != 1.0:
return torch.nn.functional.log_softmax(
self.decoder_layers(encoder_output).transpose(1, 2) / self.temperature, dim=-1
)
return torch.nn.functional.log_softmax(self.decoder_layers(encoder_output).transpose(1, 2), dim=-1)
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(max_batch, self._feat_in, max_dim).to(next(self.parameters()).device)
return tuple([input_example])
def _prepare_for_export(self, **kwargs):
m_count = 0
for m in self.modules():
if type(m).__name__ == "MaskedConv1d":
m.use_mask = False
m_count += 1
if m_count > 0:
logging.warning(f"Turned off {m_count} masked convolutions")
Exportable._prepare_for_export(self, **kwargs)
# Adapter method overrides
def add_adapter(self, name: str, cfg: DictConfig):
# Update the config with correct input dim
cfg = self._update_adapter_cfg_input_dim(cfg)
# Add the adapter
super().add_adapter(name=name, cfg=cfg)
def _update_adapter_cfg_input_dim(self, cfg: DictConfig):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=self._feat_in)
return cfg
@property
def vocabulary(self):
return self.__vocabulary
@property
def num_classes_with_blank(self):
return self._num_classes
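# --- Illustrative usage sketch (not part of the NeMo source): runs the CTC decoder on a dummy
# encoder output. Feature and vocabulary sizes are arbitrary assumptions for the example.
def _example_conv_asr_decoder():
    decoder = ConvASRDecoder(feat_in=256, num_classes=28)
    encoder_output = torch.randn(4, 256, 50)             # (B, D, T) acoustic encoding
    log_probs = decoder(encoder_output=encoder_output)   # (B, T, num_classes + 1), includes the blank
    return log_probs.shape                                # expected: torch.Size([4, 50, 29])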
class ConvASRDecoderReconstruction(NeuralModule, Exportable):
"""ASR Decoder for reconstructing masked regions of spectrogram
"""
@property
def input_types(self):
return OrderedDict({"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation())})
@property
def output_types(self):
if self.apply_softmax:
return OrderedDict({"out": NeuralType(('B', 'T', 'D'), LogprobsType())})
else:
return OrderedDict({"out": NeuralType(('B', 'T', 'D'), AcousticEncodedRepresentation())})
def __init__(
self,
feat_in,
feat_out,
feat_hidden,
stride_layers=0,
non_stride_layers=0,
kernel_size=11,
init_mode="xavier_uniform",
activation="relu",
stride_transpose=True,
apply_softmax=False,
):
super().__init__()
if ((stride_layers + non_stride_layers) > 0) and (kernel_size < 3 or kernel_size % 2 == 0):
raise ValueError("Kernel size in this decoder needs to be >= 3 and odd when using at least 1 conv layer.")
activation = jasper_activations[activation]()
self.feat_in = feat_in
self.feat_out = feat_out
self.feat_hidden = feat_hidden
self.decoder_layers = [nn.Conv1d(self.feat_in, self.feat_hidden, kernel_size=1, bias=True)]
for i in range(stride_layers):
self.decoder_layers.append(activation)
if stride_transpose:
self.decoder_layers.append(
nn.ConvTranspose1d(
self.feat_hidden,
self.feat_hidden,
kernel_size,
stride=2,
padding=(kernel_size - 3) // 2 + 1,
output_padding=1,
bias=True,
groups=self.feat_hidden,
)
)
else:
self.decoder_layers.append(
nn.Conv1d(
self.feat_hidden,
self.feat_hidden,
kernel_size,
stride=2,
padding=(kernel_size - 1) // 2,
bias=True,
groups=self.feat_hidden,
)
)
self.decoder_layers.append(nn.Conv1d(self.feat_hidden, self.feat_hidden, kernel_size=1, bias=True))
self.decoder_layers.append(nn.BatchNorm1d(self.feat_hidden, eps=1e-3, momentum=0.1))
for i in range(non_stride_layers):
self.decoder_layers.append(activation)
self.decoder_layers.append(
nn.Conv1d(
self.feat_hidden,
self.feat_hidden,
kernel_size,
bias=True,
groups=self.feat_hidden,
padding=kernel_size // 2,
)
)
self.decoder_layers.append(nn.Conv1d(self.feat_hidden, self.feat_hidden, kernel_size=1, bias=True))
self.decoder_layers.append(nn.BatchNorm1d(self.feat_hidden, eps=1e-3, momentum=0.1))
self.decoder_layers.append(activation)
self.decoder_layers.append(nn.Conv1d(self.feat_hidden, self.feat_out, kernel_size=1, bias=True))
self.decoder_layers = nn.Sequential(*self.decoder_layers)
self.apply_softmax = apply_softmax
self.apply(lambda x: init_weights(x, mode=init_mode))
@typecheck()
def forward(self, encoder_output):
out = self.decoder_layers(encoder_output).transpose(-2, -1)
if self.apply_softmax:
out = torch.nn.functional.log_softmax(out, dim=-1)
return out
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(max_batch, self._feat_in, max_dim).to(next(self.parameters()).device)
return tuple([input_example])
def _prepare_for_export(self, **kwargs):
m_count = 0
for m in self.modules():
if type(m).__name__ == "MaskedConv1d":
m.use_mask = False
m_count += 1
if m_count > 0:
logging.warning(f"Turned off {m_count} masked convolutions")
Exportable._prepare_for_export(self, **kwargs)
class ConvASRDecoderClassification(NeuralModule, Exportable):
"""Simple ASR Decoder for use with classification models such as JasperNet and QuartzNet
Based on these papers:
https://arxiv.org/pdf/2005.04290.pdf
"""
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(max_batch, self._feat_in, max_dim).to(next(self.parameters()).device)
return tuple([input_example])
@property
def input_types(self):
return OrderedDict({"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation())})
@property
def output_types(self):
return OrderedDict({"logits": NeuralType(('B', 'D'), LogitsType())})
def __init__(
self,
feat_in: int,
num_classes: int,
init_mode: Optional[str] = "xavier_uniform",
return_logits: bool = True,
pooling_type='avg',
):
super().__init__()
self._feat_in = feat_in
self._return_logits = return_logits
self._num_classes = num_classes
if pooling_type == 'avg':
self.pooling = torch.nn.AdaptiveAvgPool1d(1)
elif pooling_type == 'max':
self.pooling = torch.nn.AdaptiveMaxPool1d(1)
else:
raise ValueError('Pooling type chosen is not valid. Must be either `avg` or `max`')
self.decoder_layers = torch.nn.Sequential(torch.nn.Linear(self._feat_in, self._num_classes, bias=True))
self.apply(lambda x: init_weights(x, mode=init_mode))
@typecheck()
def forward(self, encoder_output):
batch, in_channels, timesteps = encoder_output.size()
encoder_output = self.pooling(encoder_output).view(batch, in_channels) # [B, C]
logits = self.decoder_layers(encoder_output) # [B, num_classes]
if self._return_logits:
return logits
return torch.nn.functional.softmax(logits, dim=-1)
@property
def num_classes(self):
return self._num_classes
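# --- Illustrative usage sketch (not part of the NeMo source): shows the classification decoder
# pooling a variable-length encoding down to per-utterance logits. Sizes are assumptions for
# the example only.
def _example_classification_decoder():
    decoder = ConvASRDecoderClassification(feat_in=256, num_classes=35, pooling_type='avg')
    encoder_output = torch.randn(8, 256, 40)             # (B, D, T) acoustic encoding
    logits = decoder(encoder_output=encoder_output)      # (B, num_classes) pooled logits
    return logits.shape                                   # expected: torch.Size([8, 35])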
class ECAPAEncoder(NeuralModule, Exportable):
"""
Modified ECAPA encoder without the Res2Net module, for faster training and inference, which achieves
better accuracy on speaker diarization tasks.
Reference: ECAPA-TDNN Embeddings for Speaker Diarization (https://arxiv.org/pdf/2104.01466.pdf)
input:
feat_in: input feature shape (mel spec feature shape)
filters: list of filter shapes for SE_TDNN modules
kernel_sizes: list of kernel shapes for SE_TDNN modules
dilations: list of dilations for group conv se layer
scale: scale value used to group wider conv channels (default: 8)
output:
outputs : encoded output
output_length: masked output lengths
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
)
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
)
def __init__(
self,
feat_in: int,
filters: list,
kernel_sizes: list,
dilations: list,
scale: int = 8,
init_mode: str = 'xavier_uniform',
):
super().__init__()
self.layers = nn.ModuleList()
self.layers.append(TDNNModule(feat_in, filters[0], kernel_size=kernel_sizes[0], dilation=dilations[0]))
for i in range(len(filters) - 2):
self.layers.append(
TDNNSEModule(
filters[i],
filters[i + 1],
group_scale=scale,
se_channels=128,
kernel_size=kernel_sizes[i + 1],
dilation=dilations[i + 1],
)
)
self.feature_agg = TDNNModule(filters[-1], filters[-1], kernel_sizes[-1], dilations[-1])
self.apply(lambda x: init_weights(x, mode=init_mode))
def forward(self, audio_signal, length=None):
x = audio_signal
outputs = []
for layer in self.layers:
x = layer(x, length=length)
outputs.append(x)
x = torch.cat(outputs[1:], dim=1)
x = self.feature_agg(x)
return x, length
class SpeakerDecoder(NeuralModule, Exportable):
"""
Speaker Decoder creates the final neural layers that maps from the outputs
of Jasper Encoder to the embedding layer followed by speaker based softmax loss.
Args:
feat_in (int): Number of channels being input to this module
num_classes (int): Number of unique speakers in dataset
emb_sizes (list): shapes of intermediate embedding layers (speaker embeddings are taken from the first of these layers).
Defaults to [1024, 1024].
pool_mode (str): Pooling strategy type. Options are 'xvector', 'tap', 'attention'.
Defaults to 'xvector'.
xvector (mean and variance statistics pooling)
tap (temporal average pooling: just the mean)
attention (attention-based pooling)
init_mode (str): Describes how neural network parameters are
initialized. Options are ['xavier_uniform', 'xavier_normal',
'kaiming_uniform','kaiming_normal'].
Defaults to "xavier_uniform".
"""
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(max_batch, self.input_feat_in, max_dim).to(next(self.parameters()).device)
return tuple([input_example])
@property
def input_types(self):
return OrderedDict(
{
"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"length": NeuralType(('B',), LengthsType(), optional=True),
}
)
@property
def output_types(self):
return OrderedDict(
{
"logits": NeuralType(('B', 'D'), LogitsType()),
"embs": NeuralType(('B', 'D'), AcousticEncodedRepresentation()),
}
)
def __init__(
self,
feat_in: int,
num_classes: int,
emb_sizes: Optional[Union[int, list]] = 256,
pool_mode: str = 'xvector',
angular: bool = False,
attention_channels: int = 128,
init_mode: str = "xavier_uniform",
):
super().__init__()
self.angular = angular
self.emb_id = 2
bias = False if self.angular else True
emb_sizes = [emb_sizes] if type(emb_sizes) is int else emb_sizes
self._num_classes = num_classes
self.pool_mode = pool_mode.lower()
if self.pool_mode == 'xvector' or self.pool_mode == 'tap':
self._pooling = StatsPoolLayer(feat_in=feat_in, pool_mode=self.pool_mode)
affine_type = 'linear'
elif self.pool_mode == 'attention':
self._pooling = AttentivePoolLayer(inp_filters=feat_in, attention_channels=attention_channels)
affine_type = 'conv'
shapes = [self._pooling.feat_in]
for size in emb_sizes:
shapes.append(int(size))
emb_layers = []
for shape_in, shape_out in zip(shapes[:-1], shapes[1:]):
layer = self.affine_layer(shape_in, shape_out, learn_mean=False, affine_type=affine_type)
emb_layers.append(layer)
self.emb_layers = nn.ModuleList(emb_layers)
self.final = nn.Linear(shapes[-1], self._num_classes, bias=bias)
self.apply(lambda x: init_weights(x, mode=init_mode))
def affine_layer(
self, inp_shape, out_shape, learn_mean=True, affine_type='conv',
):
if affine_type == 'conv':
layer = nn.Sequential(
nn.BatchNorm1d(inp_shape, affine=True, track_running_stats=True),
nn.Conv1d(inp_shape, out_shape, kernel_size=1),
)
else:
layer = nn.Sequential(
nn.Linear(inp_shape, out_shape),
nn.BatchNorm1d(out_shape, affine=learn_mean, track_running_stats=True),
nn.ReLU(),
)
return layer
@typecheck()
def forward(self, encoder_output, length=None):
pool = self._pooling(encoder_output, length)
embs = []
for layer in self.emb_layers:
pool, emb = layer(pool), layer[: self.emb_id](pool)
embs.append(emb)
pool = pool.squeeze(-1)
if self.angular:
for W in self.final.parameters():
W = F.normalize(W, p=2, dim=1)
pool = F.normalize(pool, p=2, dim=1)
out = self.final(pool)
return out, embs[-1].squeeze(-1)
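# --- Illustrative usage sketch (not part of the NeMo source): runs SpeakerDecoder with the
# default statistics pooling on a dummy frame-level encoding. The feature size, number of
# speakers, and embedding size are assumptions for the example only.
def _example_speaker_decoder():
    decoder = SpeakerDecoder(feat_in=512, num_classes=100, emb_sizes=192, pool_mode='xvector')
    encoder_output = torch.randn(4, 512, 120)            # (B, D, T) frame-level features
    length = torch.tensor([120, 100, 80, 60])
    logits, embs = decoder(encoder_output=encoder_output, length=length)
    return logits.shape, embs.shape                       # (B, 100) logits and (B, 192) embeddings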
class ConvASREncoderAdapter(ConvASREncoder, adapter_mixins.AdapterModuleMixin):
# Higher level forwarding
def add_adapter(self, name: str, cfg: dict):
for jasper_block in self.encoder: # type: adapter_mixins.AdapterModuleMixin
cfg = self._update_adapter_cfg_input_dim(jasper_block, cfg)
jasper_block.set_accepted_adapter_types(self.get_accepted_adapter_types())
jasper_block.add_adapter(name, cfg)
def is_adapter_available(self) -> bool:
return any([jasper_block.is_adapter_available() for jasper_block in self.encoder])
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
for jasper_block in self.encoder: # type: adapter_mixins.AdapterModuleMixin
jasper_block.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
names = set([])
for jasper_block in self.encoder: # type: adapter_mixins.AdapterModuleMixin
names.update(jasper_block.get_enabled_adapters())
names = sorted(list(names))
return names
def _update_adapter_cfg_input_dim(self, block: JasperBlock, cfg):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=block.planes)
return cfg
def get_accepted_adapter_types(self,) -> Set[type]:
types = super().get_accepted_adapter_types()
if len(types) == 0:
self.set_accepted_adapter_types(
[adapter_utils.LINEAR_ADAPTER_CLASSPATH,]
)
types = self.get_accepted_adapter_types()
return types
@dataclass
class JasperEncoderConfig:
filters: int = MISSING
repeat: int = MISSING
kernel: List[int] = MISSING
stride: List[int] = MISSING
dilation: List[int] = MISSING
dropout: float = MISSING
residual: bool = MISSING
# Optional arguments
groups: int = 1
separable: bool = False
heads: int = -1
residual_mode: str = "add"
residual_dense: bool = False
se: bool = False
se_reduction_ratio: int = 8
se_context_size: int = -1
se_interpolation_mode: str = 'nearest'
kernel_size_factor: float = 1.0
stride_last: bool = False
@dataclass
class ConvASREncoderConfig:
_target_: str = 'nemo.collections.asr.modules.ConvASREncoder'
jasper: Optional[List[JasperEncoderConfig]] = field(default_factory=list)
activation: str = MISSING
feat_in: int = MISSING
normalization_mode: str = "batch"
residual_mode: str = "add"
norm_groups: int = -1
conv_mask: bool = True
frame_splicing: int = 1
init_mode: Optional[str] = "xavier_uniform"
@dataclass
class ConvASRDecoderConfig:
_target_: str = 'nemo.collections.asr.modules.ConvASRDecoder'
feat_in: int = MISSING
num_classes: int = MISSING
init_mode: Optional[str] = "xavier_uniform"
vocabulary: Optional[List[str]] = field(default_factory=list)
@dataclass
class ConvASRDecoderClassificationConfig:
_target_: str = 'nemo.collections.asr.modules.ConvASRDecoderClassification'
feat_in: int = MISSING
num_classes: int = MISSING
init_mode: Optional[str] = "xavier_uniform"
return_logits: bool = True
pooling_type: str = 'avg'
"""
Register any additional information
"""
if adapter_mixins.get_registered_adapter(ConvASREncoder) is None:
adapter_mixins.register_adapter(base_class=ConvASREncoder, adapter_class=ConvASREncoderAdapter)
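# A minimal illustrative sketch (added by an editor, not part of the original module) showing
# how one of the structured configs above might be materialized with OmegaConf. The feat_in and
# num_classes values below are arbitrary assumptions chosen only for illustration.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    example_decoder_cfg = OmegaConf.structured(ConvASRDecoderConfig(feat_in=1024, num_classes=28))
    print(OmegaConf.to_yaml(example_decoder_cfg))  # shows the resolved _target_ and defaults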
|
NeMo-main
|
nemo/collections/asr/modules/conv_asr.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from nemo.collections.asr.modules import rnnt
from nemo.collections.asr.parts.utils.rnnt_utils import HATJointOutput
from nemo.utils import logging
class HATJoint(rnnt.RNNTJoint):
"""A Hybrid Autoregressive Transducer Joint Network (HAT Joint Network).
A HAT Joint network, comprised of a feedforward model.
Args:
jointnet: A dict-like object which contains the following key-value pairs.
encoder_hidden: int specifying the hidden dimension of the encoder net.
pred_hidden: int specifying the hidden dimension of the prediction net.
joint_hidden: int specifying the hidden dimension of the joint net
activation: Activation function used in the joint step. Can be one of
['relu', 'tanh', 'sigmoid'].
Optionally, it may also contain the following:
dropout: float, set to 0.0 by default. Optional dropout applied at the end of the joint net.
num_classes: int, specifying the vocabulary size that the joint network must predict,
excluding the HAT blank token.
vocabulary: Optional list of strings/tokens that comprise the vocabulary of the joint network.
Unused and kept only for easy access for character based encoding HAT models.
log_softmax: Optional bool, set to None by default. If set to None, whether to apply log_softmax()
is decided automatically; if set to True or False, that value is used directly.
preserve_memory: Optional bool, set to False by default. If the model crashes due to the memory
intensive joint step, one might try this flag to empty the tensor cache in pytorch.
Warning: This will make the forward-backward pass much slower than normal.
It also might not fix the OOM if the GPU simply does not have enough memory to compute the joint.
fuse_loss_wer: Optional bool, set to False by default.
Fuses the joint forward, loss forward and
wer forward steps. In doing so, it trades off speed for memory conservation by creating sub-batches
of the provided batch of inputs, and performs Joint forward, loss forward and wer forward (optional),
all on sub-batches, then collates results to be exactly equal to results from the entire batch.
When this flag is set, prior to calling forward, the fields `loss` and `wer` (either one) *must*
be set using the `HATJoint.set_loss()` or `HATJoint.set_wer()` methods.
Further, when this flag is set, the following argument `fused_batch_size` *must* be provided
as a non-negative integer. This value refers to the size of the sub-batch.
When the flag is set, the input and output signature of `forward()` of this method changes.
Input - in addition to `encoder_outputs` (mandatory argument), the following arguments can be provided.
- decoder_outputs (optional). Required if loss computation is required.
- encoder_lengths (required)
- transcripts (optional). Required for wer calculation.
- transcript_lengths (optional). Required for wer calculation.
- compute_wer (bool, default false). Whether to compute WER or not for the fused batch.
Output - instead of the usual `joint` log prob tensor, the following results can be returned.
- loss (optional). Returned if decoder_outputs, transcripts and transcript_lengths are not None.
- wer_numerator + wer_denominator (optional). Returned if transcripts, transcripts_lengths are provided
and compute_wer is set.
fused_batch_size: Optional int, required if `fuse_loss_wer` flag is set. Determines the size of the
sub-batches. Should be any value below the actual batch size per GPU.
"""
def __init__(
self,
jointnet: Dict[str, Any],
num_classes: int,
num_extra_outputs: int = 0,
vocabulary: Optional[List] = None,
log_softmax: Optional[bool] = None,
preserve_memory: bool = False,
fuse_loss_wer: bool = False,
fused_batch_size: Optional[int] = None,
experimental_fuse_loss_wer: Any = None,
):
super().__init__(
jointnet=jointnet,
num_classes=num_classes,
num_extra_outputs=num_extra_outputs,
vocabulary=vocabulary,
log_softmax=log_softmax,
preserve_memory=preserve_memory,
fuse_loss_wer=fuse_loss_wer,
fused_batch_size=fused_batch_size,
experimental_fuse_loss_wer=experimental_fuse_loss_wer,
)
self.pred, self.enc, self.joint_net, self.blank_pred = self._joint_hat_net_modules(
num_classes=self._vocab_size, # non blank symbol
pred_n_hidden=self.pred_hidden,
enc_n_hidden=self.encoder_hidden,
joint_n_hidden=self.joint_hidden,
activation=self.activation,
dropout=jointnet.get('dropout', 0.0),
)
self._return_hat_ilm = False
@property
def return_hat_ilm(self):
return self._return_hat_ilm
@return_hat_ilm.setter
def return_hat_ilm(self, hat_subtract_ilm):
self._return_hat_ilm = hat_subtract_ilm
def joint(self, f: torch.Tensor, g: torch.Tensor) -> Union[torch.Tensor, HATJointOutput]:
"""
Compute the joint step of the network.
Here,
B = Batch size
T = Acoustic model timesteps
U = Target sequence length
H1, H2 = Hidden dimensions of the Encoder / Decoder respectively
H = Hidden dimension of the Joint hidden step.
V = Vocabulary size of the Decoder (excluding the HAT blank token).
NOTE:
The implementation of this model is slightly modified from the original paper.
The original paper proposes the following steps :
(enc, dec) -> Expand + Concat + Sum [B, T, U, H1+H2] -> Forward through joint hidden [B, T, U, H] -- *1
*1 -> Forward through joint final [B, T, U, V + 1].
We instead split the joint hidden into joint_hidden_enc and joint_hidden_dec and act as follows:
enc -> Forward through joint_hidden_enc -> Expand [B, T, 1, H] -- *1
dec -> Forward through joint_hidden_dec -> Expand [B, 1, U, H] -- *2
(*1, *2) -> Sum [B, T, U, H] -> Forward through joint final [B, T, U, V + 1].
Args:
f: Output of the Encoder model. A torch.Tensor of shape [B, T, H1]
g: Output of the Decoder model. A torch.Tensor of shape [B, U, H2]
Returns:
Log softmaxed tensor of shape (B, T, U, V + 1).
Internal LM probability (B, 1, U, V) -- in case return_hat_ilm == True.
"""
# f = [B, T, H1]
f = self.enc(f)
f.unsqueeze_(dim=2) # (B, T, 1, H)
# g = [B, U, H2]
g = self.pred(g)
g.unsqueeze_(dim=1) # (B, 1, U, H)
inp = f + g # [B, T, U, H]
del f
# Forward adapter modules on joint hidden
if self.is_adapter_available():
inp = self.forward_enabled_adapters(inp)
blank_logprob = self.blank_pred(inp) # [B, T, U, 1]
label_logit = self.joint_net(inp) # [B, T, U, V]
del inp
label_logprob = label_logit.log_softmax(dim=-1)
scale_prob = torch.clamp(1 - torch.exp(blank_logprob), min=1e-6)
label_logprob_scaled = torch.log(scale_prob) + label_logprob # [B, T, U, V]
res = torch.cat((label_logprob_scaled, blank_logprob), dim=-1).contiguous() # [B, T, U, V+1]
if self.return_hat_ilm:
ilm_logprobs = self.joint_net(g).log_softmax(dim=-1) # [B, 1, U, V]
res = HATJointOutput(hat_logprobs=res, ilm_logprobs=ilm_logprobs)
del g, blank_logprob, label_logprob, label_logit, scale_prob, label_logprob_scaled
if self.preserve_memory:
torch.cuda.empty_cache()
return res
def _joint_hat_net_modules(self, num_classes, pred_n_hidden, enc_n_hidden, joint_n_hidden, activation, dropout):
"""
Prepare the trainable modules of the Joint Network
Args:
num_classes: Number of output classes (vocab size) excluding the HAT blank token.
pred_n_hidden: Hidden size of the prediction network.
enc_n_hidden: Hidden size of the encoder network.
joint_n_hidden: Hidden size of the joint network.
activation: Activation of the joint. Can be one of [relu, tanh, sigmoid]
dropout: Dropout value to apply to joint.
"""
pred = torch.nn.Linear(pred_n_hidden, joint_n_hidden)
enc = torch.nn.Linear(enc_n_hidden, joint_n_hidden)
blank_pred = torch.nn.Sequential(
torch.nn.Tanh(), torch.nn.Dropout(p=dropout), torch.nn.Linear(joint_n_hidden, 1), torch.nn.LogSigmoid()
)
if activation not in ['relu', 'sigmoid', 'tanh']:
raise ValueError("Unsupported activation for joint step - please pass one of " "[relu, sigmoid, tanh]")
activation = activation.lower()
if activation == 'relu':
activation = torch.nn.ReLU(inplace=True)
elif activation == 'sigmoid':
activation = torch.nn.Sigmoid()
elif activation == 'tanh':
activation = torch.nn.Tanh()
layers = (
[activation]
+ ([torch.nn.Dropout(p=dropout)] if dropout else [])
+ [torch.nn.Linear(joint_n_hidden, num_classes)]
)
return pred, enc, torch.nn.Sequential(*layers), blank_pred
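# A minimal standalone sketch (added by an editor, not part of the original module) of the HAT
# factorization used in `HATJoint.joint`: a separate sigmoid head produces the blank
# log-probability, and the label log-probabilities are shifted by log(1 - p_blank) so that the
# concatenated distribution over [labels, blank] still sums to one. All sizes are arbitrary.
if __name__ == "__main__":
    B, T, U, V, H = 2, 3, 4, 5, 8
    joint_hidden = torch.randn(B, T, U, H)
    blank_head = torch.nn.Sequential(torch.nn.Tanh(), torch.nn.Linear(H, 1), torch.nn.LogSigmoid())
    label_head = torch.nn.Linear(H, V)

    blank_logprob = blank_head(joint_hidden)                      # [B, T, U, 1]
    label_logprob = label_head(joint_hidden).log_softmax(dim=-1)  # [B, T, U, V]
    scale = torch.log(torch.clamp(1 - torch.exp(blank_logprob), min=1e-6))
    hat_logprobs = torch.cat((label_logprob + scale, blank_logprob), dim=-1)  # [B, T, U, V + 1]
    print(hat_logprobs.exp().sum(dim=-1))  # approximately 1.0 everywhere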
|
NeMo-main
|
nemo/collections/asr/modules/hybrid_autoregressive_transducer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
from collections import OrderedDict
from dataclasses import dataclass
from typing import List, Optional, Set
import torch
import torch.distributed
import torch.nn as nn
from omegaconf import DictConfig, ListConfig, open_dict
from nemo.collections.asr.models.configs import CacheAwareStreamingConfig
from nemo.collections.asr.parts.mixins.streaming import StreamingEncoder
from nemo.collections.asr.parts.submodules.causal_convs import CausalConv1D
from nemo.collections.asr.parts.submodules.conformer_modules import ConformerLayer
from nemo.collections.asr.parts.submodules.multi_head_attention import (
LocalAttRelPositionalEncoding,
MultiHeadAttention,
PositionalEncoding,
RelPositionalEncoding,
RelPositionMultiHeadAttention,
RelPositionMultiHeadAttentionLongformer,
)
from nemo.collections.asr.parts.submodules.subsampling import (
ConvSubsampling,
StackingSubsampling,
SubsamplingReductionModule,
)
from nemo.collections.asr.parts.utils import adapter_utils
from nemo.collections.asr.parts.utils.regularization_utils import compute_stochastic_depth_drop_probs
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.mixins import AccessMixin, adapter_mixins
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import AcousticEncodedRepresentation, ChannelType, LengthsType, NeuralType, SpectrogramType
from nemo.utils import logging
__all__ = ['ConformerEncoder']
class ConformerEncoder(NeuralModule, StreamingEncoder, Exportable, AccessMixin):
"""
The encoder for ASR model of Conformer.
Based on this paper:
'Conformer: Convolution-augmented Transformer for Speech Recognition' by Anmol Gulati et al.
https://arxiv.org/abs/2005.08100
Args:
feat_in (int): the size of feature channels
n_layers (int): number of layers of ConformerBlock
d_model (int): the hidden size of the model
feat_out (int): the size of the output features
Defaults to -1 (means feat_out is d_model)
subsampling (str): the method of subsampling, choices=['vggnet', 'striding', 'dw-striding', 'stacking', 'stacking_norm']
Defaults to striding.
subsampling_factor (int): the subsampling factor which should be power of 2
Defaults to 4.
subsampling_conv_chunking_factor(int): optionally, force chunk inputs (helpful for large inputs)
Should be power of 2, 1 (auto-chunking, default), or -1 (no chunking)
subsampling_conv_channels (int): the size of the convolutions in the subsampling module
Defaults to -1 which would set it to d_model.
reduction (str, Optional): the method of reduction, choices=['pooling', 'striding']. If no value
is passed, then no reduction is performed and the model runs with the original 4x subsampling.
reduction_position (int, Optional): the index of the layer to apply reduction. If -1, apply reduction
at the end.
reduction_factor (int): the reduction factor which should be either 1 or a power of 2
Defaults to 1.
ff_expansion_factor (int): the expansion factor in feed forward layers
Defaults to 4.
self_attention_model (str): type of the attention layer and positional encoding
'rel_pos': relative positional embedding and Transformer-XL
'rel_pos_local_attn': relative positional embedding and Transformer-XL with local attention using
overlapping chunks. Attention context is determined by att_context_size parameter.
'abs_pos': absolute positional embedding and Transformer
Default is rel_pos.
pos_emb_max_len (int): the maximum length of positional embeddings
Defaults to 5000
n_heads (int): number of heads in multi-headed attention layers
Defaults to 4.
att_context_size (List[Union[List[int],int]]): specifies the context sizes on each side. Each context size should be a list of two integers like [100,100].
A list of context sizes like [[100,100],[100,50]] can also be passed. -1 means unlimited context.
Defaults to [-1,-1]
att_context_probs (List[float]): a list of probabilities, one per att_context_size entry, used when a list of context sizes is passed. If not specified, a uniform distribution is used.
Defaults to None
att_context_style (str): 'regular' or 'chunked_limited'.
Defaults to 'regular'
xscaling (bool): enables scaling the inputs to the multi-headed attention layers by sqrt(d_model)
Defaults to True.
untie_biases (bool): whether to not share (untie) the bias weights between layers of Transformer-XL
Defaults to True.
conv_kernel_size (int): the size of the convolutions in the convolutional modules
Defaults to 31.
conv_norm_type (str): the type of the normalization in the convolutional modules
Defaults to 'batch_norm'.
conv_context_size (list): it can be "causal" or a list of two integers where conv_context_size[0]+conv_context_size[1]+1==conv_kernel_size.
None means [(conv_kernel_size-1)//2, (conv_kernel_size-1)//2], and 'causal' means [(conv_kernel_size-1), 0].
Defaults to None.
conv_dual_mode (bool): specifies if convolution should be dual mode when dual_offline mode is being used. When enabled, the left half of the convolution kernel is masked in streaming cases.
Defaults to False
dropout (float): the dropout rate used in all layers except the attention layers
Defaults to 0.1.
dropout_pre_encoder (float): the dropout rate used before the encoder
Defaults to 0.1.
dropout_emb (float): the dropout rate used for the positional embeddings
Defaults to 0.1.
dropout_att (float): the dropout rate used for the attention layer
Defaults to 0.0.
stochastic_depth_drop_prob (float): if non-zero, will randomly drop
layers during training. The higher this value, the more often layers
are dropped. Defaults to 0.0.
stochastic_depth_mode (str): can be either "linear" or "uniform". If
set to "uniform", all layers have the same probability of drop. If
set to "linear", the drop probability grows linearly from 0 for the
first layer to the desired value for the final layer. Defaults to
"linear".
stochastic_depth_start_layer (int): starting layer for stochastic depth.
All layers before this will never be dropped. Note that drop
probability will be adjusted accordingly if mode is "linear" when
start layer is > 1. Defaults to 1.
global_tokens (int): number of tokens to be used for global attention.
Only relevant if self_attention_model is 'rel_pos_local_attn'.
Defaults to 0.
global_tokens_spacing (int): how far apart the global tokens are
Defaults to 1.
global_attn_separate (bool): whether the q, k, v layers used for global tokens should be separate.
Defaults to False.
"""
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
dev = next(self.parameters()).device
if self.export_cache_support:
window_size = max_dim
if self.streaming_cfg is not None:
if isinstance(self.streaming_cfg.chunk_size, list):
chunk_size = self.streaming_cfg.chunk_size[1]
else:
chunk_size = self.streaming_cfg.chunk_size
if isinstance(self.streaming_cfg.pre_encode_cache_size, list):
pre_encode_cache_size = self.streaming_cfg.pre_encode_cache_size[1]
else:
pre_encode_cache_size = self.streaming_cfg.pre_encode_cache_size
window_size = chunk_size + pre_encode_cache_size
input_example = torch.randn(max_batch, self._feat_in, window_size, device=dev)
input_example_length = torch.randint(
window_size // 4, window_size, (max_batch,), device=dev, dtype=torch.int64
)
cache_last_channel, cache_last_time, cache_last_channel_len = self.get_initial_cache_state(
batch_size=max_batch, device=dev, max_dim=max_dim
)
all_input_example = tuple(
[
input_example,
input_example_length,
cache_last_channel.transpose(0, 1),
cache_last_time.transpose(0, 1),
cache_last_channel_len,
]
)
else:
input_example = torch.randn(max_batch, self._feat_in, max_dim, device=dev)
input_example_length = torch.randint(max_dim // 4, max_dim, (max_batch,), device=dev, dtype=torch.int64)
all_input_example = tuple([input_example, input_example_length])
return all_input_example
@property
def input_types(self):
"""Returns definitions of module input ports."""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
"cache_last_channel": NeuralType(('D', 'B', 'T', 'D'), ChannelType(), optional=True),
"cache_last_time": NeuralType(('D', 'B', 'D', 'T'), ChannelType(), optional=True),
"cache_last_channel_len": NeuralType(tuple('B'), LengthsType(), optional=True),
}
)
@property
def input_types_for_export(self):
"""Returns definitions of module input ports."""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
"cache_last_channel": NeuralType(('B', 'D', 'T', 'D'), ChannelType(), optional=True),
"cache_last_time": NeuralType(('B', 'D', 'D', 'T'), ChannelType(), optional=True),
"cache_last_channel_len": NeuralType(tuple('B'), LengthsType(), optional=True),
}
)
@property
def output_types(self):
"""Returns definitions of module output ports."""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
"cache_last_channel_next": NeuralType(('D', 'B', 'T', 'D'), ChannelType(), optional=True),
"cache_last_time_next": NeuralType(('D', 'B', 'D', 'T'), ChannelType(), optional=True),
"cache_last_channel_next_len": NeuralType(tuple('B'), LengthsType(), optional=True),
}
)
@property
def output_types_for_export(self):
"""Returns definitions of module output ports."""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
"cache_last_channel_next": NeuralType(('B', 'D', 'T', 'D'), ChannelType(), optional=True),
"cache_last_time_next": NeuralType(('B', 'D', 'D', 'T'), ChannelType(), optional=True),
"cache_last_channel_next_len": NeuralType(tuple('B'), LengthsType(), optional=True),
}
)
@property
def disabled_deployment_input_names(self):
if not self.export_cache_support:
return set(["cache_last_channel", "cache_last_time", "cache_last_channel_len"])
else:
return set()
@property
def disabled_deployment_output_names(self):
if not self.export_cache_support:
return set(["cache_last_channel_next", "cache_last_time_next", "cache_last_channel_next_len"])
else:
return set()
def __init__(
self,
feat_in,
n_layers,
d_model,
feat_out=-1,
causal_downsampling=False,
subsampling='striding',
subsampling_factor=4,
subsampling_conv_chunking_factor=1,
subsampling_conv_channels=-1,
reduction=None,
reduction_position=None,
reduction_factor=1,
ff_expansion_factor=4,
self_attention_model='rel_pos',
n_heads=4,
att_context_size=None,
att_context_probs=None,
att_context_style='regular',
xscaling=True,
untie_biases=True,
pos_emb_max_len=5000,
conv_kernel_size=31,
conv_norm_type='batch_norm',
conv_context_size=None,
dropout=0.1,
dropout_pre_encoder=0.1,
dropout_emb=0.1,
dropout_att=0.0,
stochastic_depth_drop_prob: float = 0.0,
stochastic_depth_mode: str = "linear",
stochastic_depth_start_layer: int = 1,
global_tokens: int = 0,
global_tokens_spacing: int = 1,
global_attn_separate: bool = False,
):
super().__init__()
d_ff = d_model * ff_expansion_factor
self.d_model = d_model
self.n_layers = n_layers
self._feat_in = feat_in
self.att_context_style = att_context_style
self.subsampling_factor = subsampling_factor
self.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor
self.self_attention_model = self_attention_model
self.global_tokens = global_tokens
self.global_attn_separate = global_attn_separate
self.global_tokens_spacing = global_tokens_spacing
# Setting up the att_context_size
(
self.att_context_size_all,
self.att_context_size,
self.att_context_probs,
self.conv_context_size,
) = self._calc_context_sizes(
att_context_style=att_context_style,
att_context_size=att_context_size,
att_context_probs=att_context_probs,
conv_context_size=conv_context_size,
conv_kernel_size=conv_kernel_size,
)
if xscaling:
self.xscale = math.sqrt(d_model)
else:
self.xscale = None
# Subsampling
if subsampling_conv_channels == -1:
subsampling_conv_channels = d_model
if subsampling and subsampling_factor > 1:
if subsampling in ['stacking', 'stacking_norm']:
# stacking_norm has an extra layer norm after stacking, compared to plain stacking
self.pre_encode = StackingSubsampling(
subsampling_factor=subsampling_factor,
feat_in=feat_in,
feat_out=d_model,
norm=True if subsampling == 'stacking_norm' else False,
)
else:
self.pre_encode = ConvSubsampling(
subsampling=subsampling,
subsampling_factor=subsampling_factor,
feat_in=feat_in,
feat_out=d_model,
conv_channels=subsampling_conv_channels,
subsampling_conv_chunking_factor=subsampling_conv_chunking_factor,
activation=nn.ReLU(True),
is_causal=causal_downsampling,
)
else:
self.pre_encode = nn.Linear(feat_in, d_model)
# Reduction
if reduction and reduction_factor > 1:
assert reduction_position >= -1 and reduction_position < n_layers
self.reduction_subsampling = SubsamplingReductionModule(
reduction=reduction, d_model=d_model, reduction_factor=reduction_factor,
)
self.reduction_position = reduction_position
else:
self.reduction_subsampling = None
self.reduction_position = None
self._feat_out = d_model
# Biases for relative positional encoding
if not untie_biases and self_attention_model == "rel_pos":
d_head = d_model // n_heads
pos_bias_u = nn.Parameter(torch.Tensor(n_heads, d_head))
pos_bias_v = nn.Parameter(torch.Tensor(n_heads, d_head))
nn.init.zeros_(pos_bias_u)
nn.init.zeros_(pos_bias_v)
else:
pos_bias_u = None
pos_bias_v = None
# Positional encodings
self.pos_emb_max_len = pos_emb_max_len
if self_attention_model == "rel_pos":
self.pos_enc = RelPositionalEncoding(
d_model=d_model,
dropout_rate=dropout_pre_encoder,
max_len=pos_emb_max_len,
xscale=self.xscale,
dropout_rate_emb=dropout_emb,
)
elif self_attention_model == 'rel_pos_local_attn':
if max(att_context_size) <= 0:
raise ValueError("When using local attention, context size must be set > 0")
self.pos_enc = LocalAttRelPositionalEncoding(
att_context_size=att_context_size,
d_model=d_model,
dropout_rate=dropout,
max_len=pos_emb_max_len,
xscale=self.xscale,
dropout_rate_emb=dropout_emb,
)
elif self_attention_model == "abs_pos":
pos_bias_u = None
pos_bias_v = None
self.pos_enc = PositionalEncoding(
d_model=d_model, dropout_rate=dropout_pre_encoder, max_len=pos_emb_max_len, xscale=self.xscale
)
else:
raise ValueError(f"Not valid self_attention_model: '{self_attention_model}'!")
self.layers = nn.ModuleList()
for i in range(n_layers):
layer = ConformerLayer(
d_model=d_model,
d_ff=d_ff,
self_attention_model=self_attention_model,
global_tokens=global_tokens,
global_tokens_spacing=global_tokens_spacing,
global_attn_separate=global_attn_separate,
n_heads=n_heads,
conv_kernel_size=conv_kernel_size,
conv_norm_type=conv_norm_type,
conv_context_size=self.conv_context_size,
dropout=dropout,
dropout_att=dropout_att,
pos_bias_u=pos_bias_u,
pos_bias_v=pos_bias_v,
att_context_size=self.att_context_size,
)
self.layers.append(layer)
if feat_out > 0 and feat_out != self._feat_out:
self.out_proj = nn.Linear(self._feat_out, feat_out)
self._feat_out = feat_out
else:
self.out_proj = None
self._feat_out = d_model
self.set_max_audio_length(self.pos_emb_max_len)
self.use_pad_mask = True
self.setup_streaming_params()
self.export_cache_support = False
self.layer_drop_probs = compute_stochastic_depth_drop_probs(
len(self.layers), stochastic_depth_drop_prob, stochastic_depth_mode, stochastic_depth_start_layer
)
# will be set in self.forward() if defined in AccessMixin config
self.interctc_capture_at_layers = None
def forward_for_export(
self, audio_signal, length, cache_last_channel=None, cache_last_time=None, cache_last_channel_len=None
):
if cache_last_channel is not None:
cache_last_channel = cache_last_channel.transpose(0, 1)
cache_last_time = cache_last_time.transpose(0, 1)
rets = self.forward_internal(
audio_signal,
length,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
)
rets = self.streaming_post_process(rets, keep_all_outputs=False)
if len(rets) == 2:
return rets
elif rets[2] is None and rets[3] is None and rets[4] is None:
return (rets[0], rets[1])
else:
return (
rets[0],
rets[1],
rets[2].transpose(0, 1),
rets[3].transpose(0, 1),
rets[4],
)
def streaming_post_process(self, rets, keep_all_outputs=True):
if len(rets) == 2:
return rets[0], rets[1], None, None, None
(encoded, encoded_len, cache_last_channel_next, cache_last_time_next, cache_last_channel_next_len) = rets
if cache_last_channel_next is not None and self.streaming_cfg.last_channel_cache_size >= 0:
if self.streaming_cfg.last_channel_cache_size > 0:
cache_last_channel_next = cache_last_channel_next[
:, :, -self.streaming_cfg.last_channel_cache_size :, :
]
if self.streaming_cfg.valid_out_len > 0 and (not keep_all_outputs or self.att_context_style == "regular"):
encoded = encoded[:, :, : self.streaming_cfg.valid_out_len]
encoded_len = torch.clamp(encoded_len, max=self.streaming_cfg.valid_out_len)
return (encoded, encoded_len, cache_last_channel_next, cache_last_time_next, cache_last_channel_next_len)
@typecheck()
def forward(
self, audio_signal, length, cache_last_channel=None, cache_last_time=None, cache_last_channel_len=None
):
return self.forward_internal(
audio_signal,
length,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
)
def forward_internal(
self, audio_signal, length, cache_last_channel=None, cache_last_time=None, cache_last_channel_len=None
):
self.update_max_seq_length(seq_length=audio_signal.size(2), device=audio_signal.device)
if length is None:
length = audio_signal.new_full(
(audio_signal.size(0),), audio_signal.size(-1), dtype=torch.int64, device=audio_signal.device
)
# select a random att_context_size with the distribution specified by att_context_probs during training
# for non-training cases such as validation, test or inference, it uses the first mode in self.att_context_size
if self.training and len(self.att_context_size_all) > 1:
cur_att_context_size = random.choices(self.att_context_size_all, weights=self.att_context_probs)[0]
else:
cur_att_context_size = self.att_context_size
audio_signal = torch.transpose(audio_signal, 1, 2)
if isinstance(self.pre_encode, nn.Linear):
audio_signal = self.pre_encode(audio_signal)
else:
audio_signal, length = self.pre_encode(x=audio_signal, lengths=length)
length = length.to(torch.int64)
# self.streaming_cfg is set by setup_streaming_cfg(), called in the init
if self.streaming_cfg.drop_extra_pre_encoded > 0 and cache_last_channel is not None:
audio_signal = audio_signal[:, self.streaming_cfg.drop_extra_pre_encoded :, :]
length = (length - self.streaming_cfg.drop_extra_pre_encoded).clamp(min=0)
if self.reduction_position is not None and cache_last_channel is not None:
raise ValueError("Caching with reduction feature is not supported yet!")
max_audio_length = audio_signal.size(1)
if cache_last_channel is not None:
cache_len = self.streaming_cfg.last_channel_cache_size
cache_keep_size = max_audio_length - self.streaming_cfg.cache_drop_size
max_audio_length = max_audio_length + cache_len
padding_length = length + cache_len
offset = torch.neg(cache_last_channel_len) + cache_len
else:
padding_length = length
cache_last_channel_next = None
cache_len = 0
offset = None
audio_signal, pos_emb = self.pos_enc(x=audio_signal, cache_len=cache_len)
# Create the self-attention and padding masks
pad_mask, att_mask = self._create_masks(
att_context_size=cur_att_context_size,
padding_length=padding_length,
max_audio_length=max_audio_length,
offset=offset,
device=audio_signal.device,
)
if cache_last_channel is not None:
pad_mask = pad_mask[:, cache_len:]
if att_mask is not None:
att_mask = att_mask[:, cache_len:]
# Convert caches from the tensor to list
cache_last_time_next = []
cache_last_channel_next = []
for lth, (drop_prob, layer) in enumerate(zip(self.layer_drop_probs, self.layers)):
original_signal = audio_signal
if cache_last_channel is not None:
cache_last_channel_cur = cache_last_channel[lth]
cache_last_time_cur = cache_last_time[lth]
else:
cache_last_channel_cur = None
cache_last_time_cur = None
audio_signal = layer(
x=audio_signal,
att_mask=att_mask,
pos_emb=pos_emb,
pad_mask=pad_mask,
cache_last_channel=cache_last_channel_cur,
cache_last_time=cache_last_time_cur,
)
if cache_last_channel_cur is not None:
(audio_signal, cache_last_channel_cur, cache_last_time_cur) = audio_signal
cache_last_channel_next.append(cache_last_channel_cur)
cache_last_time_next.append(cache_last_time_cur)
# applying stochastic depth logic from https://arxiv.org/abs/2102.03216
if self.training and drop_prob > 0.0:
should_drop = torch.rand(1) < drop_prob
# adjusting to match expectation
if should_drop:
# that's not efficient, but it's hard to implement distributed
# version of dropping layers without deadlock or random seed meddling
# so multiplying the signal by 0 to ensure all weights get gradients
audio_signal = audio_signal * 0.0 + original_signal
else:
# not doing this operation if drop prob is 0 as it's identity in that case
audio_signal = (audio_signal - original_signal) / (1.0 - drop_prob) + original_signal
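# (added note) dividing the kept residual by (1 - drop_prob) rescales it so the expected
# residual contribution is the same with and without random layer dropping, keeping
# train-time and eval-time activations consistent in expectation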
if self.reduction_position == lth:
audio_signal, length = self.reduction_subsampling(x=audio_signal, lengths=length)
max_audio_length = audio_signal.size(1)
# Don't update the audio_signal here because then it will again scale the audio_signal
# and cause an increase in the WER
_, pos_emb = self.pos_enc(x=audio_signal, cache_len=cache_len)
pad_mask, att_mask = self._create_masks(
att_context_size=cur_att_context_size,
padding_length=length,
max_audio_length=max_audio_length,
offset=offset,
device=audio_signal.device,
)
# saving tensors if required for interctc loss
if self.is_access_enabled():
if self.interctc_capture_at_layers is None:
self.interctc_capture_at_layers = self.access_cfg.get('interctc', {}).get('capture_layers', [])
if lth in self.interctc_capture_at_layers:
lth_audio_signal = audio_signal
if self.out_proj is not None:
lth_audio_signal = self.out_proj(audio_signal)
# shape is the same as the shape of audio_signal output, i.e. [B, D, T]
self.register_accessible_tensor(
name=f'interctc/layer_output_{lth}', tensor=torch.transpose(lth_audio_signal, 1, 2)
)
self.register_accessible_tensor(name=f'interctc/layer_length_{lth}', tensor=length)
if self.out_proj is not None:
audio_signal = self.out_proj(audio_signal)
# Reduction
if self.reduction_position == -1:
audio_signal, length = self.reduction_subsampling(x=audio_signal, lengths=length)
audio_signal = torch.transpose(audio_signal, 1, 2)
length = length.to(dtype=torch.int64)
if cache_last_channel is not None:
cache_last_channel_next = torch.stack(cache_last_channel_next, dim=0)
cache_last_time_next = torch.stack(cache_last_time_next, dim=0)
return (
audio_signal,
length,
cache_last_channel_next,
cache_last_time_next,
torch.clamp(cache_last_channel_len + cache_keep_size, max=cache_len),
)
else:
return audio_signal, length
def update_max_seq_length(self, seq_length: int, device):
# Find global max audio length across all nodes
if torch.distributed.is_initialized():
global_max_len = torch.tensor([seq_length], dtype=torch.float32, device=device)
# Update across all ranks in the distributed system
torch.distributed.all_reduce(global_max_len, op=torch.distributed.ReduceOp.MAX)
seq_length = global_max_len.int().item()
if seq_length > self.max_audio_length:
self.set_max_audio_length(seq_length)
def set_max_audio_length(self, max_audio_length):
"""
Sets maximum input length.
Pre-calculates internal seq_range mask.
"""
self.max_audio_length = max_audio_length
device = next(self.parameters()).device
self.pos_enc.extend_pe(max_audio_length, device)
def _create_masks(self, att_context_size, padding_length, max_audio_length, offset, device):
if self.self_attention_model != "rel_pos_local_attn":
att_mask = torch.ones(1, max_audio_length, max_audio_length, dtype=torch.bool, device=device)
if self.att_context_style == "regular":
if att_context_size[0] >= 0:
att_mask = att_mask.triu(diagonal=-att_context_size[0])
if att_context_size[1] >= 0:
att_mask = att_mask.tril(diagonal=att_context_size[1])
elif self.att_context_style == "chunked_limited":
# When right context is unlimited, only the left side of the mask needs to be updated
if att_context_size[1] == -1:
if att_context_size[0] >= 0:
att_mask = att_mask.triu(diagonal=-att_context_size[0])
else:
chunk_size = att_context_size[1] + 1
# left_chunks_num specifies the number of chunks to be visible by each chunk on the left side
if att_context_size[0] >= 0:
left_chunks_num = att_context_size[0] // chunk_size
else:
left_chunks_num = 10000
chunk_idx = torch.arange(0, max_audio_length, dtype=torch.int, device=att_mask.device)
chunk_idx = torch.div(chunk_idx, chunk_size, rounding_mode="trunc")
diff_chunks = chunk_idx.unsqueeze(1) - chunk_idx.unsqueeze(0)
chunked_limited_mask = torch.logical_and(
torch.le(diff_chunks, left_chunks_num), torch.ge(diff_chunks, 0)
)
att_mask = torch.logical_and(att_mask, chunked_limited_mask.unsqueeze(0))
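# toy illustration (added note): with att_context_size = [2, 1], chunk_size = 2 and left_chunks_num = 1,
# frames {0,1} form chunk 0, {2,3} chunk 1, {4,5} chunk 2; keeping 0 <= diff_chunks <= 1 lets a frame in
# chunk i attend to chunks {i-1, i}, e.g. frame 4 can see frames 2..5 but not frames 0..1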
else:
att_mask = None
# pad_mask is the masking to be used to ignore paddings
pad_mask = torch.arange(0, max_audio_length, device=device).expand(
padding_length.size(0), -1
) < padding_length.unsqueeze(-1)
if offset is not None:
pad_mask_off = torch.arange(0, max_audio_length, device=device).expand(
padding_length.size(0), -1
) >= offset.unsqueeze(-1)
pad_mask = pad_mask_off.logical_and(pad_mask)
if att_mask is not None:
# pad_mask_for_att_mask is the mask which helps to ignore paddings
pad_mask_for_att_mask = pad_mask.unsqueeze(1).repeat([1, max_audio_length, 1])
pad_mask_for_att_mask = torch.logical_and(pad_mask_for_att_mask, pad_mask_for_att_mask.transpose(1, 2))
# att_mask is the masking to be used by the MHA layers to ignore the tokens not supposed to be visible
att_mask = att_mask[:, :max_audio_length, :max_audio_length]
# paddings should also get ignored, so pad_mask_for_att_mask is used to ignore their corresponding scores
att_mask = torch.logical_and(pad_mask_for_att_mask, att_mask.to(pad_mask_for_att_mask.device))
att_mask = ~att_mask
pad_mask = ~pad_mask
return pad_mask, att_mask
def enable_pad_mask(self, on=True):
# On inference, user may choose to disable pad mask
mask = self.use_pad_mask
self.use_pad_mask = on
return mask
def _calc_context_sizes(
self, att_context_size, att_context_probs, att_context_style, conv_context_size, conv_kernel_size
):
# convert att_context_size to a standard list of lists
if att_context_size:
att_context_size_all = list(att_context_size)
if isinstance(att_context_size_all[0], int):
att_context_size_all = [att_context_size_all]
for i, att_cs in enumerate(att_context_size_all):
if isinstance(att_cs, ListConfig):
att_context_size_all[i] = list(att_cs)
if att_context_style == "chunked_limited":
if att_cs[0] > 0 and att_cs[0] % (att_cs[1] + 1) > 0:
raise ValueError(f"att_context_size[{i}][0] % (att_context_size[{i}][1] + 1) should be zero!")
if att_cs[1] < 0 and len(att_context_size_all) <= 1:
raise ValueError(
f"Right context (att_context_size[{i}][1]) can not be unlimited for chunked_limited style!"
)
else:
att_context_size_all = [[-1, -1]]
if att_context_probs:
if len(att_context_probs) != len(att_context_size_all):
raise ValueError("The size of the att_context_probs should be the same as att_context_size.")
att_context_probs = list(att_context_probs)
if sum(att_context_probs) != 1:
raise ValueError(
"The sum of numbers in att_context_probs should be equal to one to be a distribution."
)
else:
att_context_probs = [1.0 / len(att_context_size_all)] * len(att_context_size_all)
if conv_context_size is not None:
if isinstance(conv_context_size, ListConfig):
conv_context_size = list(conv_context_size)
if not isinstance(conv_context_size, list) and not isinstance(conv_context_size, str):
raise ValueError(
f"Invalid conv_context_size! It should be the string 'causal' or a list of two integers."
)
if conv_context_size == "causal":
conv_context_size = [conv_kernel_size - 1, 0]
else:
if conv_context_size[0] + conv_context_size[1] + 1 != conv_kernel_size:
raise ValueError(f"Invalid conv_context_size: {self.conv_context_size}!")
else:
conv_context_size = [(conv_kernel_size - 1) // 2, (conv_kernel_size - 1) // 2]
return att_context_size_all, att_context_size_all[0], att_context_probs, conv_context_size
def set_default_att_context_size(self, att_context_size):
if att_context_size not in self.att_context_size_all:
logging.warning(
f"att_context_size={att_context_size} is not among the list of the supported look-aheads: {self.att_context_size_all}"
)
if att_context_size is not None:
self.att_context_size = att_context_size
def setup_streaming_params(
self,
chunk_size: int = None,
shift_size: int = None,
left_chunks: int = None,
att_context_size: list = None,
max_context: int = 10000,
):
"""
This function sets the needed values and parameters to perform streaming. The configuration would be stored in self.streaming_cfg.
The streaming configuration is needed to simulate streaming inference.
Args:
chunk_size (int): overrides the chunk size
shift_size (int): overrides the shift size for chunks
left_chunks (int): overrides the number of left chunks visible to each chunk
att_context_size (list): overrides the attention context size; if None, the default self.att_context_size is used
max_context (int): the value used for the cache size of last_channel layers if left context is set to infinity (-1)
Defaults to 10000.
"""
streaming_cfg = CacheAwareStreamingConfig()
# When att_context_size is not specified, it uses the default_att_context_size
if att_context_size is None:
att_context_size = self.att_context_size
if chunk_size is not None:
if chunk_size < 1:
raise ValueError("chunk_size needs to be a number larger or equal to one.")
lookahead_steps = chunk_size - 1
streaming_cfg.cache_drop_size = chunk_size - shift_size
elif self.att_context_style == "chunked_limited":
lookahead_steps = att_context_size[1]
streaming_cfg.cache_drop_size = 0
elif self.att_context_style == "regular":
lookahead_steps = att_context_size[1] * self.n_layers + self.conv_context_size[1] * self.n_layers
streaming_cfg.cache_drop_size = lookahead_steps
else:
streaming_cfg.cache_drop_size = 0
lookahead_steps = None
if chunk_size is None:
streaming_cfg.last_channel_cache_size = att_context_size[0] if att_context_size[0] >= 0 else max_context
else:
if left_chunks is None:
raise ValueError("left_chunks can not be None when chunk_size is set.")
streaming_cfg.last_channel_cache_size = left_chunks * chunk_size
if hasattr(self.pre_encode, "get_sampling_frames"):
sampling_frames = self.pre_encode.get_sampling_frames()
else:
sampling_frames = 0
if isinstance(sampling_frames, list):
streaming_cfg.chunk_size = [
sampling_frames[0] + self.subsampling_factor * lookahead_steps,
sampling_frames[1] + self.subsampling_factor * lookahead_steps,
]
else:
streaming_cfg.chunk_size = sampling_frames * (1 + lookahead_steps)
if isinstance(sampling_frames, list):
streaming_cfg.shift_size = [
sampling_frames[0] + sampling_frames[1] * (lookahead_steps - streaming_cfg.cache_drop_size),
sampling_frames[1] + sampling_frames[1] * (lookahead_steps - streaming_cfg.cache_drop_size),
]
else:
streaming_cfg.shift_size = sampling_frames * (1 + lookahead_steps - streaming_cfg.cache_drop_size)
if isinstance(streaming_cfg.shift_size, list):
streaming_cfg.valid_out_len = (
streaming_cfg.shift_size[1] - sampling_frames[1]
) // self.subsampling_factor + 1
else:
streaming_cfg.valid_out_len = streaming_cfg.shift_size // self.subsampling_factor
if hasattr(self.pre_encode, "get_streaming_cache_size"):
streaming_cfg.pre_encode_cache_size = self.pre_encode.get_streaming_cache_size()
else:
streaming_cfg.pre_encode_cache_size = 0
if isinstance(streaming_cfg.pre_encode_cache_size, list):
if streaming_cfg.pre_encode_cache_size[1] >= 1:
streaming_cfg.drop_extra_pre_encoded = (
1 + (streaming_cfg.pre_encode_cache_size[1] - 1) // self.subsampling_factor
)
else:
streaming_cfg.drop_extra_pre_encoded = 0
else:
streaming_cfg.drop_extra_pre_encoded = streaming_cfg.pre_encode_cache_size // self.subsampling_factor
for m in self.layers.modules():
if hasattr(m, "_max_cache_len"):
if isinstance(m, MultiHeadAttention):
m.cache_drop_size = streaming_cfg.cache_drop_size
if isinstance(m, CausalConv1D):
m.cache_drop_size = streaming_cfg.cache_drop_size
self.streaming_cfg = streaming_cfg
def get_initial_cache_state(self, batch_size=1, dtype=torch.float32, device=None, max_dim=0):
if device is None:
device = next(self.parameters()).device
if max_dim > 0:
create_tensor = torch.randn
else:
create_tensor = torch.zeros
last_time_cache_size = self.conv_context_size[0]
cache_last_channel = create_tensor(
(len(self.layers), batch_size, self.streaming_cfg.last_channel_cache_size, self.d_model,),
device=device,
dtype=dtype,
)
cache_last_time = create_tensor(
(len(self.layers), batch_size, self.d_model, last_time_cache_size), device=device, dtype=dtype,
)
if max_dim > 0:
cache_last_channel_len = torch.randint(
0,
min(max_dim, self.streaming_cfg.last_channel_cache_size),
(batch_size,),
device=device,
dtype=torch.int64,
)
for i in range(batch_size):
cache_last_channel[:, i, cache_last_channel_len[i] :, :] = 0
# what is the right rule to zero out cache_last_time?
if cache_last_channel_len[i] == 0:
cache_last_time[:, i, :, :] = 0
else:
cache_last_channel_len = torch.zeros(batch_size, device=device, dtype=torch.int64)
return cache_last_channel, cache_last_time, cache_last_channel_len
def change_attention_model(
self,
self_attention_model: str = None,
att_context_size: List[int] = None,
update_config: bool = True,
device: torch.device = None,
):
"""
Update the self_attention_model which changes the positional encoding and attention layers.
Args:
self_attention_model (str): type of the attention layer and positional encoding
'rel_pos': relative positional embedding and Transformer-XL
'rel_pos_local_attn': relative positional embedding and Transformer-XL with local attention using
overlapping windows. Attention context is determined by att_context_size parameter.
'abs_pos': absolute positional embedding and Transformer
If None is provided, the self_attention_model isn't changed. Defaults to None.
att_context_size (List[int]): List of 2 ints corresponding to left and right attention context sizes,
or None to keep as it is. Defaults to None.
update_config (bool): Whether to update the config or not with the new attention model.
Defaults to True.
device (torch.device): If provided, new layers will be moved to the device.
Defaults to None.
"""
if att_context_size:
att_context_size = list(att_context_size)
else:
att_context_size = self.att_context_size
if self_attention_model is None:
self_attention_model = self.self_attention_model
if self_attention_model == 'rel_pos_local_attn' and max(att_context_size) <= 0:
raise ValueError("When using local attention, context size must be set > 0")
if self_attention_model == "rel_pos":
new_pos_enc = RelPositionalEncoding(
d_model=self._cfg.d_model,
dropout_rate=self._cfg.dropout,
max_len=self._cfg.pos_emb_max_len,
xscale=self.xscale,
dropout_rate_emb=self._cfg.dropout_emb,
)
elif self_attention_model == 'rel_pos_local_attn':
new_pos_enc = LocalAttRelPositionalEncoding(
att_context_size=att_context_size,
d_model=self._cfg.d_model,
dropout_rate=self._cfg.dropout,
max_len=self._cfg.pos_emb_max_len,
xscale=self.xscale,
dropout_rate_emb=self._cfg.dropout_emb,
)
elif self_attention_model == "abs_pos":
new_pos_enc = PositionalEncoding(
d_model=self._cfg.d_model,
dropout_rate=self._cfg.dropout,
max_len=self._cfg.pos_emb_max_len,
xscale=self.xscale,
)
else:
raise ValueError(f"Not valid self_attention_model: '{self_attention_model}'!")
if device is not None:
new_pos_enc = new_pos_enc.to(device=device)
del self.pos_enc
self.pos_enc = new_pos_enc
self.self_attention_model = self_attention_model
self.att_context_size = att_context_size
self.set_max_audio_length(self.pos_emb_max_len)
for name, m in self.named_modules():
if type(m) == ConformerLayer:
if self_attention_model == 'rel_pos':
new_attn = RelPositionMultiHeadAttention(
n_head=self._cfg.n_heads,
n_feat=self._cfg.d_model,
dropout_rate=self._cfg.dropout_att,
max_cache_len=att_context_size[0],
pos_bias_u=None,
pos_bias_v=None,
)
elif self_attention_model == 'rel_pos_local_attn':
new_attn = RelPositionMultiHeadAttentionLongformer(
n_head=self._cfg.n_heads,
n_feat=self._cfg.d_model,
dropout_rate=self._cfg.dropout_att,
max_cache_len=att_context_size[0],
att_context_size=att_context_size,
pos_bias_u=None,
pos_bias_v=None,
)
elif self_attention_model == 'abs_pos':
new_attn = MultiHeadAttention(
n_head=self._cfg.n_heads,
n_feat=self._cfg.d_model,
dropout_rate=self._cfg.dropout_att,
max_cache_len=att_context_size[0],
)
else:
raise ValueError(
f"'{self_attention_model}' is not not a valid value for 'self_attention_model', "
f"valid values can be from ['rel_pos', 'rel_pos_local_attn', 'abs_pos']"
)
if device is not None:
new_attn = new_attn.to(device=device)
new_attn.load_state_dict(m.self_attn.state_dict(), strict=False)
del m.self_attn
m.self_attn = new_attn
m.self_attention_model = self_attention_model
if update_config:
with open_dict(self._cfg):
self._cfg.self_attention_model = self_attention_model
self._cfg.att_context_size = att_context_size
def change_subsampling_conv_chunking_factor(self, subsampling_conv_chunking_factor: int):
"""
Update the conv_chunking_factor (int)
Default is 1 (auto)
Set it to -1 (disabled) or to a specific value (power of 2) if you OOM in the conv subsampling layers
Args:
subsampling_conv_chunking_factor (int)
"""
if not hasattr(self.pre_encode, "change_subsampling_conv_chunking_factor"):
logging.info("Model pre_encoder doesn't have a change_subsampling_conv_chunking_factor method ")
return
self.pre_encode.change_subsampling_conv_chunking_factor(
subsampling_conv_chunking_factor=subsampling_conv_chunking_factor
)
class ConformerEncoderAdapter(ConformerEncoder, adapter_mixins.AdapterModuleMixin):
# Higher level forwarding
def add_adapter(self, name: str, cfg: dict):
cfg = self._update_adapter_cfg_input_dim(cfg)
for conformer_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
conformer_layer.add_adapter(name, cfg)
def is_adapter_available(self) -> bool:
return any([conformer_layer.is_adapter_available() for conformer_layer in self.layers])
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
for conformer_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
conformer_layer.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
names = set([])
for conformer_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
names.update(conformer_layer.get_enabled_adapters())
names = sorted(list(names))
return names
def _update_adapter_cfg_input_dim(self, cfg: DictConfig):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=self.d_model)
return cfg
def get_accepted_adapter_types(self,) -> Set[type]:
types = super().get_accepted_adapter_types()
if len(types) == 0:
self.set_accepted_adapter_types(
[
adapter_utils.LINEAR_ADAPTER_CLASSPATH,
adapter_utils.MHA_ADAPTER_CLASSPATH,
adapter_utils.RELMHA_ADAPTER_CLASSPATH,
]
)
types = self.get_accepted_adapter_types()
return types
"""
Register any additional information
"""
if adapter_mixins.get_registered_adapter(ConformerEncoder) is None:
adapter_mixins.register_adapter(base_class=ConformerEncoder, adapter_class=ConformerEncoderAdapter)
@dataclass
class ConformerChangeConfig:
# Change self_attention_model for Conformer
# Options:
# 'rel_pos': relative positional embedding and Transformer-XL
# 'rel_pos_local_attn': relative positional embedding and Transformer-XL with local attention using
# overlapping chunks. Attention context is determined by att_context_size parameter.
# 'abs_pos': absolute positional embedding and Transformer
# If None is provided, self_attention_model is not changed.
self_attention_model: Optional[str] = None
# Change the attention context size by providing 2 integers,
# corresponding to left and right context, or -1 for full context.
# If None is provided, the attention context size isn't changed.
att_context_size: Optional[List[int]] = None
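# A minimal usage sketch (added by an editor, not part of the original module): build a tiny
# ConformerEncoder and run it on random spectrogram-like features. The sizes are arbitrary
# assumptions for illustration, not recommended model settings.
if __name__ == "__main__":
    encoder = ConformerEncoder(feat_in=80, n_layers=2, d_model=64, n_heads=4, subsampling_factor=4)
    feats = torch.randn(2, 80, 160)                    # [B, feat_in, T]
    lens = torch.tensor([160, 120], dtype=torch.int64)
    encoded, encoded_len = encoder(audio_signal=feats, length=lens)
    print(encoded.shape, encoded_len)                  # roughly [2, 64, T // subsampling_factor]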
|
NeMo-main
|
nemo/collections/asr/modules/conformer_encoder.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.core.classes import NeuralModule, typecheck
from nemo.core.neural_types import LengthsType, LogprobsType, NeuralType, PredictionsType
class BeamSearchDecoderWithLM(NeuralModule):
"""Neural Module that does CTC beam search with a N-gram language model.
It takes a batch of log-probabilities. Larger batches decode faster because processing
is parallelized across the batch. Outputs a list of size batch_size.
Each element in the list is a list of size beam_search, and each element
in that list is a tuple of (final_log_prob, hyp_string).
Args:
vocab (list): List of characters that can be output by the ASR model. For English, this is the 28 character set
{a-z '}. The CTC blank symbol is automatically added.
beam_width (int): Size of beams to keep and expand upon. Larger beams result in more accurate but slower
predictions
alpha (float): The amount of importance to place on the N-gram language model. Larger alpha means more
importance on the LM and less importance on the acoustic model.
beta (float): A penalty term given to longer word sequences. Larger beta will result in shorter sequences.
lm_path (str): Path to N-gram language model
num_cpus (int): Number of CPUs to use
cutoff_prob (float): Cutoff probability in vocabulary pruning, default 1.0, no pruning
cutoff_top_n (int): Cutoff number in pruning, only top cutoff_top_n characters with highest probs in
vocabulary will be used in beam search, default 40.
input_tensor (bool): Set to True if you intend to pass PyTorch Tensors, set to False if you intend to pass
NumPy arrays.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"log_probs": NeuralType(('B', 'T', 'D'), LogprobsType()),
"log_probs_length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"predictions": NeuralType(('B', 'T'), PredictionsType())}
def __init__(
self, vocab, beam_width, alpha, beta, lm_path, num_cpus, cutoff_prob=1.0, cutoff_top_n=40, input_tensor=False
):
try:
from ctc_decoders import Scorer, ctc_beam_search_decoder_batch
except ModuleNotFoundError:
raise ModuleNotFoundError(
"BeamSearchDecoderWithLM requires the installation of ctc_decoders "
"from scripts/asr_language_modeling/ngram_lm/install_beamsearch_decoders.sh"
)
super().__init__()
if lm_path is not None:
self.scorer = Scorer(alpha, beta, model_path=lm_path, vocabulary=vocab)
else:
self.scorer = None
self.beam_search_func = ctc_beam_search_decoder_batch
self.vocab = vocab
self.beam_width = beam_width
self.num_cpus = num_cpus
self.cutoff_prob = cutoff_prob
self.cutoff_top_n = cutoff_top_n
self.input_tensor = input_tensor
@typecheck(ignore_collections=True)
@torch.no_grad()
def forward(self, log_probs, log_probs_length):
probs_list = log_probs
if self.input_tensor:
probs = torch.exp(log_probs)
probs_list = []
for i, prob in enumerate(probs):
probs_list.append(prob[: log_probs_length[i], :])
res = self.beam_search_func(
probs_list,
self.vocab,
beam_size=self.beam_width,
num_processes=self.num_cpus,
ext_scoring_func=self.scorer,
cutoff_prob=self.cutoff_prob,
cutoff_top_n=self.cutoff_top_n,
)
return res
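# A rough usage sketch (added by an editor, not part of the original module). It assumes the
# optional ctc_decoders package is installed; the vocabulary and beam settings below are
# arbitrary assumptions for illustration, and lm_path=None runs the decoder without an N-gram LM.
if __name__ == "__main__":
    vocab = list("abcdefghijklmnopqrstuvwxyz' ")
    decoder = BeamSearchDecoderWithLM(
        vocab=vocab, beam_width=16, alpha=1.0, beta=0.5,
        lm_path=None, num_cpus=2, input_tensor=True,
    )
    log_probs = torch.log_softmax(torch.randn(1, 50, len(vocab) + 1), dim=-1)  # [B, T, vocab + blank]
    lengths = torch.tensor([50])
    hyps = decoder(log_probs=log_probs, log_probs_length=lengths)
    print(hyps[0][0])  # (score, hypothesis string) for the best beam of the first utterance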
|
NeMo-main
|
nemo/collections/asr/modules/beam_search_decoder.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
import torch.distributed
import torch.nn as nn
from nemo.collections.asr.parts.submodules.subsampling import ConvSubsampling, StackingSubsampling
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import AcousticEncodedRepresentation, LengthsType, NeuralType, SpectrogramType
__all__ = ['RNNEncoder']
class RNNEncoder(NeuralModule, Exportable):
"""
The RNN-based encoder for ASR models.
Followed the architecture suggested in the following paper:
'STREAMING END-TO-END SPEECH RECOGNITION FOR MOBILE DEVICES' by Yanzhang He et al.
https://arxiv.org/pdf/1811.06621.pdf
Args:
feat_in (int): the size of feature channels
n_layers (int): number of layers of RNN
d_model (int): the hidden size of the model
proj_size (int): the size of the output projection after each RNN layer
rnn_type (str): the type of the RNN layers, choices=['lstm', 'gru', 'rnn']
bidirectional (bool): specifies whether RNN layers should be bidirectional or not
Defaults to True.
feat_out (int): the size of the output features
Defaults to -1 (means feat_out is d_model)
        subsampling (str): the method of subsampling, choices=['stacking', 'vggnet', 'striding']
Defaults to stacking.
subsampling_factor (int): the subsampling factor
Defaults to 4.
subsampling_conv_channels (int): the size of the convolutions in the subsampling module for vggnet and striding
Defaults to -1 which would set it to d_model.
dropout (float): the dropout rate used between all layers
Defaults to 0.2.
"""
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(16, self._feat_in, 256).to(next(self.parameters()).device)
input_example_length = torch.randint(0, 256, (16,)).to(next(self.parameters()).device)
return tuple([input_example, input_example_length])
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
)
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
)
def __init__(
self,
feat_in: int,
n_layers: int,
d_model: int,
proj_size: int = -1,
rnn_type: str = 'lstm',
bidirectional: bool = True,
subsampling: str = 'striding',
subsampling_factor: int = 4,
subsampling_conv_channels: int = -1,
dropout: float = 0.2,
):
super().__init__()
self.d_model = d_model
self._feat_in = feat_in
if subsampling_conv_channels == -1:
subsampling_conv_channels = proj_size
if subsampling and subsampling_factor > 1:
if subsampling in ['stacking', 'stacking_norm']:
self.pre_encode = StackingSubsampling(
subsampling_factor=subsampling_factor,
feat_in=feat_in,
feat_out=proj_size,
norm=True if 'norm' in subsampling else False,
)
else:
self.pre_encode = ConvSubsampling(
subsampling=subsampling,
subsampling_factor=subsampling_factor,
feat_in=feat_in,
feat_out=proj_size,
conv_channels=subsampling_conv_channels,
activation=nn.ReLU(),
)
else:
self.pre_encode = nn.Linear(feat_in, proj_size)
self._feat_out = proj_size
self.layers = nn.ModuleList()
SUPPORTED_RNN = {"lstm": nn.LSTM, "gru": nn.GRU, "rnn": nn.RNN}
if rnn_type not in SUPPORTED_RNN:
raise ValueError(f"rnn_type can be one from the following:{SUPPORTED_RNN.keys()}")
else:
rnn_module = SUPPORTED_RNN[rnn_type]
for i in range(n_layers):
rnn_proj_size = proj_size // 2 if bidirectional else proj_size
if rnn_type == "lstm":
layer = rnn_module(
input_size=self._feat_out,
hidden_size=d_model,
num_layers=1,
batch_first=True,
bidirectional=bidirectional,
proj_size=rnn_proj_size,
)
self.layers.append(layer)
self.layers.append(nn.LayerNorm(proj_size))
self.layers.append(nn.Dropout(p=dropout))
self._feat_out = proj_size
@typecheck()
def forward(self, audio_signal, length=None):
max_audio_length: int = audio_signal.size(-1)
if length is None:
            length = audio_signal.new_full(
                (audio_signal.size(0),), max_audio_length, dtype=torch.int32, device=audio_signal.device
            )
audio_signal = torch.transpose(audio_signal, 1, 2)
if isinstance(self.pre_encode, nn.Linear):
audio_signal = self.pre_encode(audio_signal)
else:
audio_signal, length = self.pre_encode(audio_signal, length)
for lth, layer in enumerate(self.layers):
audio_signal = layer(audio_signal)
if isinstance(audio_signal, tuple):
audio_signal, _ = audio_signal
audio_signal = torch.transpose(audio_signal, 1, 2)
return audio_signal, length
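# --- Editorial usage sketch (not part of the original module) ---
# A minimal, hypothetical example of running the encoder on a random batch of spectrograms.
# The feature size, batch size and sequence length are assumptions; shapes follow the declared
# neural types: audio_signal is (B, D, T) and length is (B,).
def _rnn_encoder_usage_sketch():
    encoder = RNNEncoder(feat_in=80, n_layers=2, d_model=256, proj_size=128)
    audio_signal = torch.randn(4, 80, 200)
    length = torch.full((4,), 200, dtype=torch.int32)
    outputs, encoded_lengths = encoder(audio_signal=audio_signal, length=length)
    # outputs: (B, proj_size, roughly T / subsampling_factor); encoded_lengths: (B,)
    return outputs, encoded_lengths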
|
NeMo-main
|
nemo/collections/asr/modules/rnn_encoder.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.core.classes import NeuralModule, typecheck
from nemo.core.neural_types import LengthsType, LogprobsType, NeuralType, PredictionsType
class _TokensWrapper:
def __init__(self, vocabulary: List[str], tokenizer: TokenizerSpec):
self.vocabulary = vocabulary
self.tokenizer = tokenizer
if tokenizer is None:
self.reverse_map = {self.vocabulary[i]: i for i in range(len(self.vocabulary))}
self.vocab_len = len(self.vocabulary)
if (self.tokenizer is not None) and hasattr(self.tokenizer, 'unk_id') and self.tokenizer.unk_id is not None:
self.unknown_id = self.tokenizer.unk_id
elif ' ' in self.vocabulary:
self.unknown_id = self.token_to_id(' ')
elif '<unk>' in self.vocabulary:
self.unknown_id = self.token_to_id('<unk>')
else:
self.unknown_id = -1
@property
def blank(self):
return self.vocab_len
@property
def unk_id(self):
return self.unknown_id
@property
def vocab(self):
return self.vocabulary
@property
def vocab_size(self):
# the +1 is because we add the blank id
return self.vocab_len + 1
def token_to_id(self, token: str):
if token == self.blank:
return -1
if self.tokenizer is not None:
return self.tokenizer.token_to_id(token)
else:
return self.reverse_map[token]
def text_to_tokens(self, text: str):
if self.tokenizer is not None:
return self.tokenizer.text_to_tokens(text)
else:
return list(text)
class FlashLightKenLMBeamSearchDecoder(NeuralModule):
'''
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"log_probs": NeuralType(('B', 'T', 'D'), LogprobsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"hypos": NeuralType(('B'), PredictionsType())}
'''
def __init__(
self,
lm_path: str,
vocabulary: List[str],
tokenizer: Optional[TokenizerSpec] = None,
lexicon_path: Optional[str] = None,
boost_path: Optional[str] = None,
beam_size: int = 32,
beam_size_token: int = 32,
beam_threshold: float = 25.0,
lm_weight: float = 2.0,
word_score: float = -1.0,
unk_weight: float = -math.inf,
sil_weight: float = 0.0,
):
try:
from flashlight.lib.text.decoder import (
LM,
CriterionType,
KenLM,
LexiconDecoder,
LexiconDecoderOptions,
SmearingMode,
Trie,
)
from flashlight.lib.text.dictionary import create_word_dict, load_words
except ModuleNotFoundError:
raise ModuleNotFoundError(
"FlashLightKenLMBeamSearchDecoder requires the installation of flashlight python bindings "
"from https://github.com/flashlight/text. Please follow the build instructions there."
)
super().__init__()
self.criterion_type = CriterionType.CTC
self.tokenizer_wrapper = _TokensWrapper(vocabulary, tokenizer)
self.vocab_size = self.tokenizer_wrapper.vocab_size
self.blank = self.tokenizer_wrapper.blank
self.silence = self.tokenizer_wrapper.unk_id
if lexicon_path is not None:
self.lexicon = load_words(lexicon_path)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
# loads in the boosted words if given via a file
if boost_path is not None:
with open(boost_path, 'r', encoding='utf_8') as fr:
boost_words = [line.strip().split('\t') for line in fr]
boost_words = {w[0]: w[1] for w in boost_words}
else:
boost_words = {}
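            # Editorial note (not part of the original module): as parsed above, the boost file is
            # expected to contain one tab-separated "word\tscore" pair per line, e.g.
            #   nvidia\t3.0
            #   nemo\t2.5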
# add OOV boosted words to word_dict so it gets picked up in LM obj creation
for word in boost_words.keys():
if word not in self.lexicon:
self.word_dict.add_entry(word)
            # loads in the kenlm binary and combines it with the dictionary object from the lexicon;
            # this gives a mapping between each entry in the kenlm binary and the token numbering
            # used by the AM, which is explicitly mapped via the lexicon.
            # this information is used to build a vocabulary trie for decoding
self.lm = KenLM(lm_path, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [self.tokenizer_wrapper.token_to_id(token) for token in spelling]
if self.tokenizer_wrapper.unk_id in spelling_idxs:
print(f'tokenizer has unknown id for word[ {word} ] {spelling} {spelling_idxs}', flush=True)
continue
self.trie.insert(
spelling_idxs, word_idx, score if word not in boost_words else float(boost_words[word])
)
# handle OOV boosted words
for word, boost in boost_words.items():
if word not in self.lexicon:
word_idx = self.word_dict.get_index(word)
spelling = self.tokenizer_wrapper.text_to_tokens(word)
spelling_idxs = [self.tokenizer_wrapper.token_to_id(token) for token in spelling]
if self.tokenizer_wrapper.unk_id in spelling_idxs:
print(f'tokenizer has unknown id for word[ {word} ] {spelling} {spelling_idxs}', flush=True)
continue
self.trie.insert(spelling_idxs, word_idx, float(boost))
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=beam_size,
beam_size_token=int(beam_size_token),
beam_threshold=beam_threshold,
lm_weight=lm_weight,
word_score=word_score,
unk_score=unk_weight,
sil_score=sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconDecoder(
self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], False,
)
else:
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
d = {
w: [[w]]
for w in self.tokenizer_wrapper.vocab + ([] if '<unk>' in self.tokenizer_wrapper.vocab else ['<unk>'])
}
self.word_dict = create_word_dict(d)
self.lm = KenLM(lm_path, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=beam_size,
beam_size_token=int(beam_size_token),
beam_threshold=beam_threshold,
lm_weight=lm_weight,
sil_score=sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(self.decoder_opts, self.lm, self.silence, self.blank, [])
def _get_tokens(self, idxs: List[int]):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in itertools.groupby(idxs))
if self.silence < 0:
idxs = filter(lambda x: x != self.blank and x != self.silence, idxs)
else:
idxs = filter(lambda x: x != self.blank, idxs)
idxs = list(idxs)
if idxs[0] == self.silence:
idxs = idxs[1:]
if idxs[-1] == self.silence:
idxs = idxs[:-1]
return torch.LongTensor(idxs)
def _get_timesteps(self, token_idxs: List[int]):
"""Returns frame numbers corresponding to every non-blank token.
Parameters
----------
token_idxs : List[int]
IDs of decoded tokens.
Returns
-------
List[int]
Frame numbers corresponding to every non-blank token.
"""
timesteps = []
for i, token_idx in enumerate(token_idxs):
if token_idx == self.blank:
continue
if i == 0 or token_idx != token_idxs[i - 1]:
timesteps.append(i)
return timesteps
# @typecheck(ignore_collections=True)
@torch.no_grad()
def forward(self, log_probs: Union[np.ndarray, torch.Tensor]):
if isinstance(log_probs, np.ndarray):
log_probs = torch.from_numpy(log_probs).float()
if log_probs.dim() == 2:
log_probs = log_probs.unsqueeze(0)
emissions = log_probs.cpu().contiguous()
B, T, N = emissions.size()
hypos = []
# we iterate over the batch dimension of our input tensor log probabilities
for b in range(B):
            # the flashlight C++ decoder expects a raw C-style pointer, i.e. the memory address of
            # this sample's float32 emissions (hence the 4-byte stride below). That address is then
            # passed to the python binding, which forwards it to the underlying C++ code.
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
hypos.append(
[
{
"tokens": self._get_tokens(result.tokens),
"score": result.score,
"timesteps": self._get_timesteps(result.tokens),
"words": [self.word_dict.get_entry(x) for x in result.words if x >= 0],
}
for result in results
]
)
return hypos
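# --- Editorial usage sketch (not part of the original module) ---
# A minimal, hypothetical example of decoding with the Flashlight/KenLM beam search. It assumes
# the flashlight text bindings are installed; `lm.bin` and `lexicon.txt` are placeholder paths
# supplied by the caller, and the hyper-parameters are illustrative only.
def _flashlight_decoder_usage_sketch(vocabulary, log_probs):
    decoder = FlashLightKenLMBeamSearchDecoder(
        lm_path="lm.bin",  # placeholder KenLM binary
        vocabulary=vocabulary,  # AM vocabulary (list of tokens)
        lexicon_path="lexicon.txt",  # placeholder lexicon mapping words to token spellings
        beam_size=32,
        lm_weight=2.0,
    )
    # log_probs: (B, T, V + 1) tensor or NumPy array of per-frame log-probabilities.
    hypos = decoder(log_probs)
    best = hypos[0][0]  # top hypothesis of the first utterance
    return best["words"], best["score"]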
|
NeMo-main
|
nemo/collections/asr/modules/flashlight_decoder.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.asr.modules import rnnt_abstract
from nemo.collections.asr.parts.submodules import stateless_net
from nemo.collections.asr.parts.utils import adapter_utils, rnnt_utils
from nemo.collections.common.parts import rnn
from nemo.core.classes import adapter_mixins, typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.mixins import AdapterModuleMixin
from nemo.core.neural_types import (
AcousticEncodedRepresentation,
ElementType,
EmbeddedTextType,
LabelsType,
LengthsType,
LogprobsType,
LossType,
NeuralType,
SpectrogramType,
)
from nemo.utils import logging
class StatelessTransducerDecoder(rnnt_abstract.AbstractRNNTDecoder, Exportable):
"""A Stateless Neural Network Transducer Decoder / Prediction Network.
    An RNN-T Decoder/Prediction stateless network that simply outputs the concatenation of the embeddings of the history tokens.
Args:
prednet: A dict-like object which contains the following key-value pairs.
pred_hidden: int specifying the hidden dimension of the prediction net.
dropout: float, set to 0.0 by default. Optional dropout applied at the end of the final LSTM RNN layer.
vocab_size: int, specifying the vocabulary size of the embedding layer of the Prediction network,
excluding the RNNT blank token.
context_size: int, specifying the size of the history context used for this decoder.
        normalization_mode: Can be either None or 'layer'. By default, it is set to None.
Defines the type of normalization applied to the RNN layer.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"targets": NeuralType(('B', 'T'), LabelsType()),
"target_length": NeuralType(tuple('B'), LengthsType()),
"states": [NeuralType(('B', 'T'), LabelsType(), optional=True)],
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {
"outputs": NeuralType(('B', 'D', 'T'), EmbeddedTextType()),
"prednet_lengths": NeuralType(tuple('B'), LengthsType()),
"states": [NeuralType(('B', 'T'), LabelsType(), optional=True)],
}
def input_example(self, max_batch=1, max_dim=1):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
length = max_dim
targets = torch.full(fill_value=self.blank_idx, size=(max_batch, length), dtype=torch.int32).to(
next(self.parameters()).device
)
target_length = torch.randint(0, length, size=(max_batch,), dtype=torch.int32).to(
next(self.parameters()).device
)
states = tuple(self.initialize_state(targets.float()))
return (targets, target_length, states)
def _prepare_for_export(self, **kwargs):
self._rnnt_export = True
super()._prepare_for_export(**kwargs)
def __init__(
self,
prednet: Dict[str, Any],
vocab_size: int,
context_size: int = 1,
normalization_mode: Optional[str] = None,
):
# Required arguments
self.pred_hidden = prednet['pred_hidden']
self.blank_idx = vocab_size
self.context_size = context_size
# Initialize the model (blank token increases vocab size by 1)
super().__init__(vocab_size=vocab_size, blank_idx=self.blank_idx, blank_as_pad=True)
# Optional arguments
dropout = prednet.get('dropout', 0.0)
self.prediction = self._predict_modules(
**{
"context_size": context_size,
"vocab_size": vocab_size,
"emb_dim": self.pred_hidden,
"blank_idx": self.blank_idx,
"normalization_mode": normalization_mode,
"dropout": dropout,
}
)
self._rnnt_export = False
@typecheck()
def forward(self, targets, target_length, states=None):
# y: (B, U)
y = rnn.label_collate(targets)
# state maintenance is unnecessary during training forward call
# to get state, use .predict() method.
if self._rnnt_export:
add_sos = False
else:
add_sos = True
g, state = self.predict(y, state=states, add_sos=add_sos) # (B, U, D)
g = g.transpose(1, 2) # (B, D, U)
return g, target_length, state
def predict(
self,
y: Optional[torch.Tensor] = None,
state: Optional[torch.Tensor] = None,
add_sos: bool = True,
batch_size: Optional[int] = None,
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Stateful prediction of scores and state for a tokenset.
Here:
B - batch size
U - label length
C - context size for stateless decoder
D - total embedding size
Args:
y: Optional torch tensor of shape [B, U] of dtype long which will be passed to the Embedding.
If None, creates a zero tensor of shape [B, 1, D] which mimics output of pad-token on Embedding.
state: An optional one-element list of one tensor. The tensor is used to store previous context labels.
The tensor uses type long and is of shape [B, C].
add_sos: bool flag, whether a zero vector describing a "start of signal" token should be
prepended to the above "y" tensor. When set, output size is (B, U + 1, D).
            batch_size: An optional int, specifying the batch size of the `y` tensor.
                It can be inferred from `y` or `state` when either is provided, but if both are None,
                then batch_size cannot be None.
Returns:
A tuple (g, state) such that -
If add_sos is False:
g: (B, U, D)
state: [(B, C)] storing the history context including the new words in y.
If add_sos is True:
g: (B, U + 1, D)
state: [(B, C)] storing the history context including the new words in y.
"""
# Get device and dtype of current module
_p = next(self.parameters())
device = _p.device
dtype = _p.dtype
# If y is not None, it is of shape [B, U] with dtype long.
if y is not None:
if y.device != device:
y = y.to(device)
y, state = self.prediction(y, state)
else:
# Y is not provided, assume zero tensor with shape [B, 1, D] is required
# Emulates output of embedding of pad token.
if batch_size is None:
B = 1 if state is None else state[0].size(1)
else:
B = batch_size
y = torch.zeros((B, 1, self.pred_hidden), device=device, dtype=dtype)
# Prepend blank "start of sequence" symbol (zero tensor)
if add_sos:
B, U, D = y.shape
start = torch.zeros((B, 1, D), device=y.device, dtype=y.dtype)
y = torch.cat([start, y], dim=1).contiguous() # (B, U + 1, D)
else:
start = None # makes del call later easier
del start
return y, state
def _predict_modules(self, **kwargs):
"""
Prepare the trainable parameters of the Prediction Network.
Args:
vocab_size: Vocab size (excluding the blank token).
pred_n_hidden: Hidden size of the RNNs.
norm: Type of normalization to perform in RNN.
dropout: Whether to apply dropout to RNN.
"""
net = stateless_net.StatelessNet(**kwargs)
return net
def score_hypothesis(
self, hypothesis: rnnt_utils.Hypothesis, cache: Dict[Tuple[int], Any]
) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
"""
Similar to the predict() method, instead this method scores a Hypothesis during beam search.
Hypothesis is a dataclass representing one hypothesis in a Beam Search.
Args:
hypothesis: Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
Returns:
Returns a tuple (y, states, lm_token) such that:
y is a torch.Tensor of shape [1, 1, H] representing the score of the last token in the Hypothesis.
state is a list of RNN states, each of shape [L, 1, H].
lm_token is the final integer token of the hypothesis.
"""
if hypothesis.dec_state is not None:
device = hypothesis.dec_state[0].device
else:
_p = next(self.parameters())
device = _p.device
# parse "blank" tokens in hypothesis
if len(hypothesis.y_sequence) > 0 and hypothesis.y_sequence[-1] == self.blank_idx:
blank_state = True
else:
blank_state = False
# Convert last token of hypothesis to torch.Tensor
target = torch.full([1, 1], fill_value=hypothesis.y_sequence[-1], device=device, dtype=torch.long)
lm_token = target[:, -1] # [1]
# Convert current hypothesis into a tuple to preserve in cache
sequence = tuple(hypothesis.y_sequence)
if sequence in cache:
y, new_state = cache[sequence]
else:
# Obtain score for target token and new states
if blank_state:
y, new_state = self.predict(None, state=None, add_sos=False, batch_size=1) # [1, 1, H]
else:
y, new_state = self.predict(
target, state=hypothesis.dec_state, add_sos=False, batch_size=1
) # [1, 1, H]
y = y[:, -1:, :] # Extract just last state : [1, 1, H]
cache[sequence] = (y, new_state)
return y, new_state, lm_token
def initialize_state(self, y: torch.Tensor) -> List[torch.Tensor]:
batch = y.size(0)
state = [torch.ones([batch, self.context_size], dtype=torch.long, device=y.device) * self.blank_idx]
return state
def batch_initialize_states(self, batch_states: List[torch.Tensor], decoder_states: List[List[torch.Tensor]]):
"""
Create batch of decoder states.
Args:
batch_states (list): batch of decoder states
([(B, H)])
decoder_states (list of list): list of decoder states
                B x ([(1, C)])
Returns:
batch_states (tuple): batch of decoder states
([(B, C)])
"""
new_state = torch.stack([s[0] for s in decoder_states])
return [new_state]
def batch_select_state(self, batch_states: List[torch.Tensor], idx: int) -> List[List[torch.Tensor]]:
"""Get decoder state from batch of states, for given id.
Args:
batch_states (list): batch of decoder states
[(B, C)]
idx (int): index to extract state from batch of states
Returns:
(tuple): decoder states for given id
[(C)]
"""
if batch_states is not None:
states = batch_states[0][idx]
states = (
states.long()
) # beam search code assumes the batch_states tensor is always of float type, so need conversion
return [states]
else:
return None
def batch_concat_states(self, batch_states: List[List[torch.Tensor]]) -> List[torch.Tensor]:
"""Concatenate a batch of decoder state to a packed state.
Args:
batch_states (list): batch of decoder states
                B x ([(C)])
Returns:
(tuple): decoder states
[(B x C)]
"""
state_list = []
batch_list = []
for sample_id in range(len(batch_states)):
tensor = torch.stack(batch_states[sample_id]) # [1, H]
batch_list.append(tensor)
state_tensor = torch.cat(batch_list, 0) # [B, H]
state_list.append(state_tensor)
return state_list
def batch_copy_states(
self,
old_states: List[torch.Tensor],
new_states: List[torch.Tensor],
ids: List[int],
value: Optional[float] = None,
) -> List[torch.Tensor]:
"""Copy states from new state to old state at certain indices.
Args:
old_states: packed decoder states
single element list of (B x C)
new_states: packed decoder states
single element list of (B x C)
ids (list): List of indices to copy states at.
value (optional float): If a value should be copied instead of a state slice, a float should be provided
Returns:
batch of decoder states with partial copy at ids (or a specific value).
(B x C)
"""
if value is None:
old_states[0][ids, :] = new_states[0][ids, :]
return old_states
def batch_score_hypothesis(
self, hypotheses: List[rnnt_utils.Hypothesis], cache: Dict[Tuple[int], Any], batch_states: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
"""
Used for batched beam search algorithms. Similar to score_hypothesis method.
Args:
            hypotheses: List of Hypotheses. Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
batch_states: List of torch.Tensor which represent the states of the RNN for this batch.
Each state is of shape [L, B, H]
Returns:
Returns a tuple (b_y, b_states, lm_tokens) such that:
b_y is a torch.Tensor of shape [B, 1, H] representing the scores of the last tokens in the Hypotheses.
b_state is a list of list of RNN states, each of shape [L, B, H].
Represented as B x List[states].
lm_token is a list of the final integer tokens of the hypotheses in the batch.
"""
final_batch = len(hypotheses)
if final_batch == 0:
raise ValueError("No hypotheses was provided for the batch!")
_p = next(self.parameters())
device = _p.device
dtype = _p.dtype
tokens = []
process = []
done = [None for _ in range(final_batch)]
# For each hypothesis, cache the last token of the sequence and the current states
for i, hyp in enumerate(hypotheses):
sequence = tuple(hyp.y_sequence)
if sequence in cache:
done[i] = cache[sequence]
else:
tokens.append(hyp.y_sequence[-1])
process.append((sequence, hyp.dec_state))
if process:
batch = len(process)
# convert list of tokens to torch.Tensor, then reshape.
tokens = torch.tensor(tokens, device=device, dtype=torch.long).view(batch, -1)
dec_states = self.initialize_state(tokens) # [B, C]
dec_states = self.batch_initialize_states(dec_states, [d_state for seq, d_state in process])
y, dec_states = self.predict(
tokens, state=dec_states, add_sos=False, batch_size=batch
) # [B, 1, H], List([L, 1, H])
dec_states = tuple(state.to(dtype=dtype) for state in dec_states)
# Update done states and cache shared by entire batch.
j = 0
for i in range(final_batch):
if done[i] is None:
# Select sample's state from the batch state list
new_state = self.batch_select_state(dec_states, j)
# Cache [1, H] scores of the current y_j, and its corresponding state
done[i] = (y[j], new_state)
cache[process[j][0]] = (y[j], new_state)
j += 1
# Set the incoming batch states with the new states obtained from `done`.
batch_states = self.batch_initialize_states(batch_states, [d_state for y_j, d_state in done])
# Create batch of all output scores
# List[1, 1, H] -> [B, 1, H]
batch_y = torch.stack([y_j for y_j, d_state in done])
# Extract the last tokens from all hypotheses and convert to a tensor
lm_tokens = torch.tensor([h.y_sequence[-1] for h in hypotheses], device=device, dtype=torch.long).view(
final_batch
)
return batch_y, batch_states, lm_tokens
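# --- Editorial usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the stateless prediction network. The prednet configuration
# and sizes below are assumptions chosen only to illustrate the shapes documented in
# input_types/output_types.
def _stateless_decoder_usage_sketch():
    decoder = StatelessTransducerDecoder(
        prednet={"pred_hidden": 320, "dropout": 0.1},
        vocab_size=128,  # excluding the RNNT blank token
        context_size=2,
    )
    targets = torch.randint(0, 128, (4, 10))  # (B, U)
    target_length = torch.full((4,), 10, dtype=torch.int32)
    outputs, prednet_lengths, states = decoder(targets=targets, target_length=target_length)
    # outputs: (B, pred_hidden, U + 1) because add_sos prepends a start-of-signal step.
    return outputs, prednet_lengths, states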
class RNNTDecoder(rnnt_abstract.AbstractRNNTDecoder, Exportable, AdapterModuleMixin):
"""A Recurrent Neural Network Transducer Decoder / Prediction Network (RNN-T Prediction Network).
An RNN-T Decoder/Prediction network, comprised of a stateful LSTM model.
Args:
prednet: A dict-like object which contains the following key-value pairs.
pred_hidden: int specifying the hidden dimension of the prediction net.
pred_rnn_layers: int specifying the number of rnn layers.
Optionally, it may also contain the following:
forget_gate_bias: float, set by default to 1.0, which constructs a forget gate
initialized to 1.0.
Reference:
[An Empirical Exploration of Recurrent Network Architectures](http://proceedings.mlr.press/v37/jozefowicz15.pdf)
t_max: int value, set to None by default. If an int is specified, performs Chrono Initialization
of the LSTM network, based on the maximum number of timesteps `t_max` expected during the course
of training.
Reference:
[Can recurrent neural networks warp time?](https://openreview.net/forum?id=SJcKhk-Ab)
weights_init_scale: Float scale of the weights after initialization. Setting to lower than one
sometimes helps reduce variance between runs.
hidden_hidden_bias_scale: Float scale for the hidden-to-hidden bias scale. Set to 0.0 for
the default behaviour.
dropout: float, set to 0.0 by default. Optional dropout applied at the end of the final LSTM RNN layer.
vocab_size: int, specifying the vocabulary size of the embedding layer of the Prediction network,
excluding the RNNT blank token.
        normalization_mode: Can be None, 'batch' or 'layer'. By default, it is set to None.
Defines the type of normalization applied to the RNN layer.
random_state_sampling: bool, set to False by default. When set, provides normal-distribution
sampled state tensors instead of zero tensors during training.
Reference:
[Recognizing long-form speech using streaming end-to-end models](https://arxiv.org/abs/1910.11455)
blank_as_pad: bool, set to True by default. When set, will add a token to the Embedding layer of this
prediction network, and will treat this token as a pad token. In essence, the RNNT pad token will
be treated as a pad token, and the embedding layer will return a zero tensor for this token.
It is set by default as it enables various batch optimizations required for batched beam search.
Therefore, it is not recommended to disable this flag.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"targets": NeuralType(('B', 'T'), LabelsType()),
"target_length": NeuralType(tuple('B'), LengthsType()),
"states": [NeuralType(('D', 'B', 'D'), ElementType(), optional=True)], # must always be last
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {
"outputs": NeuralType(('B', 'D', 'T'), EmbeddedTextType()),
"prednet_lengths": NeuralType(tuple('B'), LengthsType()),
"states": [NeuralType((('D', 'B', 'D')), ElementType(), optional=True)], # must always be last
}
def input_example(self, max_batch=1, max_dim=1):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
length = max_dim
targets = torch.full(fill_value=self.blank_idx, size=(max_batch, length), dtype=torch.int32).to(
next(self.parameters()).device
)
target_length = torch.randint(0, length, size=(max_batch,), dtype=torch.int32).to(
next(self.parameters()).device
)
states = tuple(self.initialize_state(targets.float()))
return (targets, target_length, states)
def _prepare_for_export(self, **kwargs):
self._rnnt_export = True
super()._prepare_for_export(**kwargs)
def __init__(
self,
prednet: Dict[str, Any],
vocab_size: int,
normalization_mode: Optional[str] = None,
random_state_sampling: bool = False,
blank_as_pad: bool = True,
):
# Required arguments
self.pred_hidden = prednet['pred_hidden']
self.pred_rnn_layers = prednet["pred_rnn_layers"]
self.blank_idx = vocab_size
# Initialize the model (blank token increases vocab size by 1)
super().__init__(vocab_size=vocab_size, blank_idx=self.blank_idx, blank_as_pad=blank_as_pad)
# Optional arguments
forget_gate_bias = prednet.get('forget_gate_bias', 1.0)
t_max = prednet.get('t_max', None)
weights_init_scale = prednet.get('weights_init_scale', 1.0)
hidden_hidden_bias_scale = prednet.get('hidden_hidden_bias_scale', 0.0)
dropout = prednet.get('dropout', 0.0)
self.random_state_sampling = random_state_sampling
self.prediction = self._predict_modules(
vocab_size=vocab_size, # add 1 for blank symbol
pred_n_hidden=self.pred_hidden,
pred_rnn_layers=self.pred_rnn_layers,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
norm=normalization_mode,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
dropout=dropout,
rnn_hidden_size=prednet.get("rnn_hidden_size", -1),
)
self._rnnt_export = False
@typecheck()
def forward(self, targets, target_length, states=None):
# y: (B, U)
y = rnn.label_collate(targets)
# state maintenance is unnecessary during training forward call
# to get state, use .predict() method.
if self._rnnt_export:
add_sos = False
else:
add_sos = True
g, states = self.predict(y, state=states, add_sos=add_sos) # (B, U, D)
g = g.transpose(1, 2) # (B, D, U)
return g, target_length, states
def predict(
self,
y: Optional[torch.Tensor] = None,
state: Optional[List[torch.Tensor]] = None,
add_sos: bool = True,
batch_size: Optional[int] = None,
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Stateful prediction of scores and state for a (possibly null) tokenset.
This method takes various cases into consideration :
- No token, no state - used for priming the RNN
- No token, state provided - used for blank token scoring
- Given token, states - used for scores + new states
Here:
B - batch size
U - label length
H - Hidden dimension size of RNN
L - Number of RNN layers
Args:
y: Optional torch tensor of shape [B, U] of dtype long which will be passed to the Embedding.
                If None, creates a zero tensor of shape [B, 1, H] which mimics the output of the pad-token on the Embedding.
            state: An optional list of states for the RNN. E.g., for an LSTM, the state list has length 2.
Each state must be a tensor of shape [L, B, H].
If None, and during training mode and `random_state_sampling` is set, will sample a
normal distribution tensor of the above shape. Otherwise, None will be passed to the RNN.
add_sos: bool flag, whether a zero vector describing a "start of signal" token should be
prepended to the above "y" tensor. When set, output size is (B, U + 1, H).
            batch_size: An optional int, specifying the batch size of the `y` tensor.
                It can be inferred from `y` or `state` when either is provided, but if both are None,
                then batch_size cannot be None.
Returns:
A tuple (g, hid) such that -
If add_sos is False:
g: (B, U, H)
hid: (h, c) where h is the final sequence hidden state and c is the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
If add_sos is True:
g: (B, U + 1, H)
hid: (h, c) where h is the final sequence hidden state and c is the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
"""
# Get device and dtype of current module
_p = next(self.parameters())
device = _p.device
dtype = _p.dtype
# If y is not None, it is of shape [B, U] with dtype long.
if y is not None:
if y.device != device:
y = y.to(device)
# (B, U) -> (B, U, H)
y = self.prediction["embed"](y)
else:
# Y is not provided, assume zero tensor with shape [B, 1, H] is required
# Emulates output of embedding of pad token.
if batch_size is None:
B = 1 if state is None else state[0].size(1)
else:
B = batch_size
y = torch.zeros((B, 1, self.pred_hidden), device=device, dtype=dtype)
# Prepend blank "start of sequence" symbol (zero tensor)
if add_sos:
B, U, H = y.shape
start = torch.zeros((B, 1, H), device=y.device, dtype=y.dtype)
y = torch.cat([start, y], dim=1).contiguous() # (B, U + 1, H)
else:
start = None # makes del call later easier
# If in training mode, and random_state_sampling is set,
# initialize state to random normal distribution tensor.
if state is None:
if self.random_state_sampling and self.training:
state = self.initialize_state(y)
# Forward step through RNN
y = y.transpose(0, 1) # (U + 1, B, H)
g, hid = self.prediction["dec_rnn"](y, state)
g = g.transpose(0, 1) # (B, U + 1, H)
del y, start, state
# Adapter module forward step
if self.is_adapter_available():
g = self.forward_enabled_adapters(g)
return g, hid
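    # Editorial note (not part of the original module): the three cases documented in predict()
    # above are typically exercised as, for example,
    #   g, hid = self.predict(None, state=None, batch_size=B)   # no token, no state: prime the RNN
    #   g, hid = self.predict(None, state=hid, batch_size=B)    # no token, state given: score a blank
    #   g, hid = self.predict(tokens, state=hid)                # token + state: scores and new state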
def _predict_modules(
self,
vocab_size,
pred_n_hidden,
pred_rnn_layers,
forget_gate_bias,
t_max,
norm,
weights_init_scale,
hidden_hidden_bias_scale,
dropout,
rnn_hidden_size,
):
"""
Prepare the trainable parameters of the Prediction Network.
Args:
vocab_size: Vocab size (excluding the blank token).
pred_n_hidden: Hidden size of the RNNs.
pred_rnn_layers: Number of RNN layers.
forget_gate_bias: Whether to perform unit forget gate bias.
t_max: Whether to perform Chrono LSTM init.
norm: Type of normalization to perform in RNN.
weights_init_scale: Float scale of the weights after initialization. Setting to lower than one
sometimes helps reduce variance between runs.
hidden_hidden_bias_scale: Float scale for the hidden-to-hidden bias scale. Set to 0.0 for
the default behaviour.
dropout: Whether to apply dropout to RNN.
rnn_hidden_size: the hidden size of the RNN, if not specified, pred_n_hidden would be used
"""
if self.blank_as_pad:
embed = torch.nn.Embedding(vocab_size + 1, pred_n_hidden, padding_idx=self.blank_idx)
else:
embed = torch.nn.Embedding(vocab_size, pred_n_hidden)
layers = torch.nn.ModuleDict(
{
"embed": embed,
"dec_rnn": rnn.rnn(
input_size=pred_n_hidden,
hidden_size=rnn_hidden_size if rnn_hidden_size > 0 else pred_n_hidden,
num_layers=pred_rnn_layers,
norm=norm,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
dropout=dropout,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
proj_size=pred_n_hidden if pred_n_hidden < rnn_hidden_size else 0,
),
}
)
return layers
def initialize_state(self, y: torch.Tensor) -> List[torch.Tensor]:
"""
Initialize the state of the RNN layers, with same dtype and device as input `y`.
Args:
y: A torch.Tensor whose device the generated states will be placed on.
Returns:
List of torch.Tensor, each of shape [L, B, H], where
L = Number of RNN layers
B = Batch size
H = Hidden size of RNN.
"""
batch = y.size(0)
if self.random_state_sampling and self.training:
state = [
torch.randn(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
torch.randn(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
]
else:
state = [
torch.zeros(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
torch.zeros(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
]
return state
def score_hypothesis(
self, hypothesis: rnnt_utils.Hypothesis, cache: Dict[Tuple[int], Any]
) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
"""
Similar to the predict() method, instead this method scores a Hypothesis during beam search.
Hypothesis is a dataclass representing one hypothesis in a Beam Search.
Args:
hypothesis: Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
Returns:
Returns a tuple (y, states, lm_token) such that:
y is a torch.Tensor of shape [1, 1, H] representing the score of the last token in the Hypothesis.
state is a list of RNN states, each of shape [L, 1, H].
lm_token is the final integer token of the hypothesis.
"""
if hypothesis.dec_state is not None:
device = hypothesis.dec_state[0].device
else:
_p = next(self.parameters())
device = _p.device
# parse "blank" tokens in hypothesis
if len(hypothesis.y_sequence) > 0 and hypothesis.y_sequence[-1] == self.blank_idx:
blank_state = True
else:
blank_state = False
# Convert last token of hypothesis to torch.Tensor
target = torch.full([1, 1], fill_value=hypothesis.y_sequence[-1], device=device, dtype=torch.long)
lm_token = target[:, -1] # [1]
# Convert current hypothesis into a tuple to preserve in cache
sequence = tuple(hypothesis.y_sequence)
if sequence in cache:
y, new_state = cache[sequence]
else:
# Obtain score for target token and new states
if blank_state:
y, new_state = self.predict(None, state=None, add_sos=False, batch_size=1) # [1, 1, H]
else:
y, new_state = self.predict(
target, state=hypothesis.dec_state, add_sos=False, batch_size=1
) # [1, 1, H]
y = y[:, -1:, :] # Extract just last state : [1, 1, H]
cache[sequence] = (y, new_state)
return y, new_state, lm_token
def batch_score_hypothesis(
self, hypotheses: List[rnnt_utils.Hypothesis], cache: Dict[Tuple[int], Any], batch_states: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
"""
Used for batched beam search algorithms. Similar to score_hypothesis method.
Args:
            hypotheses: List of Hypotheses. Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
batch_states: List of torch.Tensor which represent the states of the RNN for this batch.
Each state is of shape [L, B, H]
Returns:
Returns a tuple (b_y, b_states, lm_tokens) such that:
b_y is a torch.Tensor of shape [B, 1, H] representing the scores of the last tokens in the Hypotheses.
b_state is a list of list of RNN states, each of shape [L, B, H].
Represented as B x List[states].
lm_token is a list of the final integer tokens of the hypotheses in the batch.
"""
final_batch = len(hypotheses)
if final_batch == 0:
raise ValueError("No hypotheses was provided for the batch!")
_p = next(self.parameters())
device = _p.device
dtype = _p.dtype
tokens = []
process = []
done = [None for _ in range(final_batch)]
# For each hypothesis, cache the last token of the sequence and the current states
for i, hyp in enumerate(hypotheses):
sequence = tuple(hyp.y_sequence)
if sequence in cache:
done[i] = cache[sequence]
else:
tokens.append(hyp.y_sequence[-1])
process.append((sequence, hyp.dec_state))
if process:
batch = len(process)
# convert list of tokens to torch.Tensor, then reshape.
tokens = torch.tensor(tokens, device=device, dtype=torch.long).view(batch, -1)
dec_states = self.initialize_state(tokens.to(dtype=dtype)) # [L, B, H]
dec_states = self.batch_initialize_states(dec_states, [d_state for seq, d_state in process])
y, dec_states = self.predict(
tokens, state=dec_states, add_sos=False, batch_size=batch
) # [B, 1, H], List([L, 1, H])
dec_states = tuple(state.to(dtype=dtype) for state in dec_states)
# Update done states and cache shared by entire batch.
j = 0
for i in range(final_batch):
if done[i] is None:
# Select sample's state from the batch state list
new_state = self.batch_select_state(dec_states, j)
# Cache [1, H] scores of the current y_j, and its corresponding state
done[i] = (y[j], new_state)
cache[process[j][0]] = (y[j], new_state)
j += 1
# Set the incoming batch states with the new states obtained from `done`.
batch_states = self.batch_initialize_states(batch_states, [d_state for y_j, d_state in done])
# Create batch of all output scores
# List[1, 1, H] -> [B, 1, H]
batch_y = torch.stack([y_j for y_j, d_state in done])
# Extract the last tokens from all hypotheses and convert to a tensor
lm_tokens = torch.tensor([h.y_sequence[-1] for h in hypotheses], device=device, dtype=torch.long).view(
final_batch
)
return batch_y, batch_states, lm_tokens
def batch_initialize_states(self, batch_states: List[torch.Tensor], decoder_states: List[List[torch.Tensor]]):
"""
Create batch of decoder states.
Args:
batch_states (list): batch of decoder states
([L x (B, H)], [L x (B, H)])
decoder_states (list of list): list of decoder states
[B x ([L x (1, H)], [L x (1, H)])]
Returns:
batch_states (tuple): batch of decoder states
([L x (B, H)], [L x (B, H)])
"""
# LSTM has 2 states
new_states = [[] for _ in range(len(decoder_states[0]))]
for layer in range(self.pred_rnn_layers):
for state_id in range(len(decoder_states[0])):
# batch_states[state_id][layer] = torch.stack([s[state_id][layer] for s in decoder_states])
new_state_for_layer = torch.stack([s[state_id][layer] for s in decoder_states])
new_states[state_id].append(new_state_for_layer)
for state_id in range(len(decoder_states[0])):
new_states[state_id] = torch.stack([state for state in new_states[state_id]])
return new_states
def batch_select_state(self, batch_states: List[torch.Tensor], idx: int) -> List[List[torch.Tensor]]:
"""Get decoder state from batch of states, for given id.
Args:
batch_states (list): batch of decoder states
([L x (B, H)], [L x (B, H)])
idx (int): index to extract state from batch of states
Returns:
(tuple): decoder states for given id
([L x (1, H)], [L x (1, H)])
"""
if batch_states is not None:
state_list = []
for state_id in range(len(batch_states)):
states = [batch_states[state_id][layer][idx] for layer in range(self.pred_rnn_layers)]
state_list.append(states)
return state_list
else:
return None
def batch_concat_states(self, batch_states: List[List[torch.Tensor]]) -> List[torch.Tensor]:
"""Concatenate a batch of decoder state to a packed state.
Args:
batch_states (list): batch of decoder states
B x ([L x (H)], [L x (H)])
Returns:
(tuple): decoder states
(L x B x H, L x B x H)
"""
state_list = []
for state_id in range(len(batch_states[0])):
batch_list = []
for sample_id in range(len(batch_states)):
tensor = torch.stack(batch_states[sample_id][state_id]) # [L, H]
tensor = tensor.unsqueeze(0) # [1, L, H]
batch_list.append(tensor)
state_tensor = torch.cat(batch_list, 0) # [B, L, H]
state_tensor = state_tensor.transpose(1, 0) # [L, B, H]
state_list.append(state_tensor)
return state_list
def batch_copy_states(
self,
old_states: List[torch.Tensor],
new_states: List[torch.Tensor],
ids: List[int],
value: Optional[float] = None,
) -> List[torch.Tensor]:
"""Copy states from new state to old state at certain indices.
Args:
old_states(list): packed decoder states
(L x B x H, L x B x H)
new_states: packed decoder states
(L x B x H, L x B x H)
ids (list): List of indices to copy states at.
value (optional float): If a value should be copied instead of a state slice, a float should be provided
Returns:
batch of decoder states with partial copy at ids (or a specific value).
(L x B x H, L x B x H)
"""
for state_id in range(len(old_states)):
if value is None:
old_states[state_id][:, ids, :] = new_states[state_id][:, ids, :]
else:
old_states[state_id][:, ids, :] *= 0.0
old_states[state_id][:, ids, :] += value
return old_states
# Adapter method overrides
def add_adapter(self, name: str, cfg: DictConfig):
# Update the config with correct input dim
cfg = self._update_adapter_cfg_input_dim(cfg)
# Add the adapter
super().add_adapter(name=name, cfg=cfg)
def _update_adapter_cfg_input_dim(self, cfg: DictConfig):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=self.pred_hidden)
return cfg
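# --- Editorial usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the LSTM-based prediction network. The prednet configuration
# is an assumption; only 'pred_hidden' and 'pred_rnn_layers' are required keys.
def _rnnt_decoder_usage_sketch():
    decoder = RNNTDecoder(
        prednet={"pred_hidden": 320, "pred_rnn_layers": 1, "dropout": 0.1},
        vocab_size=128,  # excluding the RNNT blank token
    )
    targets = torch.randint(0, 128, (4, 10))  # (B, U)
    target_length = torch.full((4,), 10, dtype=torch.int32)
    outputs, prednet_lengths, states = decoder(targets=targets, target_length=target_length)
    # outputs: (B, pred_hidden, U + 1); states: (h, c), each of shape (L, B, pred_hidden).
    return outputs, prednet_lengths, states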
class RNNTJoint(rnnt_abstract.AbstractRNNTJoint, Exportable, AdapterModuleMixin):
"""A Recurrent Neural Network Transducer Joint Network (RNN-T Joint Network).
An RNN-T Joint network, comprised of a feedforward model.
Args:
jointnet: A dict-like object which contains the following key-value pairs.
encoder_hidden: int specifying the hidden dimension of the encoder net.
pred_hidden: int specifying the hidden dimension of the prediction net.
joint_hidden: int specifying the hidden dimension of the joint net
activation: Activation function used in the joint step. Can be one of
['relu', 'tanh', 'sigmoid'].
Optionally, it may also contain the following:
dropout: float, set to 0.0 by default. Optional dropout applied at the end of the joint net.
num_classes: int, specifying the vocabulary size that the joint network must predict,
excluding the RNNT blank token.
vocabulary: Optional list of strings/tokens that comprise the vocabulary of the joint network.
Unused and kept only for easy access for character based encoding RNNT models.
        log_softmax: Optional bool, set to None by default. If None, log_softmax() is applied
            automatically only when the joint output resides on CPU; if a bool is provided, it
            explicitly controls whether log_softmax() is applied.
preserve_memory: Optional bool, set to False by default. If the model crashes due to the memory
intensive joint step, one might try this flag to empty the tensor cache in pytorch.
Warning: This will make the forward-backward pass much slower than normal.
It also might not fix the OOM if the GPU simply does not have enough memory to compute the joint.
fuse_loss_wer: Optional bool, set to False by default.
Fuses the joint forward, loss forward and
            wer forward steps. In doing so, it trades off speed for memory conservation by creating sub-batches
of the provided batch of inputs, and performs Joint forward, loss forward and wer forward (optional),
all on sub-batches, then collates results to be exactly equal to results from the entire batch.
When this flag is set, prior to calling forward, the fields `loss` and `wer` (either one) *must*
be set using the `RNNTJoint.set_loss()` or `RNNTJoint.set_wer()` methods.
Further, when this flag is set, the following argument `fused_batch_size` *must* be provided
as a non negative integer. This value refers to the size of the sub-batch.
When the flag is set, the input and output signature of `forward()` of this method changes.
Input - in addition to `encoder_outputs` (mandatory argument), the following arguments can be provided.
- decoder_outputs (optional). Required if loss computation is required.
- encoder_lengths (required)
- transcripts (optional). Required for wer calculation.
- transcript_lengths (optional). Required for wer calculation.
- compute_wer (bool, default false). Whether to compute WER or not for the fused batch.
Output - instead of the usual `joint` log prob tensor, the following results can be returned.
                - loss (optional). Returned if decoder_outputs, transcripts and transcript_lengths are not None.
- wer_numerator + wer_denominator (optional). Returned if transcripts, transcripts_lengths are provided
and compute_wer is set.
fused_batch_size: Optional int, required if `fuse_loss_wer` flag is set. Determines the size of the
sub-batches. Should be any value below the actual batch size per GPU.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"encoder_outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"decoder_outputs": NeuralType(('B', 'D', 'T'), EmbeddedTextType()),
"encoder_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"transcripts": NeuralType(('B', 'T'), LabelsType(), optional=True),
"transcript_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"compute_wer": NeuralType(optional=True),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
if not self._fuse_loss_wer:
return {
"outputs": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
}
else:
return {
"loss": NeuralType(elements_type=LossType(), optional=True),
"wer": NeuralType(elements_type=ElementType(), optional=True),
"wer_numer": NeuralType(elements_type=ElementType(), optional=True),
"wer_denom": NeuralType(elements_type=ElementType(), optional=True),
}
def _prepare_for_export(self, **kwargs):
self._fuse_loss_wer = False
self.log_softmax = False
super()._prepare_for_export(**kwargs)
def input_example(self, max_batch=1, max_dim=8192):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
B, T, U = max_batch, max_dim, max_batch
encoder_outputs = torch.randn(B, self.encoder_hidden, T).to(next(self.parameters()).device)
decoder_outputs = torch.randn(B, self.pred_hidden, U).to(next(self.parameters()).device)
return (encoder_outputs, decoder_outputs)
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
return set(["encoder_lengths", "transcripts", "transcript_lengths", "compute_wer"])
def __init__(
self,
jointnet: Dict[str, Any],
num_classes: int,
num_extra_outputs: int = 0,
vocabulary: Optional[List] = None,
log_softmax: Optional[bool] = None,
preserve_memory: bool = False,
fuse_loss_wer: bool = False,
fused_batch_size: Optional[int] = None,
experimental_fuse_loss_wer: Any = None,
):
super().__init__()
self.vocabulary = vocabulary
self._vocab_size = num_classes
self._num_extra_outputs = num_extra_outputs
self._num_classes = num_classes + 1 + num_extra_outputs # 1 is for blank
if experimental_fuse_loss_wer is not None:
# Override fuse_loss_wer from deprecated argument
fuse_loss_wer = experimental_fuse_loss_wer
self._fuse_loss_wer = fuse_loss_wer
self._fused_batch_size = fused_batch_size
if fuse_loss_wer and (fused_batch_size is None):
raise ValueError("If `fuse_loss_wer` is set, then `fused_batch_size` cannot be None!")
self._loss = None
self._wer = None
# Log softmax should be applied explicitly only for CPU
self.log_softmax = log_softmax
self.preserve_memory = preserve_memory
if preserve_memory:
logging.warning(
"`preserve_memory` was set for the Joint Model. Please be aware this will severely impact "
"the forward-backward step time. It also might not solve OOM issues if the GPU simply "
"does not have enough memory to compute the joint."
)
# Required arguments
self.encoder_hidden = jointnet['encoder_hidden']
self.pred_hidden = jointnet['pred_hidden']
self.joint_hidden = jointnet['joint_hidden']
self.activation = jointnet['activation']
# Optional arguments
dropout = jointnet.get('dropout', 0.0)
self.pred, self.enc, self.joint_net = self._joint_net_modules(
num_classes=self._num_classes, # add 1 for blank symbol
pred_n_hidden=self.pred_hidden,
enc_n_hidden=self.encoder_hidden,
joint_n_hidden=self.joint_hidden,
activation=self.activation,
dropout=dropout,
)
# Flag needed for RNNT export support
self._rnnt_export = False
# to change, requires running ``model.temperature = T`` explicitly
self.temperature = 1.0
@typecheck()
def forward(
self,
encoder_outputs: torch.Tensor,
decoder_outputs: Optional[torch.Tensor],
encoder_lengths: Optional[torch.Tensor] = None,
transcripts: Optional[torch.Tensor] = None,
transcript_lengths: Optional[torch.Tensor] = None,
compute_wer: bool = False,
) -> Union[torch.Tensor, List[Optional[torch.Tensor]]]:
# encoder = (B, D, T)
# decoder = (B, D, U) if passed, else None
encoder_outputs = encoder_outputs.transpose(1, 2) # (B, T, D)
if decoder_outputs is not None:
decoder_outputs = decoder_outputs.transpose(1, 2) # (B, U, D)
if not self._fuse_loss_wer:
if decoder_outputs is None:
raise ValueError(
"decoder_outputs passed is None, and `fuse_loss_wer` is not set. "
"decoder_outputs can only be None for fused step!"
)
out = self.joint(encoder_outputs, decoder_outputs) # [B, T, U, V + 1]
return out
else:
# At least the loss module must be supplied during fused joint
if self._loss is None or self._wer is None:
raise ValueError("`fuse_loss_wer` flag is set, but `loss` and `wer` modules were not provided! ")
# If fused joint step is required, fused batch size is required as well
if self._fused_batch_size is None:
raise ValueError("If `fuse_loss_wer` is set, then `fused_batch_size` cannot be None!")
# When using fused joint step, both encoder and transcript lengths must be provided
if (encoder_lengths is None) or (transcript_lengths is None):
raise ValueError(
"`fuse_loss_wer` is set, therefore encoder and target lengths " "must be provided as well!"
)
losses = []
target_lengths = []
batch_size = int(encoder_outputs.size(0)) # actual batch size
# Iterate over batch using fused_batch_size steps
for batch_idx in range(0, batch_size, self._fused_batch_size):
begin = batch_idx
end = min(begin + self._fused_batch_size, batch_size)
# Extract the sub batch inputs
# sub_enc = encoder_outputs[begin:end, ...]
# sub_transcripts = transcripts[begin:end, ...]
sub_enc = encoder_outputs.narrow(dim=0, start=begin, length=int(end - begin))
sub_transcripts = transcripts.narrow(dim=0, start=begin, length=int(end - begin))
sub_enc_lens = encoder_lengths[begin:end]
sub_transcript_lens = transcript_lengths[begin:end]
# Sub transcripts does not need the full padding of the entire batch
# Therefore reduce the decoder time steps to match
max_sub_enc_length = sub_enc_lens.max()
max_sub_transcript_length = sub_transcript_lens.max()
if decoder_outputs is not None:
# Reduce encoder length to preserve computation
# Encoder: [sub-batch, T, D] -> [sub-batch, T', D]; T' < T
if sub_enc.shape[1] != max_sub_enc_length:
sub_enc = sub_enc.narrow(dim=1, start=0, length=int(max_sub_enc_length))
# sub_dec = decoder_outputs[begin:end, ...] # [sub-batch, U, D]
sub_dec = decoder_outputs.narrow(dim=0, start=begin, length=int(end - begin)) # [sub-batch, U, D]
# Reduce decoder length to preserve computation
# Decoder: [sub-batch, U, D] -> [sub-batch, U', D]; U' < U
if sub_dec.shape[1] != max_sub_transcript_length + 1:
sub_dec = sub_dec.narrow(dim=1, start=0, length=int(max_sub_transcript_length + 1))
# Perform joint => [sub-batch, T', U', V + 1]
sub_joint = self.joint(sub_enc, sub_dec)
del sub_dec
# Reduce transcript length to correct alignment
# Transcript: [sub-batch, L] -> [sub-batch, L']; L' <= L
if sub_transcripts.shape[1] != max_sub_transcript_length:
sub_transcripts = sub_transcripts.narrow(dim=1, start=0, length=int(max_sub_transcript_length))
# Compute sub batch loss
# preserve loss reduction type
loss_reduction = self.loss.reduction
# override loss reduction to sum
self.loss.reduction = None
# compute and preserve loss
loss_batch = self.loss(
log_probs=sub_joint,
targets=sub_transcripts,
input_lengths=sub_enc_lens,
target_lengths=sub_transcript_lens,
)
losses.append(loss_batch)
target_lengths.append(sub_transcript_lens)
# reset loss reduction type
self.loss.reduction = loss_reduction
else:
losses = None
# Update WER for sub batch
if compute_wer:
sub_enc = sub_enc.transpose(1, 2) # [B, T, D] -> [B, D, T]
sub_enc = sub_enc.detach()
sub_transcripts = sub_transcripts.detach()
# Update WER on each process without syncing
self.wer.update(sub_enc, sub_enc_lens, sub_transcripts, sub_transcript_lens)
del sub_enc, sub_transcripts, sub_enc_lens, sub_transcript_lens
# Reduce over sub batches
if losses is not None:
losses = self.loss.reduce(losses, target_lengths)
# Collect sub batch wer results
if compute_wer:
# Sync and all_reduce on all processes, compute global WER
wer, wer_num, wer_denom = self.wer.compute()
self.wer.reset()
else:
wer = None
wer_num = None
wer_denom = None
return losses, wer, wer_num, wer_denom
def joint(self, f: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
"""
Compute the joint step of the network.
Here,
B = Batch size
T = Acoustic model timesteps
U = Target sequence length
H1, H2 = Hidden dimensions of the Encoder / Decoder respectively
H = Hidden dimension of the Joint hidden step.
V = Vocabulary size of the Decoder (excluding the RNNT blank token).
NOTE:
The implementation of this model is slightly modified from the original paper.
The original paper proposes the following steps :
(enc, dec) -> Expand + Concat + Sum [B, T, U, H1+H2] -> Forward through joint hidden [B, T, U, H] -- *1
*1 -> Forward through joint final [B, T, U, V + 1].
We instead split the joint hidden into joint_hidden_enc and joint_hidden_dec and act as follows:
enc -> Forward through joint_hidden_enc -> Expand [B, T, 1, H] -- *1
dec -> Forward through joint_hidden_dec -> Expand [B, 1, U, H] -- *2
(*1, *2) -> Sum [B, T, U, H] -> Forward through joint final [B, T, U, V + 1].
Args:
f: Output of the Encoder model. A torch.Tensor of shape [B, T, H1]
g: Output of the Decoder model. A torch.Tensor of shape [B, U, H2]
Returns:
Logits / log softmaxed tensor of shape (B, T, U, V + 1).
"""
# f = [B, T, H1]
f = self.enc(f)
f.unsqueeze_(dim=2) # (B, T, 1, H)
# g = [B, U, H2]
g = self.pred(g)
g.unsqueeze_(dim=1) # (B, 1, U, H)
inp = f + g # [B, T, U, H]
del f, g
# Forward adapter modules on joint hidden
if self.is_adapter_available():
inp = self.forward_enabled_adapters(inp)
res = self.joint_net(inp) # [B, T, U, V + 1]
del inp
if self.preserve_memory:
torch.cuda.empty_cache()
# If log_softmax is automatic
if self.log_softmax is None:
if not res.is_cuda: # Use log softmax only if on CPU
if self.temperature != 1.0:
res = (res / self.temperature).log_softmax(dim=-1)
else:
res = res.log_softmax(dim=-1)
else:
if self.log_softmax:
if self.temperature != 1.0:
res = (res / self.temperature).log_softmax(dim=-1)
else:
res = res.log_softmax(dim=-1)
return res
def _joint_net_modules(self, num_classes, pred_n_hidden, enc_n_hidden, joint_n_hidden, activation, dropout):
"""
Prepare the trainable modules of the Joint Network
Args:
num_classes: Number of output classes (vocab size) excluding the RNNT blank token.
pred_n_hidden: Hidden size of the prediction network.
enc_n_hidden: Hidden size of the encoder network.
joint_n_hidden: Hidden size of the joint network.
activation: Activation of the joint. Can be one of [relu, tanh, sigmoid]
dropout: Dropout value to apply to joint.
"""
pred = torch.nn.Linear(pred_n_hidden, joint_n_hidden)
enc = torch.nn.Linear(enc_n_hidden, joint_n_hidden)
        activation = activation.lower()
        if activation not in ['relu', 'sigmoid', 'tanh']:
            raise ValueError("Unsupported activation for joint step - please pass one of [relu, sigmoid, tanh]")
if activation == 'relu':
activation = torch.nn.ReLU(inplace=True)
elif activation == 'sigmoid':
activation = torch.nn.Sigmoid()
elif activation == 'tanh':
activation = torch.nn.Tanh()
layers = (
[activation]
+ ([torch.nn.Dropout(p=dropout)] if dropout else [])
+ [torch.nn.Linear(joint_n_hidden, num_classes)]
)
return pred, enc, torch.nn.Sequential(*layers)
# Adapter method overrides
def add_adapter(self, name: str, cfg: DictConfig):
# Update the config with correct input dim
cfg = self._update_adapter_cfg_input_dim(cfg)
# Add the adapter
super().add_adapter(name=name, cfg=cfg)
def _update_adapter_cfg_input_dim(self, cfg: DictConfig):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=self.joint_hidden)
return cfg
@property
def num_classes_with_blank(self):
return self._num_classes
@property
def num_extra_outputs(self):
return self._num_extra_outputs
@property
def loss(self):
return self._loss
def set_loss(self, loss):
if not self._fuse_loss_wer:
raise ValueError("Attempting to set loss module even though `fuse_loss_wer` is not set!")
self._loss = loss
@property
def wer(self):
return self._wer
def set_wer(self, wer):
if not self._fuse_loss_wer:
raise ValueError("Attempting to set WER module even though `fuse_loss_wer` is not set!")
self._wer = wer
@property
def fuse_loss_wer(self):
return self._fuse_loss_wer
def set_fuse_loss_wer(self, fuse_loss_wer, loss=None, metric=None):
self._fuse_loss_wer = fuse_loss_wer
self._loss = loss
self._wer = metric
@property
def fused_batch_size(self):
        return self._fused_batch_size
def set_fused_batch_size(self, fused_batch_size):
self._fused_batch_size = fused_batch_size
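# --- Illustrative sketch (not part of the NeMo API) -------------------------------
# The factored joint described in RNNTJoint.joint() above, reduced to plain broadcasting.
# Sizes and the Linear/Sequential stand-ins for self.enc / self.pred / self.joint_net are
# toy assumptions, not the module's real configuration.
def _sketch_factored_joint():
    import torch

    B, T, U, H1, H2, H, V = 2, 6, 4, 8, 8, 16, 10  # V excludes the RNNT blank
    f = torch.randn(B, T, H1)  # encoder output
    g = torch.randn(B, U, H2)  # prediction-network output
    enc_proj = torch.nn.Linear(H1, H)
    pred_proj = torch.nn.Linear(H2, H)
    joint_net = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Linear(H, V + 1))
    # Project separately, then expand and broadcast-add instead of concatenating [B, T, U, H1 + H2]
    f = enc_proj(f).unsqueeze(2)  # [B, T, 1, H]
    g = pred_proj(g).unsqueeze(1)  # [B, 1, U, H]
    return joint_net(f + g).shape  # [B, T, U, H] -> [B, T, U, V + 1] == torch.Size([2, 6, 4, 11])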
class RNNTDecoderJoint(torch.nn.Module, Exportable):
"""
Utility class to export Decoder+Joint as a single module
"""
def __init__(self, decoder, joint):
super().__init__()
self.decoder = decoder
self.joint = joint
@property
def input_types(self):
state_type = NeuralType(('D', 'B', 'D'), ElementType())
mytypes = {
'encoder_outputs': NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"target_length": NeuralType(tuple('B'), LengthsType()),
'input_states_1': state_type,
'input_states_2': state_type,
}
return mytypes
def input_example(self, max_batch=1, max_dim=1):
decoder_example = self.decoder.input_example(max_batch=max_batch, max_dim=max_dim)
state1, state2 = decoder_example[-1]
return tuple([self.joint.input_example()[0]]) + decoder_example[:2] + (state1, state2)
@property
def output_types(self):
return {
"outputs": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
"prednet_lengths": NeuralType(tuple('B'), LengthsType()),
"output_states_1": NeuralType((('D', 'B', 'D')), ElementType()),
"output_states_2": NeuralType((('D', 'B', 'D')), ElementType()),
}
def forward(self, encoder_outputs, targets, target_length, input_states_1, input_states_2):
decoder_outputs = self.decoder(targets, target_length, (input_states_1, input_states_2))
decoder_output = decoder_outputs[0]
decoder_length = decoder_outputs[1]
input_states_1, input_states_2 = decoder_outputs[2][0], decoder_outputs[2][1]
joint_output = self.joint(encoder_outputs, decoder_output)
return (joint_output, decoder_length, input_states_1, input_states_2)
class RNNTDecoderJointSSL(torch.nn.Module):
def __init__(self, decoder, joint):
super().__init__()
self.decoder = decoder
self.joint = joint
@property
def needs_labels(self):
return True
@property
def input_types(self):
return {
"encoder_output": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"target_lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {"log_probs": NeuralType(('B', 'T', 'D'), SpectrogramType())}
def forward(self, encoder_output, targets, target_lengths):
decoder, target_length, states = self.decoder(targets=targets, target_length=target_lengths)
log_probs = self.joint(encoder_outputs=encoder_output, decoder_outputs=decoder)
return log_probs
class SampledRNNTJoint(RNNTJoint):
"""A Sampled Recurrent Neural Network Transducer Joint Network (RNN-T Joint Network).
An RNN-T Joint network, comprised of a feedforward model, where the vocab size will be sampled instead
of computing the full vocabulary joint.
Args:
jointnet: A dict-like object which contains the following key-value pairs.
encoder_hidden: int specifying the hidden dimension of the encoder net.
pred_hidden: int specifying the hidden dimension of the prediction net.
joint_hidden: int specifying the hidden dimension of the joint net
activation: Activation function used in the joint step. Can be one of
['relu', 'tanh', 'sigmoid'].
Optionally, it may also contain the following:
dropout: float, set to 0.0 by default. Optional dropout applied at the end of the joint net.
num_classes: int, specifying the vocabulary size that the joint network must predict,
excluding the RNNT blank token.
n_samples: int, specifies the number of tokens to sample from the vocabulary space,
excluding the RNNT blank token. If a given value is larger than the entire vocabulary size,
then the full vocabulary will be used.
vocabulary: Optional list of strings/tokens that comprise the vocabulary of the joint network.
Unused and kept only for easy access for character based encoding RNNT models.
        log_softmax: Optional bool, set to None by default. If left as None, log_softmax() is applied
            automatically only when the joint tensor is on CPU; if set to True or False, that value is used directly.
preserve_memory: Optional bool, set to False by default. If the model crashes due to the memory
intensive joint step, one might try this flag to empty the tensor cache in pytorch.
Warning: This will make the forward-backward pass much slower than normal.
It also might not fix the OOM if the GPU simply does not have enough memory to compute the joint.
fuse_loss_wer: Optional bool, set to False by default.
Fuses the joint forward, loss forward and
            wer forward steps. In doing so, it trades off speed for memory conservation by creating sub-batches
of the provided batch of inputs, and performs Joint forward, loss forward and wer forward (optional),
all on sub-batches, then collates results to be exactly equal to results from the entire batch.
When this flag is set, prior to calling forward, the fields `loss` and `wer` (either one) *must*
be set using the `RNNTJoint.set_loss()` or `RNNTJoint.set_wer()` methods.
Further, when this flag is set, the following argument `fused_batch_size` *must* be provided
as a non negative integer. This value refers to the size of the sub-batch.
When the flag is set, the input and output signature of `forward()` of this method changes.
Input - in addition to `encoder_outputs` (mandatory argument), the following arguments can be provided.
- decoder_outputs (optional). Required if loss computation is required.
- encoder_lengths (required)
- transcripts (optional). Required for wer calculation.
- transcript_lengths (optional). Required for wer calculation.
- compute_wer (bool, default false). Whether to compute WER or not for the fused batch.
Output - instead of the usual `joint` log prob tensor, the following results can be returned.
            - loss (optional). Returned if decoder_outputs, transcripts and transcript_lengths are not None.
            - wer_numerator + wer_denominator (optional). Returned if transcripts, transcript_lengths are provided
and compute_wer is set.
fused_batch_size: Optional int, required if `fuse_loss_wer` flag is set. Determines the size of the
sub-batches. Should be any value below the actual batch size per GPU.
"""
def __init__(
self,
jointnet: Dict[str, Any],
num_classes: int,
n_samples: int,
vocabulary: Optional[List] = None,
log_softmax: Optional[bool] = None,
preserve_memory: bool = False,
fuse_loss_wer: bool = False,
fused_batch_size: Optional[int] = None,
):
super().__init__(
jointnet=jointnet,
num_classes=num_classes,
vocabulary=vocabulary,
log_softmax=log_softmax,
preserve_memory=preserve_memory,
fuse_loss_wer=fuse_loss_wer,
fused_batch_size=fused_batch_size,
)
self.n_samples = n_samples
self.register_buffer('blank_id', torch.tensor([self.num_classes_with_blank - 1]), persistent=False)
@typecheck()
def forward(
self,
encoder_outputs: torch.Tensor,
decoder_outputs: Optional[torch.Tensor],
encoder_lengths: Optional[torch.Tensor] = None,
transcripts: Optional[torch.Tensor] = None,
transcript_lengths: Optional[torch.Tensor] = None,
compute_wer: bool = False,
) -> Union[torch.Tensor, List[Optional[torch.Tensor]]]:
# If in inference mode, revert to basic RNNT Joint behaviour.
# Sampled RNNT is only used for training.
if not torch.is_grad_enabled() or torch.is_inference_mode_enabled():
# Simply call full tensor joint
return super().forward(
encoder_outputs=encoder_outputs,
decoder_outputs=decoder_outputs,
encoder_lengths=encoder_lengths,
transcripts=transcripts,
transcript_lengths=transcript_lengths,
compute_wer=compute_wer,
)
if transcripts is None or transcript_lengths is None:
logging.warning(
"Sampled RNNT Joint currently only works with `fuse_loss_wer` set to True, "
"and when `fused_batch_size` is a positive integer."
)
raise ValueError(
"Sampled RNNT loss only works when the transcripts are provided during training."
"Please ensure that you correctly pass the `transcripts` and `transcript_lengths`."
)
# encoder = (B, D, T)
# decoder = (B, D, U) if passed, else None
encoder_outputs = encoder_outputs.transpose(1, 2) # (B, T, D)
if decoder_outputs is not None:
decoder_outputs = decoder_outputs.transpose(1, 2) # (B, U, D)
# At least the loss module must be supplied during fused joint
if self._loss is None or self._wer is None:
raise ValueError("`fuse_loss_wer` flag is set, but `loss` and `wer` modules were not provided! ")
# If fused joint step is required, fused batch size is required as well
if self._fused_batch_size is None:
raise ValueError("If `fuse_loss_wer` is set, then `fused_batch_size` cannot be None!")
# When using fused joint step, both encoder and transcript lengths must be provided
if (encoder_lengths is None) or (transcript_lengths is None):
raise ValueError(
"`fuse_loss_wer` is set, therefore encoder and target lengths " "must be provided as well!"
)
losses = []
target_lengths = []
batch_size = int(encoder_outputs.size(0)) # actual batch size
# Iterate over batch using fused_batch_size steps
for batch_idx in range(0, batch_size, self._fused_batch_size):
begin = batch_idx
end = min(begin + self._fused_batch_size, batch_size)
# Extract the sub batch inputs
# sub_enc = encoder_outputs[begin:end, ...]
# sub_transcripts = transcripts[begin:end, ...]
sub_enc = encoder_outputs.narrow(dim=0, start=begin, length=int(end - begin))
sub_transcripts = transcripts.narrow(dim=0, start=begin, length=int(end - begin))
sub_enc_lens = encoder_lengths[begin:end]
sub_transcript_lens = transcript_lengths[begin:end]
            # Sub transcripts do not need the full padding of the entire batch
            # Therefore reduce the decoder time steps to match
max_sub_enc_length = sub_enc_lens.max()
max_sub_transcript_length = sub_transcript_lens.max()
if decoder_outputs is not None:
# Reduce encoder length to preserve computation
# Encoder: [sub-batch, T, D] -> [sub-batch, T', D]; T' < T
if sub_enc.shape[1] != max_sub_enc_length:
sub_enc = sub_enc.narrow(dim=1, start=0, length=int(max_sub_enc_length))
# sub_dec = decoder_outputs[begin:end, ...] # [sub-batch, U, D]
sub_dec = decoder_outputs.narrow(dim=0, start=begin, length=int(end - begin)) # [sub-batch, U, D]
# Reduce decoder length to preserve computation
# Decoder: [sub-batch, U, D] -> [sub-batch, U', D]; U' < U
if sub_dec.shape[1] != max_sub_transcript_length + 1:
sub_dec = sub_dec.narrow(dim=1, start=0, length=int(max_sub_transcript_length + 1))
# Reduce transcript length to correct alignment
# Transcript: [sub-batch, L] -> [sub-batch, L']; L' <= L
if sub_transcripts.shape[1] != max_sub_transcript_length:
sub_transcripts = sub_transcripts.narrow(dim=1, start=0, length=int(max_sub_transcript_length))
                # Perform sampled joint => [sub-batch, T', U', V' + 1] where V' < V
sub_joint, sub_transcripts_remapped = self.sampled_joint(
sub_enc, sub_dec, transcript=sub_transcripts, transcript_lengths=sub_transcript_lens
)
del sub_dec
# Compute sub batch loss
# preserve loss reduction type
loss_reduction = self.loss.reduction
                # temporarily disable loss reduction so per-sample losses can be collected and reduced later
self.loss.reduction = None
# override blank idx in order to map to new vocabulary space
                # in the new vocabulary space, the RNNT blank is remapped from the last index (V) to index 0.
# So the loss here needs to be updated accordingly.
# TODO: See if we can have some formal API for rnnt loss to update inner blank index.
cached_blank_id = self.loss._loss.blank
self.loss._loss.blank = 0
# compute and preserve loss
loss_batch = self.loss(
log_probs=sub_joint,
targets=sub_transcripts_remapped, # Note: We have to use remapped transcripts here !
input_lengths=sub_enc_lens,
target_lengths=sub_transcript_lens, # Note: Even after remap, the transcript lengths remain intact.
)
losses.append(loss_batch)
target_lengths.append(sub_transcript_lens)
# reset loss reduction type and blank id
self.loss.reduction = loss_reduction
self.loss._loss.blank = cached_blank_id
else:
losses = None
# Update WER for sub batch
if compute_wer:
sub_enc = sub_enc.transpose(1, 2) # [B, T, D] -> [B, D, T]
sub_enc = sub_enc.detach()
sub_transcripts = sub_transcripts.detach()
# Update WER on each process without syncing
self.wer.update(sub_enc, sub_enc_lens, sub_transcripts, sub_transcript_lens)
del sub_enc, sub_transcripts, sub_enc_lens, sub_transcript_lens
# Reduce over sub batches
if losses is not None:
losses = self.loss.reduce(losses, target_lengths)
# Collect sub batch wer results
if compute_wer:
# Sync and all_reduce on all processes, compute global WER
wer, wer_num, wer_denom = self.wer.compute()
self.wer.reset()
else:
wer = None
wer_num = None
wer_denom = None
return losses, wer, wer_num, wer_denom
def sampled_joint(
self, f: torch.Tensor, g: torch.Tensor, transcript: torch.Tensor, transcript_lengths: torch.Tensor,
) -> torch.Tensor:
"""
Compute the sampled joint step of the network.
# Reference
- [Memory-Efficient Training of RNN-Transducer with Sampled Softmax](https://arxiv.org/abs/2203.16868)
Here,
B = Batch size
T = Acoustic model timesteps
U = Target sequence length
H1, H2 = Hidden dimensions of the Encoder / Decoder respectively
H = Hidden dimension of the Joint hidden step.
V = Vocabulary size of the Decoder (excluding the RNNT blank token).
S = Sample size of vocabulary.
NOTE:
The implementation of this joint model is slightly modified from the original paper.
The original paper proposes the following steps :
(enc, dec) -> Expand + Concat + Sum [B, T, U, H1+H2] -> Forward through joint hidden [B, T, U, H] -- *1
*1 -> Forward through joint final [B, T, U, V + 1].
We instead split the joint hidden into joint_hidden_enc and joint_hidden_dec and act as follows:
enc -> Forward through joint_hidden_enc -> Expand [B, T, 1, H] -- *1
dec -> Forward through joint_hidden_dec -> Expand [B, 1, U, H] -- *2
(*1, *2) -> Sum [B, T, U, H] -> Sample Vocab V_Pos (for target tokens) and V_Neg ->
            (V_Neg is sampled not uniformly, but as a random permutation of all vocab tokens; the tokens in
            Intersection(V_Pos, V_Neg) are then eliminated to avoid duplicating their loss contribution) ->
            Concat new Vocab V_Sampled = Union(V_Pos, V_Neg)
-> Forward partially through the joint final to create [B, T, U, V_Sampled]
Args:
f: Output of the Encoder model. A torch.Tensor of shape [B, T, H1]
g: Output of the Decoder model. A torch.Tensor of shape [B, U, H2]
transcript: Batch of transcripts. A torch.Tensor of shape [B, U]
transcript_lengths: Batch of lengths of the transcripts. A torch.Tensor of shape [B]
Returns:
            Logits / log softmaxed tensor of shape (B, T, U, V_Sampled), along with the transcript tokens
            remapped into the sampled vocabulary space.
"""
# If under inference mode, ignore sampled joint and compute full joint.
if self.training is False or torch.is_grad_enabled() is False or torch.is_inference_mode_enabled():
# Simply call full tensor joint
return super().joint(f=f, g=g)
# Compute sampled softmax
# f = [B, T, H1]
f = self.enc(f)
f.unsqueeze_(dim=2) # (B, T, 1, H)
# g = [B, U, H2]
g = self.pred(g)
g.unsqueeze_(dim=1) # (B, 1, U, H)
inp = f + g # [B, T, U, H]
del f, g
# Forward adapter modules on joint hidden
if self.is_adapter_available():
inp = self.forward_enabled_adapters(inp)
# Do partial forward of joint net (skipping the final linear)
for module in self.joint_net[:-1]:
inp = module(inp) # [B, T, U, H]
# Begin compute of sampled RNNT joint
with torch.no_grad():
# gather true labels
transcript_vocab_ids = torch.unique(transcript)
# augment with blank token id
transcript_vocab_ids = torch.cat([self.blank_id, transcript_vocab_ids])
# Remap the transcript label ids to new positions of label ids (in the transcript_vocab_ids)
            # This is necessary because the RNNT loss doesn't care about the value, only the position of the ids
            # of the transcript tokens. We can skip this step for noise samples because those are only used for
            # softmax estimation, not for computing the actual labels.
# From `https://stackoverflow.com/a/68969697` - bucketize algo.
t_ids = torch.arange(transcript_vocab_ids.size(0), device='cpu')
mapping = {k: v for k, v in zip(transcript_vocab_ids.to('cpu'), t_ids)}
# From `https://stackoverflow.com/questions/13572448`.
palette, key = zip(*mapping.items())
t_device = transcript.device
key = torch.tensor(key, device=t_device)
palette = torch.tensor(palette, device=t_device)
# This step maps old token id to new token id in broadcasted manner.
# For example, if original transcript tokens were [2, 1, 4, 5, 4, 1]
# But after computing the unique token set of above we get
# transcript_vocab_ids = [1, 2, 4, 5] # note: pytorch returns sorted unique values thankfully
# Then we get the index map of the new vocab ids as:
# {0: 1, 1: 2, 2: 4, 3: 5}
# Now we need to map the original transcript tokens to new vocab id space
# So we construct the inverted map as follow :
# {1: 0, 2: 1, 4: 2, 5: 3}
# Then remap the original transcript tokens to new token ids
# new_transcript = [1, 0, 2, 3, 2, 0]
index = torch.bucketize(transcript.ravel(), palette)
transcript = key[index].reshape(transcript.shape)
transcript = transcript.to(t_device)
# Extract out partial weight tensor and bias tensor of just the V_Pos vocabulary from the full joint.
true_weights = self.joint_net[-1].weight[transcript_vocab_ids, :]
true_bias = self.joint_net[-1].bias[transcript_vocab_ids]
# Compute the transcript joint scores (only of vocab V_Pos)
transcript_scores = torch.matmul(inp, true_weights.transpose(0, 1)) + true_bias
# Construct acceptance criteria in vocab space, reject all tokens in Intersection(V_Pos, V_Neg)
with torch.no_grad():
            # Instead of sampling uniformly, we first create an arange over V (ignoring blank), randomly
            # shuffle this range of ids, then take the first `n_samples` vocab tokens from the permuted tensor.
            # This is good because it guarantees that no token will ever be repeated in V_Neg;
            # repeated tokens would dramatically complicate the loss calculation.
            # Furthermore, with this strategy, given `n_samples` > V + 1, we are guaranteed to get
            # V_Sampled = V (i.e., the full vocabulary will be used in such a case).
            # This is useful to debug cases where you expect the sampled vocab to reproduce exactly the
            # same training curve as the full vocab.
sample_ids = torch.randperm(n=self.num_classes_with_blank - 1, device=transcript_scores.device)[
: self.n_samples
]
            # We need to compute the Intersection(V_Pos, V_Neg), then eliminate the intersecting tokens
            # from inside V_Neg.
# First, compute the pairwise commonality to find index inside `sample_ids` which match the token id
# inside transcript_vocab_ids.
# Note: It is important to ignore the hardcoded RNNT Blank token injected at id 0 of the transcript
# vocab ids, otherwise the blank may occur twice, once for RNNT blank and once as negative sample,
# doubling the gradient of the RNNT blank token.
reject_samples = torch.where(transcript_vocab_ids[1:, None] == sample_ids[None, :])
# Let accept samples be a set of ids which is a subset of sample_ids
# such that intersection(V_Pos, accept_samples) is a null set.
accept_samples = sample_ids.clone()
# In order to construct such an accept_samples tensor, first we construct a bool map
# and fill all the indices where there is a match inside of sample_ids.
            # reject_samples is a tuple (transcript_vocab_position, sample_position) which gives a
            # many-to-many map between the N values of the transcript and the M values of sample_ids.
            # We don't care about the transcript-side matches, only the ids inside of sample_ids that matched.
sample_mask = torch.ones_like(accept_samples, dtype=torch.bool)
sample_mask[reject_samples[1]] = False
# Finally, compute the subset of tokens by selecting only those sample_ids which had no matches
accept_samples = accept_samples[sample_mask]
# Extract out partial weight tensor and bias tensor of just the V_Neg vocabulary from the full joint.
sample_weights = self.joint_net[-1].weight[accept_samples, :]
sample_bias = self.joint_net[-1].bias[accept_samples]
# Compute the noise joint scores (only of vocab V_Neg) to be used for softmax
# The quality of this sample determines the quality of the softmax gradient.
        # We use a naive algorithm broadcasted over the batch, but it is more efficient than sample-level computation.
        # One can increase `n_samples` for a better estimate of the rejection samples and their gradient.
noise_scores = torch.matmul(inp, sample_weights.transpose(0, 1)) + sample_bias
# Finally, construct the sampled joint as the V_Sampled = Union(V_Pos, V_Neg)
# Here, we simply concatenate the two tensors to construct the joint with V_Sampled vocab
# because before we have properly asserted that Intersection(V_Pos, V_Neg) is a null set.
res = torch.cat([transcript_scores, noise_scores], dim=-1)
del inp
if self.preserve_memory:
torch.cuda.empty_cache()
# If log_softmax is automatic
if self.log_softmax is None:
if not res.is_cuda: # Use log softmax only if on CPU
res = res.log_softmax(dim=-1)
else:
if self.log_softmax:
res = res.log_softmax(dim=-1)
return res, transcript
# Add the adapter compatible modules to the registry
for cls in [RNNTDecoder, RNNTJoint, SampledRNNTJoint]:
if adapter_mixins.get_registered_adapter(cls) is None:
adapter_mixins.register_adapter(cls, cls) # base class is adapter compatible itself
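# --- Illustrative sketch (not part of the NeMo API) -------------------------------
# The transcript remap and negative-sampling steps described in SampledRNNTJoint.sampled_joint()
# above, using the worked example from its comments. `vocab_size` and `n_samples` are toy
# assumptions; the library builds the remap via a palette/key mapping, while this sketch uses
# torch.bucketize against the sorted positive ids only.
def _sketch_sampled_vocab_construction():
    import torch

    transcript = torch.tensor([2, 1, 4, 5, 4, 1])
    vocab_size = 8  # hypothetical vocabulary size, excluding the RNNT blank
    # V_Pos: unique target tokens (torch.unique returns them sorted)
    pos_ids = torch.unique(transcript)  # tensor([1, 2, 4, 5])
    # Remap absolute token ids to their positions inside pos_ids; the RNNT loss only
    # cares about positions within the sampled vocab, not the original id values.
    remapped = torch.bucketize(transcript, pos_ids)  # tensor([1, 0, 2, 3, 2, 0])
    # V_Neg: a random permutation of the vocabulary truncated to n_samples, with anything
    # already in V_Pos rejected so that Union(V_Pos, V_Neg) contains no duplicates.
    n_samples = 4
    sample_ids = torch.randperm(vocab_size)[:n_samples]
    keep = ~(pos_ids[:, None] == sample_ids[None, :]).any(dim=0)
    return remapped, pos_ids, sample_ids[keep]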
|
NeMo-main
|
nemo/collections/asr/modules/rnnt.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
import torch.nn as nn
from nemo.collections.asr.modules.transformer.transformer_modules import MultiHeadAttention, PositionWiseFF
from nemo.collections.common.parts import form_attention_mask
__all__ = ["TransformerEncoder"]
class TransformerEncoderBlock(nn.Module):
"""
Building block of Transformer encoder.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
            net, usually 4-8x hidden_size in the papers
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
attention layers, but before layer normalization
ffn_dropout: probability of dropout applied to FFN output
hidden_act: activation function used between two linear layers in FFN
"""
def __init__(
self,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
):
super().__init__()
self.pre_ln = pre_ln
self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=1e-5)
self.first_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=1e-5)
self.second_sub_layer = PositionWiseFF(hidden_size, inner_size, ffn_dropout, hidden_act)
def forward_preln(self, encoder_query, encoder_mask, encoder_keys):
"""
Pre-LayerNorm block
        Order of operations: LN -> Self-Attn -> Residual -> LN -> FFN -> Residual
"""
residual = encoder_query
encoder_query = self.layer_norm_1(encoder_query)
encoder_keys = self.layer_norm_1(encoder_keys)
self_attn_output = self.first_sub_layer(encoder_query, encoder_keys, encoder_keys, encoder_mask)
self_attn_output += residual
residual = self_attn_output
self_attn_output = self.layer_norm_2(self_attn_output)
output_states = self.second_sub_layer(self_attn_output)
output_states += residual
return output_states
def forward_postln(self, encoder_query, encoder_mask, encoder_keys):
"""
Post-LayerNorm block
        Order of operations: Self-Attn -> Residual -> LN -> FFN -> Residual -> LN
"""
self_attn_output = self.first_sub_layer(encoder_query, encoder_keys, encoder_keys, encoder_mask)
self_attn_output += encoder_query
self_attn_output = self.layer_norm_1(self_attn_output)
output_states = self.second_sub_layer(self_attn_output)
output_states += self_attn_output
output_states = self.layer_norm_2(output_states)
return output_states
def forward(self, encoder_query, encoder_mask, encoder_keys):
if self.pre_ln:
return self.forward_preln(encoder_query, encoder_mask, encoder_keys)
else:
return self.forward_postln(encoder_query, encoder_mask, encoder_keys)
class TransformerEncoder(nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
):
super().__init__()
if pre_ln and pre_ln_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
else:
self.final_layer_norm = None
layer = TransformerEncoderBlock(
hidden_size,
inner_size,
num_attention_heads,
attn_score_dropout,
attn_layer_dropout,
ffn_dropout,
hidden_act,
pre_ln,
)
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
self.diag = 0 if mask_future else None
def _get_memory_states(self, encoder_states, encoder_mems_list=None, i=0):
if encoder_mems_list is not None:
memory_states = torch.cat((encoder_mems_list[i], encoder_states), dim=1)
else:
memory_states = encoder_states
return memory_states
def forward(self, encoder_states, encoder_mask, encoder_mems_list=None, return_mems=False):
"""
Args:
encoder_states: output of the embedding_layer (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
encoder_mems_list: list of the cached encoder hidden states
for fast autoregressive generation which will be used instead
of encoder_states as keys and values if not None
return_mems: bool, whether to return outputs of all encoder layers
or the last layer only
"""
encoder_attn_mask = form_attention_mask(encoder_mask, self.diag)
memory_states = self._get_memory_states(encoder_states, encoder_mems_list, 0)
cached_mems_list = [memory_states]
for i, layer in enumerate(self.layers):
encoder_states = layer(encoder_states, encoder_attn_mask, memory_states)
memory_states = self._get_memory_states(encoder_states, encoder_mems_list, i + 1)
cached_mems_list.append(memory_states)
if self.final_layer_norm is not None:
encoder_states = self.final_layer_norm(encoder_states)
memory_states = self._get_memory_states(encoder_states, encoder_mems_list, i + 1)
cached_mems_list.append(memory_states)
if return_mems:
return cached_mems_list
else:
return cached_mems_list[-1]
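# --- Illustrative usage sketch (not part of the module) ---------------------------
# Builds a small pre-LN TransformerEncoder and runs a padded toy batch through it.
# All sizes and the padding pattern are arbitrary assumptions for demonstration.
def _sketch_transformer_encoder_usage():
    import torch

    encoder = TransformerEncoder(
        num_layers=2, hidden_size=32, inner_size=128, num_attention_heads=4, pre_ln=True
    )
    hidden = torch.randn(2, 7, 32)  # (B, L_enc, H), e.g. the embedding layer output
    mask = torch.ones(2, 7)
    mask[1, 5:] = 0  # second sequence padded after 5 tokens
    last_layer = encoder(hidden, mask)  # (B, L_enc, H), last layer only
    all_mems = encoder(hidden, mask, return_mems=True)  # cached per-layer memory states
    return last_layer.shape, len(all_mems)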
|
NeMo-main
|
nemo/collections/asr/modules/transformer/transformer_encoders.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import torch
from nemo.collections.common.parts import NEG_INF, mask_padded_tokens
__all__ = [
"GreedySequenceGenerator",
"TopKSequenceGenerator",
"BeamSearchSequenceGenerator",
"BeamSearchSequenceGeneratorWithLanguageModel",
"EnsembleBeamSearchSequenceGenerator",
]
class GreedySequenceGenerator:
"""
Greedy sequence generator based on the decoder followed by log_softmax.
Args:
embedding: nn.Module, transforms input_ids into vector embeddings
decoder: nn.Module, takes embeddings and produces hidden_states
log_softmax: nn.Module, takes hidden_states and produces log_probs
which correspond to probability distribution of tokens (ids)
pad: index of padding token in the vocabulary
bos: index of beginning of sequence token in the vocabulary
eos: index of end of sequence token in the vocabulary
max_sequence_length: maximum allowed length for generated sequences
max_delta_length: in case of encoder-decoder generation (e.g. NMT),
forbids generated sequences to be longer than the length of
source sequences plus max_delta_length
batch_size: size of the batch of generated sequences if neither
source nor target starting sequences are provided
"""
def __init__(
self,
embedding,
decoder,
log_softmax,
pad=0,
bos=1,
eos=2,
max_sequence_length=512,
max_delta_length=20,
batch_size=1,
):
super().__init__()
self.embedding = embedding
self.decoder = decoder
self.log_softmax = log_softmax
self.pad, self.bos, self.eos = pad, bos, eos
self.max_seq_length = max_sequence_length
self.max_delta_len = max_delta_length
self.batch_size = batch_size
def _one_step_forward(
self,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
pos=0,
):
"""
One step of autoregressive output generation.
Args:
decoder_input_ids: starting sequence of tokens to generate from;
if None, generation will start from a batch of <bos> tokens
encoder_hidden_states: output of the encoder for conditional
sequence generation; if None, generator will use unconditional
mode (e.g., language modeling)
encoder_input_mask: input mask used in the encoder
decoder_mems_list: list of size num_layers with cached activations
of sequence (x[1], ..., x[k-1]) for fast generation of x[k]
pos: starting position in positional encoding
"""
decoder_hidden_states = self.embedding.forward(decoder_input_ids, start_pos=pos)
decoder_input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
if encoder_hidden_states is not None:
decoder_mems_list = self.decoder.forward(
decoder_hidden_states,
decoder_input_mask,
encoder_hidden_states,
encoder_input_mask,
decoder_mems_list,
return_mems=True,
)
else:
decoder_mems_list = self.decoder.forward(
decoder_hidden_states, decoder_input_mask, decoder_mems_list, return_mems=True
)
log_probs = self.log_softmax.forward(hidden_states=decoder_mems_list[-1][:, -1:])
return log_probs, decoder_mems_list
def _prepare_for_search(self, decoder_input_ids=None, encoder_hidden_states=None):
"""
Helper function which defines starting sequence to begin generating
with and maximum allowed number of tokens to be generated.
"""
decoder_parameter = next(self.decoder.parameters())
batch_size = self.batch_size
# for encoder-decoder generation, maximum length of generated sequence
# is min(max_sequence_length, src_len + max_delta_length)
if encoder_hidden_states is not None:
batch_size, src_len, _ = encoder_hidden_states.size()
if self.max_delta_len >= 0:
max_seq_length = min(self.max_seq_length, src_len + self.max_delta_len)
else:
max_seq_length = self.max_seq_length
else:
max_seq_length = self.max_seq_length
# if no input is provided, start with the batch of <bos> tokens
if decoder_input_ids is not None:
tgt = decoder_input_ids
batch_size, tgt_len = decoder_input_ids.size()
else:
tgt = torch.zeros(batch_size, 1).long().fill_(self.bos).to(decoder_parameter.device)
tgt_len = 1
max_generation_length = max_seq_length - tgt_len
return tgt, batch_size, max_generation_length
def _forward(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
assert not return_beam_scores
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states)
# pad profile tracks sequences ending with <eos> token to replace
# everything after <eos> with <pad> token
decoder_parameter = next(self.decoder.parameters())
pad_profile = torch.zeros(batch_size, 1).long().to(decoder_parameter.device)
decoder_mems_list = None
for i in range(max_generation_length):
log_probs, decoder_mems_list = self._one_step_forward(
tgt[:, -1:], encoder_hidden_states, encoder_input_mask, decoder_mems_list, i
)
next_tokens = torch.argmax(log_probs[:, -1], dim=-1, keepdim=True)
next_tokens = self.pad * pad_profile + next_tokens * (1 - pad_profile)
pad_profile = torch.max(pad_profile, (next_tokens == self.eos).long())
tgt = torch.cat((tgt, next_tokens), dim=-1)
# abort generation if all sequences end with <eos>
if pad_profile.sum() == batch_size:
break
return tgt
def __call__(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
with self.as_frozen():
return self._forward(
decoder_input_ids, encoder_hidden_states, encoder_input_mask, return_beam_scores=return_beam_scores
)
def freeze(self) -> None:
"""Freeze weights of embedding, decoder, and classification layers to prevent memory leak.
"""
for param in self.embedding.parameters():
param.requires_grad = False
self.embedding.eval()
for param in self.decoder.parameters():
param.requires_grad = False
self.decoder.eval()
for param in self.log_softmax.parameters():
param.requires_grad = False
self.log_softmax.eval()
def unfreeze(self) -> None:
"""Unfreeze weights of embedding, decoder, and classification layers.
"""
for param in self.embedding.parameters():
param.requires_grad = True
self.embedding.train()
for param in self.decoder.parameters():
param.requires_grad = True
self.decoder.train()
for param in self.log_softmax.parameters():
param.requires_grad = True
self.log_softmax.train()
@contextmanager
def as_frozen(self):
"""
Context manager which temporarily freezes embedding, decoder, and log_softmax modules,
yields control and finally unfreezes the modules.
"""
self.freeze()
try:
yield
finally:
self.unfreeze()
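# --- Illustrative sketch (not part of the module) ----------------------------------
# The pad/eos bookkeeping used in GreedySequenceGenerator._forward above. The per-step
# token picks are hard-coded stand-ins for argmax over real log-probs; pad=0, bos=1 and
# eos=2 follow the defaults of this module.
def _sketch_greedy_pad_profile():
    import torch

    pad, bos, eos = 0, 1, 2
    # pretend argmax picked these tokens for a batch of 2 at three consecutive steps
    steps = [torch.tensor([[5], [eos]]), torch.tensor([[7], [9]]), torch.tensor([[eos], [4]])]
    tgt = torch.full((2, 1), bos)
    pad_profile = torch.zeros(2, 1).long()  # becomes 1 once a sequence has emitted <eos>
    for next_tokens in steps:
        # after <eos>, keep emitting <pad>; otherwise keep the picked token
        next_tokens = pad * pad_profile + next_tokens * (1 - pad_profile)
        pad_profile = torch.max(pad_profile, (next_tokens == eos).long())
        tgt = torch.cat((tgt, next_tokens), dim=-1)
        if pad_profile.sum() == tgt.size(0):  # every sequence finished -> stop early
            break
    return tgt  # tensor([[1, 5, 7, 2], [1, 2, 0, 0]])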
class TopKSequenceGenerator(GreedySequenceGenerator):
"""
Top-k sequence generator based on the decoder followed by log_softmax.
Args:
*all args of GreedySequenceGenerator class
beam_size: size of the beam (parameter k in top-k)
temperature: temperature of top-k sampling, all logits are divided
            by temperature before rescaling. High temperature leads to a near-uniform
            distribution, low temperature to a sharply peaked (delta-like) distribution.
Kwargs:
all remaining parameters of GreedySequenceGenerator class
"""
def __init__(self, embedding, decoder, log_softmax, beam_size=1, temperature=1.0, **kwargs):
super().__init__(embedding, decoder, log_softmax, **kwargs)
self.beam_size = beam_size
self.temp = temperature
# @torch.no_grad()
def _one_step_forward(
self,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
pos=0,
):
log_probs, decoder_mems_list = super()._one_step_forward(
decoder_input_ids, encoder_hidden_states, encoder_input_mask, decoder_mems_list, pos
)
batch_size, seq_len, vocab_size = log_probs.size()
scores, indices = torch.topk(log_probs, self.beam_size, dim=-1)
rescaled_logexp = torch.zeros_like(log_probs).scatter(-1, indices, scores.div(self.temp).exp())
probs = rescaled_logexp / rescaled_logexp.norm(1, -1, keepdim=True)
# We randomly sample next tokens from rescaled probability distribution
# over top-k candidates and return a binary tensor which indicates
# candidates that have been selected. We call this object
# `pseudo_log_probs` as genuine log_probs should have -infs instead of
# 0s and 0s instead of 1s.
ids = torch.multinomial(probs.view(-1, vocab_size), 1).view(-1, seq_len, 1)
pseudo_log_probs = torch.zeros_like(log_probs).scatter(-1, ids, 1.0)
return pseudo_log_probs, decoder_mems_list
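# --- Illustrative sketch (not part of the module) ----------------------------------
# The top-k / temperature re-scaling from TopKSequenceGenerator._one_step_forward above,
# applied to a single fake log-prob row. beam_size, temperature and the vocab size of 10
# are arbitrary assumptions.
def _sketch_topk_sampling(beam_size=3, temperature=0.7):
    import torch

    log_probs = torch.log_softmax(torch.randn(1, 1, 10), dim=-1)  # [B=1, S=1, V=10]
    scores, indices = torch.topk(log_probs, beam_size, dim=-1)
    # keep only the top-k entries, divide by temperature, exponentiate, renormalize
    rescaled = torch.zeros_like(log_probs).scatter(-1, indices, scores.div(temperature).exp())
    probs = rescaled / rescaled.norm(1, -1, keepdim=True)
    # sample the next token id from the truncated distribution
    return torch.multinomial(probs.view(-1, 10), 1).item()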
class BeamSearchSequenceGenerator(GreedySequenceGenerator):
def __init__(self, embedding, decoder, log_softmax, beam_size=1, len_pen=0, **kwargs):
"""
Beam Search sequence generator based on the decoder followed by
log_softmax.
Args:
*all args of GreedySequenceGenerator class
beam_size: size of the beam
len_pen: length penalty parameter
Kwargs:
all remaining parameters of GreedySequenceGenerator class
"""
super().__init__(embedding, decoder, log_softmax, **kwargs)
self.beam_size = beam_size
self.len_pen = len_pen
@staticmethod
def compute_len_penalty(lengths, alpha):
"""Returns length penalty according to https://arxiv.org/pdf/1609.08144.pdf"""
return ((5 + lengths) / 6).pow(alpha)
def _forward(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states)
# generate initial buffer of beam_size prefixes-hypotheses
log_probs, decoder_mems_list = self._one_step_forward(tgt, encoder_hidden_states, encoder_input_mask, None, 0)
scores, prefixes = torch.topk(log_probs.permute(0, 2, 1), self.beam_size, dim=1)
scores, prefixes = scores.view(-1, 1), prefixes.view(-1, 1)
# repeat init target prefixes and cached memory states beam_size times
prefixes = torch.cat((tgt.repeat(1, self.beam_size).view(-1, 1), prefixes), dim=1)
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = decoder_mems_list[j].repeat(self.beam_size, 1, 1)
# repeat source sequence beam_size times for beam search
if encoder_hidden_states is not None:
_, src_length, hidden_size = encoder_hidden_states.size()
encoder_input_mask = encoder_input_mask.repeat(1, self.beam_size).view(-1, src_length)
encoder_hidden_states = encoder_hidden_states.repeat(1, self.beam_size, 1).view(
-1, src_length, hidden_size
)
else:
hidden_size = decoder_mems_list[0].size(2)
# pad_profile tracks finished hypotheses to generate only <pad> tokens
# if <eos> or <pad> has been generated
pad_profile = torch.zeros_like(scores).long()
# prefixes_len tracks lengths of generated hypotheses to perform
# length penalty correction
prefixes_len = torch.zeros_like(scores).fill_(prefixes.size(1) + 1)
for i in range(max_generation_length):
# mask all finished hypotheses to exclude them from beam
pad_mask = pad_profile.repeat(1, self.beam_size)
# generate and score candidates for prefixes continuation
log_probs, decoder_mems_list = self._one_step_forward(
prefixes[:, -1:], encoder_hidden_states, encoder_input_mask, decoder_mems_list, i + 1
)
scores_i, prefixes_i = torch.topk(log_probs[:, -1, :], self.beam_size, dim=-1)
# for all prefixes ending with <eos> or <pad> replace generated
# continuations with <pad>
prefixes_i = self.pad * pad_mask + prefixes_i * (1 - pad_mask)
# force all hypotheses but one generated from already finished
# hypotheses to have extremely low score, so they will not be
# considered during beam re-ranking
pad_mask[:, 1:] = pad_mask[:, 1:] * NEG_INF
scores = scores + scores_i * (1 - pad_mask).to(scores.dtype)
# choose top-k hypotheses with length penalty applied
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
scores, indices_i = torch.topk(scores.view(-1, self.beam_size ** 2), self.beam_size, dim=1)
scores = scores.view(-1, 1) * len_penalties
# select prefixes which correspond to the chosen hypotheses
prefixes = prefixes.unsqueeze(1).repeat(1, self.beam_size, 1)
prefixes = torch.cat((prefixes, prefixes_i.unsqueeze(2)), dim=2)
prefixes = prefixes.view(batch_size, self.beam_size ** 2, -1)
p_len = prefixes.size(2)
prefixes_ids = indices_i.unsqueeze(2).repeat(1, 1, p_len)
prefixes = prefixes.gather(1, prefixes_ids).view(-1, p_len)
# reshuffle cached decoder memory states to restore the order
# of hypotheses broken after top-k selection
mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, hidden_size) // self.beam_size
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = (
decoder_mems_list[j]
.view(-1, self.beam_size, p_len - 1, hidden_size)
.gather(1, mems_ids)
.view(-1, p_len - 1, hidden_size)
)
# update prefixes_len and pad_profile
not_eos_pad = prefixes.ne(self.eos) & prefixes.ne(self.pad)
prefixes_len = 1 + not_eos_pad.sum(dim=1, keepdim=True).to(scores.dtype)
pad_profile = (~not_eos_pad[:, -1:]).long()
# if all hypotheses end with <eos> or <pad>, interrupt search
if pad_profile.sum() == batch_size * self.beam_size:
break
# select best performing hypotheses in each element of the batch
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
best_guesses = (
torch.argmax(scores.view(-1, self.beam_size), dim=1, keepdim=True).repeat(1, prefixes.size(1)).unsqueeze(1)
)
tgt = prefixes.view(batch_size, self.beam_size, -1).gather(1, best_guesses).squeeze(1)
if return_beam_scores:
return prefixes, scores * len_penalties, tgt
else:
return tgt
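# --- Illustrative sketch (not part of the module) ----------------------------------
# The GNMT length penalty used for beam re-ranking above: hypotheses are compared on
# scores divided by ((5 + len) / 6) ** alpha, then multiplied back so the running scores
# stay in the raw summed log-prob scale. The lengths and scores below are toy values.
def _sketch_length_penalty(alpha=0.6):
    import torch

    lengths = torch.tensor([[4.0], [9.0]])  # lengths of two hypotheses
    raw_scores = torch.tensor([[-3.2], [-5.0]])  # summed log-probs
    penalties = ((5 + lengths) / 6).pow(alpha)
    normalized = raw_scores / penalties  # used only while ranking / selecting the beam
    restored = normalized * penalties  # recovers the raw scores (up to floating point)
    return penalties, normalized, restored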
class EnsembleBeamSearchSequenceGenerator:
def __init__(
self,
encoders,
embeddings,
decoders,
log_softmaxes,
beam_size=1,
len_pen=0,
pad=0,
bos=1,
eos=2,
max_sequence_length=512,
max_delta_length=20,
batch_size=1,
language_model=None,
fusion_coef=None,
):
"""
Ensemble Beam Search sequence generator based on the decoder followed by
log_softmax. Averages the probabilities of different models.
NOTE: All models must have been trained with the same BPE tokenizers.
Args:
encoders: A list of encoders
embeddings: A list of decoder embedding layers
decoders: A list of decoders
log_softmaxes: A list of decoder output layers
beam_size: Beam size
len_pen: Length penalty to adjust logprob scores to favor longer sequences
pad: pad id
bos: beginning of sequence id
eos: end of sequence id
max_sequence_length: maximum sequence length
max_delta_length: maximum length difference between input and output
batch_size: batch size if not inferrable from input sequence
"""
self.encoders = encoders
self.embeddings = embeddings
self.decoders = decoders
self.log_softmaxes = log_softmaxes
self.beam_size = beam_size
self.len_pen = len_pen
self.pad, self.bos, self.eos = pad, bos, eos
self.max_seq_length = max_sequence_length
self.max_delta_len = max_delta_length
self.batch_size = batch_size
assert len(embeddings) == len(decoders) == len(log_softmaxes) == len(encoders)
self.num_models = len(encoders)
self.language_model = language_model
self.fusion_coef = fusion_coef
@staticmethod
def compute_len_penalty(lengths, alpha):
"""Returns length penalty according to https://arxiv.org/pdf/1609.08144.pdf"""
return ((5 + lengths) / 6).pow(alpha)
def _one_step_forward_lm(self, decoder_input_ids=None, lm_mems_list=None, pos=0):
input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
lm_hidden_states = self.language_model.encoder.embedding.forward(decoder_input_ids, start_pos=pos)
lm_mems_list = self.language_model.encoder.encoder.forward(
lm_hidden_states, input_mask, lm_mems_list, return_mems=True,
)
lm_log_probs = self.language_model.log_softmax.forward(hidden_states=lm_mems_list[-1][:, -1:])
return lm_log_probs, lm_mems_list
def _one_step_forward(
self,
ensemble_index,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
pos=0,
):
"""
One step of autoregressive output generation for one particular model.
Args:
decoder_input_ids: starting sequence of tokens to generate from;
if None, generation will start from a batch of <bos> tokens
encoder_hidden_states: output of the encoder for conditional
sequence generation; if None, generator will use unconditional
mode (e.g., language modeling)
encoder_input_mask: input mask used in the encoder
decoder_mems_list: list of size num_layers with cached activations
of sequence (x[1], ..., x[k-1]) for fast generation of x[k]
pos: starting position in positional encoding
"""
decoder_hidden_states = self.embeddings[ensemble_index].forward(decoder_input_ids, start_pos=pos)
decoder_input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
if encoder_hidden_states is not None:
decoder_mems_list = self.decoders[ensemble_index].forward(
decoder_hidden_states,
decoder_input_mask,
encoder_hidden_states,
encoder_input_mask,
decoder_mems_list,
return_mems=True,
)
else:
decoder_mems_list = self.decoders[ensemble_index].forward(
decoder_hidden_states, decoder_input_mask, decoder_mems_list, return_mems=True
)
log_probs = self.log_softmaxes[ensemble_index].forward(hidden_states=decoder_mems_list[-1][:, -1:])
return log_probs, decoder_mems_list
def _prepare_for_search(self, decoder_input_ids=None, encoder_hidden_states=None):
"""
Helper function which defines starting sequence to begin generating
with and maximum allowed number of tokens to be generated.
"""
decoder_parameter = next(self.decoders[0].parameters())
batch_size = self.batch_size
# for encoder-decoder generation, maximum length of generated sequence
# is min(max_sequence_length, src_len + max_delta_length)
if encoder_hidden_states is not None:
batch_size, src_len, _ = encoder_hidden_states.size()
if self.max_delta_len >= 0:
max_seq_length = min(self.max_seq_length, src_len + self.max_delta_len)
else:
max_seq_length = self.max_seq_length
else:
max_seq_length = self.max_seq_length
# if no input is provided, start with the batch of <bos> tokens
if decoder_input_ids is not None:
tgt = decoder_input_ids
batch_size, tgt_len = decoder_input_ids.size()
else:
tgt = torch.zeros(batch_size, 1).long().fill_(self.bos).to(decoder_parameter.device)
tgt_len = 1
max_generation_length = max_seq_length - tgt_len
return tgt, batch_size, max_generation_length
def _get_encoder_hidden_states(self, src_ids, encoder_input_mask, ensemble_index):
return self.encoders[ensemble_index](input_ids=src_ids, encoder_mask=encoder_input_mask)
def _average_probs(self, probs_list):
probs_list = torch.stack(probs_list)
return torch.log(torch.exp(probs_list).mean(0))
# probs = torch.stack(probs_list) # Ens x B x T x V
# return torch.log(probs.sum(0) / probs.sum(-1).sum(0).unsqueeze(-1))
def _forward(self, src_ids, encoder_input_mask, decoder_input_ids=None, return_beam_scores=False):
encoder_hidden_states = [
self._get_encoder_hidden_states(src_ids, encoder_input_mask, i) for i in range(self.num_models)
]
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states[0])
# generate initial buffer of beam_size prefixes-hypotheses
outputs = [
self._one_step_forward(i, tgt, encoder_hidden_states[i], encoder_input_mask, None, 0)
for i in range(self.num_models)
]
nmt_log_probs = self._average_probs([x[0] for x in outputs])
decoder_mems_lists = [x[1] for x in outputs]
if self.language_model is not None:
lm_log_probs, lm_mems_list = self._one_step_forward_lm(tgt, None, 0)
log_probs = nmt_log_probs + self.fusion_coef * lm_log_probs
else:
log_probs = nmt_log_probs
scores, prefixes = torch.topk(log_probs.permute(0, 2, 1), self.beam_size, dim=1)
scores, prefixes = scores.view(-1, 1), prefixes.view(-1, 1)
# repeat init target prefixes and cached memory states beam_size times
prefixes = torch.cat((tgt.repeat(1, self.beam_size).view(-1, 1), prefixes), dim=1)
for i in range(self.num_models):
for j in range(len(decoder_mems_lists[i])):
decoder_mems_lists[i][j] = decoder_mems_lists[i][j].repeat(self.beam_size, 1, 1)
if self.language_model is not None:
for j in range(len(lm_mems_list)):
lm_mems_list[j] = lm_mems_list[j].repeat(self.beam_size, 1, 1)
lm_hidden_size = lm_mems_list[0].size(2)
encoder_input_mask = encoder_input_mask.repeat(1, self.beam_size).view(-1, encoder_input_mask.size(1))
for i in range(self.num_models):
_, src_length, hidden_size = encoder_hidden_states[i].size()
encoder_hidden_states[i] = (
encoder_hidden_states[i].repeat(1, self.beam_size, 1).view(-1, src_length, hidden_size)
)
# pad_profile tracks finished hypotheses to generate only <pad> tokens
# if <eos> or <pad> has been generated
pad_profile = torch.zeros_like(scores).long()
# prefixes_len tracks lengths of generated hypotheses to perform
# length penalty correction
prefixes_len = torch.zeros_like(scores).fill_(prefixes.size(1) + 1)
for i in range(max_generation_length):
# mask all finished hypotheses to exclude them from beam
pad_mask = pad_profile.repeat(1, self.beam_size)
# generate and score candidates for prefixes continuation
outputs = [
self._one_step_forward(
model_num,
prefixes[:, -1:],
encoder_hidden_states[model_num],
encoder_input_mask,
decoder_mems_lists[model_num],
i + 1,
)
for model_num in range(self.num_models)
]
nmt_log_probs = self._average_probs([x[0] for x in outputs])
decoder_mems_lists = [x[1] for x in outputs]
if self.language_model is not None:
lm_log_probs, lm_mems_list = self._one_step_forward_lm(prefixes[:, -1:], lm_mems_list, i + 1)
log_probs = nmt_log_probs + self.fusion_coef * lm_log_probs
else:
log_probs = nmt_log_probs
scores_i, prefixes_i = torch.topk(log_probs[:, -1, :], self.beam_size, dim=-1)
# for all prefixes ending with <eos> or <pad> replace generated
# continuations with <pad>
prefixes_i = self.pad * pad_mask + prefixes_i * (1 - pad_mask)
# force all hypotheses but one generated from already finished
# hypotheses to have extremely low score, so they will not be
# considered during beam re-ranking
pad_mask[:, 1:] = pad_mask[:, 1:] * NEG_INF
scores = scores + scores_i * (1 - pad_mask).to(scores.dtype)
# choose top-k hypotheses with length penalty applied
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
scores, indices_i = torch.topk(scores.view(-1, self.beam_size ** 2), self.beam_size, dim=1)
scores = scores.view(-1, 1) * len_penalties
# select prefixes which correspond to the chosen hypotheses
prefixes = prefixes.unsqueeze(1).repeat(1, self.beam_size, 1)
prefixes = torch.cat((prefixes, prefixes_i.unsqueeze(2)), dim=2)
prefixes = prefixes.view(batch_size, self.beam_size ** 2, -1)
p_len = prefixes.size(2)
prefixes_ids = indices_i.unsqueeze(2).repeat(1, 1, p_len)
prefixes = prefixes.gather(1, prefixes_ids).view(-1, p_len)
# reshuffle cached decoder memory states to restore the order
# of hypotheses broken after top-k selection
for model_num in range(self.num_models):
hidden_size = decoder_mems_lists[model_num][0].size(2)
mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, hidden_size) // self.beam_size
for j in range(len(decoder_mems_lists[model_num])):
decoder_mems_lists[model_num][j] = (
decoder_mems_lists[model_num][j]
.view(-1, self.beam_size, p_len - 1, hidden_size)
.gather(1, mems_ids)
.view(-1, p_len - 1, hidden_size)
)
if self.language_model is not None:
lm_mems_ids = (
indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, lm_hidden_size) // self.beam_size
)
for j in range(len(lm_mems_list)):
lm_mems_list[j] = (
lm_mems_list[j]
.view(-1, self.beam_size, p_len - 1, lm_hidden_size)
.gather(1, lm_mems_ids)
.view(-1, p_len - 1, lm_hidden_size)
)
# update prefixes_len and pad_profile
not_eos_pad = prefixes.ne(self.eos) & prefixes.ne(self.pad)
prefixes_len = 1 + not_eos_pad.sum(dim=1, keepdim=True).to(scores.dtype)
pad_profile = (~not_eos_pad[:, -1:]).long()
# if all hypotheses end with <eos> or <pad>, interrupt search
if pad_profile.sum() == batch_size * self.beam_size:
break
# select best performing hypotheses in each element of the batch
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
best_guesses = (
torch.argmax(scores.view(-1, self.beam_size), dim=1, keepdim=True).repeat(1, prefixes.size(1)).unsqueeze(1)
)
tgt = prefixes.view(batch_size, self.beam_size, -1).gather(1, best_guesses).squeeze(1)
if return_beam_scores:
return prefixes, scores * len_penalties, tgt
else:
return tgt
def __call__(self, src_ids, encoder_input_mask, decoder_input_ids=None, return_beam_scores=False):
with self.as_frozen():
return self._forward(src_ids, encoder_input_mask, decoder_input_ids, return_beam_scores)
def freeze(self) -> None:
"""Freeze weights of embedding, decoder, and classification layers to prevent memory leak.
"""
for model_num in range(self.num_models):
for param in self.embeddings[model_num].parameters():
param.requires_grad = False
self.embeddings[model_num].eval()
for param in self.decoders[model_num].parameters():
param.requires_grad = False
self.decoders[model_num].eval()
for param in self.log_softmaxes[model_num].parameters():
param.requires_grad = False
self.log_softmaxes[model_num].eval()
for param in self.encoders[model_num].parameters():
param.requires_grad = False
self.encoders[model_num].eval()
def unfreeze(self) -> None:
"""Unfreeze weights of embedding, decoder, and classification layers.
"""
for model_num in range(self.num_models):
for param in self.embeddings[model_num].parameters():
param.requires_grad = True
self.embeddings[model_num].train()
for param in self.decoders[model_num].parameters():
param.requires_grad = True
self.decoders[model_num].train()
for param in self.log_softmaxes[model_num].parameters():
param.requires_grad = True
self.log_softmaxes[model_num].train()
for param in self.encoders[model_num].parameters():
param.requires_grad = True
self.encoders[model_num].train()
@contextmanager
def as_frozen(self):
"""
Context manager which temporarily freezes embedding, decoder, and log_softmax modules,
yields control and finally unfreezes the modules.
"""
self.freeze()
try:
yield
finally:
self.unfreeze()
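# --- Illustrative sketch (not part of the module) ----------------------------------
# The probability-space averaging from _average_probs above, plus the shallow-fusion
# combination applied when a language model is supplied
# (score = log P_NMT + fusion_coef * log P_LM). Tensor sizes and fusion_coef are toy values.
def _sketch_ensemble_and_fusion(fusion_coef=0.2):
    import torch

    # per-model next-token log-probs, each of shape [B=1, 1, V=6]
    probs_list = [torch.log_softmax(torch.randn(1, 1, 6), dim=-1) for _ in range(3)]
    ensembled = torch.log(torch.exp(torch.stack(probs_list)).mean(0))  # average in probability space
    lm_log_probs = torch.log_softmax(torch.randn(1, 1, 6), dim=-1)
    fused = ensembled + fusion_coef * lm_log_probs  # shallow fusion with an external LM
    return ensembled.shape, fused.argmax(dim=-1)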
class BeamSearchSequenceGeneratorWithLanguageModel(GreedySequenceGenerator):
def __init__(
self, embedding, decoder, log_softmax, language_model, beam_size=1, len_pen=0, fusion_coef=0.0, **kwargs
):
"""
Beam Search sequence generator based on the decoder followed by log_softmax
with external language model fusion.
Args:
*all args of BeamSearchSequenceGenerator class
language_model: nemo TransformerLMModel
fusion_coef: coefficient before language model score, the resulting score is
score = log P_NMT(y|x) + fusion_coef * log P_LM(y)
Kwargs:
all remaining parameters of GreedySequenceGenerator class
"""
super().__init__(embedding, decoder, log_softmax, **kwargs)
self.language_model = language_model
self.beam_size = beam_size
self.len_pen = len_pen
self.fusion_coef = fusion_coef
def _one_step_forward(
self,
decoder_input_ids=None,
encoder_hidden_states=None,
encoder_input_mask=None,
decoder_mems_list=None,
lm_mems_list=None,
pos=0,
):
nmt_log_probs, decoder_mems_list = super()._one_step_forward(
decoder_input_ids, encoder_hidden_states, encoder_input_mask, decoder_mems_list, pos,
)
input_mask = mask_padded_tokens(decoder_input_ids, self.pad).float()
lm_hidden_states = self.language_model.encoder.embedding.forward(decoder_input_ids, start_pos=pos)
lm_mems_list = self.language_model.encoder.encoder.forward(
lm_hidden_states, input_mask, lm_mems_list, return_mems=True,
)
lm_log_probs = self.language_model.log_softmax.forward(hidden_states=lm_mems_list[-1][:, -1:])
log_probs = nmt_log_probs + self.fusion_coef * lm_log_probs
return log_probs, decoder_mems_list, lm_mems_list
@staticmethod
def compute_len_penalty(lengths, alpha):
"""Returns length penalty according to https://arxiv.org/pdf/1609.08144.pdf"""
return ((5 + lengths) / 6).pow(alpha)
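    # Example (illustrative, not part of the original file): for a hypothesis of
    # length 10 and len_pen = 0.6, the penalty is ((5 + 10) / 6) ** 0.6 ≈ 1.73,
    # so longer hypotheses are divided by a larger factor and the bias towards
    # short outputs is reduced.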
def _forward(
self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False
):
tgt, batch_size, max_generation_length = self._prepare_for_search(decoder_input_ids, encoder_hidden_states)
# generate initial buffer of beam_size prefixes-hypotheses
log_probs, decoder_mems_list, lm_mems_list = self._one_step_forward(
tgt, encoder_hidden_states, encoder_input_mask, None, None, 0
)
scores, prefixes = torch.topk(log_probs.permute(0, 2, 1), self.beam_size, dim=1)
scores, prefixes = scores.view(-1, 1), prefixes.view(-1, 1)
# repeat init target prefixes and cached memory states beam_size times
prefixes = torch.cat((tgt.repeat(1, self.beam_size).view(-1, 1), prefixes), dim=1)
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = decoder_mems_list[j].repeat(self.beam_size, 1, 1)
for j in range(len(lm_mems_list)):
lm_mems_list[j] = lm_mems_list[j].repeat(self.beam_size, 1, 1)
# repeat source sequence beam_size times for beam search
if encoder_hidden_states is not None:
_, src_length, hidden_size = encoder_hidden_states.size()
encoder_input_mask = encoder_input_mask.repeat(1, self.beam_size).view(-1, src_length)
encoder_hidden_states = encoder_hidden_states.repeat(1, self.beam_size, 1).view(
-1, src_length, hidden_size
)
else:
hidden_size = decoder_mems_list[0].size(2)
lm_hidden_size = lm_mems_list[0].size(2)
# pad_profile tracks finished hypotheses to generate only <pad> tokens
# if <eos> or <pad> has been generated
pad_profile = torch.zeros_like(scores).long()
# prefixes_len tracks lengths of generated hypotheses to perform
# length penalty correction
prefixes_len = torch.zeros_like(scores).fill_(prefixes.size(1) + 1)
for i in range(max_generation_length):
# mask all finished hypotheses to exclude them from beam
pad_mask = pad_profile.repeat(1, self.beam_size)
# generate and score candidates for prefixes continuation
log_probs, decoder_mems_list, lm_mems_list = self._one_step_forward(
prefixes[:, -1:], encoder_hidden_states, encoder_input_mask, decoder_mems_list, lm_mems_list, i + 1
)
scores_i, prefixes_i = torch.topk(log_probs[:, -1, :], self.beam_size, dim=-1)
# for all prefixes ending with <eos> or <pad> replace generated
# continuations with <pad>
prefixes_i = self.pad * pad_mask + prefixes_i * (1 - pad_mask)
# force all hypotheses but one generated from already finished
# hypotheses to have extremely low score, so they will not be
# considered during beam re-ranking
pad_mask[:, 1:] = pad_mask[:, 1:] * NEG_INF
scores = scores + scores_i * (1 - pad_mask).to(scores.dtype)
# choose top-k hypotheses with length penalty applied
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
scores, indices_i = torch.topk(scores.view(-1, self.beam_size ** 2), self.beam_size, dim=1)
scores = scores.view(-1, 1) * len_penalties
# select prefixes which correspond to the chosen hypotheses
prefixes = prefixes.unsqueeze(1).repeat(1, self.beam_size, 1)
prefixes = torch.cat((prefixes, prefixes_i.unsqueeze(2)), dim=2)
prefixes = prefixes.view(batch_size, self.beam_size ** 2, -1)
p_len = prefixes.size(2)
prefixes_ids = indices_i.unsqueeze(2).repeat(1, 1, p_len)
prefixes = prefixes.gather(1, prefixes_ids).view(-1, p_len)
# reshuffle cached decoder memory states to restore the order
# of hypotheses broken after top-k selection
mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, hidden_size) // self.beam_size
for j in range(len(decoder_mems_list)):
decoder_mems_list[j] = (
decoder_mems_list[j]
.view(-1, self.beam_size, p_len - 1, hidden_size)
.gather(1, mems_ids)
.view(-1, p_len - 1, hidden_size)
)
lm_mems_ids = indices_i.unsqueeze(2).unsqueeze(3).repeat(1, 1, p_len - 1, lm_hidden_size) // self.beam_size
for j in range(len(lm_mems_list)):
lm_mems_list[j] = (
lm_mems_list[j]
.view(-1, self.beam_size, p_len - 1, lm_hidden_size)
.gather(1, lm_mems_ids)
.view(-1, p_len - 1, lm_hidden_size)
)
# update prefixes_len and pad_profile
not_eos_pad = prefixes.ne(self.eos) & prefixes.ne(self.pad)
prefixes_len = 1 + not_eos_pad.sum(dim=1, keepdim=True).to(scores.dtype)
pad_profile = (~not_eos_pad[:, -1:]).long()
# if all hypotheses end with <eos> or <pad>, interrupt search
if pad_profile.sum() == batch_size * self.beam_size:
break
# select best performing hypotheses in each element of the batch
len_penalties = self.compute_len_penalty(prefixes_len, self.len_pen)
scores = scores / len_penalties
best_guesses = (
torch.argmax(scores.view(-1, self.beam_size), dim=1, keepdim=True).repeat(1, prefixes.size(1)).unsqueeze(1)
)
tgt = prefixes.view(batch_size, self.beam_size, -1).gather(1, best_guesses).squeeze(1)
if return_beam_scores:
return prefixes, scores * len_penalties, tgt
else:
return tgt
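# --- Illustrative sketch (not part of the original NeMo file) ---
# Minimal example of wiring up the LM-fused beam search generator defined above.
# The `embedding`, `decoder`, `log_softmax` and `language_model` arguments are assumed
# to be compatible NeMo modules created elsewhere (hypothetical objects); the fused
# score follows the docstring: score = log P_NMT(y|x) + fusion_coef * log P_LM(y).
def _example_lm_fused_beam_search(embedding, decoder, log_softmax, language_model, encoder_states, encoder_mask):
    generator = BeamSearchSequenceGeneratorWithLanguageModel(
        embedding, decoder, log_softmax, language_model, beam_size=4, len_pen=0.6, fusion_coef=0.1,
    )
    # _forward is called directly here for illustration; it returns the best hypothesis
    # per batch element as a B x T tensor of token ids.
    return generator._forward(encoder_hidden_states=encoder_states, encoder_input_mask=encoder_mask)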
|
NeMo-main
|
nemo/collections/asr/modules/transformer/transformer_generators.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional
from nemo.collections.asr.modules.transformer.bridge_encoders import BridgeEncoder
from nemo.collections.asr.modules.transformer.perceiver_encoders import PerceiverEncoder
from nemo.collections.asr.modules.transformer.reduction_encoders import PoolingEncoder
from nemo.collections.asr.modules.transformer.transformer import (
NeMoTransformerConfig,
TransformerDecoderNM,
TransformerEncoderNM,
)
from nemo.core.classes.common import typecheck
from nemo.core.neural_types import MaskType, NeuralType
from nemo.core.neural_types.elements import BoolType
__all__ = [
"NeMoTransformerBottleneckConfig",
"NeMoTransformerBottleneckEncoderConfig",
"NeMoTransformerBottleneckDecoderConfig",
"TransformerBottleneckEncoderNM",
]
@dataclass
class NeMoTransformerBottleneckConfig(NeMoTransformerConfig):
# architecture details (default is no bottleneck)
arch: str = ''
hidden_steps: int = -1
hidden_blocks: int = 1
hidden_init_method: str = "params"
@dataclass
class NeMoTransformerBottleneckEncoderConfig(NeMoTransformerBottleneckConfig):
mask_future: bool = False
# change return_mask to False to return hidden states only (default for non-bottleneck encoder)
return_mask: bool = True
@dataclass
class NeMoTransformerBottleneckDecoderConfig(NeMoTransformerBottleneckConfig):
r2l: bool = False
class TransformerBottleneckEncoderNM(TransformerEncoderNM):
_SUPPORTED_ARCH = ["seq2seq", "bridge", "perceiver", "max_pool", "avg_pool"]
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
mask_future: bool = False,
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
arch: str = '',
hidden_steps: int = -1,
hidden_blocks: int = 1,
hidden_init_method: str = "default",
# default whether forward() method returns hidden or (hidden, mask)
return_mask=True,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
mask_future=mask_future,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self._arch = arch
self._return_mask = return_mask
# replace encoder
self._encoder = self._build_encoder(
arch=arch,
hidden_steps=hidden_steps,
hidden_blocks=hidden_blocks,
hidden_init_method=hidden_init_method,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
mask_future=mask_future,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
def _build_encoder(self, arch, **kwargs):
"""
        Returns an encoder based on the architecture arch and kwargs
"""
# default non-bottleneck transformer encoder
if (not arch) or (arch == "seq2seq"):
encoder = self.encoder
elif arch == "bridge":
encoder = BridgeEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
)
elif arch == "perceiver":
encoder = PerceiverEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
)
elif arch == "max_pool":
encoder = PoolingEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
pooling_type="max",
)
elif arch == "avg_pool":
encoder = PoolingEncoder(
num_layers=kwargs["num_layers"],
hidden_size=kwargs["hidden_size"],
inner_size=kwargs["inner_size"],
num_attention_heads=kwargs["num_attention_heads"],
attn_score_dropout=kwargs["attn_score_dropout"],
attn_layer_dropout=kwargs["attn_layer_dropout"],
ffn_dropout=kwargs["ffn_dropout"],
hidden_act=kwargs["hidden_act"],
mask_future=kwargs["mask_future"],
pre_ln=kwargs["pre_ln"],
pre_ln_final_layer_norm=kwargs["pre_ln_final_layer_norm"],
hidden_steps=kwargs["hidden_steps"],
hidden_blocks=kwargs["hidden_blocks"],
hidden_init_method=kwargs["hidden_init_method"],
pooling_type="avg",
)
else:
raise ValueError(f"Unknown arch = {self.arch}, supported arch = {self.supported_arch}")
return encoder
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
input_types = super().input_types
input_types.update(
{"return_mask": NeuralType((), BoolType(), True),}
)
return input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
output_types = super().output_types
output_types.update(
{"hidden_mask": NeuralType(('B', 'T'), MaskType(), True),}
)
return output_types
@property
def supported_arch(self):
return self._SUPPORTED_ARCH
@property
def arch(self):
return self._arch
@typecheck()
def forward(self, input_ids, encoder_mask, return_mask=None):
if return_mask is None:
return_mask = self._return_mask
embeddings = self._embedding(input_ids=input_ids)
if (not self.arch) or (self.arch == "seq2seq"):
encoder_hidden_states = self._encoder(encoder_states=embeddings, encoder_mask=encoder_mask)
encoder_hidden_mask = encoder_mask
else:
encoder_hidden_states, encoder_hidden_mask = self._encoder(
encoder_states=embeddings, encoder_mask=encoder_mask,
)
if return_mask:
return encoder_hidden_states, encoder_hidden_mask
else:
return encoder_hidden_states
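# --- Illustrative sketch (not part of the original NeMo file) ---
# Minimal example of a bottleneck encoder that compresses the input sequence into a
# fixed number of hidden steps with the "perceiver" arch. All sizes are small,
# hypothetical values chosen only for illustration.
def _example_bottleneck_encoder():
    import torch

    enc = TransformerBottleneckEncoderNM(
        vocab_size=128,
        hidden_size=16,
        num_layers=2,
        inner_size=32,
        num_attention_heads=2,
        arch="perceiver",
        hidden_steps=8,
        hidden_blocks=1,
    )
    input_ids = torch.randint(0, 128, (2, 12))
    encoder_mask = torch.ones(2, 12)
    # with return_mask=True (the default) the hidden states and their mask are both returned
    hidden, hidden_mask = enc(input_ids=input_ids, encoder_mask=encoder_mask)
    return hidden.shape, hidden_mask.shape  # expected: (2, 8, 16) and (2, 8)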
class TransformerBottleneckDecoderNM(TransformerDecoderNM):
_SUPPORTED_ARCH = ["seq2seq"]
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
arch='',
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self._arch = arch
# replace decoder
self._decoder = self._build_decoder(
arch=arch,
hidden_size=hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
def _build_decoder(self, arch, **kwargs):
"""
Returns a decoder based on architecture arch and kwargs
"""
# usual non-bottleneck transformer decoder
if (not arch) or (arch == "seq2seq"):
decoder = self.decoder
else:
raise ValueError(f"Unknown arch = {self.arch}, supported arch = {self.supported_arch}")
return decoder
@property
def supported_arch(self):
return self._SUPPORTED_ARCH
@property
def arch(self):
return self._arch
|
NeMo-main
|
nemo/collections/asr/modules/transformer/transformer_bottleneck.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
import torch.nn as nn
from nemo.collections.asr.modules.transformer.transformer_modules import MultiHeadAttention, PositionWiseFF
from nemo.collections.common.parts import form_attention_mask
__all__ = ["TransformerDecoder"]
class TransformerDecoderBlock(nn.Module):
"""
Building block of Transformer decoder.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
        inner_size: number of neurons in the intermediate part of the feed-forward
            net, usually 4-8x hidden_size in the papers
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
attention layers, but before layer normalization
ffn_dropout: probability of dropout applied to FFN output
hidden_act: activation function used between two linear layers in FFN
"""
def __init__(
self,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
):
super().__init__()
self.pre_ln = pre_ln
self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=1e-5)
self.first_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=1e-5)
self.second_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_3 = nn.LayerNorm(hidden_size, eps=1e-5)
self.third_sub_layer = PositionWiseFF(hidden_size, inner_size, ffn_dropout, hidden_act)
def forward_preln(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
"""
Pre-LayerNorm block
Order of operations: LN -> Self-Attn -> Residual -> LN -> Cross-Attn -> Residual -> LN -> FFN
"""
residual = decoder_query
decoder_query = self.layer_norm_1(decoder_query)
decoder_keys = self.layer_norm_1(decoder_keys)
self_attn_output = self.first_sub_layer(decoder_query, decoder_keys, decoder_keys, decoder_mask)
self_attn_output += residual
residual = self_attn_output
self_attn_output = self.layer_norm_2(self_attn_output)
enc_dec_attn_output = self.second_sub_layer(self_attn_output, encoder_states, encoder_states, encoder_mask)
enc_dec_attn_output += residual
residual = enc_dec_attn_output
enc_dec_attn_output = self.layer_norm_3(enc_dec_attn_output)
output_states = self.third_sub_layer(enc_dec_attn_output)
output_states += residual
return output_states
def forward_postln(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
"""
Post-LayerNorm block
Order of operations: Self-Attn -> Residual -> LN -> Cross-Attn -> Residual -> LN -> FFN -> Residual -> LN
"""
self_attn_output = self.first_sub_layer(decoder_query, decoder_keys, decoder_keys, decoder_mask)
self_attn_output += decoder_query
self_attn_output = self.layer_norm_1(self_attn_output)
enc_dec_attn_output = self.second_sub_layer(self_attn_output, encoder_states, encoder_states, encoder_mask)
enc_dec_attn_output += self_attn_output
enc_dec_attn_output = self.layer_norm_2(enc_dec_attn_output)
output_states = self.third_sub_layer(enc_dec_attn_output)
output_states += enc_dec_attn_output
return self.layer_norm_3(output_states)
def forward(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
if self.pre_ln:
return self.forward_preln(decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask)
else:
return self.forward_postln(decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask)
class TransformerDecoder(nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
):
super().__init__()
if pre_ln and pre_ln_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
else:
self.final_layer_norm = None
layer = TransformerDecoderBlock(
hidden_size,
inner_size,
num_attention_heads,
attn_score_dropout,
attn_layer_dropout,
ffn_dropout,
hidden_act,
pre_ln,
)
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
self.diagonal = 0
def _get_memory_states(self, decoder_states, decoder_mems_list=None, i=0):
if decoder_mems_list is not None:
inp1 = torch.transpose(decoder_mems_list[i], 1, 2) # Putting seq_len to last dim to handle export cases
inp2 = torch.transpose(decoder_states, 1, 2)
memory_states = torch.cat((inp1, inp2), dim=2)
memory_states = torch.transpose(memory_states, 1, 2) # Transposing back
else:
memory_states = decoder_states
return memory_states
def forward(
self,
decoder_states,
decoder_mask,
encoder_states,
encoder_mask,
decoder_mems_list=None,
return_mems=False,
return_mems_as_list=True,
):
"""
Args:
decoder_states: output of the embedding layer (B x L_dec x H)
decoder_mask: decoder inputs mask (B x L_dec)
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
decoder_mems_list: list of the cached decoder hidden states
for fast autoregressive generation which will be used instead
of decoder_states as keys and values if not None
return_mems: bool, whether to return outputs of all decoder layers
or the last layer only
return_mems_as_list: bool, when True, mems returned are as a list; otherwise mems are Tensor
"""
decoder_attn_mask = form_attention_mask(decoder_mask, diagonal=self.diagonal)
encoder_attn_mask = form_attention_mask(encoder_mask)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, 0)
if return_mems_as_list:
cached_mems_list = [memory_states]
else:
cached_mems_list = memory_states.unsqueeze(0)
for i, layer in enumerate(self.layers):
decoder_states = layer(decoder_states, decoder_attn_mask, memory_states, encoder_states, encoder_attn_mask)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, i + 1)
if return_mems_as_list:
cached_mems_list.append(memory_states)
else:
cached_mems_list = torch.cat((cached_mems_list, memory_states.unsqueeze(0)), dim=0)
if self.final_layer_norm is not None:
decoder_states = self.final_layer_norm(decoder_states)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, i + 2)
if return_mems_as_list:
cached_mems_list.append(memory_states)
else:
cached_mems_list = torch.cat((cached_mems_list, memory_states.unsqueeze(0)), dim=0)
if return_mems:
return cached_mems_list
else:
return cached_mems_list[-1]
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
input_ids = torch.randint(low=0, high=2048, size=(max_batch, max_dim, 1024), device=sample.device)
encoder_mask = torch.randint(low=0, high=1, size=(max_batch, max_dim), device=sample.device)
return tuple([input_ids, encoder_mask, input_ids, encoder_mask])
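# --- Illustrative sketch (not part of the original NeMo file) ---
# Minimal example of cached (incremental) decoding with the TransformerDecoder above:
# the first call returns the per-layer memory states, which can then be passed back via
# decoder_mems_list so that only the newest position is recomputed. Sizes are hypothetical.
def _example_incremental_decoding():
    decoder = TransformerDecoder(num_layers=2, hidden_size=16, inner_size=32, num_attention_heads=2)
    dec_states = torch.rand(1, 3, 16)  # B x L_dec x H (already embedded)
    dec_mask = torch.ones(1, 3)        # B x L_dec
    enc_states = torch.rand(1, 5, 16)  # B x L_enc x H
    enc_mask = torch.ones(1, 5)        # B x L_enc
    mems = decoder(dec_states, dec_mask, enc_states, enc_mask, return_mems=True)
    # decode one extra position, reusing the cached memories
    new_state = torch.rand(1, 1, 16)
    new_mask = torch.ones(1, 1)
    return decoder(new_state, new_mask, enc_states, enc_mask, decoder_mems_list=mems)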
|
NeMo-main
|
nemo/collections/asr/modules/transformer/transformer_decoders.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
from omegaconf.dictconfig import DictConfig
from nemo.collections.asr.modules.transformer.transformer import TransformerDecoderNM, TransformerEncoderNM
from nemo.collections.asr.modules.transformer.transformer_bottleneck import TransformerBottleneckEncoderNM
def get_nemo_transformer(
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[Union[dict, DictConfig]] = None,
encoder: bool = True,
pre_ln_final_layer_norm: bool = True,
) -> Union[TransformerEncoderNM, TransformerDecoderNM]:
"""Returns NeMo transformer.
The following configurations are mandatory:
vocab_size: int
hidden_size: int
num_layers: int
inner_size: int
and must be specified if using config_dict.
    Args:
        model_name (Optional[str]): model name to download from NGC
        pretrained (bool): False will instantiate the named model architecture with random weights.
        config_dict (Optional[dict], optional): model configuration parameters. Defaults to None.
        encoder (bool, optional): True will return a TransformerEncoderNM, False a TransformerDecoderNM. Defaults to True.
        pre_ln_final_layer_norm (bool, optional): whether to apply a final layer norm after the last block when pre_ln is used. Defaults to True.
"""
    if model_name is not None:
        raise ValueError('NeMo transformers cannot be loaded from NGC yet. model_name should be None')
    if pretrained:
        raise ValueError('NeMo transformers cannot be loaded from NGC yet. pretrained should be False')
cfg = None
if not pretrained:
assert (
config_dict.get('vocab_size') is not None
and config_dict.get('hidden_size') is not None
and config_dict.get('num_layers') is not None
and config_dict.get('inner_size') is not None
        ), f'Using config_dict: {config_dict}. vocab_size, hidden_size, num_layers, and inner_size are mandatory arguments'
cfg = config_dict
if encoder:
# if arch exists in cfg we return TransformerBottleneckEncoderNM
arch = cfg.get('arch', '')
if not arch:
model = TransformerEncoderNM(
vocab_size=cfg.get('vocab_size'),
hidden_size=cfg.get('hidden_size'),
num_layers=cfg.get('num_layers'),
inner_size=cfg.get('inner_size'),
max_sequence_length=cfg.get('max_sequence_length', 512),
embedding_dropout=cfg.get('embedding_dropout', 0.0),
learn_positional_encodings=cfg.get('learn_positional_encodings', False),
num_attention_heads=cfg.get('num_attention_heads'),
ffn_dropout=cfg.get('ffn_dropout', 0.0),
attn_score_dropout=cfg.get('attn_score_dropout', 0.0),
attn_layer_dropout=cfg.get('attn_layer_dropout', 0.0),
hidden_act=cfg.get('hidden_act', 'relu'),
mask_future=cfg.get('mask_future', True),
pre_ln=cfg.get('pre_ln', False),
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
num_token_types=cfg.get('num_token_types', 2),
)
elif arch in TransformerBottleneckEncoderNM._SUPPORTED_ARCH:
model = TransformerBottleneckEncoderNM(
vocab_size=cfg.get('vocab_size'),
hidden_size=cfg.get('hidden_size'),
num_layers=cfg.get('num_layers'),
inner_size=cfg.get('inner_size'),
max_sequence_length=cfg.get('max_sequence_length', 512),
embedding_dropout=cfg.get('embedding_dropout', 0.0),
learn_positional_encodings=cfg.get('learn_positional_encodings', False),
num_attention_heads=cfg.get('num_attention_heads'),
ffn_dropout=cfg.get('ffn_dropout', 0.0),
attn_score_dropout=cfg.get('attn_score_dropout', 0.0),
attn_layer_dropout=cfg.get('attn_layer_dropout', 0.0),
hidden_act=cfg.get('hidden_act', 'relu'),
mask_future=cfg.get('mask_future', False),
pre_ln=cfg.get('pre_ln', False),
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
num_token_types=cfg.get('num_token_types', 2),
arch=cfg.get('arch', 'full'),
hidden_steps=cfg.get('hidden_steps', -1),
hidden_blocks=cfg.get('hidden_blocks', 1),
hidden_init_method=cfg.get('hidden_init_method', 'default'),
return_mask=cfg.get('return_mask', True),
)
else:
raise ValueError(f"Unknown arch = {arch}")
else:
model = TransformerDecoderNM(
vocab_size=cfg.get('vocab_size'),
hidden_size=cfg.get('hidden_size'),
num_layers=cfg.get('num_layers'),
inner_size=cfg.get('inner_size'),
max_sequence_length=cfg.get('max_sequence_length', 512),
embedding_dropout=cfg.get('embedding_dropout', 0.0),
learn_positional_encodings=cfg.get('learn_positional_encodings', False),
num_attention_heads=cfg.get('num_attention_heads'),
ffn_dropout=cfg.get('ffn_dropout', 0.0),
attn_score_dropout=cfg.get('attn_score_dropout', 0.0),
attn_layer_dropout=cfg.get('attn_layer_dropout', 0.0),
hidden_act=cfg.get('hidden_act', 'relu'),
pre_ln=cfg.get('pre_ln', False),
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
num_token_types=cfg.get('num_token_types', 2),
)
return model
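# --- Illustrative sketch (not part of the original NeMo file) ---
# Example of building a small NeMo transformer encoder from a plain config dict. The
# four keys called out in the docstring (vocab_size, hidden_size, num_layers, inner_size)
# are mandatory, and num_attention_heads is also required by the encoder constructor.
# All values below are hypothetical.
def _example_get_nemo_transformer():
    config_dict = {
        'vocab_size': 128,
        'hidden_size': 16,
        'num_layers': 2,
        'inner_size': 32,
        'num_attention_heads': 2,
    }
    # encoder=True returns a TransformerEncoderNM; encoder=False returns a TransformerDecoderNM
    return get_nemo_transformer(config_dict=config_dict, encoder=True)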
# def get_huggingface_transformer(
# model_name: Optional[str] = None,
# pretrained: bool = False,
# config_dict: Optional[Union[dict, DictConfig]] = None,
# encoder: bool = True,
# ) -> Union[HuggingFaceEncoderModule, HuggingFaceDecoderModule]:
# if encoder:
# model = HuggingFaceEncoderModule(model_name, pretrained, config_dict)
# else:
# model = HuggingFaceDecoderModule(model_name, pretrained, config_dict)
# return model
def get_megatron_transformer(
model_name: Optional[str] = None,
pretrained: bool = True,
config_dict: Optional[Union[dict, DictConfig]] = None,
encoder: bool = True,
checkpoint_file: str = None,
) -> None:
raise ValueError(
"megatron-lm bert encoders are deprecated in NeMo 1.5.0. Please use NeMo 1.4.0 until megatron bert support is added again."
)
# vocab_file = config_dict.pop('vocab_file', None)
# if encoder:
# model = MegatronEncoderModule(
# model_name=model_name,
# pretrained=pretrained,
# config_dict=config_dict,
# checkpoint_file=checkpoint_file,
# vocab_file=vocab_file,
# )
# else:
# raise ValueError('Megatron decoders are not currently supported.')
# return model
|
NeMo-main
|
nemo/collections/asr/modules/transformer/transformer_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Any, Dict, Optional
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import ChannelType, EncodedRepresentation, MaskType, NeuralType
__all__ = ['DecoderModule']
class DecoderModule(NeuralModule, ABC):
""" Base class for decoder neural module to be used in NLP models. """
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"decoder_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"encoder_embeddings": NeuralType(('B', 'T', 'D'), ChannelType(), optional=True),
"encoder_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"decoder_mems": NeuralType(('B', 'D', 'T', 'D'), EncodedRepresentation(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
@property
def hidden_size(self) -> Optional[int]:
raise NotImplementedError
@property
def vocab_size(self) -> Optional[int]:
raise NotImplementedError
@property
def embedding(self) -> Optional[Any]:
raise NotImplementedError
@property
def decoder(self) -> Optional[Any]:
raise NotImplementedError
@property
def max_sequence_length(self) -> Optional[int]:
raise NotImplementedError
|
NeMo-main
|
nemo/collections/asr/modules/transformer/decoder_module.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.modules.transformer.bridge_encoders import *
from nemo.collections.asr.modules.transformer.perceiver_encoders import *
from nemo.collections.asr.modules.transformer.transformer_bottleneck import *
from nemo.collections.asr.modules.transformer.transformer_decoders import *
from nemo.collections.asr.modules.transformer.transformer_encoders import *
from nemo.collections.asr.modules.transformer.transformer_generators import *
from nemo.collections.asr.modules.transformer.transformer_modules import *
|
NeMo-main
|
nemo/collections/asr/modules/transformer/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Dict, Optional
from nemo.core.classes import NeuralModule
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
__all__ = ['EncoderModule']
class EncoderModule(NeuralModule, ABC):
""" Base class for encoder neural module to be used in NLP models. """
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"encoder_mask": NeuralType(('B', 'T'), MaskType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
@property
def hidden_size(self) -> Optional[int]:
raise NotImplementedError
|
NeMo-main
|
nemo/collections/asr/modules/transformer/encoder_module.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from nemo.collections.asr.modules.transformer.transformer_decoders import TransformerDecoder
from nemo.collections.asr.modules.transformer.transformer_encoders import TransformerEncoder
from nemo.collections.asr.modules.transformer.transformer_modules import AttentionBridge
__all__ = ["PerceiverEncoder"]
class PerceiverEncoder(torch.nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
hidden_steps: int = 32,
hidden_init_method: str = "default",
hidden_blocks: int = 2,
):
super().__init__()
self._hidden_steps = hidden_steps
self._hidden_init_method = hidden_init_method
self._hidden_blocks = hidden_blocks
if self._hidden_init_method == "default":
self._hidden_init_method = "params"
if self.hidden_init_method not in self.supported_init_methods:
raise ValueError(
"Unknown hidden_init_method = {hidden_init_method}, supported methods are {supported_init_methods}".format(
hidden_init_method=self.hidden_init_method, supported_init_methods=self.supported_init_methods,
)
)
diagonal = 0 if mask_future else None
if self.hidden_init_method == "params":
# learnable initial hidden values
self.init_hidden = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(hidden_steps, hidden_size)))
self.init_cross_att = TransformerDecoder(
num_layers=1,
hidden_size=hidden_size,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self.init_cross_att.diagonal = diagonal
elif self.hidden_init_method == "bridge":
# initialize latent with attention bridge
self.att_bridge = AttentionBridge(hidden_size=hidden_size, k=hidden_steps, bridge_size=inner_size,)
# cross-attention encoder
layer = TransformerDecoder(
num_layers=1,
hidden_size=hidden_size,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
layer.diagonal = diagonal
self.cross_att_layers = torch.nn.ModuleList([copy.deepcopy(layer) for _ in range(hidden_blocks)])
# self-attention encoder
layer = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self.self_att_layers = torch.nn.ModuleList([copy.deepcopy(layer) for _ in range(hidden_blocks)])
@property
def supported_init_methods(self):
return ["params", "bridge"]
@property
def hidden_steps(self):
return self._hidden_steps
@property
def hidden_blocks(self):
return self._hidden_blocks
@property
def hidden_init_method(self):
return self._hidden_init_method
def forward(self, encoder_states, encoder_mask):
"""
Args:
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
"""
# all hidden values are active
hidden_mask = torch.ones(
encoder_states.shape[0], self._hidden_steps, dtype=encoder_mask.dtype, device=encoder_mask.device
)
# initialize hidden state
if self._hidden_init_method == "params":
# initialize latent with learned parameters
hidden_states = self.init_hidden.unsqueeze(0).expand(encoder_states.shape[0], -1, -1)
hidden_states = self.init_cross_att(
decoder_states=hidden_states,
decoder_mask=hidden_mask,
encoder_states=encoder_states,
encoder_mask=encoder_mask,
)
elif self._hidden_init_method == "bridge":
# initialize latent with attention bridge
hidden_states = self.att_bridge(hidden=encoder_states, hidden_mask=encoder_mask,)
        # apply (cross-attention, self-attention) blocks multiple times
        for self_att, cross_att in zip(self.self_att_layers, self.cross_att_layers):
residual = hidden_states
# cross attention of hidden over encoder states
hidden_states = cross_att(
decoder_states=hidden_states,
decoder_mask=hidden_mask,
encoder_states=encoder_states,
encoder_mask=encoder_mask,
)
# self-attention over hidden
hidden_states = self_att(encoder_states=hidden_states, encoder_mask=hidden_mask,)
# residual connection
hidden_states += residual
return hidden_states, hidden_mask
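# --- Illustrative sketch (not part of the original NeMo file) ---
# Minimal example of compressing a variable-length sequence into a fixed number of
# hidden_steps latent vectors with the PerceiverEncoder above. Sizes are hypothetical.
def _example_perceiver_compression():
    encoder = PerceiverEncoder(
        num_layers=2, hidden_size=16, inner_size=32, num_attention_heads=2, hidden_steps=8, hidden_blocks=1
    )
    states = torch.rand(2, 20, 16)  # B x L_enc x H
    mask = torch.ones(2, 20)        # B x L_enc
    hidden, hidden_mask = encoder(states, mask)
    return hidden.shape, hidden_mask.shape  # expected: (2, 8, 16) and (2, 8)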
|
NeMo-main
|
nemo/collections/asr/modules/transformer/perceiver_encoders.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.asr.modules.transformer.transformer_encoders import TransformerEncoder
from nemo.collections.asr.modules.transformer.transformer_modules import AttentionBridge
__all__ = ["BridgeEncoder"]
class BridgeEncoder(torch.nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
hidden_steps: int = 32,
hidden_init_method: str = "default",
hidden_blocks: int = 0,
):
super().__init__()
self._hidden_steps = hidden_steps
self._hidden_init_method = hidden_init_method
self._hidden_blocks = hidden_blocks
if self._hidden_init_method == "default":
self._hidden_init_method = "enc_shared"
if self.hidden_init_method not in self.supported_init_methods:
raise ValueError(
"Unknown hidden_init_method = {hidden_init_method}, supported methods are {supported_init_methods}".format(
hidden_init_method=self.hidden_init_method, supported_init_methods=self.supported_init_methods,
)
)
# attention bridge
self.att_bridge = AttentionBridge(hidden_size=hidden_size, k=hidden_steps, bridge_size=inner_size,)
if self.hidden_init_method == "enc":
self.init_hidden_enc = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
# self attention
self.hidden_enc = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
@property
def supported_init_methods(self):
return ["enc_shared", "identity", "enc"]
@property
def hidden_steps(self):
return self._hidden_steps
@property
def hidden_blocks(self):
return self._hidden_blocks
@property
def hidden_init_method(self):
return self._hidden_init_method
def forward(self, encoder_states, encoder_mask):
"""
Args:
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
"""
# self-attention over input
if self.hidden_init_method == "enc_shared":
residual = encoder_states
hidden_states = self.hidden_enc(encoder_states=encoder_states, encoder_mask=encoder_mask)
# residual connection
hidden_states += residual
elif self.hidden_init_method == "identity":
hidden_states = encoder_states
elif self.hidden_init_method == "enc":
residual = encoder_states
hidden_states = self.init_hidden_enc(encoder_states=encoder_states, encoder_mask=encoder_mask)
# residual connection
hidden_states += residual
# project encoder states to a fixed steps hidden using k attention heads
hidden_states = self.att_bridge(hidden=hidden_states, hidden_mask=encoder_mask)
# all hidden values are active
hidden_mask = torch.ones(
encoder_states.shape[0], self._hidden_steps, dtype=encoder_mask.dtype, device=encoder_mask.device
)
# apply self-attention over fixed-size hidden_states
for block in range(self._hidden_blocks):
residual = hidden_states
hidden_states = self.hidden_enc(encoder_states=hidden_states, encoder_mask=hidden_mask)
# residual connection
hidden_states += residual
return hidden_states, hidden_mask
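# --- Illustrative sketch (not part of the original NeMo file) ---
# Minimal example of projecting a variable-length sequence onto a fixed number of
# hidden_steps vectors via the attention bridge in the BridgeEncoder above.
# Sizes are hypothetical.
def _example_bridge_encoder():
    encoder = BridgeEncoder(
        num_layers=2, hidden_size=16, inner_size=32, num_attention_heads=2, hidden_steps=8, hidden_blocks=1
    )
    states = torch.rand(2, 20, 16)  # B x L_enc x H
    mask = torch.ones(2, 20)        # B x L_enc
    hidden, hidden_mask = encoder(states, mask)
    return hidden.shape, hidden_mask.shape  # expected: (2, 8, 16) and (2, 8)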
|
NeMo-main
|
nemo/collections/asr/modules/transformer/bridge_encoders.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional
import torch
from omegaconf.omegaconf import MISSING
from nemo.collections.asr.modules.transformer.decoder_module import DecoderModule
from nemo.collections.asr.modules.transformer.encoder_module import EncoderModule
from nemo.collections.asr.modules.transformer.transformer_decoders import TransformerDecoder
from nemo.collections.asr.modules.transformer.transformer_encoders import TransformerEncoder
from nemo.collections.asr.modules.transformer.transformer_modules import TransformerEmbedding
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import ChannelType, NeuralType
@dataclass
class NeMoTransformerConfig:
# must be configured by the user
hidden_size: int = MISSING
num_layers: int = MISSING
inner_size: int = MISSING
num_attention_heads: int = MISSING
# embedding
max_sequence_length: int = 512
num_token_types: int = 2
embedding_dropout: float = 0.0
learn_positional_encodings: bool = False
# transformer
ffn_dropout: float = 0.0
attn_score_dropout: float = 0.0
attn_layer_dropout: float = 0.0
hidden_act: str = 'relu'
pre_ln: bool = False
pre_ln_final_layer_norm: bool = True
# named model arguments
library: str = 'nemo'
model_name: Optional[str] = None
pretrained: bool = False
@dataclass
class NeMoTransformerEncoderConfig(NeMoTransformerConfig):
mask_future: bool = False
@dataclass
class NeMoTransformerDecoderConfig(NeMoTransformerConfig):
r2l: bool = False
class TransformerEncoderNM(EncoderModule, Exportable):
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
mask_future: bool = False,
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
):
super().__init__()
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._max_sequence_length = max_sequence_length
self._embedding = TransformerEmbedding(
vocab_size=self._vocab_size,
hidden_size=self._hidden_size,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
)
self._encoder = TransformerEncoder(
hidden_size=self._hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
mask_future=mask_future,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
@typecheck()
def forward(self, input_ids, encoder_mask):
embeddings = self._embedding(input_ids=input_ids)
encoder_hidden_states = self._encoder(encoder_states=embeddings, encoder_mask=encoder_mask)
return encoder_hidden_states
@property
def hidden_size(self):
return self._hidden_size
@property
def vocab_size(self):
return self._vocab_size
@property
def max_sequence_length(self):
return self._max_sequence_length
@property
def embedding(self):
return self._embedding
@property
def encoder(self):
return self._encoder
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
sz = (max_batch, max_dim)
input_ids = torch.randint(low=0, high=2048, size=sz, device=sample.device)
encoder_mask = torch.randint(low=0, high=1, size=sz, device=sample.device)
return tuple([input_ids, encoder_mask])
class TransformerDecoderNM(DecoderModule, Exportable):
def __init__(
self,
vocab_size: int,
hidden_size: int,
num_layers: int,
inner_size: int,
num_attention_heads: int,
max_sequence_length: int = 512,
num_token_types: int = 2,
embedding_dropout: float = 0.0,
learn_positional_encodings: bool = False,
ffn_dropout: float = 0.0,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
hidden_act: str = 'relu',
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
):
super().__init__()
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._max_sequence_length = max_sequence_length
self.num_states = num_layers + 1
self.return_mems = False
if pre_ln_final_layer_norm:
self.num_states += 1
self._embedding = TransformerEmbedding(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
max_sequence_length=max_sequence_length,
num_token_types=num_token_types,
embedding_dropout=embedding_dropout,
learn_positional_encodings=learn_positional_encodings,
)
self._decoder = TransformerDecoder(
hidden_size=self.hidden_size,
num_layers=num_layers,
inner_size=inner_size,
num_attention_heads=num_attention_heads,
ffn_dropout=ffn_dropout,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
@typecheck()
def forward(
self, input_ids, decoder_mask, encoder_embeddings, encoder_mask, decoder_mems=None,
):
start_pos = 0
if decoder_mems is not None:
start_pos = input_ids.shape[1] - 1
input_ids = input_ids[:, -1:]
decoder_mask = decoder_mask[:, -1:]
decoder_mems = torch.transpose(decoder_mems, 0, 1)
decoder_embeddings = self._embedding(input_ids=input_ids, start_pos=start_pos)
decoder_hidden_states = self._decoder(
decoder_states=decoder_embeddings,
decoder_mask=decoder_mask,
encoder_states=encoder_embeddings,
encoder_mask=encoder_mask,
decoder_mems_list=decoder_mems,
return_mems=self.return_mems,
return_mems_as_list=False,
)
if self.return_mems:
decoder_hidden_states = torch.transpose(decoder_hidden_states, 0, 1)
return decoder_hidden_states
@property
def hidden_size(self):
return self._hidden_size
@property
def vocab_size(self):
return self._vocab_size
@property
def max_sequence_length(self):
return self._max_sequence_length
@property
def embedding(self):
return self._embedding
@property
def decoder(self):
return self._decoder
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
sz = (max_batch, max_dim)
input_ids = torch.randint(low=0, high=2048, size=sz, device=sample.device)
encoder_mask = torch.randint(low=0, high=1, size=sz, device=sample.device)
mem_size = [max_batch, self.num_states, max_dim - 1, self._hidden_size]
decoder_mems = torch.rand(mem_size, device=sample.device)
return tuple([input_ids, encoder_mask, self._embedding(input_ids), encoder_mask, decoder_mems])
def _prepare_for_export(self, **kwargs):
self._decoder.diagonal = None
self.return_mems = True
super()._prepare_for_export(**kwargs)
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
if self.return_mems:
return {"last_hidden_states": NeuralType(('B', 'D', 'T', 'D'), ChannelType())}
else:
return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())}
|
NeMo-main
|
nemo/collections/asr/modules/transformer/transformer.py
|
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import torch
from torch import nn
from torch.nn.functional import gelu
from nemo.collections.common.parts import form_attention_mask
from nemo.utils import logging
__all__ = ["TransformerEmbedding", "AttentionBridge"]
class FixedPositionalEncoding(nn.Module):
"""
Fixed positional encoding (embedding layer) from sine and cosine functions
of different frequencies according to https://arxiv.org/abs/1706.03762
Args:
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
"""
def __init__(self, hidden_size, max_sequence_length=512):
super().__init__()
self._hidden_size = hidden_size
self._max_sequence_length = max_sequence_length
self._build_pos_enc(hidden_size=self._hidden_size, max_sequence_length=self._max_sequence_length)
def _build_pos_enc(self, hidden_size, max_sequence_length, device=None):
"""
Builds/replaces pre-computed positional encoding.
"""
pos_enc = torch.zeros(max_sequence_length, hidden_size, device=device)
position = torch.arange(0.0, max_sequence_length).unsqueeze(1)
coef = -math.log(10000.0) / hidden_size
div_term = torch.exp(coef * torch.arange(0.0, hidden_size, 2))
pos_enc[:, 0::2] = torch.sin(position * div_term)
pos_enc[:, 1::2] = torch.cos(position * div_term)
pos_enc.div_(math.sqrt(hidden_size))
self.register_buffer('pos_enc', pos_enc)
def forward(self, position_ids):
max_pos_id = position_ids.max()
# update positional encoding if needed
if max_pos_id >= self._max_sequence_length:
logging.warning(
f'Max position id {max_pos_id} is greater than max sequence length {self._max_sequence_length}. Expanding position embeddings just for this batch. This is not expected to work very well. Consider chunking your input into smaller sequences.'
)
self._build_pos_enc(
hidden_size=self._hidden_size, max_sequence_length=max_pos_id + 1, device=position_ids.device,
)
embeddings = torch.embedding(self.pos_enc, position_ids)
        # Revert the expansion of position embeddings, since keeping it would cause checkpoint size mismatches.
if max_pos_id >= self._max_sequence_length:
self._build_pos_enc(
hidden_size=self._hidden_size,
max_sequence_length=self._max_sequence_length,
device=position_ids.device,
)
return embeddings
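# --- Illustrative sketch (not part of the original NeMo file) ---
# Example lookup of fixed sin/cos positional encodings. Positions beyond
# max_sequence_length trigger the temporary expansion (and warning) implemented
# in forward() above. Sizes are hypothetical.
def _example_fixed_positional_encoding():
    pe = FixedPositionalEncoding(hidden_size=16, max_sequence_length=8)
    position_ids = torch.arange(8).unsqueeze(0)  # (1, 8)
    return pe(position_ids).shape                # (1, 8, 16)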
class TransformerEmbedding(nn.Module):
"""
Embedding from token and position embeddings.
Optionally add token_type embedding (e.g. type of the sentence in BERT).
Args:
vocab_size: size of the vocabulary
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
num_token_types: number of different token types
(e.g. tokens of sentence A and tokens of sentence B in BERT)
embedding_dropout: probability of dropout applied to embeddings
learn_positional_encodings: whether to learn positional encodings or
use fixed (sine-cosine) ones
"""
def __init__(
self,
vocab_size,
hidden_size,
max_sequence_length=512,
num_token_types=2,
embedding_dropout=0.0,
learn_positional_encodings=False,
):
super().__init__()
self.max_sequence_length = max_sequence_length
self.learn_positional_encodings = learn_positional_encodings
self.token_embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0)
if learn_positional_encodings:
self.position_embedding = nn.Embedding(max_sequence_length, hidden_size)
else:
self.position_embedding = FixedPositionalEncoding(hidden_size, max_sequence_length)
if num_token_types > 0:
self.token_type_embedding = nn.Embedding(num_token_types, hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
self.dropout = nn.Dropout(embedding_dropout)
def forward(self, input_ids, token_type_ids=None, start_pos=0):
seq_length = input_ids.size(1)
        # We fail here only with learned (parametric) positional embeddings; FixedPositionalEncoding extends automatically.
if self.learn_positional_encodings and (seq_length > self.max_sequence_length):
raise ValueError(
f"Input sequence is longer than maximum allowed sequence length for positional encoding. "
f"Got {seq_length} and {self.max_sequence_length}"
)
position_ids = torch.arange(
start=start_pos, end=start_pos + seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).repeat(input_ids.size(0), 1)
token_embeddings = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = token_embeddings + position_embeddings
if token_type_ids is not None:
token_type_embeddings = self.token_type_embedding(token_type_ids)
embeddings = embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
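# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Shows the TransformerEmbedding above on random token ids; the vocab size and shapes
# are illustrative assumptions only.
def _example_transformer_embedding():
    """Minimal sketch: embed a batch of token ids with fixed positional encodings."""
    emb = TransformerEmbedding(vocab_size=100, hidden_size=16, max_sequence_length=32)
    input_ids = torch.randint(low=1, high=100, size=(2, 10))
    out = emb(input_ids)  # token + position embeddings, then layer norm and dropout
    assert out.shape == (2, 10, 16)
    return out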
class MultiHeadAttention(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
def __init__(self, hidden_size, num_attention_heads, attn_score_dropout=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number "
"of attention heads (%d)" % (hidden_size, num_attention_heads)
)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, queries, keys, values, attention_mask):
# attention_mask is needed to hide the tokens which correspond to [PAD]
# in the case of BERT, or to hide the future tokens in the case of
# vanilla language modeling and translation
query = self.query_net(queries)
key = self.key_net(keys)
value = self.value_net(values)
query = self.transpose_for_scores(query) / self.attn_scale
key = self.transpose_for_scores(key) / self.attn_scale
value = self.transpose_for_scores(value)
# for numerical stability we pre-divide query and key by sqrt(sqrt(d))
attention_scores = torch.matmul(query, key.transpose(-1, -2))
if attention_mask is not None:
attention_scores = attention_scores + attention_mask.to(attention_scores.dtype)
attention_probs = torch.softmax(attention_scores, dim=-1)
attention_probs = self.attn_dropout(attention_probs)
context = torch.matmul(attention_probs, value)
context = context.permute(0, 2, 1, 3).contiguous()
new_context_shape = context.size()[:-2] + (self.hidden_size,)
context = context.view(*new_context_shape)
# output projection
output_states = self.out_projection(context)
output_states = self.layer_dropout(output_states)
return output_states
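# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Runs the MultiHeadAttention block above as self-attention with a hand-built additive
# padding mask; the tensor sizes and the -10000.0 masking value are illustrative assumptions.
def _example_multi_head_attention():
    """Minimal sketch: self-attention over a random sequence with two padded positions."""
    batch, seq, hidden = 2, 6, 16
    mha = MultiHeadAttention(hidden_size=hidden, num_attention_heads=4)
    x = torch.randn(batch, seq, hidden)
    # additive mask, broadcastable to (batch, heads, seq, seq): 0 keeps a position,
    # a large negative value hides it from the softmax
    mask = torch.zeros(batch, 1, 1, seq)
    mask[:, :, :, -2:] = -10000.0  # pretend the last two tokens are padding
    out = mha(queries=x, keys=x, values=x, attention_mask=mask)
    assert out.shape == (batch, seq, hidden)
    return out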
class PositionWiseFF(nn.Module):
"""
Position-wise feed-forward network of Transformer block.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
net, usually is (4-8 x hidden_size) in the papers
ffn_dropout: probability of dropout applied to net output
hidden_act: activation function used between two linear layers
"""
def __init__(self, hidden_size, inner_size, ffn_dropout=0.0, hidden_act="relu"):
super().__init__()
self.dense_in = nn.Linear(hidden_size, inner_size)
self.dense_out = nn.Linear(inner_size, hidden_size)
self.layer_dropout = nn.Dropout(ffn_dropout)
ACT2FN = {"gelu": gelu, "relu": torch.relu}
self.act_fn = ACT2FN[hidden_act]
def forward(self, hidden_states):
output_states = self.dense_in(hidden_states)
output_states = self.act_fn(output_states)
output_states = self.dense_out(output_states)
output_states = self.layer_dropout(output_states)
return output_states
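# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Exercises the PositionWiseFF block above; sizes are illustrative assumptions.
def _example_position_wise_ff():
    """Minimal sketch: run the position-wise feed-forward net on random hidden states."""
    ff = PositionWiseFF(hidden_size=16, inner_size=64, hidden_act="relu")
    x = torch.randn(2, 6, 16)
    out = ff(x)  # same shape as the input, transformed position by position
    assert out.shape == x.shape
    return out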
class AttentionBridge(torch.nn.Module):
"""
A multi-head attention bridge to project a variable-size hidden states
to k hidden states (per attention head).
Code is based on the paper https://arxiv.org/pdf/1703.03130.pdf
"""
def __init__(self, hidden_size, k, bridge_size):
"""
hidden_size - size of input hidden state
k - number of attention heads
bridge_size - size of internal feed forward weights (i.e., attention head size)
"""
super().__init__()
self.hidden_size = hidden_size
self.k = k
self.bridge_size = bridge_size
self.attn_scale = np.sqrt(np.sqrt(self.bridge_size))
# build model
self.W1 = torch.nn.Linear(hidden_size, bridge_size, bias=False)
self.W2 = torch.nn.Linear(bridge_size, k, bias=False)
self.act = torch.nn.ReLU()
def forward(self, hidden, hidden_mask=None, return_ortho_loss=False):
"""
Project hidden [B x N x H] to fixed-size [B x k x H]
return_ortho_loss - if True returns loss term to encourage
orthogonal attention vectors
"""
attention_scores = self.W2(self.act(self.W1(hidden) / self.attn_scale) / self.attn_scale).transpose(-1, -2)
attention_mask = form_attention_mask(hidden_mask)
if attention_mask is not None:
attention_mask.squeeze_(1)
attention_scores = attention_scores + attention_mask.to(attention_scores.dtype)
A = torch.softmax(attention_scores, dim=-1)
M = A @ hidden
if return_ortho_loss:
ortho_loss = ((A @ A.transpose(-1, -2)) - torch.eye(self.k).type_as(A)).pow(2).sum()
return M, ortho_loss
else:
return M
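# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Projects a variable-length sequence to k fixed "heads" with the AttentionBridge above;
# it relies on form_attention_mask from this file's imports, and sizes are illustrative.
def _example_attention_bridge():
    """Minimal sketch: reduce a B x N x H sequence to B x k x H."""
    bridge = AttentionBridge(hidden_size=16, k=4, bridge_size=32)
    hidden = torch.randn(2, 9, 16)                    # B x N x H
    hidden_mask = torch.ones(2, 9, dtype=torch.long)  # all positions valid
    m, ortho_loss = bridge(hidden, hidden_mask=hidden_mask, return_ortho_loss=True)
    assert m.shape == (2, 4, 16)                      # B x k x H
    return m, ortho_loss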
|
NeMo-main
|
nemo/collections/asr/modules/transformer/transformer_modules.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import List, Tuple, Union
from torch import Tensor
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
class LengthParam(TypedDict):
max_length: int # The maximum length of the sequence to be generated.
min_length: int # The minimum length of the sequence to be generated.
class SamplingParam(TypedDict):
    use_greedy: bool # Whether to use greedy decoding; if False, sampling is used
temperature: float # sampling temperature
top_k: int # The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p: float # If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
repetition_penalty: float # The parameter for repetition penalty. 1.0 means no penalty.
    add_BOS: bool # Whether to add the BOS token at the beginning of the prompt
    all_probs: bool # Whether to return the log prob for all the tokens in the vocab
    compute_logprob: bool # a flag used to compute the log prob of all the input text; a very special case of running inference, default False
class OutputType(TypedDict):
sentences: List[str] # output sentences
    tokens: List[List[str]] # output sentences broken into tokens
logprob: List[List[float]] # log prob of generated tokens
full_logprob: List[List[float]] # log prob of all the tokens in the vocab
token_ids: List[List[int]] # output sentence token ids
offsets: List[List[int]] # list of tokens start positions in text
class TextGeneration:
"""
Interface for all text generation models.
"""
def generate(
self,
inputs: Union[List[str], Tuple[Tensor, Tensor], List[dict]],
length_params: LengthParam,
sampling_params: SamplingParam = None,
) -> OutputType:
"""
Public method to generate text.
Args:
inputs (Union[List[str], Tensor, List[dict]]):
Can be one of the 3 types:
                    1. List of strings. Each element of the list is an input prompt. The model will apply the tokenizer to it.
                       E.g. ['sentence', 'sentence2', ...]
                    2. Tuple of PyTorch Tensors (context_tokens, context_lengths). `context_tokens` has shape (batch_size, seq_length); it holds the batched sequences of tokens used as a prompt for the generation or as model inputs to the encoder.
                       The generative model will skip the tokenization and padding step. `context_lengths` has shape (batch_size,); it indicates the length of the context tokens for each of the input sequences.
E.g. ( torch.tensor([[23,5234,23,35,…], [223,323,23,23232,232,...] …]), torch.tensor([20, 30, …]))
3. List of python dict objects. Used for prompt/p-tuning inputs where a set of key-value pairs are converted into input token embeddings for the model.
E.g. [{"prompt-tag": "sentiment", "sentence": "this is a good movie"},
{"prompt-tag": "qa", "context": "some context text", "question": "a simple question"} ... ]
where 'prompt-tag' is used to identify the type of NLP task to solve.
length_params (LengthParam):
a dictionary type which controls the sampling length.
max_length: int, The maximum length of the sequence to be generated.
min_length: int, The minimum length of the sequence to be generated.
If None, max_length is set to 30, and min_length is set to None
sampling_params (SamplingParam):
a dictionary type which contains the parameters for text sampling. It has the following keys
                use_greedy: bool, Whether to use greedy decoding; if False, sampling is used
top_k: int, The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p: float, If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
repetition_penalty: float, The parameter for repetition penalty. 1.0 means no penalty.
                add_BOS: bool, Whether to add the BOS token at the beginning of the prompt
                all_probs: bool, Whether to return the log prob for all the tokens in the vocab
                compute_logprob: bool, a flag used to compute the log prob of all the input text; a very special case of running inference, default False
                Default is None. If None, use_greedy is set to True.
Returns:
OutputType: It generates the output in a dictionary type. It has the following keys:
sentences: List[str], output sentences
                tokens: List[List[str]], output sentences broken into tokens
logprob: List[List[float]], log prob of generated tokens
full_logprob: List[List[float]], log prob of all the tokens in the vocab
token_ids: List[List[int]], output sentence token ids
                offsets: List[List[int]], list of tokens start positions in text
"""
raise NotImplementedError("please implement this method")
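# --- Hedged illustration (editorial addition, not part of the original NeMo module).
# A toy subclass showing the shape of the TextGeneration contract: it only "echoes"
# whitespace-split prompts instead of running a model, and the returned token ids /
# log probs are placeholders. A real implementation would run its decoder here.
class _EchoTextGeneration(TextGeneration):
    def generate(self, inputs, length_params, sampling_params=None):
        # length_params / sampling_params are accepted but ignored in this toy sketch
        if not isinstance(inputs, list) or not all(isinstance(s, str) for s in inputs):
            raise NotImplementedError("This toy example only handles List[str] inputs.")
        tokens = [s.split() for s in inputs]
        return {
            'sentences': list(inputs),
            'tokens': tokens,
            'logprob': [[0.0] * len(t) for t in tokens],
            'full_logprob': [[] for _ in tokens],
            'token_ids': [list(range(len(t))) for t in tokens],
            'offsets': [[0] * len(t) for t in tokens],
        }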
|
NeMo-main
|
nemo/collections/asr/modules/transformer/text_generation.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from nemo.collections.asr.modules.transformer.transformer_encoders import TransformerEncoder
__all__ = ["PoolingEncoder"]
class PoolingEncoder(torch.nn.Module):
_SUPPORTED_ARCH = ["max", "avg"]
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
mask_future: bool = False,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
pre_ln_final_layer_norm: bool = True,
hidden_steps: int = 4,
hidden_init_method: str = "default",
hidden_blocks: int = 2,
pooling_type: str = "max",
):
super().__init__()
# minimal steps to allow reduction
self._hidden_steps = hidden_steps
self._hidden_init_method = hidden_init_method
self._hidden_blocks = hidden_blocks
self._pooling_type = pooling_type
if self._hidden_steps < 2:
raise ValueError("Expected hidden_steps >= 2 but received hidden_steps = {self._hidden_steps}")
if self.hidden_init_method not in self.supported_init_methods:
raise ValueError(
"Unknown hidden_init_method = {hidden_init_method}, supported methods are {supported_init_methods}".format(
hidden_init_method=self.hidden_init_method, supported_init_methods=self.supported_init_methods,
)
)
if self._pooling_type not in self.supported_arch:
raise ValueError(f"Unknown pooling_type = {pooling_type}. Available values = {self.supported_arch}")
# self-attention encoder
layer = TransformerEncoder(
num_layers=num_layers,
hidden_size=hidden_size,
inner_size=inner_size,
mask_future=mask_future,
num_attention_heads=num_attention_heads,
attn_score_dropout=attn_score_dropout,
attn_layer_dropout=attn_layer_dropout,
ffn_dropout=ffn_dropout,
hidden_act=hidden_act,
pre_ln=pre_ln,
pre_ln_final_layer_norm=pre_ln_final_layer_norm,
)
self.self_att_layers = torch.nn.ModuleList([copy.deepcopy(layer) for _ in range(hidden_blocks)])
self.pooling = self._build_pooling_module()
def _build_pooling_module(self):
"""
Returns pooling module.
Allows to override for child classes.
"""
if self._pooling_type == "max":
pooling = torch.nn.MaxPool1d(kernel_size=2, stride=2)
elif self._pooling_type == "avg":
pooling = torch.nn.AvgPool1d(kernel_size=2, stride=2)
return pooling
@property
def supported_arch(self):
return self._SUPPORTED_ARCH
@property
def supported_init_methods(self):
return ["default"]
@property
def hidden_steps(self):
return self._hidden_steps
@property
def hidden_blocks(self):
return self._hidden_blocks
@property
def hidden_init_method(self):
return self._hidden_init_method
def forward(self, encoder_states, encoder_mask):
"""
Args:
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
"""
# initialize hidden state
hidden_mask = encoder_mask
hidden_states = encoder_states
# apply block (self-attention, max-pool) multiple times
for self_att in self.self_att_layers:
residual = hidden_states
# self-attention over hidden
hidden_states = self_att(encoder_states=hidden_states, encoder_mask=hidden_mask)
hidden_states += residual
# max pool reduction if possible
if hidden_states.shape[1] >= self.hidden_steps:
# max pool hidden states
hidden_states = hidden_states.permute(0, 2, 1)
hidden_states = self.pooling(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1)
# max pool mask
hidden_mask = (
self.pooling(hidden_mask.unsqueeze(0).type_as(hidden_states)).squeeze(0).type_as(hidden_mask)
)
return hidden_states, hidden_mask
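# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Shows how each (self-attention, pooling) block of the PoolingEncoder above halves the
# sequence length until hidden_steps is reached; all sizes are illustrative assumptions.
def _example_pooling_encoder():
    """Minimal sketch: reduce a length-16 sequence to length 4 with two pooling blocks."""
    encoder = PoolingEncoder(
        num_layers=1,
        hidden_size=16,
        inner_size=32,
        num_attention_heads=2,
        hidden_steps=4,
        hidden_blocks=2,
        pooling_type="max",
    )
    states = torch.randn(2, 16, 16)             # B x L_enc x H
    mask = torch.ones(2, 16, dtype=torch.long)  # B x L_enc
    out_states, out_mask = encoder(states, mask)
    # 16 -> 8 -> 4 after two max-pool reductions with kernel/stride 2
    assert out_states.shape[1] == 4 and out_mask.shape[1] == 4
    return out_states, out_mask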
|
NeMo-main
|
nemo/collections/asr/modules/transformer/reduction_encoders.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import math
import multiprocessing
import os
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import braceexpand
import numpy as np
import torch
import webdataset as wd
from torch.utils.data import ChainDataset
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.collections.common import tokenizers
from nemo.collections.common.parts.preprocessing import collections, parsers
from nemo.core.classes import Dataset, IterableDataset
from nemo.core.neural_types import *
from nemo.utils import logging
from nemo.utils.data_utils import (
DataStoreObject,
datastore_object_get,
datastore_path_to_webdataset_url,
is_datastore_cache_shared,
is_datastore_path,
is_tarred_path,
)
from nemo.utils.get_rank import is_global_rank_zero
__all__ = [
'AudioToCharDataset',
'AudioToBPEDataset',
'TarredAudioToCharDataset',
'TarredAudioToBPEDataset',
]
def _speech_collate_fn(batch, pad_id):
"""collate batch of audio sig, audio len, tokens, tokens len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
"""
packed_batch = list(zip(*batch))
if len(packed_batch) == 5:
_, audio_lengths, _, tokens_lengths, sample_ids = packed_batch
elif len(packed_batch) == 4:
sample_ids = None
_, audio_lengths, _, tokens_lengths = packed_batch
else:
raise ValueError("Expects 4 or 5 tensors in the batch!")
max_audio_len = 0
has_audio = audio_lengths[0] is not None
if has_audio:
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
audio_signal, tokens = [], []
for b in batch:
if len(b) == 5:
sig, sig_len, tokens_i, tokens_i_len, _ = b
else:
sig, sig_len, tokens_i, tokens_i_len = b
if has_audio:
sig_len = sig_len.item()
if sig_len < max_audio_len:
pad = (0, max_audio_len - sig_len)
sig = torch.nn.functional.pad(sig, pad)
audio_signal.append(sig)
tokens_i_len = tokens_i_len.item()
if tokens_i_len < max_tokens_len:
pad = (0, max_tokens_len - tokens_i_len)
tokens_i = torch.nn.functional.pad(tokens_i, pad, value=pad_id)
tokens.append(tokens_i)
if has_audio:
audio_signal = torch.stack(audio_signal)
audio_lengths = torch.stack(audio_lengths)
else:
audio_signal, audio_lengths = None, None
tokens = torch.stack(tokens)
tokens_lengths = torch.stack(tokens_lengths)
if sample_ids is None:
return audio_signal, audio_lengths, tokens, tokens_lengths
else:
sample_ids = torch.tensor(sample_ids, dtype=torch.int32)
return audio_signal, audio_lengths, tokens, tokens_lengths, sample_ids
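# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Collates two hand-built (signal, signal_len, tokens, tokens_len) samples of different
# lengths with _speech_collate_fn above; the sample values are illustrative assumptions.
def _example_speech_collate():
    """Minimal sketch: pad and stack a toy two-sample batch."""
    batch = [
        (torch.randn(16000), torch.tensor(16000), torch.tensor([3, 4, 5]), torch.tensor(3)),
        (torch.randn(8000), torch.tensor(8000), torch.tensor([7]), torch.tensor(1)),
    ]
    audio, audio_len, tokens, tokens_len = _speech_collate_fn(batch, pad_id=0)
    # the shorter signal and token sequence are right-padded to the batch maximum
    assert audio.shape == (2, 16000) and tokens.shape == (2, 3)
    return audio, audio_len, tokens, tokens_len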
class ASRManifestProcessor:
"""
Class that processes a manifest json file containing paths to audio files, transcripts, and durations (in seconds).
Each new line is a different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.
parser: Str for a language specific preprocessor or a callable.
max_duration: If audio exceeds this length, do not include in dataset.
min_duration: If audio is less than this length, do not include in dataset.
max_utts: Limit number of utterances.
bos_id: Id of beginning of sequence symbol to append if not None.
eos_id: Id of end of sequence symbol to append if not None.
pad_id: Id of pad symbol. Defaults to 0.
"""
def __init__(
self,
manifest_filepath: str,
parser: Union[str, Callable],
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
max_utts: int = 0,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
index_by_file_id: bool = False,
):
self.parser = parser
self.collection = collections.ASRAudioText(
manifests_files=manifest_filepath,
parser=parser,
min_duration=min_duration,
max_duration=max_duration,
max_number=max_utts,
index_by_file_id=index_by_file_id,
)
self.eos_id = eos_id
self.bos_id = bos_id
self.pad_id = pad_id
def process_text_by_id(self, index: int) -> Tuple[List[int], int]:
sample = self.collection[index]
return self.process_text_by_sample(sample)
def process_text_by_file_id(self, file_id: str) -> Tuple[List[int], int]:
manifest_idx = self.collection.mapping[file_id][0]
sample = self.collection[manifest_idx]
return self.process_text_by_sample(sample)
def process_text_by_sample(self, sample: collections.ASRAudioText.OUTPUT_TYPE) -> Tuple[List[int], int]:
t, tl = sample.text_tokens, len(sample.text_tokens)
if self.bos_id is not None:
t = [self.bos_id] + t
tl += 1
if self.eos_id is not None:
t = t + [self.eos_id]
tl += 1
return t, tl
def expand_sharded_filepaths(sharded_filepaths, shard_strategy: str, world_size: int, global_rank: int):
valid_shard_strategies = ['scatter', 'replicate']
if shard_strategy not in valid_shard_strategies:
raise ValueError(f"`shard_strategy` must be one of {valid_shard_strategies}")
if isinstance(sharded_filepaths, str):
        # Replace the opening-brace aliases '(', '[', '<' and '_OP_' with '{'
brace_keys_open = ['(', '[', '<', '_OP_']
for bkey in brace_keys_open:
if bkey in sharded_filepaths:
sharded_filepaths = sharded_filepaths.replace(bkey, "{")
        # Replace the closing-brace aliases ')', ']', '>' and '_CL_' with '}'
brace_keys_close = [')', ']', '>', '_CL_']
for bkey in brace_keys_close:
if bkey in sharded_filepaths:
sharded_filepaths = sharded_filepaths.replace(bkey, "}")
if isinstance(sharded_filepaths, str):
# Brace expand, set escape=False for Windows compatibility
sharded_filepaths = list(braceexpand.braceexpand(sharded_filepaths, escape=False))
# Expand store paths into WebDataset URLs
sharded_filepaths = [
datastore_path_to_webdataset_url(p) if is_datastore_path(p) and is_tarred_path(p) else p
for p in sharded_filepaths
]
# Check for distributed and partition shards accordingly
if world_size > 1:
if shard_strategy == 'scatter':
logging.info("All tarred dataset shards will be scattered evenly across all nodes.")
if len(sharded_filepaths) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(sharded_filepaths)}) is not divisible "
f"by number of distributed workers ({world_size})."
)
begin_idx = (len(sharded_filepaths) // world_size) * global_rank
end_idx = begin_idx + len(sharded_filepaths) // world_size
sharded_filepaths = sharded_filepaths[begin_idx:end_idx]
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
elif shard_strategy == 'replicate':
logging.info("All tarred dataset shards will be replicated across all nodes.")
else:
raise ValueError(f"Invalid shard strategy ! Allowed values are : {valid_shard_strategies}")
return sharded_filepaths
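# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Demonstrates brace expansion plus 'scatter' partitioning with expand_sharded_filepaths
# above; the tar names are made up, and the files need not exist for path expansion.
def _example_expand_sharded_filepaths():
    """Minimal sketch: expand 'audio_{0..3}.tar' and take rank 1's slice of the shards."""
    shards = expand_sharded_filepaths(
        sharded_filepaths="audio_{0..3}.tar",
        shard_strategy="scatter",
        world_size=2,
        global_rank=1,
    )
    # four shards scattered across two ranks -> each rank keeps two of them
    # (rank 1 is expected to get the second half, e.g. audio_2.tar and audio_3.tar)
    assert len(shards) == 2
    return shards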
def cache_datastore_manifests(
manifest_filepaths: Union[str, List[str]],
cache_audio: bool = False,
shared_cache: Optional[bool] = None,
num_workers: Optional[int] = None,
max_num_workers: int = 20,
):
"""Cache manifests and audio from an object store.
It is assumed that remote manifests are using relative paths.
Args:
manifest_filepaths: list of paths to manifest files (list of strings or a string with `,` as separator)
cache_audio: If True, audio from manifest will also be cached
shared_cache: Optional, True if cache is shared across all nodes
num_workers: Optional, number of workers to be used for download
max_num_workers: max number of workers to be used for download, used when setting num_workers automatically
"""
if isinstance(manifest_filepaths, str):
manifest_filepaths = manifest_filepaths.split(',')
num_datastore_manifests = sum([is_datastore_path(f) for f in manifest_filepaths])
if num_datastore_manifests > 0:
# Local utility function
def cache_data(manifest_filepaths, cache_audio, num_workers, max_num_workers):
"""Cache manifests and audio data from object store.
"""
# Determine the number of workers to use
if num_workers is None:
num_workers = os.cpu_count() - 1
num_workers = min(num_workers, max_num_workers)
# Process each manifest file
for manifest_file in manifest_filepaths:
# If manifest is on a data store, then cache it.
# Otherwise, nothing to do.
if is_datastore_path(manifest_file):
logging.info('Cache manifest file: %s', manifest_file)
cached_manifest_file = DataStoreObject(manifest_file).get()
logging.info('Cached at: %s', str(cached_manifest_file))
if cache_audio:
# Each audio file from manifest will be cached.
logging.info('Cache audio from manifest file: %s', manifest_file)
# Assumes that manifest is using relative paths
manifest_dir = os.path.dirname(manifest_file)
# Prepare all store objects
audio_objects = []
with open(cached_manifest_file, 'r') as f:
for line in f:
item = json.loads(line)
store_path = os.path.join(manifest_dir, item['audio_filepath'])
audio_objects.append(DataStoreObject(store_path=store_path))
if num_workers is not None and num_workers > 1:
logging.debug('Using multiprocessing with num_workers: %d.', num_workers)
with multiprocessing.Pool(processes=num_workers) as p:
result = list(
tqdm(p.imap(datastore_object_get, audio_objects), total=len(audio_objects))
)
else:
logging.debug('Using a single process.')
result = []
for audio_object in tqdm(audio_objects):
result.append(audio_object.get() is not None)
if not all(result):
raise RuntimeError('Some files not downloaded successfully')
logging.info('Caching complete')
else:
# Nothing to do here
logging.debug('Manifest is not on a data store: %s', manifest_file)
if torch.distributed.is_available() and torch.distributed.is_initialized():
logging.debug('Distributed environment is available and initialized.')
# Handle distributed environment
if shared_cache is None:
shared_cache = is_datastore_cache_shared()
if shared_cache:
logging.debug('Cache is shared among nodes, cache data on global rank zero.')
is_rank_zero = is_global_rank_zero()
else:
logging.debug('Cache is not shared among nodes, cache data on local rank zero.')
local_rank = int(os.environ.get("LOCAL_RANK", 0))
is_rank_zero = local_rank == 0
if is_rank_zero:
logging.info('Cache data from %s rank 0', 'global' if shared_cache else 'local')
cache_data(
manifest_filepaths=manifest_filepaths,
cache_audio=cache_audio,
num_workers=num_workers,
max_num_workers=max_num_workers,
)
logging.debug('Reached barrier')
torch.distributed.barrier()
elif is_global_rank_zero():
# Handle non-distributed environment, e.g., if running on a single GPU
logging.warning(
'Torch distributed is not initialized and caching may be prone to data race conditions. '
'Now caching data from global rank 0. If there are other ranks and they pass this '
'before rank 0, errors might result.'
)
cache_data(
manifest_filepaths=manifest_filepaths,
cache_audio=cache_audio,
num_workers=num_workers,
max_num_workers=max_num_workers,
)
else:
raise RuntimeError(
'Torch distributed is not initialized and caching on nodes other than global rank zero is disabled '
'to avoid race condition between different ranks. To ensure distributed environment is '
'initialized, please update data config to use `defer_setup = True`.'
)
"""Optionally expand / shard the list of manifests
This is made to use the same notation as the sharded audio files
Args:
manifest_filepaths: list of manifest files (the sharded notation)
shard_strategy: scatter or replicate (scatter by default)
shard_manifests: bool, if False, no sharding / manifest filepath expansion will be attempted
global_rank: int, the rank of this worker
world_size: int, total number of workers
"""
def shard_manifests_if_needed(
manifest_filepaths: Union[str, List[str]],
shard_strategy: str,
shard_manifests: bool,
global_rank: int,
world_size: int,
):
if shard_manifests:
if not torch.distributed.is_available():
logging.warning("Not running in torch.distributed mode. Manifest sharding not available")
return manifest_filepaths
if not torch.distributed.is_initialized():
logging.warning(
                'Manifest sharding was requested but torch.distributed is not initialized. '
                'Did you intend to set the defer_setup flag?'
)
return manifest_filepaths
manifest_filepaths = expand_sharded_filepaths(
sharded_filepaths=manifest_filepaths,
shard_strategy=shard_strategy,
world_size=world_size,
global_rank=global_rank,
)
return manifest_filepaths
class _AudioTextDataset(Dataset):
"""
Dataset that loads tensors via a json file containing paths to audio files, transcripts, and durations (in seconds).
Each new line is a different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.
parser: Str for a language specific preprocessor or a callable.
sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor object used to augment loaded
audio
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include in dataset
max_utts: Limit number of utterances
trim: whether or not to trim silence. Defaults to False
bos_id: Id of beginning of sequence symbol to append if not None
eos_id: Id of end of sequence symbol to append if not None
pad_id: Id of pad symbol. Defaults to 0
return_sample_id (bool): whether to return the sample_id as a part of each sample
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
'transcripts': NeuralType(('B', 'T'), LabelsType()),
'transcript_length': NeuralType(tuple('B'), LengthsType()),
'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
}
def __init__(
self,
manifest_filepath: str,
parser: Union[str, Callable],
sample_rate: int,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
max_duration: Optional[int] = None,
min_duration: Optional[int] = None,
max_utts: int = 0,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
return_sample_id: bool = False,
channel_selector: Optional[ChannelSelectorType] = None,
):
if type(manifest_filepath) == str:
manifest_filepath = manifest_filepath.split(",")
# If necessary, cache manifests and audio from object store
cache_datastore_manifests(manifest_filepaths=manifest_filepath, cache_audio=True)
self.manifest_processor = ASRManifestProcessor(
manifest_filepath=manifest_filepath,
parser=parser,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
)
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
self.trim = trim
self.return_sample_id = return_sample_id
self.channel_selector = channel_selector
def get_manifest_sample(self, sample_id):
return self.manifest_processor.collection[sample_id]
def __getitem__(self, index):
sample = self.manifest_processor.collection[index]
offset = sample.offset
if offset is None:
offset = 0
features = self.featurizer.process(
sample.audio_file,
offset=offset,
duration=sample.duration,
trim=self.trim,
orig_sr=sample.orig_sr,
channel_selector=self.channel_selector,
)
f, fl = features, torch.tensor(features.shape[0]).long()
t, tl = self.manifest_processor.process_text_by_sample(sample=sample)
if self.return_sample_id:
output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long(), index
else:
output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long()
return output
def __len__(self):
return len(self.manifest_processor.collection)
def _collate_fn(self, batch):
return _speech_collate_fn(batch, pad_id=self.manifest_processor.pad_id)
class AudioToCharDataset(_AudioTextDataset):
"""
Dataset that loads tensors via a json file containing paths to audio
files, transcripts, and durations (in seconds). Each new line is a
different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath":
"/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the
transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest json as described above. Can
be comma-separated paths.
labels: String containing all the possible characters to map to
sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include
in dataset
max_utts: Limit number of utterances
blank_index: blank character index, default = -1
        unk_index: unk character index, default = -1
        normalize: whether to normalize transcript text. Defaults to True
bos_id: Id of beginning of sequence symbol to append if not None
eos_id: Id of end of sequence symbol to append if not None
return_sample_id (bool): whether to return the sample_id as a part of each sample
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
'transcripts': NeuralType(('B', 'T'), LabelsType()),
'transcript_length': NeuralType(tuple('B'), LengthsType()),
'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
}
def __init__(
self,
manifest_filepath: str,
labels: Union[str, List[str]],
sample_rate: int,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
max_utts: int = 0,
blank_index: int = -1,
unk_index: int = -1,
normalize: bool = True,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
parser: Union[str, Callable] = 'en',
return_sample_id: bool = False,
channel_selector: Optional[ChannelSelectorType] = None,
):
self.labels = labels
parser = parsers.make_parser(
labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
)
super().__init__(
manifest_filepath=manifest_filepath,
parser=parser,
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
return_sample_id=return_sample_id,
channel_selector=channel_selector,
)
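# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Wires AudioToCharDataset above into a DataLoader. 'manifest.json', the toy label set
# and the audio it references are hypothetical placeholders; the manifest and audio
# files must actually exist on disk for this to run.
def _example_audio_to_char_dataset(manifest_path='manifest.json'):
    """Minimal sketch: build the dataset and batch it with its own collate function."""
    from torch.utils.data import DataLoader

    labels = [" ", "a", "b", "c"]  # toy character set, for illustration only
    dataset = AudioToCharDataset(
        manifest_filepath=manifest_path,
        labels=labels,
        sample_rate=16000,
    )
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset._collate_fn)
    return next(iter(loader))  # (audio_signal, audio_lengths, transcripts, transcript_lengths)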
class AudioToBPEDataset(_AudioTextDataset):
"""
Dataset that loads tensors via a json file containing paths to audio
files, transcripts, and durations (in seconds). Each new line is a
different sample. Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath":
"/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the
transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
In practice, the dataset and manifest used for character encoding and byte pair encoding
are exactly the same. The only difference lies in how the dataset tokenizes the text in
the manifest.
Args:
manifest_filepath: Path to manifest json as described above. Can
be comma-separated paths.
tokenizer: A subclass of the Tokenizer wrapper found in the common collection,
nemo.collections.common.tokenizers.TokenizerSpec. ASR Models support a subset of
all available tokenizers.
sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include
in dataset
max_utts: Limit number of utterances
trim: Whether to trim silence segments
use_start_end_token: Boolean which dictates whether to add [BOS] and [EOS]
tokens to beginning and ending of speech respectively.
return_sample_id (bool): whether to return the sample_id as a part of each sample
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
'transcripts': NeuralType(('B', 'T'), LabelsType()),
'transcript_length': NeuralType(tuple('B'), LengthsType()),
'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
}
def __init__(
self,
manifest_filepath: str,
tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
sample_rate: int,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
max_duration: Optional[int] = None,
min_duration: Optional[int] = None,
max_utts: int = 0,
trim: bool = False,
use_start_end_token: bool = True,
return_sample_id: bool = False,
channel_selector: Optional[ChannelSelectorType] = None,
):
if use_start_end_token and hasattr(tokenizer, "bos_id") and tokenizer.bos_id > 0:
bos_id = tokenizer.bos_id
else:
bos_id = None
if use_start_end_token and hasattr(tokenizer, "eos_id") and tokenizer.eos_id > 0:
eos_id = tokenizer.eos_id
else:
eos_id = None
if hasattr(tokenizer, "pad_id") and tokenizer.pad_id > 0:
pad_id = tokenizer.pad_id
else:
pad_id = 0
class TokenizerWrapper:
def __init__(self, tokenizer):
if isinstance(tokenizer, tokenizers.aggregate_tokenizer.AggregateTokenizer):
self.is_aggregate = True
else:
self.is_aggregate = False
self._tokenizer = tokenizer
def __call__(self, *args):
if isinstance(args[0], List) and self.is_aggregate:
t = []
for span in args[0]:
t.extend(self._tokenizer.text_to_ids(span['str'], span['lang']))
return t
t = self._tokenizer.text_to_ids(*args)
return t
super().__init__(
manifest_filepath=manifest_filepath,
parser=TokenizerWrapper(tokenizer),
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
trim=trim,
return_sample_id=return_sample_id,
channel_selector=channel_selector,
)
class _TarredAudioToTextDataset(IterableDataset):
"""
A similar Dataset to the AudioToCharDataset/AudioToBPEDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset/AudioToBPEDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple workers the number of shards should be divisible by world_size to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
parser (callable): A callable which is used to pre-process the text output.
sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
blank_index (int): Blank character index, defaults to -1.
unk_index (int): Unknown character index, defaults to -1.
normalize (bool): Dataset parameter.
Whether to use automatic text cleaning.
It is highly recommended to manually clean text for best results.
Defaults to True.
        trim (bool): Whether to trim silence from the beginning and end
of audio signal using librosa.effects.trim().
Defaults to False.
bos_id (id): Dataset parameter.
Beginning of string symbol id used for seq2seq models.
Defaults to None.
eos_id (id): Dataset parameter.
End of string symbol id used for seq2seq models.
Defaults to None.
pad_id (id): Token used to pad when collating samples in batches.
If this is None, pads using 0s.
Defaults to None.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
            occasions (when the number of shards is not divisible by ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
        shard_manifests (bool): Whether or not to try to shard manifests. Defaults to False.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
return_sample_id (bool): whether to return the sample_id as a part of each sample
"""
def __init__(
self,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: str,
parser: Callable,
sample_rate: int,
int_values: bool = False,
augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
shuffle_n: int = 0,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
shard_strategy: str = "scatter",
shard_manifests: bool = False,
global_rank: int = 0,
world_size: int = 0,
return_sample_id: bool = False,
):
self.shard_manifests = shard_manifests
# Shard manifests if necessary and possible and then expand the paths
manifest_filepath = shard_manifests_if_needed(
shard_manifests=shard_manifests,
shard_strategy=shard_strategy,
manifest_filepaths=manifest_filepath,
world_size=world_size,
global_rank=global_rank,
)
# If necessary, cache manifests from object store
cache_datastore_manifests(manifest_filepaths=manifest_filepath)
self.manifest_processor = ASRManifestProcessor(
manifest_filepath=manifest_filepath,
parser=parser,
max_duration=max_duration,
min_duration=min_duration,
max_utts=0,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
index_by_file_id=True, # Must set this so the manifest lines can be indexed by file ID
)
self.len = self._compute_len()
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
self.trim = trim
self.eos_id = eos_id
self.bos_id = bos_id
self.pad_id = pad_id
self.return_sample_id = return_sample_id
audio_tar_filepaths = expand_sharded_filepaths(
sharded_filepaths=audio_tar_filepaths,
shard_strategy=shard_strategy,
world_size=world_size,
global_rank=global_rank,
)
# Put together WebDataset
self._dataset = wd.WebDataset(urls=audio_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = (
self._dataset.rename(audio='wav;ogg;flac', key='__key__')
.to_tuple('audio', 'key')
.pipe(self._filter)
.pipe(self._loop_offsets)
.map(f=self._build_sample)
)
def _filter(self, iterator):
"""This function is used to remove samples that have been filtered out by ASRAudioText already.
Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample
that was filtered out (e.g. for duration).
Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,
which may make your code hang as one process will finish before the other.
"""
class TarredAudioFilter:
def __init__(self, collection):
self.iterator = iterator
self.collection = collection
def __iter__(self):
return self
def __next__(self):
while True:
audio_bytes, audio_filename = next(self.iterator)
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
if file_id in self.collection.mapping:
return audio_bytes, audio_filename
return TarredAudioFilter(self.manifest_processor.collection)
def _loop_offsets(self, iterator):
"""This function is used to iterate through utterances with different offsets for each file.
"""
class TarredAudioLoopOffsets:
def __init__(self, collection):
self.iterator = iterator
self.collection = collection
self.current_fn = None
self.current_bytes = None
self.offset_id = 0
def __iter__(self):
return self
def __next__(self):
if self.current_fn is None:
self.current_bytes, self.current_fn = next(self.iterator)
self.offset_id = 0
else:
offset_list = self.collection.mapping[self.current_fn]
if len(offset_list) == self.offset_id + 1:
self.current_bytes, self.current_fn = next(self.iterator)
self.offset_id = 0
else:
self.offset_id += 1
return self.current_bytes, self.current_fn, self.offset_id
return TarredAudioLoopOffsets(self.manifest_processor.collection)
def _collate_fn(self, batch):
return _speech_collate_fn(batch, self.pad_id)
def _build_sample(self, tup):
"""Builds the training sample by combining the data from the WebDataset with the manifest info.
"""
audio_bytes, audio_filename, offset_id = tup
# Grab manifest entry from self.manifest_preprocessor.collection
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
manifest_idx = self.manifest_processor.collection.mapping[file_id][offset_id]
manifest_entry = self.manifest_processor.collection[manifest_idx]
offset = manifest_entry.offset
if offset is None:
offset = 0
# Convert audio bytes to IO stream for processing (for SoundFile to read)
audio_filestream = io.BytesIO(audio_bytes)
features = self.featurizer.process(
audio_filestream,
offset=offset,
duration=manifest_entry.duration,
trim=self.trim,
orig_sr=manifest_entry.orig_sr,
)
audio_filestream.close()
# Audio features
f, fl = features, torch.tensor(features.shape[0]).long()
# Text features
t, tl = manifest_entry.text_tokens, len(manifest_entry.text_tokens)
self.manifest_processor.process_text_by_sample(sample=manifest_entry)
if self.bos_id is not None:
t = [self.bos_id] + t
tl += 1
if self.eos_id is not None:
t = t + [self.eos_id]
tl += 1
if self.return_sample_id:
return f, fl, torch.tensor(t).long(), torch.tensor(tl).long(), manifest_idx
else:
return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()
def get_manifest_sample(self, sample_id):
return self.manifest_processor.collection[sample_id]
def __iter__(self):
return self._dataset.__iter__()
def _compute_len(self):
if self.shard_manifests and torch.distributed.is_available() and torch.distributed.is_initialized():
my_len = torch.tensor(len(self.manifest_processor.collection), dtype=torch.int32).cuda()
torch.distributed.all_reduce(my_len)
my_len = my_len.int()
logging.info(f'Sharded manifests: Total length: {my_len}')
else:
my_len = len(self.manifest_processor.collection)
return my_len
def __len__(self):
return self.len
class TarredAudioToCharDataset(_TarredAudioToTextDataset):
"""
A similar Dataset to the AudioToCharDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple workers the number of shards should be divisible by world_size to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
labels (list): List of characters that can be output by the ASR model.
For Jasper, this is the 28 character set {a-z '}. The CTC blank
symbol is automatically added later for models using ctc.
sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
blank_index (int): Blank character index, defaults to -1.
unk_index (int): Unknown character index, defaults to -1.
normalize (bool): Dataset parameter.
Whether to use automatic text cleaning.
It is highly recommended to manually clean text for best results.
Defaults to True.
        trim (bool): Whether to trim silence from the beginning and end
of audio signal using librosa.effects.trim().
Defaults to False.
bos_id (id): Dataset parameter.
Beginning of string symbol id used for seq2seq models.
Defaults to None.
eos_id (id): Dataset parameter.
End of string symbol id used for seq2seq models.
Defaults to None.
pad_id (id): Token used to pad when collating samples in batches.
If this is None, pads using 0s.
Defaults to None.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
            occasions (when the number of shards is not divisible by ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
return_sample_id (bool): whether to return the sample_id as a part of each sample
"""
def __init__(
self,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: str,
labels: List[str],
sample_rate: int,
int_values: bool = False,
augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
shuffle_n: int = 0,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
blank_index: int = -1,
unk_index: int = -1,
normalize: bool = True,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
parser: Optional[str] = 'en',
pad_id: int = 0,
shard_strategy: str = "scatter",
shard_manifests: bool = False,
global_rank: int = 0,
world_size: int = 0,
return_sample_id: bool = False,
):
self.labels = labels
parser = parsers.make_parser(
labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
)
super().__init__(
audio_tar_filepaths=audio_tar_filepaths,
manifest_filepath=manifest_filepath,
parser=parser,
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
shuffle_n=shuffle_n,
min_duration=min_duration,
max_duration=max_duration,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
shard_strategy=shard_strategy,
shard_manifests=shard_manifests,
global_rank=global_rank,
world_size=world_size,
return_sample_id=return_sample_id,
)
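# --- Hedged usage sketch (editorial addition, not part of the original NeMo module).
# Illustrates the brace-expandable tar path and the shard arguments of
# TarredAudioToCharDataset above. The tarballs, manifest and label set are hypothetical
# placeholders; they must exist (and world_size/global_rank must match the job) to run.
def _example_tarred_audio_to_char_dataset():
    """Minimal sketch: construct a tarred char dataset for one rank of a 2-process job."""
    dataset = TarredAudioToCharDataset(
        audio_tar_filepaths="audio_{0..3}.tar",  # or the _OP_/_CL_ tags inside SLURM scripts
        manifest_filepath="tarred_manifest.json",
        labels=[" ", "a", "b", "c"],
        sample_rate=16000,
        shuffle_n=128,
        shard_strategy="scatter",
        global_rank=0,
        world_size=2,
    )
    return dataset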
class TarredAudioToBPEDataset(_TarredAudioToTextDataset):
"""
A similar Dataset to the AudioToBPEDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToBPEDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple workers the number of shards should be divisible by world_size to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
tokenizer (TokenizerSpec): Either a Word Piece Encoding tokenizer (BERT),
or a Sentence Piece Encoding tokenizer (BPE). The CTC blank
symbol is automatically added later for models using ctc.
sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
        trim (bool): Whether to trim silence from the beginning and end
            of the audio signal using librosa.effects.trim().
            Defaults to False.
        use_start_end_token (bool): Boolean which dictates whether to add [BOS] and [EOS]
            tokens to the beginning and end of each transcript, respectively.
        pad_id (int): Token used to pad when collating samples in batches.
            If this is None, pads using 0s.
            Defaults to None.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
                data points! As such, there is no guarantee that all samples in the dataset will be
                sampled at least once during 1 epoch. The scatter strategy, on the other hand, on specific
                occasions (when the number of shards is not divisible by ``world_size``), will not sample
                the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
return_sample_id (bool): whether to return the sample_id as a part of each sample
"""
def __init__(
self,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: str,
tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
sample_rate: int,
int_values: bool = False,
augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
shuffle_n: int = 0,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
trim: bool = False,
use_start_end_token: bool = True,
shard_strategy: str = "scatter",
shard_manifests: bool = False,
global_rank: int = 0,
world_size: int = 0,
return_sample_id: bool = False,
):
if use_start_end_token and hasattr(tokenizer, "bos_id") and tokenizer.bos_id > 0:
bos_id = tokenizer.bos_id
else:
bos_id = None
if use_start_end_token and hasattr(tokenizer, "eos_id") and tokenizer.eos_id > 0:
eos_id = tokenizer.eos_id
else:
eos_id = None
if hasattr(tokenizer, "pad_id") and tokenizer.pad_id > 0:
pad_id = tokenizer.pad_id
else:
pad_id = 0
class TokenizerWrapper:
def __init__(self, tokenizer):
if isinstance(tokenizer, tokenizers.aggregate_tokenizer.AggregateTokenizer):
self.is_aggregate = True
else:
self.is_aggregate = False
self._tokenizer = tokenizer
def __call__(self, *args):
if isinstance(args[0], List) and self.is_aggregate:
t = []
for span in args[0]:
t.extend(self._tokenizer.text_to_ids(span['str'], span['lang']))
return t
t = self._tokenizer.text_to_ids(*args)
return t
super().__init__(
audio_tar_filepaths=audio_tar_filepaths,
manifest_filepath=manifest_filepath,
parser=TokenizerWrapper(tokenizer),
sample_rate=sample_rate,
int_values=int_values,
augmentor=augmentor,
shuffle_n=shuffle_n,
min_duration=min_duration,
max_duration=max_duration,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
shard_strategy=shard_strategy,
shard_manifests=shard_manifests,
global_rank=global_rank,
world_size=world_size,
return_sample_id=return_sample_id,
)
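# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how a tarred BPE dataset might be constructed for DDP training.
# The tarball/manifest paths are hypothetical placeholders and the `tokenizer` argument
# is assumed to be a TokenizerSpec built elsewhere.
def _example_build_tarred_bpe_dataset(tokenizer, global_rank: int = 0, world_size: int = 1):
    return TarredAudioToBPEDataset(
        audio_tar_filepaths="path/to/audio_{0..63}.tar",  # brace-expandable shard pattern (placeholder)
        manifest_filepath="path/to/tarred_manifest.json",  # placeholder
        tokenizer=tokenizer,
        sample_rate=16000,
        shuffle_n=2048,  # per-worker look-ahead shuffle buffer
        shard_strategy="scatter",  # each rank receives a unique subset of shards
        global_rank=global_rank,
        world_size=world_size,
    )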
class BucketingDataset(IterableDataset):
"""
    A Dataset which wraps another IterableDataset and adapts it for bucketing
Args:
dataset (IterableDataset): The IterableDataset to get wrapped
bucketing_batch_size (int): Number of samples to build a batch
"""
def __init__(
self, dataset: IterableDataset, bucketing_batch_size: int,
):
self.wrapped_dataset = dataset
self.bucketing_batch_size = bucketing_batch_size
super().__init__()
def _collate_fn(self, batch):
return _speech_collate_fn(batch[0], self.wrapped_dataset.pad_id)
def __iter__(self):
return BucketingIterator(
wrapped_ds=self.wrapped_dataset._dataset, bucketing_batch_size=self.bucketing_batch_size
).__iter__()
def __len__(self):
return int(math.ceil(len(self.wrapped_dataset) / float(self.bucketing_batch_size)))
class BucketingIterator:
def __init__(self, wrapped_ds, bucketing_batch_size):
self.wrapped_ds = wrapped_ds
self.wrapped_iter = None
self.bucketing_batch_size = bucketing_batch_size
def __iter__(self):
self.wrapped_iter = iter(self.wrapped_ds)
return self
def __next__(self):
batches = []
for idx in range(self.bucketing_batch_size):
try:
sample = next(self.wrapped_iter)
except StopIteration:
break
batches.append(sample)
if len(batches) == 0:
raise StopIteration
return batches
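# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of wrapping a tarred dataset for bucketing: the wrapped dataset is
# expected to expose `pad_id` and an underlying `_dataset` attribute (as the tarred
# datasets above do), and the DataLoader is driven with batch_size=1 so that each
# item it yields is already a bucket of `bucketing_batch_size` utterances.
def _example_bucketing_loader(tarred_dataset, bucketing_batch_size: int = 32):
    from torch.utils.data import DataLoader  # local import to keep the sketch self-contained

    bucketed = BucketingDataset(dataset=tarred_dataset, bucketing_batch_size=bucketing_batch_size)
    return DataLoader(bucketed, batch_size=1, collate_fn=bucketed._collate_fn, num_workers=1)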
class RandomizedChainDataset(ChainDataset):
def __init__(self, datasets: Iterable[Dataset], rnd_seed=0) -> None:
super(RandomizedChainDataset, self).__init__(list(datasets))
self.rnd_gen = np.random.RandomState(rnd_seed)
def __iter__(self):
shuffled_order = self.rnd_gen.permutation(len(self.datasets))
for dataset_idx in shuffled_order:
d = self.datasets[dataset_idx]
assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
for idx, x in enumerate(d):
yield x
# in case d is an infinite dataset, we want to break the loop
# so that the other datasets get a chance to yield too
if idx >= len(d) - 1:
break
|
NeMo-main
|
nemo/collections/asr/data/audio_to_text.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import torch
from nemo.collections.asr.parts.preprocessing.feature_loader import ExternalFeatureLoader
from nemo.collections.common.parts.preprocessing import collections
from nemo.core.classes import Dataset
from nemo.core.neural_types import AcousticEncodedRepresentation, LabelsType, LengthsType, NeuralType
from nemo.utils import logging
def _feature_collate_fn(batch):
"""collate batch of feat sig, feat len, labels, labels len, assuming all features have the same shape.
Args:
batch (FloatTensor, LongTensor, LongTensor, LongTensor): A tuple of tuples of feature, feature lengths,
encoded labels, and encoded labels length.
"""
packed_batch = list(zip(*batch))
if len(packed_batch) == 5:
_, feat_lengths, _, labels_lengths, sample_ids = packed_batch
elif len(packed_batch) == 4:
sample_ids = None
_, feat_lengths, _, labels_lengths = packed_batch
else:
raise ValueError("Expects 4 or 5 tensors in the batch!")
features, labels = [], []
for b in batch:
feat_i, labels_i = b[0], b[2]
features.append(feat_i)
labels.append(labels_i)
features = torch.stack(features)
feat_lengths = torch.stack(feat_lengths)
labels = torch.stack(labels)
labels_lengths = torch.stack(labels_lengths)
if sample_ids is None:
return features, feat_lengths, labels, labels_lengths
else:
sample_ids = torch.tensor(sample_ids, dtype=torch.int32)
return features, feat_lengths, labels, labels_lengths, sample_ids
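# --- Illustrative sketch (not part of the original module) ---
# _feature_collate_fn assumes every feature (and every label sequence) in the batch
# already has the same shape, so tensors can be stacked without padding. The shapes
# below are arbitrary placeholders.
def _example_feature_collate():
    batch = [
        (torch.ones(80, 100), torch.tensor(100), torch.tensor([1, 2]), torch.tensor(2)),
        (torch.ones(80, 100), torch.tensor(100), torch.tensor([3, 4]), torch.tensor(2)),
    ]
    feats, feat_lens, labels, label_lens = _feature_collate_fn(batch)
    # feats: [2, 80, 100], feat_lens: [2], labels: [2, 2], label_lens: [2]
    return feats.shape, labels.shape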
def _audio_feature_collate_fn(batch, feat_pad_val, label_pad_id):
"""collate batch of audio feature, audio len, labels, labels len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of feature, feature lengths,
labels, and label lengths. This collate func assumes the
features are torch tensors of Log-Melspectrogram (i.e. [N_MEL, T]).
"""
packed_batch = list(zip(*batch))
if len(packed_batch) == 5:
_, feat_lengths, _, labels_lengths, sample_ids = packed_batch
elif len(packed_batch) == 4:
sample_ids = None
_, feat_lengths, _, labels_lengths = packed_batch
else:
raise ValueError("Expects 4 or 5 tensors in the batch!")
max_feat_len = 0
has_feat = feat_lengths[0] is not None
if has_feat:
max_feat_len = max(feat_lengths).item()
max_labels_len = max(labels_lengths).item()
features, labels = [], []
for b in batch:
feat_i, feat_i_len, label_i, label_i_len = b[0], b[1], b[2], b[3]
if has_feat:
feat_i_len = feat_i_len.item()
if feat_i_len < max_feat_len:
pad = (0, max_feat_len - feat_i_len)
feat_i = torch.nn.functional.pad(feat_i, pad, value=feat_pad_val)
features.append(feat_i)
label_i_len = label_i_len.item()
if label_i_len < max_labels_len:
pad = (0, max_labels_len - label_i_len)
label_i = torch.nn.functional.pad(label_i, pad, value=label_pad_id)
labels.append(label_i)
if has_feat:
features = torch.stack(features)
feature_lengths = torch.stack(feat_lengths)
else:
        features, feature_lengths = None, None
labels = torch.stack(labels)
labels_lengths = torch.stack(labels_lengths)
if sample_ids is None:
return features, feature_lengths, labels, labels_lengths
else:
sample_ids = torch.tensor(sample_ids, dtype=torch.int32)
return features, feature_lengths, labels, labels_lengths, sample_ids
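# --- Illustrative sketch (not part of the original module) ---
# Unlike _feature_collate_fn above, this collate function right-pads variable-length
# [D, T] features to the longest T in the batch (with `feat_pad_val`) and pads the
# label sequences with `label_pad_id`. Shapes are arbitrary placeholders.
def _example_audio_feature_collate():
    batch = [
        (torch.zeros(64, 50), torch.tensor(50), torch.tensor([1]), torch.tensor(1)),
        (torch.zeros(64, 80), torch.tensor(80), torch.tensor([2, 3]), torch.tensor(2)),
    ]
    feats, feat_lens, labels, label_lens = _audio_feature_collate_fn(batch, feat_pad_val=0.0, label_pad_id=0)
    # feats: [2, 64, 80] (the first sample is padded from T=50 to T=80), labels: [2, 2]
    return feats.shape, labels.shape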
def _vad_feature_segment_collate_fn(batch, window_length_in_sec, shift_length_in_sec, frame_unit_in_sec):
"""collate batch of audio features, features len, tokens, tokens len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
            The batch size is assumed to be 1.
"""
slice_length = int(window_length_in_sec / frame_unit_in_sec)
audio_features, feat_lengths, _, tokens_lengths = zip(*batch)
slice_length = int(min(slice_length, max(feat_lengths)))
shift = int(shift_length_in_sec / frame_unit_in_sec)
has_audio = feat_lengths[0] is not None
f_dim = audio_features[0].shape[0]
audio_features, num_slices, tokens, feat_lengths = [], [], [], []
append_len_start = torch.div(slice_length, 2, rounding_mode='trunc')
append_len_end = slice_length - torch.div(slice_length, 2, rounding_mode='trunc')
for feat_i, feat_i_len, tokens_i, _ in batch:
start = torch.zeros(f_dim, append_len_start)
end = torch.zeros(f_dim, append_len_end)
feat_i = torch.cat((start, feat_i, end), dim=1)
feat_i_len += slice_length
if has_audio:
slices = max(1, torch.div(feat_i_len - slice_length, shift, rounding_mode='trunc'))
for slice_id in range(slices):
start_idx = slice_id * shift
end_idx = start_idx + slice_length
feat_slice = feat_i[:, start_idx:end_idx]
audio_features.append(feat_slice)
num_slices.append(slices)
tokens.extend([tokens_i] * slices)
feat_lengths.extend([slice_length] * slices)
if has_audio:
audio_features = torch.stack(audio_features)
feat_lengths = torch.tensor(feat_lengths)
else:
audio_features, feat_lengths = None, None
tokens = torch.stack(tokens)
tokens_lengths = torch.tensor(num_slices)
return audio_features, feat_lengths, tokens, tokens_lengths
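# --- Illustrative sketch (not part of the original module) ---
# The VAD segment collate function slices one (padded) feature sequence into
# overlapping windows of `window_length_in_sec` every `shift_length_in_sec`, and
# repeats the utterance label once per window. Values below are placeholders.
def _example_vad_segment_collate():
    batch = [(torch.zeros(64, 100), torch.tensor(100), torch.tensor(1), torch.tensor(1))]
    feats, feat_lens, tokens, tokens_lens = _vad_feature_segment_collate_fn(
        batch, window_length_in_sec=0.63, shift_length_in_sec=0.08, frame_unit_in_sec=0.01
    )
    # feats: [num_windows, 64, window_frames]; tokens repeats the single label once per window.
    return feats.shape, tokens.shape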
class _FeatureSeqSpeakerLabelDataset(Dataset):
"""
    Dataset that loads tensors via a json file containing paths to feature files and sequences of labels.
    Each new line is a different sample. JSON files should be of the following format:
    {"feature_filepath": "/path/to/feature_0.p", "seq_label": "speakerA speakerB speakerA ..."}
    ...
    {"feature_filepath": "/path/to/feature_n.p", "seq_label": "target_seq_label_n"}
    target_seq_label_n is the string of the sequence of speaker labels, separated by spaces.
Args:
manifest_filepath (str): Dataset parameter. Path to JSON containing data.
labels (Optional[list]): Dataset parameter. List of unique labels collected from all samples.
feature_loader : Dataset parameter. Feature loader to load (external) feature.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
# TODO output type for external features
output_types = {
'external_feat': NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
'feat_length': NeuralType(tuple('B'), LengthsType()),
}
if self.is_speaker_emb:
output_types.update(
{
'embs': NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
'embs_length': NeuralType(tuple('B'), LengthsType()),
'label': NeuralType(('B', 'T'), LabelsType()),
'label_length': NeuralType(tuple('B'), LengthsType()),
}
)
else:
output_types.update(
{'label': NeuralType(('B', 'T'), LabelsType()), 'label_length': NeuralType(tuple('B'), LengthsType()),}
)
return output_types
def __init__(
self, *, manifest_filepath: str, labels: List[str], feature_loader, is_speaker_emb: bool = False,
):
super().__init__()
self.collection = collections.ASRFeatureSequenceLabel(manifests_files=manifest_filepath.split(','),)
self.feature_loader = feature_loader
self.labels = labels if labels else self.collection.uniq_labels
self.is_speaker_emb = is_speaker_emb
self.label2id, self.id2label = {}, {}
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
for idx in range(len(self.labels[:5])):
logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
def __len__(self):
return len(self.collection)
def __getitem__(self, index):
sample = self.collection[index]
features = self.feature_loader.process(sample.feature_file)
f, fl = features, torch.tensor(features.shape[0]).long()
t = torch.tensor(sample.seq_label).float()
tl = torch.tensor(len(sample.seq_label)).long()
return f, fl, t, tl
class FeatureToSeqSpeakerLabelDataset(_FeatureSeqSpeakerLabelDataset):
"""
Dataset that loads tensors via a json file containing paths to feature
files and sequence of speakers. Each new line is a
different sample. Example below:
{"feature_filepath": "/path/to/feature_0.p", "seq_label": speakerA speakerB SpeakerA ....} \
...
{"feature_filepath": "/path/to/feature_n.p", "seq_label": target_seq_label_n}
target_seq_label_n is the string of sequence of speaker label, separated by space.
Args:
        manifest_filepath (str): Path to manifest json as described above. Can be comma-separated paths.
labels (Optional[list]): String containing all the possible labels to map to
if None then automatically picks from ASRFeatureSequenceLabel collection.
        feature_loader: Feature loader used to load (external) features.
"""
def _collate_fn(self, batch):
return _feature_collate_fn(batch)
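# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of constructing the sequence-of-speaker-labels dataset; the
# manifest path is a hypothetical placeholder, and labels fall back to the ones
# collected from the manifest when None is passed.
def _example_build_seq_speaker_label_dataset():
    feature_loader = ExternalFeatureLoader(augmentor=None)
    return FeatureToSeqSpeakerLabelDataset(
        manifest_filepath="path/to/feature_seq_label_manifest.json",  # placeholder
        labels=None,
        feature_loader=feature_loader,
    )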
class FeatureToLabelDataset(Dataset):
"""
Dataset that loads tensors via a json file containing paths to feature files and their labels.
    Each new line is a different sample. JSON files should be of the following format:
{"feature_filepath": "/path/to/audio_feature.pt", "label": "1"}
...
{"feature_filepath": "/path/to/audio_feature.pt", "label": "0"}
Args:
manifest_filepath (str): Path to JSON containing data.
labels (Optional[list]): List of unique labels collected from all samples.
augmentor (Optional): feature augmentation
window_length_in_sec (float): Window length in seconds.
shift_length_in_sec (float): Shift length in seconds.
is_regression_task (bool): if True, the labels are treated as for a regression task.
cal_labels_occurrence (bool): if True, the labels occurrence will be calculated.
zero_spec_db_val (float): Value to replace non-speech signals in log-melspectrogram.
min_duration (float): Minimum duration of the audio file in seconds.
max_duration (float): Maximum duration of the audio file in seconds.
"""
ZERO_LEVEL_SPEC_DB_VAL = -16.635 # Log-Melspectrogram value for zero signal
FRAME_UNIT_TIME_SECS = 0.01
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
output_types = {
'audio_feat': NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
'feat_length': NeuralType(tuple('B'), LengthsType()),
'labels': NeuralType(('B'), LabelsType()),
'labels_length': NeuralType(tuple('B'), LengthsType()),
}
return output_types
def __init__(
self,
*,
manifest_filepath: str,
labels: List[str] = None,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
window_length_in_sec: float = 0.63,
shift_length_in_sec: float = 0.01,
is_regression_task: bool = False,
cal_labels_occurrence: Optional[bool] = False,
zero_spec_db_val: float = -16.635,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
):
super().__init__()
self.window_length_in_sec = window_length_in_sec
self.shift_length_in_sec = shift_length_in_sec
self.zero_spec_db_val = zero_spec_db_val
if isinstance(manifest_filepath, str):
manifest_filepath = manifest_filepath.split(',')
self.collection = collections.ASRFeatureLabel(
manifests_files=manifest_filepath,
is_regression_task=is_regression_task,
cal_labels_occurrence=cal_labels_occurrence,
min_duration=min_duration,
max_duration=max_duration,
)
self.feature_loader = ExternalFeatureLoader(augmentor=augmentor)
self.labels = labels if labels else self.collection.uniq_labels
self.is_regression_task = is_regression_task
if not is_regression_task:
self.labels = labels if labels else self.collection.uniq_labels
self.num_classes = len(self.labels) if self.labels is not None else 1
self.label2id, self.id2label = {}, {}
self.id2occurrence, self.labels_occurrence = {}, []
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
if cal_labels_occurrence:
self.id2occurrence[label_id] = self.collection.labels_occurrence[label]
if cal_labels_occurrence:
self.labels_occurrence = [self.id2occurrence[k] for k in sorted(self.id2occurrence)]
for idx in range(len(self.labels[:5])):
logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
else:
self.labels = []
self.num_classes = 1
def __len__(self):
return len(self.collection)
def __getitem__(self, index):
sample = self.collection[index]
features = self.feature_loader.process(sample.feature_file)
f, fl = features, torch.tensor(features.shape[1]).long()
t = torch.tensor(self.label2id[sample.label])
tl = torch.tensor(1).long()
return f, fl, t, tl
def _collate_fn(self, batch):
return _audio_feature_collate_fn(batch, self.zero_spec_db_val, 0)
def _vad_segment_collate_fn(self, batch):
return _vad_feature_segment_collate_fn(
batch, self.window_length_in_sec, self.shift_length_in_sec, self.FRAME_UNIT_TIME_SECS
)
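# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of a single-label-per-utterance feature dataset (e.g. for VAD or
# audio classification on precomputed log-mel features); the path is a placeholder.
def _example_build_feature_to_label_dataset():
    return FeatureToLabelDataset(
        manifest_filepath="path/to/feature_label_manifest.json",  # placeholder
        labels=["0", "1"],
        window_length_in_sec=0.63,
        shift_length_in_sec=0.08,
    )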
class FeatureToMultiLabelDataset(Dataset):
"""
Dataset that loads tensors via a json file containing paths to feature files and their labels.
    Each new line is a different sample. JSON files should be of the following format:
{"feature_filepath": "/path/to/audio_feature.pt", "label": "1 1 0 0 1"}
...
{"feature_filepath": "/path/to/audio_feature.pt", "label": "0 1 0 0"}
Args:
manifest_filepath (str): Path to JSON containing data.
labels (Optional[list]): List of unique labels collected from all samples.
augmentor (Optional): feature augmentation
delimiter (str): delimiter to split the labels.
is_regression_task (bool): if True, the labels are treated as for a regression task.
cal_labels_occurrence (bool): if True, the labels occurrence will be calculated.
zero_spec_db_val (float): Value to replace non-speech signals in log-melspectrogram.
min_duration (float): Minimum duration of the audio file in seconds.
max_duration (float): Maximum duration of the audio file in seconds.
"""
ZERO_LEVEL_SPEC_DB_VAL = -16.635 # Log-Melspectrogram value for zero signal
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
output_types = {
'audio_feat': NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
'feat_length': NeuralType(tuple('B'), LengthsType()),
'labels': NeuralType(('B', 'T'), LabelsType()),
'labels_length': NeuralType(tuple('B'), LengthsType()),
}
return output_types
def __init__(
self,
*,
manifest_filepath: str,
labels: List[str] = None,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
delimiter: Optional[str] = None,
is_regression_task: bool = False,
cal_labels_occurrence: Optional[bool] = False,
zero_spec_db_val: float = -16.635,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
):
super().__init__()
self.delimiter = delimiter
self.zero_spec_db_val = zero_spec_db_val
if isinstance(manifest_filepath, str):
manifest_filepath = manifest_filepath.split(',')
self.collection = collections.ASRFeatureLabel(
manifests_files=manifest_filepath,
is_regression_task=is_regression_task,
cal_labels_occurrence=cal_labels_occurrence,
delimiter=delimiter,
min_duration=min_duration,
max_duration=max_duration,
)
self.is_regression_task = is_regression_task
self.feature_loader = ExternalFeatureLoader(augmentor=augmentor)
self.labels = labels if labels else self.collection.uniq_labels
self.label2id, self.id2label = {}, {}
if not is_regression_task:
self.labels = labels if labels else self._get_label_set()
self.num_classes = len(self.labels) if self.labels is not None else 1
            self.label2id, self.id2label = {}, {}
            self.id2occurrence, self.labels_occurrence = {}, []
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
if cal_labels_occurrence:
self.id2occurrence[label_id] = self.collection.labels_occurrence[label]
self.labels_occurrence.append(self.id2occurrence[label_id])
for idx in range(len(self.labels[:5])):
logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
else:
self.labels = []
self.num_classes = 1
def _get_label_set(self):
labels = []
for sample in self.collection:
label_str = sample.label
if label_str:
label_str_list = label_str.split(self.delimiter) if self.delimiter else label_str.split()
labels.extend(label_str_list)
return sorted(set(labels))
def _label_str_to_tensor(self, label_str: str):
labels = label_str.split(self.delimiter) if self.delimiter else label_str.split()
if self.is_regression_task:
labels = [float(s) for s in labels]
labels = torch.tensor(labels).float()
else:
labels = [self.label2id[s] for s in labels]
labels = torch.tensor(labels).long()
return labels
def __len__(self):
return len(self.collection)
def __getitem__(self, index):
sample = self.collection[index]
features = self.feature_loader.process(sample.feature_file)
f, fl = features, torch.tensor(features.shape[1]).long()
t = self._label_str_to_tensor(sample.label)
tl = torch.tensor(t.size(0)).long()
return f, fl, t, tl
def _collate_fn(self, batch):
return _audio_feature_collate_fn(batch, self.zero_spec_db_val, 0)
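# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the frame-level multi-label dataset; labels such as "1 1 0 0 1"
# in the manifest are split on whitespace (or on `delimiter` when provided).
def _example_build_feature_to_multilabel_dataset():
    return FeatureToMultiLabelDataset(
        manifest_filepath="path/to/feature_multilabel_manifest.json",  # placeholder
        labels=["0", "1"],
        delimiter=None,  # None means whitespace-separated labels
    )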
|
NeMo-main
|
nemo/collections/asr/data/feature_to_label.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import operator
import os.path
import time
from collections.abc import Iterator
from typing import Callable, List, Optional, Union
import torch
from omegaconf import DictConfig
from nemo.collections.asr.data.audio_to_text import ASRManifestProcessor, expand_sharded_filepaths
from nemo.collections.common.parts.preprocessing import parsers
from nemo.utils import logging, model_utils
try:
import nvidia.dali as dali
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.plugin.pytorch import DALIGenericIterator as DALIPytorchIterator
from nvidia.dali.plugin.pytorch import LastBatchPolicy as LastBatchPolicy
HAVE_DALI = True
except (ImportError, ModuleNotFoundError):
HAVE_DALI = False
__all__ = [
'AudioToCharDALIDataset',
'AudioToBPEDALIDataset',
]
"""
Below minimum version is required to access the "read_idxs" argument in
dali.fn.readers.nemo_asr
"""
__DALI_MINIMUM_VERSION__ = "1.11"
DALI_INSTALLATION_MESSAGE = (
"Could not import `nvidia.dali`.\n"
"Please install DALI by following the steps provided here - \n"
"https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
)
def is_dali_supported(min_version: str, verbose: bool = False) -> bool:
"""
    Checks if DALI is installed, and its version is >= min_version.
Args:
min_version: A semver str that is the minimum requirement.
verbose: Whether to log the installation instructions if DALI is not found.
Returns:
bool - whether DALI could be imported or not.
"""
module_available, _ = model_utils.check_lib_version(
'nvidia.dali', checked_version=min_version, operator=operator.ge
)
# If DALI is not installed
if module_available is None:
if verbose:
logging.info(DALI_INSTALLATION_MESSAGE)
return False
return module_available
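# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of gating a DALI-based data layer on the minimum supported version
# before building the pipeline.
def _example_require_dali():
    if not is_dali_supported(__DALI_MINIMUM_VERSION__, verbose=True):
        raise ModuleNotFoundError(f"NVIDIA DALI >= {__DALI_MINIMUM_VERSION__} is required for this data layer.")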
class DALIOutputs(object):
def __init__(self, out_dict):
self._has_processed_signal = 'processed_signal' in out_dict and 'processed_signal_len' in out_dict
if not self._has_processed_signal:
assert 'audio' in out_dict and 'audio_len' in out_dict
assert 'transcript' in out_dict and 'transcript_len' in out_dict
if self._has_processed_signal:
self._outs = (
out_dict['processed_signal'],
out_dict['processed_signal_len'].reshape(-1),
out_dict['transcript'],
out_dict['transcript_len'].reshape(-1),
)
else:
self._outs = (
out_dict['audio'],
out_dict['audio_len'].reshape(-1),
out_dict['transcript'],
out_dict['transcript_len'].reshape(-1),
)
@property
def has_processed_signal(self):
return self._has_processed_signal
def __getitem__(self, key):
return self._outs[key]
def __len__(self):
return len(self._outs)
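# --- Illustrative sketch (not part of the original module) ---
# DALIOutputs behaves like a 4-tuple, so training code can unpack it exactly like the
# output of a regular collate function; `has_processed_signal` tells whether the first
# two entries are spectrogram features or raw audio.
def _example_unpack_dali_outputs(outputs: "DALIOutputs"):
    if outputs.has_processed_signal:
        processed_signal, processed_signal_len, transcript, transcript_len = outputs
        return processed_signal.shape, transcript.shape
    audio, audio_len, transcript, transcript_len = outputs
    return audio.shape, transcript.shape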
class _AudioTextDALIDataset(Iterator):
"""
    NVIDIA DALI pipeline that loads tensors via one or more manifest files where each line contains a sample descriptor in JSON,
including audio files, transcripts, and durations (in seconds).
Here's an example:
{"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest file with the format described above. Can be comma-separated paths.
device (str): Determines the device type to be used for preprocessing. Allowed values are: 'cpu', 'gpu'.
batch_size (int): Number of samples in a batch.
parser (str, callable): A str for an inbuilt parser, or a callable with signature f(str) -> List[int].
sample_rate (int): Sample rate to resample loaded audio to.
num_threads (int): Number of CPU processing threads to be created by the DALI pipeline.
max_duration (float): Determines the maximum allowed duration, in seconds, of the loaded audio files.
min_duration (float): Determines the minimum allowed duration, in seconds, of the loaded audio files.
bos_id (int): Id of beginning of sequence symbol to append if not None
eos_id (int): Id of end of sequence symbol to append if not None
pad_id (int): Id used to pad the input. Defaults to 0 if not provided.
trim (bool): If True, it will extract the nonsilent region of the loaded audio signal.
        shuffle (bool): If set to True, the dataset will be shuffled after loading.
drop_last (bool): If set to True, the last batch will be dropped if incomplete. This will be the case when the shard size is not divisible by the batch size.
If set to False and the size of dataset is not divisible by the batch size, then the last batch will be smaller.
device_id (int): Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
preprocessor_cfg (DictConfig): Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
return_sample_id (bool): whether to return the sample_id as a part of each sample (not supported yet).
"""
def __init__(
self,
manifest_filepath: str,
device: str,
batch_size: int,
parser: Union[str, Callable],
audio_tar_filepaths: Optional[Union[str, List[str]]] = None,
audio_tar_index_filepaths: Optional[Union[str, List[str]]] = None,
sample_rate: int = 16000,
num_threads: int = 4,
max_duration: float = 0.0,
min_duration: float = 0.0,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
trim: bool = False,
shuffle: bool = False,
drop_last: bool = False,
shard_strategy: str = "scatter",
device_id: int = 0,
global_rank: int = 0,
world_size: int = 1,
preprocessor_cfg: DictConfig = None,
return_sample_id: bool = False,
):
self.drop_last = drop_last # used by lr_scheduler
if return_sample_id:
raise ValueError(
"Currently DALI data layers don't support returning the sample_id and return_sample_id can not be enabled."
)
self.return_sample_id = return_sample_id
if not HAVE_DALI:
raise ModuleNotFoundError(
f"{self} requires NVIDIA DALI to be installed. "
f"See: https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html#id1"
)
if device not in ('cpu', 'gpu'):
raise ValueError(
f"{self} received an unexpected device argument {device}. Supported values are: 'cpu', 'gpu'"
)
device_id = device_id if device == 'gpu' else None
self.batch_size = batch_size # Used by NeMo
self.device = device
self.device_id = device_id
if world_size > 1:
self.shard_id = global_rank
self.num_shards = world_size
else:
self.shard_id = None
self.num_shards = None
self.eos_id = eos_id
self.bos_id = bos_id
self.sample_rate = sample_rate
self.pipe = Pipeline(
batch_size=batch_size,
num_threads=num_threads,
device_id=self.device_id,
exec_async=True,
exec_pipelined=True,
)
has_preprocessor = preprocessor_cfg is not None
if has_preprocessor:
if preprocessor_cfg._target_ == "nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor":
feature_type = "mel_spectrogram"
elif preprocessor_cfg._target_ == "nemo.collections.asr.modules.AudioToMFCCPreprocessor":
feature_type = "mfcc"
else:
raise ValueError(
f"{self} received an unexpected preprocessor configuration: {preprocessor_cfg._target_}."
f" Supported preprocessors are: AudioToMelSpectrogramPreprocessor, AudioToMFCCPreprocessor"
)
# Default values taken from AudioToMelSpectrogramPreprocessor
params = preprocessor_cfg
self.dither = params['dither'] if 'dither' in params else 0.0
self.preemph = params['preemph'] if 'preemph' in params else 0.97
self.window_size_sec = params['window_size'] if 'window_size' in params else 0.02
self.window_stride_sec = params['window_stride'] if 'window_stride' in params else 0.01
self.sample_rate = params['sample_rate'] if 'sample_rate' in params else sample_rate
self.window_size = int(self.window_size_sec * self.sample_rate)
self.window_stride = int(self.window_stride_sec * self.sample_rate)
normalize = params['normalize'] if 'normalize' in params else 'per_feature'
if normalize == 'per_feature': # Each freq channel independently
self.normalization_axes = (1,)
elif normalize == 'all_features':
self.normalization_axes = (0, 1)
else:
raise ValueError(
f"{self} received {normalize} for the normalize parameter."
f" It must be either 'per_feature' or 'all_features'."
)
self.window = None
window_name = params['window'] if 'window' in params else 'hann'
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
if window_name == 'ones':
window_tensor = torch.ones(self.window_size)
else:
try:
window_fn = torch_windows.get(window_name, None)
except:
raise ValueError(
f"{self} received '{window_name}' for the window parameter."
f" It must be one of: ('hann', 'ones', 'hamming', 'blackman', 'bartlett', None)."
f" None is equivalent to 'hann'."
)
window_tensor = window_fn(self.window_size, periodic=False) if window_fn else None
self.window = window_tensor.numpy().tolist() if window_tensor is not None else None
self.n_fft = params['n_fft'] if 'n_fft' in params else 2 ** math.ceil(math.log2(self.window_size))
self.n_mels = params['n_mels'] if 'n_mels' in params else 64
self.n_mfcc = params['n_mfcc'] if 'n_mfcc' in params else 64
features = params['features'] if 'features' in params else 0
if features > 0:
if feature_type == 'mel_spectrogram':
self.n_mels = features
elif feature_type == 'mfcc':
self.n_mfcc = features
# TODO Implement frame splicing
if 'frame_splicing' in params:
assert params['frame_splicing'] == 1, "Frame splicing is not implemented"
self.freq_low = params['lowfreq'] if 'lowfreq' in params else 0.0
self.freq_high = params['highfreq'] if 'highfreq' in params else self.sample_rate / 2.0
self.log_features = params['log'] if 'log' in params else True
# We want to avoid taking the log of zero
# There are two options: either adding or clamping to a small value
self.log_zero_guard_type = params['log_zero_guard_type'] if 'log_zero_guard_type' in params else 'add'
if self.log_zero_guard_type not in ["add", "clamp"]:
raise ValueError(
f"{self} received {self.log_zero_guard_type} for the "
f"log_zero_guard_type parameter. It must be either 'add' or "
f"'clamp'."
)
self.log_zero_guard_value = (
params['log_zero_guard_value'] if 'log_zero_guard_value' in params else 2 ** -24
)
if isinstance(self.log_zero_guard_value, str):
if self.log_zero_guard_value == "tiny":
self.log_zero_guard_value = torch.finfo(torch.float32).tiny
elif self.log_zero_guard_value == "eps":
self.log_zero_guard_value = torch.finfo(torch.float32).eps
else:
raise ValueError(
f"{self} received {self.log_zero_guard_value} for the log_zero_guard_type parameter."
f"It must be either a number, 'tiny', or 'eps'"
)
self.mag_power = params['mag_power'] if 'mag_power' in params else 2
if self.mag_power != 1.0 and self.mag_power != 2.0:
raise ValueError(
f"{self} received {self.mag_power} for the mag_power parameter." f" It must be either 1.0 or 2.0."
)
self.pad_to = max(params['pad_to'], 1) if 'pad_to' in params else 16
self.pad_value = params['pad_value'] if 'pad_value' in params else 0.0
with self.pipe:
if audio_tar_filepaths is None and audio_tar_index_filepaths is None:
audio, indices = dali.fn.readers.nemo_asr(
name="Reader",
manifest_filepaths=manifest_filepath.split(','),
dtype=dali.types.FLOAT,
downmix=True,
sample_rate=float(self.sample_rate),
min_duration=min_duration,
max_duration=max_duration,
read_sample_rate=False,
read_text=False,
read_idxs=True,
random_shuffle=shuffle,
shard_id=self.shard_id,
num_shards=self.num_shards,
pad_last_batch=True,
)
self.is_tarred_dataset = False
elif audio_tar_filepaths is not None and audio_tar_index_filepaths is not None:
audio_tar_filepaths = expand_sharded_filepaths(
audio_tar_filepaths, shard_strategy=shard_strategy, world_size=world_size, global_rank=global_rank
)
audio_tar_index_filepaths = expand_sharded_filepaths(
audio_tar_index_filepaths,
shard_strategy=shard_strategy,
world_size=world_size,
global_rank=global_rank,
)
if len(audio_tar_filepaths) != len(audio_tar_index_filepaths) and len(audio_tar_index_filepaths) != 0:
raise ValueError(
f"Number of filepaths provided for `audio_tar_filepaths` must match "
f"`audio_tar_index_filepaths`. Got {len(audio_tar_filepaths)} audio_tar_filepaths and "
f"{len(audio_tar_index_filepaths)} audio_tar_index_filepaths."
)
tar_file = dali.fn.readers.webdataset(
paths=audio_tar_filepaths,
index_paths=audio_tar_index_filepaths,
name="Reader",
ext=["wav"],
missing_component_behavior="error",
random_shuffle=shuffle,
shard_id=self.shard_id,
num_shards=self.num_shards,
pad_last_batch=True,
)
audio, _ = dali.fn.decoders.audio(
tar_file, dtype=dali.types.FLOAT, downmix=True, sample_rate=float(self.sample_rate),
)
indices = dali.fn.get_property(tar_file, key="source_info")
indices = dali.fn.pad(indices)
self.is_tarred_dataset = True
else:
raise RuntimeError(
"When using DALI datasets, either `audio_tar_filepaths` "
"and `audio_tar_index_filepaths` should either both be None (sequential dataset)"
"or provided (tarred dataset)."
)
# Extract nonsilent region, if necessary
if trim:
# Need to extract non-silent region before moving to the GPU
roi_start, roi_len = dali.fn.nonsilent_region(audio, cutoff_db=-60)
audio = audio.gpu() if self.device == 'gpu' else audio
audio = dali.fn.slice(
audio, roi_start, roi_len, normalized_anchor=False, normalized_shape=False, axes=[0]
)
else:
audio = audio.gpu() if self.device == 'gpu' else audio
if not has_preprocessor:
# No preprocessing, the output is the audio signal
audio_len = dali.fn.shapes(dali.fn.reshape(audio, shape=[-1]))
audio = dali.fn.pad(audio)
self.pipe.set_outputs(audio, audio_len, indices)
else:
# Additive gaussian noise (dither)
if self.dither > 0.0:
gaussian_noise = dali.fn.random.normal(audio)
audio = audio + self.dither * gaussian_noise
# Preemphasis filter
if self.preemph > 0.0:
audio = dali.fn.preemphasis_filter(audio, preemph_coeff=self.preemph, border='zero')
# Power spectrogram
spec = dali.fn.spectrogram(
audio,
nfft=self.n_fft,
window_length=self.window_size,
window_step=self.window_stride,
window_fn=self.window,
)
if feature_type == 'mel_spectrogram' or feature_type == 'mfcc':
# Spectrogram to Mel Spectrogram
spec = dali.fn.mel_filter_bank(
spec,
sample_rate=self.sample_rate,
nfilter=self.n_mels,
normalize=True,
freq_low=self.freq_low,
freq_high=self.freq_high,
)
# Mel Spectrogram to MFCC
if feature_type == 'mfcc':
spec = dali.fn.mfcc(spec, n_mfcc=self.n_mfcc)
# Logarithm
if self.log_zero_guard_type == 'add':
spec = spec + self.log_zero_guard_value
spec = dali.fn.to_decibels(
spec, multiplier=math.log(10), reference=1.0, cutoff_db=math.log(self.log_zero_guard_value)
)
# Normalization
spec = dali.fn.normalize(spec, axes=self.normalization_axes, epsilon=1e-5 ** 2, ddof=1)
# Extracting the length of the spectrogram
spec_len = dali.fn.slice(dali.fn.shapes(spec), 1, 1, axes=(0,))
# Pads feature dimension to be a multiple of `pad_to` and the temporal dimension to be as big as the largest sample (shape -1)
spec = dali.fn.pad(spec, fill_value=self.pad_value, axes=(0, 1), align=(self.pad_to, 1), shape=(1, -1))
self.pipe.set_outputs(spec, spec_len, indices)
x = time.time()
# Building DALI pipeline
self.pipe.build()
y = time.time()
logging.info(f"Time for pipe.build() : {(y - x)} seconds")
if has_preprocessor:
output_names = ['processed_signal', 'processed_signal_len', 'manifest_indices']
else:
output_names = ['audio', 'audio_len', 'manifest_indices']
x = time.time()
last_batch_policy = LastBatchPolicy.DROP if drop_last else LastBatchPolicy.PARTIAL
self._iter = DALIPytorchIterator(
[self.pipe],
output_map=output_names,
reader_name="Reader",
last_batch_policy=last_batch_policy,
dynamic_shape=True,
auto_reset=True,
)
y = time.time()
logging.info(f"Time for DALIPytorchIterator to initialize : {(y - x)} seconds")
# TODO come up with a better solution
class DummyDataset:
def __init__(self, parent):
self.parent = parent
def __len__(self):
return self.parent.size
self.dataset = DummyDataset(self) # Used by NeMo
x = time.time()
self.manifest_processor = ASRManifestProcessor(
manifest_filepath=manifest_filepath,
parser=parser,
max_duration=max_duration,
min_duration=min_duration,
max_utts=0,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
index_by_file_id=self.is_tarred_dataset,
)
y = time.time()
logging.info(f"Time to build nemo manifest processor - {(y - x)} seconds")
def reset(self):
self._iter.reset()
def __iter__(self):
return self
def next(self):
return self.__next__()
@property
def size(self):
return self._iter.size
def __len__(self):
return len(self._iter)
def __next__(self):
outputs = self._iter.next()
assert len(outputs) == 1
dali_out = outputs[0]
manifest_indices = dali_out['manifest_indices'].numpy()
out = {}
out_names = ['processed_signal', 'processed_signal_len', 'audio', 'audio_len']
for out_name in out_names:
if out_name in dali_out:
out[out_name] = dali_out[out_name].detach().clone()
text_tokens = []
text_tokens_len = []
max_len = 0
batch_size = manifest_indices.shape[0]
for i, manifest_index in enumerate(manifest_indices):
if not self.is_tarred_dataset:
# Loose-file dataset. Index is integer based.
manifest_index = manifest_index[0]
text, text_length = self.manifest_processor.process_text_by_id(manifest_index)
else:
# Tarred-file dataset. Index is filename based.
resolved_manifest_indices = manifest_index.tobytes().decode().split(":")
resolved_manifest_index = resolved_manifest_indices[2] # we require just the filename segment
                resolved_manifest_index = os.path.splitext(resolved_manifest_index)[0]  # we don't need the file extension
text, text_length = self.manifest_processor.process_text_by_file_id(resolved_manifest_index)
text_tokens_len.append(text_length)
text_tokens.append(text)
if text_length > max_len:
max_len = text_length
transcript_out = torch.full([batch_size, max_len], fill_value=self.manifest_processor.pad_id, dtype=torch.long)
for i, n in enumerate(text_tokens_len):
transcript_out[i, :n] = torch.tensor(text_tokens[i], dtype=torch.long)
transcript_len_out = torch.tensor(text_tokens_len, dtype=torch.long)
out['transcript'] = transcript_out
out['transcript_len'] = transcript_len_out
return DALIOutputs(out)
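# --- Illustrative sketch (not part of the original module) ---
# The data layer is itself an iterator over DALIOutputs batches; a minimal epoch loop
# simply iterates until exhaustion and calls reset() before the next epoch.
def _example_iterate_dali_layer(dali_layer: "_AudioTextDALIDataset") -> int:
    n_batches = 0
    for outputs in dali_layer:
        signal, signal_len, transcript, transcript_len = outputs  # DALIOutputs unpacks like a tuple
        n_batches += 1
    dali_layer.reset()
    return n_batches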
class AudioToCharDALIDataset(_AudioTextDALIDataset):
"""
    Character based NVIDIA DALI pipeline that loads tensors via one or more manifest files where each line
    contains a sample descriptor in JSON, including audio files, transcripts, and durations (in seconds).
Here's an example:
{"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest file with the format described above. Can be comma-separated paths.
device (str): Determines the device type to be used for preprocessing. Allowed values are: 'cpu', 'gpu'.
batch_size (int): Number of samples in a batch.
labels (List[str]): String containing all the possible characters to map to.
sample_rate (int): Sample rate to resample loaded audio to.
num_threads (int): Number of CPU processing threads to be created by the DALI pipeline.
max_duration (float): Determines the maximum allowed duration, in seconds, of the loaded audio files.
min_duration (float): Determines the minimum allowed duration, in seconds, of the loaded audio files.
blank_index (int): blank character index, default = -1
unk_index (int): unk_character index, default = -1
        normalize (bool): whether to normalize transcript text. Defaults to True.
bos_id (int): Id of beginning of sequence symbol to append if not None
eos_id (int): Id of end of sequence symbol to append if not None
pad_id (int): Id used to pad the input. Defaults to 0 if not provided.
trim (bool): If True, it will extract the nonsilent region of the loaded audio signal.
        shuffle (bool): If set to True, the dataset will be shuffled after loading.
drop_last (bool): If set to True, the last batch will be dropped if incomplete. This will be the case when the shard size is not divisible by the batch size.
If set to False and the size of dataset is not divisible by the batch size, then the last batch will be smaller.
parser (str, callable): A str for an inbuilt parser, or a callable with signature f(str) -> List[int].
device_id (int): Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
preprocessor_cfg (DictConfig): Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
return_sample_id (bool): whether to return the sample_id as a part of each sample (not supported yet).
"""
def __init__(
self,
manifest_filepath: str,
device: str,
batch_size: int,
labels: Union[str, List[str]],
sample_rate: int = 16000,
audio_tar_filepaths: Optional[Union[str, List[str]]] = None,
audio_tar_index_filepaths: Optional[Union[str, List[str]]] = None,
num_threads: int = 4,
max_duration: float = 0.0,
min_duration: float = 0.0,
blank_index: int = -1,
unk_index: int = -1,
normalize: bool = True,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
trim: bool = False,
shuffle: bool = False,
drop_last: bool = False,
parser: Union[str, Callable] = 'en',
shard_strategy: str = "scatter",
device_id: int = 0,
global_rank: int = 0,
world_size: int = 1,
preprocessor_cfg: DictConfig = None,
return_sample_id: bool = False,
):
self.labels = labels
parser = parsers.make_parser(
labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
)
super().__init__(
manifest_filepath=manifest_filepath,
device=device,
batch_size=batch_size,
audio_tar_filepaths=audio_tar_filepaths,
audio_tar_index_filepaths=audio_tar_index_filepaths,
sample_rate=sample_rate,
num_threads=num_threads,
max_duration=max_duration,
min_duration=min_duration,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
trim=trim,
shuffle=shuffle,
drop_last=drop_last,
parser=parser,
shard_strategy=shard_strategy,
device_id=device_id,
global_rank=global_rank,
world_size=world_size,
preprocessor_cfg=preprocessor_cfg,
return_sample_id=return_sample_id,
)
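# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of a character-level DALI data layer over loose (non-tarred) files;
# the manifest path and the label set are placeholders.
def _example_build_char_dali_dataset(preprocessor_cfg: DictConfig = None):
    return AudioToCharDALIDataset(
        manifest_filepath="path/to/train_manifest.json",  # placeholder
        device="gpu" if torch.cuda.is_available() else "cpu",
        batch_size=32,
        labels=[" ", "a", "b", "c"],  # truncated label set, for illustration only
        sample_rate=16000,
        shuffle=True,
        preprocessor_cfg=preprocessor_cfg,
    )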
class AudioToBPEDALIDataset(_AudioTextDALIDataset):
"""
    Subword based NVIDIA DALI pipeline that loads tensors via one or more manifest files where each line
    contains a sample descriptor in JSON, including audio files, transcripts, and durations (in seconds).
Here's an example:
{"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath: Path to manifest file with the format described above. Can be comma-separated paths.
tokenizer (TokenizerSpec): A TokenizerSpec implementation that wraps a tokenization implementation.
device (str): Determines the device type to be used for preprocessing. Allowed values are: 'cpu', 'gpu'.
batch_size (int): Number of samples in a batch.
sample_rate (int): Sample rate to resample loaded audio to.
num_threads (int): Number of CPU processing threads to be created by the DALI pipeline.
max_duration (float): Determines the maximum allowed duration, in seconds, of the loaded audio files.
min_duration (float): Determines the minimum allowed duration, in seconds, of the loaded audio files.
bos_id (int): Id of beginning of sequence symbol to append if not None. Injected from the tokenizer.
eos_id (int): Id of end of sequence symbol to append if not None. Injected from the tokenizer.
pad_id (int): Id used to pad the input. Defaults to 0 if not provided. Injected from the tokenizer.
trim (bool): If True, it will extract the nonsilent region of the loaded audio signal.
        shuffle (bool): If set to True, the dataset will be shuffled after loading.
drop_last (bool): If set to True, the last batch will be dropped if incomplete. This will be the case when the shard size is not divisible by the batch size.
If set to False and the size of dataset is not divisible by the batch size, then the last batch will be smaller.
device_id (int): Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
preprocessor_cfg (DictConfig): Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
use_start_end_token (bool): Boolean which dictates whether to add [BOS] and [EOS] tokens to beginning and
ending of speech respectively.
return_sample_id (bool): whether to return the sample_id as a part of each sample (not supported yet).
"""
def __init__(
self,
manifest_filepath: str,
tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
device: str,
batch_size: int,
sample_rate: int = 16000,
audio_tar_filepaths: Optional[Union[str, List[str]]] = None,
audio_tar_index_filepaths: Optional[Union[str, List[str]]] = None,
num_threads: int = 4,
max_duration: float = 0.0,
min_duration: float = 0.0,
trim: bool = False,
shuffle: bool = False,
drop_last: bool = False,
shard_strategy: str = "scatter",
device_id: int = 0,
global_rank: int = 0,
world_size: int = 1,
preprocessor_cfg: DictConfig = None,
use_start_end_token: bool = True,
return_sample_id: bool = False,
):
if use_start_end_token and hasattr(tokenizer, 'bos_token'):
bos_id = tokenizer.bos_id
else:
bos_id = None
if use_start_end_token and hasattr(tokenizer, 'eos_token'):
eos_id = tokenizer.eos_id
else:
eos_id = None
if hasattr(tokenizer, 'pad_token'):
pad_id = tokenizer.pad_id
else:
pad_id = 0
class TokenizerWrapper:
def __init__(self, tokenizer):
self._tokenizer = tokenizer
def __call__(self, text):
t = self._tokenizer.text_to_ids(text)
return t
super().__init__(
manifest_filepath=manifest_filepath,
device=device,
batch_size=batch_size,
sample_rate=sample_rate,
audio_tar_filepaths=audio_tar_filepaths,
audio_tar_index_filepaths=audio_tar_index_filepaths,
num_threads=num_threads,
max_duration=max_duration,
min_duration=min_duration,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
trim=trim,
shuffle=shuffle,
drop_last=drop_last,
parser=TokenizerWrapper(tokenizer),
shard_strategy=shard_strategy,
device_id=device_id,
global_rank=global_rank,
world_size=world_size,
preprocessor_cfg=preprocessor_cfg,
return_sample_id=return_sample_id,
)
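# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the subword DALI data layer; bos/eos/pad ids are derived from the
# tokenizer when use_start_end_token is True. Paths and the tokenizer are placeholders.
def _example_build_bpe_dali_dataset(tokenizer, global_rank: int = 0, world_size: int = 1):
    return AudioToBPEDALIDataset(
        manifest_filepath="path/to/train_manifest.json",  # placeholder
        tokenizer=tokenizer,
        device="gpu",
        batch_size=16,
        sample_rate=16000,
        shuffle=True,
        global_rank=global_rank,
        world_size=world_size,
    )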
|
NeMo-main
|
nemo/collections/asr/data/audio_to_text_dali.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.data import audio_to_audio
def get_audio_to_target_dataset(config: dict) -> audio_to_audio.AudioToTargetDataset:
"""Instantiates an audio-to-audio dataset.
Args:
config: Config of AudioToTargetDataset.
Returns:
An instance of AudioToTargetDataset
"""
dataset = audio_to_audio.AudioToTargetDataset(
manifest_filepath=config['manifest_filepath'],
sample_rate=config['sample_rate'],
input_key=config['input_key'],
target_key=config['target_key'],
audio_duration=config.get('audio_duration', None),
random_offset=config.get('random_offset', False),
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
max_utts=config.get('max_utts', 0),
input_channel_selector=config.get('input_channel_selector', None),
target_channel_selector=config.get('target_channel_selector', None),
)
return dataset
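# --- Illustrative sketch (not part of the original module) ---
# A minimal config dict accepted by the factory above; the manifest path and the
# input/target keys are hypothetical placeholders.
def _example_audio_to_target_config() -> dict:
    return {
        'manifest_filepath': 'path/to/audio_to_audio_manifest.json',  # placeholder
        'sample_rate': 16000,
        'input_key': 'input_filepath',
        'target_key': 'target_filepath',
        'audio_duration': 4.0,  # optional fixed-length crop in seconds
        'random_offset': True,
    }
# e.g. dataset = get_audio_to_target_dataset(_example_audio_to_target_config())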
def get_audio_to_target_with_reference_dataset(config: dict) -> audio_to_audio.AudioToTargetWithReferenceDataset:
"""Instantiates an audio-to-audio dataset.
Args:
config: Config of AudioToTargetWithReferenceDataset.
Returns:
An instance of AudioToTargetWithReferenceDataset
"""
dataset = audio_to_audio.AudioToTargetWithReferenceDataset(
manifest_filepath=config['manifest_filepath'],
sample_rate=config['sample_rate'],
input_key=config['input_key'],
target_key=config['target_key'],
reference_key=config['reference_key'],
audio_duration=config.get('audio_duration', None),
random_offset=config.get('random_offset', False),
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
max_utts=config.get('max_utts', 0),
input_channel_selector=config.get('input_channel_selector', None),
target_channel_selector=config.get('target_channel_selector', None),
reference_channel_selector=config.get('reference_channel_selector', None),
reference_is_synchronized=config.get('reference_is_synchronized', True),
reference_duration=config.get('reference_duration', None),
)
return dataset
def get_audio_to_target_with_embedding_dataset(config: dict) -> audio_to_audio.AudioToTargetWithEmbeddingDataset:
"""Instantiates an audio-to-audio dataset.
Args:
config: Config of AudioToTargetWithEmbeddingDataset.
Returns:
An instance of AudioToTargetWithEmbeddingDataset
"""
dataset = audio_to_audio.AudioToTargetWithEmbeddingDataset(
manifest_filepath=config['manifest_filepath'],
sample_rate=config['sample_rate'],
input_key=config['input_key'],
target_key=config['target_key'],
embedding_key=config['embedding_key'],
audio_duration=config.get('audio_duration', None),
random_offset=config.get('random_offset', False),
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
max_utts=config.get('max_utts', 0),
input_channel_selector=config.get('input_channel_selector', None),
target_channel_selector=config.get('target_channel_selector', None),
)
return dataset
|
NeMo-main
|
nemo/collections/asr/data/audio_to_audio_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from omegaconf import DictConfig
from nemo.collections.asr.data import audio_to_label
from nemo.collections.asr.data.audio_to_text_dataset import convert_to_config_list, get_chain_dataset
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.collections.common.data.dataset import ConcatDataset
def get_classification_label_dataset(featurizer, config: dict) -> audio_to_label.AudioToClassificationLabelDataset:
"""
Instantiates a Classification AudioLabelDataset.
Args:
config: Config of the AudioToClassificationLabelDataset.
Returns:
An instance of AudioToClassificationLabelDataset.
"""
dataset = audio_to_label.AudioToClassificationLabelDataset(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
featurizer=featurizer,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', False),
is_regression_task=config.get('is_regression_task', False),
cal_labels_occurrence=config.get('cal_labels_occurrence', False),
)
return dataset
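# Editor's note: illustrative sketch, not part of the original NeMo source. It shows one way the
# featurizer and a plain dict config could be supplied; the manifest path and label set are
# hypothetical, and WaveformFeaturizer is assumed to be the featurizer used with this dataset.
def _example_classification_label_dataset():  # illustrative only
    from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer

    featurizer = WaveformFeaturizer(sample_rate=16000)
    example_config = {
        'manifest_filepath': 'commands_train_manifest.json',  # hypothetical path
        'labels': ['yes', 'no', 'up', 'down'],                # hypothetical label set
        'max_duration': 2.0,
    }
    return get_classification_label_dataset(featurizer, example_config)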
def get_speech_label_dataset(featurizer, config: dict) -> audio_to_label.AudioToSpeechLabelDataset:
"""
Instantiates a Speech Label (e.g. VAD, speaker recognition) AudioLabelDataset.
Args:
config: Config of the AudioToSpeechLabelDataSet.
Returns:
An instance of AudioToSpeechLabelDataset.
"""
dataset = audio_to_label.AudioToSpeechLabelDataset(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
featurizer=featurizer,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', False),
window_length_in_sec=config.get('window_length_in_sec', 0.31),
shift_length_in_sec=config.get('shift_length_in_sec', 0.01),
normalize_audio=config.get('normalize_audio', False),
cal_labels_occurrence=config.get('cal_labels_occurrence', False),
)
return dataset
def get_tarred_classification_label_dataset(
featurizer, config: dict, shuffle_n: int, global_rank: int, world_size: int
) -> audio_to_label.TarredAudioToClassificationLabelDataset:
"""
Instantiates a Classification TarredAudioLabelDataset.
Args:
config: Config of the TarredAudioToClassificationLabelDataset.
shuffle_n: How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
Returns:
An instance of TarredAudioToClassificationLabelDataset.
"""
tarred_audio_filepaths = config['tarred_audio_filepaths']
manifest_filepaths = config['manifest_filepath']
datasets = []
tarred_audio_filepaths = convert_to_config_list(tarred_audio_filepaths)
manifest_filepaths = convert_to_config_list(manifest_filepaths)
bucketing_weights = config.get('bucketing_weights', None) # For upsampling buckets
if bucketing_weights:
for idx, weight in enumerate(bucketing_weights):
if not isinstance(weight, int) or weight <= 0:
raise ValueError(f"bucket weights must be positive integers")
if len(manifest_filepaths) != len(tarred_audio_filepaths):
raise ValueError(
f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths (length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
)
for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
zip(tarred_audio_filepaths, manifest_filepaths)
):
if len(tarred_audio_filepath) == 1:
tarred_audio_filepath = tarred_audio_filepath[0]
dataset = audio_to_label.TarredAudioToClassificationLabelDataset(
audio_tar_filepaths=tarred_audio_filepath,
manifest_filepath=manifest_filepath,
labels=config['labels'],
featurizer=featurizer,
shuffle_n=shuffle_n,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', False),
shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
global_rank=global_rank,
world_size=world_size,
is_regression_task=config.get('is_regression_task', False),
)
if bucketing_weights:
datasets.extend([dataset] * bucketing_weights[dataset_idx])
else:
datasets.append(dataset)
return get_chain_dataset(datasets=datasets, ds_config=config, rank=global_rank)
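# Editor's note: illustrative sketch, not part of the original NeMo source. A hypothetical
# two-bucket tarred config; `bucketing_weights` upsamples the second bucket, so three dataset
# copies are chained in total. Tar patterns, manifests and labels are placeholders.
def _example_tarred_classification_label_dataset(featurizer, global_rank, world_size):  # illustrative only
    example_config = DictConfig(
        {
            'tarred_audio_filepaths': [['bucket1/audio_{0..63}.tar'], ['bucket2/audio_{0..63}.tar']],
            'manifest_filepath': [['bucket1/tarred_manifest.json'], ['bucket2/tarred_manifest.json']],
            'labels': ['speech', 'noise'],  # hypothetical label set
            'batch_size': 32,
            'bucketing_weights': [1, 2],    # upsample the second bucket
        }
    )
    return get_tarred_classification_label_dataset(
        featurizer=featurizer, config=example_config, shuffle_n=2048,
        global_rank=global_rank, world_size=world_size,
    )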
def get_concat_tarred_speech_label_dataset(
featurizer, config: dict, shuffle_n: int, global_rank: int, world_size: int,
):
tarred_audio_filepaths = config['tarred_audio_filepaths']
manifest_filepaths = config['manifest_filepath']
datasets = []
for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
zip(tarred_audio_filepaths, manifest_filepaths)
):
conf = copy.deepcopy(config)
conf['manifest_filepath'] = manifest_filepath
conf['tarred_audio_filepaths'] = tarred_audio_filepath
dataset = get_tarred_speech_label_dataset(
config=conf, featurizer=featurizer, shuffle_n=shuffle_n, global_rank=global_rank, world_size=world_size,
)
datasets.append(dataset)
dataset = ConcatDataset(
datasets,
sampling_technique=config.get('concat_sampling_technique', 'temperature'),
sampling_temperature=config.get('concat_sampling_temperature', 5),
sampling_probabilities=config.get('concat_sampling_probabilities', None),
global_rank=global_rank,
world_size=world_size,
shuffle=config['shuffle'],
)
return dataset
def get_tarred_speech_label_dataset(
featurizer, config: dict, shuffle_n: int, global_rank: int, world_size: int,
) -> audio_to_label.TarredAudioToSpeechLabelDataset:
"""
Instantiates a Speech Label (e.g. VAD, speaker recognition) TarredAudioLabelDataset.
Args:
config: Config of the TarredAudioToSpeechLabelDataset.
shuffle_n: How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
Returns:
An instance of TarredAudioToSpeechLabelDataset.
"""
tarred_audio_filepaths = config['tarred_audio_filepaths']
manifest_filepaths = config['manifest_filepath']
datasets = []
tarred_audio_filepaths = convert_to_config_list(tarred_audio_filepaths)
manifest_filepaths = convert_to_config_list(manifest_filepaths)
bucketing_weights = config.get('bucketing_weights', None) # For upsampling buckets
if bucketing_weights:
for idx, weight in enumerate(bucketing_weights):
if not isinstance(weight, int) or weight <= 0:
raise ValueError(f"bucket weights must be positive integers")
if len(manifest_filepaths) != len(tarred_audio_filepaths):
raise ValueError(
f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths (length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
)
for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
zip(tarred_audio_filepaths, manifest_filepaths)
):
if len(tarred_audio_filepath) == 1:
tarred_audio_filepath = tarred_audio_filepath[0]
dataset = audio_to_label.TarredAudioToSpeechLabelDataset(
audio_tar_filepaths=tarred_audio_filepath,
manifest_filepath=manifest_filepath,
labels=config['labels'],
featurizer=featurizer,
shuffle_n=shuffle_n,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', False),
window_length_in_sec=config.get('window_length_in_sec', 8),
shift_length_in_sec=config.get('shift_length_in_sec', 0.075),
normalize_audio=config.get('normalize_audio', False),
shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
global_rank=global_rank,
world_size=world_size,
)
if bucketing_weights:
datasets.extend([dataset] * bucketing_weights[dataset_idx])
else:
datasets.append(dataset)
return get_chain_dataset(datasets=datasets, ds_config=config, rank=global_rank)
def get_audio_multi_label_dataset(cfg: DictConfig) -> audio_to_label.AudioToMultiLabelDataset:
if "augmentor" in cfg:
augmentor = process_augmentations(cfg.augmentor)
else:
augmentor = None
dataset = audio_to_label.AudioToMultiLabelDataset(
manifest_filepath=cfg.get("manifest_filepath"),
sample_rate=cfg.get("sample_rate"),
labels=cfg.get("labels", None),
int_values=cfg.get("int_values", False),
augmentor=augmentor,
min_duration=cfg.get("min_duration", None),
max_duration=cfg.get("max_duration", None),
trim_silence=cfg.get("trim_silence", False),
is_regression_task=cfg.get("is_regression_task", False),
cal_labels_occurrence=cfg.get("cal_labels_occurrence", False),
delimiter=cfg.get("delimiter", None),
normalize_audio_db=cfg.get("normalize_audio_db", None),
)
return dataset
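# Editor's note: illustrative sketch, not part of the original NeMo source. The function above
# expects an OmegaConf DictConfig; the manifest path, labels and delimiter below are hypothetical.
def _example_audio_multi_label_dataset():  # illustrative only
    example_cfg = DictConfig(
        {
            'manifest_filepath': 'multilabel_train_manifest.json',  # hypothetical path
            'sample_rate': 16000,
            'labels': ['0', '1'],  # hypothetical label inventory
            'delimiter': ' ',      # labels in the manifest are space-separated
        }
    )
    return get_audio_multi_label_dataset(example_cfg)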
def get_tarred_audio_multi_label_dataset(
cfg: DictConfig, shuffle_n: int, global_rank: int, world_size: int
) -> audio_to_label.TarredAudioToMultiLabelDataset:
if "augmentor" in cfg:
augmentor = process_augmentations(cfg.augmentor)
else:
augmentor = None
tarred_audio_filepaths = cfg['tarred_audio_filepaths']
manifest_filepaths = cfg['manifest_filepath']
datasets = []
tarred_audio_filepaths = convert_to_config_list(tarred_audio_filepaths)
manifest_filepaths = convert_to_config_list(manifest_filepaths)
bucketing_weights = cfg.get('bucketing_weights', None) # For upsampling buckets
if bucketing_weights:
for idx, weight in enumerate(bucketing_weights):
if not isinstance(weight, int) or weight <= 0:
raise ValueError(f"bucket weights must be positive integers")
if len(manifest_filepaths) != len(tarred_audio_filepaths):
raise ValueError(
f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths (length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
)
for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
zip(tarred_audio_filepaths, manifest_filepaths)
):
if len(tarred_audio_filepath) == 1:
tarred_audio_filepath = tarred_audio_filepath[0]
dataset = audio_to_label.TarredAudioToMultiLabelDataset(
audio_tar_filepaths=tarred_audio_filepath,
manifest_filepath=manifest_filepath,
sample_rate=cfg["sample_rate"],
labels=cfg['labels'],
shuffle_n=shuffle_n,
int_values=cfg.get("int_values", False),
augmentor=augmentor,
min_duration=cfg.get('min_duration', None),
max_duration=cfg.get('max_duration', None),
trim_silence=cfg.get('trim_silence', False),
is_regression_task=cfg.get('is_regression_task', False),
delimiter=cfg.get("delimiter", None),
shard_strategy=cfg.get('tarred_shard_strategy', 'scatter'),
global_rank=global_rank,
world_size=world_size,
normalize_audio_db=cfg.get("normalize_audio_db", None),
)
if bucketing_weights:
datasets.extend([dataset] * bucketing_weights[dataset_idx])
else:
datasets.append(dataset)
return get_chain_dataset(datasets=datasets, ds_config=cfg, rank=global_rank)
|
NeMo-main
|
nemo/collections/asr/data/audio_to_label_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/data/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from nemo.collections.asr.data.feature_to_text import FeatureToBPEDataset, FeatureToCharDataset
from nemo.utils import logging
def get_char_dataset(config: dict, augmentor: Optional['FeatureAugmentor'] = None) -> FeatureToCharDataset:
"""
Instantiates a Character Encoding based FeatureToCharDataset.
Args:
config: Config of the FeatureToCharDataset.
augmentor: Optional FeatureAugmentor object for augmentations on audio features.
Returns:
An instance of FeatureToCharDataset.
"""
if 'labels' not in config:
logging.warning(f"dataset does not have explicitly defined labels")
dataset = FeatureToCharDataset(
manifest_filepath=config['manifest_filepath'],
labels=config.get('labels', None),
normalize=config.get('normalize', 'post_norm'),
normalize_type=config.get('normalize_type', 'per_feature'),
use_rttm=config.get('use_rttm', False),
rttm_mode=config.get('rttm_mode', 'mask'),
feat_min_len=config.get('feat_min_len', 4),
feat_mask_val=config.get('feat_mask_val', None),
frame_unit_time_secs=config.get('frame_unit_time_secs', 0.01),
sample_rate=config.get('sample_rate', 16000),
augmentor=augmentor,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
max_utts=config.get('max_utts', 0),
blank_index=config.get('blank_index', -1),
unk_index=config.get('unk_index', -1),
trim=config.get('trim_silence', False),
parser=config.get('parser', 'en'),
return_sample_id=config.get('return_sample_id', False),
channel_selector=config.get('channel_selector', None),
)
return dataset
def get_bpe_dataset(
config: dict, tokenizer: 'TokenizerSpec', augmentor: Optional['FeatureAugmentor'] = None
) -> FeatureToBPEDataset:
"""
Instantiates a Byte Pair Encoding / Word Piece Encoding based FeatureToBPEDataset.
Args:
config: Config of the FeatureToBPEDataset.
tokenizer: An instance of a TokenizerSpec object.
augmentor: Optional FeatureAugmentor object for augmentations on audio features.
Returns:
An instance of FeatureToBPEDataset.
"""
dataset = FeatureToBPEDataset(
manifest_filepath=config['manifest_filepath'],
tokenizer=tokenizer,
normalize=config.get('normalize', 'post_norm'),
normalize_type=config.get('normalize_type', 'per_feature'),
use_rttm=config.get('use_rttm', False),
rttm_mode=config.get('rttm_mode', 'mask'),
feat_min_len=config.get('feat_min_len', 4),
feat_mask_val=config.get('feat_mask_val', None),
frame_unit_time_secs=config.get('frame_unit_time_secs', 0.01),
sample_rate=config.get('sample_rate', 16000),
augmentor=augmentor,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
max_utts=config.get('max_utts', 0),
trim=config.get('trim_silence', False),
use_start_end_token=config.get('use_start_end_token', True),
return_sample_id=config.get('return_sample_id', False),
channel_selector=config.get('channel_selector', None),
)
return dataset
|
NeMo-main
|
nemo/collections/asr/data/feature_to_text_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import random
from math import isclose
from typing import Any, List, Optional, Union
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from omegaconf.listconfig import ListConfig
from pytorch_lightning.callbacks import BasePredictionWriter
from torch.utils.data import ChainDataset
from nemo.collections.asr.data import audio_to_text, audio_to_text_dali
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.collections.common.data.dataset import CodeSwitchedDataset, ConcatDataset
from nemo.utils import logging
def inject_dataloader_value_from_model_config(model_cfg: dict, dataloader_cfg: DictConfig, key: str):
"""
Extracts the label set provided at the top level of the model, and propagates it to the dataloader
config.
Args:
model_cfg: A DictConfig representing the model's config.
dataloader_cfg: A DictConfig representing the individual data loader
key: A str value representing a key in the model_cfg whose value will be propagated to the
dataloader config.
"""
if key not in model_cfg:
logging.info(
f"Model level config does not contain `{key}`, please explicitly provide `{key}` to the dataloaders."
)
return
if not isinstance(dataloader_cfg, DictConfig):
dataloader_cfg = DictConfig(dataloader_cfg)
# If key exists in the data loader config (either set explicitly or as a placeholder (via None))
if key in dataloader_cfg:
# Dataloader `labels` is provided and is non-null
if dataloader_cfg[key] is not None and model_cfg[key] != dataloader_cfg[key]:
# Model-level `labels` don't match the dataloader-level `labels`
logging.warning(
f'`{key}` is explicitly provided to the data loader, and is different from '
f'the `{key}` provided at the model level config.\n'
f'If this is incorrect, please set the dataloader\'s `{key}` to None.'
)
else:
# Dataloader `key` is None or values match
# Propagate from model level `key` (even if they match)
with open_dict(dataloader_cfg):
dataloader_cfg[key] = model_cfg[key]
else:
# If the key does not exist in dataloader_cfg at all, inject it explicitly
with open_dict(dataloader_cfg):
dataloader_cfg[key] = model_cfg[key]
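# Editor's note: illustrative sketch, not part of the original NeMo source. It demonstrates the
# propagation rule implemented above with hypothetical configs: when the dataloader leaves
# `labels` unset (None), the model-level label set is copied into the dataloader config.
def _example_inject_labels():  # illustrative only
    model_cfg = {'labels': [' ', 'a', 'b', 'c']}  # hypothetical model-level config
    dataloader_cfg = DictConfig({'manifest_filepath': 'train_manifest.json', 'labels': None})
    inject_dataloader_value_from_model_config(model_cfg, dataloader_cfg, key='labels')
    assert dataloader_cfg['labels'] == model_cfg['labels']
    return dataloader_cfg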
def get_concat_char_dataset(
config: dict, global_rank: int, world_size: int, augmentor: Optional['AudioAugmentor'] = None
) -> ConcatDataset:
"""
Instantiates an instance of ConcatDataset containing one or more instances of
Character Encoding based AudioToCharDataset.
Args:
config: Config of the AudioToCharDataset.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
augmentor: Optional AudioAugmentor object for augmentations on audio data.
Returns:
An instance of ConcatDataset containing one or more instances of AudioToCharDataset.
"""
if 'labels' not in config:
logging.warning(f"dataset does not have explicitly defined labels")
manifest_filepaths = config['manifest_filepath']
datasets = []
# needed to support validation Concat Datasets that arrive here as
# [[dataset1,dataset2]] otherwise ModelPT would interfere
if len(manifest_filepaths) == 1 and not isinstance(manifest_filepaths[0], str):
logging.info(f"removing an extra nesting level from {manifest_filepaths}")
manifest_filepaths = config['manifest_filepath'][0]
for manifest_filepath in manifest_filepaths:
conf = copy.deepcopy(config)
conf['manifest_filepath'] = manifest_filepath
dataset = get_char_dataset(config=conf, augmentor=augmentor)
datasets.append(dataset)
dataset = ConcatDataset(
datasets,
sampling_technique=config.get('concat_sampling_technique', 'temperature'),
sampling_temperature=config.get('concat_sampling_temperature', 5),
sampling_scale=config.get('concat_sampling_scale', 1),
sampling_probabilities=config.get('concat_sampling_probabilities', None),
shuffle=config.get('concat_shuffle', True),
seed=config.get('concat_sampling_seed', None),
global_rank=global_rank,
world_size=world_size,
)
return dataset
def get_char_dataset(config: dict, augmentor: Optional['AudioAugmentor'] = None) -> audio_to_text.AudioToCharDataset:
"""
Instantiates a Character Encoding based AudioToCharDataset.
Args:
config: Config of the AudioToCharDataset.
augmentor: Optional AudioAugmentor object for augmentations on audio data.
Returns:
An instance of AudioToCharDataset.
"""
if 'labels' not in config:
logging.warning(f"dataset does not have explicitly defined labels")
dataset = audio_to_text.AudioToCharDataset(
manifest_filepath=config['manifest_filepath'],
labels=config.get('labels', None),
sample_rate=config['sample_rate'],
int_values=config.get('int_values', False),
augmentor=augmentor,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
max_utts=config.get('max_utts', 0),
blank_index=config.get('blank_index', -1),
unk_index=config.get('unk_index', -1),
normalize=config.get('normalize_transcripts', False),
trim=config.get('trim_silence', False),
parser=config.get('parser', 'en'),
return_sample_id=config.get('return_sample_id', False),
channel_selector=config.get('channel_selector', None),
)
return dataset
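# Editor's note: illustrative sketch, not part of the original NeMo source. A minimal hypothetical
# config for a character-based dataset; the manifest path and label vocabulary are placeholders.
def _example_char_dataset():  # illustrative only
    example_config = {
        'manifest_filepath': 'asr_train_manifest.json',  # hypothetical path
        'labels': [' '] + list("abcdefghijklmnopqrstuvwxyz'"),
        'sample_rate': 16000,
        'max_duration': 20.0,
        'trim_silence': True,
    }
    return get_char_dataset(config=example_config, augmentor=None)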
def get_concat_bpe_dataset(
config: dict,
tokenizer: 'TokenizerSpec',
global_rank: int,
world_size: int,
augmentor: Optional['AudioAugmentor'] = None,
) -> ConcatDataset:
"""
Instantiates a ConcatDataset based on several Byte Pair Encoding / Word Piece Encoding based AudioToBPEDatasets.
Args:
config: Config of the AudioToBPEDataset.
tokenizer: An instance of a TokenizerSpec object.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
augmentor: Optional AudioAugmentor object for augmentations on audio data.
Returns:
An instance of ConcatDataset containing several instances of AudioToBPEDataset.
"""
manifest_filepaths = config['manifest_filepath']
datasets = []
# needed to support validation Concat Datasets that arrive here as
# [[dataset1,dataset2]] otherwise ModelPT would interfere
if len(manifest_filepaths) == 1 and not isinstance(manifest_filepaths[0], str):
logging.info(f"removing an extra nesting level from {manifest_filepaths}")
manifest_filepaths = config['manifest_filepath'][0]
for manifest_filepath in manifest_filepaths:
conf = copy.deepcopy(config)
conf['manifest_filepath'] = manifest_filepath
dataset = get_bpe_dataset(config=conf, tokenizer=tokenizer, augmentor=augmentor)
datasets.append(dataset)
dataset = ConcatDataset(
datasets,
sampling_technique=config.get('concat_sampling_technique', 'temperature'),
sampling_temperature=config.get('concat_sampling_temperature', 5),
sampling_scale=config.get('concat_sampling_scale', 1),
sampling_probabilities=config.get('concat_sampling_probabilities', None),
shuffle=config.get('concat_shuffle', True),
seed=config.get('concat_sampling_seed', None),
global_rank=global_rank,
world_size=world_size,
)
return dataset
def get_bpe_dataset(
config: dict, tokenizer: 'TokenizerSpec', augmentor: Optional['AudioAugmentor'] = None
) -> audio_to_text.AudioToBPEDataset:
"""
Instantiates a Byte Pair Encoding / Word Piece Encoding based AudioToBPEDataset.
Args:
config: Config of the AudioToBPEDataset.
tokenizer: An instance of a TokenizerSpec object.
augmentor: Optional AudioAugmentor object for augmentations on audio data.
Returns:
An instance of AudioToBPEDataset.
"""
dataset = audio_to_text.AudioToBPEDataset(
manifest_filepath=config['manifest_filepath'],
tokenizer=tokenizer,
sample_rate=config['sample_rate'],
int_values=config.get('int_values', False),
augmentor=augmentor,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
max_utts=config.get('max_utts', 0),
trim=config.get('trim_silence', False),
use_start_end_token=config.get('use_start_end_token', True),
return_sample_id=config.get('return_sample_id', False),
channel_selector=config.get('channel_selector', None),
)
return dataset
def get_concat_tarred_dataset(
config: dict,
shuffle_n: int,
global_rank: int,
world_size: int,
tokenizer: Optional['TokenizerSpec'] = None,
augmentor: Optional['AudioAugmentor'] = None,
) -> ConcatDataset:
"""
Instantiates a ConcatDataset containing multiple Word Piece/BPE Encoding based TarredAudioToBPEDataset or a char based TarredAudioToCharDataset.
Args:
config: Config of the TarredAudioToBPEDataset or TarredAudioToCharDataset.
shuffle_n: How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
tokenizer: An instance of a TokenizerSpec object if a BPE dataset is needed.
    Passing None returns a char-based dataset.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
augmentor: Optional AudioAugmentor object for augmentations on audio data.
Returns:
An instance of ConcatDataset containing one or more TarredAudioToBPEDatasets or TarredAudioToCharDatasets.
"""
tarred_audio_filepaths = config['tarred_audio_filepaths']
manifest_filepaths = config['manifest_filepath']
datasets = []
for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
zip(tarred_audio_filepaths, manifest_filepaths)
):
conf = copy.deepcopy(config)
conf['manifest_filepath'] = manifest_filepath
conf['tarred_audio_filepaths'] = tarred_audio_filepath
dataset = get_tarred_dataset(
config=conf,
tokenizer=tokenizer,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
augmentor=augmentor,
)
datasets.append(dataset)
dataset = ConcatDataset(
datasets,
sampling_technique=config.get('concat_sampling_technique', 'temperature'),
sampling_temperature=config.get('concat_sampling_temperature', 5),
sampling_scale=config.get('concat_sampling_scale', 1),
sampling_probabilities=config.get('concat_sampling_probabilities', None),
shuffle=config.get('concat_shuffle', True),
seed=config.get('concat_sampling_seed', None),
global_rank=global_rank,
world_size=world_size,
)
return dataset
def get_tarred_dataset(
config: dict,
shuffle_n: int,
global_rank: int,
world_size: int,
tokenizer: Optional['TokenizerSpec'] = None,
augmentor: Optional['AudioAugmentor'] = None,
) -> Union[audio_to_text.TarredAudioToBPEDataset, audio_to_text.TarredAudioToCharDataset]:
"""
Instantiates a Word Piece/BPE Encoding based TarredAudioToBPEDataset or a char based TarredAudioToCharDataset.
Args:
config: Config of the TarredAudioToBPEDataset or TarredAudioToCharDataset.
shuffle_n: How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
tokenizer: An instance of a TokenizerSpec object if a BPE dataset is needed.
    Passing None returns a char-based dataset.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
augmentor: Optional AudioAugmentor object for augmentations on audio data.
Returns:
An instance of TarredAudioToBPEDataset or TarredAudioToCharDataset.
"""
tarred_audio_filepaths = config['tarred_audio_filepaths']
manifest_filepaths = config['manifest_filepath']
datasets = []
tarred_audio_filepaths = convert_to_config_list(tarred_audio_filepaths)
manifest_filepaths = convert_to_config_list(manifest_filepaths)
bucketing_weights = config.get('bucketing_weights', None) # For upsampling buckets
if bucketing_weights:
for idx, weight in enumerate(bucketing_weights):
if not isinstance(weight, int) or weight <= 0:
raise ValueError(f"bucket weights must be positive integers")
if len(manifest_filepaths) != len(tarred_audio_filepaths):
raise ValueError(
f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths (length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
)
if 'labels' not in config:
logging.warning(f"dataset does not have explicitly defined labels")
if 'max_utts' in config:
raise ValueError('"max_utts" parameter is not supported for tarred datasets')
for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
zip(tarred_audio_filepaths, manifest_filepaths)
):
if len(tarred_audio_filepath) == 1:
tarred_audio_filepath = tarred_audio_filepath[0]
if len(manifest_filepath) == 1:
manifest_filepath = manifest_filepath[0]
if tokenizer is None:
dataset = audio_to_text.TarredAudioToCharDataset(
audio_tar_filepaths=tarred_audio_filepath,
manifest_filepath=manifest_filepath,
labels=config.get('labels', None),
sample_rate=config['sample_rate'],
int_values=config.get('int_values', False),
augmentor=augmentor,
shuffle_n=shuffle_n,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
blank_index=config.get('blank_index', -1),
unk_index=config.get('unk_index', -1),
normalize=config.get('normalize_transcripts', False),
trim=config.get('trim_silence', False),
parser=config.get('parser', 'en'),
shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
shard_manifests=config.get('shard_manifests', False),
global_rank=global_rank,
world_size=world_size,
return_sample_id=config.get('return_sample_id', False),
)
else:
dataset = audio_to_text.TarredAudioToBPEDataset(
audio_tar_filepaths=tarred_audio_filepath,
manifest_filepath=manifest_filepath,
tokenizer=tokenizer,
sample_rate=config['sample_rate'],
int_values=config.get('int_values', False),
augmentor=augmentor,
shuffle_n=shuffle_n,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', False),
use_start_end_token=config.get('use_start_end_token', True),
shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
shard_manifests=config.get('shard_manifests', False),
global_rank=global_rank,
world_size=world_size,
return_sample_id=config.get('return_sample_id', False),
)
if bucketing_weights:
datasets.extend([dataset] * bucketing_weights[dataset_idx])
else:
datasets.append(dataset)
return get_chain_dataset(datasets=datasets, ds_config=config, rank=global_rank)
def get_code_switched_dataset(
config: dict,
shuffle_n: int,
global_rank: int,
world_size: int,
tokenizer: Optional['TokenizerSpec'] = None,
augmentor: Optional['AudioAugmentor'] = None,
) -> CodeSwitchedDataset:
if 'manifest_filepath' not in config:
raise ValueError("`manifest_filepath` must be provided in the dataset config if `is_code_switched=True`")
if 'code_switched' not in config:
raise ValueError("`code_switched` param group must be in the dataset config if `is_code_switched=True`")
manifest_filepaths = config['manifest_filepath']
tarred_audio_filepaths = config.get('tarred_audio_filepaths', None)
cs_config = OmegaConf.to_container(config['code_switched'])
# needed to support validation Datasets that arrive here as
# [[dataset1,dataset2]] otherwise ModelPT would interfere
if len(manifest_filepaths) == 1 and not isinstance(manifest_filepaths[0], str):
manifest_filepaths = config['manifest_filepath'][0]
if tarred_audio_filepaths is None:
tarred_audio_filepaths = [None] * len(manifest_filepaths)
if len(manifest_filepaths) != len(tarred_audio_filepaths):
raise ValueError(
f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths (length={len(tarred_audio_filepaths)}) need to have the same number of items."
)
datasets = []
for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
zip(tarred_audio_filepaths, manifest_filepaths)
):
conf = copy.deepcopy(config)
conf['manifest_filepath'] = manifest_filepath
with open_dict(conf):
conf['tarred_audio_filepaths'] = tarred_audio_filepath
if tarred_audio_filepath is None or len(tarred_audio_filepath) == 0:
if tokenizer is None:
dataset = get_char_dataset(config=conf, augmentor=None)
else:
dataset = get_bpe_dataset(config=conf, tokenizer=tokenizer, augmentor=None)
else:
dataset = get_tarred_dataset(
config=conf,
tokenizer=tokenizer,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
augmentor=None,
)
datasets.append(dataset)
config = OmegaConf.to_container(config)
dataset = CodeSwitchedDataset(
datasets,
shuffle=cs_config.get('shuffle', True),
min_duration=cs_config.get('min_duration', 4),
max_duration=cs_config.get('max_duration', 20),
min_monolingual=cs_config.get('min_monolingual', 0.3),
lang_probs=cs_config.get('probs', None),
db_norm=cs_config.get('db_norm', -25.0),
pause_start=cs_config.get('pause_start', 0),
pause_join=cs_config.get('pause_join', 0),
pause_end=cs_config.get('pause_end', 0),
sampling_scales=cs_config.get('sampling_scales', None),
seed=cs_config.get('seed', None),
global_rank=global_rank,
world_size=world_size,
pure_random=cs_config.get('pure_random', False),
force_monochannel=cs_config.get('force_monochannel', True),
infinity_mode=cs_config.get('infinity_mode', False),
sample_rate=config['sample_rate'],
augmentor=augmentor,
)
return dataset
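# Editor's note: illustrative sketch, not part of the original NeMo source. It shows the shape of
# the nested `code_switched` config consumed above and a char-based call (tokenizer=None); the
# manifest paths, labels and probabilities are hypothetical placeholders.
def _example_code_switched_dataset(global_rank, world_size):  # illustrative only
    example_config = DictConfig(
        {
            'manifest_filepath': ['en_manifest.json', 'es_manifest.json'],  # hypothetical paths
            'sample_rate': 16000,
            'labels': [' '] + list("abcdefghijklmnopqrstuvwxyz'"),
            'code_switched': {
                'probs': [0.5, 0.5],  # per-language sampling probabilities, should sum to 1
                'min_duration': 4,
                'max_duration': 20,
                'min_monolingual': 0.3,
            },
        }
    )
    return get_code_switched_dataset(
        config=example_config,
        shuffle_n=128,
        global_rank=global_rank,
        world_size=world_size,
        tokenizer=None,  # None -> the per-language datasets are char-based
        augmentor=None,
    )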
def get_dali_char_dataset(
config: dict,
shuffle: bool,
device_id: int,
global_rank: int,
world_size: int,
preprocessor_cfg: Optional[DictConfig] = None,
) -> audio_to_text_dali.AudioToCharDALIDataset:
"""
Instantiates a Character Encoding based AudioToCharDALIDataset.
Args:
config: Config of the AudioToCharDALIDataset.
shuffle: Bool flag whether to shuffle the dataset.
device_id: Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
preprocessor_cfg: Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
Returns:
An instance of AudioToCharDALIDataset.
"""
device = 'gpu' if torch.cuda.is_available() else 'cpu'
dataset = audio_to_text_dali.AudioToCharDALIDataset(
manifest_filepath=config['manifest_filepath'],
device=device,
batch_size=config['batch_size'],
labels=config['labels'],
sample_rate=config['sample_rate'],
audio_tar_filepaths=config.get('tarred_audio_filepaths', None),
audio_tar_index_filepaths=config.get('tarred_audio_index_filepaths', None),
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
blank_index=config.get('blank_index', -1),
unk_index=config.get('unk_index', -1),
normalize=config.get('normalize_transcripts', False),
trim=config.get('trim_silence', False),
parser=config.get('parser', 'en'),
shuffle=shuffle,
shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
device_id=device_id,
global_rank=global_rank,
world_size=world_size,
preprocessor_cfg=preprocessor_cfg,
return_sample_id=config.get('return_sample_id', False),
)
return dataset
def get_dali_bpe_dataset(
config: dict,
tokenizer,
shuffle: bool,
device_id: int,
global_rank: int,
world_size: int,
preprocessor_cfg: Optional[DictConfig] = None,
) -> audio_to_text_dali.AudioToBPEDALIDataset:
"""
Instantiates a Subword Encoding based AudioToBPEDALIDataset.
Args:
config: Config of the AudioToBPEDALIDataset.
tokenizer: An implementation of NeMo TokenizerSpec.
shuffle: Bool flag whether to shuffle the dataset.
device_id: Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
global_rank: Global rank of this device.
world_size: Global world size in the training method.
preprocessor_cfg: Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
Returns:
An instance of AudioToBPEDALIDataset.
"""
device = 'gpu' if torch.cuda.is_available() else 'cpu'
dataset = audio_to_text_dali.AudioToBPEDALIDataset(
manifest_filepath=config['manifest_filepath'],
tokenizer=tokenizer,
device=device,
batch_size=config['batch_size'],
sample_rate=config['sample_rate'],
audio_tar_filepaths=config.get('tarred_audio_filepaths', None),
audio_tar_index_filepaths=config.get('tarred_audio_index_filepaths', None),
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', False),
use_start_end_token=config.get('use_start_end_token', True),
shuffle=shuffle,
shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
device_id=device_id,
global_rank=global_rank,
world_size=world_size,
preprocessor_cfg=preprocessor_cfg,
return_sample_id=config.get('return_sample_id', False),
)
return dataset
def get_audio_to_text_char_dataset_from_config(
config, local_rank: int, global_rank: int, world_size: int, preprocessor_cfg: Optional[DictConfig] = None
):
"""
Construct Audio-To-Text Char dataset from a config.
Args:
config: dataset config
local_rank: model local rank
global_rank: model global rank
world_size: world size
preprocessor_cfg: preprocessor config, for DALI dataset
Returns:
constructed dataset or None if dataset config is invalid or nothing to load
"""
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'], global_rank=global_rank, world_size=world_size)
else:
augmentor = None
is_concat = config.get('is_concat', False)
if is_concat:
if 'concat_sampling_technique' in config and config['concat_sampling_technique'] is None:
logging.warning(
f"Concat dataset requires `concat_sampling_technique` but it was not provided. Config: {config}"
)
return None
if config['concat_sampling_technique'] == 'random':
if 'concat_sampling_probabilities' not in config:
logging.warning(f"Concat dataset requires `concat_sampling_probabilities` list. Config: {config}")
return None
else:
if not isclose(sum(config['concat_sampling_probabilities']), 1, abs_tol=1e-6):
logging.warning(f"`concat_sampling_probabilities` need to sum to 1. Config: {config}")
return None
shuffle = config['shuffle']
device = 'gpu' if torch.cuda.is_available() else 'cpu'
if config.get('use_dali', False):
device_id = local_rank if device == 'gpu' else None
dataset = get_dali_char_dataset(
config=config,
shuffle=shuffle,
device_id=device_id,
global_rank=global_rank,
world_size=world_size,
preprocessor_cfg=preprocessor_cfg,
)
return dataset
# Instantiate a code-switched dataset if config is present
if config.get('is_code_switched', False):
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
if not ('code_switched' in config and config['code_switched'] is not None):
logging.warning(
f"Code switched dataset requires `*_ds.code_switched.*` dict but it was not provided. Config: {config}"
)
return None
if (
('probs' in config['code_switched'])
and (config['code_switched']['probs'] is not None)
and (not isclose(sum(config['code_switched']['probs']), 1, abs_tol=1e-6))
):
logging.warning(f"`.code_switched.probs` need to sum to 1. Config: {config['code_switched']}")
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
dataset = get_code_switched_dataset(
config=config,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
tokenizer=None,
augmentor=augmentor,
)
# Instantiate tarred dataset loader or normal dataset loader
elif config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
logging.warning(
"Could not load dataset as `manifest_filepath` was None or "
f"`tarred_audio_filepaths` is None. Provided config : {config}"
)
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
if is_concat:
dataset = get_concat_tarred_dataset(
config=config,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
augmentor=augmentor,
)
else:
dataset = get_tarred_dataset(
config=config,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
augmentor=augmentor,
)
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
if is_concat:
dataset = get_concat_char_dataset(
config=config, global_rank=global_rank, world_size=world_size, augmentor=augmentor
)
else:
dataset = get_char_dataset(config=config, augmentor=augmentor)
return dataset
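# Editor's note: illustrative sketch, not part of the original NeMo source. It exercises the
# dispatch above with a hypothetical non-tarred, non-DALI, non-concat config; in real training
# the config normally comes from the model's `train_ds` section.
def _example_char_dataset_from_config():  # illustrative only
    example_config = {
        'manifest_filepath': 'asr_train_manifest.json',  # hypothetical path
        'labels': [' '] + list("abcdefghijklmnopqrstuvwxyz'"),
        'sample_rate': 16000,
        'batch_size': 32,
        'shuffle': True,
    }
    return get_audio_to_text_char_dataset_from_config(
        config=example_config, local_rank=0, global_rank=0, world_size=1
    )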
def get_audio_to_text_bpe_dataset_from_config(
config,
local_rank: int,
global_rank: int,
world_size: int,
tokenizer,
preprocessor_cfg: Optional[DictConfig] = None,
):
"""
Construct Audio-To-Text BPE dataset from a config.
Args:
config: BPE dataset config
local_rank: model local rank
global_rank: model global rank
world_size: world size
tokenizer: BPE tokenizer
preprocessor_cfg: preprocessor config, for DALI BPE dataset
Returns:
constructed dataset or None if dataset config is invalid or nothing to load
"""
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'], global_rank=global_rank, world_size=world_size)
else:
augmentor = None
is_concat = config.get('is_concat', False)
if is_concat:
if 'concat_sampling_technique' in config and config['concat_sampling_technique'] is None:
logging.warning(
f"Concat dataset requires `concat_sampling_technique` but it was not provided. Config: {config}"
)
return None
if config['concat_sampling_technique'] == 'random':
if 'concat_sampling_probabilities' not in config:
logging.warning(f"Concat dataset requires `concat_sampling_probabilities` list. Config: {config}")
return None
else:
if not isclose(sum(config['concat_sampling_probabilities']), 1, abs_tol=1e-6):
logging.warning(f"`concat_sampling_probabilities` need to sum to 1. Config: {config}")
return None
shuffle = config['shuffle']
device = 'gpu' if torch.cuda.is_available() else 'cpu'
if config.get('use_dali', False):
device_id = local_rank if device == 'gpu' else None
dataset = get_dali_bpe_dataset(
config=config,
tokenizer=tokenizer,
shuffle=shuffle,
device_id=device_id,
global_rank=global_rank,
world_size=world_size,
preprocessor_cfg=preprocessor_cfg,
)
return dataset
# Instantiate a code-switched dataset if config is present
if config.get('is_code_switched', False):
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
if not ('code_switched' in config and config['code_switched'] is not None):
logging.warning(
f"Code switched dataset requires `*_ds.code_switched.*` dict but it was not provided. Config: {config}"
)
return None
if (
('probs' in config['code_switched'])
and (config['code_switched']['probs'] is not None)
and (not isclose(sum(config['code_switched']['probs']), 1, abs_tol=1e-6))
):
logging.warning(f"`.code_switched.probs` need to sum to 1. Config: {config['code_switched']}")
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
dataset = get_code_switched_dataset(
config=config,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
tokenizer=tokenizer,
augmentor=augmentor,
)
# Instantiate tarred dataset loader or normal dataset loader
elif config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
logging.warning(
"Could not load dataset as `manifest_filepath` was None or "
f"`tarred_audio_filepaths` is None. Provided config : {config}"
)
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
if is_concat:
dataset = get_concat_tarred_dataset(
config=config,
tokenizer=tokenizer,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
augmentor=augmentor,
)
else:
dataset = get_tarred_dataset(
config=config,
tokenizer=tokenizer,
shuffle_n=shuffle_n,
global_rank=global_rank,
world_size=world_size,
augmentor=augmentor,
)
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
if is_concat:
dataset = get_concat_bpe_dataset(
config=config,
global_rank=global_rank,
world_size=world_size,
tokenizer=tokenizer,
augmentor=augmentor,
)
else:
dataset = get_bpe_dataset(config=config, tokenizer=tokenizer, augmentor=augmentor)
return dataset
class ASRPredictionWriter(BasePredictionWriter):
def __init__(self, dataset, output_file: str):
super().__init__(write_interval="batch")
self.outf = open(output_file, 'w', encoding='utf-8')
self.dataset = dataset
self.samples_num = 0
def write_on_batch_end(
self,
trainer,
pl_module: 'LightningModule',
prediction: Any,
batch_indices: List[int],
batch: Any,
batch_idx: int,
dataloader_idx: int,
):
for sample_id, transcribed_text in prediction:
item = {}
sample = self.dataset.get_manifest_sample(sample_id)
item["audio_filepath"] = sample.audio_file
item["offset"] = sample.offset
item["duration"] = sample.duration
item["text"] = sample.text_raw
item["pred_text"] = transcribed_text
self.outf.write(json.dumps(item) + "\n")
self.samples_num += 1
return
def close_output_file(self):
self.outf.close()
return self.samples_num
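# Editor's note: illustrative sketch, not part of the original NeMo source. It shows how the
# writer above is typically attached as a Lightning callback around `trainer.predict`; the
# trainer, model (assumed to define its own predict dataloader) and output path are hypothetical.
def _example_prediction_writer(trainer, model, dataset):  # illustrative only
    writer = ASRPredictionWriter(dataset=dataset, output_file='predictions.json')  # hypothetical path
    trainer.callbacks.append(writer)
    trainer.predict(model, return_predictions=False)
    return writer.close_output_file()  # number of samples written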
def convert_to_config_list(initial_list):
if type(initial_list) is str:
initial_list = initial_list.split(",")
if initial_list is None or initial_list == []:
raise ValueError("manifest_filepaths and tarred_audio_filepaths must not be empty.")
if not isinstance(initial_list, ListConfig):
initial_list = ListConfig([initial_list])
for list_idx, list_val in enumerate(initial_list):
if type(list_val) != type(initial_list[0]):
raise ValueError(
"manifest_filepaths and tarred_audio_filepaths need to be a list of lists for bucketing or just a list of strings"
)
if type(initial_list[0]) is not ListConfig:
initial_list = ListConfig([initial_list])
return initial_list
def get_chain_dataset(datasets, ds_config, rank=0):
if len(datasets) > 1:
if ds_config.get('bucketing_batch_size', None) is not None:
bucketing_batch_sizes = calc_bucketing_batch_sizes(ds_config, len(datasets))
logging.info(
f"Batch bucketing is enabled for {len(datasets)} buckets with adaptive batch sizes of {bucketing_batch_sizes}!"
)
for idx, dataset in enumerate(datasets):
datasets[idx] = audio_to_text.BucketingDataset(
dataset=dataset, bucketing_batch_size=bucketing_batch_sizes[idx]
)
else:
logging.info(
f"Batch bucketing is enabled for {len(datasets)} buckets with fixed batch size of {ds_config['batch_size']}!"
)
if len(datasets) == 1:
return datasets[0]
bucketing_strategy = ds_config.get('bucketing_strategy', 'synced_randomized')
if bucketing_strategy == 'fixed_order':
return ChainDataset(datasets)
elif bucketing_strategy == 'synced_randomized':
return audio_to_text.RandomizedChainDataset(datasets=datasets, rnd_seed=0)
elif bucketing_strategy == 'fully_randomized':
return audio_to_text.RandomizedChainDataset(datasets=datasets, rnd_seed=random.randint(0, 30000) + rank)
else:
raise ValueError(
f'bucketing_strategy={bucketing_strategy} is not supported! Supported strategies are [fixed_order, fully_randomized, synced_randomized].'
)
def calc_bucketing_batch_sizes(ds_config, datasets_len):
bucketing_batch_size = ds_config['bucketing_batch_size']
bucketing_weights = ds_config.get('bucketing_weights', None) # To adjust for upsampled buckets
bucketing_batch_sizes = []
if ds_config['batch_size'] != 1:
raise ValueError(
f"batch_size should be set to one when bucketing_batch_size is set and adaptive bucketing is enabled (batch_size={ds_config['batch_size']}!"
)
if type(bucketing_batch_size) == int: # linear scaling
if bucketing_weights: # Want same batchsize for the same duplicated bucket
for idx, weight in enumerate(bucketing_weights):
scale_factor = datasets_len - idx
bucketing_batch_sizes.extend([scale_factor * bucketing_batch_size] * weight)
else:
for idx in range(datasets_len):
scale_factor = datasets_len - idx
bucketing_batch_sizes.append(scale_factor * bucketing_batch_size)
elif isinstance(bucketing_batch_size, ListConfig) or isinstance(
bucketing_batch_size, list
): # assigned bucket sizes
if bucketing_weights: # Want same batchsize for same duplicated bucket
for idx, weight in enumerate(bucketing_weights):
bucketing_batch_sizes.extend([bucketing_batch_size[idx]] * weight)
else:
bucketing_batch_sizes = bucketing_batch_size
else:
raise ValueError(
f"bucketing_batch_size should be an integer or a list (bucketing_batch_size={bucketing_batch_size})!"
)
if len(bucketing_batch_sizes) != datasets_len:
raise ValueError(
f"batch_size should have the same length as the number of buckets ({len(bucketing_batch_sizes)}!={datasets_len}) "
)
return bucketing_batch_sizes
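# Editor's note: illustrative sketch, not part of the original NeMo source. A worked example of
# the linear scaling rule above: with 4 buckets and bucketing_batch_size=8, the shortest bucket
# gets the largest batch (4 * 8 = 32) and the longest bucket the smallest (1 * 8 = 8).
def _example_bucketing_batch_sizes():  # illustrative only
    ds_config = {'batch_size': 1, 'bucketing_batch_size': 8}
    sizes = calc_bucketing_batch_sizes(ds_config, datasets_len=4)
    assert sizes == [32, 24, 16, 8]
    return sizes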
|
NeMo-main
|
nemo/collections/asr/data/audio_to_text_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Tuple
from nemo.collections.asr.data.audio_to_text_dataset import ASRPredictionWriter
from nemo.utils import logging
@dataclass
class FrameCtmUnit:
"""A container class for one CTM unit with start and length countable in frames.
"""
label: str
start_frame: int
length: int
probability: float
def __repr__(self) -> str:
return f"{self.label}\t({self.probability:1.3f}): [{self.start_frame:6d}, {self.length:6d}]"
@property
def end_frame(self):
return self.start_frame + self.length
def to_ctm_str(self, time_per_frame: float) -> str:
"""Represents the data as part of the CTM line.
The CTM line format is
<utterance_name> <channel> <start_time> <duration> <label_str> <probability>
This method prepares the last four entities."""
return f"{self.start_frame * time_per_frame :.3f} {self.length * time_per_frame :.3f} {self.label} {self.probability :1.3f}"
class ASRCTMPredictionWriter(ASRPredictionWriter):
def __init__(self, dataset, output_file: str, output_ctm_dir: str, time_per_frame: float):
super().__init__(dataset, output_file)
self.output_ctm_dir = output_ctm_dir
self.time_per_frame = time_per_frame
os.makedirs(self.output_ctm_dir, exist_ok=True)
def write_ctm(self, name, filepath, frameCtmUnits):
with open(filepath, "tw", encoding="utf-8") as f:
for unit in frameCtmUnits:
f.write(f"{name} 1 {unit.to_ctm_str(self.time_per_frame)}\n")
def write_on_batch_end(
self,
trainer,
pl_module: 'LightningModule',
prediction: Tuple[int, List[FrameCtmUnit]],
batch_indices: List[int],
batch: Any,
batch_idx: int,
dataloader_idx: int,
):
for sample_id, units in prediction:
sample = self.dataset.get_manifest_sample(sample_id)
with_ctm = True
if len(units) == 0:
logging.warning(
f"""Do not producing CTM output for item `{sample.audio_file}`.
Check if text is empty or if duration is too short: `{sample.text_raw}`, {sample.duration}"""
)
with_ctm = False
item = {}
item["audio_filepath"] = sample.audio_file
item["duration"] = sample.duration
item["text"] = sample.text_raw
if with_ctm:
utt_name = Path(sample.audio_file).stem
ctm_filepath = os.path.join(self.output_ctm_dir, utt_name) + ".ctm"
self.write_ctm(utt_name, ctm_filepath, units)
item["ctm_filepath"] = ctm_filepath
else:
item["ctm_filepath"] = ""
self.outf.write(json.dumps(item) + "\n")
self.samples_num += 1
return
|
NeMo-main
|
nemo/collections/asr/data/audio_to_ctm_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import concurrent.futures
import copy
import gc
import json
import math
import random
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, NamedTuple, Optional, Set, Union
import numpy as np
import torch
import torch.utils.data
from torch.nn.utils.rnn import pad_sequence
from tqdm.auto import tqdm
from nemo.collections.asr.data.audio_to_text import _speech_collate_fn
from nemo.collections.common.tokenizers import TokenizerSpec
from nemo.core.classes import Dataset, IterableDataset
from nemo.utils import logging
try:
from nemo_text_processing.text_normalization.normalize import Normalizer
except Exception as e:
pass # Normalizer imported only for annotation purposes, error can be ignored
AnyPath = Union[Path, str]
class TextToTextItem(NamedTuple):
tts_text: torch.Tensor # normalized and tokenized text for TTS
transcript: torch.Tensor # tokenized text for ASR
speaker: int # speaker id for multi-speaker TTS
class TextToTextBatch(NamedTuple):
tts_texts: torch.Tensor # tokenized texts for tts
tts_text_lengths: torch.Tensor
transcripts: torch.Tensor # tokenized texts for ASR
transcript_lengths: torch.Tensor
speakers: torch.Tensor # speaker ids for multi-speaker TTS
@staticmethod
def collate_fn(batch: List[TextToTextItem], asr_pad_id: int, tts_text_pad_id: int) -> TextToTextBatch:
return TextToTextBatch(
tts_texts=pad_sequence([item.tts_text for item in batch], batch_first=True, padding_value=tts_text_pad_id),
tts_text_lengths=torch.tensor([item.tts_text.shape[0] for item in batch]).long(),
transcripts=pad_sequence([item.transcript for item in batch], batch_first=True, padding_value=asr_pad_id),
transcript_lengths=torch.tensor([item.transcript.shape[0] for item in batch]).long(),
speakers=torch.tensor([item.speaker for item in batch]).long(),
)
class TextOrAudioToTextBatch(NamedTuple):
audio_signals: torch.Tensor
audio_signal_lengths: torch.Tensor
tts_texts: torch.Tensor
tts_text_lengths: torch.Tensor
speakers: torch.Tensor
transcripts: torch.Tensor
transcript_lengths: torch.Tensor
@staticmethod
def collate_fn(
batch: List[Union[TextToTextItem, tuple]], tts_text_pad_id: int, asr_pad_id: int
) -> Union[TextToTextBatch, TextOrAudioToTextBatch, tuple]:
"""
Collate function for dataloader
Can accept mixed batch of text-to-text items and audio-text items (typical for ASR)
"""
text_items: List[TextToTextItem] = [item for item in batch if isinstance(item, TextToTextItem)]
if not text_items:
# pure audio-text batch
return _speech_collate_fn(batch=batch, pad_id=asr_pad_id)
asr_items = [item for item in batch if not isinstance(item, TextToTextItem)]
if not asr_items:
# pure text-to-text batch
return TextToTextBatch.collate_fn(batch=text_items, asr_pad_id=asr_pad_id, tts_text_pad_id=tts_text_pad_id)
# mixed batch
# each asr item is a tuple:
# audio_signal (0), audio_length (1), transcript (2), transcript_length (3), sample_id (4, optional)
audio_signals = pad_sequence([item[0] for item in asr_items], batch_first=True, padding_value=0.0)
audio_signal_lengths = torch.tensor([item[1] for item in asr_items]).long()
tts_texts = pad_sequence(
[item.tts_text for item in text_items], batch_first=True, padding_value=tts_text_pad_id
)
tts_text_lengths = torch.tensor([item.tts_text.shape[0] for item in text_items]).long()
speakers = torch.tensor([item.speaker for item in text_items]).long()
transcripts = pad_sequence(
[item.transcript for item in text_items] + [item[2] for item in asr_items],
batch_first=True,
padding_value=asr_pad_id,
)
transcript_lengths = torch.tensor(
[item.transcript.shape[0] for item in text_items] + [item[3] for item in asr_items]
).long()
return TextOrAudioToTextBatch(
audio_signals=audio_signals,
audio_signal_lengths=audio_signal_lengths,
tts_texts=tts_texts,
tts_text_lengths=tts_text_lengths,
speakers=speakers,
transcripts=transcripts,
transcript_lengths=transcript_lengths,
)
def _asr_text_to_tokens(text: str) -> np.ndarray:
"""
Helper function for asr tokenization with multiprocessing pool only.
Must be defined on the top level.
Expects asr_tokenizer_global, asr_bos_id_global, asr_eos_id_global to exist in the current pool process
"""
ids = asr_tokenizer_global.text_to_ids(text)
if asr_bos_id_global is not None:
ids = [asr_bos_id_global] + ids
if asr_eos_id_global is not None:
ids.append(asr_eos_id_global)
return np.asarray(ids)
def _tts_text_to_tokens(text: str) -> np.ndarray:
"""
Helper function for TTS tokenization with a multiprocessing pool only.
Must be defined on the top level.
Expects tts_tokenizer_global to exist in the current pool process
"""
return np.asarray(tts_tokenizer_global(text))
def _iterate_manifest(filepath: AnyPath) -> Iterable[Dict[str, Any]]:
"""
Helper function to iterate manifest
"""
with open(filepath, "r", encoding="utf-8") as f:
for line in f:
record = json.loads(line)
yield record
class TextToTextDatasetBase:
"""
Base class for loading text-to-text manifests
Map-style and Iterable datasets should inherit this class
"""
asr_pad_id: int
tts_text_pad_id: int
asr_bos_id: Optional[int] = None
asr_eos_id: Optional[int] = None
data: List[Dict[str, Any]]
def __init__(
self,
manifest_filepath: Union[AnyPath, List[AnyPath]],
speakers_filepath: Union[AnyPath, List[AnyPath]],
asr_tokenizer: TokenizerSpec,
asr_use_start_end_token: bool,
tts_parser: Callable,
tts_text_pad_id: int,
tts_text_normalizer: "Normalizer",
tts_text_normalizer_call_kwargs: Dict,
min_words: int = 1,
max_words: int = 1_000_000,
tokenizer_workers: int = 1,
num_parts: int = 1,
current_part_index: int = 0,
):
super().__init__()
# ASR tokenizer setup
if asr_use_start_end_token and hasattr(asr_tokenizer, 'bos_token'):
self.asr_bos_id = asr_tokenizer.bos_id
if asr_use_start_end_token and hasattr(asr_tokenizer, 'eos_token'):
self.asr_eos_id = asr_tokenizer.eos_id
if hasattr(asr_tokenizer, 'pad_token'):
self.asr_pad_id = asr_tokenizer.pad_id
else:
self.asr_pad_id = 0
self.asr_tokenizer = asr_tokenizer
# TTS tokenizer setup
self.tts_parser = tts_parser
self.tts_normalizer = tts_text_normalizer
self.tts_normalizer_kwargs = tts_text_normalizer_call_kwargs
self.tts_text_pad_id = tts_text_pad_id
# Load speakers
if isinstance(speakers_filepath, str):
speakers_filepath = speakers_filepath.split(",")
elif isinstance(speakers_filepath, Path):
speakers_filepath = [speakers_filepath]
speakers: Set[int] = set()
for filepath in speakers_filepath:
with open(Path(filepath).expanduser(), "r") as f:
speakers.update(map(int, f.read().split()))
self.speakers = np.asarray(sorted(speakers))
logging.info(f"Loaded {len(self.speakers)} speakers")
# Load manifest
if isinstance(manifest_filepath, str):
manifest_filepath = manifest_filepath.split(",")
elif isinstance(manifest_filepath, Path):
manifest_filepath = [manifest_filepath]
self.manifest_paths = [Path(filepath) for filepath in manifest_filepath]
num_skipped_words = 0
num_skipped_utterances = 0
asr_texts = []
tts_texts = []
need_normalization = False
for manifest_path in self.manifest_paths:
for tmp_item in tqdm(_iterate_manifest(manifest_path)):
text = tmp_item["text"]
num_words = len(text.split())
                # skip if the number of words is not in the desired range
# TODO: maybe it would be valuable to sample sub-utterances from long utterances
if not (min_words <= num_words <= max_words):
num_skipped_words += num_words
num_skipped_utterances += 1
continue
asr_texts.append(tmp_item["text"])
if "tts_text_normalized" in tmp_item:
tts_texts.append(tmp_item["tts_text_normalized"])
else:
tts_texts.append(tmp_item["tts_text"])
need_normalization = True
if need_normalization:
logging.warning("TTS normalization is extremely slow! It is recommended to normalize TTS text")
if num_skipped_utterances:
logging.warning(f"Skipped {num_skipped_utterances} utterances " f"with {num_skipped_words}")
num_utterances = len(asr_texts)
# preprocessing is very costly, if we need only part - remove unnecessary utterances
if num_parts > 1:
            # NB: floor division; the union of all parts may contain fewer utterances than the original, as in tarred datasets
num_utterances_part = num_utterances // num_parts
start = num_utterances_part * current_part_index
end = start + num_utterances_part
logging.info(
f"Taking part of the dataset: {current_part_index} index, total {num_parts} from {start} to {end}"
)
asr_texts = asr_texts[start:end]
tts_texts = tts_texts[start:end]
num_utterances = num_utterances_part
self.data = [dict() for _ in range(num_utterances)]
if len(asr_texts) == 0:
# no data was loaded
logging.warning("Text-to-text dataset is empty")
return
if tokenizer_workers == 1:
logging.warning(
"Preprocessing large text with tokenizer_workers=1 may be slow with TTS tokenizer. "
"Prefer tokenizer_workers=(num_cpu_cores/num_gpus_per_node)"
)
for i, tokenized_text in enumerate(
tqdm((self._asr_text_to_tokens(text) for text in asr_texts), total=len(asr_texts))
):
self.data[i]["asr_text_tokens"] = tokenized_text
else:
# Multiprocessing hack: use global variables for every process (not really global in program context)
def _init_asr_tokenize_process(tokenizer, bos_id, eos_id):
global asr_tokenizer_global, asr_bos_id_global, asr_eos_id_global # process-global
# deepcopy to avoid serialization of parent models
asr_tokenizer_global = copy.deepcopy(tokenizer)
asr_bos_id_global = copy.deepcopy(bos_id)
asr_eos_id_global = copy.deepcopy(eos_id)
with concurrent.futures.ProcessPoolExecutor(
initializer=_init_asr_tokenize_process,
initargs=(asr_tokenizer, self.asr_bos_id, self.asr_eos_id),
max_workers=tokenizer_workers,
) as pool:
# chunk size for pool map is empirically chosen as a trade-off between speed and responsiveness
for i, tokenized_text in enumerate(
tqdm(pool.map(_asr_text_to_tokens, asr_texts, chunksize=1000), total=len(asr_texts))
):
self.data[i]["asr_text_tokens"] = tokenized_text
# force free memory
del asr_texts
gc.collect()
if tokenizer_workers == 1:
logging.warning(
"Preprocessing large text with tokenizer_workers=1 may be slow with TTS tokenizer. "
"Prefer tokenizer_workers=(num_cpu_cores/num_gpus_per_node)"
)
for i, tokenized_text in enumerate(
tqdm(
(self._tts_text_to_tokens(text, normalize=need_normalization) for text in tts_texts),
total=len(tts_texts),
)
):
self.data[i]["tts_text_tokens"] = tokenized_text
else:
if need_normalization:
# TODO: implement, if we really need normalization inplace
raise NotImplementedError(
"Normalization with tokenizer_workers > 1 is not implemented. "
"It is not recommended to use normalization on the fly at all, since it's extremely slow"
)
def _init_tts_tokenize_process(tokenizer):
global tts_tokenizer_global # process-global
tts_tokenizer_global = copy.deepcopy(tokenizer)
with concurrent.futures.ProcessPoolExecutor(
initializer=_init_tts_tokenize_process, initargs=(tts_parser,), max_workers=tokenizer_workers,
) as pool:
# chunk size for pool map is empirically chosen as a trade-off between speed and responsiveness
for i, tokenized_text in enumerate(
tqdm(pool.map(_tts_text_to_tokens, tts_texts, chunksize=1000), total=len(tts_texts))
):
self.data[i]["tts_text_tokens"] = tokenized_text
# force free memory
del tts_texts
gc.collect()
def _asr_text_to_tokens(self, text: str) -> np.ndarray:
ids = self.asr_tokenizer.text_to_ids(text)
if self.asr_bos_id is not None:
ids = [self.asr_bos_id] + ids
if self.asr_eos_id is not None:
ids.append(self.asr_eos_id)
return np.asarray(ids)
def _tts_text_to_tokens(self, text: str, normalize=True) -> np.ndarray:
if normalize:
text = self.tts_normalizer.normalize(text, **self.tts_normalizer_kwargs)
tokens = self.tts_parser(text)
return np.asarray(tokens)
def __getitem__(self, index):
item = self.data[index]
return TextToTextItem(
transcript=torch.from_numpy(item["asr_text_tokens"]).long(),
tts_text=torch.from_numpy(item["tts_text_tokens"]).long(),
speaker=random.choice(self.speakers),
)
def __len__(self):
return len(self.data)
class TextToTextDataset(TextToTextDatasetBase, Dataset):
"""Text-to-Text Map-style Dataset for hybrid ASR-TTS models"""
def __init__(
self,
manifest_filepath: Union[AnyPath, List[AnyPath]],
speakers_filepath: Union[AnyPath, List[AnyPath]],
asr_tokenizer: TokenizerSpec,
asr_use_start_end_token: bool,
tts_parser: Callable,
tts_text_pad_id: int,
tts_text_normalizer: "Normalizer",
tts_text_normalizer_call_kwargs: Dict,
min_words: int = 1,
max_words: int = 1_000_000,
tokenizer_workers: int = 1,
):
super().__init__(
manifest_filepath=manifest_filepath,
speakers_filepath=speakers_filepath,
asr_tokenizer=asr_tokenizer,
asr_use_start_end_token=asr_use_start_end_token,
tts_parser=tts_parser,
tts_text_pad_id=tts_text_pad_id,
tts_text_normalizer=tts_text_normalizer,
tts_text_normalizer_call_kwargs=tts_text_normalizer_call_kwargs,
min_words=min_words,
max_words=max_words,
tokenizer_workers=tokenizer_workers,
num_parts=1,
)
def collate_fn(
self, batch: List[Union[TextToTextItem, tuple]]
) -> Union[TextToTextBatch, TextOrAudioToTextBatch, tuple]:
"""
Collate function for dataloader
Can accept mixed batch of text-to-text items and audio-text items (typical for ASR)
"""
return TextOrAudioToTextBatch.collate_fn(
batch=batch, asr_pad_id=self.asr_pad_id, tts_text_pad_id=self.tts_text_pad_id
)
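# Hypothetical usage sketch (not part of the original file): wiring TextToTextDataset into a torch
# DataLoader. The tokenizer, TTS parser and normalizer are assumed to be built elsewhere; argument
# values below are placeholders, not NeMo defaults.
def _example_text_to_text_dataloader(
    manifest_filepath, speakers_filepath, asr_tokenizer, tts_parser, tts_normalizer, normalizer_kwargs
):
    from torch.utils.data import DataLoader

    dataset = TextToTextDataset(
        manifest_filepath=manifest_filepath,
        speakers_filepath=speakers_filepath,
        asr_tokenizer=asr_tokenizer,
        asr_use_start_end_token=True,
        tts_parser=tts_parser,
        tts_text_pad_id=0,  # assumption: pad id used by the TTS text tokenizer
        tts_text_normalizer=tts_normalizer,
        tts_text_normalizer_call_kwargs=normalizer_kwargs,
    )
    # dataset.collate_fn handles pure text batches as well as mixed text/audio batches
    return DataLoader(dataset, batch_size=16, collate_fn=dataset.collate_fn)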
class TextToTextIterableDataset(TextToTextDatasetBase, IterableDataset):
"""
Text-to-Text Iterable Dataset for hybrid ASR-TTS models
Only part necessary for current process should be loaded and stored
"""
def __init__(
self,
manifest_filepath: Union[AnyPath, List[AnyPath]],
speakers_filepath: Union[AnyPath, List[AnyPath]],
asr_tokenizer: TokenizerSpec,
asr_use_start_end_token: bool,
tts_parser: Callable,
tts_text_pad_id: int,
tts_text_normalizer: "Normalizer",
tts_text_normalizer_call_kwargs: Dict,
min_words: int = 1,
max_words: int = 1_000_000,
tokenizer_workers: int = 1,
num_parts: int = 1,
current_part_index: int = 0,
):
super().__init__(
manifest_filepath=manifest_filepath,
speakers_filepath=speakers_filepath,
asr_tokenizer=asr_tokenizer,
asr_use_start_end_token=asr_use_start_end_token,
tts_parser=tts_parser,
tts_text_pad_id=tts_text_pad_id,
tts_text_normalizer=tts_text_normalizer,
tts_text_normalizer_call_kwargs=tts_text_normalizer_call_kwargs,
min_words=min_words,
max_words=max_words,
tokenizer_workers=tokenizer_workers,
num_parts=num_parts,
current_part_index=current_part_index,
)
def __iter__(self):
# Implementation based on docs: https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset
worker_info = torch.utils.data.get_worker_info()
if worker_info is None: # single-process data loading, return the full iterator
start = 0
end = len(self)
else: # in a worker process
# split workload
per_worker = int(math.ceil(len(self) / float(worker_info.num_workers)))
worker_id = worker_info.id
start = worker_id * per_worker
end = min(start + per_worker, len(self))
indices = np.arange(start, end)
np.random.shuffle(indices)
return map(self.__getitem__, indices)
def collate_fn(
self, batch: List[Union[TextToTextItem, tuple]]
) -> Union[TextToTextBatch, TextOrAudioToTextBatch, tuple]:
"""
Collate function for dataloader
Can accept mixed batch of text-to-text items and audio-text items (typical for ASR)
"""
return TextOrAudioToTextBatch.collate_fn(
batch=batch, asr_pad_id=self.asr_pad_id, tts_text_pad_id=self.tts_text_pad_id
)
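# Illustrative sketch (not part of the original file): the per-worker index range computed in
# TextToTextIterableDataset.__iter__ above, extracted as a standalone helper for clarity.
def _example_worker_shard(dataset_len: int, num_workers: int, worker_id: int):
    import math

    per_worker = int(math.ceil(dataset_len / float(num_workers)))
    start = worker_id * per_worker
    end = min(start + per_worker, dataset_len)
    return start, end

# e.g. _example_worker_shard(10, 4, 3) == (9, 10): the last worker receives the remainder.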
|
NeMo-main
|
nemo/collections/asr/data/text_to_text.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from nemo.collections.asr.data.feature_to_label import _audio_feature_collate_fn
from nemo.collections.asr.parts.preprocessing.feature_loader import ExternalFeatureLoader
from nemo.collections.asr.parts.preprocessing.features import normalize_batch
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.collections.asr.parts.utils.vad_utils import load_speech_segments_from_rttm
from nemo.collections.common import tokenizers
from nemo.collections.common.parts.preprocessing import collections, parsers
from nemo.core.classes import Dataset
from nemo.core.neural_types import AcousticEncodedRepresentation, LabelsType, LengthsType, NeuralType
class ASRFeatureManifestProcessor:
def __init__(
self,
manifest_filepath: str,
parser: Union[str, Callable],
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
max_utts: int = 0,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
index_by_file_id: bool = False,
):
self.parser = parser
self.collection = collections.ASRFeatureText(
manifests_files=manifest_filepath,
parser=parser,
min_duration=min_duration,
max_duration=max_duration,
max_number=max_utts,
index_by_file_id=index_by_file_id,
)
self.eos_id = eos_id
self.bos_id = bos_id
self.pad_id = pad_id
def process_text_by_id(self, index: int) -> Tuple[List[int], int]:
sample = self.collection[index]
return self.process_text_by_sample(sample)
def process_text_by_file_id(self, file_id: str) -> Tuple[List[int], int]:
manifest_idx = self.collection.mapping[file_id][0]
sample = self.collection[manifest_idx]
return self.process_text_by_sample(sample)
def process_text_by_sample(self, sample: collections.ASRAudioText.OUTPUT_TYPE) -> Tuple[List[int], int]:
t, tl = sample.text_tokens, len(sample.text_tokens)
if self.bos_id is not None:
t = [self.bos_id] + t
tl += 1
if self.eos_id is not None:
t = t + [self.eos_id]
tl += 1
return t, tl
class _FeatureTextDataset(Dataset):
"""
Dataset that loads tensors via a json file containing paths to audio feature files, transcripts,
durations (in seconds) and optional RTTM files. Each new line is a different sample. Example below:
{"feature_filepath": "/path/to/audio_feature.pt", "text_filepath": "/path/to/audio.txt",
"rttm_filepath": "/path/to/audio_rttm.rttm", "duration": 23.147}
...
{"feature_filepath": "/path/to/audio_feature.pt", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath (str): Path to manifest json as described above. Can be comma-separated paths.
parser: Str for a language specific preprocessor or a callable.
        normalize (Optional[str]): where to normalize the feature, must be one of [None, "post_norm", "pre_norm"]
normalize_type (Union[str, dict]): how to normalize feature, see `nemo.collections.asr.parts.preprocessing.features.normalize_batch`
use_rttm (bool): whether to use RTTM files if there is any, default to False
rttm_mode (str): how to use RTTM files, must be one of ['mask', 'drop'], default to 'mask'
        feat_min_len (int): minimum length of feature when rttm_mode=drop, default to 4.
        feat_mask_val (Optional[float]): value used to mask features with RTTM files, default to None to use the zero mel-spectrogram value
        frame_unit_time_secs (float): time in seconds for each frame
        sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor object used to augment loaded audio
max_duration (float): If audio exceeds this length, do not include in dataset
min_duration (float): If audio is less than this length, do not include in dataset
max_utts (int): Limit number of utterances
trim (bool): whether or not to trim silence. Defaults to False
bos_id (int): Id of beginning of sequence symbol to append if not None
eos_id (int): Id of end of sequence symbol to append if not None
pad_id (int): Id of pad symbol. Defaults to 0
return_sample_id (bool): whether to return the sample_id as a part of each sample
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
"""
ZERO_LEVEL_SPEC_DB_VAL = -16.635 # Log-Melspectrogram value for zero signal
NORM_MODES = ["pre_norm", "post_norm"]
RTTM_MODES = ["mask", "drop"]
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'features': NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
'feature_length': NeuralType(tuple('B'), LengthsType()),
'transcripts': NeuralType(('B', 'T'), LabelsType()),
'transcript_length': NeuralType(tuple('B'), LengthsType()),
'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
}
def __init__(
self,
manifest_filepath: str,
parser: Union[str, Callable],
normalize: Optional[str] = "post_norm",
normalize_type: Union[str, dict] = "per_feature",
use_rttm: bool = False,
rttm_mode: str = "mask",
feat_min_len: int = 4,
feat_mask_val: Optional[float] = None,
frame_unit_time_secs: float = 0.01,
sample_rate: Optional[int] = 16000,
augmentor: 'nemo.collections.asr.parts.perturb.FeatureAugmentor' = None,
max_duration: Optional[int] = None,
min_duration: Optional[int] = None,
max_utts: int = 0,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
return_sample_id: bool = False,
channel_selector: Optional[ChannelSelectorType] = None,
):
if type(manifest_filepath) == str:
manifest_filepath = manifest_filepath.split(",")
self.sample_rate = sample_rate
self.normalize = normalize
self.normalize_type = normalize_type
self.use_rttm = use_rttm
self.rttm_mode = rttm_mode
if self.use_rttm and self.rttm_mode not in self.RTTM_MODES:
raise ValueError(f"`rttm_mode` must be one of {self.RTTM_MODES}, got `{rttm_mode}` instead")
self.feat_min_len = feat_min_len
if feat_mask_val is not None:
self.feat_mask_val = feat_mask_val
elif normalize == "pre_norm":
self.feat_mask_val = 0.0 # similar to SpectralAugmentation
else:
self.feat_mask_val = self.ZERO_LEVEL_SPEC_DB_VAL
if normalize is not None and normalize not in self.NORM_MODES:
raise ValueError(f"`normalize` must be one of {self.NORM_MODES}, got `{normalize}` instead")
self.frame_unit_time_secs = frame_unit_time_secs
self.manifest_processor = ASRFeatureManifestProcessor(
manifest_filepath=manifest_filepath,
parser=parser,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
)
self.featurizer = ExternalFeatureLoader(augmentor=augmentor)
self.trim = trim
self.return_sample_id = return_sample_id
self.channel_selector = channel_selector
def get_manifest_sample(self, sample_id):
return self.manifest_processor.collection[sample_id]
def __getitem__(self, index):
sample = self.manifest_processor.collection[index]
offset = sample.offset
if offset is None:
offset = 0
features = self.featurizer.process(sample.feature_file)
f, fl = features, torch.tensor(features.shape[1]).long()
t, tl = self.manifest_processor.process_text_by_sample(sample=sample)
# Feature normalization
if self.normalize is None:
if self.use_rttm and sample.rttm_file:
f = self.process_features_with_rttm(f, offset, sample.rttm_file, self.feat_mask_val)
elif self.normalize == "post_norm":
# (Optional) Masking based on RTTM file
if self.use_rttm and sample.rttm_file:
f = self.process_features_with_rttm(f, offset, sample.rttm_file, self.feat_mask_val)
f = self.normalize_feature(f)
else: # pre-norm
f = self.normalize_feature(f)
# (Optional) Masking based on RTTM file
if self.use_rttm and sample.rttm_file:
f = self.process_features_with_rttm(f, offset, sample.rttm_file, self.feat_mask_val)
if self.return_sample_id:
output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long(), index
else:
output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long()
return output
def process_features_with_rttm(self, features, offset, rttm_file, mask_val):
segments = load_speech_segments_from_rttm(rttm_file)
new_features = features.clone()
sid, fid = 0, 0
for i in range(features.size(1)):
t = offset + i * self.frame_unit_time_secs
while sid < len(segments) - 1 and segments[sid][1] < t:
sid += 1
if segments[sid][1] == 0 or t < segments[sid][0] or t > segments[sid][1]:
# not in speech segment
if self.rttm_mode == "drop":
# drop the frame
continue
else:
# mask the frame with specified value
new_features[:, i] = mask_val
fid += 1
else:
# in speech segment
new_features[:, fid] = features[:, i]
fid += 1
if fid < self.feat_min_len and self.rttm_mode == "drop":
new_features[:, : self.feat_min_len] = mask_val
return new_features[:, : self.feat_min_len]
return new_features[:, :fid]
def __len__(self):
return len(self.manifest_processor.collection)
def _collate_fn(self, batch):
return _audio_feature_collate_fn(
batch, feat_pad_val=self.feat_mask_val, label_pad_id=self.manifest_processor.pad_id
)
def normalize_feature(self, feat):
"""
Args:
feat: feature tensor of shape [M, T]
"""
feat = feat.unsqueeze(0) # add batch dim
feat, _, _ = normalize_batch(feat, torch.tensor([feat.size(-1)]), self.normalize_type)
return feat.squeeze(0) # delete batch dim
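# Illustrative sketch (not part of the original file): the frame-level speech/non-speech decision
# used by _FeatureTextDataset.process_features_with_rttm, reduced to the "mask" case. Segments are
# assumed to be (start_sec, end_sec) pairs, mirroring load_speech_segments_from_rttm.
def _example_rttm_speech_mask(num_frames: int, offset: float, segments, frame_unit_time_secs: float = 0.01):
    mask = []
    sid = 0
    for i in range(num_frames):
        t = offset + i * frame_unit_time_secs
        # advance to the first segment whose end time has not yet passed the current frame time
        while sid < len(segments) - 1 and segments[sid][1] < t:
            sid += 1
        in_speech = not (segments[sid][1] == 0 or t < segments[sid][0] or t > segments[sid][1])
        mask.append(in_speech)
    return mask

# e.g. _example_rttm_speech_mask(5, 0.0, [(0.01, 0.035)]) == [False, True, True, True, False]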
class FeatureToCharDataset(_FeatureTextDataset):
"""
Dataset that loads tensors via a json file containing paths to audio feature
files, transcripts, durations (in seconds) and optional RTTM files. Each new line is a
different sample. Example below:
{"feature_filepath": "/path/to/audio_feature.pt", "text_filepath":
"/path/to/audio.txt", "duration": 23.147, "rttm_filepath": "/path/to/audio_rttm.rttm",}
...
{"feature_filepath": "/path/to/audio_feature.pt", "text": "the
transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
Args:
manifest_filepath (str): Path to manifest json as described above. Can
be comma-separated paths.
labels (str): String containing all the possible characters to map to
normalize (str): how to normalize feature, must be one of [None, "post_norm", "pre_norm"]
normalize_type (Union[str, dict]): how to normalize feature, see `nemo.collections.asr.parts.preprocessing.features.normalize_batch`
use_rttm (bool): whether to use RTTM files if there is any, default to False
rttm_mode (str): how to use RTTM files, must be one of ['mask', 'drop'], default to 'mask'
feat_min_len (int): minimum length of feature, default to 4
        feat_mask_val (Optional[float]): value used to mask features with RTTM files, default to None to use the zero mel-spectrogram value
        frame_unit_time_secs: time in seconds for each frame
        sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include
in dataset
max_utts: Limit number of utterances
blank_index: blank character index, default = -1
unk_index: unk_character index, default = -1
bos_id: Id of beginning of sequence symbol to append if not None
eos_id: Id of end of sequence symbol to append if not None
return_sample_id (bool): whether to return the sample_id as a part of each sample
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
"""
def __init__(
self,
manifest_filepath: str,
labels: Union[str, List[str]],
normalize: Optional[str] = "post_norm",
normalize_type: Union[str, dict] = "per_feature",
use_rttm: bool = False,
rttm_mode: str = "mask",
feat_min_len: int = 4,
feat_mask_val: Optional[float] = None,
frame_unit_time_secs: float = 0.01,
sample_rate: Optional[int] = 16000,
augmentor: 'nemo.collections.asr.parts.perturb.FeatureAugmentor' = None,
max_duration: Optional[int] = None,
min_duration: Optional[int] = None,
max_utts: int = 0,
blank_index: int = -1,
unk_index: int = -1,
trim: bool = False,
bos_id: Optional[int] = None,
eos_id: Optional[int] = None,
pad_id: int = 0,
parser: Union[str, Callable] = 'en',
return_sample_id: bool = False,
channel_selector: Optional[ChannelSelectorType] = None,
):
self.labels = labels
parser = parsers.make_parser(
labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
)
super().__init__(
manifest_filepath=manifest_filepath,
parser=parser,
normalize=normalize,
normalize_type=normalize_type,
use_rttm=use_rttm,
rttm_mode=rttm_mode,
feat_min_len=feat_min_len,
feat_mask_val=feat_mask_val,
frame_unit_time_secs=frame_unit_time_secs,
sample_rate=sample_rate,
augmentor=augmentor,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
return_sample_id=return_sample_id,
channel_selector=channel_selector,
)
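# Hypothetical usage sketch (not part of the original file): constructing a character-level
# feature-to-text dataset. The label set below is a placeholder; in practice it comes from the
# model configuration.
def _example_feature_to_char_dataset(manifest_filepath: str):
    labels = [" ", "a", "b", "c"]  # assumption: truncated character vocabulary for illustration
    return FeatureToCharDataset(
        manifest_filepath=manifest_filepath,
        labels=labels,
        normalize="post_norm",
        use_rttm=False,
    )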
class FeatureToBPEDataset(_FeatureTextDataset):
"""
Dataset that loads tensors via a json file containing paths to audio feature
files, transcripts, durations (in seconds) and optional RTTM files. Each new line is a different sample.
Example below:
{"audio_filepath": "/path/to/audio.wav", "text_filepath":
"/path/to/audio.txt", "duration": 23.147, "rttm_filepath": "/path/to/audio_rttm.rttm",}
...
{"audio_filepath": "/path/to/audio.wav", "text": "the
transcription", "offset": 301.75, "duration": 0.82, "utt":
"utterance_id", "ctm_utt": "en_4156", "side": "A"}
In practice, the dataset and manifest used for character encoding and byte pair encoding
are exactly the same. The only difference lies in how the dataset tokenizes the text in
the manifest.
Args:
manifest_filepath (str): Path to manifest json as described above. Can
be comma-separated paths.
tokenizer: A subclass of the Tokenizer wrapper found in the common collection,
nemo.collections.common.tokenizers.TokenizerSpec. ASR Models support a subset of
all available tokenizers.
normalize (str): how to normalize feature, must be one of [None, "post_norm", "pre_norm"]
normalize_type (Union[str, dict]): how to normalize feature, see `nemo.collections.asr.parts.preprocessing.features.normalize_batch`
use_rttm (bool): whether to use RTTM files if there is any, default to False
rttm_mode (str): how to use RTTM files, must be one of ['mask', 'drop'], default to 'mask'
feat_min_len (int): minimum length of feature, default to 4
        feat_mask_val (Optional[float]): value used to mask features with RTTM files, default to None to use the zero mel-spectrogram value
        frame_unit_time_secs: time in seconds for each frame
        sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
object used to augment loaded audio
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include
in dataset
max_utts: Limit number of utterances
trim: Whether to trim silence segments
use_start_end_token: Boolean which dictates whether to add [BOS] and [EOS]
tokens to beginning and ending of speech respectively.
return_sample_id (bool): whether to return the sample_id as a part of each sample
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
"""
def __init__(
self,
manifest_filepath: str,
tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
normalize: Optional[str] = "post_norm",
normalize_type: Union[str, dict] = "per_feature",
use_rttm: bool = False,
rttm_mode: str = "mask",
feat_min_len: int = 4,
feat_mask_val: Optional[float] = None,
frame_unit_time_secs: float = 0.01,
sample_rate: Optional[int] = 16000,
augmentor: 'nemo.collections.asr.parts.perturb.FeatureAugmentor' = None,
max_duration: Optional[int] = None,
min_duration: Optional[int] = None,
max_utts: int = 0,
use_start_end_token: bool = True,
trim: bool = False,
return_sample_id: bool = False,
channel_selector: Optional[ChannelSelectorType] = None,
):
if use_start_end_token and hasattr(tokenizer, "bos_id") and tokenizer.bos_id > 0:
bos_id = tokenizer.bos_id
else:
bos_id = None
if use_start_end_token and hasattr(tokenizer, "eos_id") and tokenizer.eos_id > 0:
eos_id = tokenizer.eos_id
else:
eos_id = None
if hasattr(tokenizer, "pad_id") and tokenizer.pad_id > 0:
pad_id = tokenizer.pad_id
else:
pad_id = 0
class TokenizerWrapper:
def __init__(self, tokenizer):
if isinstance(tokenizer, tokenizers.aggregate_tokenizer.AggregateTokenizer):
self.is_aggregate = True
else:
self.is_aggregate = False
self._tokenizer = tokenizer
def __call__(self, *args):
if isinstance(args[0], List) and self.is_aggregate:
t = []
for span in args[0]:
t.extend(self._tokenizer.text_to_ids(span['str'], span['lang']))
return t
t = self._tokenizer.text_to_ids(*args)
return t
super().__init__(
manifest_filepath=manifest_filepath,
parser=TokenizerWrapper(tokenizer),
normalize=normalize,
normalize_type=normalize_type,
use_rttm=use_rttm,
rttm_mode=rttm_mode,
feat_min_len=feat_min_len,
feat_mask_val=feat_mask_val,
frame_unit_time_secs=frame_unit_time_secs,
sample_rate=sample_rate,
augmentor=augmentor,
max_duration=max_duration,
min_duration=min_duration,
max_utts=max_utts,
trim=trim,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
return_sample_id=return_sample_id,
channel_selector=channel_selector,
)
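# Illustrative sketch (not part of the original file): the special-token resolution performed at
# the top of FeatureToBPEDataset.__init__, extracted as a helper. Missing or non-positive
# bos/eos/pad ids fall back to None / 0, matching the logic above.
def _example_resolve_special_ids(tokenizer, use_start_end_token: bool = True):
    bos_id = tokenizer.bos_id if use_start_end_token and getattr(tokenizer, "bos_id", -1) > 0 else None
    eos_id = tokenizer.eos_id if use_start_end_token and getattr(tokenizer, "eos_id", -1) > 0 else None
    pad_id = tokenizer.pad_id if getattr(tokenizer, "pad_id", -1) > 0 else 0
    return bos_id, eos_id, pad_id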
|
NeMo-main
|
nemo/collections/asr/data/feature_to_text.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent
import itertools
import multiprocessing
import os
import random
import warnings
from typing import Dict, Iterable, List, Optional, Tuple, Union
import h5py
import librosa
import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
import torch
from numpy.random import default_rng
from omegaconf import DictConfig, OmegaConf
from scipy.signal import convolve
from scipy.signal.windows import cosine, hamming, hann
from scipy.spatial.transform import Rotation
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.asr.parts.utils.audio_utils import db2mag, generate_approximate_noise_field, mag2db, pow2db, rms
from nemo.collections.asr.parts.utils.data_simulation_utils import (
DataAnnotator,
SpeechSampler,
build_speaker_samples_map,
get_background_noise,
get_cleaned_base_path,
get_random_offset_index,
get_speaker_ids,
get_speaker_samples,
get_split_points_in_alignments,
load_speaker_sample,
normalize_audio,
per_speaker_normalize,
perturb_audio,
read_audio_from_buffer,
read_noise_manifest,
)
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
from nemo.collections.asr.parts.utils.speaker_utils import get_overlap_range, is_overlap, merge_float_intervals
from nemo.utils import logging
try:
import pyroomacoustics as pra
from pyroomacoustics.directivities import CardioidFamily, DirectionVector, DirectivityPattern
PRA = True
except ImportError:
PRA = False
try:
from gpuRIR import att2t_SabineEstimator, beta_SabineEstimation, simulateRIR, t2n
GPURIR = True
except ImportError:
GPURIR = False
class MultiSpeakerSimulator(object):
"""
Multispeaker Audio Session Simulator - Simulates multispeaker audio sessions using single-speaker audio files and
corresponding word alignments.
Change Log:
v1.0: Dec 2022
        - First working version, supports multispeaker simulation with overlaps, silence and RIR
v1.0.1: Feb 2023
- Multi-GPU support for speed up
- Faster random sampling routine
- Fixed sentence duration bug
- Silence and overlap length sampling algorithms are updated to guarantee `mean_silence` approximation
v1.0.2: March 2023
- Added support for segment-level gain perturbation and session-level white-noise perturbation
- Modified speaker sampling mechanism to include as many speakers as possible in each data-generation run
- Added chunking mechanism to avoid freezing in multiprocessing processes
v1.1.0 March 2023
- Faster audio-file loading with maximum audio duration parameter
- Re-organized MultiSpeakerSimulator class and moved util functions to util files.
v1.1.1 March 2023
- Changed `silence_mean` to use exactly the same sampling equation as `overlap_mean`.
Args:
cfg: OmegaConf configuration loaded from yaml file.
Parameters:
manifest_filepath (str): Manifest file with paths to single speaker audio files
sr (int): Sampling rate of the input audio files from the manifest
random_seed (int): Seed to random number generator
session_config:
num_speakers (int): Number of unique speakers per multispeaker audio session
num_sessions (int): Number of sessions to simulate
session_length (int): Length of each simulated multispeaker audio session (seconds). Short sessions
(e.g. ~240 seconds) tend to fall short of the expected overlap-ratio and silence-ratio.
session_params:
max_audio_read_sec (int): The maximum audio length in second when loading an audio file.
The bigger the number, the slower the reading speed. Should be greater than 2.5 second.
sentence_length_params (list): k,p values for a negative_binomial distribution which is sampled to get the
sentence length (in number of words)
dominance_var (float): Variance in speaker dominance (where each speaker's dominance is sampled from a normal
distribution centered on 1/`num_speakers`, and then the dominance values are together
normalized to 1)
min_dominance (float): Minimum percentage of speaking time per speaker (note that this can cause the dominance of
the other speakers to be slightly reduced)
turn_prob (float): Probability of switching speakers after each utterance
mean_silence (float): Mean proportion of silence to speaking time in the audio session. Should be in range [0, 1).
mean_silence_var (float): Variance for mean silence in all audio sessions.
This value should be 0 <= mean_silence_var < mean_silence * (1 - mean_silence).
per_silence_var (float): Variance for each silence in an audio session, set large values (e.g., 20) for de-correlation.
per_silence_min (float): Minimum duration for each silence, default to 0.
per_silence_max (float): Maximum duration for each silence, default to -1 for no maximum.
mean_overlap (float): Mean proportion of overlap in the overall non-silence duration. Should be in range [0, 1) and
recommend [0, 0.15] range for accurate results.
mean_overlap_var (float): Variance for mean overlap in all audio sessions.
This value should be 0 <= mean_overlap_var < mean_overlap * (1 - mean_overlap).
per_overlap_var (float): Variance for per overlap in each session, set large values to de-correlate silence lengths
with the latest speech segment lengths
per_overlap_min (float): Minimum per overlap duration in seconds
per_overlap_max (float): Maximum per overlap duration in seconds, set -1 for no maximum
start_window (bool): Whether to window the start of sentences to smooth the audio signal (and remove silence at
the start of the clip)
window_type (str): Type of windowing used when segmenting utterances ("hamming", "hann", "cosine")
window_size (float): Length of window at the start or the end of segmented utterance (seconds)
start_buffer (float): Buffer of silence before the start of the sentence (to avoid cutting off speech or starting
abruptly)
split_buffer (float): Split RTTM labels if greater than twice this amount of silence (to avoid long gaps between
utterances as being labelled as speech)
release_buffer (float): Buffer before window at end of sentence (to avoid cutting off speech or ending abruptly)
normalize (bool): Normalize speaker volumes
        normalization_type (str): Normalizing speakers ("equal" - same volume per speaker, "variable" - variable volume per
speaker)
        normalization_var (float): Variance in speaker volume (sampled from a normal distribution centered at 1)
min_volume (float): Minimum speaker volume (only used when variable normalization is used)
max_volume (float): Maximum speaker volume (only used when variable normalization is used)
end_buffer (float): Buffer at the end of the session to leave blank
outputs:
output_dir (str): Output directory for audio sessions and corresponding label files
output_filename (str): Output filename for the wav and RTTM files
overwrite_output (bool): If true, delete the output directory if it exists
output_precision (int): Number of decimal places in output files
background_noise:
add_bg (bool): Add ambient background noise if true
background_manifest (str): Path to background noise manifest file
snr (int): SNR for background noise (using average speaker power), set `snr_min` and `snr_max` values to enable random SNR
snr_min (int): Min random SNR for background noise (using average speaker power), set `null` to use fixed SNR
snr_max (int): Max random SNR for background noise (using average speaker power), set `null` to use fixed SNR
segment_augmentor:
add_seg_aug (bool): Set True to enable augmentation on each speech segment (Default: False)
segmentor:
gain:
                    prob (float): Probability of applying gain augmentation to an individual segment
min_gain_dbfs (float): minimum gain in terms of dB
max_gain_dbfs (float): maximum gain in terms of dB
session_augmentor:
add_sess_aug: (bool) set True to enable audio augmentation on the whole session (Default: False)
segmentor:
white_noise:
prob (float): Probability of adding white noise (Default: 1.0)
                    min_level (float): minimum noise level in dB
                    max_level (float): maximum noise level in dB
speaker_enforcement:
enforce_num_speakers (bool): Enforce that all requested speakers are present in the output wav file
enforce_time (list): Percentage of the way through the audio session that enforcement mode is triggered (sampled
between time 1 and 2)
segment_manifest: (parameters for regenerating the segment manifest file)
window (float): Window length for segmentation
shift (float): Shift length for segmentation
step_count (int): Number of the unit segments you want to create per utterance
deci (int): Rounding decimals for segment manifest file
"""
def __init__(self, cfg):
self._params = cfg
self.annotator = DataAnnotator(cfg)
self.sampler = SpeechSampler(cfg)
# internal params
self._manifest = read_manifest(self._params.data_simulator.manifest_filepath)
self._speaker_samples = build_speaker_samples_map(self._manifest)
self._noise_samples = []
self._sentence = None
self._text = ""
self._words = []
self._alignments = []
# minimum number of alignments for a manifest to be considered valid
self._min_alignment_count = 2
self._merged_speech_intervals = []
# keep track of furthest sample per speaker to avoid overlapping same speaker
self._furthest_sample = [0 for n in range(self._params.data_simulator.session_config.num_speakers)]
# use to ensure overlap percentage is correct
self._missing_overlap = 0
# creating manifests during online data simulation
self.base_manifest_filepath = None
self.segment_manifest_filepath = None
self._max_audio_read_sec = self._params.data_simulator.session_params.max_audio_read_sec
self._turn_prob_min = self._params.data_simulator.session_params.get("turn_prob_min", 0.5)
# variable speaker volume
self._volume = None
self._speaker_ids = None
self._device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self._audio_read_buffer_dict = {}
self.add_missing_overlap = self._params.data_simulator.session_params.get("add_missing_overlap", False)
if (
self._params.data_simulator.segment_augmentor.get("augmentor", None)
and self._params.data_simulator.segment_augmentor.add_seg_aug
):
self.segment_augmentor = process_augmentations(
augmenter=self._params.data_simulator.segment_augmentor.augmentor
)
else:
self.segment_augmentor = None
if (
self._params.data_simulator.session_augmentor.get("augmentor", None)
and self._params.data_simulator.session_augmentor.add_sess_aug
):
self.session_augmentor = process_augmentations(
augmenter=self._params.data_simulator.session_augmentor.augmentor
)
else:
self.session_augmentor = None
# Error check the input arguments for simulation
self._check_args()
# Initialize speaker permutations to maximize the number of speakers in the created dataset
self._permutated_speaker_inds = self._init_speaker_permutations(
num_sess=self._params.data_simulator.session_config.num_sessions,
num_speakers=self._params.data_simulator.session_config.num_speakers,
all_speaker_ids=self._speaker_samples.keys(),
random_seed=self._params.data_simulator.random_seed,
)
        # Initialize multiprocessing-related variables
self.num_workers = self._params.get("num_workers", 1)
self.multiprocessing_chunksize = self._params.data_simulator.get('multiprocessing_chunksize', 10000)
self.chunk_count = self._init_chunk_count()
def _init_speaker_permutations(self, num_sess: int, num_speakers: int, all_speaker_ids: List, random_seed: int):
"""
Initialize the speaker permutations for the number of speakers in the session.
When generating the simulated sessions, we want to include as many speakers as possible.
This function generates a set of permutations that can be used to sweep all speakers in
the source dataset to make sure we maximize the total number of speakers included in
the simulated sessions.
Args:
num_sess (int): Number of sessions to generate
num_speakers (int): Number of speakers in each session
            all_speaker_ids (list): List of all speaker IDs
            random_seed (int): Seed for the random number generator
Returns:
permuted_inds (np.array):
Array of permuted speaker indices to use for each session
Dimensions: (num_sess, num_speakers)
"""
np.random.seed(random_seed)
all_speaker_id_counts = len(list(all_speaker_ids))
# Calculate how many permutations are needed
perm_set_count = int(np.ceil(num_speakers * num_sess / all_speaker_id_counts))
target_count = num_speakers * num_sess
for count in range(perm_set_count):
if target_count < all_speaker_id_counts:
seq_len = target_count
else:
seq_len = all_speaker_id_counts
if seq_len <= 0:
raise ValueError(f"seq_len is {seq_len} at count {count} and should be greater than 0")
if count == 0:
permuted_inds = np.random.permutation(len(all_speaker_ids))[:seq_len]
else:
permuted_inds = np.hstack((permuted_inds, np.random.permutation(len(all_speaker_ids))[:seq_len]))
target_count -= seq_len
logging.info(f"Total {all_speaker_id_counts} speakers in the source dataset.")
logging.info(f"Initialized speaker permutations for {num_sess} sessions with {num_speakers} speakers each.")
return permuted_inds.reshape(num_sess, num_speakers)
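    # Illustrative note (not part of the original file): with 3 source speakers, num_sess=2 and
    # num_speakers=2, perm_set_count = ceil(4 / 3) = 2, so two permutation draws (3 + 1 indices)
    # are concatenated and reshaped to (num_sess, num_speakers), e.g. [[2, 0], [1, 2]].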
def _init_chunk_count(self):
"""
        Initialize the chunk count for multiprocessing to prevent overflow of job counts.
The multi-processing pipeline can freeze if there are more than approximately 10,000 jobs
in the pipeline at the same time.
"""
return int(np.ceil(self._params.data_simulator.session_config.num_sessions / self.multiprocessing_chunksize))
def _check_args(self):
"""
Checks YAML arguments to ensure they are within valid ranges.
"""
if self._params.data_simulator.session_config.num_speakers < 1:
raise Exception("At least one speaker is required for making audio sessions (num_speakers < 1)")
if (
self._params.data_simulator.session_params.turn_prob < 0
or self._params.data_simulator.session_params.turn_prob > 1
):
raise Exception("Turn probability is outside of [0,1]")
elif (
self._params.data_simulator.session_params.turn_prob < self._turn_prob_min
and self._params.data_simulator.speaker_enforcement.enforce_num_speakers == True
):
logging.warning(
"Turn probability is less than {self._turn_prob_min} while enforce_num_speakers=True, which may result in excessive session lengths. Forcing turn_prob to 0.5."
)
self._params.data_simulator.session_params.turn_prob = self._turn_prob_min
if self._params.data_simulator.session_params.max_audio_read_sec < 2.5:
raise Exception("Max audio read time must be greater than 2.5 seconds")
if self._params.data_simulator.session_params.sentence_length_params[0] <= 0:
raise Exception(
"k (number of success until the exp. ends) in Sentence length parameter value must be a positive number"
)
if not (0 < self._params.data_simulator.session_params.sentence_length_params[1] <= 1):
raise Exception("p (success probability) value in sentence length parameter must be in range (0,1]")
if (
self._params.data_simulator.session_params.mean_overlap < 0
or self._params.data_simulator.session_params.mean_overlap > 1
):
raise Exception("Mean overlap is outside of [0,1]")
if (
self._params.data_simulator.session_params.mean_silence < 0
or self._params.data_simulator.session_params.mean_silence > 1
):
raise Exception("Mean silence is outside of [0,1]")
if self._params.data_simulator.session_params.mean_silence_var < 0:
raise Exception("Mean silence variance is not below 0")
if (
self._params.data_simulator.session_params.mean_silence > 0
and self._params.data_simulator.session_params.mean_silence_var
>= self._params.data_simulator.session_params.mean_silence
* (1 - self._params.data_simulator.session_params.mean_silence)
):
raise Exception("Mean silence variance should be lower than mean_silence * (1-mean_silence)")
if self._params.data_simulator.session_params.per_silence_var < 0:
raise Exception("Per silence variance is below 0")
if self._params.data_simulator.session_params.mean_overlap_var < 0:
raise Exception("Mean overlap variance is not larger than 0")
if (
self._params.data_simulator.session_params.mean_overlap > 0
and self._params.data_simulator.session_params.mean_overlap_var
>= self._params.data_simulator.session_params.mean_overlap
* (1 - self._params.data_simulator.session_params.mean_overlap)
):
raise Exception("Mean overlap variance should be lower than mean_overlap * (1-mean_overlap)")
if self._params.data_simulator.session_params.per_overlap_var < 0:
raise Exception("Per overlap variance is not larger than 0")
if (
self._params.data_simulator.session_params.min_dominance < 0
or self._params.data_simulator.session_params.min_dominance > 1
):
raise Exception("Minimum dominance is outside of [0,1]")
if (
self._params.data_simulator.speaker_enforcement.enforce_time[0] < 0
or self._params.data_simulator.speaker_enforcement.enforce_time[0] > 1
):
raise Exception("Speaker enforcement start is outside of [0,1]")
if (
self._params.data_simulator.speaker_enforcement.enforce_time[1] < 0
or self._params.data_simulator.speaker_enforcement.enforce_time[1] > 1
):
raise Exception("Speaker enforcement end is outside of [0,1]")
if (
self._params.data_simulator.session_params.min_dominance
* self._params.data_simulator.session_config.num_speakers
> 1
):
raise Exception("Number of speakers times minimum dominance is greater than 1")
if (
self._params.data_simulator.session_params.window_type not in ['hamming', 'hann', 'cosine']
and self._params.data_simulator.session_params.window_type is not None
):
raise Exception("Incorrect window type provided")
if len(self._manifest) == 0:
raise Exception("Manifest file is empty. Check that the source path is correct.")
def clean_up(self):
"""
Clear the system memory. Cache data for audio files and alignments are removed.
"""
self._sentence = None
self._words = []
self._alignments = []
self._audio_read_buffer_dict = {}
torch.cuda.empty_cache()
def _get_speaker_dominance(self) -> List[float]:
"""
Get the dominance value for each speaker, accounting for the dominance variance and
the minimum per-speaker dominance.
Returns:
dominance (list): Per-speaker dominance
"""
dominance_mean = 1.0 / self._params.data_simulator.session_config.num_speakers
dominance = np.random.normal(
loc=dominance_mean,
scale=self._params.data_simulator.session_params.dominance_var,
size=self._params.data_simulator.session_config.num_speakers,
)
dominance = np.clip(dominance, a_min=0, a_max=np.inf)
# normalize while maintaining minimum dominance
total = np.sum(dominance)
if total == 0:
for i in range(len(dominance)):
dominance[i] += self._params.data_simulator.session_params.min_dominance
# scale accounting for min_dominance which has to be added after
dominance = (dominance / total) * (
1
- self._params.data_simulator.session_params.min_dominance
* self._params.data_simulator.session_config.num_speakers
)
for i in range(len(dominance)):
dominance[i] += self._params.data_simulator.session_params.min_dominance
if (
i > 0
): # dominance values are cumulative to make it easy to select the speaker using a random value in [0,1]
dominance[i] = dominance[i] + dominance[i - 1]
return dominance
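    # Illustrative note (not part of the original file): the returned dominance values are
    # cumulative, e.g. raw shares [0.4, 0.35, 0.25] become [0.4, 0.75, 1.0], so _get_next_speaker
    # can map a uniform draw in [0, 1] to a speaker by walking to the first entry the draw does
    # not exceed.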
def _increase_speaker_dominance(
self, base_speaker_dominance: List[float], factor: int
) -> Tuple[List[float], bool]:
"""
Increase speaker dominance for unrepresented speakers (used only in enforce mode).
Increases the dominance for these speakers by the input factor (and then re-normalizes the probabilities to 1).
Args:
base_speaker_dominance (list): Dominance values for each speaker.
factor (int): Factor to increase dominance of unrepresented speakers by.
Returns:
dominance (list): Per-speaker dominance
enforce (bool): Whether to keep enforce mode turned on
"""
increase_percent = []
for i in range(self._params.data_simulator.session_config.num_speakers):
if self._furthest_sample[i] == 0:
increase_percent.append(i)
# ramp up enforce counter until speaker is sampled, then reset once all speakers have spoken
if len(increase_percent) > 0:
# extract original per-speaker probabilities
dominance = np.copy(base_speaker_dominance)
for i in range(len(dominance) - 1, 0, -1):
dominance[i] = dominance[i] - dominance[i - 1]
# increase specified speakers by the desired factor
for i in increase_percent:
dominance[i] = dominance[i] * factor
# renormalize
dominance = dominance / np.sum(dominance)
for i in range(1, len(dominance)):
dominance[i] = dominance[i] + dominance[i - 1]
enforce = True
else: # no unrepresented speakers, so enforce mode can be turned off
dominance = base_speaker_dominance
enforce = False
return dominance, enforce
def _set_speaker_volume(self):
"""
Set the volume for each speaker (either equal volume or variable speaker volume).
"""
if self._params.data_simulator.session_params.normalization_type == 'equal':
self._volume = np.ones(self._params.data_simulator.session_config.num_speakers)
elif self._params.data_simulator.session_params.normalization_type == 'variable':
self._volume = np.random.normal(
loc=1.0,
scale=self._params.data_simulator.session_params.normalization_var,
size=self._params.data_simulator.session_config.num_speakers,
)
self._volume = np.clip(
np.array(self._volume),
a_min=self._params.data_simulator.session_params.min_volume,
a_max=self._params.data_simulator.session_params.max_volume,
).tolist()
def _get_next_speaker(self, prev_speaker: int, dominance: List[float]) -> int:
"""
Get the next speaker (accounting for turn probability and dominance distribution).
Args:
prev_speaker (int): Previous speaker turn.
dominance (list): Dominance values for each speaker.
Returns:
prev_speaker/speaker_turn (int): Speaker turn
"""
if self._params.data_simulator.session_config.num_speakers == 1:
prev_speaker = 0 if prev_speaker is None else prev_speaker
return prev_speaker
else:
if (
np.random.uniform(0, 1) > self._params.data_simulator.session_params.turn_prob
and prev_speaker is not None
):
return prev_speaker
else:
speaker_turn = prev_speaker
while speaker_turn == prev_speaker: # ensure another speaker goes next
rand = np.random.uniform(0, 1)
speaker_turn = 0
while rand > dominance[speaker_turn]:
speaker_turn += 1
return speaker_turn
def _get_window(self, window_amount: int, start: bool = False):
"""
Get window curve to alleviate abrupt change of time-series signal when segmenting audio samples.
Args:
window_amount (int): Window length (in terms of number of samples).
start (bool): If true, return the first half of the window.
Returns:
window (tensor): Half window (either first half or second half)
"""
if self._params.data_simulator.session_params.window_type == 'hamming':
window = hamming(window_amount * 2)
elif self._params.data_simulator.session_params.window_type == 'hann':
window = hann(window_amount * 2)
elif self._params.data_simulator.session_params.window_type == 'cosine':
window = cosine(window_amount * 2)
else:
raise Exception("Incorrect window type provided")
window = torch.from_numpy(window).to(self._device)
# return the first half or second half of the window
if start:
return window[:window_amount]
else:
return window[window_amount:]
def _get_start_buffer_and_window(self, first_alignment: int) -> Tuple[int, int]:
"""
Get the start cutoff and window length for smoothing the start of the sentence.
Args:
first_alignment (int): Start of the first word (in terms of number of samples).
Returns:
start_cutoff (int): Amount into the audio clip to start
window_amount (int): Window length
"""
window_amount = int(self._params.data_simulator.session_params.window_size * self._params.data_simulator.sr)
start_buffer = int(self._params.data_simulator.session_params.start_buffer * self._params.data_simulator.sr)
if first_alignment < start_buffer:
window_amount = 0
start_cutoff = 0
elif first_alignment < start_buffer + window_amount:
window_amount = first_alignment - start_buffer
start_cutoff = 0
else:
start_cutoff = first_alignment - start_buffer - window_amount
return start_cutoff, window_amount
def _get_end_buffer_and_window(
self, current_sample_cursor: int, remaining_dur_samples: int, remaining_len_audio_file: int
) -> Tuple[int, int]:
"""
Get the end buffer and window length for smoothing the end of the sentence.
Args:
current_sample_cursor (int): Current location in the target file (in terms of number of samples).
remaining_dur_samples (int): Remaining duration in the target file (in terms of number of samples).
remaining_len_audio_file (int): Length remaining in audio file (in terms of number of samples).
Returns:
release_buffer (int): Amount after the end of the last alignment to include
window_amount (int): Window length
"""
window_amount = int(self._params.data_simulator.session_params.window_size * self._params.data_simulator.sr)
release_buffer = int(
self._params.data_simulator.session_params.release_buffer * self._params.data_simulator.sr
)
if current_sample_cursor + release_buffer > remaining_dur_samples:
release_buffer = remaining_dur_samples - current_sample_cursor
window_amount = 0
elif current_sample_cursor + window_amount + release_buffer > remaining_dur_samples:
window_amount = remaining_dur_samples - current_sample_cursor - release_buffer
if remaining_len_audio_file < release_buffer:
release_buffer = remaining_len_audio_file
window_amount = 0
elif remaining_len_audio_file < release_buffer + window_amount:
window_amount = remaining_len_audio_file - release_buffer
return release_buffer, window_amount
def _check_missing_speakers(self, num_missing: int = 0):
"""
Check if any speakers were not included in the clip and display a warning.
Args:
num_missing (int): Number of missing speakers.
"""
for k in range(len(self._furthest_sample)):
if self._furthest_sample[k] == 0:
num_missing += 1
if num_missing != 0:
warnings.warn(
f"{self._params.data_simulator.session_config.num_speakers - num_missing}"
f"speakers were included in the clip instead of the requested amount of "
f"{self._params.data_simulator.session_config.num_speakers}"
)
def _add_file(
self,
audio_manifest: dict,
audio_file,
sentence_word_count: int,
max_word_count_in_sentence: int,
max_samples_in_sentence: int,
random_offset: bool = False,
) -> Tuple[int, torch.Tensor]:
"""
Add audio file to current sentence (up to the desired number of words).
Uses the alignments to segment the audio file.
NOTE: 0 index is always silence in `audio_manifest['words']`, so we choose `offset_idx=1` as the first word
Args:
audio_manifest (dict): Line from manifest file for current audio file
audio_file (tensor): Current loaded audio file
sentence_word_count (int): Running count for number of words in sentence
max_word_count_in_sentence (int): Maximum count for number of words in sentence
max_samples_in_sentence (int): Maximum length for sentence in terms of samples
Returns:
            sentence_word_count+current_word_count (int): Running word count
            len(self._sentence) (int): Current length of the built sentence in samples
"""
# In general, random offset is not needed since random silence index has already been chosen
if random_offset:
offset_idx = np.random.randint(low=1, high=len(audio_manifest['words']))
else:
offset_idx = 1
first_alignment = int(audio_manifest['alignments'][offset_idx - 1] * self._params.data_simulator.sr)
start_cutoff, start_window_amount = self._get_start_buffer_and_window(first_alignment)
if not self._params.data_simulator.session_params.start_window: # cut off the start of the sentence
start_window_amount = 0
# Ensure the desired number of words are added and the length of the output session isn't exceeded
sentence_samples = len(self._sentence)
remaining_dur_samples = max_samples_in_sentence - sentence_samples
remaining_duration = max_word_count_in_sentence - sentence_word_count
prev_dur_samples, dur_samples, curr_dur_samples = 0, 0, 0
current_word_count = 0
word_idx = offset_idx
silence_count = 1
while (
current_word_count < remaining_duration
and dur_samples < remaining_dur_samples
and word_idx < len(audio_manifest['words'])
):
dur_samples = int(audio_manifest['alignments'][word_idx] * self._params.data_simulator.sr) - start_cutoff
# check the length of the generated sentence in terms of sample count (int).
if curr_dur_samples + dur_samples > remaining_dur_samples:
# if the upcoming loop will exceed the remaining sample count, break out of the loop.
break
word = audio_manifest['words'][word_idx]
if silence_count > 0 and word == "":
break
self._words.append(word)
self._alignments.append(
float(sentence_samples * 1.0 / self._params.data_simulator.sr)
- float(start_cutoff * 1.0 / self._params.data_simulator.sr)
+ audio_manifest['alignments'][word_idx]
)
if word == "":
word_idx += 1
silence_count += 1
continue
elif self._text == "":
self._text += word
else:
self._text += " " + word
word_idx += 1
current_word_count += 1
prev_dur_samples = dur_samples
curr_dur_samples += dur_samples
# add audio clip up to the final alignment
        if self._params.data_simulator.session_params.window_type is not None:  # apply windowing at the start of the sentence
if start_window_amount > 0: # include window
window = self._get_window(start_window_amount, start=True)
self._sentence = self._sentence.to(self._device)
self._sentence = torch.cat(
(
self._sentence,
torch.multiply(audio_file[start_cutoff : start_cutoff + start_window_amount], window),
),
0,
)
self._sentence = torch.cat(
(self._sentence, audio_file[start_cutoff + start_window_amount : start_cutoff + prev_dur_samples],), 0,
).to(self._device)
else:
self._sentence = torch.cat(
(self._sentence, audio_file[start_cutoff : start_cutoff + prev_dur_samples]), 0
).to(self._device)
# windowing at the end of the sentence
if (
word_idx < len(audio_manifest['words'])
) and self._params.data_simulator.session_params.window_type is not None:
release_buffer, end_window_amount = self._get_end_buffer_and_window(
prev_dur_samples, remaining_dur_samples, len(audio_file[start_cutoff + prev_dur_samples :]),
)
self._sentence = torch.cat(
(
self._sentence,
audio_file[start_cutoff + prev_dur_samples : start_cutoff + prev_dur_samples + release_buffer],
),
0,
).to(self._device)
if end_window_amount > 0: # include window
window = self._get_window(end_window_amount, start=False)
sig_start = start_cutoff + prev_dur_samples + release_buffer
sig_end = start_cutoff + prev_dur_samples + release_buffer + end_window_amount
windowed_audio_file = torch.multiply(audio_file[sig_start:sig_end], window)
self._sentence = torch.cat((self._sentence, windowed_audio_file), 0).to(self._device)
del audio_file
return sentence_word_count + current_word_count, len(self._sentence)
def _build_sentence(
self,
speaker_turn: int,
speaker_ids: List[str],
speaker_wav_align_map: Dict[str, list],
max_samples_in_sentence: int,
):
"""
Build a new sentence by attaching utterance samples together until the sentence has reached a desired length.
While generating the sentence, alignment information is used to segment the audio.
Args:
speaker_turn (int): Current speaker turn.
speaker_ids (list): LibriSpeech speaker IDs for each speaker in the current session.
speaker_wav_align_map (dict): Dictionary containing speaker IDs and their corresponding wav filepath and alignments.
max_samples_in_sentence (int): Maximum length for sentence in terms of samples
"""
        # select sentence length (in words) for the current speaker turn
sl = (
np.random.negative_binomial(
self._params.data_simulator.session_params.sentence_length_params[0],
self._params.data_simulator.session_params.sentence_length_params[1],
)
+ 1
)
# initialize sentence, text, words, alignments
self._sentence = torch.zeros(0, dtype=torch.float64, device=self._device)
self._text = ""
self._words, self._alignments = [], []
sentence_word_count, sentence_samples = 0, 0
# build sentence
while sentence_word_count < sl and sentence_samples < max_samples_in_sentence:
audio_manifest = load_speaker_sample(
speaker_wav_align_map=speaker_wav_align_map,
speaker_ids=speaker_ids,
speaker_turn=speaker_turn,
min_alignment_count=self._min_alignment_count,
)
offset_index = get_random_offset_index(
audio_manifest=audio_manifest,
audio_read_buffer_dict=self._audio_read_buffer_dict,
offset_min=0,
max_audio_read_sec=self._max_audio_read_sec,
min_alignment_count=self._min_alignment_count,
)
audio_file, sr, audio_manifest = read_audio_from_buffer(
audio_manifest=audio_manifest,
buffer_dict=self._audio_read_buffer_dict,
offset_index=offset_index,
device=self._device,
max_audio_read_sec=self._max_audio_read_sec,
min_alignment_count=self._min_alignment_count,
read_subset=True,
)
            # Step 6-2: Add optional perturbations to the specific audio segment (i.e. to `self._sentence`)
if self._params.data_simulator.segment_augmentor.add_seg_aug:
audio_file = perturb_audio(audio_file, sr, self.segment_augmentor, device=self._device)
sentence_word_count, sentence_samples = self._add_file(
audio_manifest, audio_file, sentence_word_count, sl, max_samples_in_sentence
)
# per-speaker normalization (accounting for active speaker time)
if self._params.data_simulator.session_params.normalize and torch.max(torch.abs(self._sentence)) > 0:
splits = get_split_points_in_alignments(
words=self._words,
alignments=self._alignments,
split_buffer=self._params.data_simulator.session_params.split_buffer,
sr=self._params.data_simulator.sr,
sentence_audio_len=len(self._sentence),
)
self._sentence = per_speaker_normalize(
sentence_audio=self._sentence,
splits=splits,
speaker_turn=speaker_turn,
volume=self._volume,
device=self._device,
)
def _add_silence_or_overlap(
self,
speaker_turn: int,
prev_speaker: int,
start: int,
length: int,
session_len_samples: int,
prev_len_samples: int,
enforce: bool,
) -> int:
"""
Returns new overlapped (or shifted) start position after inserting overlap or silence.
Args:
speaker_turn (int): The integer index of the current speaker turn.
prev_speaker (int): The integer index of the previous speaker turn.
start (int): Current start of the audio file being inserted.
length (int): Length of the audio file being inserted.
session_len_samples (int): Maximum length of the session in terms of number of samples
prev_len_samples (int): Length of previous sentence (in terms of number of samples)
enforce (bool): Whether speaker enforcement mode is being used
Returns:
new_start (int): New starting position in the session accounting for overlap or silence
"""
running_len_samples = start + length
# `length` is the length of the current sentence to be added, so not included in self.sampler.running_speech_len_samples
non_silence_len_samples = self.sampler.running_speech_len_samples + length
# compare silence and overlap ratios
add_overlap = self.sampler.silence_vs_overlap_selector(running_len_samples, non_silence_len_samples)
# choose overlap if this speaker is not the same as the previous speaker and add_overlap is True.
if prev_speaker != speaker_turn and prev_speaker is not None and add_overlap:
desired_overlap_amount = self.sampler.sample_from_overlap_model(non_silence_len_samples)
new_start = start - desired_overlap_amount
# avoid overlap at start of clip
if new_start < 0:
desired_overlap_amount -= 0 - new_start
self._missing_overlap += 0 - new_start
new_start = 0
# if same speaker ends up overlapping from any previous clip, pad with silence instead
if new_start < self._furthest_sample[speaker_turn]:
desired_overlap_amount -= self._furthest_sample[speaker_turn] - new_start
self._missing_overlap += self._furthest_sample[speaker_turn] - new_start
new_start = self._furthest_sample[speaker_turn]
prev_start = start - prev_len_samples
prev_end = start
new_end = new_start + length
# check overlap amount to calculate the actual amount of generated overlaps
overlap_amount = 0
if is_overlap([prev_start, prev_end], [new_start, new_end]):
overlap_range = get_overlap_range([prev_start, prev_end], [new_start, new_end])
overlap_amount = max(overlap_range[1] - overlap_range[0], 0)
if overlap_amount < desired_overlap_amount:
self._missing_overlap += desired_overlap_amount - overlap_amount
self.sampler.running_overlap_len_samples += overlap_amount
# if we are not adding overlap, add silence
else:
silence_amount = self.sampler.sample_from_silence_model(running_len_samples)
if start + length + silence_amount > session_len_samples and not enforce:
new_start = max(session_len_samples - length, start)
else:
new_start = start + silence_amount
return new_start
def _get_session_meta_data(self, array: np.ndarray, snr: float) -> dict:
"""
Get meta data for the current session.
Args:
array (np.ndarray): audio array
snr (float): signal-to-noise ratio
Returns:
dict: meta data
"""
meta_data = {
"duration": array.shape[0] / self._params.data_simulator.sr,
"silence_mean": self.sampler.sess_silence_mean,
"overlap_mean": self.sampler.sess_overlap_mean,
"bg_snr": snr,
"speaker_ids": self._speaker_ids,
"speaker_volumes": list(self._volume),
}
return meta_data
def _get_session_silence_from_rttm(self, rttm_list: List[str], running_len_samples: int):
"""
Calculate the total speech and silence duration in the current session using RTTM file.
Args:
rttm_list (list):
List of RTTM timestamps
running_len_samples (int):
Total number of samples generated so far in the current session
Returns:
sess_speech_len_rttm (int):
The total number of speech samples in the current session
sess_silence_len_rttm (int):
The total number of silence samples in the current session
"""
all_sample_list = []
for x_raw in rttm_list:
            x = x_raw.split()
all_sample_list.append([float(x[0]), float(x[1])])
self._merged_speech_intervals = merge_float_intervals(all_sample_list)
total_speech_in_secs = sum([x[1] - x[0] for x in self._merged_speech_intervals])
total_silence_in_secs = running_len_samples / self._params.data_simulator.sr - total_speech_in_secs
sess_speech_len = int(total_speech_in_secs * self._params.data_simulator.sr)
sess_silence_len = int(total_silence_in_secs * self._params.data_simulator.sr)
return sess_speech_len, sess_silence_len
def _add_sentence_to_array(
self, start: int, length: int, array: torch.Tensor, is_speech: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, int]:
"""
Add a sentence to the session array containing time-series signal.
Args:
start (int): Starting position in the session
length (int): Length of the sentence
array (torch.Tensor): Session array
is_speech (torch.Tensor): Session array containing speech/non-speech labels
Returns:
array (torch.Tensor): Session array in torch.Tensor format
is_speech (torch.Tensor): Session array containing speech/non-speech labels in torch.Tensor format
"""
end = start + length
if end > len(array): # only occurs in enforce mode
array = torch.nn.functional.pad(array, (0, end - len(array)))
is_speech = torch.nn.functional.pad(is_speech, (0, end - len(is_speech)))
array[start:end] += self._sentence
is_speech[start:end] = 1
return array, is_speech, end
def _generate_session(
self,
idx: int,
basepath: str,
filename: str,
speaker_ids: List[str],
speaker_wav_align_map: Dict[str, list],
noise_samples: list,
device: torch.device,
enforce_counter: int = 2,
):
"""
_generate_session function without RIR simulation.
Generate a multispeaker audio session and corresponding label files.
Args:
idx (int): Index for current session (out of total number of sessions).
basepath (str): Path to output directory.
filename (str): Filename for output files.
speaker_ids (list): List of speaker IDs that will be used in this session.
speaker_wav_align_map (dict): Dictionary containing speaker IDs and their corresponding wav filepath and alignments.
noise_samples (list): List of randomly sampled noise source files that will be used for generating this session.
device (torch.device): Device to use for generating this session.
enforce_counter (int): In enforcement mode, dominance is increased by a factor of enforce_counter for unrepresented speakers
"""
random_seed = self._params.data_simulator.random_seed
np.random.seed(random_seed + idx)
self._device = device
speaker_dominance = self._get_speaker_dominance() # randomly determine speaker dominance
base_speaker_dominance = np.copy(speaker_dominance)
self._set_speaker_volume()
running_len_samples, prev_len_samples = 0, 0
prev_speaker = None
self.annotator.init_annotation_lists()
self._noise_samples = noise_samples
self._furthest_sample = [0 for n in range(self._params.data_simulator.session_config.num_speakers)]
self._missing_silence = 0
# hold enforce until all speakers have spoken
enforce_time = np.random.uniform(
self._params.data_simulator.speaker_enforcement.enforce_time[0],
self._params.data_simulator.speaker_enforcement.enforce_time[1],
)
enforce = self._params.data_simulator.speaker_enforcement.enforce_num_speakers
session_len_samples = int(
(self._params.data_simulator.session_config.session_length * self._params.data_simulator.sr)
)
array = torch.zeros(session_len_samples).to(self._device)
is_speech = torch.zeros(session_len_samples).to(self._device)
self.sampler.get_session_silence_mean()
self.sampler.get_session_overlap_mean()
while running_len_samples < session_len_samples or enforce:
# Step 1: Prepare parameters for sentence generation
# Enforce speakers depending on running length
if running_len_samples > enforce_time * session_len_samples and enforce:
speaker_dominance, enforce = self._increase_speaker_dominance(base_speaker_dominance, enforce_counter)
if enforce:
enforce_counter += 1
# Step 2: Select a speaker
speaker_turn = self._get_next_speaker(prev_speaker, speaker_dominance)
# Calculate parameters for building a sentence (only add if remaining length > specific time)
max_samples_in_sentence = session_len_samples - running_len_samples
if enforce:
max_samples_in_sentence = float('inf')
elif (
max_samples_in_sentence
< self._params.data_simulator.session_params.end_buffer * self._params.data_simulator.sr
):
break
# Step 3: Generate a sentence
self._build_sentence(speaker_turn, speaker_ids, speaker_wav_align_map, max_samples_in_sentence)
length = len(self._sentence)
# Step 4: Generate a timestamp for either silence or overlap
start = self._add_silence_or_overlap(
speaker_turn=speaker_turn,
prev_speaker=prev_speaker,
start=running_len_samples,
length=length,
session_len_samples=session_len_samples,
prev_len_samples=prev_len_samples,
enforce=enforce,
)
# step 5: add sentence to array
array, is_speech, end = self._add_sentence_to_array(
start=start, length=length, array=array, is_speech=is_speech,
)
# Step 6: Build entries for output files
new_rttm_entries = self.annotator.create_new_rttm_entry(
words=self._words,
alignments=self._alignments,
start=start / self._params.data_simulator.sr,
end=end / self._params.data_simulator.sr,
speaker_id=speaker_ids[speaker_turn],
)
self.annotator.annote_lists['rttm'].extend(new_rttm_entries)
new_json_entry = self.annotator.create_new_json_entry(
text=self._text,
wav_filename=os.path.join(basepath, filename + '.wav'),
start=start / self._params.data_simulator.sr,
length=length / self._params.data_simulator.sr,
speaker_id=speaker_ids[speaker_turn],
rttm_filepath=os.path.join(basepath, filename + '.rttm'),
ctm_filepath=os.path.join(basepath, filename + '.ctm'),
)
self.annotator.annote_lists['json'].append(new_json_entry)
new_ctm_entries = self.annotator.create_new_ctm_entry(
words=self._words,
alignments=self._alignments,
session_name=filename,
speaker_id=speaker_ids[speaker_turn],
start=int(start / self._params.data_simulator.sr),
)
self.annotator.annote_lists['ctm'].extend(new_ctm_entries)
running_len_samples = np.maximum(running_len_samples, end)
(
self.sampler.running_speech_len_samples,
self.sampler.running_silence_len_samples,
) = self._get_session_silence_from_rttm(
rttm_list=self.annotator.annote_lists['rttm'], running_len_samples=running_len_samples
)
self._furthest_sample[speaker_turn] = running_len_samples
prev_speaker = speaker_turn
prev_len_samples = length
# Step 7-1: Add optional perturbations to the whole session, such as white noise.
if self._params.data_simulator.session_augmentor.add_sess_aug:
# NOTE: This perturbation is not reflected in the session SNR in meta dictionary.
array = perturb_audio(array, self._params.data_simulator.sr, self.session_augmentor, device=array.device)
# Step 7-2: Additive background noise from noise manifest files
if self._params.data_simulator.background_noise.add_bg:
if len(self._noise_samples) > 0:
avg_power_array = torch.mean(array[is_speech == 1] ** 2)
bg, snr = get_background_noise(
len_array=len(array),
power_array=avg_power_array,
noise_samples=self._noise_samples,
audio_read_buffer_dict=self._audio_read_buffer_dict,
snr_min=self._params.data_simulator.background_noise.snr_min,
snr_max=self._params.data_simulator.background_noise.snr_max,
background_noise_snr=self._params.data_simulator.background_noise.snr,
seed=(random_seed + idx),
device=self._device,
)
array += bg
else:
raise ValueError('No background noise samples found in self._noise_samples.')
else:
snr = "N/A"
# Step 7: Normalize and write to disk
array = normalize_audio(array)
if torch.is_tensor(array):
array = array.cpu().numpy()
sf.write(os.path.join(basepath, filename + '.wav'), array, self._params.data_simulator.sr)
self.annotator.write_annotation_files(
basepath=basepath, filename=filename, meta_data=self._get_session_meta_data(array=array, snr=snr),
)
# Step 8: Clean up memory
del array
self.clean_up()
return basepath, filename
def generate_sessions(self, random_seed: int = None):
"""
Generate several multispeaker audio sessions and corresponding list files.
Args:
random_seed (int): random seed for reproducibility
"""
logging.info(f"Generating Diarization Sessions")
if random_seed is None:
random_seed = self._params.data_simulator.random_seed
np.random.seed(random_seed)
output_dir = self._params.data_simulator.outputs.output_dir
basepath = get_cleaned_base_path(
output_dir, overwrite_output=self._params.data_simulator.outputs.overwrite_output
)
OmegaConf.save(self._params, os.path.join(output_dir, "params.yaml"))
tp = concurrent.futures.ProcessPoolExecutor(max_workers=self.num_workers)
futures = []
num_sessions = self._params.data_simulator.session_config.num_sessions
source_noise_manifest = read_noise_manifest(
add_bg=self._params.data_simulator.background_noise.add_bg,
background_manifest=self._params.data_simulator.background_noise.background_manifest,
)
queue = []
        # add randomly sampled arguments to a list (queue) for multiprocessing
for sess_idx in range(num_sessions):
filename = self._params.data_simulator.outputs.output_filename + f"_{sess_idx}"
speaker_ids = get_speaker_ids(
sess_idx=sess_idx,
speaker_samples=self._speaker_samples,
permutated_speaker_inds=self._permutated_speaker_inds,
)
speaker_wav_align_map = get_speaker_samples(speaker_ids=speaker_ids, speaker_samples=self._speaker_samples)
noise_samples = self.sampler.sample_noise_manifest(noise_manifest=source_noise_manifest)
if torch.cuda.is_available():
device = torch.device(f"cuda:{sess_idx % torch.cuda.device_count()}")
else:
device = self._device
queue.append((sess_idx, basepath, filename, speaker_ids, speaker_wav_align_map, noise_samples, device))
# for multiprocessing speed, we avoid loading potentially huge manifest list and speaker sample files into each process.
if self.num_workers > 1:
self._manifest = None
self._speaker_samples = None
# Chunk the sessions into smaller chunks for very large number of sessions (10K+ sessions)
for chunk_idx in range(self.chunk_count):
futures = []
stt_idx, end_idx = (
chunk_idx * self.multiprocessing_chunksize,
min((chunk_idx + 1) * self.multiprocessing_chunksize, num_sessions),
)
for sess_idx in range(stt_idx, end_idx):
self._furthest_sample = [0 for n in range(self._params.data_simulator.session_config.num_speakers)]
self._audio_read_buffer_dict = {}
if self.num_workers > 1:
futures.append(tp.submit(self._generate_session, *queue[sess_idx]))
else:
futures.append(queue[sess_idx])
if self.num_workers > 1:
generator = concurrent.futures.as_completed(futures)
else:
generator = futures
for future in tqdm(
generator,
desc=f"[{chunk_idx+1}/{self.chunk_count}] Waiting jobs from {stt_idx+1: 2} to {end_idx: 2}",
unit="jobs",
total=len(futures),
):
if self.num_workers > 1:
basepath, filename = future.result()
else:
self._noise_samples = self.sampler.sample_noise_manifest(noise_manifest=source_noise_manifest,)
basepath, filename = self._generate_session(*future)
self.annotator.add_to_filename_lists(basepath=basepath, filename=filename)
# throw warning if number of speakers is less than requested
self._check_missing_speakers()
tp.shutdown()
self.annotator.write_filelist_files(basepath=basepath)
logging.info(f"Data simulation has been completed, results saved at: {basepath}")
class RIRMultiSpeakerSimulator(MultiSpeakerSimulator):
"""
RIR Augmented Multispeaker Audio Session Simulator - simulates multispeaker audio sessions using single-speaker
audio files and corresponding word alignments, as well as simulated RIRs for augmentation.
Args:
cfg: OmegaConf configuration loaded from yaml file.
Parameters (in addition to the base MultiSpeakerSimulator parameters):
rir_generation:
use_rir (bool): Whether to generate synthetic RIR
toolkit (str): Which toolkit to use ("pyroomacoustics", "gpuRIR")
room_config:
room_sz (list): Size of the shoebox room environment (1d array for specific, 2d array for random range to be
sampled from)
pos_src (list): Positions of the speakers in the simulated room environment (2d array for specific, 3d array
for random ranges to be sampled from)
noise_src_pos (list): Position in room for the ambient background noise source
mic_config:
num_channels (int): Number of output audio channels
pos_rcv (list): Microphone positions in the simulated room environment (1d/2d array for specific, 2d/3d array
for range assuming num_channels is 1/2+)
orV_rcv (list or null): Microphone orientations (needed for non-omnidirectional microphones)
mic_pattern (str): Microphone type ("omni" - omnidirectional) - currently only omnidirectional microphones are
supported for pyroomacoustics
absorbtion_params: (Note that only `T60` is used for pyroomacoustics simulations)
abs_weights (list): Absorption coefficient ratios for each surface
      T60 (float): Room reverberation time (`T60` is the time it takes for the RIR to decay by 60 dB)
att_diff (float): Starting attenuation (if this is different than att_max, the diffuse reverberation model is
used by gpuRIR)
att_max (float): End attenuation when using the diffuse reverberation model (gpuRIR)
"""
def __init__(self, cfg):
super().__init__(cfg)
self._check_args_rir()
def _check_args_rir(self):
"""
Checks RIR YAML arguments to ensure they are within valid ranges
"""
if not (self._params.data_simulator.rir_generation.toolkit in ['pyroomacoustics', 'gpuRIR']):
raise Exception("Toolkit must be pyroomacoustics or gpuRIR")
if self._params.data_simulator.rir_generation.toolkit == 'pyroomacoustics' and not PRA:
raise ImportError("pyroomacoustics should be installed to run this simulator with RIR augmentation")
if self._params.data_simulator.rir_generation.toolkit == 'gpuRIR' and not GPURIR:
raise ImportError("gpuRIR should be installed to run this simulator with RIR augmentation")
if len(self._params.data_simulator.rir_generation.room_config.room_sz) != 3:
raise Exception("Incorrect room dimensions provided")
if self._params.data_simulator.rir_generation.mic_config.num_channels == 0:
raise Exception("Number of channels should be greater or equal to 1")
        if len(self._params.data_simulator.rir_generation.room_config.pos_src) < 2:
            raise Exception("Fewer than two source positions were provided")
        for sublist in self._params.data_simulator.rir_generation.room_config.pos_src:
            if len(sublist) != 3:
                raise Exception("Three coordinates must be provided for source positions")
        if len(self._params.data_simulator.rir_generation.mic_config.pos_rcv) == 0:
            raise Exception("No mic positions were provided")
        for sublist in self._params.data_simulator.rir_generation.mic_config.pos_rcv:
            if len(sublist) != 3:
                raise Exception("Three coordinates must be provided for mic positions")
if self._params.data_simulator.session_config.num_speakers != len(
self._params.data_simulator.rir_generation.room_config.pos_src
):
raise Exception("Number of speakers is not equal to the number of provided source positions")
if self._params.data_simulator.rir_generation.mic_config.num_channels != len(
self._params.data_simulator.rir_generation.mic_config.pos_rcv
):
raise Exception("Number of channels is not equal to the number of provided microphone positions")
if (
not self._params.data_simulator.rir_generation.mic_config.orV_rcv
and self._params.data_simulator.rir_generation.mic_config.mic_pattern != 'omni'
):
raise Exception("Microphone orientations must be provided if mic_pattern != omni")
if self._params.data_simulator.rir_generation.mic_config.orV_rcv is not None:
if len(self._params.data_simulator.rir_generation.mic_config.orV_rcv) != len(
self._params.data_simulator.rir_generation.mic_config.pos_rcv
):
raise Exception("A different number of microphone orientations and microphone positions were provided")
for sublist in self._params.data_simulator.rir_generation.mic_config.orV_rcv:
if len(sublist) != 3:
raise Exception("Three coordinates must be provided for orientations")
def _generate_rir_gpuRIR(self):
"""
Create simulated RIR using the gpuRIR library
Returns:
RIR (tensor): Generated RIR
RIR_pad (int): Length of padding added when convolving the RIR with an audio file
"""
room_sz_tmp = np.array(self._params.data_simulator.rir_generation.room_config.room_sz)
if room_sz_tmp.ndim == 2: # randomize
room_sz = np.zeros(room_sz_tmp.shape[0])
for i in range(room_sz_tmp.shape[0]):
room_sz[i] = np.random.uniform(room_sz_tmp[i, 0], room_sz_tmp[i, 1])
else:
room_sz = room_sz_tmp
pos_src_tmp = np.array(self._params.data_simulator.rir_generation.room_config.pos_src)
if pos_src_tmp.ndim == 3: # randomize
pos_src = np.zeros((pos_src_tmp.shape[0], pos_src_tmp.shape[1]))
for i in range(pos_src_tmp.shape[0]):
for j in range(pos_src_tmp.shape[1]):
                    pos_src[i, j] = np.random.uniform(pos_src_tmp[i, j, 0], pos_src_tmp[i, j, 1])
else:
pos_src = pos_src_tmp
if self._params.data_simulator.background_noise.add_bg:
pos_src = np.vstack((pos_src, self._params.data_simulator.rir_generation.room_config.noise_src_pos))
mic_pos_tmp = np.array(self._params.data_simulator.rir_generation.mic_config.pos_rcv)
if mic_pos_tmp.ndim == 3: # randomize
mic_pos = np.zeros((mic_pos_tmp.shape[0], mic_pos_tmp.shape[1]))
for i in range(mic_pos_tmp.shape[0]):
for j in range(mic_pos_tmp.shape[1]):
                    mic_pos[i, j] = np.random.uniform(mic_pos_tmp[i, j, 0], mic_pos_tmp[i, j, 1])
else:
mic_pos = mic_pos_tmp
orV_rcv = self._params.data_simulator.rir_generation.mic_config.orV_rcv
if orV_rcv: # not needed for omni mics
orV_rcv = np.array(orV_rcv)
mic_pattern = self._params.data_simulator.rir_generation.mic_config.mic_pattern
abs_weights = self._params.data_simulator.rir_generation.absorbtion_params.abs_weights
T60 = self._params.data_simulator.rir_generation.absorbtion_params.T60
att_diff = self._params.data_simulator.rir_generation.absorbtion_params.att_diff
att_max = self._params.data_simulator.rir_generation.absorbtion_params.att_max
sr = self._params.data_simulator.sr
beta = beta_SabineEstimation(room_sz, T60, abs_weights=abs_weights) # Reflection coefficients
Tdiff = att2t_SabineEstimator(att_diff, T60) # Time to start the diffuse reverberation model [s]
Tmax = att2t_SabineEstimator(att_max, T60) # Time to stop the simulation [s]
nb_img = t2n(Tdiff, room_sz) # Number of image sources in each dimension
RIR = simulateRIR(
room_sz, beta, pos_src, mic_pos, nb_img, Tmax, sr, Tdiff=Tdiff, orV_rcv=orV_rcv, mic_pattern=mic_pattern
)
RIR_pad = RIR.shape[2] - 1
return RIR, RIR_pad
def _generate_rir_pyroomacoustics(self) -> Tuple[torch.Tensor, int]:
"""
Create simulated RIR using the pyroomacoustics library
Returns:
RIR (tensor): Generated RIR
RIR_pad (int): Length of padding added when convolving the RIR with an audio file
"""
rt60 = self._params.data_simulator.rir_generation.absorbtion_params.T60 # The desired reverberation time
sr = self._params.data_simulator.sr
room_sz_tmp = np.array(self._params.data_simulator.rir_generation.room_config.room_sz)
if room_sz_tmp.ndim == 2: # randomize
room_sz = np.zeros(room_sz_tmp.shape[0])
for i in range(room_sz_tmp.shape[0]):
room_sz[i] = np.random.uniform(room_sz_tmp[i, 0], room_sz_tmp[i, 1])
else:
room_sz = room_sz_tmp
pos_src_tmp = np.array(self._params.data_simulator.rir_generation.room_config.pos_src)
if pos_src_tmp.ndim == 3: # randomize
pos_src = np.zeros((pos_src_tmp.shape[0], pos_src_tmp.shape[1]))
for i in range(pos_src_tmp.shape[0]):
for j in range(pos_src_tmp.shape[1]):
                    pos_src[i, j] = np.random.uniform(pos_src_tmp[i, j, 0], pos_src_tmp[i, j, 1])
else:
pos_src = pos_src_tmp
# We invert Sabine's formula to obtain the parameters for the ISM simulator
e_absorption, max_order = pra.inverse_sabine(rt60, room_sz)
room = pra.ShoeBox(room_sz, fs=sr, materials=pra.Material(e_absorption), max_order=max_order)
if self._params.data_simulator.background_noise.add_bg:
pos_src = np.vstack((pos_src, self._params.data_simulator.rir_generation.room_config.noise_src_pos))
for pos in pos_src:
room.add_source(pos)
# currently only supports omnidirectional microphones
mic_pattern = self._params.data_simulator.rir_generation.mic_config.mic_pattern
if self._params.data_simulator.rir_generation.mic_config.mic_pattern == 'omni':
mic_pattern = DirectivityPattern.OMNI
dir_vec = DirectionVector(azimuth=0, colatitude=90, degrees=True)
dir_obj = CardioidFamily(orientation=dir_vec, pattern_enum=mic_pattern,)
mic_pos_tmp = np.array(self._params.data_simulator.rir_generation.mic_config.pos_rcv)
if mic_pos_tmp.ndim == 3: # randomize
mic_pos = np.zeros((mic_pos_tmp.shape[0], mic_pos_tmp.shape[1]))
for i in range(mic_pos_tmp.shape[0]):
for j in range(mic_pos_tmp.shape[1]):
                    mic_pos[i, j] = np.random.uniform(mic_pos_tmp[i, j, 0], mic_pos_tmp[i, j, 1])
else:
mic_pos = mic_pos_tmp
room.add_microphone_array(mic_pos.T, directivity=dir_obj)
room.compute_rir()
rir_pad = 0
for channel in room.rir:
for pos in channel:
if pos.shape[0] - 1 > rir_pad:
rir_pad = pos.shape[0] - 1
return room.rir, rir_pad
def _convolve_rir(self, input, speaker_turn: int, RIR: torch.Tensor) -> Tuple[list, int]:
"""
Augment one sentence (or background noise segment) using a synthetic RIR.
Args:
input (torch.tensor): Input audio.
speaker_turn (int): Current speaker turn.
RIR (torch.tensor): Room Impulse Response.
Returns:
output_sound (list): List of tensors containing augmented audio
length (int): Length of output audio channels (or of the longest if they have different lengths)
"""
output_sound = []
length = 0
for channel in range(self._params.data_simulator.rir_generation.mic_config.num_channels):
if self._params.data_simulator.rir_generation.toolkit == 'gpuRIR':
out_channel = convolve(input, RIR[speaker_turn, channel, : len(input)]).tolist()
elif self._params.data_simulator.rir_generation.toolkit == 'pyroomacoustics':
out_channel = convolve(input, RIR[channel][speaker_turn][: len(input)]).tolist()
if len(out_channel) > length:
length = len(out_channel)
output_sound.append(torch.tensor(out_channel))
return output_sound, length
def _generate_session(
self,
idx: int,
basepath: str,
filename: str,
speaker_ids: list,
speaker_wav_align_map: dict,
noise_samples: list,
device: torch.device,
enforce_counter: int = 2,
):
"""
Generate a multispeaker audio session and corresponding label files.
Args:
idx (int): Index for current session (out of total number of sessions).
basepath (str): Path to output directory.
filename (str): Filename for output files.
speaker_ids (list): List of speaker IDs that will be used in this session.
speaker_wav_align_map (dict): Dictionary containing speaker IDs and their corresponding wav filepath and alignments.
noise_samples (list): List of randomly sampled noise source files that will be used for generating this session.
device (torch.device): Device to use for generating this session.
enforce_counter (int): In enforcement mode, dominance is increased by a factor of enforce_counter for unrepresented speakers
"""
random_seed = self._params.data_simulator.random_seed
np.random.seed(random_seed + idx)
self._device = device
speaker_dominance = self._get_speaker_dominance() # randomly determine speaker dominance
base_speaker_dominance = np.copy(speaker_dominance)
self._set_speaker_volume()
running_len_samples, prev_len_samples = 0, 0 # starting point for each sentence
prev_speaker = None
self.annotator.init_annotation_lists()
self._noise_samples = noise_samples
self._furthest_sample = [0 for n in range(self._params.data_simulator.session_config.num_speakers)]
        # Room Impulse Response generation (performed once per session)
if self._params.data_simulator.rir_generation.toolkit == 'gpuRIR':
RIR, RIR_pad = self._generate_rir_gpuRIR()
elif self._params.data_simulator.rir_generation.toolkit == 'pyroomacoustics':
RIR, RIR_pad = self._generate_rir_pyroomacoustics()
else:
raise Exception("Toolkit must be pyroomacoustics or gpuRIR")
# hold enforce until all speakers have spoken
enforce_time = np.random.uniform(
self._params.data_simulator.speaker_enforcement.enforce_time[0],
self._params.data_simulator.speaker_enforcement.enforce_time[1],
)
enforce = self._params.data_simulator.speaker_enforcement.enforce_num_speakers
session_len_samples = int(
(self._params.data_simulator.session_config.session_length * self._params.data_simulator.sr)
)
array = torch.zeros((session_len_samples, self._params.data_simulator.rir_generation.mic_config.num_channels))
is_speech = torch.zeros(session_len_samples)
while running_len_samples < session_len_samples or enforce:
# Step 1: Prepare parameters for sentence generation
# Enforce speakers depending on running length
if running_len_samples > enforce_time * session_len_samples and enforce:
speaker_dominance, enforce = self._increase_speaker_dominance(base_speaker_dominance, enforce_counter)
if enforce:
enforce_counter += 1
# Step 2: Select a speaker
speaker_turn = self._get_next_speaker(prev_speaker, speaker_dominance)
# Calculate parameters for building a sentence (only add if remaining length > specific time)
max_samples_in_sentence = (
session_len_samples - running_len_samples - RIR_pad
) # sentence will be RIR_len - 1 longer than the audio was pre-augmentation
if enforce:
max_samples_in_sentence = float('inf')
elif (
max_samples_in_sentence
< self._params.data_simulator.session_params.end_buffer * self._params.data_simulator.sr
):
break
# Step 3: Generate a sentence
self._build_sentence(speaker_turn, speaker_ids, speaker_wav_align_map, max_samples_in_sentence)
augmented_sentence, length = self._convolve_rir(self._sentence, speaker_turn, RIR)
# Step 4: Generate a time-stamp for either silence or overlap
start = self._add_silence_or_overlap(
speaker_turn=speaker_turn,
prev_speaker=prev_speaker,
start=running_len_samples,
length=length,
session_len_samples=session_len_samples,
prev_len_samples=prev_len_samples,
enforce=enforce,
)
# step 5: add sentence to array
end = start + length
if end > len(array):
array = torch.nn.functional.pad(array, (0, 0, 0, end - len(array)))
is_speech = torch.nn.functional.pad(is_speech, (0, end - len(is_speech)))
is_speech[start:end] = 1
for channel in range(self._params.data_simulator.rir_generation.mic_config.num_channels):
len_ch = len(augmented_sentence[channel]) # accounts for how channels are slightly different lengths
array[start : start + len_ch, channel] += augmented_sentence[channel]
# Step 6: Build entries for output files
new_rttm_entries = self.annotator.create_new_rttm_entry(
self._words,
self._alignments,
start / self._params.data_simulator.sr,
end / self._params.data_simulator.sr,
speaker_ids[speaker_turn],
)
self.annotator.annote_lists['rttm'].extend(new_rttm_entries)
new_json_entry = self.annotator.create_new_json_entry(
self._text,
os.path.join(basepath, filename + '.wav'),
start / self._params.data_simulator.sr,
length / self._params.data_simulator.sr,
speaker_ids[speaker_turn],
os.path.join(basepath, filename + '.rttm'),
os.path.join(basepath, filename + '.ctm'),
)
self.annotator.annote_lists['json'].append(new_json_entry)
            new_ctm_entries = self.annotator.create_new_ctm_entry(
                words=self._words,
                alignments=self._alignments,
                session_name=filename,
                speaker_id=speaker_ids[speaker_turn],
                start=int(start / self._params.data_simulator.sr),
            )
self.annotator.annote_lists['ctm'].extend(new_ctm_entries)
running_len_samples = np.maximum(running_len_samples, end)
self._furthest_sample[speaker_turn] = running_len_samples
prev_speaker = speaker_turn
prev_len_samples = length
# Step 7-1: Add optional perturbations to the whole session, such as white noise.
if self._params.data_simulator.session_augmentor.add_sess_aug:
# NOTE: This perturbation is not reflected in the session SNR in meta dictionary.
array = perturb_audio(array, self._params.data_simulator.sr, self.session_augmentor)
# Step 7-2: Additive background noise from noise manifest files
if self._params.data_simulator.background_noise.add_bg:
if len(self._noise_samples) > 0:
avg_power_array = torch.mean(array[is_speech == 1] ** 2)
bg, snr = get_background_noise(
len_array=len(array),
power_array=avg_power_array,
noise_samples=self._noise_samples,
audio_read_buffer_dict=self._audio_read_buffer_dict,
snr_min=self._params.data_simulator.background_noise.snr_min,
snr_max=self._params.data_simulator.background_noise.snr_max,
background_noise_snr=self._params.data_simulator.background_noise.snr,
seed=(random_seed + idx),
device=self._device,
)
                # Convolve the background noise with the RIR of the noise source position and add it per channel
                augmented_bg, _ = self._convolve_rir(bg, -1, RIR)
                length = array.shape[0]
                for channel in range(self._params.data_simulator.rir_generation.mic_config.num_channels):
                    array[:, channel] += augmented_bg[channel][:length]
else:
snr = "N/A"
# Step 7: Normalize and write to disk
array = normalize_audio(array)
if torch.is_tensor(array):
array = array.cpu().numpy()
sf.write(os.path.join(basepath, filename + '.wav'), array, self._params.data_simulator.sr)
self.annotator.write_annotation_files(
basepath=basepath, filename=filename, meta_data=self._get_session_meta_data(array=array, snr=snr),
)
del array
self.clean_up()
return basepath, filename
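# Illustrative sketch (not part of the original module): the per-channel convolution pattern used by
# `RIRMultiSpeakerSimulator._convolve_rir` above, shown with a synthetic signal and synthetic
# exponentially decaying RIRs. The signal, RIR length and decay constant are assumptions for
# demonstration only.
def _example_convolve_rir_per_channel(num_channels: int = 2, sr: int = 16000):
    import numpy as np
    import torch
    from scipy.signal import convolve
    signal = np.random.randn(sr).astype(np.float32)  # one second of noise as a stand-in for speech
    decay = np.exp(-np.arange(2000) / 400.0)
    output_sound, length = [], 0
    for _ in range(num_channels):
        rir = (np.random.randn(2000) * decay).astype(np.float32)  # synthetic room impulse response
        out_channel = convolve(signal, rir)  # output length is len(signal) + len(rir) - 1
        length = max(length, len(out_channel))
        output_sound.append(torch.tensor(out_channel))
    return output_sound, length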
def check_angle(key: str, val: Union[float, Iterable[float]]) -> bool:
"""Check if the angle value is within the expected range. Input
values are in degrees.
Note:
azimuth: angle between a projection on the horizontal (xy) plane and
positive x axis. Increases counter-clockwise. Range: [-180, 180].
        elevation: angle between a vector and its projection on the horizontal (xy) plane.
                   Positive above, negative below, i.e., north=+90, south=-90. Range: [-90, 90]
        yaw: rotation around the z axis. Defined according to the right-hand rule.
             Range: [-180, 180]
        pitch: rotation around the yʹ axis. Defined according to the right-hand rule.
               Range: [-90, 90]
        roll: rotation around the xʺ axis. Defined according to the right-hand rule.
              Range: [-180, 180]
Args:
key: angle type
val: values in degrees
Returns:
True if all values are within the expected range.
"""
if np.isscalar(val):
min_val = max_val = val
else:
min_val = min(val)
max_val = max(val)
if key == 'azimuth' and -180 <= min_val <= max_val <= 180:
return True
if key == 'elevation' and -90 <= min_val <= max_val <= 90:
return True
if key == 'yaw' and -180 <= min_val <= max_val <= 180:
return True
if key == 'pitch' and -90 <= min_val <= max_val <= 90:
return True
if key == 'roll' and -180 <= min_val <= max_val <= 180:
return True
raise ValueError(f'Invalid value for angle {key} = {val}')
def wrap_to_180(angle: float) -> float:
"""Wrap an angle to range ±180 degrees.
Args:
angle: angle in degrees
Returns:
Angle in degrees wrapped to ±180 degrees.
"""
return angle - np.floor(angle / 360 + 1 / 2) * 360
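# Illustrative sketch (not part of the original module): a few worked values for `wrap_to_180` and
# `check_angle` above; the assertions follow directly from the wrapping formula.
def _example_angle_helpers():
    assert wrap_to_180(45) == 45  # already inside the range
    assert wrap_to_180(190) == -170  # wraps past +180 to the negative side
    assert wrap_to_180(-270) == 90  # wraps past -180 to the positive side
    assert check_angle('azimuth', [-45, 45])  # a valid range returns True
    return True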
class ArrayGeometry(object):
"""A class to simplify handling of array geometry.
Supports translation and rotation of the array and calculation of
spherical coordinates of a given point relative to the internal
coordinate system of the array.
Args:
mic_positions: 3D coordinates, with shape (num_mics, 3)
center: optional position of the center of the array. Defaults to the average of the coordinates.
internal_cs: internal coordinate system for the array relative to the global coordinate system.
Defaults to (x, y, z), and is rotated with the array.
"""
def __init__(
self,
mic_positions: Union[np.ndarray, List],
center: Optional[np.ndarray] = None,
internal_cs: Optional[np.ndarray] = None,
):
if isinstance(mic_positions, Iterable):
mic_positions = np.array(mic_positions)
if not mic_positions.ndim == 2:
raise ValueError(
f'Expecting a 2D array specifying mic positions, but received {mic_positions.ndim}-dim array'
)
if not mic_positions.shape[1] == 3:
raise ValueError(f'Expecting 3D positions, but received {mic_positions.shape[1]}-dim positions')
mic_positions_center = np.mean(mic_positions, axis=0)
self.centered_positions = mic_positions - mic_positions_center
self.center = mic_positions_center if center is None else center
# Internal coordinate system
if internal_cs is None:
# Initially aligned with the global
self.internal_cs = np.eye(3)
else:
self.internal_cs = internal_cs
@property
def num_mics(self):
"""Return the number of microphones for the current array.
"""
return self.centered_positions.shape[0]
@property
def positions(self):
"""Absolute positions of the microphones.
"""
return self.centered_positions + self.center
@property
def internal_positions(self):
"""Positions in the internal coordinate system.
"""
return np.matmul(self.centered_positions, self.internal_cs.T)
@property
def radius(self):
"""Radius of the array, relative to the center.
"""
return max(np.linalg.norm(self.centered_positions, axis=1))
@staticmethod
def get_rotation(yaw: float = 0, pitch: float = 0, roll: float = 0) -> Rotation:
"""Get a Rotation object for given angles.
All angles are defined according to the right-hand rule.
Args:
yaw: rotation around the z axis
pitch: rotation around the yʹ axis
roll: rotation around the xʺ axis
Returns:
A rotation object constructed using the provided angles.
"""
check_angle('yaw', yaw)
check_angle('pitch', pitch)
check_angle('roll', roll)
return Rotation.from_euler('ZYX', [yaw, pitch, roll], degrees=True)
def translate(self, to: np.ndarray):
"""Translate the array center to a new point.
Translation does not change the centered positions or the internal coordinate system.
Args:
to: 3D point, shape (3,)
"""
self.center = to
def rotate(self, yaw: float = 0, pitch: float = 0, roll: float = 0):
"""Apply rotation on the mic array.
This rotates the centered microphone positions and the internal
coordinate system, it doesn't change the center of the array.
All angles are defined according to the right-hand rule.
        For example, a positive pitch rotates the array from the z axis toward the x axis,
        which reduces the elevation with respect to the global horizontal plane.
Args:
yaw: rotation around the z axis
pitch: rotation around the yʹ axis
roll: rotation around the xʺ axis
"""
# construct rotation using TB angles
rotation = self.get_rotation(yaw=yaw, pitch=pitch, roll=roll)
# rotate centered positions
self.centered_positions = rotation.apply(self.centered_positions)
# apply the same transformation on the internal coordinate system
self.internal_cs = rotation.apply(self.internal_cs)
def new_rotated_array(self, yaw: float = 0, pitch: float = 0, roll: float = 0):
"""Create a new array by rotating this array.
Args:
yaw: rotation around the z axis
pitch: rotation around the yʹ axis
roll: rotation around the xʺ axis
Returns:
A new ArrayGeometry object constructed using the provided angles.
"""
new_array = ArrayGeometry(mic_positions=self.positions, center=self.center, internal_cs=self.internal_cs)
new_array.rotate(yaw=yaw, pitch=pitch, roll=roll)
return new_array
def spherical_relative_to_array(
self, point: np.ndarray, use_internal_cs: bool = True
) -> Tuple[float, float, float]:
"""Return spherical coordinates of a point relative to the internal coordinate system.
Args:
point: 3D coordinate, shape (3,)
use_internal_cs: Calculate position relative to the internal coordinate system.
If `False`, the positions will be calculated relative to the
external coordinate system centered at `self.center`.
Returns:
A tuple (distance, azimuth, elevation) relative to the mic array.
"""
rel_position = point - self.center
distance = np.linalg.norm(rel_position)
if use_internal_cs:
# transform from the absolute coordinate system to the internal coordinate system
rel_position = np.matmul(self.internal_cs, rel_position)
# get azimuth
azimuth = np.arctan2(rel_position[1], rel_position[0]) / np.pi * 180
# get elevation
elevation = np.arcsin(rel_position[2] / distance) / np.pi * 180
return distance, azimuth, elevation
def __str__(self):
with np.printoptions(precision=3, suppress=True):
desc = f"{type(self)}:\ncenter =\n{self.center}\ncentered positions =\n{self.centered_positions}\nradius = \n{self.radius:.3}\nabsolute positions =\n{self.positions}\ninternal coordinate system =\n{self.internal_cs}\n\n"
return desc
def plot(self, elev=30, azim=-55, mic_size=25):
"""Plot microphone positions.
Args:
elev: elevation for the view of the plot
azim: azimuth for the view of the plot
mic_size: size of the microphone marker in the plot
"""
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# show mic positions
for m in range(self.num_mics):
# show mic
ax.scatter(
self.positions[m, 0],
self.positions[m, 1],
self.positions[m, 2],
marker='o',
c='black',
s=mic_size,
depthshade=False,
)
# add label
ax.text(self.positions[m, 0], self.positions[m, 1], self.positions[m, 2], str(m), c='red', zorder=10)
# show the internal coordinate system
ax.quiver(
self.center[0],
self.center[1],
self.center[2],
self.internal_cs[:, 0],
self.internal_cs[:, 1],
self.internal_cs[:, 2],
length=self.radius,
label='internal cs',
normalize=False,
linestyle=':',
linewidth=1.0,
)
for dim, label in enumerate(['x′', 'y′', 'z′']):
label_pos = self.center + self.radius * self.internal_cs[dim]
ax.text(label_pos[0], label_pos[1], label_pos[2], label, tuple(self.internal_cs[dim]), c='blue')
try:
# Unfortunately, equal aspect ratio has been added very recently to Axes3D
ax.set_aspect('equal')
except NotImplementedError:
logging.warning('Equal aspect ratio not supported by Axes3D')
# Set view
ax.view_init(elev=elev, azim=azim)
# Set reasonable limits for all axes, even for the case of an unequal aspect ratio
ax.set_xlim([self.center[0] - self.radius, self.center[0] + self.radius])
ax.set_ylim([self.center[1] - self.radius, self.center[1] + self.radius])
ax.set_zlim([self.center[2] - self.radius, self.center[2] + self.radius])
ax.set_xlabel('x/m')
ax.set_ylabel('y/m')
ax.set_zlabel('z/m')
ax.set_title('Microphone positions')
ax.legend()
plt.show()
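# Illustrative sketch (not part of the original module): basic usage of `ArrayGeometry` above with a
# hypothetical two-microphone array; the positions, translation and rotation are assumed values.
def _example_array_geometry():
    import numpy as np
    mic_positions = np.array([[-0.05, 0.0, 0.0], [0.05, 0.0, 0.0]])  # 10 cm spacing along the x axis
    array = ArrayGeometry(mic_positions)
    array.translate(to=np.array([2.0, 3.0, 1.5]))  # move the array center into the room
    array.rotate(yaw=90)  # rotate the array and its internal coordinate system around the z axis
    # spherical coordinates of a point one meter away, relative to the rotated array
    distance, azimuth, elevation = array.spherical_relative_to_array(np.array([2.0, 4.0, 1.5]))
    return distance, azimuth, elevation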
def convert_placement_to_range(
placement: dict, room_dim: Iterable[float], object_radius: float = 0
) -> List[List[float]]:
"""Given a placement dictionary, return ranges for each dimension.
Args:
placement: dictionary containing x, y, height, and min_to_wall
room_dim: dimensions of the room, shape (3,)
object_radius: radius of the object to be placed
    Returns:
        List with a range of values for each dimension.
"""
if not np.all(np.array(room_dim) > 0):
raise ValueError(f'Room dimensions must be positive: {room_dim}')
if object_radius < 0:
raise ValueError(f'Object radius must be non-negative: {object_radius}')
placement_range = [None] * 3
min_to_wall = placement.get('min_to_wall', 0)
if min_to_wall < 0:
        raise ValueError(f'Min distance to wall must be non-negative: {min_to_wall}')
for idx, key in enumerate(['x', 'y', 'height']):
# Room dimension
dim = room_dim[idx]
# Construct the range
val = placement.get(key)
if val is None:
            # No constraint specified for this coordinate of the center
min_val, max_val = 0, dim
elif np.isscalar(val):
min_val = max_val = val
else:
if len(val) != 2:
raise ValueError(f'Invalid value for placement for dim {idx}/{key}: {str(placement)}')
min_val, max_val = val
# Make sure the array is not too close to a wall
min_val = max(min_val, min_to_wall + object_radius)
max_val = min(max_val, dim - min_to_wall - object_radius)
if min_val > max_val or min(min_val, max_val) < 0:
raise ValueError(f'Invalid range dim {idx}/{key}: min={min_val}, max={max_val}')
placement_range[idx] = [min_val, max_val]
return placement_range
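# Illustrative sketch (not part of the original module): converting a hypothetical placement
# dictionary into per-dimension sampling ranges for a 5 x 4 x 3 m room with a 0.5 m wall margin.
def _example_convert_placement_to_range():
    placement = {'x': None, 'y': [1.0, 3.0], 'height': 1.5, 'min_to_wall': 0.5}
    ranges = convert_placement_to_range(placement=placement, room_dim=[5.0, 4.0, 3.0], object_radius=0.1)
    # ranges == [[0.6, 4.4], [1.0, 3.0], [1.5, 1.5]]
    return ranges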
class RIRCorpusGenerator(object):
"""Creates a corpus of RIRs based on a defined configuration of rooms and microphone array.
RIRs are generated using `generate` method.
"""
def __init__(self, cfg: DictConfig):
"""
Args:
cfg: dictionary with parameters of the simulation
"""
logging.info("Initialize RIRCorpusGenerator")
self._cfg = cfg
self.check_cfg()
@property
def cfg(self):
"""Property holding the internal config of the object.
Note:
Changes to this config are not reflected in the state of the object.
            Please create a new object with the updated config.
"""
return self._cfg
@property
def sample_rate(self):
return self._cfg.sample_rate
@cfg.setter
def cfg(self, cfg):
"""Property holding the internal config of the object.
Note:
Changes to this config are not reflected in the state of the object.
            Please create a new object with the updated config.
"""
self._cfg = cfg
def check_cfg(self):
"""
Checks provided configuration to ensure it has the minimal required
configuration the values are in a reasonable range.
"""
# sample rate
sample_rate = self.cfg.get('sample_rate')
if sample_rate is None:
raise ValueError('Sample rate not provided.')
        elif sample_rate <= 0:
            raise ValueError(f'Sample rate must be positive: {sample_rate}')
# room configuration
room_cfg = self.cfg.get('room')
if room_cfg is None:
raise ValueError('Room configuration not provided')
if room_cfg.get('num') is None:
raise ValueError('Number of rooms per subset not provided')
if room_cfg.get('dim') is None:
raise ValueError('Room dimensions not provided')
for idx, key in enumerate(['width', 'length', 'height']):
dim = room_cfg.dim.get(key)
if dim is None:
# not provided
raise ValueError(f'Room {key} needs to be a scalar or a range, currently it is None')
            elif np.isscalar(dim):
                # fixed dimension
                if dim <= 0:
                    raise ValueError(f'A fixed dimension must be positive for {key}: {dim}')
            elif len(dim) != 2 or not 0 < dim[0] < dim[1]:
                # not a valid range
                raise ValueError(f'Range must be specified with two positive increasing elements for {key}: {dim}')
rt60 = room_cfg.get('rt60')
if rt60 is None:
# not provided
raise ValueError(f'RT60 needs to be a scalar or a range, currently it is None')
        elif np.isscalar(rt60):
            # fixed RT60
            if rt60 <= 0:
                raise ValueError(f'RT60 must be positive: {rt60}')
        elif len(rt60) != 2 or not 0 < rt60[0] < rt60[1]:
            # not a valid range
            raise ValueError(f'RT60 range must be specified with two positive increasing elements: {rt60}')
# mic array
mic_cfg = self.cfg.get('mic_array')
if mic_cfg is None:
raise ValueError('Mic configuration not provided')
if mic_cfg.get('positions') == 'random':
# Only num_mics and placement are required
mic_cfg_keys = ['num_mics', 'placement']
else:
mic_cfg_keys = ['positions', 'placement', 'orientation']
for key in mic_cfg_keys:
if key not in mic_cfg:
raise ValueError(f'Mic array {key} not provided')
# source
source_cfg = self.cfg.get('source')
if source_cfg is None:
raise ValueError('Source configuration not provided')
if source_cfg.get('num') is None:
raise ValueError('Number of sources per room not provided')
elif source_cfg.num <= 0:
raise ValueError(f'Number of sources must be positive: {source_cfg.num}')
if 'placement' not in source_cfg:
raise ValueError('Source placement dictionary not provided')
# anechoic
if self.cfg.get('anechoic') is None:
            raise ValueError('Anechoic configuration not provided.')
def generate_room_params(self) -> dict:
"""Generate randomized room parameters based on the provided
configuration.
"""
# Prepare room sim parameters
if not PRA:
raise ImportError('pyroomacoustics is required for room simulation')
room_cfg = self.cfg.room
# Prepare rt60
if room_cfg.rt60 is None:
raise ValueError(f'Room RT60 needs to be a scalar or a range, currently it is None')
if np.isscalar(room_cfg.rt60):
assert room_cfg.rt60 > 0, f'RT60 should be positive: {room_cfg.rt60}'
rt60 = room_cfg.rt60
elif len(room_cfg.rt60) == 2:
assert (
0 < room_cfg.rt60[0] <= room_cfg.rt60[1]
), f'Expecting two non-decreasing values for RT60, received {room_cfg.rt60}'
rt60 = self.random.uniform(low=room_cfg.rt60[0], high=room_cfg.rt60[1])
else:
raise ValueError(f'Unexpected value for RT60: {room_cfg.rt60}')
# Generate a room with random dimensions
num_retries = self.cfg.get('num_retries', 20)
for n in range(num_retries):
# width, length, height
room_dim = np.zeros(3)
# prepare dimensions
for idx, key in enumerate(['width', 'length', 'height']):
# get configured dimension
dim = room_cfg.dim[key]
# set a value
if dim is None:
raise ValueError(f'Room {key} needs to be a scalar or a range, currently it is None')
elif np.isscalar(dim):
assert dim > 0, f'Dimension should be positive for {key}: {dim}'
room_dim[idx] = dim
elif len(dim) == 2:
assert 0 < dim[0] <= dim[1], f'Expecting two non-decreasing values for {key}, received {dim}'
# Reduce dimension if the previous attempt failed
room_dim[idx] = self.random.uniform(low=dim[0], high=dim[1] - n * (dim[1] - dim[0]) / num_retries)
else:
raise ValueError(f'Unexpected value for {key}: {dim}')
try:
# Get parameters from size and RT60
room_absorption, room_max_order = pra.inverse_sabine(rt60, room_dim)
break
except Exception as e:
logging.debug('Inverse sabine failed: %s', str(e))
# Inverse sabine may fail if the room is too large for the selected RT60.
                # Try again by generating a smaller room.
room_absorption = room_max_order = None
continue
if room_absorption is None or room_max_order is None:
raise RuntimeError(f'Evaluation of parameters failed for RT60 {rt60}s and room size {room_dim}.')
# Return the required values
room_params = {
'dim': room_dim,
'absorption': room_absorption,
'max_order': room_max_order,
'rt60_theoretical': rt60,
'anechoic_absorption': self.cfg.anechoic.absorption,
'anechoic_max_order': self.cfg.anechoic.max_order,
'sample_rate': self.cfg.sample_rate,
}
return room_params
def generate_array(self, room_dim: Iterable[float]) -> ArrayGeometry:
"""Generate array placement for the current room and config.
Args:
room_dim: dimensions of the room, [width, length, height]
Returns:
Randomly placed microphone array.
"""
mic_cfg = self.cfg.mic_array
if mic_cfg.positions == 'random':
            # Create a random set of microphones
num_mics = mic_cfg.num_mics
mic_positions = []
# Each microphone is placed individually
placement_range = convert_placement_to_range(
placement=mic_cfg.placement, room_dim=room_dim, object_radius=0
)
# Randomize mic placement
for m in range(num_mics):
position_m = [None] * 3
for idx in range(3):
position_m[idx] = self.random.uniform(low=placement_range[idx][0], high=placement_range[idx][1])
mic_positions.append(position_m)
mic_array = ArrayGeometry(mic_positions)
else:
mic_array = ArrayGeometry(mic_cfg.positions)
# Randomize center placement
center = np.zeros(3)
placement_range = convert_placement_to_range(
placement=mic_cfg.placement, room_dim=room_dim, object_radius=mic_array.radius
)
for idx in range(len(center)):
center[idx] = self.random.uniform(low=placement_range[idx][0], high=placement_range[idx][1])
# Place the array at the configured center point
mic_array.translate(to=center)
# Randomize orientation
orientation = dict()
for key in ['yaw', 'roll', 'pitch']:
# angle for current orientation
angle = mic_cfg.orientation[key]
if angle is None:
raise ValueError(f'Mic array {key} should be a scalar or a range, currently it is set to None.')
# check it's within the expected range
check_angle(key, angle)
if np.isscalar(angle):
orientation[key] = angle
elif len(angle) == 2:
assert angle[0] <= angle[1], f"Expecting two non-decreasing values for {key}, received {angle}"
                # sample the angle uniformly within the configured range (integer values could be used for easier bucketing, if necessary)
orientation[key] = self.random.uniform(low=angle[0], high=angle[1])
else:
raise ValueError(f'Unexpected value for orientation {key}: {angle}')
# Rotate the array to match the selected orientation
mic_array.rotate(**orientation)
return mic_array
def generate_source_position(self, room_dim: Iterable[float]) -> List[List[float]]:
"""Generate position for all sources in a room.
Args:
room_dim: dimensions of a 3D shoebox room
Returns:
List of source positions, with each position characterized with a 3D coordinate
"""
source_cfg = self.cfg.source
placement_range = convert_placement_to_range(placement=source_cfg.placement, room_dim=room_dim)
source_position = []
for n in range(source_cfg.num):
            # generate a random point within the range
s_pos = [None] * 3
for idx in range(len(s_pos)):
s_pos[idx] = self.random.uniform(low=placement_range[idx][0], high=placement_range[idx][1])
source_position.append(s_pos)
return source_position
def generate(self):
"""Generate RIR corpus.
This method will prepare randomized examples based on the current configuration,
run room simulations and save results to output_dir.
"""
logging.info("Generate RIR corpus")
# Initialize
self.random = default_rng(seed=self.cfg.random_seed)
# Prepare output dir
output_dir = self.cfg.output_dir
if output_dir.endswith('.yaml'):
output_dir = output_dir[:-5]
# Create absolute path
logging.info('Output dir set to: %s', output_dir)
# Generate all cases
for subset, num_rooms in self.cfg.room.num.items():
output_dir_subset = os.path.join(output_dir, subset)
examples = []
if not os.path.exists(output_dir_subset):
logging.info('Creating output directory: %s', output_dir_subset)
os.makedirs(output_dir_subset)
elif os.path.isdir(output_dir_subset) and len(os.listdir(output_dir_subset)) > 0:
raise RuntimeError(f'Output directory {output_dir_subset} is not empty.')
# Generate examples
for n_room in range(num_rooms):
# room info
room_params = self.generate_room_params()
# array placement
mic_array = self.generate_array(room_params['dim'])
# source placement
source_position = self.generate_source_position(room_params['dim'])
# file name for the file
room_filepath = os.path.join(output_dir_subset, f'{subset}_room_{n_room:06d}.h5')
# prepare example
example = {
'room_params': room_params,
'mic_array': mic_array,
'source_position': source_position,
'room_filepath': room_filepath,
}
examples.append(example)
# Simulation
if (num_workers := self.cfg.get('num_workers')) is None:
num_workers = os.cpu_count() - 1
if num_workers > 1:
logging.info(f'Simulate using {num_workers} workers')
with multiprocessing.Pool(processes=num_workers) as pool:
metadata = list(tqdm(pool.imap(simulate_room_kwargs, examples), total=len(examples)))
else:
logging.info('Simulate using a single worker')
metadata = []
for example in tqdm(examples, total=len(examples)):
metadata.append(simulate_room(**example))
# Save manifest
manifest_filepath = os.path.join(output_dir, f'{subset}_manifest.json')
if os.path.exists(manifest_filepath) and os.path.isfile(manifest_filepath):
raise RuntimeError(f'Manifest config file exists: {manifest_filepath}')
# Make all paths in the manifest relative to the output dir
for data in metadata:
data['room_filepath'] = os.path.relpath(data['room_filepath'], start=output_dir)
write_manifest(manifest_filepath, metadata)
# Generate plots with information about generated data
plot_filepath = os.path.join(output_dir, f'{subset}_info.png')
if os.path.exists(plot_filepath) and os.path.isfile(plot_filepath):
raise RuntimeError(f'Plot file exists: {plot_filepath}')
plot_rir_manifest_info(manifest_filepath, plot_filepath=plot_filepath)
# Save used configuration for reference
config_filepath = os.path.join(output_dir, 'config.yaml')
if os.path.exists(config_filepath) and os.path.isfile(config_filepath):
raise RuntimeError(f'Output config file exists: {config_filepath}')
OmegaConf.save(self.cfg, config_filepath, resolve=True)
def simulate_room_kwargs(kwargs: dict) -> dict:
"""Wrapper around `simulate_room` to handle kwargs.
`pool.map(simulate_room_kwargs, examples)` would be
equivalent to `pool.starstarmap(simulate_room, examples)`
    if `starstarmap` existed.
Args:
kwargs: kwargs that are forwarded to `simulate_room`
Returns:
Dictionary with metadata, see `simulate_room`
"""
return simulate_room(**kwargs)
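# Usage sketch (assumed worker count, for illustration only): `simulate_room_kwargs` lets
# `multiprocessing.Pool.imap` forward a dict of keyword arguments to `simulate_room`,
# e.g. for a prepared list of example dicts:
#   with multiprocessing.Pool(processes=4) as pool:
#       metadata = list(pool.imap(simulate_room_kwargs, examples))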
def simulate_room(
room_params: dict, mic_array: ArrayGeometry, source_position: Iterable[Iterable[float]], room_filepath: str,
) -> dict:
"""Simulate room
Args:
room_params: parameters of the room to be simulated
mic_array: defines positions of the microphones
        source_position: positions for all sources to be simulated
room_filepath: results are saved to this path
Returns:
Dictionary with metadata based on simulation setup
and simulation results. Used to create the corresponding
manifest file.
"""
# room with the selected parameters
room_sim = pra.ShoeBox(
room_params['dim'],
fs=room_params['sample_rate'],
materials=pra.Material(room_params['absorption']),
max_order=room_params['max_order'],
)
# same geometry for generating anechoic responses
room_anechoic = pra.ShoeBox(
room_params['dim'],
fs=room_params['sample_rate'],
materials=pra.Material(room_params['anechoic_absorption']),
max_order=room_params['anechoic_max_order'],
)
# Compute RIRs
for room in [room_sim, room_anechoic]:
# place the array
room.add_microphone_array(mic_array.positions.T)
# place the sources
for s_pos in source_position:
room.add_source(s_pos)
# generate RIRs
room.compute_rir()
# Get metadata for sources
source_distance = []
source_azimuth = []
source_elevation = []
for s_pos in source_position:
distance, azimuth, elevation = mic_array.spherical_relative_to_array(s_pos)
source_distance.append(distance)
source_azimuth.append(azimuth)
source_elevation.append(elevation)
# RIRs
rir_dataset = {
'rir': convert_rir_to_multichannel(room_sim.rir),
'anechoic': convert_rir_to_multichannel(room_anechoic.rir),
}
# Prepare metadata dict and return
metadata = {
'room_filepath': room_filepath,
'sample_rate': room_params['sample_rate'],
'dim': room_params['dim'],
'rir_absorption': room_params['absorption'],
'rir_max_order': room_params['max_order'],
'rir_rt60_theory': room_sim.rt60_theory(),
'rir_rt60_measured': room_sim.measure_rt60().mean(axis=0), # average across mics for each source
'anechoic_rt60_theory': room_anechoic.rt60_theory(),
'anechoic_rt60_measured': room_anechoic.measure_rt60().mean(axis=0), # average across mics for each source
'anechoic_absorption': room_params['anechoic_absorption'],
'anechoic_max_order': room_params['anechoic_max_order'],
'mic_positions': mic_array.positions,
'mic_center': mic_array.center,
'source_position': source_position,
'source_distance': source_distance,
'source_azimuth': source_azimuth,
'source_elevation': source_elevation,
'num_sources': len(source_position),
}
# Save simulated RIR
save_rir_simulation(room_filepath, rir_dataset, metadata)
return convert_numpy_to_serializable(metadata)
def save_rir_simulation(filepath: str, rir_dataset: Dict[str, List[np.array]], metadata: dict):
"""Save simulated RIRs and metadata.
Args:
filepath: Path to the file where the data will be saved.
rir_dataset: Dictionary with RIR data. Each item is a set of multi-channel RIRs.
metadata: Dictionary with related metadata.
"""
if os.path.exists(filepath):
        raise RuntimeError(f'Output file exists: {filepath}')
num_sources = metadata['num_sources']
with h5py.File(filepath, 'w') as h5f:
# Save RIRs, each RIR set in a separate group
for rir_key, rir_value in rir_dataset.items():
if len(rir_value) != num_sources:
raise ValueError(
                    f'Each RIR dataset should have exactly {num_sources} elements. Current RIR {rir_key} has {len(rir_value)} elements'
)
rir_group = h5f.create_group(rir_key)
# RIRs for different sources are saved under [group]['idx']
for idx, rir in enumerate(rir_value):
rir_group.create_dataset(f'{idx}', data=rir_value[idx])
# Save metadata
metadata_group = h5f.create_group('metadata')
for key, value in metadata.items():
metadata_group.create_dataset(key, data=value)
def load_rir_simulation(filepath: str, source: int = 0, rir_key: str = 'rir') -> Tuple[np.ndarray, float]:
"""Load simulated RIRs and metadata.
Args:
filepath: Path to simulated RIR data
source: Index of a source.
rir_key: String to denote which RIR to load, if there are multiple available.
Returns:
Multichannel RIR as ndarray with shape (num_samples, num_channels) and scalar sample rate.
"""
with h5py.File(filepath, 'r') as h5f:
# Load RIR
rir = h5f[rir_key][f'{source}'][:]
# Load metadata
sample_rate = h5f['metadata']['sample_rate'][()]
return rir, sample_rate
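# Usage sketch (hypothetical file name, following the f'{subset}_room_{n_room:06d}.h5'
# pattern used above): load the reverberant RIR of source 0 from a saved simulation.
#   rir, fs = load_rir_simulation('train_room_000000.h5', source=0, rir_key='rir')
#   num_samples, num_channels = rir.shape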
def convert_numpy_to_serializable(data: Union[dict, float, np.ndarray]) -> Union[dict, float, np.ndarray]:
"""Convert all numpy estries to list.
Can be used to preprocess data before writing to a JSON file.
Args:
data: Dictionary, array or scalar.
Returns:
The same structure, but converted to list if
        the input is np.ndarray, so `data` can be serialized.
"""
if isinstance(data, dict):
for key, val in data.items():
data[key] = convert_numpy_to_serializable(val)
elif isinstance(data, list):
data = [convert_numpy_to_serializable(d) for d in data]
elif isinstance(data, np.ndarray):
data = data.tolist()
elif isinstance(data, np.integer):
data = int(data)
elif isinstance(data, np.floating):
data = float(data)
elif isinstance(data, np.generic):
data = data.item()
return data
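# Minimal sketch of the conversion: nested dicts/lists with numpy members become
# JSON-serializable built-ins, e.g.
#   convert_numpy_to_serializable({'dim': np.array([3.0, 4.0, 2.5]), 'num': np.int64(2)})
#   # -> {'dim': [3.0, 4.0, 2.5], 'num': 2}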
def convert_rir_to_multichannel(rir: List[List[np.ndarray]]) -> List[np.ndarray]:
"""Convert RIR to a list of arrays.
Args:
rir: list of lists, each element is a single-channel RIR
Returns:
List of multichannel RIRs
"""
num_mics = len(rir)
num_sources = len(rir[0])
mc_rir = [None] * num_sources
for n_source in range(num_sources):
rir_len = [len(rir[m][n_source]) for m in range(num_mics)]
max_len = max(rir_len)
mc_rir[n_source] = np.zeros((max_len, num_mics))
for n_mic, len_mic in enumerate(rir_len):
mc_rir[n_source][:len_mic, n_mic] = rir[n_mic][n_source]
return mc_rir
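# Minimal sketch: pyroomacoustics exposes `room.rir` indexed as rir[mic][source]; this
# helper regroups it per source and zero-pads channels to a common length, e.g. for
# 2 mics and 1 source (toy data, illustration only):
#   rir = [[np.ones(8)], [np.ones(10)]]          # rir[mic][source]
#   mc = convert_rir_to_multichannel(rir)        # list with one (10, 2) array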
def plot_rir_manifest_info(filepath: str, plot_filepath: str = None):
"""Plot distribution of parameters from manifest file.
Args:
filepath: path to a RIR corpus manifest file
plot_filepath: path to save the plot at
"""
metadata = read_manifest(filepath)
# source placement
source_distance = []
source_azimuth = []
source_elevation = []
source_height = []
# room config
rir_rt60_theory = []
rir_rt60_measured = []
anechoic_rt60_theory = []
anechoic_rt60_measured = []
# get the required data
for data in metadata:
# source config
source_distance += data['source_distance']
source_azimuth += data['source_azimuth']
source_elevation += data['source_elevation']
source_height += [s_pos[2] for s_pos in data['source_position']]
# room config
rir_rt60_theory.append(data['rir_rt60_theory'])
rir_rt60_measured += data['rir_rt60_measured']
anechoic_rt60_theory.append(data['anechoic_rt60_theory'])
anechoic_rt60_measured += data['anechoic_rt60_measured']
# plot
plt.figure(figsize=(12, 6))
plt.subplot(2, 4, 1)
plt.hist(source_distance, label='distance')
plt.xlabel('distance / m')
plt.ylabel('# examples')
plt.title('Source-to-array center distance')
plt.subplot(2, 4, 2)
plt.hist(source_azimuth, label='azimuth')
plt.xlabel('azimuth / deg')
plt.ylabel('# examples')
plt.title('Source-to-array center azimuth')
plt.subplot(2, 4, 3)
plt.hist(source_elevation, label='elevation')
plt.xlabel('elevation / deg')
plt.ylabel('# examples')
plt.title('Source-to-array center elevation')
plt.subplot(2, 4, 4)
plt.hist(source_height, label='source height')
plt.xlabel('height / m')
plt.ylabel('# examples')
plt.title('Source height')
plt.subplot(2, 4, 5)
plt.hist(rir_rt60_theory, label='theory')
plt.xlabel('RT60 / s')
plt.ylabel('# examples')
plt.title('RT60 theory')
plt.subplot(2, 4, 6)
plt.hist(rir_rt60_measured, label='measured')
plt.xlabel('RT60 / s')
plt.ylabel('# examples')
plt.title('RT60 measured')
plt.subplot(2, 4, 7)
plt.hist(anechoic_rt60_theory, label='theory')
plt.xlabel('RT60 / s')
plt.ylabel('# examples')
plt.title('RT60 theory (anechoic)')
plt.subplot(2, 4, 8)
plt.hist(anechoic_rt60_measured, label='measured')
plt.xlabel('RT60 / s')
plt.ylabel('# examples')
plt.title('RT60 measured (anechoic)')
for n in range(8):
plt.subplot(2, 4, n + 1)
plt.grid()
plt.legend(loc='lower left')
plt.tight_layout()
if plot_filepath is not None:
plt.savefig(plot_filepath)
plt.close()
logging.info('Plot saved at %s', plot_filepath)
class RIRMixGenerator(object):
"""Creates a dataset of mixed signals at the microphone
by combining target speech, background noise and interference.
    Corresponding signals are generated and saved
using the `generate` method.
    Input configuration is expected to have the following structure
```
sample_rate: sample rate used for simulation
room:
subset: manifest for RIR data
target:
subset: manifest for target source data
noise:
subset: manifest for noise data
interference:
subset: manifest for interference data
interference_probability: probability that interference is present
max_num_interferers: max number of interferers, randomly selected between 0 and max
mix:
subset:
num: number of examples to generate
rsnr: range of RSNR
rsir: range of RSIR
ref_mic: reference microphone
ref_mic_rms: desired RMS at ref_mic
```
"""
def __init__(self, cfg: DictConfig):
"""
Instantiate a RIRMixGenerator object.
Args:
cfg: generator configuration defining data for room,
target signal, noise, interference and mixture
"""
logging.info("Initialize RIRMixGenerator")
self._cfg = cfg
self.check_cfg()
self.subsets = self.cfg.room.keys()
logging.info('Initialized with %d subsets: %s', len(self.subsets), str(self.subsets))
# load manifests
self.metadata = dict()
for subset in self.subsets:
subset_data = dict()
logging.info('Loading data for %s', subset)
for key in ['room', 'target', 'noise', 'interference']:
try:
subset_data[key] = read_manifest(self.cfg[key][subset])
logging.info('\t%-*s: \t%d files', 15, key, len(subset_data[key]))
except Exception as e:
subset_data[key] = None
logging.info('\t%-*s: \t0 files', 15, key)
logging.warning('\t\tManifest data not loaded. Exception: %s', str(e))
self.metadata[subset] = subset_data
logging.info('Loaded all manifests')
self.num_retries = self.cfg.get('num_retries', 5)
@property
def cfg(self):
"""Property holding the internal config of the object.
Note:
Changes to this config are not reflected in the state of the object.
Please create a new model with the updated config.
"""
return self._cfg
@property
def sample_rate(self):
return self._cfg.sample_rate
@cfg.setter
def cfg(self, cfg):
"""Property holding the internal config of the object.
Note:
Changes to this config are not reflected in the state of the object.
Please create a new model with the updated config.
"""
self._cfg = cfg
def check_cfg(self):
"""
Checks provided configuration to ensure it has the minimal required
        configuration and that the values are in a reasonable range.
"""
# sample rate
sample_rate = self.cfg.get('sample_rate')
if sample_rate is None:
raise ValueError('Sample rate not provided.')
elif sample_rate < 0:
raise ValueError(f'Sample rate must be positive: {sample_rate}')
# room configuration
room_cfg = self.cfg.get('room')
if not room_cfg:
raise ValueError(
'Room configuration not provided. Expecting RIR manifests in format {subset: path_to_manifest}'
)
# target configuration
target_cfg = self.cfg.get('target')
if not target_cfg:
raise ValueError(
'Target configuration not provided. Expecting audio manifests in format {subset: path_to_manifest}'
)
for key in ['azimuth', 'elevation', 'distance']:
value = target_cfg.get(key)
if value is None or np.isscalar(value):
# no constraint or a fixed dimension is ok
pass
elif len(value) != 2 or not value[0] < value[1]:
# not a valid range
raise ValueError(f'Range must be specified with two positive increasing elements for {key}: {value}')
# noise configuration
noise_cfg = self.cfg.get('noise')
if not noise_cfg:
raise ValueError(
'Noise configuration not provided. Expecting audio manifests in format {subset: path_to_manifest}'
)
# interference configuration
interference_cfg = self.cfg.get('interference')
if not interference_cfg:
logging.info('Interference configuration not provided.')
else:
interference_probability = interference_cfg.get('interference_probability', 0)
max_num_interferers = interference_cfg.get('max_num_interferers', 0)
min_azimuth_to_target = interference_cfg.get('min_azimuth_to_target', 0)
if interference_probability is not None:
if interference_probability < 0:
raise ValueError(
                        f'Interference probability must be non-negative. Current value: {interference_probability}'
)
elif interference_probability > 0:
assert (
max_num_interferers is not None and max_num_interferers > 0
), f'Max number of interferers must be positive. Current value: {max_num_interferers}'
assert (
min_azimuth_to_target is not None and min_azimuth_to_target >= 0
                    ), f'Min azimuth to target must be non-negative. Current value: {min_azimuth_to_target}'
# mix configuration
mix_cfg = self.cfg.get('mix')
if not mix_cfg:
raise ValueError('Mix configuration not provided. Expecting configuration for each subset.')
if 'ref_mic' not in mix_cfg:
raise ValueError('Reference microphone not defined.')
if 'ref_mic_rms' not in mix_cfg:
raise ValueError('Reference microphone RMS not defined.')
def generate_target(self, subset: str) -> dict:
"""
Prepare a dictionary with target configuration.
The output dictionary contains the following information
```
room_index: index of the selected room from the RIR corpus
room_filepath: path to the room simulation file
source: index of the selected source for the target
rt60: reverberation time of the selected room
num_mics: number of microphones
azimuth: azimuth of the target source, relative to the microphone array
elevation: elevation of the target source, relative to the microphone array
distance: distance of the target source, relative to the microphone array
audio_filepath: path to the audio file for the target source
text: text for the target source audio signal, if available
duration: duration of the target source audio signal
```
Args:
            subset: string denoting a subset which will be used to select target
audio and room parameters.
Returns:
Dictionary with target configuration, including room, source index, and audio information.
"""
# Utility function
def select_target_source(room_metadata, room_indices):
"""Find a room and a source that satisfies the constraints.
"""
for room_index in room_indices:
# Select room
room_data = room_metadata[room_index]
# Candidate sources
sources = self.random.choice(room_data['num_sources'], size=self.num_retries, replace=False)
# Select target source in this room
for source in sources:
# Check constraints
constraints_met = []
for constraint in ['azimuth', 'elevation', 'distance']:
if self.cfg.target.get(constraint) is not None:
# Check that the selected source is in the range
source_value = room_data[f'source_{constraint}'][source]
if self.cfg.target[constraint][0] <= source_value <= self.cfg.target[constraint][1]:
constraints_met.append(True)
else:
constraints_met.append(False)
# No need to check the remaining constraints
break
# Check if a feasible source is found
if all(constraints_met):
# A feasible source has been found
return source, room_index
return None, None
# Prepare room & source position
room_metadata = self.metadata[subset]['room']
room_indices = self.random.choice(len(room_metadata), size=self.num_retries, replace=False)
source, room_index = select_target_source(room_metadata, room_indices)
if source is None:
raise RuntimeError(f'Could not find a feasible source given target constraints {self.cfg.target}')
room_data = room_metadata[room_index]
# Optional: select subset of channels
num_available_mics = len(room_data['mic_positions'])
if 'mic_array' in self.cfg:
num_mics = self.cfg.mic_array['num_mics']
mic_selection = self.cfg.mic_array['selection']
if mic_selection == 'random':
logging.debug('Randomly selecting %d mics', num_mics)
selected_mics = self.random.choice(num_available_mics, size=num_mics, replace=False)
elif isinstance(mic_selection, Iterable):
logging.debug('Using explicitly selected mics: %s', str(mic_selection))
assert (
0 <= min(mic_selection) < num_available_mics
), f'Expecting mic_selection in range [0,{num_available_mics}), current value: {mic_selection}'
selected_mics = np.array(mic_selection)
else:
raise ValueError(f'Unexpected value for mic_selection: {mic_selection}')
else:
logging.debug('Using all %d available mics', num_available_mics)
num_mics = num_available_mics
selected_mics = np.arange(num_mics)
# Double-check the number of mics is as expected
assert (
len(selected_mics) == num_mics
), f'Expecting {num_mics} mics, but received {len(selected_mics)} mics: {selected_mics}'
logging.debug('Selected mics: %s', str(selected_mics))
# Calculate distance from the source to each microphone
mic_positions = np.array(room_data['mic_positions'])[selected_mics]
source_position = np.array(room_data['source_position'][source])
distance_source_to_mic = np.linalg.norm(mic_positions - source_position, axis=1)
# Handle relative paths
room_filepath = room_data['room_filepath']
if not os.path.isabs(room_filepath):
manifest_dir = os.path.dirname(self.cfg.room[subset])
room_filepath = os.path.join(manifest_dir, room_filepath)
target_cfg = {
'room_index': int(room_index),
'room_filepath': room_filepath,
'source': source,
'rt60': room_data['rir_rt60_measured'][source],
'selected_mics': selected_mics.tolist(),
# Positions
'source_position': source_position.tolist(),
'mic_positions': mic_positions.tolist(),
# Relative to center of the array
'azimuth': room_data['source_azimuth'][source],
'elevation': room_data['source_elevation'][source],
'distance': room_data['source_distance'][source],
# Relative to mics
'distance_source_to_mic': distance_source_to_mic,
}
return target_cfg
def generate_interference(self, subset: str, target_cfg: dict) -> List[dict]:
"""
Prepare a list of dictionaries with interference configuration.
Args:
subset: string denoting a subset which will be used to select interference audio.
target_cfg: dictionary with target configuration. This is used to determine
the minimal required duration for the noise signal.
Returns:
List of dictionary with interference configuration, including source index and audio information
for one or more interference sources.
"""
if (interference_metadata := self.metadata[subset]['interference']) is None:
# No interference to be configured
return None
# Configure interfering sources
max_num_sources = self.cfg.interference.get('max_num_interferers', 0)
interference_probability = self.cfg.interference.get('interference_probability', 0)
if (
max_num_sources >= 1
and interference_probability > 0
and self.random.uniform(low=0.0, high=1.0) < interference_probability
):
# interference present
num_interferers = self.random.integers(low=1, high=max_num_sources + 1)
else:
# interference not present
return None
# Room setup: same room as target
room_index = target_cfg['room_index']
room_data = self.metadata[subset]['room'][room_index]
feasible_sources = list(range(room_data['num_sources']))
# target source is not eligible
feasible_sources.remove(target_cfg['source'])
# Constraints for interfering sources
min_azimuth_to_target = self.cfg.interference.get('min_azimuth_to_target', 0)
# Prepare interference configuration
interference_cfg = []
for n in range(num_interferers):
# Select a source
source = None
while len(feasible_sources) > 0 and source is None:
# Select a potential source for the target
source = self.random.choice(feasible_sources)
feasible_sources.remove(source)
# Check azimuth separation
if min_azimuth_to_target > 0:
source_azimuth = room_data['source_azimuth'][source]
azimuth_diff = wrap_to_180(source_azimuth - target_cfg['azimuth'])
if abs(azimuth_diff) < min_azimuth_to_target:
# Try again
source = None
continue
if source is None:
logging.warning('Could not select a feasible interference source %d of %s', n, num_interferers)
# Return what we have for now or None
return interference_cfg if interference_cfg else None
# Current source setup
interfering_source = {
'source': source,
'selected_mics': target_cfg['selected_mics'],
'position': room_data['source_position'][source],
'azimuth': room_data['source_azimuth'][source],
'elevation': room_data['source_elevation'][source],
'distance': room_data['source_distance'][source],
}
# Done with interference for this source
interference_cfg.append(interfering_source)
return interference_cfg
def generate_mix(self, subset: str, target_cfg: dict) -> dict:
"""Generate scaling parameters for mixing
the target speech at the microphone, background noise
and interference signal at the microphone.
The output dictionary contains the following information
```
rsnr: reverberant signal-to-noise ratio
rsir: reverberant signal-to-interference ratio
ref_mic: reference microphone for calculating the metrics
ref_mic_rms: RMS of the signal at the reference microphone
```
Args:
subset: string denoting the subset of configuration
target_cfg: dictionary with target configuration
Returns:
Dictionary containing configured RSNR, RSIR, ref_mic
and RMS on ref_mic.
"""
mix_cfg = dict()
for key in ['rsnr', 'rsir', 'ref_mic', 'ref_mic_rms', 'min_duration']:
if key in self.cfg.mix[subset]:
# Take the value from subset config
value = self.cfg.mix[subset].get(key)
else:
# Take the global value
value = self.cfg.mix.get(key)
if value is None:
mix_cfg[key] = None
elif np.isscalar(value):
mix_cfg[key] = value
elif len(value) == 2:
# Select from the given range, including the upper bound
mix_cfg[key] = self.random.integers(low=value[0], high=value[1] + 1)
else:
# Select one of the multiple values
mix_cfg[key] = self.random.choice(value)
if mix_cfg['ref_mic'] == 'closest':
# Select the closest mic as the reference
mix_cfg['ref_mic'] = np.argmin(target_cfg['distance_source_to_mic'])
# Configuration for saving individual components
mix_cfg['save'] = OmegaConf.to_object(self.cfg.mix['save']) if 'save' in self.cfg.mix else {}
return mix_cfg
def generate(self):
"""Generate a corpus of microphone signals by mixing target, background noise
and interference signals.
This method will prepare randomized examples based on the current configuration,
run simulations and save results to output_dir.
"""
logging.info('Generate mixed signals')
# Initialize
self.random = default_rng(seed=self.cfg.random_seed)
# Prepare output dir
output_dir = self.cfg.output_dir
if output_dir.endswith('.yaml'):
output_dir = output_dir[:-5]
# Create absolute path
logging.info('Output dir set to: %s', output_dir)
# Generate all cases
for subset in self.subsets:
output_dir_subset = os.path.join(output_dir, subset)
examples = []
if not os.path.exists(output_dir_subset):
logging.info('Creating output directory: %s', output_dir_subset)
os.makedirs(output_dir_subset)
elif os.path.isdir(output_dir_subset) and len(os.listdir(output_dir_subset)) > 0:
raise RuntimeError(f'Output directory {output_dir_subset} is not empty.')
num_examples = self.cfg.mix[subset].num
logging.info('Preparing %d examples for subset %s', num_examples, subset)
# Generate examples
for n_example in tqdm(range(num_examples), total=num_examples, desc=f'Preparing {subset}'):
# prepare configuration
target_cfg = self.generate_target(subset)
interference_cfg = self.generate_interference(subset, target_cfg)
mix_cfg = self.generate_mix(subset, target_cfg)
# base file name
base_output_filepath = os.path.join(output_dir_subset, f'{subset}_example_{n_example:09d}')
# prepare example
example = {
'sample_rate': self.sample_rate,
'target_cfg': target_cfg,
'interference_cfg': interference_cfg,
'mix_cfg': mix_cfg,
'base_output_filepath': base_output_filepath,
}
examples.append(example)
# Audio data
audio_metadata = {
'target': self.metadata[subset]['target'],
'target_dir': os.path.dirname(self.cfg.target[subset]), # manifest_dir
'noise': self.metadata[subset]['noise'],
'noise_dir': os.path.dirname(self.cfg.noise[subset]), # manifest_dir
}
if interference_cfg is not None:
audio_metadata.update(
{
'interference': self.metadata[subset]['interference'],
'interference_dir': os.path.dirname(self.cfg.interference[subset]), # manifest_dir
}
)
# Simulation
if (num_workers := self.cfg.get('num_workers')) is None:
num_workers = os.cpu_count() - 1
if num_workers is not None and num_workers > 1:
logging.info(f'Simulate using {num_workers} workers')
examples_and_audio_metadata = zip(examples, itertools.repeat(audio_metadata, len(examples)))
with multiprocessing.Pool(processes=num_workers) as pool:
metadata = list(
tqdm(
pool.imap(simulate_room_mix_helper, examples_and_audio_metadata),
total=len(examples),
desc=f'Simulating {subset}',
)
)
else:
logging.info('Simulate using a single worker')
metadata = []
for example in tqdm(examples, total=len(examples), desc=f'Simulating {subset}'):
metadata.append(simulate_room_mix(**example, audio_metadata=audio_metadata))
# Save manifest
manifest_filepath = os.path.join(output_dir, f'{os.path.basename(output_dir)}_{subset}.json')
if os.path.exists(manifest_filepath) and os.path.isfile(manifest_filepath):
raise RuntimeError(f'Manifest config file exists: {manifest_filepath}')
# Make all paths in the manifest relative to the output dir
for data in tqdm(metadata, total=len(metadata), desc=f'Making filepaths relative {subset}'):
for key, val in data.items():
if key.endswith('_filepath') and val is not None:
data[key] = os.path.relpath(val, start=output_dir)
write_manifest(manifest_filepath, metadata)
# Generate plots with information about generated data
plot_filepath = os.path.join(output_dir, f'{os.path.basename(output_dir)}_{subset}_info.png')
if os.path.exists(plot_filepath) and os.path.isfile(plot_filepath):
raise RuntimeError(f'Plot file exists: {plot_filepath}')
plot_mix_manifest_info(manifest_filepath, plot_filepath=plot_filepath)
# Save used configuration for reference
config_filepath = os.path.join(output_dir, 'config.yaml')
if os.path.exists(config_filepath) and os.path.isfile(config_filepath):
raise RuntimeError(f'Output config file exists: {config_filepath}')
OmegaConf.save(self.cfg, config_filepath, resolve=True)
def convolve_rir(signal: np.ndarray, rir: np.ndarray) -> np.ndarray:
"""Convolve signal with a possibly multichannel IR in rir, i.e.,
calculate the following for each channel m:
signal_m = rir_m \ast signal
Args:
signal: single-channel signal (samples,)
rir: single- or multi-channel IR, (samples,) or (samples, channels)
Returns:
out: same length as signal, same number of channels as rir, shape (samples, channels)
"""
num_samples = len(signal)
if rir.ndim == 1:
# convolve and trim to length
out = convolve(signal, rir)[:num_samples]
elif rir.ndim == 2:
num_channels = rir.shape[1]
out = np.zeros((num_samples, num_channels))
for m in range(num_channels):
out[:, m] = convolve(signal, rir[:, m])[:num_samples]
else:
        raise RuntimeError(f'RIR with {rir.ndim} dimensions is not supported')
return out
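# Usage sketch (random data, for illustration only): convolving a 1 s signal with a
# 4-channel RIR yields a (num_samples, 4) output trimmed to the signal length:
#   x = np.random.randn(16000)
#   h = np.random.randn(4000, 4)
#   y = convolve_rir(x, h)                       # shape (16000, 4)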
def calculate_drr(rir: np.ndarray, sample_rate: float, n_direct: List[int], n_0_ms=2.5) -> List[float]:
"""Calculate direct-to-reverberant ratio (DRR) from the measured RIR.
Calculation is done as in eq. (3) from [1].
Args:
rir: room impulse response, shape (num_samples, num_channels)
sample_rate: sample rate for the impulse response
n_direct: direct path delay
n_0_ms: window around n_direct for calculating the direct path energy
Returns:
Calculated DRR for each channel of the input RIR.
References:
[1] Eaton et al, The ACE challenge: Corpus description and performance evaluation, WASPAA 2015
"""
# Define a window around the direct path delay
n_0 = int(n_0_ms * sample_rate / 1000)
len_rir, num_channels = rir.shape
drr = [None] * num_channels
for m in range(num_channels):
# Window around the direct path
dir_start = max(n_direct[m] - n_0, 0)
dir_end = n_direct[m] + n_0
# Power of the direct component
pow_dir = np.sum(np.abs(rir[dir_start:dir_end, m]) ** 2) / len_rir
# Power of the reverberant component
pow_reverberant = (np.sum(np.abs(rir[0:dir_start, m]) ** 2) + np.sum(np.abs(rir[dir_end:, m]) ** 2)) / len_rir
# DRR in dB
drr[m] = pow2db(pow_dir / pow_reverberant)
return drr
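# Usage sketch: the per-channel direct-path delay is typically taken from the anechoic
# RIR, as done in `simulate_room_mix` below (shapes assumed to be (samples, channels)):
#   n_direct = np.argmax(rir_anechoic, axis=0)
#   drr_db = calculate_drr(rir, sample_rate=16000, n_direct=n_direct)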
def normalize_max(x: np.ndarray, max_db: float = 0, eps: float = 1e-16) -> np.ndarray:
"""Normalize max input value to max_db full scale (±1).
Args:
x: input signal
max_db: desired max magnitude compared to full scale
eps: small regularization constant
Returns:
Normalized signal with max absolute value max_db.
"""
max_val = db2mag(max_db)
return max_val * x / (np.max(np.abs(x)) + eps)
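# Minimal sketch: scaling so the largest magnitude reaches -3 dBFS:
#   x_scaled = normalize_max(np.array([0.1, -0.5, 0.25]), max_db=-3)
#   # np.max(np.abs(x_scaled)) is approximately 0.708, assuming db2mag follows the
#   # usual 10 ** (db / 20) convention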
def simultaneously_active_rms(
x: np.ndarray,
y: np.ndarray,
sample_rate: float,
rms_threshold_db: float = -60,
window_len_ms: float = 200,
min_active_duration: float = 0.5,
) -> Tuple[float, float]:
"""Calculate RMS over segments where both input signals are active.
Args:
x: first input signal
y: second input signal
sample_rate: sample rate for input signals in Hz
rms_threshold_db: threshold for determining activity of the signal, relative
to max absolute value
window_len_ms: window length in milliseconds, used for calculating segmental RMS
min_active_duration: minimal duration of the active segments
Returns:
RMS value over active segments for x and y.
"""
if len(x) != len(y):
raise RuntimeError(f'Expecting signals of same length: len(x)={len(x)}, len(y)={len(y)}')
window_len = int(window_len_ms * sample_rate / 1000)
rms_threshold = db2mag(rms_threshold_db) # linear scale
x_normalized = normalize_max(x)
y_normalized = normalize_max(y)
x_active_power = y_active_power = active_len = 0
for start in range(0, len(x) - window_len, window_len):
window = slice(start, start + window_len)
# check activity on the scaled signal
x_window_rms = rms(x_normalized[window])
y_window_rms = rms(y_normalized[window])
if x_window_rms > rms_threshold and y_window_rms > rms_threshold:
# sum the power of the original non-scaled signal
x_active_power += np.sum(np.abs(x[window]) ** 2)
y_active_power += np.sum(np.abs(y[window]) ** 2)
active_len += window_len
if active_len < int(min_active_duration * sample_rate):
raise RuntimeError(
f'Signals are simultaneously active less than {min_active_duration} s: only {active_len/sample_rate} s'
)
# normalize
x_active_power /= active_len
y_active_power /= active_len
return np.sqrt(x_active_power), np.sqrt(y_active_power)
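# Usage sketch (random 2 s signals at 16 kHz, illustration only): RMS is accumulated
# only over 200 ms windows where both signals are above the activity threshold:
#   x = np.random.randn(32000)
#   y = np.random.randn(32000)
#   x_rms, y_rms = simultaneously_active_rms(x, y, sample_rate=16000)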
def scaled_disturbance(
signal: np.ndarray,
disturbance: np.ndarray,
sdr: float,
sample_rate: float = None,
ref_channel: int = 0,
eps: float = 1e-16,
) -> np.ndarray:
"""
Args:
signal: numpy array, shape (num_samples, num_channels)
disturbance: numpy array, same shape as signal
        sdr: desired signal-to-disturbance ratio
sample_rate: sample rate of the input signals
ref_channel: ref mic used to calculate RMS
eps: regularization constant
Returns:
Scaled disturbance, so that signal-to-disturbance ratio at ref_channel
is approximately equal to input SDR during simultaneously active
segment of signal and disturbance.
"""
if signal.shape != disturbance.shape:
raise ValueError(f'Signal and disturbance shapes do not match: {signal.shape} != {disturbance.shape}')
# set scaling based on RMS at ref_mic
signal_rms, disturbance_rms = simultaneously_active_rms(
signal[:, ref_channel], disturbance[:, ref_channel], sample_rate=sample_rate
)
disturbance_gain = db2mag(-sdr) * signal_rms / (disturbance_rms + eps)
# scale disturbance
scaled_disturbance = disturbance_gain * disturbance
return scaled_disturbance
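# Usage sketch: scale a noise signal so the reverberant SNR at the reference channel is
# approximately 10 dB; both inputs have shape (num_samples, num_channels):
#   noise_scaled = scaled_disturbance(
#       signal=target_reverberant, disturbance=noise, sdr=10, sample_rate=16000, ref_channel=0
#   )
#   mic = target_reverberant + noise_scaled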
def prepare_source_signal(
signal_type: str,
sample_rate: int,
audio_data: List[dict],
audio_dir: Optional[str] = None,
min_duration: Optional[int] = None,
ref_signal: Optional[np.ndarray] = None,
mic_positions: Optional[np.ndarray] = None,
num_retries: int = 10,
) -> tuple:
"""Prepare an audio signal for a source.
Args:
signal_type: 'point' or 'diffuse'
sample_rate: Sampling rate for the signal
audio_data: List of audio items, each is a dictionary with audio_filepath, duration, offset and optionally text
audio_dir: Base directory for resolving paths, e.g., manifest basedir
min_duration: Minimal duration to be loaded if ref_signal is not provided, in seconds
ref_signal: Optional, used to determine the length of the signal
mic_positions: Optional, used to prepare approximately diffuse signal
num_retries: Number of retries when selecting the source files
Returns:
(audio_signal, metadata), where audio_signal is an ndarray and metadata is a dictionary
with audio filepaths, durations and offsets
"""
    if signal_type not in ['point', 'diffuse']:
raise ValueError(f'Unexpected signal type {signal_type}.')
if audio_data is None:
# No data to load
return None
metadata = {}
if ref_signal is None:
audio_signal = None
# load at least one sample if min_duration is not provided
samples_to_load = int(min_duration * sample_rate) if min_duration is not None else 1
source_signals_metadata = {'audio_filepath': [], 'duration': [], 'offset': [], 'text': []}
while samples_to_load > 0:
# Select a random item and load the audio
item = random.choice(audio_data)
audio_filepath = item['audio_filepath']
if not os.path.isabs(audio_filepath) and audio_dir is not None:
audio_filepath = os.path.join(audio_dir, audio_filepath)
# Load audio
check_min_sample_rate(audio_filepath, sample_rate)
audio_segment = AudioSegment.from_file(
audio_file=audio_filepath,
target_sr=sample_rate,
duration=item['duration'],
offset=item.get('offset', 0),
)
if signal_type == 'point':
if audio_segment.num_channels > 1:
raise RuntimeError(
f'Expecting single-channel source signal, but received {audio_segment.num_channels}. File: {audio_filepath}'
)
else:
raise ValueError(f'Unexpected signal type {signal_type}.')
source_signals_metadata['audio_filepath'].append(audio_filepath)
source_signals_metadata['duration'].append(item['duration'])
            source_signals_metadata['offset'].append(item.get('offset', 0))
source_signals_metadata['text'].append(item.get('text'))
# not perfect, since different files may have different distributions
segment_samples = normalize_max(audio_segment.samples)
# concatenate
audio_signal = (
np.concatenate((audio_signal, segment_samples)) if audio_signal is not None else segment_samples
)
# remaining samples
samples_to_load -= len(segment_samples)
# Finally, we need only the metadata for the complete signal
metadata = {
'duration': sum(source_signals_metadata['duration']),
'offset': 0,
}
# Add text only if all source signals have text
if all([isinstance(tt, str) for tt in source_signals_metadata['text']]):
metadata['text'] = ' '.join(source_signals_metadata['text'])
else:
# Load a signal with total_len samples and ensure it has enough simultaneous activity/overlap with ref_signal
# Concatenate multiple files if necessary
total_len = len(ref_signal)
for n in range(num_retries):
audio_signal = None
source_signals_metadata = {'audio_filepath': [], 'duration': [], 'offset': []}
if signal_type == 'point':
samples_to_load = total_len
elif signal_type == 'diffuse':
# Load longer signal so it can be reshaped into (samples, mics) and
# used to generate approximately diffuse noise field
num_mics = len(mic_positions)
samples_to_load = num_mics * total_len
while samples_to_load > 0:
# Select an audio file
item = random.choice(audio_data)
audio_filepath = item['audio_filepath']
if not os.path.isabs(audio_filepath) and audio_dir is not None:
audio_filepath = os.path.join(audio_dir, audio_filepath)
# Load audio signal
check_min_sample_rate(audio_filepath, sample_rate)
if (max_offset := item['duration'] - np.ceil(samples_to_load / sample_rate)) > 0:
# Load with a random offset if the example is longer than samples_to_load
offset = random.uniform(0, max_offset)
duration = -1
else:
# Load the whole file
offset, duration = 0, item['duration']
audio_segment = AudioSegment.from_file(
audio_file=audio_filepath, target_sr=sample_rate, duration=duration, offset=offset
)
# Prepare a single-channel signal
if audio_segment.num_channels == 1:
# Take all samples
segment_samples = audio_segment.samples
else:
# Take a random channel
selected_channel = random.choice(range(audio_segment.num_channels))
segment_samples = audio_segment.samples[:, selected_channel]
source_signals_metadata['audio_filepath'].append(audio_filepath)
source_signals_metadata['duration'].append(len(segment_samples) / sample_rate)
source_signals_metadata['offset'].append(offset)
# not perfect, since different files may have different distributions
segment_samples = normalize_max(segment_samples)
# concatenate
audio_signal = (
np.concatenate((audio_signal, segment_samples)) if audio_signal is not None else segment_samples
)
# remaining samples
samples_to_load -= len(segment_samples)
if signal_type == 'diffuse' and num_mics > 1:
try:
# Trim and reshape to num_mics to prepare num_mics source signals
audio_signal = audio_signal[: num_mics * total_len].reshape(num_mics, -1).T
# Make spherically diffuse noise
audio_signal = generate_approximate_noise_field(
mic_positions=np.array(mic_positions), noise_signal=audio_signal, sample_rate=sample_rate
)
except Exception as e:
logging.info('Failed to generate approximate noise field: %s', str(e))
logging.info('Try again.')
# Try again
audio_signal, source_signals_metadata = None, {}
continue
# Trim to length
audio_signal = audio_signal[:total_len, ...]
# Include the channel dimension if the reference includes it
if ref_signal.ndim == 2 and audio_signal.ndim == 1:
audio_signal = audio_signal[:, None]
try:
# Signal and ref_signal should be simultaneously active
simultaneously_active_rms(ref_signal, audio_signal, sample_rate=sample_rate)
# We have enough overlap
break
except Exception as e:
# Signal and ref_signal are not overlapping, try again
logging.info('Exception: %s', str(e))
logging.info('Signals are not overlapping, try again.')
audio_signal, source_signals_metadata = None, {}
continue
if audio_signal is None:
logging.warning('Audio signal not set: %s.', signal_type)
metadata['source_signals'] = source_signals_metadata
return audio_signal, metadata
def check_min_sample_rate(filepath: str, sample_rate: float):
"""Make sure the file's sample rate is at least sample_rate.
This will make sure that we have only downsampling if loading
this file, while upsampling is not permitted.
Args:
filepath: path to a file
sample_rate: desired sample rate
"""
file_sample_rate = librosa.get_samplerate(path=filepath)
if file_sample_rate < sample_rate:
raise RuntimeError(
f'Sample rate ({file_sample_rate}) is lower than the desired sample rate ({sample_rate}). File: {filepath}.'
)
def simulate_room_mix(
sample_rate: int,
target_cfg: dict,
interference_cfg: dict,
mix_cfg: dict,
audio_metadata: dict,
base_output_filepath: str,
max_amplitude: float = 0.999,
eps: float = 1e-16,
) -> dict:
"""Simulate mixture signal at the microphone, including target, noise and
interference signals and mixed at specific RSNR and RSIR.
Args:
sample_rate: Sample rate for all signals
target_cfg: Dictionary with configuration of the target. Includes
room_filepath, source index, audio_filepath, duration
noise_cfg: List of dictionaries, where each item includes audio_filepath,
offset and duration.
interference_cfg: List of dictionaries, where each item contains source
index
mix_cfg: Dictionary with the mixture configuration. Includes RSNR, RSIR,
ref_mic and ref_mic_rms.
audio_metadata: Dictionary with a list of files for target, noise and interference
base_output_filepath: All output audio files will be saved with this prefix by
            adding a different suffix for each component, e.g., _mic.wav.
max_amplitude: Maximum amplitude of the mic signal, used to prevent clipping.
eps: Small regularization constant.
Returns:
Dictionary with metadata based on the mixture setup and
simulation results. This corresponds to a line of the
output manifest file.
"""
# Local utilities
def load_rir(
room_filepath: str, source: int, selected_mics: list, sample_rate: float, rir_key: str = 'rir'
) -> np.ndarray:
"""Load a RIR and check that the sample rate is matching the desired sample rate
Args:
room_filepath: Path to a room simulation in an h5 file
source: Index of the desired source
sample_rate: Sample rate of the simulation
rir_key: Key of the RIR to load from the simulation.
Returns:
Numpy array with shape (num_samples, num_channels)
"""
rir, rir_sample_rate = load_rir_simulation(room_filepath, source=source, rir_key=rir_key)
if rir_sample_rate != sample_rate:
raise RuntimeError(
                f'RIR sample rate ({rir_sample_rate}) does not match the expected sample rate ({sample_rate}). File: {room_filepath}'
)
return rir[:, selected_mics]
def get_early_rir(
rir: np.ndarray, rir_anechoic: np.ndarray, sample_rate: int, early_duration: float = 0.050
) -> np.ndarray:
"""Return only the early part of the RIR.
"""
early_len = int(early_duration * sample_rate)
direct_path_delay = np.min(np.argmax(rir_anechoic, axis=0))
rir_early = rir.copy()
rir_early[direct_path_delay + early_len :, :] = 0
return rir_early
def save_audio(
base_path: str,
tag: str,
audio_signal: Optional[np.ndarray],
sample_rate: int,
save: str = 'all',
ref_mic: Optional[int] = None,
format: str = 'wav',
subtype: str = 'float',
):
"""Save audio signal and return filepath.
"""
if (audio_signal is None) or (not save):
return None
if save == 'ref_mic':
# save only ref_mic
audio_signal = audio_signal[:, ref_mic]
audio_filepath = base_path + f'_{tag}.{format}'
sf.write(audio_filepath, audio_signal, sample_rate, subtype)
return audio_filepath
# Target RIRs
target_rir = load_rir(
target_cfg['room_filepath'],
source=target_cfg['source'],
selected_mics=target_cfg['selected_mics'],
sample_rate=sample_rate,
)
target_rir_anechoic = load_rir(
target_cfg['room_filepath'],
source=target_cfg['source'],
sample_rate=sample_rate,
selected_mics=target_cfg['selected_mics'],
rir_key='anechoic',
)
target_rir_early = get_early_rir(rir=target_rir, rir_anechoic=target_rir_anechoic, sample_rate=sample_rate)
# Target signals
target_signal, target_metadata = prepare_source_signal(
signal_type='point',
sample_rate=sample_rate,
audio_data=audio_metadata['target'],
audio_dir=audio_metadata['target_dir'],
min_duration=mix_cfg['min_duration'],
)
source_signals_metadata = {'target': target_metadata['source_signals']}
# Convolve target
target_reverberant = convolve_rir(target_signal, target_rir)
target_anechoic = convolve_rir(target_signal, target_rir_anechoic)
target_early = convolve_rir(target_signal, target_rir_early)
# Prepare noise signal
noise, noise_metadata = prepare_source_signal(
signal_type='diffuse',
sample_rate=sample_rate,
mic_positions=target_cfg['mic_positions'],
audio_data=audio_metadata['noise'],
audio_dir=audio_metadata['noise_dir'],
ref_signal=target_reverberant,
)
source_signals_metadata['noise'] = noise_metadata['source_signals']
# Prepare interference signal
if interference_cfg is None:
interference = None
else:
# Load interference signals
interference = 0
source_signals_metadata['interference'] = []
for i_cfg in interference_cfg:
# Load single-channel signal for directional interference
i_signal, i_metadata = prepare_source_signal(
signal_type='point',
sample_rate=sample_rate,
audio_data=audio_metadata['interference'],
audio_dir=audio_metadata['interference_dir'],
ref_signal=target_signal,
)
source_signals_metadata['interference'].append(i_metadata['source_signals'])
            # Load RIR from the same room as the target, but a different source
i_rir = load_rir(
target_cfg['room_filepath'],
source=i_cfg['source'],
selected_mics=i_cfg['selected_mics'],
sample_rate=sample_rate,
)
# Convolve interference
i_reverberant = convolve_rir(i_signal, i_rir)
# Sum
interference += i_reverberant
# Scale and add components of the signal
mic = target_reverberant.copy()
if noise is not None:
noise = scaled_disturbance(
signal=target_reverberant,
disturbance=noise,
sdr=mix_cfg['rsnr'],
sample_rate=sample_rate,
ref_channel=mix_cfg['ref_mic'],
)
# Update mic signal
mic += noise
if interference is not None:
interference = scaled_disturbance(
signal=target_reverberant,
disturbance=interference,
sdr=mix_cfg['rsir'],
sample_rate=sample_rate,
ref_channel=mix_cfg['ref_mic'],
)
# Update mic signal
mic += interference
# Set the final mic signal level
mic_rms = rms(mic[:, mix_cfg['ref_mic']])
global_gain = db2mag(mix_cfg['ref_mic_rms']) / (mic_rms + eps)
mic_max = np.max(np.abs(mic))
if (clipped_max := mic_max * global_gain) > max_amplitude:
# Downscale the global gain to prevent clipping + adjust ref_mic_rms accordingly
clipping_prevention_gain = max_amplitude / clipped_max
global_gain *= clipping_prevention_gain
mix_cfg['ref_mic_rms'] += mag2db(clipping_prevention_gain)
logging.debug(
'Clipping prevented for example %s (protection gain: %.2f dB)',
base_output_filepath,
mag2db(clipping_prevention_gain),
)
# save signals
signals = {
'mic': mic,
'target_reverberant': target_reverberant,
'target_anechoic': target_anechoic,
'target_early': target_early,
'noise': noise,
'interference': interference,
}
metadata = {}
for tag, signal in signals.items():
if signal is not None:
# scale all signal components with the global gain
signal = global_gain * signal
audio_filepath = save_audio(
base_path=base_output_filepath,
tag=tag,
audio_signal=signal,
sample_rate=sample_rate,
save=mix_cfg['save'].get(tag, 'all'),
ref_mic=mix_cfg['ref_mic'],
format=mix_cfg['save'].get('format', 'wav'),
subtype=mix_cfg['save'].get('subtype', 'float'),
)
if tag == 'mic':
metadata['audio_filepath'] = audio_filepath
else:
metadata[tag + '_filepath'] = audio_filepath
# Add metadata
metadata.update(
{
'text': target_metadata.get('text'),
'duration': target_metadata['duration'],
'target_cfg': target_cfg,
'interference_cfg': interference_cfg,
'mix_cfg': mix_cfg,
'ref_channel': mix_cfg.get('ref_mic'),
'rt60': target_cfg.get('rt60'),
'drr': calculate_drr(target_rir, sample_rate, n_direct=np.argmax(target_rir_anechoic, axis=0)),
'rsnr': None if noise is None else mix_cfg['rsnr'],
'rsir': None if interference is None else mix_cfg['rsir'],
'source_signals': source_signals_metadata,
}
)
return convert_numpy_to_serializable(metadata)
def simulate_room_mix_helper(example_and_audio_metadata: tuple) -> dict:
"""Wrapper around `simulate_room_mix` for pool.imap.
Args:
args: example and audio_metadata that are forwarded to `simulate_room_mix`
Returns:
Dictionary with metadata, see `simulate_room_mix`
"""
example, audio_metadata = example_and_audio_metadata
return simulate_room_mix(**example, audio_metadata=audio_metadata)
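# Usage sketch (assumed worker count, illustration only): each pool task receives an
# (example, audio_metadata) tuple, as prepared in RIRMixGenerator.generate above:
#   tasks = zip(examples, itertools.repeat(audio_metadata, len(examples)))
#   with multiprocessing.Pool(processes=4) as pool:
#       metadata = list(pool.imap(simulate_room_mix_helper, tasks))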
def plot_mix_manifest_info(filepath: str, plot_filepath: str = None):
"""Plot distribution of parameters from the manifest file.
Args:
filepath: path to a RIR corpus manifest file
plot_filepath: path to save the plot at
"""
metadata = read_manifest(filepath)
# target info
target_distance = []
target_azimuth = []
target_elevation = []
target_duration = []
# room config
rt60 = []
drr = []
# noise
rsnr = []
rsir = []
# get the required data
for data in metadata:
# target info
target_distance.append(data['target_cfg']['distance'])
target_azimuth.append(data['target_cfg']['azimuth'])
target_elevation.append(data['target_cfg']['elevation'])
target_duration.append(data['duration'])
# room config
rt60.append(data['rt60'])
drr += data['drr'] # average DRR across all mics
# noise
if data['rsnr'] is not None:
rsnr.append(data['rsnr'])
if data['rsir'] is not None:
rsir.append(data['rsir'])
# plot
plt.figure(figsize=(12, 6))
plt.subplot(2, 4, 1)
plt.hist(target_distance, label='distance')
plt.xlabel('distance / m')
plt.ylabel('# examples')
plt.title('Target-to-array distance')
plt.subplot(2, 4, 2)
plt.hist(target_azimuth, label='azimuth')
plt.xlabel('azimuth / deg')
plt.ylabel('# examples')
plt.title('Target-to-array azimuth')
plt.subplot(2, 4, 3)
plt.hist(target_elevation, label='elevation')
plt.xlabel('elevation / deg')
plt.ylabel('# examples')
plt.title('Target-to-array elevation')
plt.subplot(2, 4, 4)
plt.hist(target_duration, label='duration')
plt.xlabel('time / s')
plt.ylabel('# examples')
plt.title('Target duration')
plt.subplot(2, 4, 5)
plt.hist(rt60, label='RT60')
plt.xlabel('RT60 / s')
plt.ylabel('# examples')
plt.title('RT60')
plt.subplot(2, 4, 6)
plt.hist(drr, label='DRR')
plt.xlabel('DRR / dB')
plt.ylabel('# examples')
plt.title('DRR [avg over mics]')
if len(rsnr) > 0:
plt.subplot(2, 4, 7)
plt.hist(rsnr, label='RSNR')
plt.xlabel('RSNR / dB')
plt.ylabel('# examples')
plt.title(f'RSNR [{100 * len(rsnr) / len(rt60):.0f}% ex]')
if len(rsir):
plt.subplot(2, 4, 8)
plt.hist(rsir, label='RSIR')
plt.xlabel('RSIR / dB')
plt.ylabel('# examples')
plt.title(f'RSIR [{100 * len(rsir) / len(rt60):.0f}% ex]')
for n in range(8):
plt.subplot(2, 4, n + 1)
plt.grid()
plt.legend(loc='lower left')
plt.tight_layout()
if plot_filepath is not None:
plt.savefig(plot_filepath)
plt.close()
logging.info('Plot saved at %s', plot_filepath)
|
NeMo-main
|
nemo/collections/asr/data/data_simulation.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from typing import Dict, List, Optional, Union
import torch
import webdataset as wd
from nemo.collections.asr.data.audio_to_text import cache_datastore_manifests, expand_sharded_filepaths
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.asr.parts.preprocessing.segment import available_formats as valid_sf_formats
from nemo.collections.common.parts.preprocessing import collections
from nemo.core.classes import Dataset, IterableDataset
from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType, RegressionValuesType
from nemo.utils import logging
# List of valid file formats (prioritized by order of importance)
VALID_FILE_FORMATS = ';'.join(['wav', 'mp3', 'flac'] + [fmt.lower() for fmt in valid_sf_formats.keys()])
def repeat_signal(signal: torch.Tensor, sig_len: int, required_length: int) -> torch.Tensor:
"""repeat signal to make short signal to have required_length
Args:
signal (Tensor): input signal
sig_len (int): length of input signal
required_length (int): length of generated signal
Returns:
signal (Tensor): generated signal of required_length by repeating itself.
"""
sub: torch.Tensor = torch.tensor([])
repeat = int(required_length // sig_len)
rem = int(required_length % sig_len)
rep_sig: torch.Tensor = torch.cat(repeat * [signal])
if rem > 0:
sub = signal[-rem:]
signal = torch.cat((rep_sig, sub))
else:
signal = rep_sig
return signal
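# Minimal sketch: a 3-sample signal repeated to 8 samples is tiled twice and then
# padded with its last 2 samples:
#   repeat_signal(torch.tensor([1., 2., 3.]), sig_len=3, required_length=8)
#   # -> tensor([1., 2., 3., 1., 2., 3., 2., 3.])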
def normalize(signal):
"""normalize signal
Args:
signal(FloatTensor): signal to be normalized.
"""
signal_minusmean = signal - signal.mean()
return signal_minusmean / signal_minusmean.abs().max()
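# Minimal sketch: the output is zero-mean and scaled to unit peak magnitude:
#   normalize(torch.tensor([0.0, 1.0, 2.0]))
#   # -> tensor([-1., 0., 1.])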
def count_occurence(manifest_file_id):
"""Count number of wav files in Dict manifest_file_id. Use for _TarredAudioToLabelDataset.
Args:
manifest_file_id (Dict): Dict of files and their corresponding id. {'A-sub0' : 1, ..., 'S-sub10':100}
Returns:
count (Dict): Dict of wav files {'A' : 2, ..., 'S':10}
"""
count = dict()
for i in manifest_file_id:
audio_filename = i.split("-sub")[0]
count[audio_filename] = count.get(audio_filename, 0) + 1
return count
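# Minimal sketch: sub-segment ids such as 'A-sub0', 'A-sub1', 'S-sub0' are grouped by
# their base file name:
#   count_occurence({'A-sub0': 1, 'A-sub1': 2, 'S-sub0': 3})
#   # -> {'A': 2, 'S': 1}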
def _speech_collate_fn(batch, pad_id):
"""collate batch of audio sig, audio len, tokens, tokens len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
"""
_, audio_lengths, _, tokens_lengths = zip(*batch)
max_audio_len = 0
has_audio = audio_lengths[0] is not None
if has_audio:
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
audio_signal, tokens = [], []
for sig, sig_len, tokens_i, tokens_i_len in batch:
if has_audio:
sig_len = sig_len.item()
if sig_len < max_audio_len:
pad = (0, max_audio_len - sig_len)
sig = torch.nn.functional.pad(sig, pad)
audio_signal.append(sig)
tokens_i_len = tokens_i_len.item()
if tokens_i_len < max_tokens_len:
pad = (0, max_tokens_len - tokens_i_len)
tokens_i = torch.nn.functional.pad(tokens_i, pad, value=pad_id)
tokens.append(tokens_i)
if has_audio:
audio_signal = torch.stack(audio_signal)
audio_lengths = torch.stack(audio_lengths)
else:
audio_signal, audio_lengths = None, None
tokens = torch.stack(tokens)
tokens_lengths = torch.stack(tokens_lengths)
return audio_signal, audio_lengths, tokens, tokens_lengths
def _fixed_seq_collate_fn(self, batch):
"""collate batch of audio sig, audio len, tokens, tokens len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
"""
_, audio_lengths, _, tokens_lengths = zip(*batch)
has_audio = audio_lengths[0] is not None
fixed_length = int(max(audio_lengths))
audio_signal, tokens, new_audio_lengths = [], [], []
for sig, sig_len, tokens_i, _ in batch:
if has_audio:
sig_len = sig_len.item()
            chunk_len = sig_len - fixed_length
            if chunk_len < 0:
repeat = fixed_length // sig_len
rem = fixed_length % sig_len
sub = sig[-rem:] if rem > 0 else torch.tensor([])
rep_sig = torch.cat(repeat * [sig])
sig = torch.cat((rep_sig, sub))
new_audio_lengths.append(torch.tensor(fixed_length))
audio_signal.append(sig)
tokens.append(tokens_i)
if has_audio:
audio_signal = torch.stack(audio_signal)
audio_lengths = torch.stack(new_audio_lengths)
else:
audio_signal, audio_lengths = None, None
tokens = torch.stack(tokens)
tokens_lengths = torch.stack(tokens_lengths)
return audio_signal, audio_lengths, tokens, tokens_lengths
def _vad_frame_seq_collate_fn(self, batch):
"""collate batch of audio sig, audio len, tokens, tokens len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
batch size equals to 1.
"""
slice_length = int(self.featurizer.sample_rate * self.window_length_in_sec)
_, audio_lengths, _, tokens_lengths = zip(*batch)
slice_length = int(min(slice_length, max(audio_lengths)))
shift = int(self.featurizer.sample_rate * self.shift_length_in_sec)
has_audio = audio_lengths[0] is not None
audio_signal, num_slices, tokens, audio_lengths = [], [], [], []
append_len_start = slice_length // 2
append_len_end = slice_length - slice_length // 2
for sig, sig_len, tokens_i, _ in batch:
if self.normalize_audio:
sig = normalize(sig)
start = torch.zeros(append_len_start)
end = torch.zeros(append_len_end)
sig = torch.cat((start, sig, end))
sig_len += slice_length
if has_audio:
slices = torch.div(sig_len - slice_length, shift, rounding_mode='trunc')
for slice_id in range(slices):
start_idx = slice_id * shift
end_idx = start_idx + slice_length
signal = sig[start_idx:end_idx]
audio_signal.append(signal)
num_slices.append(slices)
tokens.extend([tokens_i] * slices)
audio_lengths.extend([slice_length] * slices)
if has_audio:
audio_signal = torch.stack(audio_signal)
audio_lengths = torch.tensor(audio_lengths)
else:
audio_signal, audio_lengths = None, None
tokens = torch.stack(tokens)
tokens_lengths = torch.tensor(num_slices)
return audio_signal, audio_lengths, tokens, tokens_lengths
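# Illustrative sketch (not part of the original module): ``_vad_frame_seq_collate_fn``
# zero-pads half a window on each side of the signal and slices it into overlapping
# frames of ``window_length_in_sec`` every ``shift_length_in_sec``. The stand-in
# ``self`` built with SimpleNamespace and all values below are hypothetical.
def _example_vad_frame_collate():
    from types import SimpleNamespace
    stub = SimpleNamespace(
        featurizer=SimpleNamespace(sample_rate=16000),
        window_length_in_sec=0.63,
        shift_length_in_sec=0.08,
        normalize_audio=False,
    )
    sig = torch.randn(16000)  # one second of fake mono audio
    batch = [(sig, torch.tensor(16000), torch.tensor(0), torch.tensor(1))]
    audio, audio_len, tokens, num_slices = _vad_frame_seq_collate_fn(stub, batch)
    # audio: [num_frames, 10080], where 10080 samples = 0.63 s at 16 kHz
    return audio.shape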
class _AudioLabelDataset(Dataset):
"""
Dataset that loads tensors via a json file containing paths to audio files,
their target labels, durations, and offsets (in seconds). Each new line is a
different sample. JSON lines should be of the following format::
{"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
target_label_0, "offset": offset_in_sec_0}
...
{"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
target_label_n, "offset": offset_in_sec_n}
Args:
manifest_filepath (Union[str, List[str]]): Dataset parameter. Path to JSON containing data.
labels (list): Dataset parameter. List of target classes that can be output by the speaker recognition model.
featurizer
min_duration (float): Dataset parameter. All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
trim (bool): Whether to trim silence from the beginning and end of the audio signal using librosa.effects.trim().
Defaults to False.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
output_types = {
'audio_signal': NeuralType(
('B', 'T'),
AudioSignal(freq=self._sample_rate)
if self is not None and hasattr(self, '_sample_rate')
else AudioSignal(),
),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
}
if self.is_regression_task:
output_types.update(
{
'targets': NeuralType(tuple('B'), RegressionValuesType()),
'targets_length': NeuralType(tuple('B'), LengthsType()),
}
)
else:
output_types.update(
{'label': NeuralType(tuple('B'), LabelsType()), 'label_length': NeuralType(tuple('B'), LengthsType()),}
)
return output_types
def __init__(
self,
*,
manifest_filepath: Union[str, List[str]],
labels: List[str],
featurizer,
min_duration: Optional[float] = 0.1,
max_duration: Optional[float] = None,
trim: bool = False,
is_regression_task: bool = False,
cal_labels_occurrence: Optional[bool] = False,
):
super().__init__()
if isinstance(manifest_filepath, str):
manifest_filepath = manifest_filepath.split(',')
cache_datastore_manifests(manifest_filepaths=manifest_filepath, cache_audio=True)
self.collection = collections.ASRSpeechLabel(
manifests_files=manifest_filepath,
min_duration=min_duration,
max_duration=max_duration,
is_regression_task=is_regression_task,
cal_labels_occurrence=cal_labels_occurrence,
)
self.featurizer = featurizer
self.trim = trim
self.is_regression_task = is_regression_task
if not is_regression_task:
self.labels = labels if labels else self.collection.uniq_labels
self.num_classes = len(self.labels) if self.labels is not None else 1
self.label2id, self.id2label = {}, {}
self.id2occurrence, self.labels_occurrence = {}, []
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
if cal_labels_occurrence:
self.id2occurrence[label_id] = self.collection.labels_occurrence[label]
if cal_labels_occurrence:
self.labels_occurrence = [self.id2occurrence[k] for k in sorted(self.id2occurrence)]
for idx in range(len(self.labels[:5])):
logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
else:
self.labels = []
self.num_classes = 1
def __len__(self):
return len(self.collection)
def __getitem__(self, index):
sample = self.collection[index]
offset = sample.offset
if offset is None:
offset = 0
features = self.featurizer.process(sample.audio_file, offset=offset, duration=sample.duration, trim=self.trim)
f, fl = features, torch.tensor(features.shape[0]).long()
if not self.is_regression_task:
t = torch.tensor(self.label2id[sample.label]).long()
else:
t = torch.tensor(sample.label).float()
tl = torch.tensor(1).long() # For compatibility with collate_fn used later
return f, fl, t, tl
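# Illustrative sketch (not part of the original module): the one-JSON-object-per-line
# manifest layout consumed by ``_AudioLabelDataset`` and its subclasses. The path,
# duration and label below are hypothetical placeholders.
def _example_write_label_manifest(manifest_path="speech_labels.json"):
    import json
    entry = {"audio_filepath": "/data/audio_0.wav", "duration": 1.2, "label": "speech", "offset": 0.0}
    with open(manifest_path, "w", encoding="utf-8") as fout:
        fout.write(json.dumps(entry) + "\n")
    return manifest_path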
# Ported from https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/data/speech2text/speech_commands.py
class AudioToClassificationLabelDataset(_AudioLabelDataset):
"""
Dataset that loads tensors via a json file containing paths to audio
files, command class, and durations (in seconds). Each new line is a
different sample. Example below:
{"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
target_label_0, "offset": offset_in_sec_0}
...
{"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
target_label_n, "offset": offset_in_sec_n}
Args:
manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can
be comma-separated paths.
labels (Optional[list]): String containing all the possible labels to map to
if None then automatically picks from ASRSpeechLabel collection.
featurizer: Initialized featurizer class that converts paths of
audio to feature tensors
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include
in dataset
trim: Boolean flag whether to trim the audio
"""
def _collate_fn(self, batch):
return _speech_collate_fn(batch, pad_id=0)
class AudioToSpeechLabelDataset(_AudioLabelDataset):
"""
Dataset that loads tensors via a json file containing paths to audio
files, command class, and durations (in seconds). Each new line is a
different sample. Example below:
{"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
target_label_0, "offset": offset_in_sec_0}
...
{"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
target_label_n, "offset": offset_in_sec_n}
Args:
manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can
be comma-separated paths.
labels (Optional[list]): String containing all the possible labels to map to
if None then automatically picks from ASRSpeechLabel collection.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
trim (bool): Whether to trim silence from the beginning and end
of the audio signal using librosa.effects.trim().
Defaults to False.
window_length_in_sec (float): length of window/slice (in seconds)
Use this for speaker recognition and VAD tasks.
shift_length_in_sec (float): amount of shift of window for generating the frame for VAD task in a batch
Use this for VAD task during inference.
normalize_audio (bool): Whether to normalize audio signal.
Defaults to False.
is_regression_task (bool): Whether the dataset is for a regression task instead of classification.
Defaults to False.
cal_labels_occurrence (bool): Whether to calculate occurrence of labels
Defaults to False.
"""
def __init__(
self,
*,
manifest_filepath: Union[str, List[str]],
labels: List[str],
featurizer,
min_duration: Optional[float] = 0.1,
max_duration: Optional[float] = None,
trim: bool = False,
window_length_in_sec: Optional[float] = 8,
shift_length_in_sec: Optional[float] = 1,
normalize_audio: bool = False,
is_regression_task: bool = False,
cal_labels_occurrence: Optional[bool] = False,
):
self.window_length_in_sec = window_length_in_sec
self.shift_length_in_sec = shift_length_in_sec
self.normalize_audio = normalize_audio
logging.debug("Window/slice length considered for collate func is {}".format(self.window_length_in_sec))
logging.debug("Shift length considered for collate func is {}".format(self.shift_length_in_sec))
super().__init__(
manifest_filepath=manifest_filepath,
labels=labels,
featurizer=featurizer,
min_duration=min_duration,
max_duration=max_duration,
trim=trim,
is_regression_task=is_regression_task,
cal_labels_occurrence=cal_labels_occurrence,
)
def fixed_seq_collate_fn(self, batch):
return _fixed_seq_collate_fn(self, batch)
def vad_frame_seq_collate_fn(self, batch):
return _vad_frame_seq_collate_fn(self, batch)
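# Illustrative sketch (not part of the original module): wiring AudioToSpeechLabelDataset
# into a PyTorch DataLoader with its fixed-length collate function. The manifest path,
# label set and sample rate are hypothetical; featurizer arguments beyond sample_rate
# are left at their defaults.
def _example_speech_label_dataloader():
    from torch.utils.data import DataLoader
    featurizer = WaveformFeaturizer(sample_rate=16000)
    dataset = AudioToSpeechLabelDataset(
        manifest_filepath="speech_labels.json",
        labels=["speech", "noise"],
        featurizer=featurizer,
        window_length_in_sec=8.0,
    )
    return DataLoader(dataset, batch_size=4, collate_fn=dataset.fixed_seq_collate_fn)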
class _TarredAudioLabelDataset(IterableDataset):
"""
A similar Dataset to the AudioLabelDataSet, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToSpeechLabelDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the label and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioLabelDataSet; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
labels (list): Dataset parameter.
List of target classes that can be output by the speaker recognition model.
featurizer
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
trim (bool): Whether to trim silence from the beginning and end
of the audio signal using librosa.effects.trim().
Defaults to False.
window_length_in_sec (float): length of slice/window (in seconds). Pass this only for speaker recognition and VAD tasks.
shift_length_in_sec (float): amount of shift of the window for generating frames for the VAD task in a batch. Pass this only for the VAD task during inference.
normalize_audio (bool): Whether to normalize audio signal. Defaults to False.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on the value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
is_regression_task (bool): Whether it is a regression task. Defaults to False.
"""
def __init__(
self,
*,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: Union[str, List[str]],
labels: List[str],
featurizer,
shuffle_n: int = 0,
min_duration: Optional[float] = 0.1,
max_duration: Optional[float] = None,
trim: bool = False,
shard_strategy: str = "scatter",
global_rank: int = 0,
world_size: int = 0,
is_regression_task: bool = False,
):
cache_datastore_manifests(manifest_filepaths=manifest_filepath)
self.collection = collections.ASRSpeechLabel(
manifests_files=manifest_filepath,
min_duration=min_duration,
max_duration=max_duration,
index_by_file_id=True, # Must set this so the manifest lines can be indexed by file ID
)
self.file_occurence = count_occurence(self.collection.mapping)
self.featurizer = featurizer
self.trim = trim
self.labels = labels if labels else self.collection.uniq_labels
self.num_classes = len(self.labels)
self.label2id, self.id2label = {}, {}
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
for idx in range(len(self.labels[:5])):
logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
audio_tar_filepaths = expand_sharded_filepaths(
sharded_filepaths=audio_tar_filepaths,
shard_strategy=shard_strategy,
world_size=world_size,
global_rank=global_rank,
)
# Put together WebDataset
self._dataset = wd.WebDataset(urls=audio_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = (
self._dataset.rename(audio=VALID_FILE_FORMATS, key='__key__')
.to_tuple('audio', 'key')
.pipe(self._filter)
.map(f=self._build_sample)
)
def _filter(self, iterator):
"""This function is used to remove samples that have been filtered out by ASRSpeechLabel already.
Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample
that was filtered out (e.g. for duration).
Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,
which may make your code hang as one process will finish before the other.
"""
class TarredAudioFilter:
def __init__(self, collection, file_occurence):
self.iterator = iterator
self.collection = collection
self.file_occurence = file_occurence
self._iterable = self._internal_generator()
def __iter__(self):
self._iterable = self._internal_generator()
return self
def __next__(self):
try:
values = next(self._iterable)
except StopIteration:
# reset generator
self._iterable = self._internal_generator()
values = next(self._iterable)
return values
def _internal_generator(self):
"""
WebDataset requires an Iterator, but we require an iterable that yields 1-or-more
values per value inside self.iterator.
Therefore wrap the iterator with a generator function that will yield 1-or-more
values per sample in the iterator.
"""
for _, tup in enumerate(self.iterator):
audio_bytes, audio_filename = tup
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
if audio_filename in self.file_occurence:
for j in range(0, self.file_occurence[file_id]):
if j == 0:
audio_filename = file_id
else:
audio_filename = file_id + "-sub" + str(j)
yield audio_bytes, audio_filename
return TarredAudioFilter(self.collection, self.file_occurence)
def _build_sample(self, tup):
"""Builds the training sample by combining the data from the WebDataset with the manifest info.
"""
audio_bytes, audio_filename = tup
# Grab manifest entry from self.collection
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
manifest_idx = self.collection.mapping[file_id]
manifest_entry = self.collection[manifest_idx]
offset = manifest_entry.offset
if offset is None:
offset = 0
# Convert audio bytes to IO stream for processing (for SoundFile to read)
audio_filestream = io.BytesIO(audio_bytes)
features = self.featurizer.process(
audio_filestream, offset=offset, duration=manifest_entry.duration, trim=self.trim,
)
audio_filestream.close()
# Audio features
f, fl = features, torch.tensor(features.shape[0]).long()
t = self.label2id[manifest_entry.label]
tl = 1 # For compatibility with collate_fn used later
return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()
def __iter__(self):
return self._dataset.__iter__()
def __len__(self):
return len(self.collection)
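# Illustrative sketch (not part of the original module): equivalent ways of pointing the
# tarred datasets at 100 shards. The second form uses the _OP_/_CL_ tags described in the
# docstring above, which survive shells (e.g. inside SLURM scripts) that mangle literal
# braces. Paths are hypothetical.
_EXAMPLE_TAR_FILEPATH_SPECS = [
    "/data/tarred/audio_{1..100}.tar",        # plain brace expansion
    "/data/tarred/audio__OP_1..100_CL_.tar",  # SLURM-safe tag form of the same range
]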
class TarredAudioToClassificationLabelDataset(_TarredAudioLabelDataset):
"""
A similar Dataset to the AudioToClassificationLabelDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToClassificationLabelDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
labels (list): Dataset parameter.
List of target classes that can be output by the speaker recognition model.
featurizer
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
trim (bool): Whether to trim silence from the beginning and end
of the audio signal using librosa.effects.trim().
Defaults to False.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on the value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
is_regression_task (bool): Whether it is a regression task. Defaults to False.
"""
def _collate_fn(self, batch):
return _speech_collate_fn(batch, pad_id=0)
class TarredAudioToSpeechLabelDataset(_TarredAudioLabelDataset):
"""
A similar Dataset to the AudioToSpeechLabelDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToSpeechLabelDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
labels (list): Dataset parameter.
List of target classes that can be output by the speaker recognition model.
featurizer
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
trim (bool): Whether to trim silence from the beginning and end
of the audio signal using librosa.effects.trim().
Defaults to False.
window_length_in_sec (float): time length of window/slice (in seconds). Pass this only for speaker recognition and VAD tasks.
shift_length_in_sec (float): amount of shift of the window for generating frames for the VAD task in a batch. Pass this only for the VAD task during inference.
normalize_audio (bool): Whether to normalize audio signal. Defaults to False.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on the value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
"""
def __init__(
self,
*,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: Union[str, List[str]],
labels: List[str],
featurizer,
shuffle_n: int = 0,
min_duration: Optional[float] = 0.1,
max_duration: Optional[float] = None,
trim: bool = False,
window_length_in_sec: Optional[float] = 8,
shift_length_in_sec: Optional[float] = 1,
normalize_audio: bool = False,
shard_strategy: str = "scatter",
global_rank: int = 0,
world_size: int = 0,
):
logging.info("Window/slice length considered for collate func is {}".format(window_length_in_sec))
logging.info("Shift length considered for collate func is {}".format(shift_length_in_sec))
self.window_length_in_sec = window_length_in_sec
self.shift_length_in_sec = shift_length_in_sec
self.normalize_audio = normalize_audio
super().__init__(
audio_tar_filepaths=audio_tar_filepaths,
manifest_filepath=manifest_filepath,
labels=labels,
featurizer=featurizer,
shuffle_n=shuffle_n,
min_duration=min_duration,
max_duration=max_duration,
trim=trim,
shard_strategy=shard_strategy,
global_rank=global_rank,
world_size=world_size,
)
def fixed_seq_collate_fn(self, batch):
return _fixed_seq_collate_fn(self, batch)
def sliced_seq_collate_fn(self, batch):
raise NotImplementedError
def vad_frame_seq_collate_fn(self, batch):
return _vad_frame_seq_collate_fn(self, batch)
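# Illustrative sketch (not part of the original module): building the tarred VAD dataset
# and a DataLoader that uses the frame-level collate function (which assumes a batch size
# of 1, as noted in ``_vad_frame_seq_collate_fn``). All paths, labels and rank/world-size
# values below are hypothetical.
def _example_tarred_vad_dataloader():
    from torch.utils.data import DataLoader
    featurizer = WaveformFeaturizer(sample_rate=16000)
    dataset = TarredAudioToSpeechLabelDataset(
        audio_tar_filepaths="/data/tarred/audio_{1..100}.tar",
        manifest_filepath="/data/tarred/tarred_manifest.json",
        labels=["speech", "background"],
        featurizer=featurizer,
        shuffle_n=512,
        window_length_in_sec=0.63,
        shift_length_in_sec=0.08,
        global_rank=0,
        world_size=1,
    )
    return DataLoader(dataset, batch_size=1, collate_fn=dataset.vad_frame_seq_collate_fn)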
class AudioToMultiLabelDataset(Dataset):
"""
Dataset that loads a json file containing paths to audio files, durations (in seconds), and a sequence of labels.
Each new line is a different sample. Example below:
{"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
"0 1 1 0 1", "offset": offset_in_sec_0}
...
{"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
"0 1 0 0 1", "offset": offset_in_sec_n}
Args:
manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can
be comma-separated paths.
labels (Optional[list]): String containing all the possible labels to map to
if None then automatically picks from ASRSpeechLabel collection.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
trim (bool): Whether to trim silence from the beginning and end
of the audio signal using librosa.effects.trim().
Defaults to False.
window_length_in_sec (float): length of window/slice (in seconds)
Use this for speaker recognition and VAD tasks.
shift_length_in_sec (float): amount of shift of window for generating the frame for VAD task in a batch
Use this for VAD task during inference.
normalize_audio (bool): Whether to normalize audio signal.
Defaults to False.
is_regression_task (bool): Whether the dataset is for a regression task instead of classification.
Defaults to False.
cal_labels_occurrence (bool): Whether to calculate occurrence of labels
Defaults to False.
delimiter (Optional[str]): Delimiter to use when splitting the label string, defaults to None.
normalize_audio_db (Optional[float]): normalize audio signal to a target dB, defaults to None.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
output_types = {
'audio_signal': NeuralType(
('B', 'T'),
AudioSignal(freq=self._sample_rate)
if self is not None and hasattr(self, '_sample_rate')
else AudioSignal(),
),
'a_sig_length': NeuralType(tuple('B'), LengthsType()),
}
if self.is_regression_task:
output_types.update(
{
'targets': NeuralType(('B', 'T'), RegressionValuesType()),
'targets_length': NeuralType(tuple('B'), LengthsType()),
}
)
else:
output_types.update(
{'label': NeuralType(('B', 'T'), LabelsType()), 'label_length': NeuralType(tuple('B'), LengthsType()),}
)
return output_types
def __init__(
self,
*,
manifest_filepath: Union[str, List[str]],
sample_rate: int,
labels: Optional[List[str]] = None,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
min_duration: Optional[float] = 0.1,
max_duration: Optional[float] = None,
trim_silence: bool = False,
is_regression_task: bool = False,
cal_labels_occurrence: Optional[bool] = False,
delimiter: Optional[str] = None,
normalize_audio_db: Optional[float] = None,
):
super().__init__()
if isinstance(manifest_filepath, str):
manifest_filepath = manifest_filepath.split(',')
self.delimiter = delimiter
self.normalize_audio_db = normalize_audio_db
self.collection = collections.ASRSpeechLabel(
manifests_files=manifest_filepath,
min_duration=min_duration,
max_duration=max_duration,
is_regression_task=is_regression_task,
cal_labels_occurrence=cal_labels_occurrence,
delimiter=delimiter,
)
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
self.trim = trim_silence
self.is_regression_task = is_regression_task
self.id2occurrence = {}
self.labels_occurrence = [] if cal_labels_occurrence else None  # list only when occurrences are tracked
if not is_regression_task:
self.labels = labels if labels else self._get_label_set()
self.num_classes = len(self.labels) if self.labels is not None else 1
self.label2id, self.id2label = {}, {}
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
if cal_labels_occurrence:
self.id2occurrence[label_id] = self.collection.labels_occurrence[label]
self.labels_occurrence.append(self.id2occurrence[label_id])
for idx in range(len(self.labels[:5])):
logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
else:
self.labels = []
self.num_classes = 1
def _get_label_set(self):
labels = []
for sample in self.collection:
label_str = sample.label
if label_str:
label_str_list = label_str.split(self.delimiter) if self.delimiter else label_str.split()
labels.extend(label_str_list)
return sorted(set(labels))
def _label_str_to_tensor(self, label_str: str):
labels = label_str.split(self.delimiter) if self.delimiter else label_str.split()
if self.is_regression_task:
labels = [float(s) for s in labels]
labels = torch.tensor(labels).float()
else:
labels = [self.label2id[s] for s in labels]
labels = torch.tensor(labels).long()
return labels
def __len__(self):
return len(self.collection)
def __getitem__(self, index):
sample = self.collection[index]
offset = sample.offset
if offset is None:
offset = 0
features = self.featurizer.process(
sample.audio_file,
offset=offset,
duration=sample.duration,
trim=self.trim,
normalize_db=self.normalize_audio_db,
)
f, fl = features, torch.tensor(features.size(0)).long()
t = self._label_str_to_tensor(sample.label)
tl = torch.tensor(t.size(0)).long()
return f, fl, t, tl
def _collate_fn(self, batch):
return _speech_collate_fn(batch, pad_id=0)
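# Illustrative sketch (not part of the original module): the multi-label manifest line
# format read by AudioToMultiLabelDataset, where "label" holds a delimiter-separated
# sequence of labels. The path, duration and label string below are hypothetical.
def _example_write_multi_label_manifest(manifest_path="multi_labels.json"):
    import json
    entry = {
        "audio_filepath": "/data/audio_0.wav",
        "duration": 2.5,
        "label": "0 1 1 0 1",  # parsed by _label_str_to_tensor into a LongTensor of class ids
        "offset": 0.0,
    }
    with open(manifest_path, "w", encoding="utf-8") as fout:
        fout.write(json.dumps(entry) + "\n")
    return manifest_path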
class TarredAudioToMultiLabelDataset(IterableDataset):
"""
A similar Dataset to the AudioToMultiLabelDataset, but which loads tarred audio files.
Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToSpeechLabelDataset),
as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
contain the information for one audio file, including at least the transcript and name of the audio
file within the tarball.
Valid formats for the audio_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
is applied. We currently do not check for this, but your program may hang if the shards are uneven!
Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
replaced by shuffle_n (int).
Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
audio_tar_filepaths: Either a list of audio tarball filepaths, or a
string (can be brace-expandable).
manifest_filepath (str): Path to the manifest.
labels (list): Dataset parameter.
List of target classes that can be output by the speaker recognition model.
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
min_duration (float): Dataset parameter.
All training files which have a duration less than min_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to 0.1.
max_duration (float): Dataset parameter.
All training files which have a duration more than max_duration
are dropped. Note: Duration is read from the manifest JSON.
Defaults to None.
trim (bool): Whether to trim silence from the beginning and end
of the audio signal using librosa.effects.trim().
Defaults to False.
window_length_in_sec (float): time length of window/slice (in seconds). Pass this only for speaker recognition and VAD tasks.
shift_length_in_sec (float): amount of shift of the window for generating frames for the VAD task in a batch. Pass this only for the VAD task during inference.
normalize_audio (bool): Whether to normalize audio signal. Defaults to False.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on the value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
delimiter (Optional[str]): Delimiter to use when splitting the label string, defaults to None.
normalize_audio_db (Optional[float]): normalize audio signal to a target dB, defaults to None.
"""
def __init__(
self,
*,
audio_tar_filepaths: Union[str, List[str]],
manifest_filepath: Union[str, List[str]],
sample_rate: int,
labels: Optional[List[str]] = None,
shuffle_n: int = 0,
int_values: bool = False,
augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
min_duration: Optional[float] = 0.1,
max_duration: Optional[float] = None,
trim_silence: bool = False,
is_regression_task: bool = False,
shard_strategy: str = "scatter",
global_rank: int = 0,
world_size: int = 0,
delimiter: Optional[str] = None,
normalize_audio_db: Optional[float] = None,
):
super().__init__()
if isinstance(manifest_filepath, str):
manifest_filepath = manifest_filepath.split(',')
self.trim = trim_silence
self.is_regression_task = is_regression_task
self.delimiter = delimiter
self.normalize_audio_db = normalize_audio_db
self.collection = collections.ASRSpeechLabel(
manifests_files=manifest_filepath,
min_duration=min_duration,
max_duration=max_duration,
is_regression_task=is_regression_task,
index_by_file_id=True,
)
self.file_occurence = count_occurence(self.collection.mapping)
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
if not is_regression_task:
self.labels = labels if labels else self._get_label_set()
self.num_classes = len(self.labels) if self.labels is not None else 1
self.label2id, self.id2label = {}, {}
for label_id, label in enumerate(self.labels):
self.label2id[label] = label_id
self.id2label[label_id] = label
for idx in range(len(self.labels[:5])):
logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
else:
self.labels = []
self.num_classes = 1
audio_tar_filepaths = expand_sharded_filepaths(
sharded_filepaths=audio_tar_filepaths,
shard_strategy=shard_strategy,
world_size=world_size,
global_rank=global_rank,
)
# Put together WebDataset
self._dataset = wd.WebDataset(urls=audio_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = (
self._dataset.rename(audio=VALID_FILE_FORMATS, key='__key__')
.to_tuple('audio', 'key')
.pipe(self._filter)
.map(f=self._build_sample)
)
def _get_label_set(self):
labels = []
for sample in self.collection:
label_str = sample.label
if label_str:
label_str_list = label_str.split(self.delimiter) if self.delimiter else label_str.split()
labels.extend(label_str_list)
return sorted(set(labels))
def _label_str_to_tensor(self, label_str: str):
labels = label_str.split(self.delimiter) if self.delimiter else label_str.split()
if self.is_regression_task:
labels = [float(s) for s in labels]
labels = torch.tensor(labels).float()
else:
labels = [self.label2id[s] for s in labels]
labels = torch.tensor(labels).long()
return labels
def _filter(self, iterator):
"""This function is used to remove samples that have been filtered out by ASRSpeechLabel already.
Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample
that was filtered out (e.g. for duration).
Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,
which may make your code hang as one process will finish before the other.
"""
class TarredAudioFilter:
def __init__(self, collection, file_occurence):
self.iterator = iterator
self.collection = collection
self.file_occurence = file_occurence
self._iterable = self._internal_generator()
def __iter__(self):
self._iterable = self._internal_generator()
return self
def __next__(self):
try:
values = next(self._iterable)
except StopIteration:
# reset generator
self._iterable = self._internal_generator()
values = next(self._iterable)
return values
def _internal_generator(self):
"""
WebDataset requires an Iterator, but we require an iterable that yields 1-or-more
values per value inside self.iterator.
Therefore wrap the iterator with a generator function that will yield 1-or-more
values per sample in the iterator.
"""
for _, tup in enumerate(self.iterator):
audio_bytes, audio_filename = tup
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
if audio_filename in self.file_occurence:
for j in range(0, self.file_occurence[file_id]):
if j == 0:
audio_filename = file_id
else:
audio_filename = file_id + "-sub" + str(j)
yield audio_bytes, audio_filename
return TarredAudioFilter(self.collection, self.file_occurence)
def _build_sample(self, tup):
"""Builds the training sample by combining the data from the WebDataset with the manifest info.
"""
audio_bytes, audio_filename = tup
# Grab manifest entry from self.collection
file_id, _ = os.path.splitext(os.path.basename(audio_filename))
manifest_idx = self.collection.mapping[file_id]
manifest_entry = self.collection[manifest_idx]
offset = manifest_entry.offset
if offset is None:
offset = 0
# Convert audio bytes to IO stream for processing (for SoundFile to read)
audio_filestream = io.BytesIO(audio_bytes)
features = self.featurizer.process(
audio_filestream,
offset=offset,
duration=manifest_entry.duration,
trim=self.trim,
normalize_db=self.normalize_audio_db,
)
audio_filestream.close()
# Audio features
f, fl = features, torch.tensor(features.shape[0]).long()
t = self._label_str_to_tensor(manifest_entry.label)
tl = torch.tensor(t.size(0)).long()
return f, fl, t, tl
def __iter__(self):
return self._dataset.__iter__()
def __len__(self):
return len(self.collection)
def _collate_fn(self, batch):
return _speech_collate_fn(batch, pad_id=0)
|
NeMo-main
|
nemo/collections/asr/data/audio_to_label.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from nemo.collections.asr.data import feature_to_label
def get_feature_seq_speakerlabel_dataset(
feature_loader, config: dict
) -> feature_to_label.FeatureToSeqSpeakerLabelDataset:
"""
Instantiates a FeatureSeqSpeakerLabelDataset.
Args:
config: Config of the FeatureToSeqSpeakerLabelDataset.
Returns:
An instance of FeatureToSeqSpeakerLabelDataset.
"""
dataset = feature_to_label.FeatureToSeqSpeakerLabelDataset(
manifest_filepath=config['manifest_filepath'], labels=config['labels'], feature_loader=feature_loader,
)
return dataset
def get_feature_label_dataset(
config: dict, augmentor: Optional['FeatureAugmentor'] = None
) -> feature_to_label.FeatureToLabelDataset:
dataset = feature_to_label.FeatureToLabelDataset(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
augmentor=augmentor,
window_length_in_sec=config.get("window_length_in_sec", 0.63),
shift_length_in_sec=config.get("shift_length_in_sec", 0.08),
is_regression_task=config.get("is_regression_task", False),
cal_labels_occurrence=config.get("cal_labels_occurrence", False),
zero_spec_db_val=config.get("zero_spec_db_val", -16.635),
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
)
return dataset
def get_feature_multi_label_dataset(
config: dict, augmentor: Optional['FeatureAugmentor'] = None
) -> feature_to_label.FeatureToMultiLabelDataset:
dataset = feature_to_label.FeatureToMultiLabelDataset(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
augmentor=augmentor,
delimiter=config.get('delimiter', None),
is_regression_task=config.get("is_regression_task", False),
cal_labels_occurrence=config.get("cal_labels_occurrence", False),
zero_spec_db_val=config.get("zero_spec_db_val", -16.635),
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
)
return dataset
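# Illustrative sketch (not part of the original module): a minimal config dict for
# ``get_feature_label_dataset``. Only ``manifest_filepath`` and ``labels`` are required
# here; the remaining keys fall back to the defaults shown above. The path and label set
# are hypothetical.
def _example_feature_label_dataset():
    config = {
        "manifest_filepath": "/data/feature_manifest.json",
        "labels": ["speech", "background"],
        "window_length_in_sec": 0.63,
        "shift_length_in_sec": 0.08,
    }
    return get_feature_label_dataset(config=config)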
|
NeMo-main
|
nemo/collections/asr/data/feature_to_label_dataset.py
|