python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/common/tokenizers/text_to_speech/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import string
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import List, Optional
from nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon import (
get_grapheme_character_set,
get_ipa_punctuation_list,
validate_locale,
)
from nemo.collections.common.tokenizers.text_to_speech.tokenizer_utils import (
any_locale_text_preprocessing,
chinese_text_preprocessing,
english_text_preprocessing,
spanish_text_preprocessing,
)
from nemo.utils import logging
from nemo.utils.decorators import experimental
class BaseTokenizer(ABC):
PAD, BLANK, OOV = '<pad>', '<blank>', '<oov>'
def __init__(self, tokens, *, pad=PAD, blank=BLANK, oov=OOV, sep='', add_blank_at=None):
"""Abstract class for creating an arbitrary tokenizer to convert string to list of int tokens.
Args:
tokens: List of tokens.
pad: Pad token as string.
blank: Blank token as string.
oov: OOV token as string.
sep: Separation token as string.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
"""
super().__init__()
tokens = list(tokens)
        # TODO @xueyang: in general, the IDs of pad, sil, blank, and oov should be reserved up front rather than
        # assigned dynamically based on the number of tokens. The downside of dynamic assignment is that these IDs
        # differ from tokenizer to tokenizer.
self.pad, tokens = len(tokens), tokens + [pad] # Padding
if add_blank_at is not None:
self.blank, tokens = len(tokens), tokens + [blank] # Reserved for blank from asr-model
else:
# use add_blank_at=None only for ASR where blank is added automatically, disable blank here
self.blank = None
self.oov, tokens = len(tokens), tokens + [oov] # Out Of Vocabulary
if add_blank_at == "last":
tokens[-1], tokens[-2] = tokens[-2], tokens[-1]
self.oov, self.blank = self.blank, self.oov
self.tokens = tokens
self.sep = sep
self._util_ids = {self.pad, self.blank, self.oov}
self._token2id = {l: i for i, l in enumerate(tokens)}
self._id2token = tokens
def __call__(self, text: str) -> List[int]:
return self.encode(text)
@abstractmethod
def encode(self, text: str) -> List[int]:
"""Turns str text into int tokens."""
pass
def decode(self, tokens: List[int]) -> str:
"""Turns ints tokens into str text."""
return self.sep.join(self._id2token[t] for t in tokens if t not in self._util_ids)
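# NOTE (editor's addition): a minimal, illustrative sketch of how a concrete subclass of
# BaseTokenizer is expected to implement `encode`. `_DemoCharTokenizer` and
# `_demo_base_tokenizer` are hypothetical names used only for this example.
class _DemoCharTokenizer(BaseTokenizer):
    def encode(self, text: str) -> List[int]:
        # Map each known character to its ID and silently drop unknown characters.
        return [self._token2id[c] for c in text if c in self._token2id]


def _demo_base_tokenizer():
    tok = _DemoCharTokenizer(list("abc "), add_blank_at=None)
    ids = tok.encode("a cab")
    assert tok.decode(ids) == "a cab"  # decode skips pad/blank/oov and joins with `sep`
    return ids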
class BaseCharsTokenizer(BaseTokenizer):
# fmt: off
# TODO @xueyang: unify definition of the default PUNCT_LIST and import from ipa_lexicon.py
PUNCT_LIST = ( # Derived from LJSpeech and "/" additionally
',', '.', '!', '?', '-',
':', ';', '/', '"', '(',
')', '[', ']', '{', '}',
)
# fmt: on
def __init__(
self,
chars,
punct=True,
apostrophe=True,
add_blank_at=None,
pad_with_space=False,
non_default_punct_list=None,
text_preprocessing_func=lambda x: x,
):
"""Base class for char-based tokenizer.
Args:
chars: string that represents all possible characters.
punct: Whether to reserve grapheme for basic punctuation or not.
apostrophe: Whether to use apostrophe or not.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list.
text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer.
"""
tokens = []
self.space, tokens = len(tokens), tokens + [' '] # Space
tokens.extend(chars)
if apostrophe:
tokens.append("'") # Apostrophe for saving "don't" and "Joe's"
if punct:
if non_default_punct_list is not None:
self.PUNCT_LIST = non_default_punct_list
tokens.extend(self.PUNCT_LIST)
super().__init__(tokens, add_blank_at=add_blank_at)
self.punct = punct
self.pad_with_space = pad_with_space
self.text_preprocessing_func = text_preprocessing_func
def encode(self, text):
"""See base class."""
cs, space, tokens = [], self.tokens[self.space], set(self.tokens)
text = self.text_preprocessing_func(text)
for c in text:
# Add a whitespace if the current char is a whitespace while the previous char is not a whitespace.
if c == space and len(cs) > 0 and cs[-1] != space:
cs.append(c)
# Add the current char that is an alphanumeric or an apostrophe.
elif (c.isalnum() or c == "'") and c in tokens:
cs.append(c)
# Add a punctuation that has a single char.
elif (c in self.PUNCT_LIST) and self.punct:
cs.append(c)
# Warn about unknown char
elif c != space:
logging.warning(f"Text: [{text}] contains unknown char: [{c}]. Symbol will be skipped.")
# Remove trailing spaces
if cs:
while cs[-1] == space:
cs.pop()
if self.pad_with_space:
cs = [space] + cs + [space]
return [self._token2id[p] for p in cs]
class EnglishCharsTokenizer(BaseCharsTokenizer):
def __init__(
self,
punct=True,
apostrophe=True,
add_blank_at=None,
pad_with_space=False,
non_default_punct_list=None,
text_preprocessing_func=english_text_preprocessing,
):
"""English char-based tokenizer.
Args:
punct: Whether to reserve grapheme for basic punctuation or not.
apostrophe: Whether to use apostrophe or not.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list.
            text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer.
                Basically, it replaces all non-unicode characters with unicode ones and applies the lower() function.
"""
super().__init__(
chars=string.ascii_lowercase,
punct=punct,
apostrophe=apostrophe,
add_blank_at=add_blank_at,
pad_with_space=pad_with_space,
non_default_punct_list=non_default_punct_list,
text_preprocessing_func=text_preprocessing_func,
)
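# NOTE (editor's addition): illustrative usage sketch, not part of NeMo. It relies on the
# default `english_text_preprocessing`, which lower-cases and normalizes the input text.
def _demo_english_chars_tokenizer():
    tokenizer = EnglishCharsTokenizer(pad_with_space=False)
    token_ids = tokenizer.encode("Don't panic!")
    # Round-trips back to the normalized text; the apostrophe and punctuation are preserved.
    return tokenizer.decode(token_ids)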
class GermanCharsTokenizer(BaseCharsTokenizer):
_LOCALE = "de-DE"
_PUNCT_LIST = get_ipa_punctuation_list(_LOCALE)
_CHARSET_STR = get_grapheme_character_set(locale=_LOCALE, case="mixed")
def __init__(
self,
chars=_CHARSET_STR,
punct=True,
apostrophe=True,
add_blank_at=None,
pad_with_space=False,
non_default_punct_list=_PUNCT_LIST,
text_preprocessing_func=any_locale_text_preprocessing,
):
"""German grapheme-based tokenizer.
Args:
punct: Whether to reserve grapheme for basic punctuation or not.
apostrophe: Whether to use apostrophe or not.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list.
            text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer. By default,
                it leaves words unchanged.
"""
super().__init__(
chars=chars,
punct=punct,
apostrophe=apostrophe,
add_blank_at=add_blank_at,
pad_with_space=pad_with_space,
non_default_punct_list=non_default_punct_list,
text_preprocessing_func=text_preprocessing_func,
)
class SpanishCharsTokenizer(BaseCharsTokenizer):
PUNCT_LIST = get_ipa_punctuation_list("es-ES")
def __init__(
self, punct=True, apostrophe=True, add_blank_at=None, pad_with_space=False, non_default_punct_list=None,
):
"""Spanish grapheme tokenizer.
Args:
punct: Whether to reserve grapheme for basic punctuation or not.
apostrophe: Whether to use apostrophe or not.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list.
"""
es_alphabet = "abcdefghijklmnopqrstuvwxyzáéíñóúü"
super().__init__(
chars=es_alphabet,
punct=punct,
apostrophe=apostrophe,
add_blank_at=add_blank_at,
pad_with_space=pad_with_space,
non_default_punct_list=non_default_punct_list,
text_preprocessing_func=spanish_text_preprocessing,
)
class GermanPhonemesTokenizer(BaseCharsTokenizer):
# fmt: off
PUNCT_LIST = ( # Derived from LJSpeech and "/" additionally
',', '.', '!', '?', '-',
':', ';', '/', '"', '(',
')', '[', ']', '{', '}',
)
# fmt: on
def __init__(
self,
punct=True,
apostrophe=True,
add_blank_at=None,
pad_with_space=False,
non_default_punct_list=None,
text_preprocessing_func=any_locale_text_preprocessing,
):
"""Deutsch phoneme-based tokenizer.
Args:
punct: Whether to reserve grapheme for basic punctuation or not.
apostrophe: Whether to use apostrophe or not.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list.
            text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer.
                Currently, it only applies the lower() function.
"""
de_ipa = "abdefhijklmnoprstuvwxyzçðøŋœɐɑɒɔəɛɜɡɪɹɾʃʊʌʒː̃"
de_suprasegmentals = "12"
super().__init__(
chars=de_ipa + de_suprasegmentals,
punct=punct,
apostrophe=apostrophe,
add_blank_at=add_blank_at,
pad_with_space=pad_with_space,
non_default_punct_list=non_default_punct_list,
text_preprocessing_func=text_preprocessing_func,
)
def encode(self, text):
"""See base class."""
cs, space, tokens = [], self.tokens[self.space], set(self.tokens)
text = self.text_preprocessing_func(text)
for c in text:
# Add space if last one isn't one
if c == space and len(cs) > 0 and cs[-1] != space:
cs.append(c)
# Add next char
elif (c.isalnum() or c == "'" or c == "\u0303") and c in tokens:
cs.append(c)
# Add punct
elif (c in self.PUNCT_LIST) and self.punct:
cs.append(c)
# Warn about unknown char
elif c != space:
logging.warning(f"Text: [{text}] contains unknown char: [{c}]. Symbol will be skipped.")
# Remove trailing spaces
        while cs and cs[-1] == space:
cs.pop()
if self.pad_with_space:
cs = [space] + cs + [space]
return [self._token2id[p] for p in cs]
class EnglishPhonemesTokenizer(BaseTokenizer):
# fmt: off
PUNCT_LIST = ( # Derived from LJSpeech and "/" additionally
',', '.', '!', '?', '-',
':', ';', '/', '"', '(',
')', '[', ']', '{', '}',
)
VOWELS = (
'AA', 'AE', 'AH', 'AO', 'AW',
'AY', 'EH', 'ER', 'EY', 'IH',
'IY', 'OW', 'OY', 'UH', 'UW',
)
CONSONANTS = (
'B', 'CH', 'D', 'DH', 'F', 'G',
'HH', 'JH', 'K', 'L', 'M', 'N',
'NG', 'P', 'R', 'S', 'SH', 'T',
'TH', 'V', 'W', 'Y', 'Z', 'ZH',
)
# fmt: on
def __init__(
self,
g2p,
punct=True,
non_default_punct_list=None,
stresses=False,
chars=False,
*,
space=' ',
silence=None,
apostrophe=True,
oov=BaseTokenizer.OOV,
sep='|', # To be able to distinguish between 2/3 letters codes.
add_blank_at=None,
pad_with_space=False,
text_preprocessing_func=lambda text: english_text_preprocessing(text, lower=False),
):
"""English phoneme-based tokenizer.
Args:
g2p: Grapheme to phoneme module.
punct: Whether to reserve grapheme for basic punctuation or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list.
            stresses: Whether to use phoneme codes with stresses (0-2) or not.
chars: Whether to additionally use chars together with phonemes. It is useful if g2p module can return chars too.
space: Space token as string.
silence: Silence token as string (will be disabled if it is None).
apostrophe: Whether to use apostrophe or not.
oov: OOV token as string.
sep: Separation token as string.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
            text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer.
                Basically, it replaces all non-unicode characters with unicode ones.
                Note that the lower() function shouldn't be applied here, in case the text contains phonemes (it will be handled by g2p).
"""
self.phoneme_probability = None
if hasattr(g2p, "phoneme_probability"):
self.phoneme_probability = g2p.phoneme_probability
tokens = []
self.space, tokens = len(tokens), tokens + [space] # Space
if silence is not None:
self.silence, tokens = len(tokens), tokens + [silence] # Silence
tokens.extend(self.CONSONANTS)
vowels = list(self.VOWELS)
if stresses:
vowels = [f'{p}{s}' for p, s in itertools.product(vowels, (0, 1, 2))]
tokens.extend(vowels)
if chars or self.phoneme_probability is not None:
if not chars:
logging.warning(
"phoneme_probability was not None, characters will be enabled even though "
"chars was set to False."
)
tokens.extend(string.ascii_lowercase)
if apostrophe:
tokens.append("'") # Apostrophe
if punct:
if non_default_punct_list is not None:
self.PUNCT_LIST = non_default_punct_list
tokens.extend(self.PUNCT_LIST)
super().__init__(tokens, oov=oov, sep=sep, add_blank_at=add_blank_at)
self.chars = chars if self.phoneme_probability is None else True
self.punct = punct
self.stresses = stresses
self.pad_with_space = pad_with_space
self.text_preprocessing_func = text_preprocessing_func
self.g2p = g2p
def encode(self, text):
"""See base class for more information."""
text = self.text_preprocessing_func(text)
g2p_text = self.g2p(text) # TODO: handle infer
return self.encode_from_g2p(g2p_text, text)
def encode_from_g2p(self, g2p_text: List[str], raw_text: Optional[str] = None):
"""
Encodes text that has already been run through G2P.
Called for encoding to tokens after text preprocessing and G2P.
Args:
g2p_text: G2P's output, could be a mixture of phonemes and graphemes,
e.g. "see OOV" -> ['S', 'IY1', ' ', 'O', 'O', 'V']
raw_text: original raw input
"""
ps, space, tokens = [], self.tokens[self.space], set(self.tokens)
for p in g2p_text: # noqa
# Remove stress
if p.isalnum() and len(p) == 3 and not self.stresses:
p = p[:2]
# Add space if last one isn't one
if p == space and len(ps) > 0 and ps[-1] != space:
ps.append(p)
# Add next phoneme or char (if chars=True)
elif (p.isalnum() or p == "'") and p in tokens:
ps.append(p)
# Add punct
elif (p in self.PUNCT_LIST) and self.punct:
ps.append(p)
# Warn about unknown char/phoneme
elif p != space:
message = f"Text: [{''.join(g2p_text)}] contains unknown char/phoneme: [{p}]."
if raw_text is not None:
message += f"Original text: [{raw_text}]. Symbol will be skipped."
logging.warning(message)
# Remove trailing spaces
if ps:
while ps[-1] == space:
ps.pop()
if self.pad_with_space:
ps = [space] + ps + [space]
return [self._token2id[p] for p in ps]
@contextmanager
def set_phone_prob(self, prob):
if hasattr(self.g2p, "phoneme_probability"):
self.g2p.phoneme_probability = prob
try:
yield
finally:
if hasattr(self.g2p, "phoneme_probability"):
self.g2p.phoneme_probability = self.phoneme_probability
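# NOTE (editor's addition): illustrative sketch, not part of NeMo. EnglishG2p (imported from
# nemo.collections.tts.g2p.models.en_us_arpabet, as elsewhere in this repository) may need to
# download or load an ARPABET pronunciation dictionary, so treat this as a sketch of the flow
# rather than a guaranteed-offline recipe.
def _demo_english_phonemes_tokenizer():
    from nemo.collections.tts.g2p.models.en_us_arpabet import EnglishG2p

    g2p = EnglishG2p()  # assumes the default pronunciation dictionary can be resolved
    tokenizer = EnglishPhonemesTokenizer(g2p, stresses=True, chars=True, pad_with_space=True)
    token_ids = tokenizer.encode("Hello world.")
    with tokenizer.set_phone_prob(0.5):
        # Inside this context the G2P module may emit a mix of graphemes and phonemes.
        mixed_ids = tokenizer.encode("Hello world.")
    return token_ids, mixed_ids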
@experimental
class IPATokenizer(BaseTokenizer):
def __init__(
self,
g2p,
locale="en-US",
punct=True,
non_default_punct_list=None,
fixed_vocab=None,
*,
space=' ',
silence=None,
apostrophe=False,
oov=BaseTokenizer.OOV,
sep='|', # To be able to distinguish between symbols
add_blank_at=None,
pad_with_space=False,
):
"""General-purpose IPA-based tokenizer.
Args:
g2p: Grapheme to phoneme module, should be IpaG2p or some subclass thereof.
locale: Locale used to determine default text processing logic and punctuation.
Supports ["en-US", "de-DE", "es-ES"]. Defaults to "en-US".
Specify None if implementing custom logic for a new locale.
punct: Whether to reserve grapheme for basic punctuation or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list, if any.
fixed_vocab: List of valid grapheme/phoneme tokens for the model.
Set only if overriding the default vocab generation process (reading from G2P dict).
If set, any dataset entries that have unincluded graphemes will be filtered out, and any words whose
pronunciations have unincluded phonemes will be treated as OOV.
Please make sure that the grapheme prefixes and cases are consistent with the G2P module's settings.
Defaults to None, which means default vocab generation is used.
space: Space token as string.
silence: Silence token as string (will be disabled if it is None).
apostrophe: Whether to use apostrophe or not.
oov: OOV token as string.
sep: Separation token as string.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
"""
if not hasattr(g2p, "symbols"):
logging.error(
f"Please make sure the G2P module passed into the IPATokenizer has a `symbols` attribute. "
f"This is required in order to build the tokenizer vocabulary.\n"
f"Expected e.g. IpaG2p, found {type(g2p)}"
)
raise ValueError("G2P modules passed into the IPATokenizer must have `symbols` defined.")
if locale is not None:
validate_locale(locale)
self.phoneme_probability = None
if hasattr(g2p, "phoneme_probability"):
self.phoneme_probability = g2p.phoneme_probability
if locale == "en-US":
self.text_preprocessing_func = lambda text: english_text_preprocessing(text, lower=False)
else:
self.text_preprocessing_func = any_locale_text_preprocessing
# Build tokens list if fixed_vocab isn't set
if fixed_vocab:
tokens = {self.text_preprocessing_func(c) for c in fixed_vocab}
self.set_fixed_vocab = True # Used to check whether dataset entries need filtering
if g2p.symbols == tokens:
logging.info(
"Did not replace G2P valid symbol set since the given set is equivalent to the existing one."
)
self.set_fixed_vocab = False
else:
g2p.replace_symbols(tokens)
else:
tokens = set(g2p.symbols)
self.set_fixed_vocab = False
if apostrophe:
tokens.add("'")
if punct:
if non_default_punct_list is not None:
self.punct_list = non_default_punct_list
else:
self.punct_list = get_ipa_punctuation_list(locale)
tokens.update(self.punct_list)
# Sort to ensure that vocab is in the same order every time
tokens = sorted(list(tokens))
if space in g2p.symbols:
self.space = tokens.index(space)
else:
self.space, tokens = len(tokens), tokens + [space]
if silence is not None:
self.silence, tokens = len(tokens), tokens + [silence]
super().__init__(tokens, oov=oov, sep=sep, add_blank_at=add_blank_at)
self.tokens_set = set(self.tokens) # To save some repeated work when filtering entries
self.punct = punct
self.pad_with_space = pad_with_space
self.g2p = g2p
def encode(self, text: str) -> List[int]:
"""See base class for more information."""
# normalize the input text with "NFC" form.
text = self.text_preprocessing_func(text)
# transliterate the text into phoneme sequences and/or grapheme sequences.
g2p_text = self.g2p(text)
return self.encode_from_g2p(g2p_text, text)
def encode_from_g2p(self, g2p_text: List[str], raw_text: Optional[str] = None) -> List[int]:
"""
        Tokenize the `g2p_text` that has already been run through G2P. Each item in `g2p_text` is encoded as
        one of the integer IDs predefined in `self._token2id`. Note that this function should be called after
        the `self.text_preprocessing_func` and `self.g2p` functions.
Args:
g2p_text (List[str]): a sequence of tokens from G2P's output. It could be a sequence of phonemes, a sequence
of graphemes, or a mixture of both. For example, `['ˈ', 's', 'i', ' ', '#O', '#O', '#V']`, which is the
G2P's output of the text "see OOV", where '#' is prepended to each grapheme in order to distinguish
graphemes from phonemes if there are overlaps in between. The prefix '#' can be customized in
`nemo.collections.tts.g2p.models.i18n_ipa.IpaG2p.grapheme_prefix`.
raw_text (str): the original text after calling `self.text_preprocessing_func`. It is optional. It is only
used to deliver a warning message that some graphemes from the original text are skipped.
Returns: a list of integer IDs that tokenize the `g2p_text`.
"""
ps, space, tokens = [], self.tokens[self.space], set(self.tokens)
for p in g2p_text:
if p == space and len(ps) > 0 and ps[-1] != space:
# Add space if last token isn't one
ps.append(p)
elif p in tokens:
# Add next phoneme or char (if chars=True)
ps.append(p)
elif (p in self.punct_list) and self.punct:
# Add punct
ps.append(p)
elif p != space:
message = f"Text: [{''.join(g2p_text)}] contains unknown char/phoneme: [{p}]."
if raw_text is not None:
message += f"Original text: [{raw_text}]. Symbol will be skipped."
logging.warning(message)
# Remove trailing spaces
if ps:
while ps[-1] == space:
ps.pop()
if self.pad_with_space:
ps = [space] + ps + [space]
# Token index lookups
return [self._token2id[p] for p in ps]
@contextmanager
def set_phone_prob(self, prob):
if hasattr(self.g2p, "phoneme_probability"):
self.g2p.phoneme_probability = prob
try:
yield
finally:
if hasattr(self.g2p, "phoneme_probability"):
self.g2p.phoneme_probability = self.phoneme_probability
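# NOTE (editor's addition): illustrative sketch, not part of NeMo. IpaG2p normally reads an IPA
# pronunciation dictionary from disk; the constructor arguments shown here are assumptions and
# the dictionary path is a placeholder, so this only outlines the intended call sequence.
def _demo_ipa_tokenizer(ipa_dict_path: str):
    from nemo.collections.tts.g2p.models.i18n_ipa import IpaG2p

    g2p = IpaG2p(phoneme_dict=ipa_dict_path, locale="en-US")
    tokenizer = IPATokenizer(g2p, locale="en-US", pad_with_space=True)
    return tokenizer.encode("Hello world.")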
class ChinesePhonemesTokenizer(BaseTokenizer):
# fmt: off
PUNCT_LIST = ( # Derived from LJSpeech and "/" additionally
',', '.', '!', '?', '-',
':', ';', '/', '"', '(',
')', '[', ']', '{', '}',
)
ZH_PUNCT_LIST = list(",。?!;:、‘’“”()【】「」《》") + list(PUNCT_LIST)
def __init__(
self,
g2p,
punct=True,
non_default_punct_list=None,
*,
space=' ',
silence=None,
apostrophe=True,
sep='|', # To be able to distinguish between 2/3 letters codes.
add_blank_at=None,
pad_with_space=False,
text_preprocessing_func=chinese_text_preprocessing,
):
"""
Chinese phoneme-based tokenizer.
Note: This tokenizer for now covers Chinese phonemes/tones and English letters because our dataset contains
both Chinese and English graphemes.
Args:
g2p: Grapheme to phoneme module.
punct: Whether to reserve grapheme for basic punctuation or not.
            non_default_punct_list: List of punctuation marks which will be used instead of the default list.
space: Space token as string.
silence: Silence token as string (will be disabled if it is None).
apostrophe: Whether to use apostrophe or not.
sep: Separation token as string.
            add_blank_at: Where to add the blank token: "last" places it at the very end of the vocabulary,
                any other non-None value places it right after the pad token; if None, no blank token is added.
            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
            text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer.
                Basically, it replaces all non-unicode characters with unicode ones.
                Note that the lower() function shouldn't be applied here, in case the text contains phonemes (it will be handled by g2p).
"""
tokens = []
self.space, tokens = len(tokens), tokens + [space] # Space
if silence is not None:
self.silence, tokens = len(tokens), tokens + [silence] # Silence
self.phoneme_list = g2p.phoneme_list
self.tone_list = g2p.tone_list
self.ascii_letter_list = g2p.ascii_letter_list
tokens.extend(self.phoneme_list)
tokens.extend(self.tone_list)
tokens.extend(self.ascii_letter_list)
self.text_preprocessing_func = text_preprocessing_func
if apostrophe:
tokens.append("'") # Apostrophe
if punct:
if non_default_punct_list is not None:
self.PUNCT_LIST = non_default_punct_list
else:
self.PUNCT_LIST = list(self.ZH_PUNCT_LIST)
tokens.extend(self.PUNCT_LIST)
super().__init__(tokens, sep=sep, add_blank_at=add_blank_at)
self.punct = punct
self.pad_with_space = pad_with_space
self.g2p = g2p
def encode(self, text: str) -> List[int]:
"""See base class for more information."""
text = self.text_preprocessing_func(text)
g2p_text = self.g2p(text)
return self.encode_from_g2p(g2p_text, text)
def encode_from_g2p(self, g2p_text: List[str], raw_text: Optional[str] = None):
"""
        Encodes text that has already been run through G2P.
Called for encoding to tokens after text preprocessing and G2P.
Args:
g2p_text: G2P's output, could be a mixture of Chinese phonemes and English letters.
raw_text: original raw input
"""
ps, space, tokens = [], self.tokens[self.space], set(self.tokens)
for p in g2p_text: # noqa
# Add space if last one isn't one
if p == space and len(ps) > 0 and ps[-1] != space:
ps.append(p)
# Add next phoneme or tone or ascii letter or apostrophe.
elif (p.isalnum() or p == "'" or p in self.phoneme_list + self.tone_list + self.ascii_letter_list) and p in tokens:
ps.append(p)
# Add punctuation
elif (p in self.PUNCT_LIST) and self.punct:
ps.append(p)
# Warn about unknown char/phoneme
elif p != space:
message = f"Text: [{' '.join(g2p_text)}] contains unknown char/phoneme: [{p}]."
if raw_text is not None:
message += f"Original text: [{raw_text}]. Symbol will be skipped."
logging.warning(message)
# Remove trailing spaces
if ps:
while ps[-1] == space:
ps.pop()
if self.pad_with_space:
ps = [space] + ps + [space]
return [self._token2id[p] for p in ps]
|
NeMo-main
|
nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.tokenizers import TokenizerSpec
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import EnglishPhonemesTokenizer
from nemo.collections.tts.g2p.models.en_us_arpabet import EnglishG2p
__all__ = ['TextToSpeechTokenizer']
class TextToSpeechTokenizer(TokenizerSpec):
def __init__(self, phoneme_dict, heteronyms):
self.g2p = EnglishG2p(phoneme_dict=phoneme_dict, heteronyms=heteronyms)
self.tokenizer = EnglishPhonemesTokenizer(
self.g2p, stresses=True, chars=True, pad_with_space=True, add_blank_at=True
)
self.vocab_size = len(self.tokenizer.tokens)
def text_to_ids(self, text):
return self.tokenizer.encode(text)
def text_to_tokens(self, text):
return self.g2p(text)
def tokens_to_text(self, tokens):
pass
def tokens_to_ids(self, tokens):
pass
def ids_to_tokens(self, ids):
pass
def ids_to_text(self, ids):
pass
@property
def pad_id(self):
return self.tokenizer.pad
@property
def bos_id(self):
return self.tokenizer.pad
@property
def eos_id(self):
return self.tokenizer.pad
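# NOTE (editor's addition): illustrative sketch, not part of NeMo. `phoneme_dict_path` and
# `heteronyms_path` are placeholders for the CMU-style pronunciation dictionary and heteronyms
# list that EnglishG2p expects.
def _demo_text_to_speech_tokenizer(phoneme_dict_path, heteronyms_path):
    tokenizer = TextToSpeechTokenizer(phoneme_dict=phoneme_dict_path, heteronyms=heteronyms_path)
    ids = tokenizer.text_to_ids("Speech synthesis is fun.")
    tokens = tokenizer.text_to_tokens("Speech synthesis is fun.")
    return ids, tokens, tokenizer.vocab_size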
|
NeMo-main
|
nemo/collections/common/tokenizers/text_to_speech/tokenizer_wrapper.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import os
import threading
from typing import Any, Dict, Iterable
import pytorch_lightning as pl
import torch
from pytorch_lightning import Callback
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.rank_zero import rank_zero_info
class EMA(Callback):
"""
Implements Exponential Moving Averaging (EMA).
When training a model, this callback will maintain moving averages of the trained parameters.
When evaluating, we use the moving averages copy of the trained parameters.
When saving, we save an additional set of parameters with the prefix `ema`.
Args:
decay: The exponential decay used when calculating the moving average. Has to be between 0-1.
        validate_original_weights: Validate the original weights, as opposed to the EMA weights.
every_n_steps: Apply EMA every N steps.
cpu_offload: Offload weights to CPU.
"""
def __init__(
self, decay: float, validate_original_weights: bool = False, every_n_steps: int = 1, cpu_offload: bool = False,
):
if not (0 <= decay <= 1):
raise MisconfigurationException("EMA decay value must be between 0 and 1")
self.decay = decay
self.validate_original_weights = validate_original_weights
self.every_n_steps = every_n_steps
self.cpu_offload = cpu_offload
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
device = pl_module.device if not self.cpu_offload else torch.device('cpu')
trainer.optimizers = [
EMAOptimizer(
optim,
device=device,
decay=self.decay,
every_n_steps=self.every_n_steps,
current_step=trainer.global_step,
)
for optim in trainer.optimizers
if not isinstance(optim, EMAOptimizer)
]
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self._should_validate_ema_weights(trainer):
self.swap_model_weights(trainer)
def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self._should_validate_ema_weights(trainer):
self.swap_model_weights(trainer)
def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self._should_validate_ema_weights(trainer):
self.swap_model_weights(trainer)
def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self._should_validate_ema_weights(trainer):
self.swap_model_weights(trainer)
def _should_validate_ema_weights(self, trainer: "pl.Trainer") -> bool:
return not self.validate_original_weights and self._ema_initialized(trainer)
def _ema_initialized(self, trainer: "pl.Trainer") -> bool:
return any(isinstance(optimizer, EMAOptimizer) for optimizer in trainer.optimizers)
def swap_model_weights(self, trainer: "pl.Trainer", saving_ema_model: bool = False):
for optimizer in trainer.optimizers:
assert isinstance(optimizer, EMAOptimizer)
optimizer.switch_main_parameter_weights(saving_ema_model)
@contextlib.contextmanager
def save_ema_model(self, trainer: "pl.Trainer"):
"""
Saves an EMA copy of the model + EMA optimizer states for resume.
"""
self.swap_model_weights(trainer, saving_ema_model=True)
try:
yield
finally:
self.swap_model_weights(trainer, saving_ema_model=False)
@contextlib.contextmanager
def save_original_optimizer_state(self, trainer: "pl.Trainer"):
for optimizer in trainer.optimizers:
assert isinstance(optimizer, EMAOptimizer)
optimizer.save_original_optimizer_state = True
try:
yield
finally:
for optimizer in trainer.optimizers:
optimizer.save_original_optimizer_state = False
def on_load_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any]
) -> None:
checkpoint_callback = trainer.checkpoint_callback
# use the connector as NeMo calls the connector directly in the exp_manager when restoring.
connector = trainer._checkpoint_connector
# Replace connector._ckpt_path with below to avoid calling into lightning's protected API
ckpt_path = trainer.ckpt_path
if ckpt_path and checkpoint_callback is not None and 'NeMo' in type(checkpoint_callback).__name__:
ext = checkpoint_callback.FILE_EXTENSION
if ckpt_path.endswith(f'-EMA{ext}'):
rank_zero_info(
"loading EMA based weights. "
"The callback will treat the loaded EMA weights as the main weights"
" and create a new EMA copy when training."
)
return
ema_path = ckpt_path.replace(ext, f'-EMA{ext}')
if os.path.exists(ema_path):
ema_state_dict = torch.load(ema_path, map_location=torch.device('cpu'))
checkpoint['optimizer_states'] = ema_state_dict['optimizer_states']
del ema_state_dict
rank_zero_info("EMA state has been restored.")
else:
raise MisconfigurationException(
"Unable to find the associated EMA weights when re-loading, "
f"training will start with new EMA weights. Expected them to be at: {ema_path}",
)
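# NOTE (editor's addition): illustrative sketch, not part of NeMo, showing how the EMA callback
# is attached to a PyTorch Lightning Trainer. `model` and `train_loader` are placeholders for a
# user-defined LightningModule and dataloader.
def _demo_ema_callback(model, train_loader):
    trainer = pl.Trainer(max_steps=100, callbacks=[EMA(decay=0.999, every_n_steps=1)])
    trainer.fit(model, train_dataloaders=train_loader)
    return trainer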
@torch.no_grad()
def ema_update(ema_model_tuple, current_model_tuple, decay):
torch._foreach_mul_(ema_model_tuple, decay)
torch._foreach_add_(
ema_model_tuple, current_model_tuple, alpha=(1.0 - decay),
)
def run_ema_update_cpu(ema_model_tuple, current_model_tuple, decay, pre_sync_stream=None):
if pre_sync_stream is not None:
pre_sync_stream.synchronize()
ema_update(ema_model_tuple, current_model_tuple, decay)
class EMAOptimizer(torch.optim.Optimizer):
r"""
EMAOptimizer is a wrapper for torch.optim.Optimizer that computes
Exponential Moving Average of parameters registered in the optimizer.
EMA parameters are automatically updated after every step of the optimizer
with the following formula:
ema_weight = decay * ema_weight + (1 - decay) * training_weight
To access EMA parameters, use ``swap_ema_weights()`` context manager to
perform a temporary in-place swap of regular parameters with EMA
parameters.
Notes:
- EMAOptimizer is not compatible with APEX AMP O2.
Args:
optimizer (torch.optim.Optimizer): optimizer to wrap
device (torch.device): device for EMA parameters
decay (float): decay factor
Returns:
returns an instance of torch.optim.Optimizer that computes EMA of
parameters
Example:
model = Model().to(device)
opt = torch.optim.Adam(model.parameters())
opt = EMAOptimizer(opt, device, 0.9999)
for epoch in range(epochs):
training_loop(model, opt)
regular_eval_accuracy = evaluate(model)
with opt.swap_ema_weights():
ema_eval_accuracy = evaluate(model)
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
device: torch.device,
decay: float = 0.9999,
every_n_steps: int = 1,
current_step: int = 0,
):
self.optimizer = optimizer
self.decay = decay
self.device = device
self.current_step = current_step
self.every_n_steps = every_n_steps
self.save_original_optimizer_state = False
self.first_iteration = True
self.rebuild_ema_params = True
self.stream = None
self.thread = None
self.ema_params = ()
self.in_saving_ema_model_context = False
def all_parameters(self) -> Iterable[torch.Tensor]:
return (param for group in self.param_groups for param in group['params'])
def step(self, closure=None, **kwargs):
self.join()
if self.first_iteration:
if any(p.is_cuda for p in self.all_parameters()):
self.stream = torch.cuda.Stream()
self.first_iteration = False
if self.rebuild_ema_params:
opt_params = list(self.all_parameters())
self.ema_params += tuple(
copy.deepcopy(param.data.detach()).to(self.device) for param in opt_params[len(self.ema_params) :]
)
self.rebuild_ema_params = False
loss = self.optimizer.step(closure)
if self._should_update_at_step():
self.update()
self.current_step += 1
return loss
def _should_update_at_step(self) -> bool:
return self.current_step % self.every_n_steps == 0
@torch.no_grad()
def update(self):
if self.stream is not None:
self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
current_model_state = tuple(
param.data.to(self.device, non_blocking=True) for param in self.all_parameters()
)
if self.device.type == 'cuda':
ema_update(self.ema_params, current_model_state, self.decay)
if self.device.type == 'cpu':
self.thread = threading.Thread(
target=run_ema_update_cpu, args=(self.ema_params, current_model_state, self.decay, self.stream,),
)
self.thread.start()
def swap_tensors(self, tensor1, tensor2):
tmp = torch.empty_like(tensor1)
tmp.copy_(tensor1)
tensor1.copy_(tensor2)
tensor2.copy_(tmp)
def switch_main_parameter_weights(self, saving_ema_model: bool = False):
self.join()
self.in_saving_ema_model_context = saving_ema_model
for param, ema_param in zip(self.all_parameters(), self.ema_params):
self.swap_tensors(param.data, ema_param)
@contextlib.contextmanager
def swap_ema_weights(self, enabled: bool = True):
r"""
A context manager to in-place swap regular parameters with EMA
parameters.
It swaps back to the original regular parameters on context manager
exit.
Args:
enabled (bool): whether the swap should be performed
"""
if enabled:
self.switch_main_parameter_weights()
try:
yield
finally:
if enabled:
self.switch_main_parameter_weights()
def __getattr__(self, name):
return getattr(self.optimizer, name)
def join(self):
if self.stream is not None:
self.stream.synchronize()
if self.thread is not None:
self.thread.join()
def state_dict(self):
self.join()
if self.save_original_optimizer_state:
return self.optimizer.state_dict()
# if we are in the context of saving an EMA model, the EMA weights are in the modules' actual weights
ema_params = self.ema_params if not self.in_saving_ema_model_context else list(self.all_parameters())
state_dict = {
'opt': self.optimizer.state_dict(),
'ema': ema_params,
'current_step': self.current_step,
'decay': self.decay,
'every_n_steps': self.every_n_steps,
}
return state_dict
def load_state_dict(self, state_dict):
self.join()
self.optimizer.load_state_dict(state_dict['opt'])
self.ema_params = tuple(param.to(self.device) for param in copy.deepcopy(state_dict['ema']))
self.current_step = state_dict['current_step']
self.decay = state_dict['decay']
self.every_n_steps = state_dict['every_n_steps']
self.rebuild_ema_params = False
def add_param_group(self, param_group):
self.optimizer.add_param_group(param_group)
self.rebuild_ema_params = True
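# NOTE (editor's addition): illustrative, CPU-only sketch that expands the class docstring's
# example into a runnable snippet with a toy linear model; it is not part of NeMo.
def _demo_ema_optimizer():
    model = torch.nn.Linear(4, 2)
    opt = EMAOptimizer(torch.optim.Adam(model.parameters()), device=torch.device('cpu'), decay=0.99)
    for _ in range(5):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
    with opt.swap_ema_weights():
        # Inside the context the module holds the EMA weights; they are swapped back on exit.
        ema_output = model(torch.randn(1, 4))
    return ema_output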
|
NeMo-main
|
nemo/collections/common/callbacks/ema.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.callbacks.callbacks import LogEpochTimeCallback
from nemo.collections.common.callbacks.ema import EMA
|
NeMo-main
|
nemo/collections/common/callbacks/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_only
# from sacrebleu import corpus_bleu
class LogEpochTimeCallback(Callback):
"""Simple callback that logs how long each epoch takes, in seconds, to a pytorch lightning log
"""
@rank_zero_only
def on_train_epoch_start(self, trainer, pl_module):
self.epoch_start = time.time()
@rank_zero_only
def on_train_epoch_end(self, trainer, pl_module):
curr_time = time.time()
duration = curr_time - self.epoch_start
trainer.logger.log_metrics({"epoch_time": duration}, step=trainer.global_step)
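# NOTE (editor's addition): illustrative sketch, not part of NeMo. `model` and `datamodule` are
# placeholders; the callback only needs to be added to the Trainer's callback list.
def _demo_log_epoch_time(model, datamodule):
    import pytorch_lightning as pl

    trainer = pl.Trainer(max_epochs=3, callbacks=[LogEpochTimeCallback()])
    trainer.fit(model, datamodule=datamodule)
    return trainer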
# class MachineTranslationLogEvalCallback(Callback):
# def _on_eval_end(self, trainer, pl_module, mode):
# counts = np.array(self._non_pad_tokens)
# eval_loss = np.sum(np.array(self._losses) * counts) / np.sum(counts)
# sacre_bleu = corpus_bleu(self._translations, [self._ground_truths], tokenize="13a")
# print(f"{mode} results for process with global rank {pl_module.global_rank}".upper())
# for i in range(pl_module.num_examples[mode]):
# print('\u0332'.join(f"EXAMPLE {i}:")) # Underline output
# sent_id = np.random.randint(len(self._translations))
# print(f"Ground truth: {self._ground_truths[sent_id]}\n")
# print(f"Translation: {self._translations[sent_id]}\n")
# print()
# print("-" * 50)
# print(f"loss: {eval_loss:.3f}")
# print(f"SacreBLEU: {sacre_bleu}")
# print("-" * 50)
# @rank_zero_only
# def on_test_end(self, trainer, pl_module):
# self._on_eval_end(trainer, pl_module, "test")
# @rank_zero_only
# def on_validation_end(self, trainer, pl_module):
# self._on_eval_end(trainer, pl_module, "val")
# @rank_zero_only
# def on_sanity_check_end(self, trainer, pl_module):
# self._on_eval_end(trainer, pl_module, "val")
# def _on_eval_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx, mode):
# self._translations.extend(outputs['translations'])
# self._ground_truths.extend(outputs['ground_truths'])
# self._non_pad_tokens.append(outputs['num_non_pad_tokens'])
# self._losses.append(outputs[f'{mode}_loss'])
# @rank_zero_only
# def on_test_batch_end(self, trainer, pl_module, batch, outputs, batch_idx, dataloader_idx):
# self._on_eval_batch_end(trainer, pl_module, batch, outputs, batch_idx, dataloader_idx, 'test')
# @rank_zero_only
# def on_validation_batch_end(self, trainer, pl_module, batch, outputs, batch_idx, dataloader_idx):
# self._on_eval_batch_end(trainer, pl_module, batch, outputs, batch_idx, dataloader_idx, 'val')
# def _on_eval_start(self, trainer, pl_module):
# self._translations = []
# self._ground_truths = []
# self._losses = []
# self._non_pad_tokens = []
# @rank_zero_only
# def on_test_start(self, trainer, pl_module):
# self._on_eval_start(trainer, pl_module)
# @rank_zero_only
# def on_validation_start(self, trainer, pl_module):
# self._on_eval_start(trainer, pl_module)
# @rank_zero_only
# def on_sanity_check_start(self, trainer, pl_module):
# self._on_eval_start(trainer, pl_module)
|
NeMo-main
|
nemo/collections/common/callbacks/callbacks.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
# Library version globals
TORCH_VERSION = None
TORCH_VERSION_MIN = version.Version('1.7')
|
NeMo-main
|
nemo/collections/common/parts/patch_utils.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
class NeMoMixedPrecisionPlugin(MixedPrecisionPlugin):
def __init__(self, init_scale: float = 2 ** 32, growth_interval: int = 1000) -> None:
super().__init__(precision=16)
self.scaler = torch.cuda.amp.GradScaler(init_scale=init_scale, growth_interval=growth_interval)
|
NeMo-main
|
nemo/collections/common/parts/ptl_overrides.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class MultiLayerPerceptron(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(
self,
hidden_size: int,
num_classes: int,
num_layers: int = 2,
activation: str = 'relu',
log_softmax: bool = True,
):
super().__init__()
self.layers = 0
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', getattr(torch, activation))
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, hidden_states):
output_states = hidden_states[:]
for i in range(self.layers):
output_states = getattr(self, f'layer{i}')(output_states)
if self.log_softmax:
output_states = torch.log_softmax(output_states, dim=-1)
return output_states
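# NOTE (editor's addition): illustrative usage sketch, not part of NeMo, showing the expected
# tensor shapes.
def _demo_multi_layer_perceptron():
    mlp = MultiLayerPerceptron(hidden_size=16, num_classes=3, num_layers=2, activation='relu')
    hidden_states = torch.randn(4, 10, 16)  # (batch, sequence, hidden_size)
    log_probs = mlp(hidden_states)  # (batch, sequence, num_classes), log-softmax over the last dim
    return log_probs.shape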
|
NeMo-main
|
nemo/collections/common/parts/multi_layer_perceptron.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
__all__ = ['NEG_INF', 'form_attention_mask', 'transformer_weights_init', 'mask_padded_tokens']
NEG_INF = -10000.0
def form_attention_mask(input_mask, diagonal=None):
"""
    Build an attention mask, optionally masking future tokens that the model is forbidden
    to attend to (e.g., as in a Transformer decoder).
Args:
input_mask: binary mask of size B x L with 1s corresponding to valid
tokens and 0s corresponding to padding tokens
diagonal: diagonal where triangular future mask starts
None -- do not mask anything
0 -- regular translation or language modeling future masking
1 -- query stream masking as in XLNet architecture
Returns:
attention_mask: mask of size B x 1 x L x L with 0s corresponding to
tokens we plan to attend to and -10000 otherwise
"""
if input_mask is None:
return None
attn_shape = (1, input_mask.shape[1], input_mask.shape[1])
attn_mask = input_mask.to(dtype=bool).unsqueeze(1)
if diagonal is not None:
future_mask = torch.tril(torch.ones(attn_shape, dtype=torch.bool, device=input_mask.device), diagonal)
attn_mask = attn_mask & future_mask
attention_mask = (1 - attn_mask.to(torch.float)) * NEG_INF
return attention_mask.unsqueeze(1)
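# NOTE (editor's addition): illustrative sketch, not part of NeMo, showing the mask shapes
# produced for a padded batch.
def _demo_form_attention_mask():
    input_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # B=2, L=4; zeros mark padding
    enc_mask = form_attention_mask(input_mask)  # (2, 1, 1, 4): only padding positions are masked
    dec_mask = form_attention_mask(input_mask, diagonal=0)  # (2, 1, 4, 4): padding + future tokens
    return enc_mask.shape, dec_mask.shape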
def transformer_weights_init(module, std_init_range=0.02, xavier=True):
"""
Initialize different weights in Transformer model.
Args:
module: torch.nn.Module to be initialized
std_init_range: standard deviation of normal initializer
xavier: if True, xavier initializer will be used in Linear layers
as was proposed in AIAYN paper, otherwise normal initializer
will be used (like in BERT paper)
"""
if isinstance(module, nn.Linear):
if xavier:
nn.init.xavier_uniform_(module.weight)
else:
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
if module.bias is not None:
nn.init.constant_(module.bias, 0.0)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.weight, 1.0)
nn.init.constant_(module.bias, 0.0)
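# NOTE (editor's addition): illustrative sketch, not part of NeMo; the initializer is meant to be
# applied recursively via nn.Module.apply.
def _demo_transformer_weights_init():
    model = nn.Sequential(nn.Embedding(100, 32), nn.Linear(32, 32), nn.LayerNorm(32))
    model.apply(lambda module: transformer_weights_init(module, std_init_range=0.02, xavier=True))
    return model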
def mask_padded_tokens(tokens, pad_id):
mask = tokens != pad_id
return mask
|
NeMo-main
|
nemo/collections/common/parts/transformer_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.parts.adapter_modules import LinearAdapter, LinearAdapterConfig
from nemo.collections.common.parts.mlm_scorer import MLMScorer
from nemo.collections.common.parts.multi_layer_perceptron import MultiLayerPerceptron
from nemo.collections.common.parts.transformer_utils import *
from nemo.collections.common.parts.utils import *
|
NeMo-main
|
nemo/collections/common/parts/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from typing import Iterable, List
import torch.nn as nn
__all__ = ['if_exist', '_compute_softmax', 'flatten']
activation_registry = {
"identity": nn.Identity,
"hardtanh": nn.Hardtanh,
"relu": nn.ReLU,
"selu": nn.SELU,
"swish": nn.SiLU,
"silu": nn.SiLU,
"gelu": nn.GELU,
}
def if_exist(outfold: str, files: List[str]):
"""
Returns true if all given files exist in the given folder
Args:
outfold: folder path
files: list of file names relative to outfold
"""
if not os.path.exists(outfold):
return False
for file in files:
if not os.path.exists(f'{outfold}/{file}'):
return False
return True
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def flatten_iterable(iter: Iterable) -> Iterable:
"""Flatten an iterable which contains values or
iterables with values.
Args:
iter: iterable containing values at the deepest level.
Returns:
A flat iterable containing values.
"""
for it in iter:
if isinstance(it, str) or not isinstance(it, Iterable):
yield it
else:
yield from flatten_iterable(it)
def flatten(list_in: List) -> List:
"""Flatten a list of (nested lists of) values into a flat list.
Args:
list_in: list of values, possibly nested
Returns:
A flat list of values.
"""
return list(flatten_iterable(list_in))
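# NOTE (editor's addition): illustrative sketch, not part of NeMo, of `flatten` behaviour:
# strings are treated as atomic values, while nested lists/tuples are expanded.
def _demo_flatten():
    nested = [1, [2, 3, [4]], "ab", (5, 6)]
    assert flatten(nested) == [1, 2, 3, 4, "ab", 5, 6]
    return flatten(nested)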
|
NeMo-main
|
nemo/collections/common/parts/utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2020 AWSLABS, AMAZON.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
import torch
from torch.nn.functional import softmax
from transformers import AutoModelForMaskedLM, AutoTokenizer
__all__ = ['MLMScorer']
class MLMScorer:
def __init__(self, model_name: str, device: str = 'cpu'):
"""
Creates MLM scorer from https://arxiv.org/abs/1910.14659.
Args:
model_name: HuggingFace pretrained model name
device: either 'cpu' or 'cuda'
"""
self.model = AutoModelForMaskedLM.from_pretrained(model_name).to(device).eval()
self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
self.device = device
self.MASK_LABEL = self.tokenizer.mask_token
def score_sentences(self, sentences: List[str]):
"""
returns list of MLM scores for each sentence in list.
"""
return [self.score_sentence(sentence) for sentence in sentences]
def score_sentence(self, sentence: str):
"""
returns MLM score for sentence.
"""
assert type(sentence) == str
tokens = self.tokenizer.tokenize(sentence)
mask_idx = []
token_type = []
attn_mask = []
ids = []
for m_idx, _ in enumerate(tokens):
masked = self.__mask_text__(m_idx, tokens)
mask_idx.append(m_idx)
ids.append(self.tokenizer.encode(masked))
id_len = len(ids[-1])
token_type.append([0] * id_len)
attn_mask.append([1] * id_len)
data = {
'input_ids': torch.tensor(ids, device=self.device),
'attention_mask': torch.tensor(attn_mask, device=self.device),
'token_type_ids': torch.tensor(token_type, device=self.device),
}
with torch.no_grad():
outputs = self.model(**data)
logits = outputs.logits
scores = []
scores_log_prob = 0.0
for i, m_idx in enumerate(mask_idx):
preds = logits[i].squeeze(0)
probs = softmax(preds, dim=1)
token_id = self.tokenizer.convert_tokens_to_ids([tokens[m_idx]])[0]
log_prob = np.log(probs[m_idx + 1, token_id].cpu().numpy()).item()
scores.append(log_prob)
scores_log_prob += log_prob
return scores_log_prob
def __mask_text__(self, idx: int, tokens: List[str]):
"""
replaces string at index idx in list `tokens` with a masked token and returns the modified list.
"""
masked = tokens.copy()
masked[idx] = self.MASK_LABEL
return masked
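# NOTE (editor's addition): illustrative sketch, not part of NeMo. Constructing the scorer
# downloads a HuggingFace masked-LM checkpoint, so this only shows the intended call pattern.
def _demo_mlm_scorer():
    scorer = MLMScorer(model_name='bert-base-uncased', device='cpu')
    scores = scorer.score_sentences(["The cat sat on the mat.", "The cat sat mat the on."])
    # A higher (less negative) pseudo-log-likelihood indicates a more fluent sentence.
    return scores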
|
NeMo-main
|
nemo/collections/common/parts/mlm_scorer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, is_dataclass
from typing import Any, Optional
from hydra.utils import instantiate
from omegaconf import OmegaConf
from torch import nn as nn
from nemo.collections.common.parts.utils import activation_registry
from nemo.core.classes.mixins import access_mixins, adapter_mixin_strategies
class AdapterModuleUtil(access_mixins.AccessMixin):
"""
Base class of Adapter Modules, providing common functionality to all Adapter Modules.
"""
def setup_adapter_strategy(self, adapter_strategy: Optional[adapter_mixin_strategies.AbstractAdapterStrategy]):
"""
        Set up the adapter strategy of this class, enabling dynamic changes in the way the adapter output is
        merged with the input.
        When called successfully, this assigns the variable `adapter_strategy` to the module.
Args:
adapter_strategy: Can be a None or an implementation of AbstractAdapterStrategy.
"""
# set default adapter strategy
if adapter_strategy is None:
adapter_strategy = self.get_default_strategy_config()
if is_dataclass(adapter_strategy):
adapter_strategy = OmegaConf.structured(adapter_strategy)
OmegaConf.set_struct(adapter_strategy, False)
# The config must have the `_target_` field pointing to the actual adapter strategy class
# which will load that strategy dynamically to this module.
if isinstance(adapter_strategy, dict) or OmegaConf.is_config(adapter_strategy):
self.adapter_strategy = instantiate(adapter_strategy)
elif isinstance(adapter_strategy, adapter_mixin_strategies.AbstractAdapterStrategy):
self.adapter_strategy = adapter_strategy
else:
raise AttributeError(f'`adapter_strategy` provided is invalid : {adapter_strategy}')
def get_default_strategy_config(self) -> 'dataclass':
"""
Returns a default adapter module strategy.
"""
return adapter_mixin_strategies.ResidualAddAdapterStrategyConfig()
def adapter_unfreeze(self,):
"""
Sets the requires grad for all parameters in the adapter to True.
This method should be overridden for any custom unfreeze behavior that is required.
For example, if not all params of the adapter should be unfrozen.
"""
for param in self.parameters():
param.requires_grad_(True)
class LinearAdapter(nn.Module, AdapterModuleUtil):
"""
Simple linear feed-forward adapter module with LayerNorm and a single hidden layer with an activation function.
Note: The adapter explicitly initializes its final layer with all zeros in order to avoid affecting the
original model when all adapters are disabled.
Args:
in_features: Input dimension of the module. Note that for adapters, input_dim == output_dim.
dim: Hidden dimension of the feed forward network.
activation: Str name for an activation function.
norm_position: Str, can be `pre` or `post`. Defaults to `pre`. Determines whether the normalization
will occur in the first layer or the last layer. Certain architectures may prefer one over the other.
dropout: float value, whether to perform dropout on the output of the last layer of the adapter.
adapter_strategy: By default, ResidualAddAdapterStrategyConfig. An adapter composition function object.
"""
def __init__(
self,
in_features: int,
dim: int,
activation: str = 'swish',
norm_position: str = 'pre',
dropout: float = 0.0,
adapter_strategy: adapter_mixin_strategies.ResidualAddAdapterStrategyConfig = None,
):
super().__init__()
activation = activation_registry[activation]()
# If the activation can be executed in place, do so.
if hasattr(activation, 'inplace'):
activation.inplace = True
assert norm_position in ['pre', 'post']
self.norm_position = norm_position
if norm_position == 'pre':
self.module = nn.Sequential(
nn.LayerNorm(in_features),
nn.Linear(in_features, dim, bias=False),
activation,
nn.Linear(dim, in_features, bias=False),
)
elif norm_position == 'post':
self.module = nn.Sequential(
nn.Linear(in_features, dim, bias=False),
activation,
nn.Linear(dim, in_features, bias=False),
nn.LayerNorm(in_features),
)
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
# Setup adapter strategy
self.setup_adapter_strategy(adapter_strategy)
# reset parameters
self.reset_parameters()
def reset_parameters(self):
# The final projection must be zero-initialized so that a freshly added adapter is a no-op.
if self.norm_position == 'pre':
self.module[-1].weight.data *= 0
elif self.norm_position == 'post':
self.module[-1].weight.data *= 0
self.module[-1].bias.data *= 0
def forward(self, x):
x = self.module(x)
# Add dropout if available
if self.dropout is not None:
x = self.dropout(x)
return x
@dataclass
class LinearAdapterConfig:
in_features: int
dim: int
activation: str = 'swish'
norm_position: str = 'pre'
dropout: float = 0.0
adapter_strategy: Optional[Any] = adapter_mixin_strategies.ResidualAddAdapterStrategyConfig()
_target_: str = "{0}.{1}".format(LinearAdapter.__module__, LinearAdapter.__name__)
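# Illustrative usage sketch: a minimal example of instantiating the adapter above and
# checking that, thanks to the zero-initialized final projection, a freshly created
# adapter outputs zeros (for norm_position='pre'), so enabling it does not perturb the
# host model until it is trained. The shapes below are arbitrary.
if __name__ == "__main__":
    import torch

    adapter = LinearAdapter(in_features=512, dim=64)
    x = torch.randn(4, 10, 512)
    y = adapter(x)
    print(y.abs().max())  # 0.0 right after initialization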
|
NeMo-main
|
nemo/collections/common/parts/adapter_modules.py
|
# Copyright (c) 2019, Myrtle Software Limited. All rights reserved.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from nemo.utils import logging
def rnn(
input_size: int,
hidden_size: int,
num_layers: int,
norm: Optional[str] = None,
forget_gate_bias: Optional[float] = 1.0,
dropout: Optional[float] = 0.0,
norm_first_rnn: Optional[bool] = None,
t_max: Optional[int] = None,
weights_init_scale: float = 1.0,
hidden_hidden_bias_scale: float = 0.0,
proj_size: int = 0,
) -> torch.nn.Module:
"""
Utility function to provide unified interface to common LSTM RNN modules.
Args:
input_size: Input dimension.
hidden_size: Hidden dimension of the RNN.
num_layers: Number of RNN layers.
norm: Optional string representing type of normalization to apply to the RNN.
Supported values are None, batch and layer.
forget_gate_bias: float, set by default to 1.0, which constructs a forget gate
initialized to 1.0.
Reference:
[An Empirical Exploration of Recurrent Network Architectures](http://proceedings.mlr.press/v37/jozefowicz15.pdf)
dropout: Optional dropout to apply to end of multi-layered RNN.
norm_first_rnn: Whether to normalize the first RNN layer.
t_max: int value, set to None by default. If an int is specified, performs Chrono Initialization
of the LSTM network, based on the maximum number of timesteps `t_max` expected during the course
of training.
Reference:
[Can recurrent neural networks warp time?](https://openreview.net/forum?id=SJcKhk-Ab)
weights_init_scale: Float scale of the weights after initialization. Setting to lower than one
sometimes helps reduce variance between runs.
hidden_hidden_bias_scale: Float scale for the hidden-to-hidden bias scale. Set to 0.0 for
the default behaviour.
Returns:
A RNN module
"""
if norm not in [None, "batch", "layer"]:
raise ValueError(f"unknown norm={norm}")
if norm is None:
return LSTMDropout(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
proj_size=proj_size,
)
if norm == "batch":
return BNRNNSum(
input_size=input_size,
hidden_size=hidden_size,
rnn_layers=num_layers,
batch_norm=True,
dropout=dropout,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
norm_first_rnn=norm_first_rnn,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
proj_size=proj_size,
)
if norm == "layer":
return torch.jit.script(
ln_lstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
)
)
class OverLastDim(torch.nn.Module):
"""Collapses a tensor to 2D, applies a module, and (re-)expands the tensor.
An n-dimensional tensor of shape (s_1, s_2, ..., s_n) is first collapsed to
a tensor with shape (s_1*s_2*...*s_n-1, s_n). The module is called with
this as input producing (s_1*s_2*...*s_n-1, s_n') --- note that the final
dimension can change. This is expanded to (s_1, s_2, ..., s_n-1, s_n') and
returned.
Args:
module (torch.nn.Module): Module to apply. Must accept a 2D tensor as
input and produce a 2D tensor as output, optionally changing the
size of the last dimension.
"""
def __init__(self, module: torch.nn.Module):
super().__init__()
self.module = module
def forward(self, x: torch.Tensor) -> torch.Tensor:
*dims, _ = x.size()
reduced_dims = 1
for dim in dims:
reduced_dims *= dim
x = x.view(reduced_dims, -1)
x = self.module(x)
x = x.view(*dims, -1)
return x
class LSTMDropout(torch.nn.Module):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
dropout: Optional[float],
forget_gate_bias: Optional[float],
t_max: Optional[int] = None,
weights_init_scale: float = 1.0,
hidden_hidden_bias_scale: float = 0.0,
proj_size: int = 0,
):
"""Returns an LSTM with forget gate bias init to `forget_gate_bias`.
Args:
input_size: See `torch.nn.LSTM`.
hidden_size: See `torch.nn.LSTM`.
num_layers: See `torch.nn.LSTM`.
dropout: See `torch.nn.LSTM`.
forget_gate_bias: float, set by default to 1.0, which constructs a forget gate
initialized to 1.0.
Reference:
[An Empirical Exploration of Recurrent Network Architectures](http://proceedings.mlr.press/v37/jozefowicz15.pdf)
t_max: int value, set to None by default. If an int is specified, performs Chrono Initialization
of the LSTM network, based on the maximum number of timesteps `t_max` expected during the course
of training.
Reference:
[Can recurrent neural networks warp time?](https://openreview.net/forum?id=SJcKhk-Ab)
weights_init_scale: Float scale of the weights after initialization. Setting to lower than one
sometimes helps reduce variance between runs.
hidden_hidden_bias_scale: Float scale for the hidden-to-hidden bias scale. Set to 0.0 for
the default behaviour.
Returns:
A `torch.nn.LSTM`.
"""
super(LSTMDropout, self).__init__()
self.lstm = torch.nn.LSTM(
input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, dropout=dropout, proj_size=proj_size
)
if t_max is not None:
# apply chrono init
for name, v in self.lstm.named_parameters():
if 'bias' in name:
p = getattr(self.lstm, name)
n = p.nelement()
hidden_size = n // 4
p.data.fill_(0)
p.data[hidden_size : 2 * hidden_size] = torch.log(
torch.nn.init.uniform_(p.data[0:hidden_size], 1, t_max - 1)
)
# forget gate biases = log(uniform(1, Tmax-1))
p.data[0:hidden_size] = -p.data[hidden_size : 2 * hidden_size]
# input gate biases = -(forget gate biases)
elif forget_gate_bias is not None:
for name, v in self.lstm.named_parameters():
if "bias_ih" in name:
bias = getattr(self.lstm, name)
bias.data[hidden_size : 2 * hidden_size].fill_(forget_gate_bias)
if "bias_hh" in name:
bias = getattr(self.lstm, name)
bias.data[hidden_size : 2 * hidden_size] *= float(hidden_hidden_bias_scale)
self.dropout = torch.nn.Dropout(dropout) if dropout else None
for name, v in self.named_parameters():
if 'weight' in name or 'bias' in name:
v.data *= float(weights_init_scale)
def forward(
self, x: torch.Tensor, h: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
x, h = self.lstm(x, h)
if self.dropout:
x = self.dropout(x)
return x, h
class RNNLayer(torch.nn.Module):
"""A single RNNLayer with optional batch norm."""
def __init__(
self,
input_size: int,
hidden_size: int,
rnn_type: torch.nn.Module = torch.nn.LSTM,
batch_norm: bool = True,
forget_gate_bias: Optional[float] = 1.0,
t_max: Optional[int] = None,
weights_init_scale: float = 1.0,
hidden_hidden_bias_scale: float = 0.0,
proj_size: int = 0,
):
super().__init__()
if batch_norm:
self.bn = OverLastDim(torch.nn.BatchNorm1d(input_size))
if rnn_type is torch.nn.LSTM and not batch_norm:
# batch_norm will apply bias, no need to add a second to LSTM
self.rnn = LSTMDropout(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
dropout=0.0,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
proj_size=proj_size,
)
else:
self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size, bias=not batch_norm)
def forward(
self, x: torch.Tensor, hx: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
if hasattr(self, 'bn'):
x = x.contiguous()
x = self.bn(x)
x, h = self.rnn(x, hx=hx)
return x, h
def _flatten_parameters(self):
self.rnn.flatten_parameters()
class BNRNNSum(torch.nn.Module):
"""RNN wrapper with optional batch norm.
Instantiates an RNN. If it is an LSTM it initialises the forget gate
bias to `forget_gate_bias`. Optionally applies a batch normalisation layer to
the input with the statistics computed over all time steps. If dropout > 0
then it is applied to all layer outputs except the last.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
rnn_type: torch.nn.Module = torch.nn.LSTM,
rnn_layers: int = 1,
batch_norm: bool = True,
dropout: Optional[float] = 0.0,
forget_gate_bias: Optional[float] = 1.0,
norm_first_rnn: bool = False,
t_max: Optional[int] = None,
weights_init_scale: float = 1.0,
hidden_hidden_bias_scale: float = 0.0,
proj_size: int = 0,
):
super().__init__()
self.rnn_layers = rnn_layers
self.layers = torch.nn.ModuleList()
for i in range(rnn_layers):
final_layer = (rnn_layers - 1) == i
self.layers.append(
RNNLayer(
input_size,
hidden_size,
rnn_type=rnn_type,
batch_norm=batch_norm and (norm_first_rnn or i > 0),
forget_gate_bias=forget_gate_bias,
t_max=t_max,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
proj_size=proj_size,
)
)
if dropout is not None and dropout > 0.0 and not final_layer:
self.layers.append(torch.nn.Dropout(dropout))
input_size = hidden_size
def forward(
self, x: torch.Tensor, hx: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
hx = self._parse_hidden_state(hx)
hs = []
cs = []
rnn_idx = 0
for layer in self.layers:
if isinstance(layer, torch.nn.Dropout):
x = layer(x)
else:
x, h_out = layer(x, hx=hx[rnn_idx])
hs.append(h_out[0])
cs.append(h_out[1])
rnn_idx += 1
del h_out
h_0 = torch.stack(hs, dim=0)
c_0 = torch.stack(cs, dim=0)
return x, (h_0, c_0)
def _parse_hidden_state(
self, hx: Optional[Tuple[torch.Tensor, torch.Tensor]]
) -> Union[List[None], List[Tuple[torch.Tensor, torch.Tensor]]]:
"""
Dealing with the hidden state:
Typically in pytorch: (h_0, c_0)
h_0 = ``[num_layers * num_directions, batch, hidden_size]``
c_0 = ``[num_layers * num_directions, batch, hidden_size]``
"""
if hx is None:
return [None] * self.rnn_layers
else:
h_0, c_0 = hx
if h_0.shape[0] != self.rnn_layers:
raise ValueError(
'Provided initial state value `h_0` must be of shape : '
'[num_layers * num_directions, batch, hidden_size]'
)
return [(h_0[i], c_0[i]) for i in range(h_0.shape[0])]
def _flatten_parameters(self):
for layer in self.layers:
if isinstance(layer, (torch.nn.LSTM, torch.nn.GRU, torch.nn.RNN)):
layer._flatten_parameters()
class StackTime(torch.nn.Module):
"""
Stacks time within the feature dim, so as to behave as a downsampling operation.
"""
def __init__(self, factor: int):
super().__init__()
self.factor = int(factor)
def forward(self, x: List[Tuple[torch.Tensor]]) -> Tuple[torch.Tensor, torch.Tensor]:
# x is a (features, lengths) tuple; features are (T, B, D), lengths are (B,).
x, x_lens = x
seq = [x]
for i in range(1, self.factor):
tmp = torch.zeros_like(x)
tmp[:-i, :, :] = x[i:, :, :]
seq.append(tmp)
x_lens = torch.ceil(x_lens.float() / self.factor).int()
return torch.cat(seq, dim=2)[:: self.factor, :, :], x_lens
def ln_lstm(
input_size: int,
hidden_size: int,
num_layers: int,
dropout: Optional[float],
forget_gate_bias: Optional[float],
t_max: Optional[int],
weights_init_scale: Optional[float] = None, # ignored
hidden_hidden_bias_scale: Optional[float] = None, # ignored
) -> torch.nn.Module:
"""Returns a ScriptModule that mimics a PyTorch native LSTM."""
# The following are not implemented.
if dropout is not None and dropout != 0.0:
raise ValueError('`dropout` not supported with LayerNormLSTM')
if t_max is not None:
logging.warning("LayerNormLSTM does not support chrono init via `t_max`")
if weights_init_scale is not None:
logging.warning("`weights_init_scale` is ignored for LayerNormLSTM")
if hidden_hidden_bias_scale is not None:
logging.warning("`hidden_hidden_bias_scale` is ignored for LayerNormLSTM")
return StackedLSTM(
num_layers,
LSTMLayer,
first_layer_args=[LayerNormLSTMCell, input_size, hidden_size, forget_gate_bias],
other_layer_args=[LayerNormLSTMCell, hidden_size, hidden_size, forget_gate_bias],
)
class LSTMLayer(torch.nn.Module):
def __init__(self, cell, *cell_args):
super(LSTMLayer, self).__init__()
self.cell = cell(*cell_args)
def forward(
self, input: torch.Tensor, state: Tuple[torch.Tensor, torch.Tensor]
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
inputs = input.unbind(0)
outputs = []
for i in range(len(inputs)):
out, state = self.cell(inputs[i], state)
outputs += [out]
return torch.stack(outputs), state
class LayerNormLSTMCell(torch.nn.Module):
def __init__(self, input_size, hidden_size, forget_gate_bias):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.weight_ih = torch.nn.Parameter(torch.randn(4 * hidden_size, input_size))
self.weight_hh = torch.nn.Parameter(torch.randn(4 * hidden_size, hidden_size))
# LayerNorm provides the learnable biases; the input/hidden projections carry no separate bias terms.
self.layernorm_i = torch.nn.LayerNorm(4 * hidden_size)
self.layernorm_h = torch.nn.LayerNorm(4 * hidden_size)
self.layernorm_c = torch.nn.LayerNorm(hidden_size)
self.reset_parameters()
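# After the uniform reset above, fix the forget-gate bias slices: zero on the input branch and
# `forget_gate_bias` on the hidden branch (gate order after chunking: input, forget, cell, output).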
self.layernorm_i.bias.data[hidden_size : 2 * hidden_size].fill_(0.0)
self.layernorm_h.bias.data[hidden_size : 2 * hidden_size].fill_(forget_gate_bias)
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
torch.nn.init.uniform_(weight, -stdv, stdv)
def forward(
self, input: torch.Tensor, state: Tuple[torch.Tensor, torch.Tensor]
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
hx, cx = state
igates = self.layernorm_i(torch.mm(input, self.weight_ih.t()))
hgates = self.layernorm_h(torch.mm(hx, self.weight_hh.t()))
gates = igates + hgates
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = self.layernorm_c((forgetgate * cx) + (ingate * cellgate))
hy = outgate * torch.tanh(cy)
return hy, (hy, cy)
def init_stacked_lstm(
num_layers: int, layer: torch.nn.Module, first_layer_args: List, other_layer_args: List
) -> torch.nn.ModuleList:
layers = [layer(*first_layer_args)] + [layer(*other_layer_args) for _ in range(num_layers - 1)]
return torch.nn.ModuleList(layers)
class StackedLSTM(torch.nn.Module):
def __init__(self, num_layers: int, layer: torch.nn.Module, first_layer_args: List, other_layer_args: List):
super(StackedLSTM, self).__init__()
self.layers: torch.nn.ModuleList = init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args)
def forward(
self, input: torch.Tensor, states: Optional[List[Tuple[torch.Tensor, torch.Tensor]]]
) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]:
if states is None:
temp_states: List[Tuple[torch.Tensor, torch.Tensor]] = []
batch = input.size(1)
for layer in self.layers:
temp_states.append(
(
torch.zeros(batch, layer.cell.hidden_size, dtype=input.dtype, device=input.device),
torch.zeros(batch, layer.cell.hidden_size, dtype=input.dtype, device=input.device),
)
)
states = temp_states
output_states: List[Tuple[torch.Tensor, torch.Tensor]] = []
output = input
for i, rnn_layer in enumerate(self.layers):
state = states[i]
output, out_state = rnn_layer(output, state)
output_states.append(out_state)
return output, output_states
def label_collate(labels, device=None):
"""Collates the label inputs for the rnn-t prediction network.
If `labels` is already in torch.Tensor form this is a no-op.
Args:
labels: A list of label-index sequences, or a torch.Tensor.
device: Optional torch device to place the label on.
Returns:
A padded torch.Tensor of shape (batch, max_seq_len).
"""
if isinstance(labels, torch.Tensor):
return labels.type(torch.int64)
if not isinstance(labels, (list, tuple)):
raise ValueError(f"`labels` should be a list or tensor not {type(labels)}")
batch_size = len(labels)
max_len = max(len(label) for label in labels)
cat_labels = np.full((batch_size, max_len), fill_value=0.0, dtype=np.int32)
for e, l in enumerate(labels):
cat_labels[e, : len(l)] = l
labels = torch.tensor(cat_labels, dtype=torch.int64, device=device)
return labels
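# Illustrative usage sketch: builds a plain (un-normalized) two-layer LSTM through the
# `rnn` factory, downsamples the time axis with `StackTime`, and collates a ragged batch
# of label sequences with `label_collate`. Shapes follow the time-first (T, B, D)
# convention used by the modules above; the sizes are arbitrary.
if __name__ == "__main__":
    lstm = rnn(input_size=80, hidden_size=128, num_layers=2, norm=None, dropout=0.1)
    feats = torch.randn(50, 4, 80)  # (T, B, D)
    out, (h, c) = lstm(feats)
    print(out.shape)  # torch.Size([50, 4, 128])

    stack = StackTime(factor=2)
    lens = torch.tensor([50, 48, 30, 20])
    stacked, stacked_lens = stack((out, lens))
    print(stacked.shape, stacked_lens)  # torch.Size([25, 4, 256]) tensor([25, 24, 15, 10], dtype=torch.int32)

    print(label_collate([[1, 2, 3], [4, 5]]))  # zero-padded (2, 3) int64 tensor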
|
NeMo-main
|
nemo/collections/common/parts/rnn.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from os.path import expanduser
from typing import Any, Callable, Dict, Iterator, List, Optional, Union
from nemo.utils import logging
from nemo.utils.data_utils import DataStoreObject, datastore_path_to_local_path, is_datastore_path
class ManifestBase:
def __init__(self, *args, **kwargs):
raise ValueError(
"This class is deprecated, look at https://github.com/NVIDIA/NeMo/pull/284 for correct behaviour."
)
class ManifestEN:
def __init__(self, *args, **kwargs):
raise ValueError(
"This class is deprecated, look at https://github.com/NVIDIA/NeMo/pull/284 for correct behaviour."
)
def item_iter(
manifests_files: Union[str, List[str]], parse_func: Callable[[str, Optional[str]], Dict[str, Any]] = None
) -> Iterator[Dict[str, Any]]:
"""Iterate through json lines of provided manifests.
NeMo ASR pipelines often assume a certain manifest file structure. In
particular, each manifest file should be a line-per-sample file where
each line is a valid json dict. Each such json dict should have a field
for the audio file path (string), a field for the duration (float) and a
field for the text (string). An offset may be provided as an additional
field and defaults to None.
Args:
manifests_files: Either single string file or list of such -
manifests to yield items from.
parse_func: A callable function which accepts as input a single line
of a manifest and optionally the manifest file itself,
and parses it, returning a dictionary mapping from str -> Any.
Yields:
Parsed key to value item dicts.
Raises:
ValueError: If an invalid json line structure is encountered.
"""
if isinstance(manifests_files, str):
manifests_files = [manifests_files]
if parse_func is None:
parse_func = __parse_item
k = -1
logging.debug('Manifest files: %s', str(manifests_files))
for manifest_file in manifests_files:
logging.debug('Using manifest file: %s', str(manifest_file))
cached_manifest_file = DataStoreObject(manifest_file).get()
logging.debug('Cached at: %s', str(cached_manifest_file))
with open(expanduser(cached_manifest_file), 'r') as f:
for line in f:
k += 1
item = parse_func(line, manifest_file)
item['id'] = k
yield item
def __parse_item(line: str, manifest_file: str) -> Dict[str, Any]:
item = json.loads(line)
# Audio file
if 'audio_filename' in item:
item['audio_file'] = item.pop('audio_filename')
elif 'audio_filepath' in item:
item['audio_file'] = item.pop('audio_filepath')
elif 'audio_file' not in item:
raise ValueError(
f"Manifest file {manifest_file} has invalid json line structure: {line} without proper audio file key."
)
# If the audio path is a relative path and does not exist,
# try to attach the parent directory of manifest to the audio path.
# Revert to the original path if the new path still doesn't exist.
# Assume that the audio path is like "wavs/xxxxxx.wav".
item['audio_file'] = get_full_path(audio_file=item['audio_file'], manifest_file=manifest_file)
# Duration.
if 'duration' not in item:
raise ValueError(
f"Manifest file {manifest_file} has invalid json line structure: {line} without proper duration key."
)
# Text.
if 'text' in item:
pass
elif 'text_filepath' in item:
with open(item.pop('text_filepath'), 'r') as f:
item['text'] = f.read().replace('\n', '')
elif 'normalized_text' in item:
item['text'] = item['normalized_text']
else:
item['text'] = ""
# Optional RTTM file
if 'rttm_file' in item:
pass
elif 'rttm_filename' in item:
item['rttm_file'] = item.pop('rttm_filename')
elif 'rttm_filepath' in item:
item['rttm_file'] = item.pop('rttm_filepath')
else:
item['rttm_file'] = None
if item['rttm_file'] is not None:
item['rttm_file'] = get_full_path(audio_file=item['rttm_file'], manifest_file=manifest_file)
# Optional audio feature file
if 'feature_file' in item:
pass
elif 'feature_filename' in item:
item['feature_file'] = item.pop('feature_filename')
elif 'feature_filepath' in item:
item['feature_file'] = item.pop('feature_filepath')
else:
item['feature_file'] = None
if item['feature_file'] is not None:
item['feature_file'] = get_full_path(audio_file=item['feature_file'], manifest_file=manifest_file)
item = dict(
audio_file=item['audio_file'],
duration=item['duration'],
text=item['text'],
rttm_file=item['rttm_file'],
feature_file=item['feature_file'],
offset=item.get('offset', None),
speaker=item.get('speaker', None),
orig_sr=item.get('orig_sample_rate', None),
token_labels=item.get('token_labels', None),
lang=item.get('lang', None),
)
return item
def get_full_path(
audio_file: Union[str, List[str]],
manifest_file: Optional[str] = None,
data_dir: Optional[str] = None,
audio_file_len_limit: int = 255,
) -> Union[str, List[str]]:
"""Get full path to audio_file.
If the audio_file is a relative path and does not exist,
try to attach the parent directory of manifest to the audio path.
Revert to the original path if the new path still doesn't exist.
Assume that the audio path is like "wavs/xxxxxx.wav".
Args:
audio_file: path to an audio file, either absolute or assumed relative
to the manifest directory or data directory.
Alternatively, a list of paths may be provided.
manifest_file: path to a manifest file
data_dir: path to a directory containing data, use only if a manifest file is not provided
audio_file_len_limit: limit for length of audio_file when using relative paths
Returns:
Full path to audio_file or a list of paths.
"""
if isinstance(audio_file, list):
# If input is a list, return a list of full paths
return [
get_full_path(
audio_file=a_file,
manifest_file=manifest_file,
data_dir=data_dir,
audio_file_len_limit=audio_file_len_limit,
)
for a_file in audio_file
]
elif isinstance(audio_file, str):
# If input is a string, get the corresponding full path
if (
(len(audio_file) < audio_file_len_limit)
and not os.path.isabs(audio_file)
and not os.path.isfile(audio_file)
):
# If audio_file is not available and the path is not absolute, the full path is assumed
# to be relative to the manifest file parent directory or data directory.
if manifest_file is None and data_dir is None:
raise ValueError(f'Use either manifest_file or data_dir to specify the data directory.')
elif manifest_file is not None and data_dir is not None:
raise ValueError(
f'Parameters manifest_file and data_dir cannot be used simultaneously. Currently manifest_file is {manifest_file} and data_dir is {data_dir}.'
)
# resolve the data directory
if data_dir is None:
data_dir = os.path.dirname(manifest_file)
# assume audio_file path is relative to data_dir
audio_file_path = os.path.join(data_dir, audio_file)
if is_datastore_path(audio_file_path):
# If audio was originally on an object store, use locally-cached path
audio_file_path = datastore_path_to_local_path(audio_file_path)
if os.path.isfile(audio_file_path):
audio_file = os.path.abspath(audio_file_path)
else:
audio_file = expanduser(audio_file)
else:
audio_file = expanduser(audio_file)
return audio_file
else:
raise ValueError(f'Unexpected audio_file type {type(audio_file)}, audio_file {audio_file}.')
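# Illustrative usage sketch: writes a tiny two-line manifest to a temporary directory and
# iterates over it with `item_iter`. The relative audio paths are hypothetical and simply
# demonstrate how `get_full_path` resolves entries against the manifest's parent directory.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        manifest_path = os.path.join(tmp_dir, "manifest.json")
        with open(manifest_path, "w") as f:
            f.write(json.dumps({"audio_filepath": "wavs/a.wav", "duration": 1.2, "text": "hello"}) + "\n")
            f.write(json.dumps({"audio_filepath": "wavs/b.wav", "duration": 0.7, "text": "world"}) + "\n")
        for entry in item_iter(manifest_path):
            print(entry["id"], entry["audio_file"], entry["duration"], entry["text"])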
|
NeMo-main
|
nemo/collections/common/parts/preprocessing/manifest.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/common/parts/preprocessing/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of simple character-based parsers. These parsers handle cleaning and tokenization by default.
English and Russian parsers are currently provided.
"""
import string
from typing import List, Optional
from nemo.collections.common.parts.preprocessing import cleaners
class CharParser:
"""Functor for parsing raw strings into list of int tokens.
Examples:
>>> parser = CharParser(['a', 'b', 'c'])
>>> parser('abc')
[0, 1, 2]
"""
def __init__(
self,
labels: List[str],
*,
unk_id: int = -1,
blank_id: int = -1,
do_normalize: bool = True,
do_lowercase: bool = True,
do_tokenize: bool = True,
):
"""Creates simple mapping char parser.
Args:
labels: List of labels to allocate indexes for. Essentially,
this is a id to str mapping.
unk_id: Index to choose for OOV words (default: -1).
blank_id: Index to filter out from final list of tokens
(default: -1).
do_normalize: True if apply normalization step before tokenizing
(default: True).
do_lowercase: True if apply lowercasing at normalizing step
(default: True).
"""
self._labels = labels
self._unk_id = unk_id
self._blank_id = blank_id
self._do_normalize = do_normalize
self._do_lowercase = do_lowercase
self._do_tokenize = do_tokenize
self._labels_map = {label: index for index, label in enumerate(labels)}
self._special_labels = set([label for label in labels if len(label) > 1])
def __call__(self, text: str) -> Optional[List[int]]:
if self._do_normalize:
text = self._normalize(text)
if text is None:
return None
if not self._do_tokenize:
return text
text_tokens = self._tokenize(text)
return text_tokens
def _normalize(self, text: str) -> Optional[str]:
text = text.strip()
if self._do_lowercase:
text = text.lower()
return text
def _tokenize(self, text: str) -> List[int]:
tokens = []
# Split by word to find special labels.
for word_id, word in enumerate(text.split(' ')):
if word_id != 0: # Not first word - so we insert space before.
tokens.append(self._labels_map.get(' ', self._unk_id))
if word in self._special_labels:
tokens.append(self._labels_map[word])
continue
for char in word:
tokens.append(self._labels_map.get(char, self._unk_id))
# If unk_id == blank_id, OOV tokens are removed.
tokens = [token for token in tokens if token != self._blank_id]
return tokens
def decode(self, str_input):
r_map = {}
for k, v in self._labels_map.items():
r_map[v] = k
r_map[len(self._labels_map)] = "<BOS>"
r_map[len(self._labels_map) + 1] = "<EOS>"
r_map[len(self._labels_map) + 2] = "<P>"
out = []
for i in str_input:
# Skip OOV
if i not in r_map:
continue
out.append(r_map[i.item()])
return "".join(out)
class ENCharParser(CharParser):
"""Incorporates english-specific parsing logic."""
PUNCTUATION_TO_REPLACE = {'+': 'plus', '&': 'and', '%': 'percent'}
def __init__(self, abbreviation_version=None, make_table=True, *args, **kwargs):
"""Creates english-specific mapping char parser.
This class overrides normalizing implementation.
Args:
*args: Positional args to pass to `CharParser` constructor.
**kwargs: Key-value args to pass to `CharParser` constructor.
"""
super().__init__(*args, **kwargs)
self._table = None
if make_table:
self._table = self.__make_trans_table()
self.abbreviation_version = abbreviation_version
def __make_trans_table(self):
punctuation = string.punctuation
for char in self.PUNCTUATION_TO_REPLACE:
punctuation = punctuation.replace(char, '')
for label in self._labels:
punctuation = punctuation.replace(label, '')
table = str.maketrans(punctuation, ' ' * len(punctuation))
return table
def _normalize(self, text: str) -> Optional[str]:
# noinspection PyBroadException
try:
text = cleaners.clean_text(
string=text,
table=self._table,
punctuation_to_replace=self.PUNCTUATION_TO_REPLACE,
abbreviation_version=self.abbreviation_version,
)
except Exception:
return None
return text
class RUCharParser(CharParser):
"""Incorporates russian-specific parsing logic."""
PUNCTUATION_TO_REPLACE = {'+': 'плюс', 'ё': 'е'}
def __init__(self, *args, **kwargs):
"""Creates cyrillic-specific mapping char parser.
This class overrides normalizing implementation.
Args:
*args: Positional args to pass to `CharParser` constructor.
**kwargs: Key-value args to pass to `CharParser` constructor.
"""
super().__init__(*args, **kwargs)
self._table = self.__make_trans_table()
def __make_trans_table(self):
punctuation = string.punctuation
for char in self.PUNCTUATION_TO_REPLACE:
punctuation = punctuation.replace(char, '')
for label in self._labels:
punctuation = punctuation.replace(label, '')
table = str.maketrans(punctuation, ' ' * len(punctuation))
return table
def _normalize(self, text: str) -> Optional[str]:
# noinspection PyBroadException
try:
text = cleaners.clean_text(
string=text, table=self._table, punctuation_to_replace=self.PUNCTUATION_TO_REPLACE,
)
except Exception:
return None
return text
NAME_TO_PARSER = {'base': CharParser, 'en': ENCharParser, 'ru': RUCharParser}
def make_parser(labels: Optional[List[str]] = None, name: str = 'base', **kwargs,) -> CharParser:
"""Creates parser from labels, set of arguments and concise parser name.
Args:
labels: List of labels to allocate indexes for. If set to
None then labels would be ascii table list. Essentially, this is an
id to str mapping (default: None).
name: Concise name of parser to create (default: 'base').
**kwargs: Other set of kwargs to pass to parser constructor.
Returns:
Instance of `CharParser`.
Raises:
ValueError: For invalid parser name.
Examples:
>>> type(make_parser(['a', 'b', 'c'], 'en'))
ENCharParser
"""
if name not in NAME_TO_PARSER:
raise ValueError('Invalid parser name.')
if labels is None:
labels = list(string.printable)
parser_type = NAME_TO_PARSER[name]
parser = parser_type(labels=labels, **kwargs)
return parser
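# Illustrative usage sketch: builds the basic character parser over a tiny label set and
# tokenizes two strings. The 'en' parser additionally runs text cleaning through
# `cleaners.clean_text`, so the plain 'base' parser is used here to keep the example
# self-contained.
if __name__ == "__main__":
    demo_parser = make_parser(labels=[' ', 'a', 'b', 'c'], name='base')
    print(demo_parser('ab c'))  # [1, 2, 0, 3]
    print(demo_parser('ab z'))  # [1, 2, 0] -- 'z' maps to unk_id (-1), which equals the default blank_id and is dropped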
|
NeMo-main
|
nemo/collections/common/parts/preprocessing/parsers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
from itertools import combinations
from typing import Any, Dict, Iterable, List, Optional, Union
import pandas as pd
from nemo.collections.common.parts.preprocessing import manifest, parsers
from nemo.utils import logging
class _Collection(collections.UserList):
"""List of parsed and preprocessed data."""
OUTPUT_TYPE = None # Single element output type.
class Text(_Collection):
"""Simple list of preprocessed text entries, result in list of tokens."""
OUTPUT_TYPE = collections.namedtuple('TextEntity', 'tokens')
def __init__(self, texts: List[str], parser: parsers.CharParser):
"""Instantiates text manifest and do the preprocessing step.
Args:
texts: List of raw texts strings.
parser: Instance of `CharParser` to convert string to tokens.
"""
data, output_type = [], self.OUTPUT_TYPE
for text in texts:
tokens = parser(text)
if tokens is None:
logging.warning("Fail to parse '%s' text line.", text)
continue
data.append(output_type(tokens))
super().__init__(data)
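# Illustrative usage sketch: builds a `Text` collection from raw strings using the character
# parser factory from `parsers`. Lines the parser rejects (returns None for) are skipped with
# a warning; each kept entry exposes its `.tokens`.
if __name__ == "__main__":
    demo_parser = parsers.make_parser(labels=[' ', 'a', 'b', 'c'], name='base')
    demo_collection = Text(["abc", "a b c"], parser=demo_parser)
    print(len(demo_collection), demo_collection[0].tokens)  # 2 [1, 2, 3]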
class FromFileText(Text):
"""Another form of texts manifest with reading from file."""
def __init__(self, file: str, parser: parsers.CharParser):
"""Instantiates text manifest and do the preprocessing step.
Args:
file: File path to read from.
parser: Instance of `CharParser` to convert string to tokens.
"""
texts = self.__parse_texts(file)
super().__init__(texts, parser)
@staticmethod
def __parse_texts(file: str) -> List[str]:
if not os.path.exists(file):
raise ValueError('Provided texts file does not exist!')
_, ext = os.path.splitext(file)
if ext == '.csv':
texts = pd.read_csv(file)['transcript'].tolist()
elif ext == '.json':  # A json-lines manifest rather than a single json document.
texts = list(item['text'] for item in manifest.item_iter(file))
else:
with open(file, 'r') as f:
texts = f.readlines()
return texts
class AudioText(_Collection):
"""List of audio-transcript text correspondence with preprocessing."""
OUTPUT_TYPE = collections.namedtuple(
typename='AudioTextEntity',
field_names='id audio_file duration text_tokens offset text_raw speaker orig_sr lang',
)
def __init__(
self,
ids: List[int],
audio_files: List[str],
durations: List[float],
texts: List[str],
offsets: List[str],
speakers: List[Optional[int]],
orig_sampling_rates: List[Optional[int]],
token_labels: List[Optional[int]],
langs: List[Optional[str]],
parser: parsers.CharParser,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_number: Optional[int] = None,
do_sort_by_duration: bool = False,
index_by_file_id: bool = False,
):
"""Instantiates audio-text manifest with filters and preprocessing.
Args:
ids: List of examples positions.
audio_files: List of audio files.
durations: List of float durations.
texts: List of raw text transcripts.
offsets: List of duration offsets or None.
speakers: List of optional speakers ids.
orig_sampling_rates: List of original sampling rates of audio files.
token_labels: List of pre-computed token label sequences, or None to tokenize `texts` with `parser`.
langs: List of language ids, one for each sample, or None.
parser: Instance of `CharParser` to convert string to tokens.
min_duration: Minimum duration to keep entry with (default: None).
max_duration: Maximum duration to keep entry with (default: None).
max_number: Maximum number of samples to collect.
do_sort_by_duration: True if sort samples list by duration. Not compatible with index_by_file_id.
index_by_file_id: If True, saves a mapping from filename base (ID) to index in data.
"""
output_type = self.OUTPUT_TYPE
data, duration_filtered, num_filtered, total_duration = [], 0.0, 0, 0.0
if index_by_file_id:
self.mapping = {}
for id_, audio_file, duration, offset, text, speaker, orig_sr, token_labels, lang in zip(
ids, audio_files, durations, offsets, texts, speakers, orig_sampling_rates, token_labels, langs
):
# Duration filters.
if min_duration is not None and duration < min_duration:
duration_filtered += duration
num_filtered += 1
continue
if max_duration is not None and duration > max_duration:
duration_filtered += duration
num_filtered += 1
continue
if token_labels is not None:
text_tokens = token_labels
else:
if text != '':
if hasattr(parser, "is_aggregate") and parser.is_aggregate and isinstance(text, str):
if lang is not None:
text_tokens = parser(text, lang)
# for future use if want to add language bypass to audio_to_text classes
# elif hasattr(parser, "lang") and parser.lang is not None:
# text_tokens = parser(text, parser.lang)
else:
raise ValueError("lang required in manifest when using aggregate tokenizers")
else:
text_tokens = parser(text)
else:
text_tokens = []
if text_tokens is None:
duration_filtered += duration
num_filtered += 1
continue
total_duration += duration
data.append(output_type(id_, audio_file, duration, text_tokens, offset, text, speaker, orig_sr, lang))
if index_by_file_id:
file_id, _ = os.path.splitext(os.path.basename(audio_file))
if file_id not in self.mapping:
self.mapping[file_id] = []
self.mapping[file_id].append(len(data) - 1)
# Max number of entities filter.
if len(data) == max_number:
break
if do_sort_by_duration:
if index_by_file_id:
logging.warning("Tried to sort dataset by duration, but cannot since index_by_file_id is set.")
else:
data.sort(key=lambda entity: entity.duration)
logging.info("Dataset loaded with %d files totalling %.2f hours", len(data), total_duration / 3600)
logging.info("%d files were filtered totalling %.2f hours", num_filtered, duration_filtered / 3600)
super().__init__(data)
class ASRAudioText(AudioText):
"""`AudioText` collector from asr structured json files."""
def __init__(self, manifests_files: Union[str, List[str]], *args, **kwargs):
"""Parse lists of audio files, durations and transcripts texts.
Args:
manifests_files: Either single string file or list of such -
manifests to yield items from.
*args: Args to pass to `AudioText` constructor.
**kwargs: Kwargs to pass to `AudioText` constructor.
"""
ids, audio_files, durations, texts, offsets, = (
[],
[],
[],
[],
[],
)
speakers, orig_srs, token_labels, langs = [], [], [], []
for item in manifest.item_iter(manifests_files):
ids.append(item['id'])
audio_files.append(item['audio_file'])
durations.append(item['duration'])
texts.append(item['text'])
offsets.append(item['offset'])
speakers.append(item['speaker'])
orig_srs.append(item['orig_sr'])
token_labels.append(item['token_labels'])
langs.append(item['lang'])
super().__init__(
ids, audio_files, durations, texts, offsets, speakers, orig_srs, token_labels, langs, *args, **kwargs
)
class SpeechLabel(_Collection):
"""List of audio-label correspondence with preprocessing."""
OUTPUT_TYPE = collections.namedtuple(typename='SpeechLabelEntity', field_names='audio_file duration label offset',)
def __init__(
self,
audio_files: List[str],
durations: List[float],
labels: List[Union[int, str]],
offsets: List[Optional[float]],
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_number: Optional[int] = None,
do_sort_by_duration: bool = False,
index_by_file_id: bool = False,
):
"""Instantiates audio-label manifest with filters and preprocessing.
Args:
audio_files: List of audio files.
durations: List of float durations.
labels: List of labels.
offsets: List of offsets or None.
min_duration: Minimum duration to keep entry with (default: None).
max_duration: Maximum duration to keep entry with (default: None).
max_number: Maximum number of samples to collect.
do_sort_by_duration: True if sort samples list by duration.
index_by_file_id: If True, saves a mapping from filename base (ID) to index in data.
"""
if index_by_file_id:
self.mapping = {}
output_type = self.OUTPUT_TYPE
data, duration_filtered = [], 0.0
total_duration = 0.0
for audio_file, duration, command, offset in zip(audio_files, durations, labels, offsets):
# Duration filters.
if min_duration is not None and duration < min_duration:
duration_filtered += duration
continue
if max_duration is not None and duration > max_duration:
duration_filtered += duration
continue
data.append(output_type(audio_file, duration, command, offset))
total_duration += duration
if index_by_file_id:
file_id, _ = os.path.splitext(os.path.basename(audio_file))
self.mapping[file_id] = len(data) - 1
# Max number of entities filter.
if len(data) == max_number:
break
if do_sort_by_duration:
if index_by_file_id:
logging.warning("Tried to sort dataset by duration, but cannot since index_by_file_id is set.")
else:
data.sort(key=lambda entity: entity.duration)
logging.info(f"Filtered duration for loading collection is {duration_filtered / 3600: .2f} hours.")
logging.info(f"Dataset loaded with {len(data)} items, total duration of {total_duration / 3600: .2f} hours.")
self.uniq_labels = sorted(set(map(lambda x: x.label, data)))
logging.info("# {} files loaded accounting to # {} labels".format(len(data), len(self.uniq_labels)))
super().__init__(data)
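# Illustrative usage sketch: builds a small `SpeechLabel` collection directly from parallel
# lists. The audio paths are hypothetical placeholders; the 30-second clip is dropped by the
# `max_duration` filter, leaving two entries and two unique labels.
if __name__ == "__main__":
    demo_labels = SpeechLabel(
        audio_files=['a.wav', 'b.wav', 'c.wav'],
        durations=[1.0, 2.5, 30.0],
        labels=['yes', 'no', 'yes'],
        offsets=[None, None, None],
        max_duration=10.0,
    )
    print(len(demo_labels), demo_labels.uniq_labels)  # 2 ['no', 'yes']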
class ASRSpeechLabel(SpeechLabel):
"""`SpeechLabel` collector from structured json files."""
def __init__(
self,
manifests_files: Union[str, List[str]],
is_regression_task=False,
cal_labels_occurrence=False,
delimiter=None,
*args,
**kwargs,
):
"""Parse lists of audio files, durations and transcripts texts.
Args:
manifests_files: Either single string file or list of such -
manifests to yield items from.
is_regression_task: Whether this is a regression task (labels are floats rather than classes).
cal_labels_occurrence: Whether to calculate the occurrence count of each label.
delimiter: Separator for label strings.
*args: Args to pass to `SpeechLabel` constructor.
**kwargs: Kwargs to pass to `SpeechLabel` constructor.
"""
audio_files, durations, labels, offsets = [], [], [], []
all_labels = []
for item in manifest.item_iter(manifests_files, parse_func=self.__parse_item):
audio_files.append(item['audio_file'])
durations.append(item['duration'])
if not is_regression_task:
label = item['label']
label_list = label.split() if not delimiter else label.split(delimiter)
else:
label = float(item['label'])
label_list = [label]
labels.append(label)
offsets.append(item['offset'])
all_labels.extend(label_list)
if cal_labels_occurrence:
self.labels_occurrence = collections.Counter(all_labels)
super().__init__(audio_files, durations, labels, offsets, *args, **kwargs)
def __parse_item(self, line: str, manifest_file: str) -> Dict[str, Any]:
item = json.loads(line)
# Audio file
if 'audio_filename' in item:
item['audio_file'] = item.pop('audio_filename')
elif 'audio_filepath' in item:
item['audio_file'] = item.pop('audio_filepath')
else:
raise ValueError(f"Manifest file has invalid json line structure: {line} without proper audio file key.")
item['audio_file'] = manifest.get_full_path(audio_file=item['audio_file'], manifest_file=manifest_file)
# Duration.
if 'duration' not in item:
raise ValueError(f"Manifest file has invalid json line structure: {line} without proper duration key.")
# Label.
if 'command' in item:
item['label'] = item.pop('command')
elif 'target' in item:
item['label'] = item.pop('target')
elif 'label' in item:
pass
else:
raise ValueError(f"Manifest file has invalid json line structure: {line} without proper label key.")
item = dict(
audio_file=item['audio_file'],
duration=item['duration'],
label=item['label'],
offset=item.get('offset', None),
)
return item
class FeatureSequenceLabel(_Collection):
"""List of feature sequence of label correspondence with preprocessing."""
OUTPUT_TYPE = collections.namedtuple(typename='FeatureSequenceLabelEntity', field_names='feature_file seq_label',)
def __init__(
self,
feature_files: List[str],
seq_labels: List[str],
max_number: Optional[int] = None,
index_by_file_id: bool = False,
):
"""Instantiates feature-SequenceLabel manifest with filters and preprocessing.
Args:
feature_files: List of feature files.
seq_labels: List of sequences of labels.
max_number: Maximum number of samples to collect.
index_by_file_id: If True, saves a mapping from filename base (ID) to index in data.
"""
output_type = self.OUTPUT_TYPE
data, num_filtered = (
[],
0.0,
)
self.uniq_labels = set()
if index_by_file_id:
self.mapping = {}
for feature_file, seq_label in zip(feature_files, seq_labels):
label_tokens, uniq_labels_in_seq = self.relative_speaker_parser(seq_label)
if label_tokens is None:
num_filtered += 1
continue
data.append(output_type(feature_file, label_tokens))
self.uniq_labels |= uniq_labels_in_seq
if index_by_file_id:
file_id, _ = os.path.splitext(os.path.basename(feature_file))
self.mapping[feature_file] = len(data) - 1
# Max number of entities filter.
if len(data) == max_number:
break
logging.info("# {} files loaded including # {} unique labels".format(len(data), len(self.uniq_labels)))
super().__init__(data)
def relative_speaker_parser(self, seq_label):
"""Convert sequence of speaker labels to relative labels.
Convert sequence of absolute speaker to sequence of relative speaker [E A C A E E C] -> [0 1 2 1 0 0 2]
Within the sequence, a label that has not appeared before is assigned the next relative index (the current size of the mapping); otherwise its previously assigned relative index is reused.
Args:
seq_label (str): A string of a sequence of labels.
Return:
relative_seq_label (List) : A list of relative sequence of labels
unique_labels_in_seq (Set): A set of unique labels in the sequence
"""
seq = seq_label.split()
conversion_dict = dict()
relative_seq_label = []
for seg in seq:
if seg in conversion_dict:
converted = conversion_dict[seg]
else:
converted = len(conversion_dict)
conversion_dict[seg] = converted
relative_seq_label.append(converted)
unique_labels_in_seq = set(conversion_dict.keys())
return relative_seq_label, unique_labels_in_seq
class ASRFeatureSequenceLabel(FeatureSequenceLabel):
"""`FeatureSequenceLabel` collector from asr structured json files."""
def __init__(
self, manifests_files: Union[str, List[str]], max_number: Optional[int] = None, index_by_file_id: bool = False,
):
"""Parse lists of feature files and sequences of labels.
Args:
manifests_files: Either single string file or list of such -
manifests to yield items from.
max_number: Maximum number of samples to collect; pass to `FeatureSequenceLabel` constructor.
index_by_file_id: If True, saves a mapping from filename base (ID) to index in data; pass to `FeatureSequenceLabel` constructor.
"""
feature_files, seq_labels = [], []
for item in manifest.item_iter(manifests_files, parse_func=self._parse_item):
feature_files.append(item['feature_file'])
seq_labels.append(item['seq_label'])
super().__init__(feature_files, seq_labels, max_number, index_by_file_id)
def _parse_item(self, line: str, manifest_file: str) -> Dict[str, Any]:
item = json.loads(line)
# Feature file
if 'feature_filename' in item:
item['feature_file'] = item.pop('feature_filename')
elif 'feature_filepath' in item:
item['feature_file'] = item.pop('feature_filepath')
else:
raise ValueError(
f"Manifest file has invalid json line " f"structure: {line} without proper feature file key."
)
item['feature_file'] = os.path.expanduser(item['feature_file'])
# Seq of Label.
if 'seq_label' in item:
item['seq_label'] = item.pop('seq_label')
else:
raise ValueError(
f"Manifest file has invalid json line " f"structure: {line} without proper seq_label key."
)
item = dict(feature_file=item['feature_file'], seq_label=item['seq_label'],)
return item
class DiarizationLabel(_Collection):
"""List of diarization audio-label correspondence with preprocessing."""
OUTPUT_TYPE = collections.namedtuple(
typename='DiarizationLabelEntity',
field_names='audio_file duration rttm_file offset target_spks sess_spk_dict clus_spk_digits rttm_spk_digits',
)
def __init__(
self,
audio_files: List[str],
durations: List[float],
rttm_files: List[str],
offsets: List[float],
target_spks_list: List[tuple],
sess_spk_dicts: List[Dict],
clus_spk_list: List[tuple],
rttm_spk_list: List[tuple],
max_number: Optional[int] = None,
do_sort_by_duration: bool = False,
index_by_file_id: bool = False,
):
"""Instantiates audio-label manifest with filters and preprocessing.
Args:
audio_files:
List of audio file paths.
durations:
List of float durations.
rttm_files:
List of RTTM files (Groundtruth diarization annotation file).
offsets:
List of offsets or None.
target_spks_list (list of tuples):
List of tuples containing the two indices of targeted speakers for evaluation.
Example: [[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], [(0, 1), (1, 2), (0, 2)], ...]
sess_spk_dicts (list of Dict):
List of mapping dictionaries between RTTM speakers and speaker labels in the clustering result.
clus_spk_list (list of tuples):
List of tuples containing all the speaker indices from the clustering result.
Example: [(0, 1, 2, 3), (0, 1, 2), ...]
rttm_spk_list (list of tuples):
List of tuples containing all the speaker indices in the RTTM file.
Example: [(0, 1, 2), (0, 1), ...]
max_number: Maximum number of samples to collect
do_sort_by_duration: True if sort samples list by duration
index_by_file_id: If True, saves a mapping from filename base (ID) to index in data.
"""
if index_by_file_id:
self.mapping = {}
output_type = self.OUTPUT_TYPE
data, duration_filtered = [], 0.0
zipped_items = zip(
audio_files, durations, rttm_files, offsets, target_spks_list, sess_spk_dicts, clus_spk_list, rttm_spk_list
)
for (
audio_file,
duration,
rttm_file,
offset,
target_spks,
sess_spk_dict,
clus_spk_digits,
rttm_spk_digits,
) in zipped_items:
if duration is None:
duration = 0
data.append(
output_type(
audio_file,
duration,
rttm_file,
offset,
target_spks,
sess_spk_dict,
clus_spk_digits,
rttm_spk_digits,
)
)
if index_by_file_id:
file_id, _ = os.path.splitext(os.path.basename(audio_file))
self.mapping[file_id] = len(data) - 1
# Max number of entities filter.
if len(data) == max_number:
break
if do_sort_by_duration:
if index_by_file_id:
logging.warning("Tried to sort dataset by duration, but cannot since index_by_file_id is set.")
else:
data.sort(key=lambda entity: entity.duration)
logging.info(
"Filtered duration for loading collection is %f.", duration_filtered,
)
logging.info(f"Total {len(data)} session files loaded accounting to # {len(audio_files)} audio clips")
super().__init__(data)
class DiarizationSpeechLabel(DiarizationLabel):
"""`DiarizationLabel` diarization data sample collector from structured json files."""
def __init__(
self,
manifests_files: Union[str, List[str]],
emb_dict: Dict,
clus_label_dict: Dict,
round_digit=2,
seq_eval_mode=False,
pairwise_infer=False,
*args,
**kwargs,
):
"""
Parse lists of audio files, durations, and RTTM (diarization annotation) files. Since the diarization model infers only
two speakers at a time, speaker pairs are generated from the total number of speakers in the session.
Args:
manifests_files (str or list):
Path(s) to input manifest json files.
emb_dict (Dict):
Dictionary containing cluster-average embeddings and speaker mapping information.
clus_label_dict (Dict):
Segment-level speaker labels from clustering results.
round_digit (int):
Number of digits to be rounded.
seq_eval_mode (bool):
If True, F1 score will be calculated for each speaker pair during inference mode.
pairwise_infer (bool):
If True, this dataset class operates in inference mode. In inference mode, a set of speakers in the input audio
is split into multiple pairs of speakers and speaker tuples (e.g. 3 speakers: [(0,1), (1,2), (0,2)]) and then
fed into the diarization system to merge the individual results.
*args: Args to pass to `SpeechLabel` constructor.
**kwargs: Kwargs to pass to `SpeechLabel` constructor.
"""
self.round_digit = round_digit
self.emb_dict = emb_dict
self.clus_label_dict = clus_label_dict
self.seq_eval_mode = seq_eval_mode
self.pairwise_infer = pairwise_infer
audio_files, durations, rttm_files, offsets, target_spks_list, sess_spk_dicts, clus_spk_list, rttm_spk_list = (
[],
[],
[],
[],
[],
[],
[],
[],
)
for item in manifest.item_iter(manifests_files, parse_func=self.__parse_item_rttm):
# Inference mode
if self.pairwise_infer:
clus_speaker_digits = sorted(list(set([x[2] for x in clus_label_dict[item['uniq_id']]])))
if item['rttm_file']:
base_scale_index = max(self.emb_dict.keys())
_sess_spk_dict = self.emb_dict[base_scale_index][item['uniq_id']]['mapping']
sess_spk_dict = {int(v.split('_')[-1]): k for k, v in _sess_spk_dict.items()}
rttm_speaker_digits = [int(v.split('_')[1]) for k, v in _sess_spk_dict.items()]
if self.seq_eval_mode:
clus_speaker_digits = rttm_speaker_digits
else:
sess_spk_dict = None
rttm_speaker_digits = None
# Training mode
else:
rttm_labels = []
with open(item['rttm_file'], 'r') as f:
for line in f.readlines():
start, end, speaker = self.split_rttm_line(line, decimals=3)
rttm_labels.append('{} {} {}'.format(start, end, speaker))
speaker_set = set()
for rttm_line in rttm_labels:
spk_str = rttm_line.split()[-1]
speaker_set.add(spk_str)
speaker_list = sorted(list(speaker_set))
sess_spk_dict = {key: val for key, val in enumerate(speaker_list)}
target_spks = tuple(sess_spk_dict.keys())
clus_speaker_digits = target_spks
rttm_speaker_digits = target_spks
if len(clus_speaker_digits) <= 2:
spk_comb_list = [(0, 1)]
else:
spk_comb_list = [x for x in combinations(clus_speaker_digits, 2)]
for target_spks in spk_comb_list:
audio_files.append(item['audio_file'])
durations.append(item['duration'])
rttm_files.append(item['rttm_file'])
offsets.append(item['offset'])
target_spks_list.append(target_spks)
sess_spk_dicts.append(sess_spk_dict)
clus_spk_list.append(clus_speaker_digits)
rttm_spk_list.append(rttm_speaker_digits)
super().__init__(
audio_files,
durations,
rttm_files,
offsets,
target_spks_list,
sess_spk_dicts,
clus_spk_list,
rttm_spk_list,
*args,
**kwargs,
)
def split_rttm_line(self, rttm_line: str, decimals: int = 3):
"""
Convert a line in RTTM file to speaker label, start and end timestamps.
An example line of `rttm_line`:
SPEAKER abc_dev_0123 1 146.903 1.860 <NA> <NA> speaker543 <NA> <NA>
The above example RTTM line contains the following information:
session name: abc_dev_0123
segment start time: 146.903
segment duration: 1.860
speaker label: speaker543
Args:
rttm_line (str):
A line in RTTM formatted file containing offset and duration of each segment.
decimals (int):
                Number of decimal places to round timestamps to.
Returns:
start (float):
Start timestamp in floating point number.
end (float):
End timestamp in floating point number.
speaker (str):
speaker string in RTTM lines.
"""
rttm = rttm_line.strip().split()
start = round(float(rttm[3]), decimals)
end = round(float(rttm[4]), decimals) + round(float(rttm[3]), decimals)
speaker = rttm[7]
return start, end, speaker
def __parse_item_rttm(self, line: str, manifest_file: str) -> Dict[str, Any]:
"""Parse each rttm file and save it to in Dict format"""
item = json.loads(line)
if 'audio_filename' in item:
item['audio_file'] = item.pop('audio_filename')
elif 'audio_filepath' in item:
item['audio_file'] = item.pop('audio_filepath')
else:
raise ValueError(
f"Manifest file has invalid json line " f"structure: {line} without proper audio file key."
)
item['audio_file'] = os.path.expanduser(item['audio_file'])
item['uniq_id'] = os.path.splitext(os.path.basename(item['audio_file']))[0]
if 'duration' not in item:
raise ValueError(f"Manifest file has invalid json line " f"structure: {line} without proper duration key.")
item = dict(
audio_file=item['audio_file'],
uniq_id=item['uniq_id'],
duration=item['duration'],
rttm_file=item['rttm_filepath'],
offset=item.get('offset', None),
)
return item
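# Hedged illustrative sketch (not part of the original collection class): a
# standalone helper mirroring the split_rttm_line logic above, applied to the
# sample RTTM line from its docstring. All values are illustrative only.
def _example_split_rttm_line():
    line = "SPEAKER abc_dev_0123 1 146.903 1.860 <NA> <NA> speaker543 <NA> <NA>"
    fields = line.strip().split()
    start = round(float(fields[3]), 3)  # segment start time
    end = start + round(float(fields[4]), 3)  # start time + duration
    speaker = fields[7]  # speaker label
    return start, end, speaker  # (146.903, 148.763, 'speaker543')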
class Audio(_Collection):
"""Prepare a list of all audio items, filtered by duration.
"""
OUTPUT_TYPE = collections.namedtuple(typename='Audio', field_names='audio_files duration offset text')
def __init__(
self,
audio_files_list: List[Dict[str, str]],
duration_list: List[float],
offset_list: List[float],
text_list: List[str],
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_number: Optional[int] = None,
do_sort_by_duration: bool = False,
):
"""Instantiantes an list of audio files.
Args:
audio_files_list: list of dictionaries with mapping from audio_key to audio_filepath
duration_list: list of durations of input files
offset_list: list of offsets
text_list: list of texts
min_duration: Minimum duration to keep entry with (default: None).
max_duration: Maximum duration to keep entry with (default: None).
max_number: Maximum number of samples to collect.
do_sort_by_duration: True if sort samples list by duration.
"""
output_type = self.OUTPUT_TYPE
data, total_duration = [], 0.0
num_filtered, duration_filtered = 0, 0.0
for audio_files, duration, offset, text in zip(audio_files_list, duration_list, offset_list, text_list):
# Duration filters
if min_duration is not None and duration < min_duration:
duration_filtered += duration
num_filtered += 1
continue
if max_duration is not None and duration > max_duration:
duration_filtered += duration
num_filtered += 1
continue
total_duration += duration
data.append(output_type(audio_files, duration, offset, text))
# Max number of entities filter
if len(data) == max_number:
break
if do_sort_by_duration:
data.sort(key=lambda entity: entity.duration)
logging.info("Dataset loaded with %d files totalling %.2f hours", len(data), total_duration / 3600)
logging.info("%d files were filtered totalling %.2f hours", num_filtered, duration_filtered / 3600)
super().__init__(data)
class AudioCollection(Audio):
"""List of audio files from a manifest file.
"""
def __init__(
self, manifest_files: Union[str, List[str]], audio_to_manifest_key: Dict[str, str], *args, **kwargs,
):
"""Instantiates a list of audio files loaded from a manifest file.
Args:
manifest_files: path to a single manifest file or a list of paths
audio_to_manifest_key: dictionary mapping audio signals to keys of the manifest
"""
# Support for comma-separated manifests
if type(manifest_files) == str:
manifest_files = manifest_files.split(',')
for audio_key, manifest_key in audio_to_manifest_key.items():
# Support for comma-separated keys
if type(manifest_key) == str and ',' in manifest_key:
audio_to_manifest_key[audio_key] = manifest_key.split(',')
# Keys from manifest which contain audio
self.audio_to_manifest_key = audio_to_manifest_key
# Initialize data
audio_files_list, duration_list, offset_list, text_list = [], [], [], []
# Parse manifest files
for item in manifest.item_iter(manifest_files, parse_func=self.__parse_item):
audio_files_list.append(item['audio_files'])
duration_list.append(item['duration'])
offset_list.append(item['offset'])
text_list.append(item['text'])
super().__init__(audio_files_list, duration_list, offset_list, text_list, *args, **kwargs)
def __parse_item(self, line: str, manifest_file: str) -> Dict[str, Any]:
"""Parse a single line from a manifest file.
Args:
line: a string representing a line from a manifest file in JSON format
manifest_file: path to the manifest file. Used to resolve relative paths.
Returns:
Dictionary with audio_files, duration, and offset.
"""
# Local utility function
def get_audio_file(item: Dict, manifest_key: Union[str, List[str]]):
"""Get item[key] if key is string, or a list
of strings by combining item[key[0]], item[key[1]], etc.
"""
# Prepare audio file(s)
if manifest_key is None:
# Support for inference, when a target key is None
audio_file = None
elif isinstance(manifest_key, str):
# Load files from a single manifest key
audio_file = item[manifest_key]
elif isinstance(manifest_key, Iterable):
# Load files from multiple manifest keys
audio_file = []
for key in manifest_key:
item_key = item[key]
if isinstance(item_key, str):
audio_file.append(item_key)
elif isinstance(item_key, list):
audio_file += item_key
else:
raise ValueError(f'Unexpected type {type(item_key)} of item for key {key}: {item_key}')
else:
raise ValueError(f'Unexpected type {type(manifest_key)} of manifest_key: {manifest_key}')
return audio_file
# Convert JSON line to a dictionary
item = json.loads(line)
# Handle all audio files
audio_files = {}
for audio_key, manifest_key in self.audio_to_manifest_key.items():
audio_file = get_audio_file(item, manifest_key)
# Get full path to audio file(s)
if isinstance(audio_file, str):
# This dictionary entry points to a single file
audio_files[audio_key] = manifest.get_full_path(audio_file, manifest_file)
elif isinstance(audio_file, Iterable):
# This dictionary entry points to multiple files
# Get the files and keep the list structure for this key
audio_files[audio_key] = [manifest.get_full_path(f, manifest_file) for f in audio_file]
elif audio_file is None and audio_key.startswith('target'):
# For inference, we don't need the target
audio_files[audio_key] = None
else:
raise ValueError(f'Unexpected type {type(audio_file)} of audio_file: {audio_file}')
item['audio_files'] = audio_files
# Handle duration
if 'duration' not in item:
raise ValueError(f'Duration not available in line: {line}. Manifest file: {manifest_file}')
# Handle offset
if 'offset' not in item:
item['offset'] = 0.0
# Handle text
if 'text' not in item:
item['text'] = None
return dict(
audio_files=item['audio_files'], duration=item['duration'], offset=item['offset'], text=item['text']
)
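# Hedged sketch of the inputs AudioCollection.__parse_item expects: a manifest
# JSON line plus an audio_to_manifest_key mapping. The manifest keys and file
# names below are hypothetical and only illustrate single- and multi-file entries.
def _example_audio_collection_inputs():
    import json

    manifest_line = json.dumps(
        {'input_filepath': 'mix.wav', 'target_filepath': ['spk1.wav', 'spk2.wav'], 'duration': 4.2}
    )
    audio_to_manifest_key = {'input_signal': 'input_filepath', 'target_signal': 'target_filepath'}
    return manifest_line, audio_to_manifest_key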
class FeatureLabel(_Collection):
"""List of feature sequence and their label correspondence with preprocessing."""
OUTPUT_TYPE = collections.namedtuple(typename='FeatureLabelEntity', field_names='feature_file label duration',)
def __init__(
self,
feature_files: List[str],
labels: List[str],
durations: List[float],
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_number: Optional[int] = None,
do_sort_by_duration: bool = False,
index_by_file_id: bool = False,
):
"""Instantiates feature-SequenceLabel manifest with filters and preprocessing.
Args:
feature_files: List of feature files.
labels: List of labels.
max_number: Maximum number of samples to collect.
index_by_file_id: If True, saves a mapping from filename base (ID) to index in data.
"""
output_type = self.OUTPUT_TYPE
data = []
duration_filtered = 0.0
total_duration = 0.0
self.uniq_labels = set()
if index_by_file_id:
self.mapping = {}
for feature_file, label, duration in zip(feature_files, labels, durations):
# Duration filters.
if min_duration is not None and duration < min_duration:
duration_filtered += duration
continue
if max_duration is not None and duration > max_duration:
duration_filtered += duration
continue
data.append(output_type(feature_file, label, duration))
self.uniq_labels |= set(label)
total_duration += duration
if index_by_file_id:
file_id, _ = os.path.splitext(os.path.basename(feature_file))
self.mapping[file_id] = len(data) - 1
# Max number of entities filter.
if len(data) == max_number:
break
if do_sort_by_duration:
if index_by_file_id:
logging.warning("Tried to sort dataset by duration, but cannot since index_by_file_id is set.")
else:
data.sort(key=lambda entity: entity.duration)
logging.info(f"Filtered duration for loading collection is {duration_filtered / 2600:.2f} hours.")
logging.info(f"Dataset loaded with {len(data)} items, total duration of {total_duration / 3600: .2f} hours.")
logging.info("# {} files loaded including # {} unique labels".format(len(data), len(self.uniq_labels)))
super().__init__(data)
class ASRFeatureLabel(FeatureLabel):
"""`FeatureLabel` collector from asr structured json files."""
def __init__(
self,
manifests_files: Union[str, List[str]],
is_regression_task: bool = False,
cal_labels_occurrence: bool = False,
delimiter: Optional[str] = None,
*args,
**kwargs,
):
"""Parse lists of feature files and sequences of labels.
Args:
manifests_files: Either single string file or list of such -
manifests to yield items from.
            max_number: Maximum number of samples to collect; passed to the `FeatureLabel` constructor.
            index_by_file_id: If True, saves a mapping from filename base (ID) to index in data; passed to the `FeatureLabel` constructor.
"""
feature_files, labels, durations = [], [], []
all_labels = []
for item in manifest.item_iter(manifests_files, parse_func=self._parse_item):
feature_files.append(item['feature_file'])
durations.append(item['duration'])
if not is_regression_task:
label = item['label']
label_list = label.split() if not delimiter else label.split(delimiter)
else:
label = float(item['label'])
label_list = [label]
labels.append(label)
all_labels.extend(label_list)
if cal_labels_occurrence:
self.labels_occurrence = collections.Counter(all_labels)
super().__init__(feature_files, labels, durations, *args, **kwargs)
def _parse_item(self, line: str, manifest_file: str) -> Dict[str, Any]:
item = json.loads(line)
# Feature file
if 'feature_filename' in item:
item['feature_file'] = item.pop('feature_filename')
elif 'feature_filepath' in item:
item['feature_file'] = item.pop('feature_filepath')
elif 'feature_file' not in item:
raise ValueError(
f"Manifest file has invalid json line " f"structure: {line} without proper 'feature_file' key."
)
item['feature_file'] = manifest.get_full_path(audio_file=item['feature_file'], manifest_file=manifest_file)
# Label.
if 'label' in item:
item['label'] = item.pop('label')
else:
raise ValueError(f"Manifest file has invalid json line structure: {line} without proper 'label' key.")
item = dict(feature_file=item['feature_file'], label=item['label'], duration=item['duration'])
return item
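# Hedged sketch of a manifest line accepted by ASRFeatureLabel._parse_item above;
# the feature file name, label, and duration values are hypothetical.
def _example_feature_label_manifest_line():
    import json

    return json.dumps({'feature_filepath': 'sample_0001.pt', 'label': 'speech', 'duration': 3.0})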
class FeatureText(_Collection):
"""List of audio-transcript text correspondence with preprocessing."""
OUTPUT_TYPE = collections.namedtuple(
typename='FeatureTextEntity',
field_names='id feature_file rttm_file duration text_tokens offset text_raw speaker orig_sr lang',
)
def __init__(
self,
ids: List[int],
feature_files: List[str],
rttm_files: List[str],
durations: List[float],
texts: List[str],
offsets: List[str],
speakers: List[Optional[int]],
orig_sampling_rates: List[Optional[int]],
token_labels: List[Optional[int]],
langs: List[Optional[str]],
parser: parsers.CharParser,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
max_number: Optional[int] = None,
do_sort_by_duration: bool = False,
index_by_file_id: bool = False,
):
"""Instantiates feature-text manifest with filters and preprocessing.
Args:
ids: List of examples positions.
feature_files: List of audio feature files.
rttm_files: List of audio rttm files.
durations: List of float durations.
texts: List of raw text transcripts.
offsets: List of duration offsets or None.
speakers: List of optional speakers ids.
orig_sampling_rates: List of original sampling rates of audio files.
            langs: List of language ids, one for each sample, or None.
parser: Instance of `CharParser` to convert string to tokens.
min_duration: Minimum duration to keep entry with (default: None).
max_duration: Maximum duration to keep entry with (default: None).
max_number: Maximum number of samples to collect.
do_sort_by_duration: True if sort samples list by duration. Not compatible with index_by_file_id.
index_by_file_id: If True, saves a mapping from filename base (ID) to index in data.
"""
output_type = self.OUTPUT_TYPE
data, duration_filtered, num_filtered, total_duration = [], 0.0, 0, 0.0
if index_by_file_id:
self.mapping = {}
for id_, feat_file, rttm_file, duration, offset, text, speaker, orig_sr, token_labels, lang in zip(
ids,
feature_files,
rttm_files,
durations,
offsets,
texts,
speakers,
orig_sampling_rates,
token_labels,
langs,
):
# Duration filters.
if min_duration is not None and duration < min_duration:
duration_filtered += duration
num_filtered += 1
continue
if max_duration is not None and duration > max_duration:
duration_filtered += duration
num_filtered += 1
continue
if token_labels is not None:
text_tokens = token_labels
else:
if text != '':
if hasattr(parser, "is_aggregate") and parser.is_aggregate and isinstance(text, str):
if lang is not None:
text_tokens = parser(text, lang)
else:
raise ValueError("lang required in manifest when using aggregate tokenizers")
else:
text_tokens = parser(text)
else:
text_tokens = []
if text_tokens is None:
duration_filtered += duration
num_filtered += 1
continue
total_duration += duration
data.append(
output_type(id_, feat_file, rttm_file, duration, text_tokens, offset, text, speaker, orig_sr, lang)
)
if index_by_file_id:
file_id, _ = os.path.splitext(os.path.basename(feat_file))
if file_id not in self.mapping:
self.mapping[file_id] = []
self.mapping[file_id].append(len(data) - 1)
# Max number of entities filter.
if len(data) == max_number:
break
if do_sort_by_duration:
if index_by_file_id:
logging.warning("Tried to sort dataset by duration, but cannot since index_by_file_id is set.")
else:
data.sort(key=lambda entity: entity.duration)
logging.info("Dataset loaded with %d files totalling %.2f hours", len(data), total_duration / 3600)
logging.info("%d files were filtered totalling %.2f hours", num_filtered, duration_filtered / 3600)
super().__init__(data)
class ASRFeatureText(FeatureText):
"""`FeatureText` collector from asr structured json files."""
def __init__(self, manifests_files: Union[str, List[str]], *args, **kwargs):
"""Parse lists of audio files, durations and transcripts texts.
Args:
manifests_files: Either single string file or list of such -
manifests to yield items from.
            *args: Args to pass to `FeatureText` constructor.
            **kwargs: Kwargs to pass to `FeatureText` constructor.
"""
ids, feature_files, rttm_files, durations, texts, offsets, = (
[],
[],
[],
[],
[],
[],
)
speakers, orig_srs, token_labels, langs = [], [], [], []
for item in manifest.item_iter(manifests_files):
ids.append(item['id'])
feature_files.append(item['feature_file'])
rttm_files.append(item['rttm_file'])
durations.append(item['duration'])
texts.append(item['text'])
offsets.append(item['offset'])
speakers.append(item['speaker'])
orig_srs.append(item['orig_sr'])
token_labels.append(item['token_labels'])
langs.append(item['lang'])
super().__init__(
ids,
feature_files,
rttm_files,
durations,
texts,
offsets,
speakers,
orig_srs,
token_labels,
langs,
*args,
**kwargs,
)
|
NeMo-main
|
nemo/collections/common/parts/preprocessing/collections.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import inflect
from text_unidecode import unidecode
from nemo.utils import logging
NUM_CHECK = re.compile(r'([$]?)(^|\s)(\S*[0-9]\S*)(?=(\s|$)((\S*)(\s|$))?)')
TIME_CHECK = re.compile(r'([0-9]{1,2}):([0-9]{2})(am|pm)?')
CURRENCY_CHECK = re.compile(r'\$')
ORD_CHECK = re.compile(r'([0-9]+)(st|nd|rd|th)')
THREE_CHECK = re.compile(r'([0-9]{3})([.,][0-9]{1,2})?([!.?])?$')
DECIMAL_CHECK = re.compile(r'([.,][0-9]{1,2})$')
ABBREVIATIONS_COMMON = [
(re.compile('\\b%s\\.' % x[0]), x[1])
for x in [
("ms", "miss"),
("mrs", "misess"),
("mr", "mister"),
("messrs", "messeurs"),
("dr", "doctor"),
("drs", "doctors"),
("st", "saint"),
("co", "company"),
("jr", "junior"),
("sr", "senior"),
("rev", "reverend"),
("hon", "honorable"),
("sgt", "sergeant"),
("capt", "captain"),
("maj", "major"),
("col", "colonel"),
("lt", "lieutenant"),
("gen", "general"),
("prof", "professor"),
("lb", "pounds"),
("rep", "representative"),
("st", "street"),
("ave", "avenue"),
("etc", "et cetera"),
("jan", "january"),
("feb", "february"),
("mar", "march"),
("apr", "april"),
("jun", "june"),
("jul", "july"),
("aug", "august"),
("sep", "september"),
("oct", "october"),
("nov", "november"),
("dec", "december"),
]
]
ABBREVIATIONS_EXPANDED = [
(re.compile('\\b%s\\.' % x[0]), x[1])
for x in [
("ltd", "limited"),
("fig", "figure"),
("figs", "figures"),
("gent", "gentlemen"),
("ft", "fort"),
("esq", "esquire"),
("prep", "preperation"),
("bros", "brothers"),
("ind", "independent"),
("mme", "madame"),
("pro", "professional"),
("vs", "versus"),
("inc", "include"),
]
]
ABBREVIATIONS_TTS_FASTPITCH = [
(re.compile('\\b%s\\.' % x[0]), x[1])
for x in [
("ms", "miss"),
("mrs", "misess"),
("mr", "mister"),
("dr", "doctor"),
("drs", "doctors"),
("st", "saint"),
("co", "company"),
("jr", "junior"),
("sr", "senior"),
("rev", "reverend"),
("hon", "honorable"),
("sgt", "sergeant"),
("capt", "captain"),
("maj", "major"),
("col", "colonel"),
("lt", "lieutenant"),
("gen", "general"),
("prof", "professor"),
("lb", "pounds"),
("rep", "representative"),
("st", "street"),
("ave", "avenue"),
("jan", "january"),
("feb", "february"),
("mar", "march"),
("apr", "april"),
("jun", "june"),
("jul", "july"),
("aug", "august"),
("sep", "september"),
("oct", "october"),
("nov", "november"),
("dec", "december"),
("ltd", "limited"),
("fig", "figure"),
("figs", "figures"),
("gent", "gentlemen"),
("ft", "fort"),
("esq", "esquire"),
("prep", "preperation"),
("bros", "brothers"),
("ind", "independent"),
("mme", "madame"),
("pro", "professional"),
("vs", "versus"),
]
]
inflect = inflect.engine()
def clean_text(string, table, punctuation_to_replace, abbreviation_version=None):
warn_common_chars(string)
string = unidecode(string)
string = string.lower()
string = re.sub(r'\s+', " ", string)
string = clean_numbers(string)
string = clean_abbreviations(string, version=abbreviation_version)
string = clean_punctuations(string, table, punctuation_to_replace)
string = re.sub(r'\s+', " ", string).strip()
return string
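# Hedged usage sketch (not exercised by the library itself): runs clean_text with
# no translation table and a minimal punctuation map, just to show the full
# normalization pipeline end to end on an illustrative sentence.
def _example_clean_text():
    punctuation_to_replace = {"!": ""}
    return clean_text("Dr. Smith has 2 dogs!", None, punctuation_to_replace)
    # expected: "doctor smith has two dogs"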
def warn_common_chars(string):
if re.search(r'[£€]', string):
logging.warning("Your transcript contains one of '£' or '€' which we do not currently handle")
def clean_numbers(string):
cleaner = NumberCleaner()
string = NUM_CHECK.sub(cleaner.clean, string)
return string
def clean_abbreviations(string, version=None):
    abbreviations = ABBREVIATIONS_COMMON
    if version == "fastpitch":
        abbreviations = ABBREVIATIONS_TTS_FASTPITCH
    elif version == "expanded":
        # Build a combined list instead of mutating the module-level constant.
        abbreviations = ABBREVIATIONS_COMMON + ABBREVIATIONS_EXPANDED
    for regex, replacement in abbreviations:
        string = re.sub(regex, replacement, string)
    return string
def clean_punctuations(string, table, punctuation_to_replace):
for punc, replacement in punctuation_to_replace.items():
string = re.sub('\\{}'.format(punc), " {} ".format(replacement), string)
if table:
string = string.translate(table)
return string
class NumberCleaner:
def __init__(self):
super().__init__()
self.reset()
def reset(self):
self.curr_num = []
self.currency = None
def format_final_number(self, whole_num, decimal):
if self.currency:
return_string = inflect.number_to_words(whole_num)
return_string += " dollar" if whole_num == 1 else " dollars"
if decimal:
return_string += " and " + inflect.number_to_words(decimal)
return_string += " cent" if whole_num == decimal else " cents"
self.reset()
return return_string
self.reset()
if decimal:
whole_num += "." + decimal
return inflect.number_to_words(whole_num)
else:
# Check if there are non-numbers
def convert_to_word(match):
return " " + inflect.number_to_words(match.group(0)) + " "
return re.sub(r'[0-9,]+', convert_to_word, whole_num)
def clean(self, match):
ws = match.group(2)
number = match.group(3)
_proceeding_symbol = match.group(7)
time_match = TIME_CHECK.match(number)
if time_match:
string = ws + inflect.number_to_words(time_match.group(1)) + "{}{}"
mins = int(time_match.group(2))
min_string = ""
if mins != 0:
min_string = " " + inflect.number_to_words(time_match.group(2))
ampm_string = ""
if time_match.group(3):
ampm_string = " " + time_match.group(3)
return string.format(min_string, ampm_string)
ord_match = ORD_CHECK.match(number)
        if ord_match:
return ws + inflect.number_to_words(ord_match.group(0))
if self.currency is None:
# Check if it is a currency
self.currency = match.group(1) or CURRENCY_CHECK.match(number)
# Check to see if next symbol is a number
# If it is a number and it has 3 digits, then it is probably a
# continuation
three_match = THREE_CHECK.match(match.group(6))
if three_match:
self.curr_num.append(number)
return " "
# Else we can output
else:
# Check for decimals
whole_num = "".join(self.curr_num) + number
decimal = None
decimal_match = DECIMAL_CHECK.search(whole_num)
if decimal_match:
decimal = decimal_match.group(1)[1:]
whole_num = whole_num[: -len(decimal) - 1]
whole_num = re.sub(r'\.', '', whole_num)
return ws + self.format_final_number(whole_num, decimal)
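# Hedged sketch of the time-expansion path handled by NumberCleaner via
# clean_numbers; the sentence is illustrative only.
def _example_clean_numbers_time():
    return clean_numbers("see you at 10:30am tomorrow")
    # expected: "see you at ten thirty am tomorrow"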
|
NeMo-main
|
nemo/collections/common/parts/preprocessing/cleaners.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.data.dataset import CodeSwitchedDataset, ConcatDataset, ConcatMapDataset
|
NeMo-main
|
nemo/collections/common/data/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.data as pt_data
from torch.utils.data import Dataset, IterableDataset
__all__ = ['ConcatDataset', 'ConcatMapDataset', 'CodeSwitchedDataset']
class ConcatDataset(IterableDataset):
"""
A dataset that accepts as argument multiple datasets and then samples from them based on the specified
sampling technique.
Args:
datasets (list): A list of datasets to sample from.
shuffle (bool): Whether to shuffle individual datasets. Only works with non-iterable datasets.
Defaults to True.
sampling_technique (str): Sampling technique to choose which dataset to draw a sample from.
Defaults to 'temperature'. Currently supports 'temperature', 'random' and 'round-robin'.
sampling_temperature (int): Temperature value for sampling. Only used when sampling_technique = 'temperature'.
Defaults to 5.
sampling_scale: Gives you the ability to upsample / downsample the dataset. Defaults to 1.
sampling_probabilities (list): Probability values for sampling. Only used when sampling_technique = 'random'.
seed: Optional value to seed the numpy RNG.
global_rank (int): Worker rank, used for partitioning map style datasets. Defaults to 0.
world_size (int): Total number of processes, used for partitioning map style datasets. Defaults to 1.
"""
def __init__(
self,
datasets: List[Any],
shuffle: bool = True,
sampling_technique: str = 'temperature',
sampling_temperature: int = 5,
sampling_scale: int = 1,
sampling_probabilities: List[float] = None,
seed: Optional[int] = None,
global_rank: int = 0,
world_size: int = 1,
):
super().__init__()
supported_sampling_techniques = ['temperature', 'random', 'round-robin']
self.datasets = datasets
self.iterables = [None] * len(datasets)
self.shuffle = shuffle
self.global_rank = global_rank
self.world_size = world_size
self.sampling_kwargs = {}
self.sampling_scale = sampling_scale
if sampling_technique == 'temperature':
self.index_generator = ConcatDataset.temperature_generator
self.sampling_kwargs['temperature'] = sampling_temperature
self.sampling_kwargs['seed'] = seed
elif sampling_technique == 'random':
self.index_generator = ConcatDataset.random_generator
self.sampling_kwargs['p'] = sampling_probabilities
self.sampling_kwargs['seed'] = seed
elif sampling_technique == 'round-robin':
self.index_generator = ConcatDataset.round_robin_generator
else:
raise ValueError(f"Currently we only support sampling techniques in {supported_sampling_techniques}.")
self.length = 0
if isinstance(datasets[0], IterableDataset):
self.kind = 'iterable'
else:
self.kind = 'map'
for idx, dataset in enumerate(datasets):
isiterable = isinstance(dataset, IterableDataset)
if (isiterable and not self.kind == 'iterable') or (not isiterable and self.kind == 'iterable'):
raise ValueError("All datasets in ConcatDataset must be of the same kind (Iterable or Map).")
if self.kind == 'map':
self.length += len(dataset) // world_size
else:
self.length += len(dataset)
if self.sampling_scale != 1:
self.length = int(self.length * self.sampling_scale)
logging.info(f'applying {sampling_scale} sampling scale, concat ds len: {self.length}')
def get_iterable(self, dataset):
if isinstance(dataset, IterableDataset):
return dataset.__iter__()
else:
indices = np.arange(len(dataset))
if self.shuffle:
np.random.shuffle(indices)
return iter(indices)
def __iter__(self):
worker_info = pt_data.get_worker_info()
if worker_info is None:
max_elements = self.length
wid = 0
wnum = 1
else:
wid = worker_info.id
wnum = worker_info.num_workers
max_elements = len(range(wid, self.length, wnum))
if self.kind == 'map':
for idx in range(len(self.datasets)):
start_idx = (len(self.datasets[idx]) // self.world_size) * self.global_rank
end_idx = start_idx + (len(self.datasets[idx]) // self.world_size)
if self.global_rank == self.world_size - 1:
end_idx = len(self.datasets[idx])
indices = range(start_idx + wid, end_idx, wnum)
self.datasets[idx] = pt_data.Subset(self.datasets[idx], indices)
for idx, dataset in enumerate(self.datasets):
iterable = self.get_iterable(dataset)
self.iterables[idx] = iterable
n = 0
ind_gen = self.index_generator(self.datasets, **self.sampling_kwargs)
while n < max_elements:
n += 1
try:
ind = next(ind_gen)
except StopIteration:
return
try:
val = next(self.iterables[ind])
if self.kind == 'map':
val = self.datasets[ind][val]
yield val
except StopIteration:
self.iterables[ind] = self.get_iterable(self.datasets[ind])
n -= 1
def __len__(self):
return self.length
@staticmethod
def temperature_generator(datasets, **kwargs):
temp = kwargs.get('temperature')
if not temp:
raise ValueError("Temperature generator expects a 'temperature' keyword argument.")
seed = kwargs.get('seed', None)
np_rng = np.random.RandomState(seed)
lengths = []
num = len(datasets)
for dataset in datasets:
lengths.append(len(dataset))
p = np.array(lengths) / np.sum(lengths)
p = np.power(p, 1 / temp)
p = p / np.sum(p)
while True:
ind = np_rng.choice(np.arange(num), p=p)
yield ind
@staticmethod
def round_robin_generator(datasets, **kwargs):
num = len(datasets)
while True:
for i in range(num):
yield i
@staticmethod
def random_generator(datasets, **kwargs):
p = kwargs.get('p')
if not p:
raise ValueError("Random generator expects a 'p' keyowrd argument for sampling probabilities.")
seed = kwargs.get('seed', None)
np_rng = np.random.RandomState(seed)
num = len(datasets)
if len(p) != num:
raise ValueError("Length of probabilities list must be equal to the number of datasets.")
while True:
ind = np_rng.choice(np.arange(num), p=p)
yield ind
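# Hedged usage sketch (single process, no DataLoader): interleaves two tiny
# map-style datasets with temperature sampling. The toy datasets are hypothetical
# and exist only to make the example self-contained.
def _example_concat_dataset_usage():
    import itertools

    import torch
    from torch.utils.data import TensorDataset

    ds_a = TensorDataset(torch.arange(10))
    ds_b = TensorDataset(torch.arange(100, 104))
    concat = ConcatDataset([ds_a, ds_b], sampling_technique='temperature', sampling_temperature=5, seed=0)
    # Draw a handful of samples; each element is a 1-tuple holding a tensor.
    return list(itertools.islice(iter(concat), 5))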
class ConcatMapDataset(Dataset):
"""
A dataset that accepts as argument multiple datasets and then samples from them based on the specified
sampling technique.
Args:
datasets (list): A list of datasets to sample from.
sampling_technique (str): Sampling technique to choose which dataset to draw a sample from.
Defaults to 'temperature'. Currently supports 'temperature', 'random' and 'round-robin'.
sampling_temperature (int): Temperature value for sampling. Only used when sampling_technique = 'temperature'.
Defaults to 5.
sampling_probabilities (list): Probability values for sampling. Only used when sampling_technique = 'random'.
seed: Optional value to seed the numpy RNG.
"""
def __init__(
self,
datasets: List[Any],
sampling_technique: str = 'temperature',
sampling_temperature: int = 5,
sampling_probabilities: Optional[List[float]] = None,
seed: Optional[int] = None,
):
super().__init__()
self.datasets = datasets
self.lengths = [len(x) for x in self.datasets]
self.sampling_technique = sampling_technique
self.sampling_temperature = sampling_temperature
self.sampling_probabilities = sampling_probabilities
self.np_rng = np.random.RandomState(seed)
# Build a list of size `len(self)`. Each tuple contains (dataset_id, dataset_index)
self.indices: List[Tuple[int, int]] = []
# Current position as we consume indices from each data set
dataset_positions = [0] * len(self.datasets)
# Random permutation of each dataset. Will be regenerated when exhausted.
shuffled_indices = [self.np_rng.permutation(len(x)) for x in self.datasets]
# Build the list of randomly-chosen datasets spanning the entire length, adhering to sampling technique
if self.sampling_technique == "round-robin":
# To exhaust longest dataset, need to draw `num_datasets * max_dataset_len` samples
total_length = max(self.lengths) * len(self.lengths)
# For round robin, iterate through each dataset
dataset_ids = np.arange(total_length) % len(self.datasets)
for dataset_id in dataset_ids:
position = dataset_positions[dataset_id]
index = shuffled_indices[dataset_id][position]
self.indices.append((dataset_id, index))
dataset_positions[dataset_id] += 1
if dataset_positions[dataset_id] == len(shuffled_indices[dataset_id]):
dataset_positions[dataset_id] = 0
shuffled_indices[dataset_id] = self.np_rng.permutation(len(self.datasets[dataset_id]))
else:
# Resolve probabilities of drawing from each data set
if self.sampling_technique == "random":
if sampling_probabilities is None or len(sampling_probabilities) != len(self.datasets):
raise ValueError(
f"Need {len(self.datasets)} probabilities; got "
f"{len(sampling_probabilities) if sampling_probabilities is not None else 'None'}"
)
p = np.array(self.sampling_probabilities)
elif self.sampling_technique == "temperature":
p = np.array([len(x) for x in self.datasets])
p = np.power(p, 1 / self.sampling_temperature)
else:
raise ValueError(f"Couldn't interpret sampling technique: {sampling_technique}")
# Normalize probabilities
p = p / np.sum(p)
# Will randomly choose from datasets
choices = np.arange(len(self.datasets))
# Keep going until largest dataset is exhausted.
exhausted_datasets = set()
while len(exhausted_datasets) < len(self.datasets):
# Randomly choose a dataset for each position in accordance with p
dataset_id = self.np_rng.choice(a=choices, p=p)
dataset = self.datasets[dataset_id]
# Pick next index from dataset
position = dataset_positions[dataset_id]
index = shuffled_indices[dataset_id][position]
self.indices.append((dataset_id, index))
# Maybe reset this dataset's permutation
dataset_positions[dataset_id] += 1
if dataset_positions[dataset_id] >= len(dataset):
shuffled_indices[dataset_id] = self.np_rng.permutation(len(dataset))
dataset_positions[dataset_id] = 0
exhausted_datasets.add(dataset_id)
def __len__(self):
return len(self.indices)
def __getitem__(self, idx):
dataset_id, dataset_index = self.indices[idx]
return self.datasets[dataset_id][dataset_index]
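# Hedged usage sketch: round-robin sampling over two tiny map-style datasets.
# The toy datasets are hypothetical and only make the example self-contained.
def _example_concat_map_dataset_usage():
    import torch
    from torch.utils.data import TensorDataset

    ds_a = TensorDataset(torch.arange(6))
    ds_b = TensorDataset(torch.arange(100, 103))
    combined = ConcatMapDataset([ds_a, ds_b], sampling_technique='round-robin', seed=0)
    # len covers num_datasets * max_dataset_len draws (12 here).
    return len(combined), combined[0]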
class CodeSwitchedDataset(IterableDataset):
"""
A dataset that accepts as argument multiple sub-datasets (usually from different languages, but that's not required) and then
samples from them in order to create synthetic code-switched samples of up to N different sub-datasets
Args:
datasets (list): A list of datasets
lang_probs (list): A list of probabilities (which must sum to 1) corresponding to the sampling probability for each dataset
shuffle (bool): Whether to shuffle individual datasets. Only works with non-iterable datasets.
Defaults to True.
min_duration (int): the minimum duration (secs) of each synthetic code-switched sample. Will draw randomly until this is hit.
Defaults to 4
max_duration (int): the maximum duration (secs) of each synthetic code-switched sample.
Defaults to 20
min_monolingual (float): this percentage of the dataset will be original monolingual samples
Defaults to 0.3 - means 30%
db_norm (float): will normalise the composite CS sample to this DB level
Defaults to -25.0
pause_start (int): inserts silence equal to this value (msecs) at the start of each CS sample
Defaults to 0
pause_join (int): inserts silence equal to this value (msecs) between all language changes in the CS sample
Defaults to 0
pause_end (int): terminates all CS samples with silence equal to this value (msecs)
Defaults to 0
sampling_scales (list or float): gives you the ability to upsample/downsample each individual dataset
seed: Optional value to seed the numpy RNG.
global_rank (int): Worker rank, used for partitioning map style datasets. Defaults to 0.
world_size (int): Total number of processes, used for partitioning map style datasets. Defaults to 1.
pure_random (bool): If true, then always draw random sample from lang_probs. If false, you only draw from those datasets
which you haven't sampled from yet for the composite sample
force_monochannel (bool): If true, then all output audio will be mono-channel
infinity_mode (bool): If true, then the dataset iterable will generate an infinite amount of samples
sample_rate (int): the sample rate of all audio being sent to this Dataset
        augmentor (AudioAugmentor): Any perturbations you wish to have applied to the CS samples
"""
def __init__(
self,
datasets: List[Any],
lang_probs: Optional[List[float]] = None,
shuffle: bool = True,
min_duration: int = 4,
max_duration: int = 20,
min_monolingual: float = 0.3,
db_norm: float = -25.0,
pause_start: int = 0,
pause_join: int = 0,
pause_end: int = 0,
sampling_scales: Optional[Union[float, List[float]]] = None,
seed: Optional[int] = None,
global_rank: int = 0,
world_size: int = 1,
pure_random: bool = False,
force_monochannel: bool = True,
infinity_mode: bool = False,
sample_rate: int = 16000,
augmentor: Optional['AudioAugmentor'] = None,
):
super().__init__()
if len(datasets) == 0:
raise ValueError("CodeSwitchedDataset must receive a non-zero length datasets dict object")
self.datasets = datasets
self.langs = list(range(len(datasets)))
self.langs_set = set(self.langs)
self.lang_iterables = {k: None for k in self.langs}
self.lang_kind = {k: None for k in self.langs}
self.shuffle = shuffle
self.min_duration = min_duration
self.max_duration = max_duration
self.min_monolingual = min_monolingual
self.db_norm = db_norm
self.pause_start = pause_start
self.pause_join = pause_join
self.pause_end = pause_end
self.pure_random = pure_random
self.force_monochannel = force_monochannel
self.infinity_mode = infinity_mode
self.global_rank = global_rank
self.world_size = world_size
self.augmentor = augmentor
self.sample_rate = sample_rate
self.length = 0
if lang_probs is None:
self.prob_dict = {l: 1.0 / len(self.langs) for l in self.langs}
else:
assert len(self.langs) == len(
lang_probs
), "Size mismatch between languages and respective probs in CodeSwitchedDataset"
self.prob_dict = {l: lang_probs[l] for l in self.langs}
self.lang_probs = np.array(list(self.prob_dict.values()))
if sampling_scales is not None and not isinstance(sampling_scales, list):
self.sampling_scales = {k: sampling_scales for k in self.langs}
elif (
sampling_scales is not None
and isinstance(sampling_scales, list)
and len(sampling_scales) == len(self.langs)
):
self.sampling_scales = {k: v for k, v in zip(self.langs, sampling_scales)}
else:
self.sampling_scales = {k: 1 for k in self.langs}
for lang, dataset in enumerate(self.datasets):
isiterable = isinstance(dataset, IterableDataset)
if isiterable:
self.lang_kind[lang] = 'iterable'
self.length += int(len(dataset) * self.sampling_scales[lang])
else:
self.lang_kind[lang] = 'map'
self.length += int((len(dataset) // world_size) * self.sampling_scales[lang])
if seed is not None:
np.random.seed(seed)
# set this to ensure compatibility with models searching for the collate_fn
# since this class stores datasets as a dict, not list
# self.collate_fn = self.datasets[self.langs[0]].collate_fn
if hasattr(self.datasets[self.langs[0]], 'collate_fn'):
self.collate_fn = self.datasets[self.langs[0]].collate_fn
elif (
hasattr(self.datasets[self.langs[0]], 'datasets')
and isinstance(self.datasets[self.langs[0]].datasets, list)
and len(self.datasets[self.langs[0]].datasets) > 0
and hasattr(self.datasets[self.langs[0]].datasets[0], 'collate_fn')
):
# support datasets that are lists of entries
self.collate_fn = self.datasets[self.langs[0]].datasets[0].collate_fn
elif (
hasattr(self.datasets[self.langs[0]], 'datasets')
and isinstance(self.datasets[self.langs[0]].datasets, list)
and len(self.datasets[self.langs[0]].datasets) > 0
and hasattr(self.datasets[self.langs[0]].datasets[0], 'datasets')
and isinstance(self.datasets[self.langs[0]].datasets[0].datasets, list)
and len(self.datasets[self.langs[0]].datasets[0].datasets) > 0
and hasattr(self.datasets[self.langs[0]].datasets[0].datasets[0], 'collate_fn')
):
# support datasets that are lists of lists
self.collate_fn = self.datasets[self.langs[0]].datasets[0].datasets[0].collate_fn
else:
raise RuntimeError("CodeSwitchedDataset could not locate a valid dataset collate_fn to bind to")
# this method returns an iterator object for a given language ID
# it correctly handles whether the underlying dataset is IterableDataset or mappable
def get_iterable_by_lang(self, lang):
dataset = self.datasets[lang]
if isinstance(dataset, IterableDataset):
return dataset.__iter__()
else:
indices = np.arange(len(dataset))
if self.shuffle:
np.random.shuffle(indices)
return iter(indices)
# this method is the main function which builds and returns a composite, synthetic code-switched
# utterance on the fly. It automatically works with all of the class-based variables stored to create
# the synthetic utterance
def build_single_CS_sample(self):
# get_sample_from_language returns a LongTensor for the transcripts so we create a LongTensor to hold
# all returned transcripts
comp_text = torch.LongTensor([])
created_sample_duration_sec = 0
created_sample_langs = []
created_sample_audios = []
# if min_monolingual fires, it means we will just return a single, original monolingual utterance
# from one of our languages based on that language's probability
pure_mono = np.random.rand() <= self.min_monolingual
# we continue to add to the composite utterance until we hit the min_duration
while created_sample_duration_sec < self.min_duration:
# we sample from only those languages which haven't already been sampled for this particular
# synthetic utterance, unless pure_random=True, in which case, you just sample with replacement
# every time
if (self.pure_random and not pure_mono) or (
len(set(created_sample_langs)) == 0 or len(set(created_sample_langs)) == len(self.langs)
):
lang_id = np.random.choice(self.langs, p=self.lang_probs)
# elif pure_mono:
# use this approach if you want synthetic utterances which are all monolingual
# lang_id = created_sample_langs[0]
else:
# this code is for when we need to sample from only those languages which haven't been sampled
# yet for this utterance
p = np.array(list(map(self.prob_dict.get, list(self.langs_set - set(created_sample_langs)))))
p = p / p.sum()
lang_id = np.random.choice(list(self.langs_set - set(created_sample_langs)), p=p)
audio, audio_len, labels, labels_len, *_ = self.get_sample_from_language(lang_id)
# in case you get an audio which is all silence we keep sampling
if audio.count_nonzero().item() == 0:
continue
sample_duration = len(audio) / self.sample_rate
if (created_sample_duration_sec + sample_duration) > self.max_duration:
continue
if comp_text.device != labels.device:
comp_text = comp_text.to(labels.device)
if audio.ndim > 1 and self.force_monochannel:
audio = audio.mean(dim=-1)
created_sample_duration_sec += sample_duration
created_sample_langs.append(lang_id)
# need to use numpy instead of torch here because we need numpy's trim_zeros function
created_sample_audios.append(audio.cpu().numpy())
comp_text = torch.cat([comp_text, labels], dim=0)
# we want a real, non-synth pure_mono sample so we break soon as we have one
if pure_mono:
break
# check that all samples have the same number of channels
sample_channels = list(set([s.ndim for s in created_sample_audios]))
if len(sample_channels) > 1:
raise RuntimeError(
"Mixture of audios with different number of channels in CodeSwitchedDataset. All sources must be same number of channels."
)
multichannel = sample_channels[0] > 1
# we start with pause_start amount of silence (zero array) which needs the correct shape for multi/mono channel
if multichannel:
comp_audio = np.zeros(
shape=(int(self.pause_start * self.sample_rate / 1000.0), created_sample_audios[0].shape[-1]),
dtype=created_sample_audios[0].dtype,
)
else:
comp_audio = np.zeros(
shape=(int(self.pause_start * self.sample_rate / 1000.0),), dtype=created_sample_audios[0].dtype
)
# iterate over all mono-lingual samples to build the final composite
for idx, wav in enumerate(created_sample_audios):
if not multichannel:
# this function only works if mono-channel
wav = np.trim_zeros(wav)
# normalise to provided DB level
wav_norm = wav * (10.0 ** (self.db_norm / 20.0) / np.maximum(0.01, (wav ** 2).mean(axis=0) ** 0.5))
# this part appends the normed waveform to the existing waveform, and inserts pause_join amount of silence
# if necessary, otherwise just a straight append
if idx < len(created_sample_audios) - 1:
if multichannel:
wav_norm = np.append(
wav_norm,
np.zeros(
shape=(
int(self.pause_join * self.sample_rate / 1000.0),
created_sample_audios[0].shape[-1],
),
dtype=comp_audio.dtype,
),
axis=0,
)
else:
wav_norm = np.append(
wav_norm,
np.zeros(shape=(int(self.pause_join * self.sample_rate / 1000.0),), dtype=comp_audio.dtype),
axis=0,
)
            # this is the penultimate composite waveform, just need to add pause_end silence
comp_audio = np.append(comp_audio, wav_norm, axis=0)
# here we add the pause_end amount of silence, in correct channel shape
if multichannel:
comp_audio = np.append(
comp_audio,
np.zeros(
shape=(int(self.pause_end * self.sample_rate / 1000.0), created_sample_audios[0].shape[-1]),
dtype=comp_audio.dtype,
),
axis=0,
)
else:
comp_audio = np.append(
comp_audio,
np.zeros(shape=(int(self.pause_end * self.sample_rate / 1000.0),), dtype=comp_audio.dtype),
axis=0,
)
# we only want augmentation to happen on the final, synthetic utterance, and not on any of the individual
# languages, which is why we set augmentor=None when building the individual language datasets in audio_to_text_dataset.get_code_switched_dataset
# here we now apply augmentation to the final, synthetic utterance only
# all of this logic here happens in-memory, nothing is written to disk
if self.augmentor is not None:
# import here to avoid circular import error
# import here because otherwise CI test-nlp-imports fails since soundfile is only in requirements_asr and not in requirements_common
import soundfile as sf
from nemo.collections.asr.parts.preprocessing import AudioSegment
mb = io.BytesIO()
sf.write(mb, comp_audio, self.sample_rate, format='WAV')
mb.seek(0)
comp_audio_as = AudioSegment.from_file(mb, target_sr=self.sample_rate)
self.augmentor.perturb(comp_audio_as)
comp_audio = comp_audio_as.samples
return (
torch.tensor(comp_audio, dtype=audio.dtype, device=audio.device),
torch.tensor(len(comp_audio), device=audio_len.device).long(),
comp_text,
torch.tensor(len(comp_text), device=labels_len.device).long(),
)
# this is a helper method which prepares all of the iterator objects for all languages
# based on whether that language's underlying dataset is a map or an IterableDataset
def prep_underlying_datasets(self):
worker_info = pt_data.get_worker_info()
if worker_info is None:
max_elements = self.length
wid = 0
wnum = 1
else:
wid = worker_info.id
wnum = worker_info.num_workers
max_elements = len(range(wid, self.length, wnum))
for lang in self.langs:
if self.lang_kind[lang] == 'map':
start_idx = (len(self.datasets[lang]) // self.world_size) * self.global_rank
end_idx = start_idx + (len(self.datasets[lang]) // self.world_size)
if self.global_rank == self.world_size - 1:
end_idx = len(self.datasets[lang])
indices = range(start_idx + wid, end_idx, wnum)
self.datasets[lang] = pt_data.Subset(self.datasets[lang], indices)
self.lang_iterables[lang] = self.get_iterable_by_lang(lang)
return max_elements
# returns a sample (audio and transcript) from any underlying language stored by the class on instantiation
# the sample returned is a tensor for the audio and a tensor of ints for the transcript
    # this method automatically handles StopIteration errors for the underlying language and rebuilds
# the iterator if necessary
def get_sample_from_language(self, lang):
while True:
try:
val = next(self.lang_iterables[lang])
if self.lang_kind[lang] == 'map':
val = self.datasets[lang][val]
return val
except StopIteration:
self.lang_iterables[lang] = self.get_iterable_by_lang(lang)
def __iter__(self):
# we create primed iterators for all languages and return the grand total of samples for each
# underlying language as a sum
max_elements = self.prep_underlying_datasets()
if self.infinity_mode:
while True:
yield self.build_single_CS_sample()
else:
n = 0
while n < max_elements:
yield self.build_single_CS_sample()
n += 1
def __len__(self):
return self.length
|
NeMo-main
|
nemo/collections/common/data/dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr import data, losses, models, modules
from nemo.package_info import __version__
# Set collection version equal to NeMo version.
__version = __version__
# Authorship.
__author__ = "NVIDIA Corporation"
# Set collection name.
__description__ = "Automatic Speech Recognition collection"
|
NeMo-main
|
nemo/collections/asr/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List
import editdistance
import torch
from torchmetrics import Metric
from nemo.collections.asr.metrics.wer import AbstractCTCDecoding, CTCDecodingConfig
from nemo.collections.asr.parts.submodules import ctc_beam_decoding
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.collections.common.tokenizers.aggregate_tokenizer import DummyTokenizer
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
class CTCBPEDecoding(AbstractCTCDecoding):
"""
Used for performing CTC auto-regressive / non-auto-regressive decoding of the logprobs for subword based
models.
Args:
decoding_cfg: A dict-like object which contains the following key-value pairs.
strategy: str value which represents the type of decoding that can occur.
Possible values are :
- greedy (for greedy decoding).
- beam (for DeepSpeed KenLM based decoding).
compute_timestamps: A bool flag, which determines whether to compute the character/subword, or
                word based timestamp mapping the output log-probabilities to discrete intervals of timestamps.
The timestamps will be available in the returned Hypothesis.timestep as a dictionary.
ctc_timestamp_type: A str value, which represents the types of timestamps that should be calculated.
Can take the following values - "char" for character/subword time stamps, "word" for word level
time stamps and "all" (default), for both character level and word level time stamps.
            word_seperator: Str token representing the separator between words.
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `logprobs` in it. Here, `logprobs` is a torch.Tensors.
confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence
scores. In order to obtain hypotheses with confidence scores, please utilize
`ctc_decoder_predictions_tensor` function with the `preserve_frame_confidence` flag set to True.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding. When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of floats.
preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `token_confidence` in it. Here, `token_confidence` is a List of floats.
The length of the list corresponds to the number of recognized tokens.
preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `word_confidence` in it. Here, `word_confidence` is a List of floats.
The length of the list corresponds to the number of recognized words.
exclude_blank: Bool flag indicating that blank token confidence scores are to be excluded
from the `token_confidence`.
aggregation: Which aggregation type to use for collapsing per-token confidence into per-word confidence.
Valid options are `mean`, `min`, `max`, `prod`.
measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str).
Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
batch_dim_index: Index of the batch dimension of ``targets`` and ``predictions`` parameters of
``ctc_decoder_predictions_tensor`` methods. Can be either 0 or 1.
The config may further contain the following sub-dictionaries:
"greedy":
preserve_alignments: Same as above, overrides above value.
compute_timestamps: Same as above, overrides above value.
preserve_frame_confidence: Same as above, overrides above value.
confidence_measure_cfg: Same as above, overrides confidence_cfg.measure_cfg.
"beam":
beam_size: int, defining the beam size for beam search. Must be >= 1.
                    If beam_size == 1, will perform cached greedy search. This might give slightly different
results compared to the greedy search above.
return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
hypotheses after beam search has concluded. This flag is set by default.
beam_alpha: float, the strength of the Language model on the final score of a token.
final_score = acoustic_score + beam_alpha * lm_score + beam_beta * seq_length.
beam_beta: float, the strength of the sequence length penalty on the final score of a token.
final_score = acoustic_score + beam_alpha * lm_score + beam_beta * seq_length.
kenlm_path: str, path to a KenLM ARPA or .binary file (depending on the strategy chosen).
If the path is invalid (file is not found at path), will raise a deferred error at the moment
of calculation of beam search, so that users may update / change the decoding strategy
to point to the correct file.
tokenizer: NeMo tokenizer object, which inherits from TokenizerSpec.
"""
def __init__(self, decoding_cfg, tokenizer: TokenizerSpec):
blank_id = tokenizer.tokenizer.vocab_size
self.tokenizer = tokenizer
super().__init__(decoding_cfg=decoding_cfg, blank_id=blank_id)
# Finalize Beam Search Decoding framework
if isinstance(self.decoding, ctc_beam_decoding.AbstractBeamCTCInfer):
if hasattr(self.tokenizer.tokenizer, 'get_vocab'):
vocab_dict = self.tokenizer.tokenizer.get_vocab()
if isinstance(self.tokenizer.tokenizer, DummyTokenizer): # AggregateTokenizer.DummyTokenizer
vocab = vocab_dict
else:
vocab = list(vocab_dict.keys())
self.decoding.set_vocabulary(vocab)
self.decoding.set_tokenizer(tokenizer)
else:
logging.warning("Could not resolve the vocabulary of the tokenizer !")
self.decoding.set_decoding_type('subword')
def _aggregate_token_confidence(self, hypothesis: Hypothesis) -> List[float]:
"""
Implemented by subclass in order to aggregate token confidence to a word-level confidence.
**Note**: Only supports Sentencepiece based tokenizers!
Args:
hypothesis: Hypothesis
Returns:
A list of word-level confidence scores.
"""
return self._aggregate_token_confidence_subwords_sentencepiece(
self.decode_tokens_to_str(hypothesis.text[0]).split(), hypothesis.token_confidence, hypothesis.text[0]
)
def decode_tokens_to_str(self, tokens: List[int]) -> str:
"""
        Implemented by subclass in order to decode a token list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
"""
hypothesis = self.tokenizer.ids_to_text(tokens)
return hypothesis
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
"""
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
"""
token_list = self.tokenizer.ids_to_tokens(tokens)
return token_list
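# --- Illustrative usage sketch (not part of the upstream file; helper name is hypothetical) ---
# A minimal example of how the two decode helpers above are typically used once a
# `CTCBPEDecoding` instance has been constructed elsewhere (building one requires a decoding
# config and a trained tokenizer, which is out of scope here). Assumes `List` is imported
# from typing at the top of this module, as the annotations above suggest.
def _ctc_bpe_decode_helpers_example(decoding: 'CTCBPEDecoding', token_ids: List[int]):
    """Round-trip a list of token ids through the text and subword helpers."""
    text = decoding.decode_tokens_to_str(token_ids)      # e.g. "hello world"
    subwords = decoding.decode_ids_to_tokens(token_ids)  # e.g. ["▁hello", "▁wor", "ld"]
    return text, subwords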
class WERBPE(Metric):
"""
This metric computes numerator and denominator for Overall Word Error Rate for BPE tokens (WER-BPE) between
prediction and reference texts. When doing distributed training/evaluation the result of
``res=WERBPE(predictions, targets, target_lengths)`` calls will be all-reduced between all workers using SUM
    operations. Here ``res`` contains three numbers ``res=[wer, total_levenshtein_distance, total_number_of_words]``.
    If used with PytorchLightning LightningModule, include wer_numerator and wer_denominator inside validation_step
    results. Then aggregate (sum) them at the end of the validation epoch to correctly compute validation WER.
Example:
def validation_step(self, batch, batch_idx):
...
wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len)
self.val_outputs = {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom}
return self.val_outputs
def on_validation_epoch_end(self):
...
wer_num = torch.stack([x['val_wer_num'] for x in self.val_outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in self.val_outputs]).sum()
tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom}
self.val_outputs.clear() # free memory
return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
Args:
decoding: An instance of CTCBPEDecoding.
use_cer: Whether to compute word-error-rate or character-error-rate.
log_prediction: Whether to log a single decoded sample per call.
fold_consecutive: Whether repeated consecutive tokens should be folded into one when decoding.
Returns:
        res: a tuple of 3 zero dimensional float32 ``torch.Tensor`` objects: a WER score, a sum of Levenshtein
            distances for all prediction-reference pairs, and the total number of words in all references.
"""
full_state_update: bool = True
def __init__(
self,
decoding: CTCBPEDecoding,
use_cer=False,
log_prediction=True,
fold_consecutive=True,
dist_sync_on_step=False,
):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.decoding = decoding
self.tokenizer = self.decoding.tokenizer
self.blank_id = self.decoding.tokenizer.tokenizer.vocab_size
self.use_cer = use_cer
self.log_prediction = log_prediction
self.fold_consecutive = fold_consecutive
self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
def update(
self,
predictions: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
predictions_lengths: torch.Tensor = None,
):
"""
Updates metric state.
Args:
            predictions: an integer torch.Tensor of shape ``[Batch, Time, {Vocabulary}]`` (if ``batch_dim_index == 0``) or
                ``[Time, Batch, {Vocabulary}]`` (if ``batch_dim_index == 1``)
targets: an integer torch.Tensor of shape ``[Batch, Time]`` (if ``batch_dim_index == 0``) or
``[Time, Batch]`` (if ``batch_dim_index == 1``)
target_lengths: an integer torch.Tensor of shape ``[Batch]``
predictions_lengths: an integer torch.Tensor of shape ``[Batch]``
"""
words = 0
scores = 0
references = []
with torch.no_grad():
targets_cpu_tensor = targets.long().cpu()
tgt_lenths_cpu_tensor = target_lengths.long().cpu()
# iterate over batch
for ind in range(targets_cpu_tensor.shape[0]):
tgt_len = tgt_lenths_cpu_tensor[ind].item()
target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()
reference = self.decoding.decode_tokens_to_str(target)
references.append(reference)
hypotheses, _ = self.decoding.ctc_decoder_predictions_tensor(
predictions, predictions_lengths, fold_consecutive=self.fold_consecutive
)
if self.log_prediction:
logging.info(f"\n")
logging.info(f"reference:{references[0]}")
logging.info(f"predicted:{hypotheses[0]}")
for h, r in zip(hypotheses, references):
if self.use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
words += len(r_list)
            # Compute Levenshtein distance
scores += editdistance.eval(h_list, r_list)
self.scores = torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)
self.words = torch.tensor(words, device=self.words.device, dtype=self.words.dtype)
# return torch.tensor([scores, words]).to(predictions.device)
def compute(self):
scores = self.scores.detach().float()
words = self.words.detach().float()
return scores / words, scores, words
@dataclass
class CTCBPEDecodingConfig(CTCDecodingConfig):
pass
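# --- Illustrative sketch (not part of the upstream file; helper name is hypothetical) ---
# The accumulation performed by `WERBPE.update` boils down to summing Levenshtein distances
# over whitespace-split words, as shown on toy strings below. This relies on `editdistance`
# being imported at the top of this module, as its use in the class above implies.
def _werbpe_arithmetic_example():
    hypotheses = ["a b c", "a b"]
    references = ["a b d", "a b"]
    scores, words = 0, 0
    for h, r in zip(hypotheses, references):
        h_list, r_list = h.split(), r.split()
        words += len(r_list)
        scores += editdistance.eval(h_list, r_list)
    # scores == 1, words == 5 -> WER == 0.2, mirroring the tuple returned by `compute()`.
    return scores / words, scores, words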
|
NeMo-main
|
nemo/collections/asr/metrics/wer_bpe.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
from torchmetrics import Metric
__all__ = ['MultiBinaryAccuracy']
class MultiBinaryAccuracy(Metric):
"""
This metric computes accuracies that are needed to evaluate multiple binary outputs.
For example, if a model returns a set of multiple sigmoid outputs per each sample or at each time step,
F1 score can be calculated to monitor Type 1 error and Type 2 error together.
Example:
def validation_step(self, batch, batch_idx):
...
signals, signal_lengths, targets = batch
preds, _ = self.forward(input_signal=signals,
signal_lengths=signal_lengths,
targets=targets)
loss = self.loss(logits=preds, labels=targets)
self._accuracy_valid(preds, targets, signal_lengths)
            f1_acc = self._accuracy_valid.compute()
self.val_outputs = {'val_loss': loss, 'val_f1_acc': f1_acc}
return self.val_outputs
def on_validation_epoch_end(self):
...
val_loss_mean = torch.stack([x['val_loss'] for x in self.val_outputs]).mean()
correct_counts = torch.stack([x['val_correct_counts'] for x in self.val_outputs]).sum(axis=0)
total_counts = torch.stack([x['val_total_counts'] for x in self.val_outputs]).sum(axis=0)
self._accuracy_valid.correct_counts_k = correct_counts
self._accuracy_valid.total_counts_k = total_counts
f1_acc = self._accuracy_valid.compute()
self._accuracy_valid.reset()
self.log('val_loss', val_loss_mean)
self.log('val_f1_acc', f1_acc)
self.val_outputs.clear() # free memory
return {'val_loss': val_loss_mean, 'val_f1_acc': f1_acc}
Args:
preds (torch.Tensor):
Predicted values which should be in range of [0, 1].
targets (torch.Tensor):
Target values which should be in range of [0, 1].
signal_lengths (torch.Tensor):
Length of each sequence in the batch input. signal_lengths values are used to
filter out zero-padded parts in each sequence.
Returns:
f1_score (torch.Tensor):
F1 score calculated from the predicted value and binarized target values.
"""
full_state_update = False
def __init__(self, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.total_correct_counts = 0
self.total_sample_counts = 0
self.true_positive_count = 0
self.false_positive_count = 0
self.false_negative_count = 0
def update(self, preds: torch.Tensor, targets: torch.Tensor, signal_lengths: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
preds_list = [preds[k, : signal_lengths[k], :] for k in range(preds.shape[0])]
targets_list = [targets[k, : signal_lengths[k], :] for k in range(targets.shape[0])]
self.preds = torch.cat(preds_list, dim=0)
self.targets = torch.cat(targets_list, dim=0)
self.true = self.preds.round().bool() == self.targets.round().bool()
self.false = self.preds.round().bool() != self.targets.round().bool()
self.positive = self.preds.round().bool() == 1
self.negative = self.preds.round().bool() == 0
self.positive_count = torch.sum(self.preds.round().bool() == True)
self.true_positive_count += torch.sum(torch.logical_and(self.true, self.positive))
self.false_positive_count += torch.sum(torch.logical_and(self.false, self.positive))
self.false_negative_count += torch.sum(torch.logical_and(self.false, self.negative))
self.total_correct_counts += torch.sum(self.preds.round().bool() == self.targets.round().bool())
self.total_sample_counts += torch.prod(torch.tensor(self.targets.shape))
def compute(self):
"""
Compute F1 score from the accumulated values. Return -1 if the F1 score is NaN.
"""
self.precision = self.true_positive_count / (self.true_positive_count + self.false_positive_count)
self.recall = self.true_positive_count / (self.true_positive_count + self.false_negative_count)
self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)
if torch.isnan(self.f1_score):
logging.warn("self.f1_score contains NaN value. Returning -1 instead of NaN value.")
self.f1_score = -1
return self.f1_score
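# --- Illustrative sketch (not part of the upstream file; helper name is hypothetical) ---
# A toy end-to-end call of the metric above: one batch element, three frames, two binary
# outputs per frame. The rounded predictions match the targets exactly, so the F1 score is 1.0.
def _multi_binary_accuracy_example():
    metric = MultiBinaryAccuracy()
    preds = torch.tensor([[[0.9, 0.1], [0.2, 0.8], [0.7, 0.6]]])    # [Batch, Time, Classes]
    targets = torch.tensor([[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]])  # [Batch, Time, Classes]
    signal_lengths = torch.tensor([3])
    metric.update(preds, targets, signal_lengths)
    return metric.compute()  # tensor(1.) for this toy input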
|
NeMo-main
|
nemo/collections/asr/metrics/multi_binary_acc.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Union
import editdistance
import torch
from torchmetrics import Metric
from nemo.collections.asr.metrics.rnnt_wer import AbstractRNNTDecoding, RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import move_dimension_to_the_front
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis, NBestHypotheses
from nemo.collections.common.tokenizers.aggregate_tokenizer import AggregateTokenizer
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
__all__ = ['RNNTBPEDecoding', 'RNNTBPEWER']
class RNNTBPEDecoding(AbstractRNNTDecoding):
"""
Used for performing RNN-T auto-regressive decoding of the Decoder+Joint network given the encoder state.
Args:
decoding_cfg: A dict-like object which contains the following key-value pairs.
strategy: str value which represents the type of decoding that can occur.
Possible values are :
- greedy, greedy_batch (for greedy decoding).
- beam, tsd, alsd (for beam search decoding).
compute_hypothesis_token_set: A bool flag, which determines whether to compute a list of decoded
tokens as well as the decoded string. Default is False in order to avoid double decoding
unless required.
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1), Tensor(scalar, label after argmax)).
In order to obtain this hypothesis, please utilize `rnnt_decoder_predictions_tensor` function
with the `return_hypotheses` flag set to True.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
compute_timestamps: A bool flag, which determines whether to compute the character/subword, or
word based timestamp mapping the output log-probabilities to discrete intervals of timestamps.
The timestamps will be available in the returned Hypothesis.timestep as a dictionary.
compute_langs: a bool flag, which allows to compute language id (LID) information per token,
word, and the entire sample (most likely language id). The LIDS will be available
in the returned Hypothesis object as a dictionary
rnnt_timestamp_type: A str value, which represents the types of timestamps that should be calculated.
Can take the following values - "char" for character/subword time stamps, "word" for word level
time stamps and "all" (default), for both character level and word level time stamps.
            word_seperator: Str token representing the separator between words.
            preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
                generated during decoding (sample / batched). When set to true, the Hypothesis will contain
                the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence
scores. In order to obtain hypotheses with confidence scores, please utilize
`rnnt_decoder_predictions_tensor` function with the `preserve_frame_confidence` flag set to True.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding (sample / batched). When set to true, the Hypothesis will contain
                    the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `token_confidence` in it. Here, `token_confidence` is a List of floats.
The length of the list corresponds to the number of recognized tokens.
preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `word_confidence` in it. Here, `word_confidence` is a List of floats.
The length of the list corresponds to the number of recognized words.
exclude_blank: Bool flag indicating that blank token confidence scores are to be excluded
from the `token_confidence`.
aggregation: Which aggregation type to use for collapsing per-token confidence into per-word confidence.
Valid options are `mean`, `min`, `max`, `prod`.
measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str).
Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
                                Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
The config may further contain the following sub-dictionaries:
"greedy":
max_symbols: int, describing the maximum number of target tokens to decode per
timestep during greedy decoding. Setting to larger values allows longer sentences
to be decoded, at the cost of increased execution time.
preserve_frame_confidence: Same as above, overrides above value.
confidence_measure_cfg: Same as above, overrides confidence_cfg.measure_cfg.
"beam":
beam_size: int, defining the beam size for beam search. Must be >= 1.
                    If beam_size == 1, will perform cached greedy search. This might produce slightly
                    different results compared to the greedy search above.
score_norm: optional bool, whether to normalize the returned beam score in the hypotheses.
Set to True by default.
return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
hypotheses after beam search has concluded.
tsd_max_sym_exp: optional int, determines number of symmetric expansions of the target symbols
per timestep of the acoustic model. Larger values will allow longer sentences to be decoded,
at increased cost to execution time.
alsd_max_target_len: optional int or float, determines the potential maximum target sequence length.
If an integer is provided, it can decode sequences of that particular maximum length.
If a float is provided, it can decode sequences of int(alsd_max_target_len * seq_len),
where seq_len is the length of the acoustic model output (T).
NOTE:
If a float is provided, it can be greater than 1!
By default, a float of 2.0 is used so that a target sequence can be at most twice
as long as the acoustic model output length T.
maes_num_steps: Number of adaptive steps to take. From the paper, 2 steps is generally sufficient,
and can be reduced to 1 to improve decoding speed while sacrificing some accuracy. int > 0.
                maes_prefix_alpha: Maximum prefix length in prefix search. Must be an integer, and it is advised to keep this as 1
in order to reduce expensive beam search cost later. int >= 0.
maes_expansion_beta: Maximum number of prefix expansions allowed, in addition to the beam size.
Effectively, the number of hypothesis = beam_size + maes_expansion_beta. Must be an int >= 0,
and affects the speed of inference since large values will perform large beam search in the next step.
maes_expansion_gamma: Float pruning threshold used in the prune-by-value step when computing the expansions.
The default (2.3) is selected from the paper. It performs a comparison (max_log_prob - gamma <= log_prob[v])
where v is all vocabulary indices in the Vocab set and max_log_prob is the "most" likely token to be
predicted. Gamma therefore provides a margin of additional tokens which can be potential candidates for
expansion apart from the "most likely" candidate.
Lower values will reduce the number of expansions (by increasing pruning-by-value, thereby improving speed
but hurting accuracy). Higher values will increase the number of expansions (by reducing pruning-by-value,
thereby reducing speed but potentially improving accuracy). This is a hyper parameter to be experimentally
tuned on a validation set.
softmax_temperature: Scales the logits of the joint prior to computing log_softmax.
decoder: The Decoder/Prediction network module.
joint: The Joint network module.
tokenizer: The tokenizer which will be used for decoding.
"""
def __init__(self, decoding_cfg, decoder, joint, tokenizer: TokenizerSpec):
blank_id = tokenizer.tokenizer.vocab_size # RNNT or TDT models.
# multi-blank RNNTs
if hasattr(decoding_cfg, 'model_type') and decoding_cfg.model_type == 'multiblank':
blank_id = tokenizer.tokenizer.vocab_size + joint.num_extra_outputs
self.tokenizer = tokenizer
super(RNNTBPEDecoding, self).__init__(
decoding_cfg=decoding_cfg, decoder=decoder, joint=joint, blank_id=blank_id
)
if isinstance(self.decoding, rnnt_beam_decoding.BeamRNNTInfer):
self.decoding.set_decoding_type('subword')
def _aggregate_token_confidence(self, hypothesis: Hypothesis) -> List[float]:
"""
Implemented by subclass in order to reduce token confidence to a word-level confidence.
**Note**: Only supports Sentencepiece based tokenizers!
Args:
hypothesis: Hypothesis
Returns:
A list of word-level confidence scores.
"""
return self._aggregate_token_confidence_subwords_sentencepiece(
hypothesis.words, hypothesis.token_confidence, hypothesis.y_sequence
)
def decode_tokens_to_str(self, tokens: List[int]) -> str:
"""
        Implemented by subclass in order to decode a token list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
"""
hypothesis = self.tokenizer.ids_to_text(tokens)
return hypothesis
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
"""
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
"""
token_list = self.tokenizer.ids_to_tokens(tokens)
return token_list
def decode_tokens_to_lang(self, tokens: List[int]) -> str:
"""
Compute the most likely language ID (LID) string given the tokens.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded LID string.
"""
lang = self.tokenizer.ids_to_lang(tokens)
return lang
def decode_ids_to_langs(self, tokens: List[int]) -> List[str]:
"""
Decode a token id list into language ID (LID) list.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded LIDS.
"""
lang_list = self.tokenizer.ids_to_text_and_langs(tokens)
return lang_list
def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[Hypothesis, NBestHypotheses]]:
"""
Decode a list of hypotheses into a list of strings.
Overrides the super() method optionally adding lang information
Args:
hypotheses_list: List of Hypothesis.
Returns:
A list of strings.
"""
hypotheses = super().decode_hypothesis(hypotheses_list)
if self.compute_langs:
if isinstance(self.tokenizer, AggregateTokenizer):
for ind in range(len(hypotheses_list)):
# Extract the integer encoded hypothesis
prediction = hypotheses_list[ind].y_sequence
if type(prediction) != list:
prediction = prediction.tolist()
# RNN-T sample level is already preprocessed by implicit RNNT decoding
# Simply remove any blank tokens
prediction = [p for p in prediction if p != self.blank_id]
hypotheses[ind].langs = self.decode_tokens_to_lang(prediction)
hypotheses[ind].langs_chars = self.decode_ids_to_langs(prediction)
else:
logging.warning(
"Ignoring request for lang output in hypotheses since the model does not use an aggregate tokenizer"
)
return hypotheses
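# --- Illustrative usage sketch (not part of the upstream file; helper name is hypothetical) ---
# When the underlying tokenizer is an `AggregateTokenizer`, the language helpers above can map
# a decoded id sequence to a sample-level language id and to per-token language information.
def _rnnt_bpe_lang_decoding_example(decoding: 'RNNTBPEDecoding', token_ids: List[int]):
    """Illustrative only: requires a decoding object built with an aggregate tokenizer."""
    sample_lang = decoding.decode_tokens_to_lang(token_ids)     # e.g. "en"
    per_token_langs = decoding.decode_ids_to_langs(token_ids)   # per-token language info
    return sample_lang, per_token_langs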
class RNNTBPEWER(Metric):
"""
This metric computes numerator and denominator for Overall Word Error Rate (WER) between prediction and reference texts.
When doing distributed training/evaluation the result of res=WER(predictions, targets, target_lengths) calls
will be all-reduced between all workers using SUM operations.
    Here ``res`` contains two numbers: ``res=[wer_numerator, wer_denominator]`` and WER = wer_numerator / wer_denominator.
    If used with PytorchLightning LightningModule, include wer_numerator and wer_denominator inside validation_step results.
    Then aggregate (sum) them at the end of the validation epoch to correctly compute validation WER.
Example:
def validation_step(self, batch, batch_idx):
...
wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len)
self.val_outputs = {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom}
return self.val_outputs
def on_validation_epoch_end(self):
...
wer_num = torch.stack([x['val_wer_num'] for x in self.val_outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in self.val_outputs]).sum()
tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom}
self.val_outputs.clear() # free memory
return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
Args:
decoding: RNNTBPEDecoding object that will perform autoregressive decoding of the RNNT model.
batch_dim_index: Index of the batch dimension.
        use_cer: Whether to use Character Error Rate instead of Word Error Rate.
log_prediction: Whether to log a single decoded sample per call.
Returns:
        res: a tuple of 3 zero dimensional float32 ``torch.Tensor`` objects: a WER score, a sum of Levenshtein
            distances for all prediction-reference pairs, and the total number of words in all references.
"""
full_state_update = True
def __init__(
self,
decoding: RNNTBPEDecoding,
batch_dim_index=0,
use_cer: bool = False,
log_prediction: bool = True,
dist_sync_on_step=False,
):
super(RNNTBPEWER, self).__init__(dist_sync_on_step=dist_sync_on_step)
self.decoding = decoding
self.batch_dim_index = batch_dim_index
self.use_cer = use_cer
self.log_prediction = log_prediction
self.blank_id = self.decoding.blank_id
self.tokenizer = self.decoding.tokenizer
self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
def update(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
) -> torch.Tensor:
words = 0
scores = 0
references = []
with torch.no_grad():
# prediction_cpu_tensor = tensors[0].long().cpu()
targets_cpu_tensor = targets.long().cpu()
targets_cpu_tensor = move_dimension_to_the_front(targets_cpu_tensor, self.batch_dim_index)
tgt_lenths_cpu_tensor = target_lengths.long().cpu()
# iterate over batch
for ind in range(targets_cpu_tensor.shape[0]):
tgt_len = tgt_lenths_cpu_tensor[ind].item()
target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()
reference = self.decoding.decode_tokens_to_str(target)
references.append(reference)
hypotheses, _ = self.decoding.rnnt_decoder_predictions_tensor(encoder_output, encoded_lengths)
if self.log_prediction:
logging.info(f"\n")
logging.info(f"reference :{references[0]}")
logging.info(f"predicted :{hypotheses[0]}")
for h, r in zip(hypotheses, references):
if self.use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
words += len(r_list)
# Compute Levenshtein's distance
scores += editdistance.eval(h_list, r_list)
del hypotheses
self.scores += torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)
self.words += torch.tensor(words, device=self.words.device, dtype=self.words.dtype)
# return torch.tensor([scores, words]).to(predictions.device)
def compute(self):
wer = self.scores.float() / self.words
return wer, self.scores.detach(), self.words.detach()
@dataclass
class RNNTBPEDecodingConfig(RNNTDecodingConfig):
pass
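# --- Illustrative sketch (not part of the upstream file; helper name and numbers are hypothetical) ---
# The blank-index convention used by `RNNTBPEDecoding.__init__` above: the RNNT blank is placed
# right after the tokenizer vocabulary, and multi-blank models reserve additional trailing
# indices for their extra blank symbols.
def _rnnt_blank_id_example(vocab_size: int = 1024, num_extra_outputs: int = 4):
    standard_blank_id = vocab_size                          # regular RNNT / TDT models
    multiblank_blank_id = vocab_size + num_extra_outputs    # multi-blank RNNT models
    return standard_blank_id, multiblank_blank_id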
|
NeMo-main
|
nemo/collections/asr/metrics/rnnt_wer_bpe.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/metrics/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from abc import abstractmethod
from dataclasses import dataclass, is_dataclass
from typing import Callable, Dict, List, Optional, Tuple, Union
import editdistance
import jiwer
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from torchmetrics import Metric
from nemo.collections.asr.parts.submodules import ctc_beam_decoding, ctc_greedy_decoding
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceConfig, ConfidenceMixin
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis, NBestHypotheses
from nemo.utils import logging, logging_mode
__all__ = ['word_error_rate', 'word_error_rate_detail', 'WER', 'move_dimension_to_the_front']
def word_error_rate(hypotheses: List[str], references: List[str], use_cer=False) -> float:
"""
Computes Average Word Error rate between two texts represented as
corresponding lists of string.
Hypotheses and references must have same length.
Args:
hypotheses (list): list of hypotheses
references(list) : list of references
use_cer (bool): set True to enable cer
Returns:
wer (float): average word error rate
"""
scores = 0
words = 0
if len(hypotheses) != len(references):
raise ValueError(
"In word error rate calculation, hypotheses and reference"
" lists must have the same number of elements. But I got:"
"{0} and {1} correspondingly".format(len(hypotheses), len(references))
)
for h, r in zip(hypotheses, references):
if use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
words += len(r_list)
# May deprecate using editdistance in future release for here and rest of codebase
# once we confirm jiwer is reliable.
scores += editdistance.eval(h_list, r_list)
if words != 0:
wer = 1.0 * scores / words
else:
wer = float('inf')
return wer
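# --- Illustrative usage sketch (not part of the upstream file; helper name is hypothetical) ---
# A quick sanity check of `word_error_rate` on toy strings: one deletion against a four-word
# reference gives WER = 0.25; passing use_cer=True makes the same call operate on characters.
def _word_error_rate_example():
    wer = word_error_rate(hypotheses=["the cat sat"], references=["the cat sat down"])
    return wer  # 0.25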
def word_error_rate_detail(
hypotheses: List[str], references: List[str], use_cer=False
) -> Tuple[float, int, float, float, float]:
"""
Computes Average Word Error Rate with details (insertion rate, deletion rate, substitution rate)
between two texts represented as corresponding lists of string.
Hypotheses and references must have same length.
Args:
hypotheses (list): list of hypotheses
references(list) : list of references
use_cer (bool): set True to enable cer
Returns:
wer (float): average word error rate
        words (int): Total number of words/characters in the given reference texts
ins_rate (float): average insertion error rate
del_rate (float): average deletion error rate
sub_rate (float): average substitution error rate
"""
scores = 0
words = 0
ops_count = {'substitutions': 0, 'insertions': 0, 'deletions': 0}
if len(hypotheses) != len(references):
raise ValueError(
"In word error rate calculation, hypotheses and reference"
" lists must have the same number of elements. But I got:"
"{0} and {1} correspondingly".format(len(hypotheses), len(references))
)
for h, r in zip(hypotheses, references):
if use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
# To get rid of the issue that jiwer does not allow empty string
if len(r_list) == 0:
if len(h_list) != 0:
errors = len(h_list)
ops_count['insertions'] += errors
else:
errors = 0
else:
if use_cer:
measures = jiwer.cer(r, h, return_dict=True)
else:
measures = jiwer.compute_measures(r, h)
errors = measures['insertions'] + measures['deletions'] + measures['substitutions']
ops_count['insertions'] += measures['insertions']
ops_count['deletions'] += measures['deletions']
ops_count['substitutions'] += measures['substitutions']
scores += errors
words += len(r_list)
if words != 0:
wer = 1.0 * scores / words
ins_rate = 1.0 * ops_count['insertions'] / words
del_rate = 1.0 * ops_count['deletions'] / words
sub_rate = 1.0 * ops_count['substitutions'] / words
else:
wer, ins_rate, del_rate, sub_rate = float('inf'), float('inf'), float('inf'), float('inf')
return wer, words, ins_rate, del_rate, sub_rate
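# --- Illustrative usage sketch (not part of the upstream file; helper name is hypothetical) ---
# `word_error_rate_detail` additionally breaks the errors down by operation type. For the toy
# pair below the only error is a single deletion over four reference words.
def _word_error_rate_detail_example():
    wer, words, ins_rate, del_rate, sub_rate = word_error_rate_detail(
        hypotheses=["the cat sat"], references=["the black cat sat"]
    )
    # Expected: wer == 0.25, words == 4, ins_rate == 0.0, del_rate == 0.25, sub_rate == 0.0
    return wer, words, ins_rate, del_rate, sub_rate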
def word_error_rate_per_utt(hypotheses: List[str], references: List[str], use_cer=False) -> Tuple[List[float], float]:
"""
Computes Word Error Rate per utterance and the average WER
between two texts represented as corresponding lists of string.
Hypotheses and references must have same length.
Args:
hypotheses (list): list of hypotheses
references(list) : list of references
use_cer (bool): set True to enable cer
Returns:
wer_per_utt (List[float]): word error rate per utterance
avg_wer (float): average word error rate
"""
scores = 0
words = 0
wer_per_utt = []
if len(hypotheses) != len(references):
raise ValueError(
"In word error rate calculation, hypotheses and reference"
" lists must have the same number of elements. But I got:"
"{0} and {1} correspondingly".format(len(hypotheses), len(references))
)
for h, r in zip(hypotheses, references):
if use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
# To get rid of the issue that jiwer does not allow empty string
if len(r_list) == 0:
if len(h_list) != 0:
errors = len(h_list)
                wer_per_utt.append(float('inf'))
            else:
                # Both hypothesis and reference are empty: count zero errors for this utterance.
                errors = 0
                wer_per_utt.append(0.0)
else:
if use_cer:
measures = jiwer.cer(r, h, return_dict=True)
er = measures['cer']
else:
measures = jiwer.compute_measures(r, h)
er = measures['wer']
errors = measures['insertions'] + measures['deletions'] + measures['substitutions']
wer_per_utt.append(er)
scores += errors
words += len(r_list)
if words != 0:
avg_wer = 1.0 * scores / words
else:
avg_wer = float('inf')
return wer_per_utt, avg_wer
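# --- Illustrative usage sketch (not part of the upstream file; helper name is hypothetical) ---
# `word_error_rate_per_utt` returns one score per utterance plus the corpus-level average,
# which is weighted by reference length rather than being a mean of the per-utterance scores.
def _word_error_rate_per_utt_example():
    per_utt, avg_wer = word_error_rate_per_utt(
        hypotheses=["the cat", "a dog barks"], references=["the cat", "a dog barked"]
    )
    # Expected: per_utt ~ [0.0, 0.333...], avg_wer == 1 error / 5 reference words == 0.2
    return per_utt, avg_wer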
def move_dimension_to_the_front(tensor, dim_index):
all_dims = list(range(tensor.ndim))
return tensor.permute(*([dim_index] + all_dims[:dim_index] + all_dims[dim_index + 1 :]))
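# --- Illustrative usage sketch (not part of the upstream file; helper name is hypothetical) ---
# `move_dimension_to_the_front` is a thin wrapper over `torch.Tensor.permute` that brings an
# arbitrary dimension to position 0 while keeping the relative order of the remaining dims.
def _move_dimension_example():
    x = torch.zeros(8, 16, 32)                # e.g. [Time, Batch, Vocabulary]
    y = move_dimension_to_the_front(x, 1)     # bring the batch dimension to the front
    return y.shape                            # torch.Size([16, 8, 32])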
class AbstractCTCDecoding(ConfidenceMixin):
"""
Used for performing CTC auto-regressive / non-auto-regressive decoding of the logprobs.
Args:
decoding_cfg: A dict-like object which contains the following key-value pairs.
strategy: str value which represents the type of decoding that can occur.
Possible values are :
- greedy (for greedy decoding).
- beam (for DeepSpeed KenLM based decoding).
compute_timestamps: A bool flag, which determines whether to compute the character/subword, or
                word based timestamp mapping the output log-probabilities to discrete intervals of timestamps.
The timestamps will be available in the returned Hypothesis.timestep as a dictionary.
ctc_timestamp_type: A str value, which represents the types of timestamps that should be calculated.
Can take the following values - "char" for character/subword time stamps, "word" for word level
time stamps and "all" (default), for both character level and word level time stamps.
            word_seperator: Str token representing the separator between words.
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `logprobs` in it. Here, `logprobs` is a torch.Tensors.
confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence
scores. In order to obtain hypotheses with confidence scores, please utilize
`ctc_decoder_predictions_tensor` function with the `preserve_frame_confidence` flag set to True.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding. When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of floats.
preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `token_confidence` in it. Here, `token_confidence` is a List of floats.
The length of the list corresponds to the number of recognized tokens.
preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `word_confidence` in it. Here, `word_confidence` is a List of floats.
The length of the list corresponds to the number of recognized words.
exclude_blank: Bool flag indicating that blank token confidence scores are to be excluded
from the `token_confidence`.
aggregation: Which aggregation type to use for collapsing per-token confidence into per-word confidence.
Valid options are `mean`, `min`, `max`, `prod`.
measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str).
Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
                                Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
batch_dim_index: Index of the batch dimension of ``targets`` and ``predictions`` parameters of
``ctc_decoder_predictions_tensor`` methods. Can be either 0 or 1.
The config may further contain the following sub-dictionaries:
"greedy":
preserve_alignments: Same as above, overrides above value.
compute_timestamps: Same as above, overrides above value.
preserve_frame_confidence: Same as above, overrides above value.
confidence_measure_cfg: Same as above, overrides confidence_cfg.measure_cfg.
"beam":
beam_size: int, defining the beam size for beam search. Must be >= 1.
                    If beam_size == 1, will perform cached greedy search. This might produce slightly
                    different results compared to the greedy search above.
                return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
                    hypotheses after beam search has concluded. Set to True by default.
beam_alpha: float, the strength of the Language model on the final score of a token.
final_score = acoustic_score + beam_alpha * lm_score + beam_beta * seq_length.
beam_beta: float, the strength of the sequence length penalty on the final score of a token.
final_score = acoustic_score + beam_alpha * lm_score + beam_beta * seq_length.
kenlm_path: str, path to a KenLM ARPA or .binary file (depending on the strategy chosen).
If the path is invalid (file is not found at path), will raise a deferred error at the moment
of calculation of beam search, so that users may update / change the decoding strategy
to point to the correct file.
blank_id: The id of the RNNT blank token.
"""
def __init__(self, decoding_cfg, blank_id: int):
super().__init__()
        # Convert dataclass to config
if is_dataclass(decoding_cfg):
decoding_cfg = OmegaConf.structured(decoding_cfg)
if not isinstance(decoding_cfg, DictConfig):
decoding_cfg = OmegaConf.create(decoding_cfg)
OmegaConf.set_struct(decoding_cfg, False)
# update minimal config
minimal_cfg = ['greedy']
for item in minimal_cfg:
if item not in decoding_cfg:
decoding_cfg[item] = OmegaConf.create({})
self.cfg = decoding_cfg
self.blank_id = blank_id
self.preserve_alignments = self.cfg.get('preserve_alignments', None)
self.compute_timestamps = self.cfg.get('compute_timestamps', None)
self.batch_dim_index = self.cfg.get('batch_dim_index', 0)
self.word_seperator = self.cfg.get('word_seperator', ' ')
possible_strategies = ['greedy', 'beam', 'pyctcdecode', 'flashlight']
if self.cfg.strategy not in possible_strategies:
raise ValueError(f"Decoding strategy must be one of {possible_strategies}. Given {self.cfg.strategy}")
# Update preserve alignments
if self.preserve_alignments is None:
if self.cfg.strategy in ['greedy']:
self.preserve_alignments = self.cfg.greedy.get('preserve_alignments', False)
else:
self.preserve_alignments = self.cfg.beam.get('preserve_alignments', False)
# Update compute timestamps
if self.compute_timestamps is None:
if self.cfg.strategy in ['greedy']:
self.compute_timestamps = self.cfg.greedy.get('compute_timestamps', False)
elif self.cfg.strategy in ['beam']:
self.compute_timestamps = self.cfg.beam.get('compute_timestamps', False)
# initialize confidence-related fields
self._init_confidence(self.cfg.get('confidence_cfg', None))
# Confidence estimation is not implemented for strategies other than `greedy`
if (
not self.preserve_frame_confidence
and self.cfg.strategy != 'greedy'
and self.cfg.beam.get('preserve_frame_confidence', False)
):
raise NotImplementedError(f"Confidence calculation is not supported for strategy `{self.cfg.strategy}`")
# we need timestamps to extract non-blank per-frame confidence
if self.compute_timestamps is not None:
self.compute_timestamps |= self.preserve_frame_confidence
if self.cfg.strategy == 'greedy':
self.decoding = ctc_greedy_decoding.GreedyCTCInfer(
blank_id=self.blank_id,
preserve_alignments=self.preserve_alignments,
compute_timestamps=self.compute_timestamps,
preserve_frame_confidence=self.preserve_frame_confidence,
confidence_measure_cfg=self.confidence_measure_cfg,
)
elif self.cfg.strategy == 'beam':
self.decoding = ctc_beam_decoding.BeamCTCInfer(
blank_id=blank_id,
beam_size=self.cfg.beam.get('beam_size', 1),
search_type='default',
return_best_hypothesis=self.cfg.beam.get('return_best_hypothesis', True),
preserve_alignments=self.preserve_alignments,
compute_timestamps=self.compute_timestamps,
beam_alpha=self.cfg.beam.get('beam_alpha', 1.0),
beam_beta=self.cfg.beam.get('beam_beta', 0.0),
kenlm_path=self.cfg.beam.get('kenlm_path', None),
)
self.decoding.override_fold_consecutive_value = False
elif self.cfg.strategy == 'pyctcdecode':
self.decoding = ctc_beam_decoding.BeamCTCInfer(
blank_id=blank_id,
beam_size=self.cfg.beam.get('beam_size', 1),
search_type='pyctcdecode',
return_best_hypothesis=self.cfg.beam.get('return_best_hypothesis', True),
preserve_alignments=self.preserve_alignments,
compute_timestamps=self.compute_timestamps,
beam_alpha=self.cfg.beam.get('beam_alpha', 1.0),
beam_beta=self.cfg.beam.get('beam_beta', 0.0),
kenlm_path=self.cfg.beam.get('kenlm_path', None),
pyctcdecode_cfg=self.cfg.beam.get('pyctcdecode_cfg', None),
)
self.decoding.override_fold_consecutive_value = False
elif self.cfg.strategy == 'flashlight':
self.decoding = ctc_beam_decoding.BeamCTCInfer(
blank_id=blank_id,
beam_size=self.cfg.beam.get('beam_size', 1),
search_type='flashlight',
return_best_hypothesis=self.cfg.beam.get('return_best_hypothesis', True),
preserve_alignments=self.preserve_alignments,
compute_timestamps=self.compute_timestamps,
beam_alpha=self.cfg.beam.get('beam_alpha', 1.0),
beam_beta=self.cfg.beam.get('beam_beta', 0.0),
kenlm_path=self.cfg.beam.get('kenlm_path', None),
flashlight_cfg=self.cfg.beam.get('flashlight_cfg', None),
)
self.decoding.override_fold_consecutive_value = False
else:
raise ValueError(
f"Incorrect decoding strategy supplied. Must be one of {possible_strategies}\n"
f"but was provided {self.cfg.strategy}"
)
def ctc_decoder_predictions_tensor(
self,
decoder_outputs: torch.Tensor,
decoder_lengths: torch.Tensor = None,
fold_consecutive: bool = True,
return_hypotheses: bool = False,
) -> Tuple[List[str], Optional[List[List[str]]], Optional[Union[Hypothesis, NBestHypotheses]]]:
"""
Decodes a sequence of labels to words
Args:
            decoder_outputs: An integer torch.Tensor of shape [Batch, Time, {Vocabulary}] (if ``batch_dim_index == 0``) or
                [Time, Batch, {Vocabulary}] (if ``batch_dim_index == 1``) of integer indices that correspond to the index of
                some character in the label set.
decoder_lengths: Optional tensor of length `Batch` which contains the integer lengths
of the sequence in the padded `predictions` tensor.
fold_consecutive: Bool, determine whether to perform "ctc collapse", folding consecutive tokens
into a single token.
return_hypotheses: Bool flag whether to return just the decoding predictions of the model
or a Hypothesis object that holds information such as the decoded `text`,
                the `alignment` emitted by the CTC Model, and the `length` of the sequence (if available).
May also contain the log-probabilities of the decoder (if this method is called via
transcribe())
Returns:
Either a list of str which represent the CTC decoded strings per sample,
or a list of Hypothesis objects containing additional information.
"""
if isinstance(decoder_outputs, torch.Tensor):
decoder_outputs = move_dimension_to_the_front(decoder_outputs, self.batch_dim_index)
if (
hasattr(self.decoding, 'override_fold_consecutive_value')
and self.decoding.override_fold_consecutive_value is not None
):
logging.info(
f"Beam search requires that consecutive ctc tokens are not folded. \n"
f"Overriding provided value of `fold_consecutive` = {fold_consecutive} to "
f"{self.decoding.override_fold_consecutive_value}",
mode=logging_mode.ONCE,
)
fold_consecutive = self.decoding.override_fold_consecutive_value
with torch.inference_mode():
# Resolve the forward step of the decoding strategy
hypotheses_list = self.decoding(
decoder_output=decoder_outputs, decoder_lengths=decoder_lengths
) # type: List[List[Hypothesis]]
# extract the hypotheses
hypotheses_list = hypotheses_list[0] # type: List[Hypothesis]
if isinstance(hypotheses_list[0], NBestHypotheses):
hypotheses = []
all_hypotheses = []
for nbest_hyp in hypotheses_list: # type: NBestHypotheses
n_hyps = nbest_hyp.n_best_hypotheses # Extract all hypotheses for this sample
decoded_hyps = self.decode_hypothesis(
n_hyps, fold_consecutive
) # type: List[Union[Hypothesis, NBestHypotheses]]
# If computing timestamps
if self.compute_timestamps is True:
timestamp_type = self.cfg.get('ctc_timestamp_type', 'all')
for hyp_idx in range(len(decoded_hyps)):
decoded_hyps[hyp_idx] = self.compute_ctc_timestamps(decoded_hyps[hyp_idx], timestamp_type)
hypotheses.append(decoded_hyps[0]) # best hypothesis
all_hypotheses.append(decoded_hyps)
if return_hypotheses:
return hypotheses, all_hypotheses
best_hyp_text = [h.text for h in hypotheses]
all_hyp_text = [h.text for hh in all_hypotheses for h in hh]
return best_hyp_text, all_hyp_text
else:
hypotheses = self.decode_hypothesis(
hypotheses_list, fold_consecutive
) # type: List[Union[Hypothesis, NBestHypotheses]]
# If computing timestamps
if self.compute_timestamps is True:
# greedy decoding, can get high-level confidence scores
if return_hypotheses and (self.preserve_word_confidence or self.preserve_token_confidence):
hypotheses = self.compute_confidence(hypotheses)
else:
# remove unused token_repetitions from Hypothesis.text
for hyp in hypotheses:
hyp.text = hyp.text[:2]
timestamp_type = self.cfg.get('ctc_timestamp_type', 'all')
for hyp_idx in range(len(hypotheses)):
hypotheses[hyp_idx] = self.compute_ctc_timestamps(hypotheses[hyp_idx], timestamp_type)
if return_hypotheses:
return hypotheses, None
best_hyp_text = [h.text for h in hypotheses]
return best_hyp_text, None
def decode_hypothesis(
self, hypotheses_list: List[Hypothesis], fold_consecutive: bool
) -> List[Union[Hypothesis, NBestHypotheses]]:
"""
Decode a list of hypotheses into a list of strings.
Args:
hypotheses_list: List of Hypothesis.
fold_consecutive: Whether to collapse the ctc blank tokens or not.
Returns:
A list of strings.
"""
for ind in range(len(hypotheses_list)):
# Extract the integer encoded hypothesis
hyp = hypotheses_list[ind]
prediction = hyp.y_sequence
predictions_len = hyp.length if hyp.length > 0 else None
if fold_consecutive:
if type(prediction) != list:
prediction = prediction.numpy().tolist()
if predictions_len is not None:
prediction = prediction[:predictions_len]
# CTC decoding procedure
decoded_prediction = []
token_lengths = [] # preserve token lengths
token_repetitions = [] # preserve number of repetitions per token
previous = self.blank_id
last_length = 0
last_repetition = 1
for pidx, p in enumerate(prediction):
if (p != previous or previous == self.blank_id) and p != self.blank_id:
decoded_prediction.append(p)
token_lengths.append(pidx - last_length)
last_length = pidx
token_repetitions.append(last_repetition)
last_repetition = 1
if p == previous and previous != self.blank_id:
last_repetition += 1
previous = p
if len(token_repetitions) > 0:
token_repetitions = token_repetitions[1:] + [last_repetition]
else:
if predictions_len is not None:
prediction = prediction[:predictions_len]
decoded_prediction = prediction[prediction != self.blank_id].tolist()
token_lengths = [1] * len(decoded_prediction) # preserve number of repetitions per token
token_repetitions = [1] * len(decoded_prediction) # preserve number of repetitions per token
# De-tokenize the integer tokens; if not computing timestamps
if self.compute_timestamps is True:
# keep the original predictions, wrap with the number of repetitions per token
# this is done so that `ctc_decoder_predictions_tensor()` can process this hypothesis
# in order to compute exact time stamps.
hypothesis = (decoded_prediction, token_lengths, token_repetitions)
else:
hypothesis = self.decode_tokens_to_str(decoded_prediction)
# TODO: remove
# collapse leading spaces before . , ? for PC models
hypothesis = re.sub(r'(\s+)([\.\,\?])', r'\2', hypothesis)
# Preserve this wrapped hypothesis or decoded text tokens.
hypotheses_list[ind].text = hypothesis
return hypotheses_list
def compute_confidence(self, hypotheses_list: List[Hypothesis]) -> List[Hypothesis]:
"""
Computes high-level (per-token and/or per-word) confidence scores for a list of hypotheses.
Assumes that `frame_confidence` is present in the hypotheses.
Args:
hypotheses_list: List of Hypothesis.
Returns:
A list of hypotheses with high-level confidence scores.
"""
for hyp in hypotheses_list:
if not isinstance(hyp.text, tuple) or len(hyp.text) != 3:
# the method must have been called in the wrong place
raise ValueError(
"""Wrong format of the `text` attribute of a hypothesis.\n
Expected: (decoded_prediction, token_repetitions)\n
The method invocation is expected between .decode_hypothesis() and .compute_ctc_timestamps()"""
)
token_repetitions = hyp.text[2]
hyp.text = hyp.text[:2]
token_confidence = []
if self.exclude_blank_from_confidence:
non_blank_frame_confidence = hyp.non_blank_frame_confidence
i = 0
for tr in token_repetitions:
# token repetition can be zero
j = i + tr
token_confidence.append(self._aggregate_confidence(non_blank_frame_confidence[i:j]))
i = j
else:
# <blank> tokens are considered to belong to the last non-blank token, if any.
token_lengths = hyp.text[1]
if len(token_lengths) > 0:
ts = token_lengths[0]
for tl in token_lengths[1:] + [len(hyp.frame_confidence)]:
token_confidence.append(self._aggregate_confidence(hyp.frame_confidence[ts : ts + tl]))
ts += tl
hyp.token_confidence = token_confidence
if self.preserve_word_confidence:
for hyp in hypotheses_list:
hyp.word_confidence = self._aggregate_token_confidence(hyp)
return hypotheses_list
@abstractmethod
def decode_tokens_to_str(self, tokens: List[int]) -> str:
"""
        Implemented by subclass in order to decode a token id list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
"""
raise NotImplementedError()
@abstractmethod
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
"""
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
"""
raise NotImplementedError()
def compute_ctc_timestamps(self, hypothesis: Hypothesis, timestamp_type: str = "all"):
"""
Method to compute time stamps at char/subword, and word level given some hypothesis.
        Requires the input hypothesis to contain a `text` field that is a tuple. The tuple contains
        the ctc-collapsed integer ids and the length of each token.
        Args:
            hypothesis: A Hypothesis object, with a wrapped `text` field.
                The `text` field must contain a tuple with two values -
                the ctc-collapsed integer ids, and
                a list of integers that represents the length of each token.
timestamp_type: A str value that represents the type of time stamp calculated.
Can be one of "char", "word" or "all"
Returns:
A Hypothesis object with a modified `timestep` value, which is now a dictionary containing
the time stamp information.
"""
assert timestamp_type in ['char', 'word', 'all']
# Unpack the temporary storage, and set the decoded predictions
decoded_prediction, token_lengths = hypothesis.text
hypothesis.text = decoded_prediction
# Retrieve offsets
char_offsets = word_offsets = None
char_offsets = self._compute_offsets(hypothesis, token_lengths, self.blank_id)
# Assert number of offsets and hypothesis tokens are 1:1 match.
if len(char_offsets) != len(hypothesis.text):
raise ValueError(
f"`char_offsets`: {char_offsets} and `processed_tokens`: {hypothesis.text}"
" have to be of the same length, but are: "
f"`len(offsets)`: {len(char_offsets)} and `len(processed_tokens)`:"
f" {len(hypothesis.text)}"
)
# Correctly process the token ids to chars/subwords.
for i, char in enumerate(hypothesis.text):
char_offsets[i]["char"] = self.decode_tokens_to_str([char])
# detect char vs subword models
lens = [len(list(v["char"])) > 1 for v in char_offsets]
if any(lens):
text_type = 'subword'
else:
text_type = 'char'
# retrieve word offsets from character offsets
word_offsets = None
if timestamp_type in ['word', 'all']:
if text_type == 'char':
word_offsets = self._get_word_offsets_chars(char_offsets, word_delimiter_char=self.word_seperator)
else:
word_offsets = self._get_word_offsets_subwords_sentencepiece(
char_offsets,
hypothesis,
decode_ids_to_tokens=self.decode_ids_to_tokens,
decode_tokens_to_str=self.decode_tokens_to_str,
)
# attach results
if len(hypothesis.timestep) > 0:
timestep_info = hypothesis.timestep
else:
timestep_info = []
# Setup defaults
hypothesis.timestep = {"timestep": timestep_info}
# Add char / subword time stamps
if char_offsets is not None and timestamp_type in ['char', 'all']:
hypothesis.timestep['char'] = char_offsets
# Add word time stamps
if word_offsets is not None and timestamp_type in ['word', 'all']:
hypothesis.timestep['word'] = word_offsets
# Convert the token indices to text
hypothesis.text = self.decode_tokens_to_str(hypothesis.text)
return hypothesis
@staticmethod
def _compute_offsets(
hypothesis: Hypothesis, token_lengths: List[int], ctc_token: int
) -> List[Dict[str, Union[str, int]]]:
"""
        Utility method that calculates the individual time indices where a token starts and ends.
Args:
hypothesis: A Hypothesis object that contains `text` field that holds the character / subword token
emitted at every time step after ctc collapse.
token_lengths: A list of ints representing the lengths of each emitted token.
ctc_token: The integer of the ctc blank token used during ctc collapse.
        Returns:
            A list of dictionaries, one per non-blank token, each containing the token under the key "char"
            along with its "start_offset" and "end_offset".
        """
start_index = 0
# If the exact timestep information is available, utilize the 1st non-ctc blank token timestep
# as the start index.
if hypothesis.timestep is not None and len(hypothesis.timestep) > 0:
start_index = max(0, hypothesis.timestep[0] - 1)
# Construct the start and end indices brackets
end_indices = np.asarray(token_lengths).cumsum()
start_indices = np.concatenate(([start_index], end_indices[:-1]))
# Merge the results per token into a list of dictionaries
offsets = [
{"char": t, "start_offset": s, "end_offset": e}
for t, s, e in zip(hypothesis.text, start_indices, end_indices)
]
# Filter out CTC token
offsets = list(filter(lambda offsets: offsets["char"] != ctc_token, offsets))
return offsets
@staticmethod
def _get_word_offsets_chars(
        offsets: List[Dict[str, Union[str, float]]], word_delimiter_char: str = " "
    ) -> List[Dict[str, Union[str, float]]]:
"""
Utility method which constructs word time stamps out of character time stamps.
References:
This code is a port of the Hugging Face code for word time stamp construction.
Args:
offsets: A list of dictionaries, each containing "char", "start_offset" and "end_offset".
word_delimiter_char: Character token that represents the word delimiter. By default, " ".
Returns:
A list of dictionaries containing the word offsets. Each item contains "word", "start_offset" and
"end_offset".
"""
word_offsets = []
last_state = "SPACE"
word = ""
start_offset = 0
end_offset = 0
for i, offset in enumerate(offsets):
char = offset["char"]
state = "SPACE" if char == word_delimiter_char else "WORD"
if state == last_state:
# If we are in the same state as before, we simply repeat what we've done before
end_offset = offset["end_offset"]
word += char
else:
# Switching state
if state == "SPACE":
# Finishing a word
word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
else:
# Starting a new word
start_offset = offset["start_offset"]
end_offset = offset["end_offset"]
word = char
last_state = state
if last_state == "WORD":
word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
return word_offsets
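    # Illustrative example for `_get_word_offsets_chars` (character offsets below are assumed values):
    #   offsets = [{"char": "h", "start_offset": 0, "end_offset": 1},
    #              {"char": "i", "start_offset": 1, "end_offset": 2},
    #              {"char": " ", "start_offset": 2, "end_offset": 3},
    #              {"char": "a", "start_offset": 3, "end_offset": 4}]
    #   result  = [{"word": "hi", "start_offset": 0, "end_offset": 2},
    #              {"word": "a", "start_offset": 3, "end_offset": 4}]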
@staticmethod
def _get_word_offsets_subwords_sentencepiece(
        offsets: List[Dict[str, Union[str, float]]],
        hypothesis: Hypothesis,
        decode_ids_to_tokens: Callable[[List[int]], List[str]],
        decode_tokens_to_str: Callable[[List[int]], str],
    ) -> List[Dict[str, Union[str, float]]]:
"""
Utility method which constructs word time stamps out of sub-word time stamps.
**Note**: Only supports Sentencepiece based tokenizers !
Args:
offsets: A list of dictionaries, each containing "char", "start_offset" and "end_offset".
hypothesis: Hypothesis object that contains `text` field, where each token is a sub-word id
after ctc collapse.
decode_ids_to_tokens: A Callable function that accepts a list of integers and maps it to a sub-word.
decode_tokens_to_str: A Callable function that accepts a list of integers and maps it to text / str.
Returns:
A list of dictionaries containing the word offsets. Each item contains "word", "start_offset" and
"end_offset".
"""
word_offsets = []
built_token = []
previous_token_index = 0
# For every collapsed sub-word token
for i, char in enumerate(hypothesis.text):
# Compute the sub-word text representation, and the decoded text (stripped of sub-word markers).
token = decode_ids_to_tokens([char])[0]
token_text = decode_tokens_to_str([char])
# It is a sub-word token, or contains an identifier at the beginning such as _ or ## that was stripped
# after forcing partial text conversion of the token.
if token != token_text:
# If there are any partially or fully built sub-word token ids, construct to text.
# Note: This is "old" subword, that occurs *after* current sub-word has started.
if len(built_token) > 0:
word_offsets.append(
{
"word": decode_tokens_to_str(built_token),
"start_offset": offsets[previous_token_index]["start_offset"],
"end_offset": offsets[i]["start_offset"],
}
)
# Prepare list of new sub-word ids
built_token.clear()
built_token.append(char)
previous_token_index = i
else:
# If the token does not contain any sub-word start mark, then the sub-word has not completed yet
# Append to current sub-word list.
built_token.append(char)
# Inject the start offset of the first token to word offsets
        # This is because the injection of the first sub-word is always delayed by the loop above,
        # which first checks whether the built token is ready before flushing it.
        # Therefore, without this forced injection, the start_offset would appear off by 1.
if len(word_offsets) == 0:
# alaptev: sometimes word_offsets can be empty
if len(built_token) > 0:
word_offsets.append(
{
"word": decode_tokens_to_str(built_token),
"start_offset": offsets[0]["start_offset"],
"end_offset": offsets[-1]["end_offset"],
}
)
built_token.clear()
else:
word_offsets[0]["start_offset"] = offsets[0]["start_offset"]
# If there are any remaining tokens left, inject them all into the final word offset.
# Note: The start offset of this token is the start time of the first token inside build_token.
# Note: The end offset of this token is the end time of the last token inside build_token
if len(built_token) > 0:
word_offsets.append(
{
"word": decode_tokens_to_str(built_token),
"start_offset": offsets[-(len(built_token))]["start_offset"],
"end_offset": offsets[-1]["end_offset"],
}
)
built_token.clear()
return word_offsets
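    # Illustrative behaviour of `_get_word_offsets_subwords_sentencepiece` (assumed SentencePiece ids):
    #   the ids decode to tokens ["▁he", "llo", "▁world"]; tokens carrying the word-start marker ("▁")
    #   flush the previously built word, yielding ["hello", "world"], where each word's start offset comes
    #   from its first sub-word and its end offset from the first sub-word of the following word.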
@property
def preserve_alignments(self):
return self._preserve_alignments
@preserve_alignments.setter
def preserve_alignments(self, value):
self._preserve_alignments = value
if hasattr(self, 'decoding'):
self.decoding.preserve_alignments = value
@property
def compute_timestamps(self):
return self._compute_timestamps
@compute_timestamps.setter
def compute_timestamps(self, value):
self._compute_timestamps = value
if hasattr(self, 'decoding'):
self.decoding.compute_timestamps = value
@property
def preserve_frame_confidence(self):
return self._preserve_frame_confidence
@preserve_frame_confidence.setter
def preserve_frame_confidence(self, value):
self._preserve_frame_confidence = value
if hasattr(self, 'decoding'):
self.decoding.preserve_frame_confidence = value
class CTCDecoding(AbstractCTCDecoding):
"""
Used for performing CTC auto-regressive / non-auto-regressive decoding of the logprobs for character
based models.
Args:
decoding_cfg: A dict-like object which contains the following key-value pairs.
strategy: str value which represents the type of decoding that can occur.
Possible values are :
- greedy (for greedy decoding).
- beam (for DeepSpeed KenLM based decoding).
compute_timestamps: A bool flag, which determines whether to compute the character/subword, or
                word based timestamps, mapping the output log-probabilities to discrete intervals of timestamps.
The timestamps will be available in the returned Hypothesis.timestep as a dictionary.
ctc_timestamp_type: A str value, which represents the types of timestamps that should be calculated.
Can take the following values - "char" for character/subword time stamps, "word" for word level
time stamps and "all" (default), for both character level and word level time stamps.
            word_seperator: Str token representing the separator between words.
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
                the non-null value for `logprobs` in it. Here, `logprobs` is a torch.Tensor.
confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence
scores. In order to obtain hypotheses with confidence scores, please utilize
`ctc_decoder_predictions_tensor` function with the `preserve_frame_confidence` flag set to True.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding. When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of floats.
preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `token_confidence` in it. Here, `token_confidence` is a List of floats.
The length of the list corresponds to the number of recognized tokens.
preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `word_confidence` in it. Here, `word_confidence` is a List of floats.
The length of the list corresponds to the number of recognized words.
exclude_blank: Bool flag indicating that blank token confidence scores are to be excluded
from the `token_confidence`.
aggregation: Which aggregation type to use for collapsing per-token confidence into per-word confidence.
Valid options are `mean`, `min`, `max`, `prod`.
measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str).
Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
batch_dim_index: Index of the batch dimension of ``targets`` and ``predictions`` parameters of
``ctc_decoder_predictions_tensor`` methods. Can be either 0 or 1.
The config may further contain the following sub-dictionaries:
"greedy":
preserve_alignments: Same as above, overrides above value.
compute_timestamps: Same as above, overrides above value.
preserve_frame_confidence: Same as above, overrides above value.
confidence_measure_cfg: Same as above, overrides confidence_cfg.measure_cfg.
"beam":
beam_size: int, defining the beam size for beam search. Must be >= 1.
                    If beam_size == 1, will perform cached greedy search. This might give slightly different
                    results compared to the greedy search above.
return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
hypotheses after beam search has concluded. This flag is set by default.
beam_alpha: float, the strength of the Language model on the final score of a token.
final_score = acoustic_score + beam_alpha * lm_score + beam_beta * seq_length.
beam_beta: float, the strength of the sequence length penalty on the final score of a token.
final_score = acoustic_score + beam_alpha * lm_score + beam_beta * seq_length.
kenlm_path: str, path to a KenLM ARPA or .binary file (depending on the strategy chosen).
If the path is invalid (file is not found at path), will raise a deferred error at the moment
of calculation of beam search, so that users may update / change the decoding strategy
to point to the correct file.
        blank_id: The id of the CTC blank token.
"""
def __init__(
self, decoding_cfg, vocabulary,
):
blank_id = len(vocabulary)
self.vocabulary = vocabulary
self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])
super().__init__(decoding_cfg=decoding_cfg, blank_id=blank_id)
# Finalize Beam Search Decoding framework
if isinstance(self.decoding, ctc_beam_decoding.AbstractBeamCTCInfer):
self.decoding.set_vocabulary(self.vocabulary)
self.decoding.set_decoding_type('char')
def _aggregate_token_confidence(self, hypothesis: Hypothesis) -> List[float]:
"""
Implemented by subclass in order to aggregate token confidence to a word-level confidence.
Args:
hypothesis: Hypothesis
Returns:
A list of word-level confidence scores.
"""
return self._aggregate_token_confidence_chars(
self.decode_tokens_to_str(hypothesis.text[0]).split(), hypothesis.token_confidence
)
def decode_tokens_to_str(self, tokens: List[int]) -> str:
"""
Implemented by subclass in order to decoder a token list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
"""
hypothesis = ''.join(self.decode_ids_to_tokens(tokens))
return hypothesis
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
"""
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
"""
token_list = [self.labels_map[c] for c in tokens if c != self.blank_id]
return token_list
class WER(Metric):
"""
This metric computes numerator and denominator for Overall Word Error Rate (WER) between prediction and reference
texts. When doing distributed training/evaluation the result of ``res=WER(predictions, targets, target_lengths)``
calls will be all-reduced between all workers using SUM operations. Here ``res`` contains three numbers
    ``res=[wer, total_levenshtein_distance, total_number_of_words]``.
    If used with a PyTorch Lightning LightningModule, include wer_numerator and wer_denominator inside
    validation_step results. Then aggregate (sum) them at the end of the validation epoch to correctly
    compute the validation WER.
Example:
def validation_step(self, batch, batch_idx):
...
wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len)
self.val_outputs = {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom}
return self.val_outputs
def on_validation_epoch_end(self):
...
wer_num = torch.stack([x['val_wer_num'] for x in self.val_outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in self.val_outputs]).sum()
tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom}
self.val_outputs.clear() # free memory
return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
Args:
decoding: An instance of CTCDecoding.
use_cer: Whether to use Character Error Rate instead of Word Error Rate.
log_prediction: Whether to log a single decoded sample per call.
fold_consecutive: Whether repeated consecutive characters should be folded into one when decoding.
Returns:
        res: a tuple of 3 zero dimensional float32 ``torch.Tensor`` objects: a WER score, a sum of Levenshtein
            distances for all prediction-reference pairs, and the total number of words in all references.
"""
full_state_update: bool = True
def __init__(
self,
decoding: CTCDecoding,
use_cer=False,
log_prediction=True,
fold_consecutive=True,
dist_sync_on_step=False,
):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.decoding = decoding
self.use_cer = use_cer
self.log_prediction = log_prediction
self.fold_consecutive = fold_consecutive
self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
def update(
self,
predictions: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
predictions_lengths: torch.Tensor = None,
):
"""
Updates metric state.
Args:
predictions: an integer torch.Tensor of shape ``[Batch, Time, {Vocabulary}]`` (if ``batch_dim_index == 0``) or
``[Time, Batch]`` (if ``batch_dim_index == 1``)
targets: an integer torch.Tensor of shape ``[Batch, Time]`` (if ``batch_dim_index == 0``) or
``[Time, Batch]`` (if ``batch_dim_index == 1``)
target_lengths: an integer torch.Tensor of shape ``[Batch]``
predictions_lengths: an integer torch.Tensor of shape ``[Batch]``
"""
words = 0
scores = 0
references = []
with torch.no_grad():
# prediction_cpu_tensor = tensors[0].long().cpu()
targets_cpu_tensor = targets.long().cpu()
tgt_lenths_cpu_tensor = target_lengths.long().cpu()
# iterate over batch
for ind in range(targets_cpu_tensor.shape[0]):
tgt_len = tgt_lenths_cpu_tensor[ind].item()
target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()
reference = self.decoding.decode_tokens_to_str(target)
references.append(reference)
hypotheses, _ = self.decoding.ctc_decoder_predictions_tensor(
predictions, predictions_lengths, fold_consecutive=self.fold_consecutive
)
if self.log_prediction:
logging.info(f"\n")
logging.info(f"reference:{references[0]}")
logging.info(f"predicted:{hypotheses[0]}")
for h, r in zip(hypotheses, references):
if self.use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
words += len(r_list)
            # Compute Levenshtein distance
scores += editdistance.eval(h_list, r_list)
self.scores = torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)
self.words = torch.tensor(words, device=self.words.device, dtype=self.words.dtype)
# return torch.tensor([scores, words]).to(predictions.device)
def compute(self):
scores = self.scores.detach().float()
words = self.words.detach().float()
return scores / words, scores, words
@dataclass
class CTCDecodingConfig:
strategy: str = "greedy"
# preserve decoding alignments
preserve_alignments: Optional[bool] = None
# compute ctc time stamps
compute_timestamps: Optional[bool] = None
    # token representing word separator
word_seperator: str = " "
# type of timestamps to calculate
ctc_timestamp_type: str = "all" # can be char, word or all for both
# batch dimension
batch_dim_index: int = 0
# greedy decoding config
greedy: ctc_greedy_decoding.GreedyCTCInferConfig = ctc_greedy_decoding.GreedyCTCInferConfig()
# beam decoding config
beam: ctc_beam_decoding.BeamCTCInferConfig = ctc_beam_decoding.BeamCTCInferConfig(beam_size=4)
# confidence config
confidence_cfg: ConfidenceConfig = ConfidenceConfig()
# can be used to change temperature for decoding
temperature: float = 1.0
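# The sketch below is illustrative only (not part of the NeMo API): it shows one way to wire together
# `CTCDecodingConfig`, `CTCDecoding` and `WER` defined above. The toy vocabulary and the tensor arguments
# are assumed placeholders that follow the shapes documented in `WER.update`.
def _example_char_wer(
    log_probs: torch.Tensor, targets: torch.Tensor, target_lengths: torch.Tensor, encoded_lengths: torch.Tensor
) -> torch.Tensor:
    """Compute WER for a batch of greedy CTC predictions (illustrative sketch, not a tested entry point)."""
    cfg = CTCDecodingConfig(strategy="greedy")
    decoding = CTCDecoding(decoding_cfg=cfg, vocabulary=[" ", "a", "b", "c"])
    wer_metric = WER(decoding=decoding, use_cer=False, log_prediction=False)
    # `log_probs` is expected as [Batch, Time, Vocabulary + 1] log-probabilities, `targets` as [Batch, Time].
    wer_metric.update(
        predictions=log_probs, targets=targets, target_lengths=target_lengths, predictions_lengths=encoded_lengths
    )
    wer, _total_distance, _total_words = wer_metric.compute()
    return wer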
|
NeMo-main
|
nemo/collections/asr/metrics/wer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Iterable, List, Optional, Tuple
import torch
from torchmetrics import Metric
from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
from torchmetrics.audio.pit import PermutationInvariantTraining
from torchmetrics.audio.sdr import ScaleInvariantSignalDistortionRatio, SignalDistortionRatio
from torchmetrics.audio.snr import ScaleInvariantSignalNoiseRatio, SignalNoiseRatio
from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility
from nemo.utils import logging
__all__ = ['AudioMetricWrapper']
__VERIFIED_METRICS__ = [
PermutationInvariantTraining,
ScaleInvariantSignalDistortionRatio,
SignalDistortionRatio,
ScaleInvariantSignalNoiseRatio,
SignalNoiseRatio,
PerceptualEvaluationSpeechQuality,
ShortTimeObjectiveIntelligibility,
]
class AudioMetricWrapper(Metric):
"""A wrapper around an audio metric enabling selection of a specific channel
and handling of examples in a batch with varying valid input length.
Note:
This class assumes that the underlying metric uses averaging to calculate the
value over a batch. This assumption is only used by `forward` and does not
impact other methods, such as `update` and `compute`.
Args:
metric: base metric that should be wrapped. It is assumed that calculation
of the metric over a batch is done by averaging.
channel: Optional, for selecting a channel from `preds` and `target` signals.
If None, all channels are used.
metric_using_batch_averaging: Optional, used to denote that the base metric
is using averaging to calculate the metric value
for a batch.
"""
full_state_update: bool = False
def __init__(
self, metric: Metric, channel: Optional[int] = None, metric_using_batch_averaging: Optional[bool] = None
):
super().__init__()
if not isinstance(metric, Metric):
raise ValueError(f"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}")
if not metric_using_batch_averaging and type(metric) not in __VERIFIED_METRICS__:
raise ValueError(
f'Metric {metric} is not in verified metrics. {self.__class__.__name__} assumes reduction over batch is calculated using averaging. \n'
'This should not affect the final results, but values for a single batch obtained using `forward` may be inaccurate if using `input_length`. \n'
'To suppress this message, please confirm the used metric is using batch averaging and set "metric_using_batch_averaging = True"'
)
self._metric = metric
self._channel = channel
logging.debug('Setup metric %s, channel %s', metric, str(channel))
def _select_channel(self, preds: torch.Tensor, target: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Select a single channel from input signals.
Args:
preds: tensor with shape (B, C, T)
target: tensor with shape (B, C, T)
Returns:
Original tensors if self.channel is None, shape (B, C, T).
A single channel from input tensors if self.channel is set, shape (B, T)
"""
if self._channel is None:
return preds, target
else:
return preds[:, self._channel, ...], target[:, self._channel, ...]
@staticmethod
def _trim_inputs(
preds: torch.Tensor, target: torch.Tensor, input_length: torch.Tensor
) -> Iterable[Tuple[torch.Tensor, torch.Tensor]]:
"""Trim input tensors to input_length samples.
Args:
preds: tensor with shape (B, C, T)
            target: tensor with shape (B, C, T)
            input_length: tensor with shape (B,), containing the number of valid samples for each example
        Returns:
An iterable with tuples of (preds, target) with
the correct length.
"""
# Each example has a different length
for b_idx, b_len in enumerate(input_length):
b_preds = preds[b_idx, ..., :b_len]
b_target = target[b_idx, ..., :b_len]
yield b_preds, b_target
@staticmethod
def _batch_reduction(batch_values: List[torch.Tensor]) -> torch.Tensor:
"""Reduce metric values for each example in a batch to a single
value for the whole batch.
Args:
batch_values: list of metric values for each example in a batch
Returns:
Average metric value over the batch.
"""
return sum(batch_values) / len(batch_values)
def update(self, preds: torch.Tensor, target: torch.Tensor, input_length: Optional[torch.Tensor] = None) -> None:
"""Update the underlying metric by taking into account channel selector and input length.
Args:
preds: tensor with predictions, shape (B, C, T)
target: tensor with target signals, shape (B, C, T)
input_length: Optional, input tensor with length (in samples) of each signal in the batch, shape (B,).
If not provided, it is assumed that all samples are valid.
"""
preds, target = self._select_channel(preds=preds, target=target)
if input_length is None:
self._metric.update(preds=preds, target=target)
else:
# Each example in this batch has a different length
for b_preds, b_target in self._trim_inputs(preds=preds, target=target, input_length=input_length):
self._metric.update(preds=b_preds, target=b_target)
def compute(self) -> torch.Tensor:
"""Compute the underlying metric.
"""
return self._metric.compute()
def forward(
self, preds: torch.Tensor, target: torch.Tensor, input_length: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Call underlying forward method to add the batch statistics to the accumulated metric state
and return the result for the current batch.
Args:
preds: tensor with predictions, shape (B, C, T)
target: tensor with target signals, shape (B, C, T)
input_length: Optional, input tensor with length (in samples) of each signal in the batch, shape (B,).
If not provided, it is assumed that all samples are valid.
Returns:
Underlying metric averaged on the current batch.
"""
preds, target = self._select_channel(preds=preds, target=target)
if input_length is None:
return self._metric(preds=preds, target=target)
else:
# Each example in this batch has a different length
batch_values = []
for b_preds, b_target in self._trim_inputs(preds=preds, target=target, input_length=input_length):
batch_values.append(self._metric(preds=b_preds, target=b_target))
# Average over the batch
return self._batch_reduction(batch_values)
def reset(self) -> None:
"""Reset the underlying metric.
"""
self._metric.reset()
def __repr__(self) -> str:
"""Return string representation of the object.
"""
_op_metric = f"(metric: {repr(self._metric)}, channel: {self._channel})"
repr_str = self.__class__.__name__ + _op_metric
return repr_str
def _wrap_compute(self, compute: Callable) -> Callable:
"""Overwrite to do nothing, as in CompositionalMetric.
"""
return compute
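# Illustrative sketch (not part of the NeMo API): wrapping a torchmetrics audio metric so that the first
# channel is scored and per-example valid lengths are respected. The random tensors below are placeholders
# standing in for estimated and reference signals.
def _example_audio_metric_wrapper() -> torch.Tensor:
    """Compute SI-SDR on channel 0 of a toy two-example batch (illustrative sketch)."""
    wrapped = AudioMetricWrapper(metric=ScaleInvariantSignalDistortionRatio(), channel=0)
    preds = torch.randn(2, 1, 16000)  # (batch, channels, time)
    target = torch.randn(2, 1, 16000)
    input_length = torch.tensor([16000, 12000])  # number of valid samples per example
    wrapped.update(preds=preds, target=target, input_length=input_length)
    return wrapped.compute()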
|
NeMo-main
|
nemo/collections/asr/metrics/audio.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from itertools import permutations
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from pyannote.core import Segment, Timeline
from pyannote.metrics.diarization import DiarizationErrorRate
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.parts.utils.optimization_utils import linear_sum_assignment
from nemo.utils import logging
__all__ = [
'score_labels',
'calculate_session_cpWER',
'calculate_session_cpWER_bruteforce',
'concat_perm_word_error_rate',
]
def get_partial_ref_labels(pred_labels: List[str], ref_labels: List[str]) -> List[str]:
"""
For evaluation of online diarization performance, generate partial reference labels
from the last prediction time.
Args:
pred_labels (list[str]): list of partial prediction labels
ref_labels (list[str]): list of full reference labels
Returns:
ref_labels_out (list[str]): list of partial reference labels
"""
# If there is no reference, return empty list
if len(ref_labels) == 0:
return []
# If there is no prediction, set the last prediction time to 0
if len(pred_labels) == 0:
last_pred_time = 0
else:
        # The latest prediction time in the prediction labels
last_pred_time = max([float(labels.split()[1]) for labels in pred_labels])
ref_labels_out = []
for label in ref_labels:
start, end, speaker = label.split()
start, end = float(start), float(end)
# If the current [start, end] interval extends beyond the end of hypothesis time stamps
if start < last_pred_time:
end_time = min(end, last_pred_time)
label = f"{start} {end_time} {speaker}"
ref_labels_out.append(label)
# Other cases where the current [start, end] interval is before the last prediction time
elif end < last_pred_time:
ref_labels_out.append(label)
return ref_labels_out
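# Illustrative example for `get_partial_ref_labels` (labels use the assumed "<start> <end> <speaker>" format):
#   pred_labels = ["0.0 2.0 speaker_0"]
#   ref_labels = ["0.0 3.0 speaker_0", "3.0 5.0 speaker_1"]
#   get_partial_ref_labels(pred_labels, ref_labels) -> ["0.0 2.0 speaker_0"]
#   (the reference is truncated at the last prediction time, 2.0, and later segments are dropped)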
def get_online_DER_stats(
DER: float,
CER: float,
FA: float,
MISS: float,
diar_eval_count: int,
der_stat_dict: Dict[str, float],
deci: int = 3,
) -> Tuple[Dict[str, float], Dict[str, float]]:
"""
For evaluation of online diarization performance, add cumulative, average, and maximum DER/CER.
Args:
DER (float): Diarization Error Rate from the start to the current point
CER (float): Confusion Error Rate from the start to the current point
FA (float): False Alarm from the start to the current point
MISS (float): Miss rate from the start to the current point
diar_eval_count (int): Number of evaluation sessions
der_stat_dict (dict): Dictionary containing cumulative, average, and maximum DER/CER
deci (int): Number of decimal places to round
Returns:
der_dict (dict): Dictionary containing DER, CER, FA, and MISS
der_stat_dict (dict): Dictionary containing cumulative, average, and maximum DER/CER
"""
der_dict = {
"DER": round(100 * DER, deci),
"CER": round(100 * CER, deci),
"FA": round(100 * FA, deci),
"MISS": round(100 * MISS, deci),
}
der_stat_dict['cum_DER'] += DER
der_stat_dict['cum_CER'] += CER
der_stat_dict['avg_DER'] = round(100 * der_stat_dict['cum_DER'] / diar_eval_count, deci)
der_stat_dict['avg_CER'] = round(100 * der_stat_dict['cum_CER'] / diar_eval_count, deci)
der_stat_dict['max_DER'] = round(max(der_dict['DER'], der_stat_dict['max_DER']), deci)
der_stat_dict['max_CER'] = round(max(der_dict['CER'], der_stat_dict['max_CER']), deci)
return der_dict, der_stat_dict
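# Illustrative example for `get_online_DER_stats` (the running-statistics dict below is an assumed initial state):
#   der_stat_dict = {"cum_DER": 0.0, "cum_CER": 0.0, "avg_DER": 0.0,
#                    "avg_CER": 0.0, "max_DER": 0.0, "max_CER": 0.0}
#   der_dict, der_stat_dict = get_online_DER_stats(
#       DER=0.12, CER=0.03, FA=0.02, MISS=0.05, diar_eval_count=1, der_stat_dict=der_stat_dict
#   )
#   der_dict -> {"DER": 12.0, "CER": 3.0, "FA": 2.0, "MISS": 5.0}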
def uem_timeline_from_file(uem_file, uniq_name=''):
"""
Generate pyannote timeline segments for uem file
<UEM> file format
UNIQ_SPEAKER_ID CHANNEL START_TIME END_TIME
"""
timeline = Timeline(uri=uniq_name)
with open(uem_file, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
speaker_id, channel, start_time, end_time = line.split()
timeline.add(Segment(float(start_time), float(end_time)))
return timeline
def score_labels(
AUDIO_RTTM_MAP, all_reference, all_hypothesis, collar=0.25, ignore_overlap=True, verbose: bool = True
) -> Optional[Tuple[DiarizationErrorRate, Dict]]:
"""
Calculate DER, CER, FA and MISS rate from hypotheses and references. Hypothesis results are
coming from Pyannote-formatted speaker diarization results and References are coming from
Pyannote-formatted RTTM data.
Args:
        AUDIO_RTTM_MAP (dict): Dictionary containing information provided from the manifest path
all_reference (list[uniq_name,Annotation]): reference annotations for score calculation
all_hypothesis (list[uniq_name,Annotation]): hypothesis annotations for score calculation
verbose (bool): Warns if RTTM file is not found.
Returns:
metric (pyannote.DiarizationErrorRate): Pyannote Diarization Error Rate metric object. This object contains detailed scores of each audiofile.
        mapping (dict): Mapping dict containing the speaker-label mapping for each audio input
        itemized_errors (tuple): Tuple of (DER, CER, FA, MISS) error rates computed over all sessions
< Caveat >
Unlike md-eval.pl, "no score" collar in pyannote.metrics is the maximum length of
"no score" collar from left to right. Therefore, if 0.25s is applied for "no score"
collar in md-eval.pl, 0.5s should be applied for pyannote.metrics.
"""
metric = None
if len(all_reference) == len(all_hypothesis):
metric = DiarizationErrorRate(collar=2 * collar, skip_overlap=ignore_overlap)
mapping_dict = {}
for (reference, hypothesis) in zip(all_reference, all_hypothesis):
ref_key, ref_labels = reference
_, hyp_labels = hypothesis
uem = AUDIO_RTTM_MAP[ref_key].get('uem_filepath', None)
if uem is not None:
uem = uem_timeline_from_file(uem_file=uem, uniq_name=ref_key)
metric(ref_labels, hyp_labels, uem=uem, detailed=True)
mapping_dict[ref_key] = metric.optimal_mapping(ref_labels, hyp_labels)
DER = abs(metric)
CER = metric['confusion'] / metric['total']
FA = metric['false alarm'] / metric['total']
MISS = metric['missed detection'] / metric['total']
itemized_errors = (DER, CER, FA, MISS)
logging.info(
"Cumulative Results for collar {} sec and ignore_overlap {}: \n FA: {:.4f}\t MISS {:.4f}\t \
Diarization ER: {:.4f}\t, Confusion ER:{:.4f}".format(
collar, ignore_overlap, FA, MISS, DER, CER
)
)
return metric, mapping_dict, itemized_errors
elif verbose:
logging.warning(
"Check if each ground truth RTTMs were present in the provided manifest file. Skipping calculation of Diariazation Error Rate"
)
return None
def evaluate_der(audio_rttm_map_dict, all_reference, all_hypothesis, diar_eval_mode='all'):
"""
Evaluate with a selected diarization evaluation scheme
AUDIO_RTTM_MAP (dict):
Dictionary containing information provided from manifestpath
all_reference (list[uniq_name,annotation]):
reference annotations for score calculation
all_hypothesis (list[uniq_name,annotation]):
hypothesis annotations for score calculation
diar_eval_mode (str):
Diarization evaluation modes
diar_eval_mode == "full":
DIHARD challenge style evaluation, the most strict way of evaluating diarization
(collar, ignore_overlap) = (0.0, False)
diar_eval_mode == "fair":
Evaluation setup used in VoxSRC challenge
(collar, ignore_overlap) = (0.25, False)
diar_eval_mode == "forgiving":
Traditional evaluation setup
(collar, ignore_overlap) = (0.25, True)
diar_eval_mode == "all":
Compute all three modes (default)
"""
eval_settings = []
if diar_eval_mode == "full":
eval_settings = [(0.0, False)]
elif diar_eval_mode == "fair":
eval_settings = [(0.25, False)]
elif diar_eval_mode == "forgiving":
eval_settings = [(0.25, True)]
elif diar_eval_mode == "all":
eval_settings = [(0.0, False), (0.25, False), (0.25, True)]
else:
raise ValueError("`diar_eval_mode` variable contains an unsupported value")
for collar, ignore_overlap in eval_settings:
diar_score = score_labels(
AUDIO_RTTM_MAP=audio_rttm_map_dict,
all_reference=all_reference,
all_hypothesis=all_hypothesis,
collar=collar,
ignore_overlap=ignore_overlap,
)
return diar_score
def calculate_session_cpWER_bruteforce(spk_hypothesis: List[str], spk_reference: List[str]) -> Tuple[float, str, str]:
"""
Calculate cpWER with actual permutations in brute-force way when LSA algorithm cannot deliver the correct result.
Args:
spk_hypothesis (list):
List containing the hypothesis transcript for each speaker. A list containing the sequence
of words is assigned for each speaker.
Example:
>>> spk_hypothesis = ["hey how are you we that's nice", "i'm good yes hi is your sister"]
spk_reference (list):
List containing the reference transcript for each speaker. A list containing the sequence
of words is assigned for each speaker.
Example:
>>> spk_reference = ["hi how are you well that's nice", "i'm good yeah how is your sister"]
Returns:
cpWER (float):
cpWER value for the given session.
min_perm_hyp_trans (str):
Hypothesis transcript containing the permutation that minimizes WER. Words are separated by spaces.
ref_trans (str):
Reference transcript in an arbitrary permutation. Words are separated by spaces.
"""
p_wer_list, permed_hyp_lists = [], []
ref_word_list = []
    # Concatenate the reference transcripts into a list
for spk_id, word_list in enumerate(spk_reference):
ref_word_list.append(word_list)
ref_trans = " ".join(ref_word_list)
# Calculate WER for every permutation
for hyp_word_list in permutations(spk_hypothesis):
hyp_trans = " ".join(hyp_word_list)
permed_hyp_lists.append(hyp_trans)
# Calculate a WER value of the permuted and concatenated transcripts
p_wer = word_error_rate(hypotheses=[hyp_trans], references=[ref_trans])
p_wer_list.append(p_wer)
# Find the lowest WER and its hypothesis transcript
argmin_idx = np.argmin(p_wer_list)
min_perm_hyp_trans = permed_hyp_lists[argmin_idx]
cpWER = p_wer_list[argmin_idx]
return cpWER, min_perm_hyp_trans, ref_trans
def calculate_session_cpWER(
spk_hypothesis: List[str], spk_reference: List[str], use_lsa_only: bool = False
) -> Tuple[float, str, str]:
"""
Calculate a session-level concatenated minimum-permutation word error rate (cpWER) value. cpWER is
a scoring method that can evaluate speaker diarization and speech recognition performance at the same time.
cpWER is calculated by going through the following steps.
1. Concatenate all utterances of each speaker for both reference and hypothesis files.
2. Compute the WER between the reference and all possible speaker permutations of the hypothesis.
3. Pick the lowest WER among them (this is assumed to be the best permutation: `min_perm_hyp_trans`).
cpWER was proposed in the following article:
CHiME-6 Challenge: Tackling Multispeaker Speech Recognition for Unsegmented Recordings
https://arxiv.org/pdf/2004.09249.pdf
Implementation:
- Brute force permutation method for calculating cpWER has a time complexity of `O(n!)`.
- To reduce the computational burden, linear sum assignment (LSA) algorithm is applied
(also known as Hungarian algorithm) to find the permutation that leads to the lowest WER.
        - In this implementation, instead of calculating all WER values for all permutations of the hypotheses,
          we only calculate WER values of (estimated number of speakers) x (reference number of speakers)
          combinations with `O(n^2)` time complexity and then select the permutation that yields the lowest
WER based on LSA algorithm.
- LSA algorithm has `O(n^3)` time complexity in the worst case.
- We cannot use LSA algorithm to find the best permutation when there are more hypothesis speakers
than reference speakers. In this case, we use the brute-force permutation method instead.
Example:
>>> transcript_A = ['a', 'b', 'c', 'd', 'e', 'f'] # 6 speakers
>>> transcript_B = ['a c b d', 'e f'] # 2 speakers
[case1] hypothesis is transcript_A, reference is transcript_B
[case2] hypothesis is transcript_B, reference is transcript_A
LSA algorithm based cpWER is:
[case1] 4/6 (4 deletion)
[case2] 2/6 (2 substitution)
brute force permutation based cpWER is:
[case1] 0
[case2] 2/6 (2 substitution)
Args:
spk_hypothesis (list):
List containing the hypothesis transcript for each speaker. A list containing the sequence
of words is assigned for each speaker.
Example:
>>> spk_hypothesis = ["hey how are you we that's nice", "i'm good yes hi is your sister"]
spk_reference (list):
List containing the reference transcript for each speaker. A list containing the sequence
of words is assigned for each speaker.
Example:
>>> spk_reference = ["hi how are you well that's nice", "i'm good yeah how is your sister"]
Returns:
cpWER (float):
cpWER value for the given session.
min_perm_hyp_trans (str):
Hypothesis transcript containing the permutation that minimizes WER. Words are separated by spaces.
ref_trans (str):
Reference transcript in an arbitrary permutation. Words are separated by spaces.
"""
# Get all pairs of (estimated num of spks) x (reference num of spks) combinations
hyp_ref_pair = [spk_hypothesis, spk_reference]
all_pairs = list(itertools.product(*hyp_ref_pair))
num_hyp_spks, num_ref_spks = len(spk_hypothesis), len(spk_reference)
if not use_lsa_only and num_ref_spks < num_hyp_spks:
# Brute force algorithm when there are more speakers in the hypothesis
cpWER, min_perm_hyp_trans, ref_trans = calculate_session_cpWER_bruteforce(spk_hypothesis, spk_reference)
else:
# Calculate WER for each speaker in hypothesis with reference
# There are (number of hyp speakers) x (number of ref speakers) combinations
lsa_wer_list = []
for (spk_hyp_trans, spk_ref_trans) in all_pairs:
spk_wer = word_error_rate(hypotheses=[spk_hyp_trans], references=[spk_ref_trans])
lsa_wer_list.append(spk_wer)
# Make a cost matrix and calculate a linear sum assignment on the cost matrix.
# Row is hypothesis index and column is reference index
cost_wer = torch.tensor(lsa_wer_list).reshape([len(spk_hypothesis), len(spk_reference)])
row_hyp_ind, col_ref_ind = linear_sum_assignment(cost_wer)
# In case where hypothesis has more speakers, add words from residual speakers
hyp_permed = [spk_hypothesis[k] for k in np.argsort(col_ref_ind)]
min_perm_hyp_trans = " ".join(hyp_permed)
# Concatenate the reference transcripts into a string variable
ref_trans = " ".join(spk_reference)
# Calculate a WER value from the permutation that yields the lowest WER.
cpWER = word_error_rate(hypotheses=[min_perm_hyp_trans], references=[ref_trans])
return cpWER, min_perm_hyp_trans, ref_trans
def concat_perm_word_error_rate(
spk_hypotheses: List[List[str]], spk_references: List[List[str]]
) -> Tuple[List[float], List[str], List[str]]:
"""
Launcher function for `calculate_session_cpWER`. Calculate session-level cpWER and average cpWER.
For detailed information about cpWER, see docstrings of `calculate_session_cpWER` function.
As opposed to `cpWER`, `WER` is the regular WER value where the hypothesis transcript contains
words in temporal order regardless of the speakers. `WER` value can be different from cpWER value,
depending on the speaker diarization results.
Args:
spk_hypotheses (list):
List containing the lists of speaker-separated hypothesis transcripts.
spk_references (list):
List containing the lists of speaker-separated reference transcripts.
Returns:
cpWER (float):
List containing cpWER values for each session
min_perm_hyp_trans (list):
List containing transcripts that lead to the minimum WER in string format
ref_trans (list):
List containing concatenated reference transcripts
"""
if len(spk_hypotheses) != len(spk_references):
raise ValueError(
"In concatenated-minimum permutation word error rate calculation, "
"hypotheses and reference lists must have the same number of elements. But got arguments:"
f"{len(spk_hypotheses)} and {len(spk_references)} correspondingly"
)
cpWER_values, hyps_spk, refs_spk = [], [], []
for (spk_hypothesis, spk_reference) in zip(spk_hypotheses, spk_references):
cpWER, min_hypothesis, concat_reference = calculate_session_cpWER(spk_hypothesis, spk_reference)
cpWER_values.append(cpWER)
hyps_spk.append(min_hypothesis)
refs_spk.append(concat_reference)
return cpWER_values, hyps_spk, refs_spk
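# Illustrative sketch (not part of the NeMo API): scoring two toy sessions with cpWER. The speaker-separated
# transcripts below are assumed placeholders taken from the docstring examples above.
def _example_cpwer() -> Tuple[List[float], List[str], List[str]]:
    """Compute session-level cpWER values for two toy sessions (illustrative sketch)."""
    spk_hypotheses = [
        ["hey how are you we that's nice", "i'm good yes hi is your sister"],
        ["a b c", "d e f"],
    ]
    spk_references = [
        ["hi how are you well that's nice", "i'm good yeah how is your sister"],
        ["a b c", "d e f"],
    ]
    cpwer_values, min_perm_hyps, concat_refs = concat_perm_word_error_rate(spk_hypotheses, spk_references)
    return cpwer_values, min_perm_hyps, concat_refs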
|
NeMo-main
|
nemo/collections/asr/metrics/der.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
from abc import abstractmethod
from dataclasses import dataclass, is_dataclass
from typing import Callable, Dict, List, Optional, Tuple, Union
import editdistance
import numpy as np
import torch
from omegaconf import OmegaConf
from torchmetrics import Metric
from nemo.collections.asr.metrics.wer import move_dimension_to_the_front
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts.submodules import rnnt_greedy_decoding as greedy_decode
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceConfig, ConfidenceMixin
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis, NBestHypotheses
from nemo.utils import logging
__all__ = ['RNNTDecoding', 'RNNTWER']
class AbstractRNNTDecoding(ConfidenceMixin):
"""
Used for performing RNN-T auto-regressive decoding of the Decoder+Joint network given the encoder state.
Args:
decoding_cfg: A dict-like object which contains the following key-value pairs.
strategy: str value which represents the type of decoding that can occur.
Possible values are :
- greedy, greedy_batch (for greedy decoding).
- beam, tsd, alsd (for beam search decoding).
compute_hypothesis_token_set: A bool flag, which determines whether to compute a list of decoded
tokens as well as the decoded string. Default is False in order to avoid double decoding
unless required.
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1), Tensor(scalar, label after argmax)).
In order to obtain this hypothesis, please utilize `rnnt_decoder_predictions_tensor` function
with the `return_hypotheses` flag set to True.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
compute_timestamps: A bool flag, which determines whether to compute the character/subword, or
word based timestamp mapping the output log-probabilities to discrete intervals of timestamps.
The timestamps will be available in the returned Hypothesis.timestep as a dictionary.
rnnt_timestamp_type: A str value, which represents the types of timestamps that should be calculated.
Can take the following values - "char" for character/subword time stamps, "word" for word level
time stamps and "all" (default), for both character level and word level time stamps.
            word_seperator: Str token representing the separator between words.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding (sample / batched). When set to true, the Hypothesis will contain
                the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence
scores. In order to obtain hypotheses with confidence scores, please utilize
`rnnt_decoder_predictions_tensor` function with the `preserve_frame_confidence` flag set to True.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding (sample / batched). When set to true, the Hypothesis will contain
                    the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `token_confidence` in it. Here, `token_confidence` is a List of floats.
The length of the list corresponds to the number of recognized tokens.
preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `word_confidence` in it. Here, `word_confidence` is a List of floats.
The length of the list corresponds to the number of recognized words.
exclude_blank: Bool flag indicating that blank token confidence scores are to be excluded
from the `token_confidence`.
aggregation: Which aggregation type to use for collapsing per-token confidence into per-word confidence.
Valid options are `mean`, `min`, `max`, `prod`.
measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str).
Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
The config may further contain the following sub-dictionaries:
"greedy":
max_symbols: int, describing the maximum number of target tokens to decode per
timestep during greedy decoding. Setting to larger values allows longer sentences
to be decoded, at the cost of increased execution time.
preserve_frame_confidence: Same as above, overrides above value.
confidence_measure_cfg: Same as above, overrides confidence_cfg.measure_cfg.
"beam":
beam_size: int, defining the beam size for beam search. Must be >= 1.
                    If beam_size == 1, will perform cached greedy search. This might give slightly different
                    results compared to the greedy search above.
score_norm: optional bool, whether to normalize the returned beam score in the hypotheses.
Set to True by default.
return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
hypotheses after beam search has concluded. This flag is set by default.
tsd_max_sym_exp: optional int, determines number of symmetric expansions of the target symbols
per timestep of the acoustic model. Larger values will allow longer sentences to be decoded,
at increased cost to execution time.
alsd_max_target_len: optional int or float, determines the potential maximum target sequence length.
If an integer is provided, it can decode sequences of that particular maximum length.
If a float is provided, it can decode sequences of int(alsd_max_target_len * seq_len),
where seq_len is the length of the acoustic model output (T).
NOTE:
If a float is provided, it can be greater than 1!
By default, a float of 2.0 is used so that a target sequence can be at most twice
as long as the acoustic model output length T.
maes_num_steps: Number of adaptive steps to take. From the paper, 2 steps is generally sufficient,
and can be reduced to 1 to improve decoding speed while sacrificing some accuracy. int > 0.
                maes_prefix_alpha: Maximum prefix length in prefix search. Must be an integer, and it is advised
                    to keep this as 1 in order to reduce the cost of the subsequent expensive beam search. int >= 0.
maes_expansion_beta: Maximum number of prefix expansions allowed, in addition to the beam size.
Effectively, the number of hypothesis = beam_size + maes_expansion_beta. Must be an int >= 0,
and affects the speed of inference since large values will perform large beam search in the next step.
maes_expansion_gamma: Float pruning threshold used in the prune-by-value step when computing the expansions.
The default (2.3) is selected from the paper. It performs a comparison (max_log_prob - gamma <= log_prob[v])
where v is all vocabulary indices in the Vocab set and max_log_prob is the "most" likely token to be
predicted. Gamma therefore provides a margin of additional tokens which can be potential candidates for
expansion apart from the "most likely" candidate.
Lower values will reduce the number of expansions (by increasing pruning-by-value, thereby improving speed
but hurting accuracy). Higher values will increase the number of expansions (by reducing pruning-by-value,
thereby reducing speed but potentially improving accuracy). This is a hyper parameter to be experimentally
tuned on a validation set.
softmax_temperature: Scales the logits of the joint prior to computing log_softmax.
decoder: The Decoder/Prediction network module.
joint: The Joint network module.
blank_id: The id of the RNNT blank token.
"""
def __init__(self, decoding_cfg, decoder, joint, blank_id: int):
super(AbstractRNNTDecoding, self).__init__()
# Convert dataclass to config object
if is_dataclass(decoding_cfg):
decoding_cfg = OmegaConf.structured(decoding_cfg)
self.cfg = decoding_cfg
self.blank_id = blank_id
self.num_extra_outputs = joint.num_extra_outputs
self.big_blank_durations = self.cfg.get("big_blank_durations", None)
self.durations = self.cfg.get("durations", None)
self.compute_hypothesis_token_set = self.cfg.get("compute_hypothesis_token_set", False)
self.compute_langs = decoding_cfg.get('compute_langs', False)
self.preserve_alignments = self.cfg.get('preserve_alignments', None)
self.joint_fused_batch_size = self.cfg.get('fused_batch_size', None)
self.compute_timestamps = self.cfg.get('compute_timestamps', None)
self.word_seperator = self.cfg.get('word_seperator', ' ')
if self.durations is not None: # this means it's a TDT model.
if blank_id == 0:
raise ValueError("blank_id must equal len(non_blank_vocabs) for TDT models")
if self.big_blank_durations is not None:
raise ValueError("duration and big_blank_durations can't both be not None")
if self.cfg.strategy not in ['greedy', 'greedy_batch']:
raise ValueError("currently only greedy and greedy_batch inference is supported for TDT models")
if self.big_blank_durations is not None: # this means it's a multi-blank model.
if blank_id == 0:
raise ValueError("blank_id must equal len(vocabs) for multi-blank RNN-T models")
if self.cfg.strategy not in ['greedy', 'greedy_batch']:
raise ValueError(
"currently only greedy and greedy_batch inference is supported for multi-blank models"
)
possible_strategies = ['greedy', 'greedy_batch', 'beam', 'tsd', 'alsd', 'maes']
if self.cfg.strategy not in possible_strategies:
raise ValueError(f"Decoding strategy must be one of {possible_strategies}")
# Update preserve alignments
if self.preserve_alignments is None:
if self.cfg.strategy in ['greedy', 'greedy_batch']:
self.preserve_alignments = self.cfg.greedy.get('preserve_alignments', False)
elif self.cfg.strategy in ['beam', 'tsd', 'alsd', 'maes']:
self.preserve_alignments = self.cfg.beam.get('preserve_alignments', False)
# Update compute timestamps
if self.compute_timestamps is None:
if self.cfg.strategy in ['greedy', 'greedy_batch']:
self.compute_timestamps = self.cfg.greedy.get('compute_timestamps', False)
elif self.cfg.strategy in ['beam', 'tsd', 'alsd', 'maes']:
self.compute_timestamps = self.cfg.beam.get('compute_timestamps', False)
# Test if alignments are being preserved for RNNT
if self.compute_timestamps is True and self.preserve_alignments is False:
raise ValueError("If `compute_timesteps` flag is set, then `preserve_alignments` flag must also be set.")
# initialize confidence-related fields
self._init_confidence(self.cfg.get('confidence_cfg', None))
# Confidence estimation is not implemented for these strategies
if (
not self.preserve_frame_confidence
and self.cfg.strategy in ['beam', 'tsd', 'alsd', 'maes']
and self.cfg.beam.get('preserve_frame_confidence', False)
):
raise NotImplementedError(f"Confidence calculation is not supported for strategy `{self.cfg.strategy}`")
if self.cfg.strategy == 'greedy':
if self.big_blank_durations is None:
if self.durations is None:
self.decoding = greedy_decode.GreedyRNNTInfer(
decoder_model=decoder,
joint_model=joint,
blank_index=self.blank_id,
max_symbols_per_step=(
self.cfg.greedy.get('max_symbols', None)
or self.cfg.greedy.get('max_symbols_per_step', None)
),
preserve_alignments=self.preserve_alignments,
preserve_frame_confidence=self.preserve_frame_confidence,
confidence_measure_cfg=self.confidence_measure_cfg,
)
else:
self.decoding = greedy_decode.GreedyTDTInfer(
decoder_model=decoder,
joint_model=joint,
blank_index=self.blank_id,
durations=self.durations,
max_symbols_per_step=(
self.cfg.greedy.get('max_symbols', None)
or self.cfg.greedy.get('max_symbols_per_step', None)
),
preserve_alignments=self.preserve_alignments,
preserve_frame_confidence=self.preserve_frame_confidence,
confidence_measure_cfg=self.confidence_measure_cfg,
)
else:
self.decoding = greedy_decode.GreedyMultiblankRNNTInfer(
decoder_model=decoder,
joint_model=joint,
blank_index=self.blank_id,
big_blank_durations=self.big_blank_durations,
max_symbols_per_step=(
self.cfg.greedy.get('max_symbols', None) or self.cfg.greedy.get('max_symbols_per_step', None)
),
preserve_alignments=self.preserve_alignments,
preserve_frame_confidence=self.preserve_frame_confidence,
confidence_measure_cfg=self.confidence_measure_cfg,
)
elif self.cfg.strategy == 'greedy_batch':
if self.big_blank_durations is None:
if self.durations is None:
self.decoding = greedy_decode.GreedyBatchedRNNTInfer(
decoder_model=decoder,
joint_model=joint,
blank_index=self.blank_id,
max_symbols_per_step=(
self.cfg.greedy.get('max_symbols', None)
or self.cfg.greedy.get('max_symbols_per_step', None)
),
preserve_alignments=self.preserve_alignments,
preserve_frame_confidence=self.preserve_frame_confidence,
confidence_measure_cfg=self.confidence_measure_cfg,
)
else:
self.decoding = greedy_decode.GreedyBatchedTDTInfer(
decoder_model=decoder,
joint_model=joint,
blank_index=self.blank_id,
durations=self.durations,
max_symbols_per_step=(
self.cfg.greedy.get('max_symbols', None)
or self.cfg.greedy.get('max_symbols_per_step', None)
),
preserve_alignments=self.preserve_alignments,
preserve_frame_confidence=self.preserve_frame_confidence,
confidence_measure_cfg=self.confidence_measure_cfg,
)
else:
self.decoding = greedy_decode.GreedyBatchedMultiblankRNNTInfer(
decoder_model=decoder,
joint_model=joint,
blank_index=self.blank_id,
big_blank_durations=self.big_blank_durations,
max_symbols_per_step=(
self.cfg.greedy.get('max_symbols', None) or self.cfg.greedy.get('max_symbols_per_step', None)
),
preserve_alignments=self.preserve_alignments,
preserve_frame_confidence=self.preserve_frame_confidence,
confidence_measure_cfg=self.confidence_measure_cfg,
)
elif self.cfg.strategy == 'beam':
self.decoding = beam_decode.BeamRNNTInfer(
decoder_model=decoder,
joint_model=joint,
beam_size=self.cfg.beam.beam_size,
return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
search_type='default',
score_norm=self.cfg.beam.get('score_norm', True),
softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
preserve_alignments=self.preserve_alignments,
)
elif self.cfg.strategy == 'tsd':
self.decoding = beam_decode.BeamRNNTInfer(
decoder_model=decoder,
joint_model=joint,
beam_size=self.cfg.beam.beam_size,
return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
search_type='tsd',
score_norm=self.cfg.beam.get('score_norm', True),
tsd_max_sym_exp_per_step=self.cfg.beam.get('tsd_max_sym_exp', 10),
softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
preserve_alignments=self.preserve_alignments,
)
elif self.cfg.strategy == 'alsd':
self.decoding = beam_decode.BeamRNNTInfer(
decoder_model=decoder,
joint_model=joint,
beam_size=self.cfg.beam.beam_size,
return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
search_type='alsd',
score_norm=self.cfg.beam.get('score_norm', True),
alsd_max_target_len=self.cfg.beam.get('alsd_max_target_len', 2),
softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
preserve_alignments=self.preserve_alignments,
)
elif self.cfg.strategy == 'maes':
self.decoding = beam_decode.BeamRNNTInfer(
decoder_model=decoder,
joint_model=joint,
beam_size=self.cfg.beam.beam_size,
return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
search_type='maes',
score_norm=self.cfg.beam.get('score_norm', True),
maes_num_steps=self.cfg.beam.get('maes_num_steps', 2),
maes_prefix_alpha=self.cfg.beam.get('maes_prefix_alpha', 1),
maes_expansion_gamma=self.cfg.beam.get('maes_expansion_gamma', 2.3),
maes_expansion_beta=self.cfg.beam.get('maes_expansion_beta', 2.0),
softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
preserve_alignments=self.preserve_alignments,
ngram_lm_model=self.cfg.beam.get('ngram_lm_model', None),
ngram_lm_alpha=self.cfg.beam.get('ngram_lm_alpha', 0.0),
hat_subtract_ilm=self.cfg.beam.get('hat_subtract_ilm', False),
hat_ilm_weight=self.cfg.beam.get('hat_ilm_weight', 0.0),
)
else:
raise ValueError(
f"Incorrect decoding strategy supplied. Must be one of {possible_strategies}\n"
f"but was provided {self.cfg.strategy}"
)
# Update the joint fused batch size or disable it entirely if needed.
self.update_joint_fused_batch_size()
def rnnt_decoder_predictions_tensor(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
return_hypotheses: bool = False,
partial_hypotheses: Optional[List[Hypothesis]] = None,
) -> Tuple[List[str], Optional[List[List[str]]], Optional[Union[Hypothesis, NBestHypotheses]]]:
"""
Decode an encoder output by autoregressive decoding of the Decoder+Joint networks.
Args:
encoder_output: torch.Tensor of shape [B, D, T].
encoded_lengths: torch.Tensor containing lengths of the padded encoder outputs. Shape [B].
return_hypotheses: bool. If set to True it will return list of Hypothesis or NBestHypotheses
Returns:
If `return_best_hypothesis` is set:
A tuple (hypotheses, None):
hypotheses - list of Hypothesis (best hypothesis per sample).
Look at rnnt_utils.Hypothesis for more information.
If `return_best_hypothesis` is not set:
A tuple(hypotheses, all_hypotheses)
hypotheses - list of Hypothesis (best hypothesis per sample).
Look at rnnt_utils.Hypothesis for more information.
all_hypotheses - list of NBestHypotheses. Each NBestHypotheses further contains a sorted
list of all the hypotheses of the model per sample.
Look at rnnt_utils.NBestHypotheses for more information.
"""
# Compute hypotheses
with torch.inference_mode():
hypotheses_list = self.decoding(
encoder_output=encoder_output, encoded_lengths=encoded_lengths, partial_hypotheses=partial_hypotheses
) # type: [List[Hypothesis]]
# extract the hypotheses
hypotheses_list = hypotheses_list[0] # type: List[Hypothesis]
prediction_list = hypotheses_list
if isinstance(prediction_list[0], NBestHypotheses):
hypotheses = []
all_hypotheses = []
for nbest_hyp in prediction_list: # type: NBestHypotheses
n_hyps = nbest_hyp.n_best_hypotheses # Extract all hypotheses for this sample
decoded_hyps = self.decode_hypothesis(n_hyps) # type: List[str]
# If computing timestamps
if self.compute_timestamps is True:
timestamp_type = self.cfg.get('rnnt_timestamp_type', 'all')
for hyp_idx in range(len(decoded_hyps)):
decoded_hyps[hyp_idx] = self.compute_rnnt_timestamps(decoded_hyps[hyp_idx], timestamp_type)
hypotheses.append(decoded_hyps[0]) # best hypothesis
all_hypotheses.append(decoded_hyps)
if return_hypotheses:
return hypotheses, all_hypotheses
best_hyp_text = [h.text for h in hypotheses]
all_hyp_text = [h.text for hh in all_hypotheses for h in hh]
return best_hyp_text, all_hyp_text
else:
hypotheses = self.decode_hypothesis(prediction_list) # type: List[str]
# If computing timestamps
if self.compute_timestamps is True:
timestamp_type = self.cfg.get('rnnt_timestamp_type', 'all')
for hyp_idx in range(len(hypotheses)):
hypotheses[hyp_idx] = self.compute_rnnt_timestamps(hypotheses[hyp_idx], timestamp_type)
if return_hypotheses:
# greedy decoding, can get high-level confidence scores
if self.preserve_frame_confidence and (
self.preserve_word_confidence or self.preserve_token_confidence
):
hypotheses = self.compute_confidence(hypotheses)
return hypotheses, None
best_hyp_text = [h.text for h in hypotheses]
return best_hyp_text, None
def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[Hypothesis, NBestHypotheses]]:
"""
Decode a list of hypotheses into a list of strings.
Args:
hypotheses_list: List of Hypothesis.
Returns:
The same list of Hypothesis objects, with the decoded string (or intermediate timestamp data) stored in the `text` field.
"""
for ind in range(len(hypotheses_list)):
# Extract the integer encoded hypothesis
prediction = hypotheses_list[ind].y_sequence
if type(prediction) != list:
prediction = prediction.tolist()
# RNN-T sample level is already preprocessed by implicit RNNT decoding
# Simply remove any blank and possibly big blank tokens
if self.big_blank_durations is not None: # multi-blank RNNT
num_extra_outputs = len(self.big_blank_durations)
prediction = [p for p in prediction if p < self.blank_id - num_extra_outputs]
elif self.durations is not None: # TDT model.
prediction = [p for p in prediction if p < self.blank_id]
else: # standard RNN-T
prediction = [p for p in prediction if p != self.blank_id]
# De-tokenize the integer tokens into text only if timestamps are not being computed
if self.compute_timestamps is True:
# keep the original predictions, wrap with the number of repetitions per token and alignments
# this is done so that `rnnt_decoder_predictions_tensor()` can process this hypothesis
# in order to compute exact time stamps.
alignments = copy.deepcopy(hypotheses_list[ind].alignments)
token_repetitions = [1] * len(alignments) # preserve number of repetitions per token
hypothesis = (prediction, alignments, token_repetitions)
else:
hypothesis = self.decode_tokens_to_str(prediction)
# TODO: remove
# collapse leading spaces before . , ? for PC models
hypothesis = re.sub(r'(\s+)([\.\,\?])', r'\2', hypothesis)
if self.compute_hypothesis_token_set:
hypotheses_list[ind].tokens = self.decode_ids_to_tokens(prediction)
# De-tokenize the integer tokens
hypotheses_list[ind].text = hypothesis
return hypotheses_list
def compute_confidence(self, hypotheses_list: List[Hypothesis]) -> List[Hypothesis]:
"""
Computes high-level (per-token and/or per-word) confidence scores for a list of hypotheses.
Assumes that `frame_confidence` is present in the hypotheses.
Args:
hypotheses_list: List of Hypothesis.
Returns:
A list of hypotheses with high-level confidence scores.
"""
if self.exclude_blank_from_confidence:
for hyp in hypotheses_list:
hyp.token_confidence = hyp.non_blank_frame_confidence
else:
for hyp in hypotheses_list:
offset = 0
token_confidence = []
if len(hyp.timestep) > 0:
for ts, te in zip(hyp.timestep, hyp.timestep[1:] + [len(hyp.frame_confidence)]):
if ts != te:
# <blank> tokens are considered to belong to the last non-blank token, if any.
token_confidence.append(
self._aggregate_confidence(
[hyp.frame_confidence[ts][offset]]
+ [fc[0] for fc in hyp.frame_confidence[ts + 1 : te]]
)
)
offset = 0
else:
token_confidence.append(hyp.frame_confidence[ts][offset])
offset += 1
hyp.token_confidence = token_confidence
if self.preserve_word_confidence:
for hyp in hypotheses_list:
hyp.word_confidence = self._aggregate_token_confidence(hyp)
return hypotheses_list
@abstractmethod
def decode_tokens_to_str(self, tokens: List[int]) -> str:
"""
Implemented by subclass in order to decode a token id list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
"""
raise NotImplementedError()
@abstractmethod
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
"""
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
"""
raise NotImplementedError()
@abstractmethod
def decode_tokens_to_lang(self, tokens: List[int]) -> str:
"""
Implemented by subclass in order to
compute the most likely language ID (LID) string given the tokens.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded LID string.
"""
raise NotImplementedError()
@abstractmethod
def decode_ids_to_langs(self, tokens: List[int]) -> List[str]:
"""
Implemented by subclass in order to
decode a token id list into language ID (LID) list.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded LIDS.
"""
raise NotImplementedError()
def update_joint_fused_batch_size(self):
if self.joint_fused_batch_size is None:
# do nothing and let the Joint itself handle setting up of the fused batch
return
if not hasattr(self.decoding.joint, 'set_fused_batch_size'):
logging.warning(
"The joint module does not have `set_fused_batch_size(int)` as a setter function.\n"
"Ignoring update of joint fused batch size."
)
return
if not hasattr(self.decoding.joint, 'set_fuse_loss_wer'):
logging.warning(
"The joint module does not have `set_fuse_loss_wer(bool, RNNTLoss, RNNTWER)` "
"as a setter function.\n"
"Ignoring update of joint fused batch size."
)
return
if self.joint_fused_batch_size > 0:
self.decoding.joint.set_fused_batch_size(self.joint_fused_batch_size)
else:
logging.info("Joint fused batch size <= 0; Will temporarily disable fused batch step in the Joint.")
self.decoding.joint.set_fuse_loss_wer(False)
def compute_rnnt_timestamps(self, hypothesis: Hypothesis, timestamp_type: str = "all"):
assert timestamp_type in ['char', 'word', 'all']
# Unpack the temporary storage
decoded_prediction, alignments, token_repetitions = hypothesis.text
# Retrieve offsets
char_offsets = word_offsets = None
char_offsets = self._compute_offsets(hypothesis, token_repetitions, self.blank_id)
# finally, set the flattened decoded predictions to text field for later text decoding
hypothesis.text = decoded_prediction
# Assert number of offsets and hypothesis tokens are 1:1 match.
num_flattened_tokens = 0
for t in range(len(char_offsets)):
# Subtract one here for the extra RNNT BLANK token emitted to designate "End of timestep"
num_flattened_tokens += len(char_offsets[t]['char']) - 1
if num_flattened_tokens != len(hypothesis.text):
raise ValueError(
f"`char_offsets`: {char_offsets} and `processed_tokens`: {hypothesis.text}"
" have to be of the same length, but are: "
f"`len(offsets)`: {len(char_offsets)} and `len(processed_tokens)`:"
f" {len(hypothesis.text)}"
)
encoded_char_offsets = copy.deepcopy(char_offsets)
# Correctly process the token ids to chars/subwords.
for i, offsets in enumerate(char_offsets):
decoded_chars = []
for char in offsets['char'][:-1]: # ignore the RNNT Blank token at end of every timestep with -1 subset
decoded_chars.append(self.decode_tokens_to_str([int(char)]))
char_offsets[i]["char"] = decoded_chars
# detect char vs subword models
lens = []
for v in char_offsets:
tokens = v["char"]
# each token may be either 1 unicode token or multiple unicode tokens
# for character based models, only 1 token is used
# for subword, more than one token can be used.
# Computing max, then summing up total lens is a test to check for char vs subword
# For char models, len(lens) == sum(lens)
# but this is violated for subword models.
max_len = max(len(c) for c in tokens)
lens.append(max_len)
# array of one or more chars implies a subword-based model with multiple chars emitted per TxU step (via subword)
if sum(lens) > len(lens):
text_type = 'subword'
else:
# full array of ones implies character based model with 1 char emitted per TxU step
text_type = 'char'
# retrieve word offsets from character offsets
word_offsets = None
if timestamp_type in ['word', 'all']:
if text_type == 'char':
word_offsets = self._get_word_offsets_chars(char_offsets, word_delimiter_char=self.word_seperator)
else:
# utilize the copy of char offsets with the correct integer ids for tokens
# so as to avoid tokenize -> detokenize -> compare -> merge steps.
word_offsets = self._get_word_offsets_subwords_sentencepiece(
encoded_char_offsets,
hypothesis,
decode_ids_to_tokens=self.decode_ids_to_tokens,
decode_tokens_to_str=self.decode_tokens_to_str,
)
# attach results
if len(hypothesis.timestep) > 0:
timestep_info = hypothesis.timestep
else:
timestep_info = []
# Setup defaults
hypothesis.timestep = {"timestep": timestep_info}
# Add char / subword time stamps
if char_offsets is not None and timestamp_type in ['char', 'all']:
hypothesis.timestep['char'] = char_offsets
# Add word time stamps
if word_offsets is not None and timestamp_type in ['word', 'all']:
hypothesis.timestep['word'] = word_offsets
# Convert the flattened token indices to text
hypothesis.text = self.decode_tokens_to_str(hypothesis.text)
return hypothesis
@staticmethod
def _compute_offsets(
hypothesis: Hypothesis, token_repetitions: List[int], rnnt_token: int
) -> List[Dict[str, Union[str, int]]]:
"""
Utility method that calculates the individual time indices where a token starts and ends.
Args:
hypothesis: A Hypothesis object that contains `text` field that holds the character / subword token
emitted at every time step after rnnt collapse.
token_repetitions: A list of ints representing the number of repetitions of each emitted token.
rnnt_token: The integer of the rnnt blank token used during rnnt collapse.
Returns:
A list of dictionaries, each containing "char", "start_offset" and "end_offset" for one emitted token.
"""
start_index = 0
# If the exact timestep information is available, utilize the 1st non-rnnt blank token timestep
# as the start index.
if hypothesis.timestep is not None and len(hypothesis.timestep) > 0:
start_index = max(0, hypothesis.timestep[0] - 1)
# Construct the start and end indices brackets
end_indices = np.asarray(token_repetitions).cumsum()
start_indices = np.concatenate(([start_index], end_indices[:-1]))
# Process the TxU dangling alignment tensor, containing pairs of (logits, label)
alignment_labels = [al_logits_labels for al_logits_labels in hypothesis.text[1]]
for t in range(len(alignment_labels)):
for u in range(len(alignment_labels[t])):
alignment_labels[t][u] = alignment_labels[t][u][1] # pick label from (logit, label) tuple
# Merge the results per token into a list of dictionaries
offsets = [
{"char": a, "start_offset": s, "end_offset": e}
for a, s, e in zip(alignment_labels, start_indices, end_indices)
]
# Filter out RNNT token (blank at [t][0] position). This is because blank can only occur at end of a
# time step for RNNT, so if 0th token is blank, then that timestep is skipped.
offsets = list(filter(lambda offsets: offsets["char"][0] != rnnt_token, offsets))
return offsets
@staticmethod
def _get_word_offsets_chars(
offsets: Dict[str, Union[str, float]], word_delimiter_char: str = " "
) -> Dict[str, Union[str, float]]:
"""
Utility method which constructs word time stamps out of character time stamps.
References:
This code is a port of the Hugging Face code for word time stamp construction.
Args:
offsets: A list of dictionaries, each containing "char", "start_offset" and "end_offset".
word_delimiter_char: Character token that represents the word delimiter. By default, " ".
Returns:
A list of dictionaries containing the word offsets. Each item contains "word", "start_offset" and
"end_offset".
"""
word_offsets = []
last_state = "SPACE"
word = ""
start_offset = 0
end_offset = 0
for i, offset in enumerate(offsets):
chars = offset["char"]
for char in chars:
state = "SPACE" if char == word_delimiter_char else "WORD"
if state == last_state:
# If we are in the same state as before, we simply repeat what we've done before
end_offset = offset["end_offset"]
word += char
else:
# Switching state
if state == "SPACE":
# Finishing a word
word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
else:
# Starting a new word
start_offset = offset["start_offset"]
end_offset = offset["end_offset"]
word = char
last_state = state
if last_state == "WORD":
word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
return word_offsets
@staticmethod
def _get_word_offsets_subwords_sentencepiece(
offsets: Dict[str, Union[str, float]],
hypothesis: Hypothesis,
decode_ids_to_tokens: Callable[[List[int]], str],
decode_tokens_to_str: Callable[[List[int]], str],
) -> Dict[str, Union[str, float]]:
"""
Utility method which constructs word time stamps out of sub-word time stamps.
**Note**: Only supports SentencePiece-based tokenizers!
Args:
offsets: A list of dictionaries, each containing "char", "start_offset" and "end_offset".
hypothesis: Hypothesis object that contains `text` field, where each token is a sub-word id
after rnnt collapse.
decode_ids_to_tokens: A Callable function that accepts a list of integers and maps it to a sub-word.
decode_tokens_to_str: A Callable function that accepts a list of integers and maps it to text / str.
Returns:
A list of dictionaries containing the word offsets. Each item contains "word", "start_offset" and
"end_offset".
"""
word_offsets = []
built_token = []
previous_token_index = 0
# For every offset token
for i, offset in enumerate(offsets):
# For every subword token in offset token list (ignoring the RNNT Blank token at the end)
for char in offset['char'][:-1]:
char = int(char)
# Compute the sub-word text representation, and the decoded text (stripped of sub-word markers).
token = decode_ids_to_tokens([char])[0]
token_text = decode_tokens_to_str([char])
# It is a sub-word token, or contains an identifier at the beginning such as _ or ## that was stripped
# after forcing partial text conversion of the token.
if token != token_text:
# If there are any partially or fully built sub-word token ids, construct to text.
# Note: This is "old" subword, that occurs *after* current sub-word has started.
if built_token:
word_offsets.append(
{
"word": decode_tokens_to_str(built_token),
"start_offset": offsets[previous_token_index]["start_offset"],
"end_offset": offsets[i]["start_offset"],
}
)
# Prepare list of new sub-word ids
built_token.clear()
built_token.append(char)
previous_token_index = i
else:
# If the token does not contain any sub-word start mark, then the sub-word has not completed yet
# Append to current sub-word list.
built_token.append(char)
# Inject the start offset of the first token to word offsets
# This is because we always delay the injection of the first sub-word due to the loop
# condition and check whether built token is ready or not.
# Therefore without this forced injection, the start_offset appears as off by 1.
# This should only be done when these arrays contain more than one element.
if offsets and word_offsets:
word_offsets[0]["start_offset"] = offsets[0]["start_offset"]
# If there are any remaining tokens left, inject them all into the final word offset.
# The start offset of this token is the start time of the next token to process.
# The end offset of this token is the end time of the last token from offsets.
# Note that built_token is a flat list; but offsets contains a nested list which
# may have different dimensionality.
# As such, we can't rely on the length of the list of built_token to index offsets.
if built_token:
# start from the previous token index as this hasn't been committed to word_offsets yet
# if we still have content in built_token
start_offset = offsets[previous_token_index]["start_offset"]
word_offsets.append(
{
"word": decode_tokens_to_str(built_token),
"start_offset": start_offset,
"end_offset": offsets[-1]["end_offset"],
}
)
built_token.clear()
return word_offsets
class RNNTDecoding(AbstractRNNTDecoding):
"""
Used for performing RNN-T auto-regressive decoding of the Decoder+Joint network given the encoder state.
Args:
decoding_cfg: A dict-like object which contains the following key-value pairs.
strategy: str value which represents the type of decoding that can occur.
Possible values are :
- greedy, greedy_batch (for greedy decoding).
- beam, tsd, alsd, maes (for beam search decoding).
compute_hypothesis_token_set: A bool flag, which determines whether to compute a list of decoded
tokens as well as the decoded string. Default is False in order to avoid double decoding
unless required.
preserve_alignments: Bool flag which preserves the history of alignments generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `alignments` in it. Here, `alignments` is a List of List of
Tuple(Tensor (of length V + 1), Tensor(scalar, label after argmax)).
In order to obtain this hypothesis, please utilize `rnnt_decoder_predictions_tensor` function
with the `return_hypotheses` flag set to True.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence
scores. In order to obtain hypotheses with confidence scores, please utilize
`rnnt_decoder_predictions_tensor` function with the `preserve_frame_confidence` flag set to True.
preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores
generated during decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `frame_confidence` in it. Here, `frame_confidence` is a List of List of floats.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more confidence scores.
U is the number of target tokens for the current timestep Ti.
preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `token_confidence` in it. Here, `token_confidence` is a List of floats.
The length of the list corresponds to the number of recognized tokens.
preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores
generated during greedy decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `word_confidence` in it. Here, `word_confidence` is a List of floats.
The length of the list corresponds to the number of recognized words.
exclude_blank: Bool flag indicating that blank token confidence scores are to be excluded
from the `token_confidence`.
aggregation: Which aggregation type to use for collapsing per-token confidence into per-word confidence.
Valid options are `mean`, `min`, `max`, `prod`.
measure_cfg: A dict-like object which contains the measure name and settings to compute per-frame
confidence scores.
name: The measure name (str).
Supported values:
- 'max_prob' for using the maximum token probability as a confidence.
- 'entropy' for using a normalized entropy of a log-likelihood vector.
entropy_type: Which type of entropy to use (str).
Used if confidence_measure_cfg.name is set to `entropy`.
Supported values:
- 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided,
the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)).
Note that for this entropy, the alpha should comply with the following inequality:
(log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1)
where V is the model vocabulary size.
- 'tsallis' for the Tsallis entropy with the Boltzmann constant one.
Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/Tsallis_entropy
- 'renyi' for the Rényi entropy.
Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)),
where α is a parameter. When α == 1, it works like the Gibbs entropy.
More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0.
When the alpha equals one, scaling is not applied to 'max_prob',
and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i))
entropy_norm: A mapping of the entropy value to the interval [0,1].
Supported values:
- 'lin' for using the linear mapping.
- 'exp' for using exponential mapping with linear shift.
The config may further contain the following sub-dictionaries:
"greedy":
max_symbols: int, describing the maximum number of target tokens to decode per
timestep during greedy decoding. Setting to larger values allows longer sentences
to be decoded, at the cost of increased execution time.
preserve_frame_confidence: Same as above, overrides above value.
confidence_measure_cfg: Same as above, overrides confidence_cfg.measure_cfg.
"beam":
beam_size: int, defining the beam size for beam search. Must be >= 1.
If beam_size == 1, will perform cached greedy search. This might produce slightly different
results compared to the greedy search above.
score_norm: optional bool, whether to normalize the returned beam score in the hypotheses.
Set to True by default.
return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
hypotheses after beam search has concluded. This flag is set by default.
tsd_max_sym_exp: optional int, determines number of symmetric expansions of the target symbols
per timestep of the acoustic model. Larger values will allow longer sentences to be decoded,
at increased cost to execution time.
alsd_max_target_len: optional int or float, determines the potential maximum target sequence length.
If an integer is provided, it can decode sequences of that particular maximum length.
If a float is provided, it can decode sequences of int(alsd_max_target_len * seq_len),
where seq_len is the length of the acoustic model output (T).
NOTE:
If a float is provided, it can be greater than 1!
By default, a float of 2.0 is used so that a target sequence can be at most twice
as long as the acoustic model output length T.
maes_num_steps: Number of adaptive steps to take. From the paper, 2 steps is generally sufficient,
and can be reduced to 1 to improve decoding speed while sacrificing some accuracy. int > 0.
maes_prefix_alpha: Maximum prefix length in prefix search. Must be an integer, and it is advised to keep this as 1
in order to reduce the cost of the expensive beam search later. int >= 0.
maes_expansion_beta: Maximum number of prefix expansions allowed, in addition to the beam size.
Effectively, the number of hypotheses = beam_size + maes_expansion_beta. Must be an int >= 0,
and affects the speed of inference since large values will perform large beam search in the next step.
maes_expansion_gamma: Float pruning threshold used in the prune-by-value step when computing the expansions.
The default (2.3) is selected from the paper. It performs a comparison (max_log_prob - gamma <= log_prob[v])
where v is all vocabulary indices in the Vocab set and max_log_prob is the "most" likely token to be
predicted. Gamma therefore provides a margin of additional tokens which can be potential candidates for
expansion apart from the "most likely" candidate.
Lower values will reduce the number of expansions (by increasing pruning-by-value, thereby improving speed
but hurting accuracy). Higher values will increase the number of expansions (by reducing pruning-by-value,
thereby reducing speed but potentially improving accuracy). This is a hyperparameter to be experimentally
tuned on a validation set.
softmax_temperature: Scales the logits of the joint prior to computing log_softmax.
decoder: The Decoder/Prediction network module.
joint: The Joint network module.
vocabulary: The vocabulary (excluding the RNNT blank token) which will be used for decoding.
"""
def __init__(
self, decoding_cfg, decoder, joint, vocabulary,
):
# we need to ensure blank is the last token in the vocab for the case of RNNT and Multi-blank RNNT.
blank_id = len(vocabulary) + joint.num_extra_outputs
if hasattr(decoding_cfg, 'model_type') and decoding_cfg.model_type == 'tdt':
blank_id = len(vocabulary)
self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])
super(RNNTDecoding, self).__init__(
decoding_cfg=decoding_cfg, decoder=decoder, joint=joint, blank_id=blank_id,
)
if isinstance(self.decoding, beam_decode.BeamRNNTInfer):
self.decoding.set_decoding_type('char')
def _aggregate_token_confidence(self, hypothesis: Hypothesis) -> List[float]:
"""
Implemented by subclass in order to aggregate token confidence to a word-level confidence.
Args:
hypothesis: Hypothesis
Returns:
A list of word-level confidence scores.
"""
return self._aggregate_token_confidence_chars(hypothesis.words, hypothesis.token_confidence)
def decode_tokens_to_str(self, tokens: List[int]) -> str:
"""
Implemented by subclass in order to decode a token id list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
"""
hypothesis = ''.join(self.decode_ids_to_tokens(tokens))
return hypothesis
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
"""
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
"""
token_list = [self.labels_map[c] for c in tokens if c < self.blank_id - self.num_extra_outputs]
return token_list
def decode_tokens_to_lang(self, tokens: List[int]) -> str:
"""
Compute the most likely language ID (LID) string given the tokens.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded LID string.
"""
lang = self.tokenizer.ids_to_lang(tokens)
return lang
def decode_ids_to_langs(self, tokens: List[int]) -> List[str]:
"""
Decode a token id list into language ID (LID) list.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded LIDS.
"""
lang_list = self.tokenizer.ids_to_text_and_langs(tokens)
return lang_list
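# --- Editor's illustration (not part of NeMo): the alpha == 1 ("Shannon") case of the
# entropy-based confidence measures described in the RNNTDecoding docstring above. The linear
# mapping 1 - H / log(V) is only a sketch of the 'lin' entropy_norm option; the library's exact
# normalization may differ.
def _shannon_entropy_confidence_example(probs: torch.Tensor, eps: float = 1e-10) -> torch.Tensor:
    """Map a batch of probability vectors of shape [..., V] to confidence scores in [0, 1]."""
    vocab_size = probs.shape[-1]
    entropy = -(probs * (probs + eps).log()).sum(dim=-1)
    return 1.0 - entropy / torch.log(torch.tensor(float(vocab_size)))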
class RNNTWER(Metric):
"""
This metric computes numerator and denominator for Overall Word Error Rate (WER) between prediction and reference texts.
When doing distributed training/evaluation the result of res=WER(predictions, targets, target_lengths) calls
will be all-reduced between all workers using SUM operations.
Here, res contains two numbers: res=[wer_numerator, wer_denominator], and WER=wer_numerator/wer_denominator.
If used with a PytorchLightning LightningModule, include wer_numerator and wer_denominator inside validation_step results.
Then aggregate (sum) them at the end of the validation epoch to correctly compute the validation WER.
Example:
def validation_step(self, batch, batch_idx):
...
wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len)
self.val_outputs = {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom}
return self.val_outputs
def on_validation_epoch_end(self):
...
wer_num = torch.stack([x['val_wer_num'] for x in self.val_outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in self.val_outputs]).sum()
tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom}
self.val_outputs.clear() # free memory
return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
Args:
decoding: RNNTDecoding object that will perform autoregressive decoding of the RNNT model.
batch_dim_index: Index of the batch dimension.
use_cer: Whether to use Character Error Rate instead of Word Error Rate.
log_prediction: Whether to log a single decoded sample per call.
Returns:
res: a tuple of 3 zero-dimensional float32 ``torch.Tensor`` objects: a WER score, a sum of Levenshtein
distances for all prediction-reference pairs, and the total number of words in all references.
"""
full_state_update = True
def __init__(
self, decoding: RNNTDecoding, batch_dim_index=0, use_cer=False, log_prediction=True, dist_sync_on_step=False
):
super(RNNTWER, self).__init__(dist_sync_on_step=dist_sync_on_step)
self.decoding = decoding
self.batch_dim_index = batch_dim_index
self.use_cer = use_cer
self.log_prediction = log_prediction
self.blank_id = self.decoding.blank_id
self.labels_map = self.decoding.labels_map
self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
def update(
self,
encoder_output: torch.Tensor,
encoded_lengths: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
) -> torch.Tensor:
words = 0
scores = 0
references = []
with torch.no_grad():
# prediction_cpu_tensor = tensors[0].long().cpu()
targets_cpu_tensor = targets.long().cpu()
targets_cpu_tensor = move_dimension_to_the_front(targets_cpu_tensor, self.batch_dim_index)
tgt_lenths_cpu_tensor = target_lengths.long().cpu()
# iterate over batch
for ind in range(targets_cpu_tensor.shape[0]):
tgt_len = tgt_lenths_cpu_tensor[ind].item()
target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()
reference = self.decoding.decode_tokens_to_str(target)
references.append(reference)
hypotheses, _ = self.decoding.rnnt_decoder_predictions_tensor(encoder_output, encoded_lengths)
if self.log_prediction:
logging.info(f"\n")
logging.info(f"reference :{references[0]}")
logging.info(f"predicted :{hypotheses[0]}")
for h, r in zip(hypotheses, references):
if self.use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
words += len(r_list)
# Compute Levenshtein's distance
scores += editdistance.eval(h_list, r_list)
self.scores += torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)
self.words += torch.tensor(words, device=self.words.device, dtype=self.words.dtype)
# return torch.tensor([scores, words]).to(predictions.device)
def compute(self):
wer = self.scores.float() / self.words
return wer, self.scores.detach(), self.words.detach()
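# --- Editor's illustration (hypothetical strings): the numerator/denominator bookkeeping that
# RNNTWER accumulates, shown for a single hypothesis/reference pair. `editdistance` is already
# imported by this module (it is used in `update` above).
def _example_wer_counts() -> float:
    hypothesis = "the cat sat".split()
    reference = "the cat sat down".split()
    scores = editdistance.eval(hypothesis, reference)  # Levenshtein distance = 1 (one missing word)
    words = len(reference)  # 4 reference words
    return scores / words  # WER = 0.25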
@dataclass
class RNNTDecodingConfig:
model_type: str = "rnnt" # one of "rnnt", "multiblank" or "tdt"
strategy: str = "greedy_batch"
compute_hypothesis_token_set: bool = False
# preserve decoding alignments
preserve_alignments: Optional[bool] = None
# confidence config
confidence_cfg: ConfidenceConfig = ConfidenceConfig()
# RNNT Joint fused batch size
fused_batch_size: Optional[int] = None
# compute RNNT time stamps
compute_timestamps: Optional[bool] = None
# compute language IDs
compute_langs: bool = False
# token representing the word separator
word_seperator: str = " "
# type of timestamps to calculate
rnnt_timestamp_type: str = "all" # can be char, word or all for both
# greedy decoding config
greedy: greedy_decode.GreedyRNNTInferConfig = greedy_decode.GreedyRNNTInferConfig()
# beam decoding config
beam: beam_decode.BeamRNNTInferConfig = beam_decode.BeamRNNTInferConfig(beam_size=4)
# can be used to change temperature for decoding
temperature: float = 1.0
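# --- Editor's sketch (assumptions: omegaconf is installed; `decoder`, `joint`, `vocabulary`,
# `encoder_output` and `encoded_lengths` would come from a trained RNNT model and are therefore
# only hinted at in comments). Shows how a structured decoding config can be built and adjusted.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    cfg = OmegaConf.structured(RNNTDecodingConfig(strategy="beam"))
    cfg.beam.beam_size = 8
    cfg.preserve_alignments = True
    cfg.compute_timestamps = True
    print(OmegaConf.to_yaml(cfg))
    # decoding = RNNTDecoding(decoding_cfg=cfg, decoder=decoder, joint=joint, vocabulary=vocabulary)
    # best_texts, _ = decoding.rnnt_decoder_predictions_tensor(encoder_output, encoded_lengths)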
|
NeMo-main
|
nemo/collections/asr/metrics/rnnt_wer.py
|
# ! /usr/bin/python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from nemo.core.classes import Serialization, Typing, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LogprobsType, LossType, NeuralType
__all__ = ['CTCLoss']
class CTCLoss(nn.CTCLoss, Serialization, Typing):
@property
def input_types(self):
"""Input types definitions for CTCLoss.
"""
return {
"log_probs": NeuralType(('B', 'T', 'D'), LogprobsType()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"input_lengths": NeuralType(tuple('B'), LengthsType()),
"target_lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Output types definitions for CTCLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, num_classes, zero_infinity=False, reduction='mean_batch'):
self._blank = num_classes
# Don't forget to properly call base constructor
if reduction not in ['none', 'mean', 'sum', 'mean_batch', 'mean_volume']:
raise ValueError('`reduction` must be one of [none, mean, sum, mean_batch, mean_volume]')
self.config_reduction = reduction
if reduction == 'mean_batch' or reduction == 'mean_volume':
ctc_reduction = 'none'
self._apply_reduction = True
elif reduction in ['sum', 'mean', 'none']:
ctc_reduction = reduction
self._apply_reduction = False
super().__init__(blank=self._blank, reduction=ctc_reduction, zero_infinity=zero_infinity)
def reduce(self, losses, target_lengths):
if self.config_reduction == 'mean_batch':
losses = losses.mean() # global batch size average
elif self.config_reduction == 'mean_volume':
losses = losses.sum() / target_lengths.sum() # same as above but longer samples weigh more
return losses
@typecheck()
def forward(self, log_probs, targets, input_lengths, target_lengths):
# override forward implementation
# custom logic, if necessary
input_lengths = input_lengths.long()
target_lengths = target_lengths.long()
targets = targets.long()
# here we transpose because we expect [B, T, D] while PyTorch assumes [T, B, D]
log_probs = log_probs.transpose(1, 0)
loss = super().forward(
log_probs=log_probs, targets=targets, input_lengths=input_lengths, target_lengths=target_lengths
)
if self._apply_reduction:
loss = self.reduce(loss, target_lengths)
return loss
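# --- Editor's sketch (assumption: run with NeMo installed so the imports above resolve). ---
# Illustrates the shapes this CTCLoss wrapper expects: log-probabilities of shape [B, T, D] with
# D == num_classes + 1, and the blank id implicitly set to num_classes.
if __name__ == "__main__":
    batch, time, target_len, num_classes = 2, 50, 10, 28
    ctc = CTCLoss(num_classes=num_classes, reduction='mean_batch')
    log_probs = torch.randn(batch, time, num_classes + 1).log_softmax(dim=-1)
    targets = torch.randint(low=0, high=num_classes, size=(batch, target_len))
    input_lengths = torch.full((batch,), time)
    target_lengths = torch.full((batch,), target_len)
    loss = ctc(log_probs=log_probs, targets=targets, input_lengths=input_lengths, target_lengths=target_lengths)
    print(loss)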
|
NeMo-main
|
nemo/collections/asr/losses/ctc.py
|
# ! /usr/bin/python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.core.classes import Loss, Typing, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LossType, NeuralType, ProbsType
__all__ = ['BCELoss']
class BCELoss(Loss, Typing):
"""
Computes Binary Cross Entropy (BCE) loss. The BCELoss class expects output from Sigmoid function.
"""
@property
def input_types(self):
"""Input types definitions for AnguarLoss.
"""
return {
"probs": NeuralType(('B', 'T', 'C'), ProbsType()),
'labels': NeuralType(('B', 'T', 'C'), LabelsType()),
"signal_lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""
Output types definitions for binary cross entropy loss. Weights for labels can be set using weight variables.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, reduction='sum', alpha=1.0, weight=torch.tensor([0.5, 0.5])):
super().__init__()
self.reduction = reduction
self.loss_weight = weight
self.loss_f = torch.nn.BCELoss(weight=self.loss_weight, reduction=self.reduction)
@typecheck()
def forward(self, probs, labels, signal_lengths):
"""
Calculate binary cross entropy loss based on probs, labels and signal_lengths variables.
Args:
probs (torch.tensor)
Predicted probability value which ranges from 0 to 1. Sigmoid output is expected.
labels (torch.tensor)
Groundtruth label for the predicted samples.
signal_lengths (torch.tensor):
The actual length of the sequence without zero-padding.
Returns:
loss (NeuralType)
Binary cross entropy loss value.
"""
probs_list = [probs[k, : signal_lengths[k], :] for k in range(probs.shape[0])]
targets_list = [labels[k, : signal_lengths[k], :] for k in range(labels.shape[0])]
probs = torch.cat(probs_list, dim=0)
labels = torch.cat(targets_list, dim=0)
return self.loss_f(probs, labels)
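# --- Editor's sketch (assumption: run with NeMo installed so the imports above resolve). ---
# The default per-class weight tensor has length 2, so this example uses C == 2 classes
# (e.g. a two-speaker activity target); labels are float values in [0, 1].
if __name__ == "__main__":
    batch, time, classes = 3, 100, 2
    probs = torch.sigmoid(torch.randn(batch, time, classes))
    labels = torch.randint(low=0, high=2, size=(batch, time, classes)).float()
    signal_lengths = torch.tensor([100, 80, 60])
    loss = BCELoss(reduction='sum')(probs=probs, labels=labels, signal_lengths=signal_lengths)
    print(loss)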
|
NeMo-main
|
nemo/collections/asr/losses/bce_loss.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.losses.angularloss import AngularSoftmaxLoss
from nemo.collections.asr.losses.audio_losses import SDRLoss
from nemo.collections.asr.losses.ctc import CTCLoss
from nemo.collections.asr.losses.lattice_losses import LatticeLoss
from nemo.collections.asr.losses.ssl_losses.contrastive import ContrastiveLoss
from nemo.collections.asr.losses.ssl_losses.ctc import CTCLossForSSL
from nemo.collections.asr.losses.ssl_losses.mlm import MLMLoss
from nemo.collections.asr.losses.ssl_losses.rnnt import RNNTLossForSSL
|
NeMo-main
|
nemo/collections/asr/losses/__init__.py
|
# ! /usr/bin/python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.core.classes import Loss, Typing, typecheck
from nemo.core.neural_types import LabelsType, LogitsType, LossType, NeuralType
__all__ = ['AngularSoftmaxLoss']
class AngularSoftmaxLoss(Loss, Typing):
"""
Computes the ArcFace additive angular margin softmax loss
reference: https://openaccess.thecvf.com/content_CVPR_2019/papers/Deng_ArcFace_Additive_Angular_Margin_Loss_for_Deep_Face_Recognition_CVPR_2019_paper.pdf
args:
scale: scale value for cosine angle
margin: margin value added to cosine angle
"""
@property
def input_types(self):
"""Input types definitions for AnguarLoss.
"""
return {
"logits": NeuralType(('B', 'D'), LogitsType()),
"labels": NeuralType(('B',), LabelsType()),
}
@property
def output_types(self):
"""Output types definitions for AngularLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, scale=20.0, margin=1.35):
super().__init__()
self.eps = 1e-7
self.scale = scale
self.margin = margin
@typecheck()
def forward(self, logits, labels):
numerator = self.scale * torch.cos(
torch.acos(torch.clamp(torch.diagonal(logits.transpose(0, 1)[labels]), -1.0 + self.eps, 1 - self.eps))
+ self.margin
)
excl = torch.cat(
[torch.cat((logits[i, :y], logits[i, y + 1 :])).unsqueeze(0) for i, y in enumerate(labels)], dim=0
)
denominator = torch.exp(numerator) + torch.sum(torch.exp(self.scale * excl), dim=1)
L = numerator - torch.log(denominator)
return -torch.mean(L)
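# --- Editor's sketch (assumption: run with NeMo installed so the imports above resolve). ---
# The logits are expected to be cosine similarities in [-1, 1], one per class, as produced by a
# normalized embedding head; uniform random values in that range are used purely for illustration.
if __name__ == "__main__":
    batch, num_classes = 4, 10
    logits = torch.empty(batch, num_classes).uniform_(-1.0, 1.0)
    labels = torch.randint(low=0, high=num_classes, size=(batch,))
    loss = AngularSoftmaxLoss(scale=20.0, margin=1.35)(logits=logits, labels=labels)
    print(loss)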
|
NeMo-main
|
nemo/collections/asr/losses/angularloss.py
|
# ! /usr/bin/python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from omegaconf import DictConfig
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LogprobsType, LossType, NeuralType
class LatticeLoss(Loss):
"""Family of loss functions based on various lattice scores.
Note:
Requires k2 v1.14 or later to be installed to use this loss function.
Losses can be selected via the config, and optionally be passed keyword arguments as follows.
Examples:
.. code-block:: yaml
model: # Model config
...
graph_module_cfg: # Config for graph modules, e.g. LatticeLoss
criterion_type: "map"
loss_type: "mmi"
split_batch_size: 0
backend_cfg:
topo_type: "default" # other options: "compact", "shared_blank", "minimal"
topo_with_self_loops: true
token_lm: <token_lm_path> # must be provided for criterion_type: "map"
Args:
num_classes: Number of target classes for the decoder network to predict.
(Excluding the blank token).
reduction: Type of reduction to perform on loss. Possible values are `mean_batch`, `mean`, `sum`, or None.
None will return a torch vector comprising the individual loss values of the batch.
backend: Which backend to use for loss calculation. Currently only `k2` is supported.
criterion_type: Type of criterion to use. Choices: `ml` and `map`,
with `ml` standing for Maximum Likelihood and `map` for Maximum A Posteriori Probability.
loss_type: Type of the loss function to use. Choices: `ctc` and `rnnt` for `ml`, and `mmi` for `map`.
split_batch_size: Local batch size. Used for memory consumption reduction at the cost of speed performance.
Effective if 0 < split_batch_size < batch_size.
graph_module_cfg: Optional Dict of (str, value) pairs that are passed to the backend loss function.
"""
@property
def input_types(self):
"""Input types definitions for LatticeLoss.
"""
return {
"log_probs": NeuralType(("B", "T", "D") if self._3d_input else ("B", "T", "T", "D"), LogprobsType()),
"targets": NeuralType(("B", "T"), LabelsType()),
"input_lengths": NeuralType(tuple("B"), LengthsType()),
"target_lengths": NeuralType(tuple("B"), LengthsType()),
}
@property
def output_types(self):
"""Output types definitions for LatticeLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(
self,
num_classes: int,
reduction: str = "mean_batch",
backend: str = "k2",
criterion_type: str = "ml",
loss_type: str = "ctc",
split_batch_size: int = 0,
graph_module_cfg: Optional[DictConfig] = None,
):
super().__init__()
self._blank = num_classes
self.split_batch_size = split_batch_size
inner_reduction = None
if reduction == "mean_batch":
inner_reduction = "none"
self._apply_batch_mean = True
elif reduction in ["sum", "mean", "none"]:
inner_reduction = reduction
self._apply_batch_mean = False
# we assume that self._blank + 1 == num_classes
if backend == "k2":
if criterion_type == "ml":
if loss_type == "ctc":
from nemo.collections.asr.parts.k2.ml_loss import CtcLoss as K2Loss
elif loss_type == "rnnt":
from nemo.collections.asr.parts.k2.ml_loss import RnntLoss as K2Loss
else:
raise ValueError(f"Unsupported `loss_type`: {loss_type}.")
elif criterion_type == "map":
if loss_type == "ctc":
from nemo.collections.asr.parts.k2.map_loss import CtcMmiLoss as K2Loss
else:
raise ValueError(f"Unsupported `loss_type`: {loss_type}.")
else:
raise ValueError(f"Unsupported `criterion_type`: {criterion_type}.")
self._loss = K2Loss(
num_classes=self._blank + 1, blank=self._blank, reduction=inner_reduction, cfg=graph_module_cfg,
)
elif backend == "gtn":
raise NotImplementedError(f"Backend {backend} is not supported.")
else:
raise ValueError(f"Invalid value of `backend`: {backend}.")
self.criterion_type = criterion_type
self.loss_type = loss_type
self._3d_input = self.loss_type != "rnnt"
if self.split_batch_size > 0:
# don't need to guard grad_utils
from nemo.collections.asr.parts.k2.grad_utils import PartialGrad
self._partial_loss = PartialGrad(self._loss)
def update_graph(self, graph):
"""Updates graph of the backend loss function.
"""
if self.criterion_type != "ml":
self._loss.update_graph(graph)
@typecheck()
def forward(self, log_probs, targets, input_lengths, target_lengths):
# override forward implementation
# custom logic, if necessary
assert not (torch.isnan(log_probs).any() or torch.isinf(log_probs).any())
log_probs = log_probs.float()
input_lengths = input_lengths.long()
target_lengths = target_lengths.long()
targets = targets.long()
batch_size = log_probs.shape[0]
if self.split_batch_size > 0 and self.split_batch_size <= batch_size:
loss_list = []
for batch_idx in range(0, batch_size, self.split_batch_size):
begin = batch_idx
end = min(begin + self.split_batch_size, batch_size)
input_lengths_part = input_lengths[begin:end]
log_probs_part = log_probs[begin:end, : input_lengths_part.max()]
target_lengths_part = target_lengths[begin:end]
targets_part = targets[begin:end, : target_lengths_part.max()]
loss_part, _ = (
self._partial_loss(log_probs_part, targets_part, input_lengths_part, target_lengths_part)
if log_probs_part.requires_grad
else self._loss(log_probs_part, targets_part, input_lengths_part, target_lengths_part)
)
del log_probs_part, targets_part, input_lengths_part, target_lengths_part
loss_list.append(loss_part)
loss = torch.cat(loss_list, 0)
else:
loss, _ = self._loss(
log_probs=log_probs, targets=targets, input_lengths=input_lengths, target_lengths=target_lengths,
)
if self._apply_batch_mean:
# torch.mean gives nan if loss is empty
loss = torch.mean(loss) if loss.nelement() > 0 else torch.sum(loss)
return loss
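# --- Editor's sketch (assumptions: k2 must be installed before LatticeLoss can be constructed,
# and `<token_lm_path>` is a hypothetical placeholder path; omegaconf provides the DictConfig). ---
if __name__ == "__main__":
    from omegaconf import OmegaConf

    backend_cfg = OmegaConf.create(
        {"topo_type": "default", "topo_with_self_loops": True, "token_lm": "<token_lm_path>"}
    )
    print(OmegaConf.to_yaml(backend_cfg))
    # Constructing the loss imports k2 lazily, so the next line only works when k2 is available:
    # loss_fn = LatticeLoss(num_classes=128, criterion_type="map", loss_type="mmi",
    #                       split_batch_size=0, graph_module_cfg=backend_cfg)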
|
NeMo-main
|
nemo/collections/asr/losses/lattice_losses.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional
import numpy as np
import torch
from nemo.collections.asr.parts.preprocessing.features import make_seq_mask_like
from nemo.collections.asr.parts.utils.audio_utils import toeplitz
from nemo.core.classes import Loss, Typing, typecheck
from nemo.core.neural_types import AudioSignal, LengthsType, LossType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['SDRLoss']
def temporal_mean(
input: torch.Tensor,
input_length: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
keepdim: bool = False,
eps: float = 1e-10,
) -> torch.Tensor:
"""Calculate mean along temporal dimension with optionally
averaging only over valid samples (based on the input length).
Args:
input: Batch of signals, shape (B, C, T)
input_length: Optional, length of each example in the batch, shape (B,)
mask: Optional, temporal mask for each example in the batch, shape (B, T)
keepdim: Whether to keep the temporal dimension
eps: Regularization to avoid division by zero
Returns:
(B, C, 1) if keepdim=True, otherwise (B, C)
"""
if input_length is not None:
if mask is not None:
raise RuntimeError(
'Argument `input_length` is mutually exclusive with `mask`. Both cannot be used at the same time.'
)
# Construct a binary mask
mask = make_seq_mask_like(lengths=input_length, like=input, time_dim=-1, valid_ones=True).squeeze(1)
if mask is None:
# No length information, assume all samples are valid
mean = torch.mean(input, dim=-1, keepdim=keepdim)
else:
# Average using temporal mask
mean = mask.unsqueeze(1) * input
mean = torch.sum(mean, axis=-1, keepdim=keepdim)
normalization = torch.sum(mask, axis=-1, keepdim=keepdim)
mean = mean / (normalization.unsqueeze(1) + eps)
return mean
def scale_invariant_target(
estimate: torch.Tensor,
target: torch.Tensor,
input_length: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
eps: float = 1e-10,
) -> torch.Tensor:
"""Calculate optimal scale-invariant target.
Assumes time dimension is the last dimension in the array.
Calculate scaled target obtained by solving
min_scale || scale * target - estimate ||^2
for each example in batch and each channel (b, c).
Args:
estimate: tensor, shape (B, C, T)
target: tensor, shape (B, C, T)
input_length: optional, length of valid samples, shape (B,)
mask: optional, mask for input samples, shape (B, T)
eps: regularization constant
Returns:
Scaled target, shape (B, C, T)
"""
if input_length is not None:
if mask is not None:
raise RuntimeError(
'Argument `input_length` is mutually exclusive with `mask`. Both cannot be used at the same time.'
)
# Construct a binary mask
mask = make_seq_mask_like(lengths=input_length, like=estimate, time_dim=-1, valid_ones=True).squeeze(1)
estimate_dot_target = temporal_mean(estimate * target, mask=mask, keepdim=True, eps=eps)
target_pow = temporal_mean(torch.abs(target) ** 2, mask=mask, keepdim=True, eps=eps)
scale = estimate_dot_target / (target_pow + eps)
target_scaled = scale * target
# Mask to keep only the valid samples
if mask is not None:
target_scaled = mask.unsqueeze(1) * target_scaled
return target_scaled
def convolution_invariant_target(
estimate: torch.Tensor,
target: torch.Tensor,
input_length: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
filter_length: int = 512,
diag_reg: float = 1e-8,
eps: float = 1e-10,
) -> torch.Tensor:
"""Calculate optimal convolution-invariant target for a given estimate.
Assumes time dimension is the last dimension in the array.
Calculate the target filtered with a linear filter obtained by solving
min_filter || conv(filter, target) - estimate ||^2
for each example in batch and each channel (b, c).
Args:
estimate: tensor, shape (B, C, T)
target: tensor, shape (B, C, T)
input_length: optional, length of valid samples, shape (B,)
mask: optional, mask for input samples, shape (B, T)
filter_length: length of the (convolutional) filter for target
diag_reg: relative diagonal regularization for the linear system
eps: absolute regularization for the diagonal
Returns:
Filtered target, shape (B, C, T)
Reference:
C. Boeddeker et al., Convolutive Transfer Function Invariant SDR training criteria for Multi-Channel Reverberant Speech Separation, 2021
"""
if input_length is not None:
if mask is not None:
raise RuntimeError(
'Argument `input_length` is mutually exclusive with `mask`. Both cannot be used at the same time.'
)
if torch.min(input_length) < filter_length:
logging.warning(
'Current min input_length (%d) is smaller than filter_length (%d). This will result in a singular linear system.',
torch.min(input_length),
filter_length,
)
# Construct a binary mask
mask = make_seq_mask_like(lengths=input_length, like=estimate, time_dim=-1, valid_ones=True).squeeze(1)
# Apply a mask, if available
if mask is not None:
estimate = mask.unsqueeze(1) * estimate
target = mask.unsqueeze(1) * target
# Calculate filtered target
input_shape = estimate.shape
estimate = estimate.view(-1, input_shape[-1])
target = target.view(-1, input_shape[-1])
n_fft = 2 ** math.ceil(math.log2(2 * input_shape[-1] - 1))
T = torch.fft.rfft(target, n=n_fft)
E = torch.fft.rfft(estimate, n=n_fft)
# Target autocorrelation
tt_corr = torch.fft.irfft(torch.abs(T) ** 2, n=n_fft)
# Target-estimate crosscorrelation
te_corr = torch.fft.irfft(T.conj() * E, n=n_fft)
# Use only filter_length
tt_corr = tt_corr[..., :filter_length]
te_corr = te_corr[..., :filter_length]
# Diagonal regularization
if diag_reg is not None:
tt_corr[..., 0] += diag_reg * tt_corr[..., 0] + eps
# Construct the Toeplitz system matrix
TT = toeplitz(tt_corr)
# Solve the linear system for the optimal filter
filt = torch.linalg.solve(TT, te_corr)
# Calculate filtered target
T_filt = T * torch.fft.rfft(filt, n=n_fft)
target_filt = torch.fft.irfft(T_filt, n=n_fft)
# Reshape to the original format
target_filt = target_filt[..., : input_shape[-1]].view(*input_shape)
# Mask to keep only the valid samples
if mask is not None:
target_filt = mask.unsqueeze(1) * target_filt
return target_filt
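# Illustrative usage sketch (not part of the original NeMo file): when the estimate is a
# causally filtered copy of the target with a short FIR filter, the convolution-invariant
# target should approximately recover the estimate, since the optimal filter of length
# `filter_length` can represent the FIR used below.
def _example_convolution_invariant_target():
    torch.manual_seed(0)
    b, c, t = 1, 1, 4000
    target = torch.randn(b, c, t)
    fir = torch.tensor([1.0, 0.5, 0.25])  # short causal filter, much shorter than filter_length
    x = torch.nn.functional.pad(target.view(b * c, 1, t), (fir.numel() - 1, 0))  # left-pad: causal
    estimate = torch.nn.functional.conv1d(x, fir.flip(-1).view(1, 1, -1)).view(b, c, t)
    target_filt = convolution_invariant_target(estimate=estimate, target=target, filter_length=32)
    # The relative error is expected to be small (tolerance intentionally loose).
    rel_err = (target_filt - estimate).norm() / estimate.norm()
    assert rel_err < 1e-2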
def calculate_sdr_batch(
estimate: torch.Tensor,
target: torch.Tensor,
input_length: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
scale_invariant: bool = False,
convolution_invariant: bool = False,
convolution_filter_length: Optional[int] = 512,
remove_mean: bool = True,
sdr_max: Optional[float] = None,
eps: float = 1e-10,
) -> torch.Tensor:
"""Calculate signal-to-distortion ratio per channel.
    SDR = 10 * log10( ||t||_2^2 / (||e-t||_2^2 + alpha * ||t||_2^2) )
where
alpha = 10^(-sdr_max/10)
Optionally, use scale- or convolution- invariant target signal.
Args:
estimate: estimated signal, shape (B, C, T)
target: target signal, shape (B, C, T)
input_length: Optional, length of valid samples, shape (B,)
mask: Optional, temporal mask, shape (B, T)
scale_invariant: Use scale invariant SDR
convolution_invariant: Use convolution invariant SDR
convolution_filter_length: Filter length for convolution invariant SDR
        remove_mean: If True, mean will be removed before calculating SDR
        sdr_max: Optional soft threshold for SDR in dB, applied via alpha = 10^(-sdr_max/10)
        eps: Small regularization constant
Returns:
SDR in dB for each channel, shape (B, C)
"""
if scale_invariant and convolution_invariant:
raise ValueError(f'Arguments scale_invariant and convolution_invariant cannot be used simultaneously.')
assert (
estimate.shape == target.shape
), f'Estimate shape ({estimate.shape}) not matching target shape ({target.shape})'
if input_length is not None:
if mask is not None:
raise RuntimeError(
'Argument `input_length` is mutually exclusive with `mask`. Both cannot be used at the same time.'
)
# Construct a binary mask
mask = make_seq_mask_like(lengths=input_length, like=estimate, time_dim=-1, valid_ones=True).squeeze(1)
if remove_mean:
estimate = estimate - temporal_mean(estimate, mask=mask, keepdim=True, eps=eps)
target = target - temporal_mean(target, mask=mask, keepdim=True, eps=eps)
if scale_invariant or (convolution_invariant and convolution_filter_length == 1):
target = scale_invariant_target(estimate=estimate, target=target, mask=mask, eps=eps)
elif convolution_invariant:
target = convolution_invariant_target(
estimate=estimate, target=target, mask=mask, filter_length=convolution_filter_length, eps=eps,
)
distortion = estimate - target
target_pow = temporal_mean(torch.abs(target) ** 2, mask=mask, eps=eps)
distortion_pow = temporal_mean(torch.abs(distortion) ** 2, mask=mask, eps=eps)
if sdr_max is not None:
distortion_pow = distortion_pow + 10 ** (-sdr_max / 10) * target_pow
sdr = target_pow / (distortion_pow + eps)
sdr = 10 * torch.log10(sdr + eps)
return sdr
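# Illustrative usage sketch (not part of the original NeMo file): additive noise at roughly
# -20 dB relative to the target should yield an SDR of about 20 dB per channel.
def _example_calculate_sdr_batch():
    torch.manual_seed(0)
    target = torch.randn(2, 1, 16000)  # (B, C, T)
    estimate = target + 0.1 * torch.randn_like(target)  # noise power ~1% of target power
    sdr = calculate_sdr_batch(estimate=estimate, target=target)  # shape (B, C), in dB
    assert torch.all((sdr > 19.0) & (sdr < 21.0))  # close to 20 dB up to statistical fluctuation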
class SDRLoss(Loss, Typing):
"""
Computes signal-to-distortion ratio (SDR) loss with weighted average across channels.
Args:
        weight: weight for SDR of each output channel, used for averaging the loss across channels. Defaults to `None` (equal weighting).
        reduction: batch reduction. Defaults to `mean` over the batch.
        scale_invariant: If `True`, use scale-invariant SDR. Defaults to `False`.
        convolution_invariant: If `True`, use convolution-invariant SDR. Defaults to `False`.
        convolution_filter_length: Filter length for convolution-invariant SDR. Defaults to 512.
remove_mean: Remove mean before calculating the loss. Defaults to `True`.
sdr_max: Soft thresholding of the loss to SDR_max.
eps: Small value for regularization.
"""
def __init__(
self,
weight: Optional[List[float]] = None,
reduction: str = 'mean',
scale_invariant: bool = False,
convolution_invariant: bool = False,
convolution_filter_length: Optional[int] = 512,
remove_mean: bool = True,
sdr_max: Optional[float] = None,
eps: float = 1e-10,
):
super().__init__()
# SDR weight buffer
if weight is not None:
if any([w <= 0 for w in weight]):
raise ValueError(f'Weight must be positive! Current value: {weight}')
elif not np.isclose(sum(weight), 1, atol=1e-6):
raise ValueError(f'Weight should add to one, current weight: {weight}')
weight = torch.tensor(weight).reshape(1, -1)
            logging.info('Channel weight set to %s', weight)
self.register_buffer('weight', weight)
self.weight: Optional[Tensor]
# Batch reduction
self.reduction = reduction
if reduction == 'mean':
self.reduce = torch.mean
else:
raise ValueError(f'Unexpected reduction mode {reduction}.')
# SDR calculation setup
if scale_invariant and convolution_invariant:
raise ValueError(
f'{self.__class__.__name__}: arguments scale_invariant and convolution_invariant cannot be used simultaneously.'
)
self.scale_invariant = scale_invariant
self.convolution_invariant = convolution_invariant
self.convolution_filter_length = convolution_filter_length
self.remove_mean = remove_mean
self.sdr_max = sdr_max
self.eps = eps
@property
def input_types(self):
"""Input types definitions for SDRLoss.
"""
signal_shape = ('B', 'C', 'T')
return {
"estimate": NeuralType(signal_shape, AudioSignal()),
"target": NeuralType(signal_shape, AudioSignal()),
"input_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"mask": NeuralType(('B', 'T'), MaskType(), optional=True),
}
@property
def output_types(self):
"""Output types definitions for SDRLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
@typecheck()
def forward(
self,
estimate: torch.Tensor,
target: torch.Tensor,
input_length: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""For input batch of multi-channel signals, calculate SDR between estimate and target for each channel,
perform averaging across channels (weighting optional), and apply reduction across the batch.
Args:
            estimate: Batch of signals, shape (B, C, T)
            target: Batch of signals, shape (B, C, T)
input_length: Batch of lengths, shape (B,)
mask: Batch of temporal masks, shape (B, T)
Returns:
Scalar loss.
"""
sdr = calculate_sdr_batch(
estimate=estimate,
target=target,
input_length=input_length,
mask=mask,
scale_invariant=self.scale_invariant,
convolution_invariant=self.convolution_invariant,
convolution_filter_length=self.convolution_filter_length,
remove_mean=self.remove_mean,
sdr_max=self.sdr_max,
eps=self.eps,
)
# channel averaging
if self.weight is None:
sdr = torch.mean(sdr, dim=1)
else:
# weighting across channels
sdr = sdr * self.weight
sdr = torch.sum(sdr, dim=1)
# reduction
sdr = self.reduce(sdr)
return -sdr
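# Minimal usage sketch (not part of the original NeMo file): SDRLoss returns the negative SDR,
# so a more accurate estimate gives a lower (more negative) loss. `input_length` restricts the
# computation to the valid part of each example.
if __name__ == "__main__":
    torch.manual_seed(0)
    loss_fn = SDRLoss(scale_invariant=True)
    target = torch.randn(4, 2, 8000)  # (B, C, T)
    estimate = target + 0.05 * torch.randn_like(target)
    input_length = torch.tensor([8000, 8000, 6000, 4000])
    loss = loss_fn(estimate=estimate, target=target, input_length=input_length)
    print(f'SDR loss (negative SDR in dB): {loss.item():.2f}')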
|
NeMo-main
|
nemo/collections/asr/losses/audio_losses.py
|
# ! /usr/bin/python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch
from nemo.core.classes import Loss
from nemo.core.neural_types import LabelsType, LengthsType, LogprobsType, LossType, NeuralType
class RNNTLossPytorch(Loss):
@property
def input_types(self):
"""Input types definitions for CTCLoss.
"""
return {
"acts": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
"labels": NeuralType(('B', 'T'), LabelsType()),
"act_lens": NeuralType(tuple('B'), LengthsType()),
"label_lens": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Output types definitions for CTCLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, blank, reduction):
super().__init__()
self.blank = blank
self.reduction = reduction
def forward(self, acts, labels, act_lens, label_lens):
# CPU patch for FP16
if not acts.is_cuda and acts.dtype == torch.float16:
acts = acts.float()
acts = torch.log_softmax(acts, -1)
forward_logprob = self.compute_forward_prob(acts, labels, act_lens, label_lens)
losses = -forward_logprob
if self.reduction == 'mean_batch':
losses = losses.mean() # global batch size average
elif self.reduction == 'mean':
losses = torch.div(losses, label_lens).mean()
elif self.reduction == 'sum':
losses = losses.sum()
elif self.reduction == 'mean_volume':
losses = losses.sum() / label_lens.sum() # same as above but longer samples weigh more
return losses
def compute_forward_prob(self, acts, labels, act_lens, label_lens):
B, T, U, _ = acts.shape
log_alpha = torch.zeros(B, T, U)
log_alpha = log_alpha.to(acts.device)
for t in range(T):
for u in range(U):
if u == 0:
if t == 0:
# this is the base case: (t=0, u=0) with log-alpha = 0.
log_alpha[:, t, u] = 0.0
else:
                        # this is the case (t > 0, u = 0), reached from (t - 1, u)
                        # by emitting a blank symbol.
log_alpha[:, t, u] = log_alpha[:, t - 1, u] + acts[:, t - 1, 0, self.blank]
else:
if t == 0:
# in case of (u > 0, t = 0), this is only reached from
# (t, u - 1) with a label emission.
gathered = torch.gather(
acts[:, t, u - 1], dim=1, index=labels[:, u - 1].view(-1, 1).type(torch.int64)
).reshape(-1)
log_alpha[:, t, u] = log_alpha[:, t, u - 1] + gathered.to(log_alpha.device)
else:
# here both t and u are > 0, this state is reachable
# with two possibilities: (t - 1, u) with a blank emission
# or (t, u - 1) with a label emission.
log_alpha[:, t, u] = torch.logsumexp(
torch.stack(
[
log_alpha[:, t - 1, u] + acts[:, t - 1, u, self.blank],
log_alpha[:, t, u - 1]
+ torch.gather(
acts[:, t, u - 1], dim=1, index=labels[:, u - 1].view(-1, 1).type(torch.int64)
).reshape(-1),
]
),
dim=0,
)
log_probs = []
for b in range(B):
# here we need to add the final blank emission weights.
to_append = (
log_alpha[b, act_lens[b] - 1, label_lens[b]] + acts[b, act_lens[b] - 1, label_lens[b], self.blank]
)
log_probs.append(to_append)
log_prob = torch.stack(log_probs)
return log_prob
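# Illustrative usage sketch (not part of the original NeMo file): RNNTLossPytorch expects raw
# joint activations of shape (B, T, U + 1, V + 1) with the blank as the last output index,
# labels of shape (B, U), and per-example activation/label lengths.
def _example_rnnt_loss_pytorch():
    torch.manual_seed(0)
    B, T, U, V = 2, 4, 3, 5  # V non-blank tokens, blank index = V
    acts = torch.randn(B, T, U + 1, V + 1)
    labels = torch.randint(low=0, high=V, size=(B, U))
    act_lens = torch.tensor([T, T - 1])
    label_lens = torch.tensor([U, U - 1])
    loss_fn = RNNTLossPytorch(blank=V, reduction='sum')
    # Scalar negative log-likelihood summed over the batch.
    return loss_fn(acts=acts, labels=labels, act_lens=act_lens, label_lens=label_lens)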
class TDTLossPytorch(Loss):
"""
Pure Python implementation of TDT loss (https://arxiv.org/pdf/2304.06795.pdf)
"""
@property
def input_types(self):
"""Input types definitions for CTCLoss.
"""
return {
"acts": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
"labels": NeuralType(('B', 'T'), LabelsType()),
"act_lens": NeuralType(tuple('B'), LengthsType()),
"label_lens": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Output types definitions for CTCLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, blank: int, durations: List[int] = [], reduction: str = 'sum', sigma: float = 0.0):
super().__init__()
self.blank = blank
self.durations = durations
self.n_durations = len(durations)
self.reduction = reduction
self.sigma = sigma
def forward(self, acts, labels, act_lens, label_lens):
label_acts = acts[:, :, :, : -self.n_durations]
duration_acts = acts[:, :, :, -self.n_durations :]
# the - self.sigma here is for logit-undernormalization. Check the paper for details.
label_acts = torch.log_softmax(label_acts, -1) - self.sigma
duration_acts = torch.log_softmax(duration_acts, -1)
forward_logprob, _ = self.compute_forward_prob(label_acts, duration_acts, labels, act_lens, label_lens)
losses = -forward_logprob
if self.reduction == 'mean_batch':
losses = losses.mean() # global batch size average
elif self.reduction == 'mean':
losses = torch.div(losses, label_lens).mean()
elif self.reduction == 'sum':
losses = losses.sum()
elif self.reduction == 'mean_volume':
losses = losses.sum() / label_lens.sum() # same as above but longer samples weigh more
return losses
def logsumexp(self, a, b):
ret = torch.logsumexp(torch.stack([a, b]), dim=0)
return ret
def compute_forward_prob(self, acts, duration_acts, labels, act_lens, label_lens):
"""This function implements Equation 7 in the TDT paper https://arxiv.org/pdf/2304.06795.pdf,
Simply put, for each alpha(t, u), it sums over the contribution from all incoming blank arcs and non-blank arcs.
"""
B, T, U, _ = acts.shape
log_alpha = torch.zeros(B, T, U)
        log_alpha = log_alpha.to(acts.device)
for b in range(B):
for t in range(T):
for u in range(U):
if u == 0:
if t == 0:
# both t and u are 0, this is the base case for alphas.
log_alpha[b, t, u] = 0.0
else:
# u = 0 and t != 0: only considers blank emissions.
log_alpha[b, t, u] = -1000.0
for n, l in enumerate(self.durations):
if (
t - l >= 0 and l > 0
): # checking conditions for blank emission, l has to be at least 1
tmp = (
log_alpha[b, t - l, u]
+ acts[b, t - l, u, self.blank]
+ duration_acts[b, t - l, u, n]
)
log_alpha[b, t, u] = self.logsumexp(tmp, 1.0 * log_alpha[b, t, u])
else:
# u != 0 here, need to consider both blanks and non-blanks.
log_alpha[b, t, u] = -1000.0
for n, l in enumerate(self.durations):
if t - l >= 0:
if l > 0: # for blank emissions. Need to ensure index is not out-of-bound.
tmp = (
log_alpha[b, t - l, u]
+ acts[b, t - l, u, self.blank]
+ duration_acts[b, t - l, u, n]
)
log_alpha[b, t, u] = self.logsumexp(tmp, 1.0 * log_alpha[b, t, u])
# non-blank emissions.
tmp = (
log_alpha[b, t - l, u - 1]
+ acts[b, t - l, u - 1, labels[b, u - 1]]
+ duration_acts[b, t - l, u - 1, n]
)
log_alpha[b, t, u] = self.logsumexp(tmp, 1.0 * log_alpha[b, t, u])
log_probs = []
for b in range(B):
            tt = torch.tensor(-1000.0, device=acts.device)
# need to loop over all possible ways that blank with different durations contributes to the final loss.
for n, l in enumerate(self.durations):
if act_lens[b] - l >= 0 and l > 0:
bb = (
log_alpha[b, act_lens[b] - l, label_lens[b]]
+ acts[b, act_lens[b] - l, label_lens[b], self.blank]
+ duration_acts[b, act_lens[b] - l, label_lens[b], n]
)
tt = self.logsumexp(bb, 1.0 * tt)
log_probs.append(tt)
log_prob = torch.stack(log_probs)
return log_prob, log_alpha
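# Illustrative usage sketch (not part of the original NeMo file): for TDT, the last
# `len(durations)` channels of the joint output are duration logits, so the joint produces
# V + 1 + len(durations) outputs per (t, u) position while the blank index is still V.
def _example_tdt_loss_pytorch():
    torch.manual_seed(0)
    B, T, U, V = 2, 5, 3, 6
    durations = [0, 1, 2]
    acts = torch.randn(B, T, U + 1, V + 1 + len(durations))
    labels = torch.randint(low=0, high=V, size=(B, U))
    act_lens = torch.tensor([T, T])
    label_lens = torch.tensor([U, U - 1])
    loss_fn = TDTLossPytorch(blank=V, durations=durations, reduction='sum', sigma=0.05)
    return loss_fn(acts=acts, labels=labels, act_lens=act_lens, label_lens=label_lens)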
class MultiblankRNNTLossPytorch(Loss):
"""
Pure Python implementation of multi-blank transducer loss (https://arxiv.org/pdf/2211.03541.pdf)
"""
@property
def input_types(self):
"""Input types definitions for CTCLoss.
"""
return {
"acts": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
"labels": NeuralType(('B', 'T'), LabelsType()),
"act_lens": NeuralType(tuple('B'), LengthsType()),
"label_lens": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Output types definitions for CTCLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, blank, big_blank_durations, reduction: str = "sum", sigma: float = 0.0):
super().__init__()
self.blank = blank
self.big_blank_durations = big_blank_durations
self.reduction = reduction
self.sigma = sigma
def forward(self, acts, labels, act_lens, label_lens):
acts = torch.log_softmax(acts, -1) - self.sigma
forward_logprob, _ = self.compute_forward_prob(acts, labels, act_lens, label_lens)
losses = -forward_logprob
if self.reduction == 'mean_batch':
losses = losses.mean() # global batch size average
elif self.reduction == 'mean':
losses = torch.div(losses, label_lens).mean()
elif self.reduction == 'sum':
losses = losses.sum()
elif self.reduction == 'mean_volume':
losses = losses.sum() / label_lens.sum() # same as above but longer samples weigh more
return losses
def compute_forward_prob(self, acts, labels, act_lens, label_lens):
B, T, U, _ = acts.shape
log_alpha = torch.zeros(B, T, U, device=acts.device)
for t in range(T):
for u in range(U):
if u == 0:
if t == 0:
# this is the base case: (t=0, u=0) with log-alpha = 0.
log_alpha[:, t, u] = 0.0
else:
                        # this is the case (t > 0, u = 0), reached from (t - d, u)
                        # by emitting a blank symbol of duration d.
log_alpha[:, t, u] = log_alpha[:, t - 1, u] + acts[:, t - 1, 0, self.blank]
for i, d in enumerate(self.big_blank_durations):
if t >= d:
tt = log_alpha[:, t - d, u] + acts[:, t - d, 0, self.blank - 1 - i]
log_alpha[:, t, u] = torch.logsumexp(
torch.stack([1.0 * log_alpha[:, t, u], tt]), dim=0
)
else:
if t == 0:
# in case of (u > 0, t = 0), this is only reached from
# (t, u - 1) with a label emission.
gathered = torch.gather(
acts[:, t, u - 1], dim=1, index=labels[:, u - 1].view(-1, 1).type(torch.int64)
).reshape(-1)
log_alpha[:, t, u] = log_alpha[:, t, u - 1] + gathered
else:
# here both t and u are > 0, this state is reachable
# with two possibilities: (t - d, u) with emission of
# blank with duration d, or (t, u - 1) with a label emission.
# first we take care of the standard blank.
log_alpha[:, t, u] = torch.logsumexp(
torch.stack(
[
log_alpha[:, t - 1, u] + acts[:, t - 1, u, self.blank],
log_alpha[:, t, u - 1]
+ torch.gather(
acts[:, t, u - 1], dim=1, index=labels[:, u - 1].view(-1, 1).type(torch.int64)
).reshape(-1),
]
),
dim=0,
)
# now we go over all big blanks. They need to be considered if current t >= blank duration d.
for i, d in enumerate(self.big_blank_durations):
if t >= d:
tt = log_alpha[:, t - d, u] + acts[:, t - d, u, self.blank - 1 - i]
log_alpha[:, t, u] = torch.logsumexp(
torch.stack([1.0 * log_alpha[:, t, u], tt]), dim=0
)
log_probs = []
for b in range(B):
# here we need to add the final blank emission weights, which needs
# to consider all possible blank durations.
to_append = (
log_alpha[b, act_lens[b] - 1, label_lens[b]] + acts[b, act_lens[b] - 1, label_lens[b], self.blank]
)
for i, d in enumerate(self.big_blank_durations):
if act_lens[b] >= d:
tt = (
log_alpha[b, act_lens[b] - d, label_lens[b]]
+ acts[b, act_lens[b] - d, label_lens[b], self.blank - 1 - i]
)
to_append = torch.logsumexp(torch.stack([1.0 * to_append, tt]), dim=0)
log_probs.append(to_append)
log_prob = torch.stack(log_probs)
return log_prob, log_alpha
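# Illustrative usage sketch (not part of the original NeMo file): for the multi-blank loss,
# big blank i (with duration big_blank_durations[i]) occupies output index `blank - 1 - i`,
# so the joint produces V + 1 + len(big_blank_durations) outputs and `blank` is the last index.
def _example_multiblank_rnnt_loss_pytorch():
    torch.manual_seed(0)
    B, T, U, V = 2, 6, 3, 4
    big_blank_durations = [2, 4]
    num_outputs = V + 1 + len(big_blank_durations)
    acts = torch.randn(B, T, U + 1, num_outputs)
    labels = torch.randint(low=0, high=V, size=(B, U))
    act_lens = torch.tensor([T, T - 1])
    label_lens = torch.tensor([U, U])
    loss_fn = MultiblankRNNTLossPytorch(
        blank=num_outputs - 1, big_blank_durations=big_blank_durations, reduction='sum', sigma=0.0
    )
    return loss_fn(acts=acts, labels=labels, act_lens=act_lens, label_lens=label_lens)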
|
NeMo-main
|
nemo/collections/asr/losses/rnnt_pytorch.py
|
# ! /usr/bin/python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import operator
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.asr.losses.rnnt_pytorch import MultiblankRNNTLossPytorch, RNNTLossPytorch, TDTLossPytorch
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LogprobsType, LossType, NeuralType
from nemo.core.utils import numba_utils
from nemo.core.utils.k2_utils import K2_INSTALLATION_MESSAGE
from nemo.core.utils.numba_utils import NUMBA_INSTALLATION_MESSAGE
from nemo.utils import logging, logging_mode, model_utils
try:
import warprnnt_pytorch as warprnnt
WARP_RNNT_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
WARP_RNNT_AVAILABLE = False
try:
from nemo.collections.asr.parts.numba.rnnt_loss import MultiblankRNNTLossNumba, RNNTLossNumba, TDTLossNumba
NUMBA_RNNT_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
NUMBA_RNNT_AVAILABLE = False
try:
from nemo.collections.asr.parts.k2.graph_transducer import GraphRnntLoss
from nemo.collections.asr.parts.k2.w_transducer import GraphWTransducerLoss
K2_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
K2_AVAILABLE = False
WARP_RNNT_INSTALLATION_MESSAGE = (
"Could not import `warprnnt_pytorch`.\n"
"Please visit https://github.com/HawkAaron/warp-transducer "
"and follow the steps in the readme to build and install the "
"pytorch bindings for RNNT Loss, or use the provided docker "
"container that supports RNN-T loss."
)
@dataclass
class RNNTLossConfig:
loss_name: str
lib_name: str
is_available: bool = False
installation_msg: str = ""
min_version: Optional[str] = None
force_float32: bool = True # default True for now for all losses except graph-based
# Resolved list of available RNNT losses
RNNT_LOSS_RESOLVER = {
"warprnnt": RNNTLossConfig(
loss_name="warprnnt",
lib_name="warprnnt_pytorch",
is_available=WARP_RNNT_AVAILABLE,
installation_msg=WARP_RNNT_INSTALLATION_MESSAGE,
force_float32=True,
),
"warprnnt_numba": RNNTLossConfig(
loss_name="warprnnt_numba",
lib_name="numba",
min_version='0.53.0',
is_available=NUMBA_RNNT_AVAILABLE,
installation_msg=NUMBA_INSTALLATION_MESSAGE,
force_float32=False, # This is only temporarily false, will be dynamically updated during resolution
),
"pytorch": RNNTLossConfig(
loss_name="pytorch",
lib_name="torch",
min_version='0.0',
is_available=True,
installation_msg="Pure Pytorch implementation of RNN-T loss. Slow and for debugging purposes only.",
force_float32=True,
),
"multiblank_rnnt": RNNTLossConfig(
loss_name="multiblank_rnnt",
lib_name="numba",
min_version='0.53.0',
is_available=NUMBA_RNNT_AVAILABLE,
installation_msg=NUMBA_INSTALLATION_MESSAGE,
force_float32=True,
),
"multiblank_rnnt_pytorch": RNNTLossConfig(
loss_name="pytorch",
lib_name="torch",
min_version='0.0',
is_available=True,
installation_msg="Pure Pytorch implementation of Multiblank RNN-T loss. Slow and for debugging purposes only.",
force_float32=True,
),
"graph_w_transducer": RNNTLossConfig(
loss_name="graph_w_transducer",
lib_name="k2",
is_available=K2_AVAILABLE,
installation_msg=K2_INSTALLATION_MESSAGE,
force_float32=False,
),
"graph_rnnt": RNNTLossConfig(
loss_name="graph_rnnt",
lib_name="k2",
is_available=K2_AVAILABLE,
installation_msg=K2_INSTALLATION_MESSAGE,
force_float32=False,
),
"tdt": RNNTLossConfig(
loss_name="tdt",
lib_name="numba",
min_version='0.53.0',
is_available=NUMBA_RNNT_AVAILABLE,
installation_msg=NUMBA_INSTALLATION_MESSAGE,
),
"tdt_pytorch": RNNTLossConfig(
loss_name="tdt_pytorch",
lib_name="torch",
min_version='0.0',
is_available=True,
installation_msg="Pure Pytorch implementation of TDT loss. Slow and for debugging purposes only.",
),
}
RNNT_LOSS_RESOLVER['default'] = RNNT_LOSS_RESOLVER['warprnnt_numba']
def _warn_unused_additional_kwargs(loss_name, kwargs):
if len(kwargs) > 0:
logging.warning(
f"Loss function `{loss_name}` was provided with following additional kwargs,\n"
f"however they were ignored as it is unused.\n"
f"{kwargs}"
)
def _clean_kwargs(
loss_name: str, kwargs: Optional[Dict[str, Any]], init_method: Callable, ignore_params: Optional[Set[str]] = None
) -> Dict[str, Any]:
"""
Cleans kwargs for the given loss function. Warn if there are unused kwargs.
Args:
loss_name: name of the loss function
kwargs: kwargs to clean
init_method: LossClass.__init__ method
ignore_params: set of argument names for init_method to ignore
Returns:
only used kwargs for the given `init_method`
"""
if not kwargs:
return {}
init_params = set(inspect.signature(init_method).parameters.keys()) - {"self"}
if ignore_params is not None:
init_params -= ignore_params
unused_kwargs = dict()
used_kwargs = dict()
for key, value in kwargs.items():
if key not in init_params:
unused_kwargs[key] = value
else:
used_kwargs[key] = value
if len(unused_kwargs) > 0:
_warn_unused_additional_kwargs(loss_name, unused_kwargs)
return used_kwargs
def resolve_rnnt_default_loss_name() -> str:
return RNNT_LOSS_RESOLVER['default'].loss_name
def resolve_rnnt_loss(loss_name: str, blank_idx: int, loss_kwargs: dict = None) -> torch.nn.Module:
loss_function_names = list(RNNT_LOSS_RESOLVER.keys())
if loss_name not in loss_function_names:
raise ValueError(
f"Provided `loss_name` {loss_name} not in list of available RNNT losses \n" f"{loss_function_names}"
)
all_available_losses = {name: config for name, config in RNNT_LOSS_RESOLVER.items() if config.is_available}
loss_config = RNNT_LOSS_RESOLVER[loss_name] # type: RNNTLossConfig
# Re-raise import error with installation message
if not loss_config.is_available:
msg = (
f"Installed RNNT losses are : {list(all_available_losses.keys())}.\n"
f"****************************************************************\n"
f"To install the selected loss function, please follow the steps below:\n"
f"{loss_config.installation_msg}"
)
raise ImportError(msg)
# Library version check
if loss_config.min_version is not None:
ver_matched, msg = model_utils.check_lib_version(
loss_config.lib_name, checked_version=loss_config.min_version, operator=operator.ge
)
if ver_matched is False:
msg = (
f"{msg}\n"
f"****************************************************************\n"
f"To update the selected loss function, please follow the steps below:\n"
f"{loss_config.installation_msg}"
)
raise RuntimeError(msg)
# Resolve loss functions sequentially
loss_kwargs = {} if loss_kwargs is None else loss_kwargs
if isinstance(loss_kwargs, DictConfig):
loss_kwargs = OmegaConf.to_container(loss_kwargs, resolve=True)
# Get actual loss name for `default`
if loss_name == 'default':
loss_name = loss_config.loss_name
"""
Resolve RNNT loss functions
"""
if loss_name == 'warprnnt':
loss_func = warprnnt.RNNTLoss(blank=blank_idx, reduction='none')
_warn_unused_additional_kwargs(loss_name, loss_kwargs)
elif loss_name == 'warprnnt_numba':
# Update loss config's forced float32 flag if set to None
loss_config.force_float32 = not numba_utils.is_numba_cuda_fp16_supported()
fastemit_lambda = loss_kwargs.pop('fastemit_lambda', 0.0)
clamp = loss_kwargs.pop('clamp', -1.0)
loss_func = RNNTLossNumba(blank=blank_idx, reduction='none', fastemit_lambda=fastemit_lambda, clamp=clamp)
_warn_unused_additional_kwargs(loss_name, loss_kwargs)
elif loss_name == 'pytorch':
loss_func = RNNTLossPytorch(blank=blank_idx, reduction='none')
_warn_unused_additional_kwargs(loss_name, loss_kwargs)
elif loss_name == 'multiblank_rnnt':
fastemit_lambda = loss_kwargs.pop('fastemit_lambda', 0.0)
clamp = loss_kwargs.pop('clamp', -1.0)
big_blank_durations = loss_kwargs.pop('big_blank_durations', None)
sigma = loss_kwargs.pop('sigma', 0.0)
loss_func = MultiblankRNNTLossNumba(
blank=blank_idx,
big_blank_durations=big_blank_durations,
reduction='none',
fastemit_lambda=fastemit_lambda,
clamp=clamp,
sigma=sigma,
)
_warn_unused_additional_kwargs(loss_name, loss_kwargs)
elif loss_name == 'multiblank_rnnt_pytorch':
big_blank_durations = loss_kwargs.pop('big_blank_durations', None)
sigma = loss_kwargs.pop('sigma', 0.0)
loss_func = MultiblankRNNTLossPytorch(
blank=blank_idx, big_blank_durations=big_blank_durations, reduction='none', sigma=sigma
)
_warn_unused_additional_kwargs(loss_name, loss_kwargs)
elif loss_name == 'tdt':
fastemit_lambda = loss_kwargs.pop('fastemit_lambda', 0.0)
clamp = loss_kwargs.pop('clamp', -1.0)
durations = loss_kwargs.pop('durations', None)
sigma = loss_kwargs.pop('sigma', 0.0)
omega = loss_kwargs.pop('omega', 0.0)
loss_func = TDTLossNumba(
blank=blank_idx,
durations=durations,
reduction='none',
fastemit_lambda=fastemit_lambda,
clamp=clamp,
sigma=sigma,
omega=omega,
)
_warn_unused_additional_kwargs(loss_name, loss_kwargs)
elif loss_name == 'tdt_pytorch':
durations = loss_kwargs.pop('durations', None)
sigma = loss_kwargs.pop('sigma', 0.0)
loss_func = TDTLossPytorch(blank=blank_idx, durations=durations, reduction='none', sigma=sigma)
_warn_unused_additional_kwargs(loss_name, loss_kwargs)
elif loss_name == "graph_rnnt":
loss_kwargs = _clean_kwargs(loss_name, loss_kwargs, GraphRnntLoss.__init__, ignore_params={"blank"})
loss_func = GraphRnntLoss(blank=blank_idx, **loss_kwargs)
elif loss_name == "graph_w_transducer":
loss_kwargs = _clean_kwargs(loss_name, loss_kwargs, GraphWTransducerLoss.__init__, ignore_params={"blank"})
loss_func = GraphWTransducerLoss(blank=blank_idx, **loss_kwargs)
else:
raise ValueError(
f"Invalid value of `loss_name`: {loss_name}. Allowed loss names are :" f"{loss_function_names}"
)
return loss_func
class RNNTLoss(Loss):
@property
def input_types(self):
"""Input types definitions for CTCLoss.
"""
return {
"log_probs": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"input_lengths": NeuralType(tuple('B'), LengthsType()),
"target_lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
"""Output types definitions for CTCLoss.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, num_classes, reduction: str = 'mean_batch', loss_name: str = "default", loss_kwargs=None):
"""
RNN-T Loss function based on https://github.com/HawkAaron/warp-transducer.
Optionally, can utilize a numba implementation of the same loss without having to compile the loss,
        albeit there is a small speed penalty for the JIT Numba compile.
Note:
Requires Numba 0.53.0 or later to be installed to use this loss function.
Losses can be selected via the config, and optionally be passed keyword arguments as follows.
Examples:
.. code-block:: yaml
model: # RNNT Model config
...
loss:
loss_name: "warprnnt_numba"
warprnnt_numba_kwargs:
fastemit_lambda: 0.0
Warning:
In the case that GPU memory is exhausted in order to compute RNNTLoss, it might cause
a core dump at the cuda level with the following error message.
```
...
costs = costs.to(acts.device)
RuntimeError: CUDA error: an illegal memory access was encountered
terminate called after throwing an instance of 'c10::Error'
```
Please kill all remaining python processes after this point, and use a smaller batch size
for train, validation and test sets so that CUDA memory is not exhausted.
Args:
num_classes: Number of target classes for the joint network to predict.
In all cases (conventional RNNT, multi-blank RNNT, and TDT model), this equals the token-id
for the standard "blank" symbol. In particular, say V is the number of non-blank tokens in
the vocabulary, then in the case of,
standard RNNT: num_classes = V
multiblank RNNT: num_classes = V + number-big-blanks (since we store big-blanks before
standard blank, and the standard blank is the last symbol in the vocab)
TDT: num_classes = V. Note, V here does not include any of the "duration outputs".
reduction: Type of reduction to perform on loss. Possible values are
`mean_batch`, 'mean_volume`, `mean`, `sum` or None.
`None` will return a torch vector comprising the individual loss values of the batch.
`mean_batch` will average the losses in the batch
`mean` will divide each loss by the target length and then average
`mean_volume` will add up all the losses and divide by sum of target lengths
loss_name: String that is resolved into an RNNT loss function. Available list of losses
                is initialized in the `RNNT_LOSS_RESOLVER` dictionary.
loss_kwargs: Optional Dict of (str, value) pairs that are passed to the instantiated loss
function.
"""
super(RNNTLoss, self).__init__()
if reduction not in [None, 'mean', 'sum', 'mean_batch', 'mean_volume']:
            raise ValueError('`reduction` must be one of [None, mean, sum, mean_batch, mean_volume]')
self._blank = num_classes
self.reduction = reduction
self._loss = resolve_rnnt_loss(loss_name, blank_idx=self._blank, loss_kwargs=loss_kwargs)
self._force_float32 = RNNT_LOSS_RESOLVER[loss_name].force_float32
self._fp16_compat_checked = False
def reduce(self, losses, target_lengths):
if isinstance(losses, List):
losses = torch.cat(losses, 0)
target_lengths = torch.cat(target_lengths, 0)
if self.reduction == 'mean_batch':
losses = losses.mean() # global batch size average
elif self.reduction == 'mean':
losses = torch.div(losses, target_lengths).mean()
elif self.reduction == 'sum':
losses = losses.sum()
elif self.reduction == 'mean_volume':
losses = losses.sum() / target_lengths.sum() # same as above but longer samples weigh more
return losses
@typecheck()
def forward(self, log_probs, targets, input_lengths, target_lengths):
# Cast to int 64
targets = targets.long()
input_lengths = input_lengths.long()
target_lengths = target_lengths.long()
max_logit_len = input_lengths.max()
max_targets_len = target_lengths.max()
# Force cast joint to float32
if not self._force_float32 and numba_utils.is_numba_cuda_fp16_supported():
# Execute the kernel in fp16
pass
elif self._force_float32 and log_probs.dtype != torch.float32:
# Log just once if fp16 tensor was passed and fp16 Numba CUDA loss could not be used.
if log_probs.dtype == torch.float16 and not self._fp16_compat_checked:
_, reason = numba_utils.is_numba_cuda_fp16_supported(return_reason=True)
logging.warning(
f"Provided RNNT Joint tensor is of dtype {log_probs.dtype}, but RNNT loss could not be calculated "
f"in fp16 due to following reason stated below. Loss will be calculated in fp32. \n\n"
f"{reason}",
mode=logging_mode.ONCE,
)
self._fp16_compat_checked = True
# Upcast the activation tensor and compute loss and grads in fp32
logits_orig = log_probs
log_probs = log_probs.float()
del logits_orig # save memory *before* computing the loss
# Ensure that shape mismatch does not occur due to padding
# Due to padding and subsequent downsampling, it may be possible that
# max sequence length computed does not match the actual max sequence length
# of the log_probs tensor, therefore we increment the input_lengths by the difference.
# This difference is generally small.
if log_probs.shape[1] != max_logit_len:
log_probs = log_probs.narrow(dim=1, start=0, length=max_logit_len).contiguous()
# Reduce transcript length to correct alignment if additional padding was applied.
# Transcript: [B, L] -> [B, L']; If L' < L
if not targets.is_contiguous():
targets = targets.contiguous()
if targets.shape[1] != max_targets_len:
targets = targets.narrow(dim=1, start=0, length=max_targets_len).contiguous()
# Temporarily override loss reduction
loss_reduction = self._loss.reduction
self._loss.reduction = None
# Compute RNNT loss
loss = self._loss(acts=log_probs, labels=targets, act_lens=input_lengths, label_lens=target_lengths)
# Loss reduction can be dynamic, so reset it after call
self._loss.reduction = loss_reduction
# reduce here using our own reduction function
if self.reduction is not None:
loss = self.reduce(loss, target_lengths)
# del new variables that may have been created
del (
log_probs,
targets,
input_lengths,
target_lengths,
)
return loss
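# Minimal usage sketch (not part of the original NeMo file): resolve the pure-PyTorch RNNT loss
# (no Numba or warp-transducer dependency) and apply it to random joint outputs. `num_classes`
# is the blank index, i.e. the number of non-blank tokens V, and the joint output has V + 1 channels.
if __name__ == "__main__":
    torch.manual_seed(0)
    B, T, U, V = 2, 8, 4, 10
    loss_fn = RNNTLoss(num_classes=V, reduction='mean_batch', loss_name='pytorch')
    log_probs = torch.randn(B, T, U + 1, V + 1)
    targets = torch.randint(low=0, high=V, size=(B, U))
    input_lengths = torch.tensor([T, T - 2])
    target_lengths = torch.tensor([U, U - 1])
    loss = loss_fn(log_probs=log_probs, targets=targets, input_lengths=input_lengths, target_lengths=target_lengths)
    print(f'RNNT loss: {loss.item():.4f}')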
|
NeMo-main
|
nemo/collections/asr/losses/rnnt.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import nn
from nemo.core import Loss, typecheck
from nemo.core.neural_types import AcousticEncodedRepresentation, LengthsType, LossType, NeuralType, SpectrogramType
__all__ = ["ContrastiveLoss"]
class ContrastiveLoss(Loss):
@property
def input_types(self):
"""Input types definitions for Contrastive.
"""
return {
"spectrograms": NeuralType(("B", "D", "T"), SpectrogramType()),
"spec_masks": NeuralType(("B", "D", "T"), SpectrogramType()),
"decoder_outputs": NeuralType(("B", "T", "D"), AcousticEncodedRepresentation()),
"decoder_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
"""Output types definitions for Contrastive.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
@property
def needs_labels(self):
return False
def __init__(
self,
in_dim: int,
proj_dim: int = 128,
combine_time_steps: int = 1,
num_negatives: int = 100,
quantized_targets: bool = False,
codebook_size: int = 320,
prob_ppl_weight: float = 0.1,
logit_temp: float = 0.1,
reduce: str = "sum",
sample_from_same_utterance_only: bool = True,
sample_from_non_masked: bool = False,
sample_from_codebook: bool = False,
group_loss: bool = False,
num_groups: int = 2,
quantizer_temp_start: float = 2,
quantizer_temp_min: float = 0.5,
quantizer_temp_decay: float = 0.999995,
mask_threshold: float = 0.8,
store_ids: bool = True,
reduce_ids: bool = False,
multiplier: float = 16.0,
):
"""
Loss function representing the contrastive task of identifying the true latent speech representation of
the masked spectrogram steps from a set of sampled distractors.
Args:
in_dim: Number of spectrogram channels.
proj_dim: Number of channels in the model outputs.
combine_time_steps: How many time steps should be combined into a single representation.
num_negatives: Number of sampled negatives for each target.
quantized_targets: Bool that determines if the targets should be quantized.
codebook_size: Number of vectors in the codebook per group.
prob_ppl_weight: Float multiplier on the perplexity loss for target quantization.
logit_temp: Float temperature for normalizing logits.
reduce: String representing the type of reduction used for cross entropy.
sample_from_same_utterance_only: Bool that determines if negatives should be sampled only from same utterance.
sample_from_non_masked: Bool that determines if negatives should be sampled from non-masked steps of the spectrogram.
sample_from_codebook: Bool that determines if negatives should be sampled from entire codebook.
group_loss: Bool that determines if loss should be computed separately for each group in the quantizer codebook.
num_groups: Number of groups in the quantizer codebook.
quantizer_temp_start: Starting temperature in quantizer.
quantizer_temp_min: Minimum temperature in quantizer.
quantizer_temp_decay: Decay rate of quantizer temperature per global step.
mask_threshold: Float threshold for determining if a time step of the spectrogram is masked based on percent of masked channels.
store_ids: Bool that determines if the quantizer ids will be stored to be potentially used by other losses.
            reduce_ids: Bool that determines if we convert any sequence of consecutive equivalent ids to a single occurrence of that id.
            multiplier: Float multiplier on the final loss.
"""
super().__init__()
quantizer_temp = (quantizer_temp_start, quantizer_temp_min, quantizer_temp_decay)
self.quantized_targets = quantized_targets
self.num_negatives = num_negatives
self.prob_ppl_weight = prob_ppl_weight
if self.quantized_targets:
quantizer_cfg = {
"_target_": "nemo.collections.asr.parts.submodules.ssl_quantizers.GumbelVectorQuantizer",
"dim": in_dim * combine_time_steps,
"vq_dim": proj_dim,
"num_vars": codebook_size,
"groups": num_groups,
"temp": quantizer_temp,
"combine_groups": True,
"time_first": True,
}
self.quantizer = ContrastiveLoss.from_config_dict(quantizer_cfg)
self.prob_ppl_weight = prob_ppl_weight
self.logit_temp = logit_temp
self.reduce = reduce
self.combine_time_steps = combine_time_steps
self.sample_from_same_utterance_only = sample_from_same_utterance_only
self.sample_from_non_masked = sample_from_non_masked
self.sample_from_codebook = sample_from_codebook
self.group_loss = group_loss
self.mask_threshold = mask_threshold
self.multiplier = multiplier
self.store_ids = store_ids
self.reduce_ids = reduce_ids
if not self.quantized_targets:
self.target_proj = nn.Linear(in_dim * combine_time_steps, proj_dim)
def sample_negatives(self, y, num):
# y - T'xBxC or T'xC
high = y.shape[0]
neg_idxs = torch.multinomial(torch.ones((num, high), device=y.device), self.num_negatives)
negs = y[neg_idxs.view(-1)]
negs = negs.view((num, self.num_negatives) + y.shape[1:])
negs = negs.transpose(0, 1)
# negs - NxT'xBxC or NxT'xC
return negs, neg_idxs
@typecheck()
def forward(self, spectrograms, spec_masks, decoder_outputs, decoder_lengths=None):
spec_in = spectrograms.transpose(-2, -1)
masks = spec_masks.transpose(-2, -1)
targets = spec_in
# BxTxC
targets = targets.reshape(targets.shape[0], targets.shape[1] // self.combine_time_steps, -1)
masks = masks.reshape(targets.shape[0], targets.shape[1], -1)
if self.quantized_targets:
if self.store_ids:
# store ids for use by other losses
targets, prob_ppl_loss, cur_codebook_temp, self.target_ids = self.quantizer(targets, return_ids=True)
if self.reduce_ids:
                    # reduce consecutive equivalent ids to a single occurrence
_, indices = torch.unique_consecutive(self.target_ids, return_inverse=True)
indices -= indices.min(dim=1, keepdims=True)[0]
reduced_ids = torch.zeros_like(self.target_ids)
reduced_ids = reduced_ids.scatter_(1, indices, self.target_ids)
reduced_lens = indices.max(dim=-1)[0] + 1
self.target_ids = reduced_ids.narrow(1, 0, reduced_lens.max())
self.target_lengths = reduced_lens
else:
self.target_lengths = None
else:
targets, prob_ppl_loss, cur_codebook_temp = self.quantizer(targets)
else:
targets = self.target_proj(targets)
if self.sample_from_same_utterance_only:
bs = decoder_outputs.shape[0]
masks = masks.mean(-1) > self.mask_threshold
out_masked_only = decoder_outputs[masks]
targets_masked_only = targets[masks]
out_masked_only = out_masked_only.reshape(bs, -1, out_masked_only.shape[-1])
targets_masked_only = targets_masked_only.reshape(bs, -1, targets_masked_only.shape[-1])
# BxT'xC
# number of masked time steps to predict (T')
# -> T'xBxC
out_masked_only = out_masked_only.transpose(0, 1)
targets_masked_only = targets_masked_only.transpose(0, 1)
# -> T'xBxC
if self.sample_from_non_masked:
# sample from all steps in utterance
negatives, _ = self.sample_negatives(
targets.transpose(0, 1), targets_masked_only.size(0), # TxBxC # T'
)
else:
# only sample from masked steps in utterance
negatives, _ = self.sample_negatives(targets_masked_only, targets_masked_only.size(0)) # T'xBxC # T'
# NxT'xBxC
out_masked_only = out_masked_only.reshape(-1, out_masked_only.shape[-1])
targets_masked_only = targets_masked_only.reshape(-1, targets_masked_only.shape[-1])
negatives = negatives.reshape(self.num_negatives, -1, negatives.shape[-1])
# T'BxC and NxT'BxC
else:
masks = masks.mean(-1) > self.mask_threshold
out_masked_only = decoder_outputs[masks]
targets_masked_only = targets[masks]
# T'xC
# number of masked time steps to predict (T')
if self.group_loss:
num_groups = self.quantizer.groups
negatives = self.quantizer.vars.reshape(num_groups, self.quantizer.num_vars, -1)
# GxNx(C//G)
negatives = negatives.transpose(0, 1)
# NxGx(C//G)
negatives = negatives.unsqueeze(1).expand(-1, out_masked_only.shape[0], -1, -1)
# NxT'xGx(C//G)
negatives = negatives.reshape(negatives.shape[0], -1, negatives.shape[-1])
# NxT'Gx(C//G)
out_masked_only = out_masked_only.reshape(-1, out_masked_only.shape[-1] // num_groups)
targets_masked_only = targets_masked_only.reshape(-1, targets_masked_only.shape[-1] // num_groups)
# T'Gx(C//G)
elif self.sample_from_codebook:
# sample from the full codebook
negatives = self.quantizer.sample_from_codebook(self.num_negatives, targets_masked_only.size(0))
elif self.sample_from_non_masked:
# sample from all steps in batch
negatives, _ = self.sample_negatives(
targets.reshape(targets.shape[0] * targets.shape[1], -1), targets_masked_only.size(0), # BTxC
) # T'
else:
# only sample from masked steps
negatives, _ = self.sample_negatives(targets_masked_only, targets_masked_only.size(0)) # T'xC # T'
# NxT'xC
# Calculate similarity between outputs and all targets
similarity_scores = self._calculate_similarity(out_masked_only, negatives, targets_masked_only)
# (1+N)xT'
# cosine similarity of outs with targets + N negatives
# Create targets of size T
similarity_targets = decoder_outputs.new_zeros(similarity_scores.size(1), dtype=torch.long)
# T'
# targets are 0, since it's the first, followed by N sampled negatives
# Transpose similarity scores to TxF for loss
similarity_scores = similarity_scores.transpose(0, 1)
# T'x(1+N)
loss = F.cross_entropy(similarity_scores, similarity_targets, reduction=self.reduce)
sample_size = similarity_targets.numel()
if self.prob_ppl_weight != 0 and self.quantized_targets:
prob_ppl_loss = self.prob_ppl_weight * prob_ppl_loss * sample_size
loss += prob_ppl_loss
if not isinstance(loss, torch.Tensor):
loss = torch.Tensor([0]).to(device=decoder_outputs.device)
batch_size = spectrograms.shape[0]
loss *= self.multiplier / batch_size
return loss
def _calculate_similarity(self, logits, negatives, targets):
neg_is_pos = (targets == negatives).all(-1)
# NxT' - true where the negative is actually the positive
targets = targets.unsqueeze(0)
# 1xT'xC
targets = torch.cat([targets, negatives], dim=0)
# (1+N)xT'XC
logits = torch.cosine_similarity(
logits.float().unsqueeze(0).expand(targets.shape[0], -1, -1), targets.float(), dim=-1
).type_as(logits)
# (1+N)xT'
logits /= self.logit_temp
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
return logits
def set_num_updates(self, num_updates):
if self.quantized_targets:
self.quantizer.set_num_updates(num_updates)
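# Minimal usage sketch (not part of the original NeMo file), using non-quantized targets
# (quantized_targets=False), a small number of negatives, and a mask that fully masks the same
# number of time steps in every utterance, as required when sample_from_same_utterance_only=True.
if __name__ == "__main__":
    torch.manual_seed(0)
    B, D, T, proj = 2, 16, 12, 8
    loss_fn = ContrastiveLoss(
        in_dim=D, proj_dim=proj, combine_time_steps=1, num_negatives=4, quantized_targets=False
    )
    spectrograms = torch.randn(B, D, T)
    spec_masks = torch.zeros(B, D, T)
    spec_masks[:, :, : T // 2] = 1.0  # the first half of the time steps is fully masked
    decoder_outputs = torch.randn(B, T, proj)
    loss = loss_fn(spectrograms=spectrograms, spec_masks=spec_masks, decoder_outputs=decoder_outputs)
    print(f'Contrastive loss: {loss.item():.4f}')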
|
NeMo-main
|
nemo/collections/asr/losses/ssl_losses/contrastive.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.losses import CTCLoss
from nemo.core import Loss, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LossType, NeuralType, SpectrogramType, VoidType
__all__ = ["CTCLossForSSL"]
class CTCLossForSSL(Loss):
@property
def input_types(self):
"""Input types definitions for Contrastive.
"""
return {
"spec_masks": NeuralType(("B", "D", "T"), SpectrogramType()),
"decoder_outputs": NeuralType(("B", "T", "D"), VoidType()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"decoder_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"target_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
"""Output types definitions for Contrastive.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
@property
def needs_labels(self):
return True
def __init__(self, num_classes, zero_infinity=True, reduction='mean_batch'):
super().__init__()
self.loss = CTCLoss(num_classes=num_classes, reduction=reduction, zero_infinity=zero_infinity)
@typecheck()
def forward(self, spec_masks, decoder_outputs, targets, decoder_lengths=None, target_lengths=None):
loss = self.loss(
log_probs=decoder_outputs, targets=targets, input_lengths=decoder_lengths, target_lengths=target_lengths
)
return loss
|
NeMo-main
|
nemo/collections/asr/losses/ssl_losses/ctc.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import nn
from nemo.core import Loss, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LogprobsType, LossType, NeuralType, SpectrogramType
__all__ = ["MLMLoss"]
class MLMLoss(Loss):
@property
def input_types(self):
"""Input types definitions for Contrastive.
"""
return {
"spec_masks": NeuralType(("B", "D", "T"), SpectrogramType()),
"decoder_outputs": NeuralType(("B", "T", "D"), LogprobsType()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"decoder_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"target_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
"""Output types definitions for Contrastive.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
@property
def needs_labels(self):
return True
def __init__(
self, combine_time_steps: int = 1, mask_threshold: float = 0.8,
):
super().__init__()
self.nll_loss = nn.NLLLoss()
self.combine_time_steps = combine_time_steps
self.mask_threshold = mask_threshold
@typecheck()
def forward(self, spec_masks, decoder_outputs, targets, decoder_lengths=None, target_lengths=None):
# outputs are log_probs
masks = spec_masks.transpose(-2, -1)
# BxTxC
masks = masks.reshape(masks.shape[0], masks.shape[1] // self.combine_time_steps, -1)
masks = masks.mean(-1) > self.mask_threshold
out_masked_only = decoder_outputs[masks]
targets = F.pad(targets, (0, masks.shape[-1] - targets.shape[-1]))
targets_masked_only = targets[masks]
loss = self.nll_loss(out_masked_only, targets_masked_only)
loss = torch.mean(loss)
return loss
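# Minimal usage sketch (not part of the original NeMo file): decoder outputs are per-step log
# probabilities over the target vocabulary, and the loss is evaluated only on time steps whose
# spectrogram channels are (mostly) masked.
if __name__ == "__main__":
    torch.manual_seed(0)
    B, D, T, num_classes = 2, 16, 10, 32
    loss_fn = MLMLoss(combine_time_steps=1, mask_threshold=0.8)
    spec_masks = torch.zeros(B, D, T)
    spec_masks[:, :, ::2] = 1.0  # mask every other time step
    decoder_outputs = torch.log_softmax(torch.randn(B, T, num_classes), dim=-1)
    targets = torch.randint(low=0, high=num_classes, size=(B, T))
    loss = loss_fn(spec_masks=spec_masks, decoder_outputs=decoder_outputs, targets=targets)
    print(f'MLM loss: {loss.item():.4f}')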
|
NeMo-main
|
nemo/collections/asr/losses/ssl_losses/mlm.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.losses.ssl_losses.contrastive import ContrastiveLoss
|
NeMo-main
|
nemo/collections/asr/losses/ssl_losses/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.losses.rnnt import RNNTLoss
from nemo.core import Loss, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LogprobsType, LossType, NeuralType, SpectrogramType
__all__ = ["RNNTLossForSSL"]
class RNNTLossForSSL(Loss):
@property
def input_types(self):
"""Input types definitions for Contrastive.
"""
return {
"spec_masks": NeuralType(("B", "D", "T"), SpectrogramType()),
"decoder_outputs": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"decoder_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"target_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
"""Output types definitions for Contrastive.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
@property
def needs_labels(self):
return True
def __init__(self, num_classes):
super().__init__()
self.loss = RNNTLoss(num_classes=num_classes)
@typecheck()
def forward(self, spec_masks, decoder_outputs, targets, decoder_lengths=None, target_lengths=None):
loss = self.loss(
log_probs=decoder_outputs, targets=targets, input_lengths=decoder_lengths, target_lengths=target_lengths
)
return loss
|
NeMo-main
|
nemo/collections/asr/losses/ssl_losses/rnnt.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from tqdm.auto import tqdm
from nemo.collections.asr.data.audio_to_ctm_dataset import FrameCtmUnit
from nemo.collections.asr.data.audio_to_text_dali import DALIOutputs
from nemo.collections.asr.models.asr_model import ASRModel
from nemo.utils import logging
class AlignerWrapperModel(ASRModel):
"""ASR model wrapper to perform alignment building.
Functionality is limited to the components needed to build an alignment."""
def __init__(self, model: ASRModel, cfg: DictConfig):
model_cfg = model.cfg
for ds in ("train_ds", "validation_ds", "test_ds"):
if ds in model_cfg:
model_cfg[ds] = None
super().__init__(cfg=model_cfg, trainer=model.trainer)
self._model = model
self.alignment_type = cfg.get("alignment_type", "forced")
self.word_output = cfg.get("word_output", True)
self.cpu_decoding = cfg.get("cpu_decoding", False)
self.decode_batch_size = cfg.get("decode_batch_size", 0)
# list possible alignment types here for future work
if self.alignment_type == "forced":
pass
elif self.alignment_type == "argmax":
pass
elif self.alignment_type == "loose":
raise NotImplementedError(f"alignment_type=`{self.alignment_type}` is not supported at the moment.")
elif self.alignment_type == "rnnt_decoding_aux":
raise NotImplementedError(f"alignment_type=`{self.alignment_type}` is not supported at the moment.")
else:
raise RuntimeError(f"Unsupported alignment type: {self.alignment_type}")
self._init_model_specific(cfg)
def _init_ctc_alignment_specific(self, cfg: DictConfig):
"""Part of __init__ intended to initialize attributes specific to the alignment type for CTC models.
This method is not supposed to be called outside of __init__.
"""
# do nothing for regular CTC with `argmax` alignment type
if self.alignment_type == "argmax" and not hasattr(self._model, "use_graph_lm"):
return
from nemo.collections.asr.modules.graph_decoder import ViterbiDecoderWithGraph
if self.alignment_type == "forced":
if hasattr(self._model, "use_graph_lm"):
if self._model.use_graph_lm:
self.graph_decoder = self._model.transcribe_decoder
self._model.use_graph_lm = False
else:
self.graph_decoder = ViterbiDecoderWithGraph(
num_classes=self.blank_id, backend="k2", dec_type="topo", return_type="1best"
)
# override split_batch_size
self.graph_decoder.split_batch_size = self.decode_batch_size
else:
self.graph_decoder = ViterbiDecoderWithGraph(
num_classes=self.blank_id, split_batch_size=self.decode_batch_size,
)
# override decoder args if a config is provided
decoder_module_cfg = cfg.get("decoder_module_cfg", None)
if decoder_module_cfg is not None:
self.graph_decoder._decoder.intersect_pruned = decoder_module_cfg.get("intersect_pruned")
self.graph_decoder._decoder.intersect_conf = decoder_module_cfg.get("intersect_conf")
return
if self.alignment_type == "argmax":
# we use transcribe_decoder to get topology-independent output
if not self._model.use_graph_lm:
self._model.transcribe_decoder = ViterbiDecoderWithGraph(
num_classes=self.blank_id, backend="k2", dec_type="topo", return_type="1best"
)
# override decoder args
self._model.transcribe_decoder.return_ilabels = False
self._model.transcribe_decoder.output_aligned = True
self._model.transcribe_decoder.split_batch_size = self.decode_batch_size
self._model.use_graph_lm = False
return
def _init_rnnt_alignment_specific(self, cfg: DictConfig):
"""Part of __init__ intended to initialize attributes specific to the alignment type for RNNT models.
This method is not supposed to be called outside of __init__.
"""
if self.alignment_type == "argmax":
return
from nemo.collections.asr.modules.graph_decoder import ViterbiDecoderWithGraph
if self.alignment_type == "forced":
self.predictor_window_size = cfg.rnnt_cfg.get("predictor_window_size", 0)
self.predictor_step_size = cfg.rnnt_cfg.get("predictor_step_size", 0)
from nemo.collections.asr.parts.k2.utils import apply_rnnt_prune_ranges, get_uniform_rnnt_prune_ranges
self.prepare_pruned_outputs = lambda encoder_outputs, encoded_len, decoder_outputs, transcript_len: apply_rnnt_prune_ranges(
encoder_outputs,
decoder_outputs,
get_uniform_rnnt_prune_ranges(
encoded_len,
transcript_len,
self.predictor_window_size + 1,
self.predictor_step_size,
encoder_outputs.size(1),
).to(device=encoder_outputs.device),
)
from nemo.collections.asr.parts.k2.classes import GraphModuleConfig
self.graph_decoder = ViterbiDecoderWithGraph(
num_classes=self.blank_id,
backend="k2",
dec_type="topo_rnnt_ali",
split_batch_size=self.decode_batch_size,
graph_module_cfg=OmegaConf.structured(
GraphModuleConfig(
topo_type="minimal",
predictor_window_size=self.predictor_window_size,
predictor_step_size=self.predictor_step_size,
)
),
)
# override decoder args if a config is provided
decoder_module_cfg = cfg.get("decoder_module_cfg", None)
if decoder_module_cfg is not None:
self.graph_decoder._decoder.intersect_pruned = decoder_module_cfg.get("intersect_pruned")
self.graph_decoder._decoder.intersect_conf = decoder_module_cfg.get("intersect_conf")
return
def _init_model_specific(self, cfg: DictConfig):
"""Part of __init__ intended to initialize attributes specific to the model type.
This method is not supposed to be called outside of __init__.
"""
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
if isinstance(self._model, EncDecCTCModel):
self.model_type = "ctc"
self.blank_id = self._model.decoder.num_classes_with_blank - 1
self._predict_impl = self._predict_impl_ctc
prob_suppress_index = cfg.ctc_cfg.get("prob_suppress_index", -1)
prob_suppress_value = cfg.ctc_cfg.get("prob_suppress_value", 1.0)
if prob_suppress_value > 1 or prob_suppress_value <= 0:
raise ValueError(f"Suppression value has to be in (0,1]: {prob_suppress_value}")
if prob_suppress_index < -(self.blank_id + 1) or prob_suppress_index > self.blank_id:
            raise ValueError(
                f"Suppression index for the provided model has to be in [{-(self.blank_id + 1)},{self.blank_id}]: {prob_suppress_index}"
            )
self.prob_suppress_index = (
self._model.decoder.num_classes_with_blank + prob_suppress_index
if prob_suppress_index < 0
else prob_suppress_index
)
self.prob_suppress_value = prob_suppress_value
self._init_ctc_alignment_specific(cfg)
return
from nemo.collections.asr.models.rnnt_models import EncDecRNNTModel
if isinstance(self._model, EncDecRNNTModel):
self.model_type = "rnnt"
self.blank_id = self._model.joint.num_classes_with_blank - 1
self.log_softmax = None if self._model.joint.log_softmax is None else not self._model.joint.log_softmax
self._predict_impl = self._predict_impl_rnnt
decoding_config = copy.deepcopy(self._model.cfg.decoding)
decoding_config.strategy = "greedy_batch"
with open_dict(decoding_config):
decoding_config.preserve_alignments = True
decoding_config.fused_batch_size = -1
self._model.change_decoding_strategy(decoding_config)
self._init_rnnt_alignment_specific(cfg)
return
raise RuntimeError(f"Unsupported model type: {type(self._model)}")
def _rnnt_joint_pruned(
self,
encoder_outputs: torch.Tensor,
encoded_len: torch.Tensor,
decoder_outputs: torch.Tensor,
transcript_len: torch.Tensor,
) -> torch.Tensor:
"""A variant of the RNNT Joiner tensor calculation with pruned Encoder and Predictor sum.
Only the uniform pruning is supported at the moment.
"""
encoder_outputs = self._model.joint.enc(encoder_outputs.transpose(1, 2)) # (B, T, H)
decoder_outputs = self._model.joint.pred(decoder_outputs.transpose(1, 2)) # (B, U, H)
encoder_outputs_pruned, decoder_outputs_pruned = self.prepare_pruned_outputs(
encoder_outputs, encoded_len, decoder_outputs, transcript_len
)
res = self._model.joint.joint_net(encoder_outputs_pruned + decoder_outputs_pruned)
# copied from model.joint.joint(...)
if self._model.joint.log_softmax is None:
if not res.is_cuda:
res = res.log_softmax(dim=-1)
else:
if self._model.joint.log_softmax:
res = res.log_softmax(dim=-1)
return res
def _apply_prob_suppress(self, log_probs: torch.Tensor) -> torch.Tensor:
"""Multiplies probability of an element with index self.prob_suppress_index by self.prob_suppress_value times
with stochasticity preservation of the log_probs tensor.
Often used to suppress <blank> probability of the output of a CTC model.
Example:
For
- log_probs = torch.log(torch.tensor([0.015, 0.085, 0.9]))
- self.prob_suppress_index = -1
- self.prob_suppress_value = 0.5
the result of _apply_prob_suppress(log_probs) is
- torch.log(torch.tensor([0.0825, 0.4675, 0.45]))
"""
exp_probs = (log_probs).exp()
x = exp_probs[:, :, self.prob_suppress_index]
# we cannot do y=1-x because exp_probs can be not stochastic due to numerical limitations
y = torch.cat(
[exp_probs[:, :, : self.prob_suppress_index], exp_probs[:, :, self.prob_suppress_index + 1 :]], 2
).sum(-1)
b1 = torch.full((exp_probs.shape[0], exp_probs.shape[1], 1), self.prob_suppress_value, device=log_probs.device)
b2 = ((1 - self.prob_suppress_value * x) / y).unsqueeze(2).repeat(1, 1, exp_probs.shape[-1] - 1)
return (
exp_probs * torch.cat([b2[:, :, : self.prob_suppress_index], b1, b2[:, :, self.prob_suppress_index :]], 2)
).log()
def _prepare_ctc_argmax_predictions(
self, log_probs: torch.Tensor, encoded_len: torch.Tensor
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""Obtains argmax predictions with corresponding probabilities.
Replaces consecutive repeated indices in the argmax predictions with the <blank> index.
"""
if hasattr(self._model, "transcribe_decoder"):
            predictions, _, probs = self._model.transcribe_decoder.forward(
                log_probs=log_probs, log_probs_length=encoded_len
            )
else:
greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)
probs_tensor, _ = log_probs.exp().max(dim=-1, keepdim=False)
predictions, probs = [], []
for i in range(log_probs.shape[0]):
utt_len = encoded_len[i]
probs.append(probs_tensor[i, :utt_len])
pred_candidate = greedy_predictions[i, :utt_len].cpu()
# replace consecutive tokens with <blank>
previous = self.blank_id
for j in range(utt_len):
p = pred_candidate[j]
if p == previous and previous != self.blank_id:
pred_candidate[j] = self.blank_id
previous = p
predictions.append(pred_candidate.to(device=greedy_predictions.device))
return predictions, probs
def _predict_impl_rnnt_argmax(
self,
encoded: torch.Tensor,
encoded_len: torch.Tensor,
transcript: torch.Tensor,
transcript_len: torch.Tensor,
sample_id: torch.Tensor,
    ) -> List[Tuple[int, List['FrameCtmUnit']]]:
"""Builds time alignment of an encoded sequence.
This method assumes that the RNNT model is used and the alignment type is `argmax`.
        It produces a list of sample ids paired with 4-tuples (label, start_frame, length, probability), called FrameCtmUnit.
"""
hypotheses = self._model.decoding.rnnt_decoder_predictions_tensor(
encoded, encoded_len, return_hypotheses=True
)[0]
results = []
for s_id, hypothesis in zip(sample_id, hypotheses):
pred_ids = hypothesis.y_sequence.tolist()
tokens = self._model.decoding.decode_ids_to_tokens(pred_ids)
token_begin = hypothesis.timestep
token_len = [j - i for i, j in zip(token_begin, token_begin[1:] + [len(hypothesis.alignments)])]
# we have no token probabilities for the argmax rnnt setup
token_prob = [1.0] * len(tokens)
if self.word_output:
words = [w for w in self._model.decoding.decode_tokens_to_str(pred_ids).split(" ") if w != ""]
words, word_begin, word_len, word_prob = (
self._process_tokens_to_words(tokens, token_begin, token_len, token_prob, words)
if hasattr(self._model, "tokenizer")
else self._process_char_with_space_to_words(tokens, token_begin, token_len, token_prob, words)
)
results.append(
(s_id, [FrameCtmUnit(t, b, l, p) for t, b, l, p in zip(words, word_begin, word_len, word_prob)])
)
else:
results.append(
(
s_id,
[FrameCtmUnit(t, b, l, p) for t, b, l, p in zip(tokens, token_begin, token_len, token_prob)],
)
)
return results
def _process_tokens_to_words(
self,
tokens: List[str],
token_begin: List[int],
token_len: List[int],
token_prob: List[float],
words: List[str],
) -> Tuple[List[str], List[int], List[int], List[float]]:
"""Transforms alignment information from token level to word level.
Used when self._model.tokenizer is present.
"""
# suppose that there are no whitespaces
assert len(self._model.tokenizer.text_to_tokens(words[0])) == len(
self._model.tokenizer.text_to_tokens(words[0] + " ")
)
word_begin, word_len, word_prob = [], [], []
token_len_nonzero = [(t_l if t_l > 0 else 1) for t_l in token_len]
i = 0
for word in words:
loc_tokens = self._model.tokenizer.text_to_tokens(word)
step = len(loc_tokens)
# we assume that an empty word consists of only one token
# drop current token
if step == 0:
token_begin[i + 1] = token_begin[i]
token_len[i + 1] += token_len[i]
token_len_nonzero[i + 1] += token_len_nonzero[i]
del tokens[i], token_begin[i], token_len[i], token_len_nonzero[i], token_prob[i]
continue
# fix <unk> tokenization
if step == 2 and loc_tokens[-1] == "??":
step -= 1
j = i + step
word_begin.append(token_begin[i])
word_len.append(sum(token_len[i:j]))
denominator = sum(token_len_nonzero[i:j])
word_prob.append(sum(token_prob[k] * token_len_nonzero[k] for k in range(i, j)) / denominator)
i = j
return words, word_begin, word_len, word_prob
def _process_char_with_space_to_words(
self,
tokens: List[str],
token_begin: List[int],
token_len: List[int],
token_prob: List[float],
words: List[str],
) -> Tuple[List[str], List[int], List[int], List[float]]:
"""Transforms alignment information from character level to word level.
This method includes separator (typically the space) information in the results.
Used with character-based models (no self._model.tokenizer).
"""
# suppose that there are no whitespaces anywhere except between words
space_idx = (np.array(tokens) == " ").nonzero()[0].tolist()
assert len(words) == len(space_idx) + 1
token_len_nonzero = [(t_l if t_l > 0 else 1) for t_l in token_len]
if len(space_idx) == 0:
word_begin = [token_begin[0]]
word_len = [sum(token_len)]
denominator = sum(token_len_nonzero)
word_prob = [sum(t_p * t_l for t_p, t_l in zip(token_prob, token_len_nonzero)) / denominator]
else:
space_word = "[SEP]"
word_begin = [token_begin[0]]
word_len = [sum(token_len[: space_idx[0]])]
denominator = sum(token_len_nonzero[: space_idx[0]])
word_prob = [sum(token_prob[k] * token_len_nonzero[k] for k in range(space_idx[0])) / denominator]
words_with_space = [words[0]]
for word, i, j in zip(words[1:], space_idx, space_idx[1:] + [len(tokens)]):
# append space
word_begin.append(token_begin[i])
word_len.append(token_len[i])
word_prob.append(token_prob[i])
words_with_space.append(space_word)
# append next word
word_begin.append(token_begin[i + 1])
word_len.append(sum(token_len[i + 1 : j]))
denominator = sum(token_len_nonzero[i + 1 : j])
word_prob.append(sum(token_prob[k] * token_len_nonzero[k] for k in range(i + 1, j)) / denominator)
words_with_space.append(word)
words = words_with_space
return words, word_begin, word_len, word_prob
def _results_to_ctmUnits(
self, s_id: int, pred: torch.Tensor, prob: torch.Tensor
) -> Tuple[int, List['FrameCtmUnit']]:
"""Transforms predictions with probabilities to a list of FrameCtmUnit objects,
containing frame-level alignment information (label, start, duration, probability), for a given sample id.
Alignment information can be either token-based (char, wordpiece, ...) or word-based.
"""
if len(pred) == 0:
return (s_id, [])
non_blank_idx = (pred != self.blank_id).nonzero(as_tuple=True)[0].cpu()
pred_ids = pred[non_blank_idx].tolist()
prob_list = prob.tolist()
if self.model_type == "rnnt":
wer_module = self._model.decoding
# for rnnt forced alignment we always have num_blanks == num_frames,
# thus len(pred) == num_frames + num_non_blanks
token_begin = non_blank_idx - torch.arange(len(non_blank_idx))
token_end = torch.cat((token_begin[1:], torch.tensor([len(pred) - len(non_blank_idx)])))
else:
wer_module = self._model._wer
token_begin = non_blank_idx
token_end = torch.cat((token_begin[1:], torch.tensor([len(pred)])))
tokens = wer_module.decode_ids_to_tokens(pred_ids)
token_len = (token_end - token_begin).tolist()
token_begin = token_begin.tolist()
token_prob = [
sum(prob_list[i:j]) / (j - i)
for i, j in zip(non_blank_idx.tolist(), non_blank_idx[1:].tolist() + [len(pred)])
]
if self.word_output:
words = wer_module.decode_tokens_to_str(pred_ids).split(" ")
words, word_begin, word_len, word_prob = (
self._process_tokens_to_words(tokens, token_begin, token_len, token_prob, words)
if hasattr(self._model, "tokenizer")
else self._process_char_with_space_to_words(tokens, token_begin, token_len, token_prob, words)
)
return s_id, [FrameCtmUnit(t, b, l, p) for t, b, l, p in zip(words, word_begin, word_len, word_prob)]
return s_id, [FrameCtmUnit(t, b, l, p) for t, b, l, p in zip(tokens, token_begin, token_len, token_prob)]
def _predict_impl_ctc(
self,
encoded: torch.Tensor,
encoded_len: torch.Tensor,
transcript: torch.Tensor,
transcript_len: torch.Tensor,
sample_id: torch.Tensor,
    ) -> List[Tuple[int, List['FrameCtmUnit']]]:
"""Builds time alignment of an encoded sequence.
This method assumes that the CTC model is used.
        It produces a list of sample ids paired with 4-tuples (label, start_frame, length, probability), called FrameCtmUnit.
"""
log_probs = encoded
if self.prob_suppress_value != 1.0:
log_probs = self._apply_prob_suppress(log_probs)
if self.alignment_type == "argmax":
predictions, probs = self._prepare_ctc_argmax_predictions(log_probs, encoded_len)
elif self.alignment_type == "forced":
if self.cpu_decoding:
log_probs, encoded_len, transcript, transcript_len = (
log_probs.cpu(),
encoded_len.cpu(),
transcript.cpu(),
transcript_len.cpu(),
)
predictions, probs = self.graph_decoder.align(log_probs, encoded_len, transcript, transcript_len)
else:
raise NotImplementedError()
return [
self._results_to_ctmUnits(s_id, pred, prob)
for s_id, pred, prob in zip(sample_id.tolist(), predictions, probs)
]
def _predict_impl_rnnt(
self,
encoded: torch.Tensor,
encoded_len: torch.Tensor,
transcript: torch.Tensor,
transcript_len: torch.Tensor,
sample_id: torch.Tensor,
    ) -> List[Tuple[int, List['FrameCtmUnit']]]:
"""Builds time alignment of an encoded sequence.
This method assumes that the RNNT model is used.
        It produces a list of sample ids paired with 4-tuples (label, start_frame, length, probability), called FrameCtmUnit.
"""
if self.alignment_type == "argmax":
return self._predict_impl_rnnt_argmax(encoded, encoded_len, transcript, transcript_len, sample_id)
elif self.alignment_type == "forced":
decoded = self._model.decoder(targets=transcript, target_length=transcript_len)[0]
log_probs = (
self._rnnt_joint_pruned(encoded, encoded_len, decoded, transcript_len)
if self.predictor_window_size > 0 and self.predictor_window_size < transcript_len.max()
else self._model.joint(encoder_outputs=encoded, decoder_outputs=decoded)
)
apply_log_softmax = True if self.log_softmax is None and encoded.is_cuda else self.log_softmax
if apply_log_softmax:
log_probs = log_probs.log_softmax(dim=-1)
if self.cpu_decoding:
log_probs, encoded_len, transcript, transcript_len = (
log_probs.cpu(),
encoded_len.cpu(),
transcript.cpu(),
transcript_len.cpu(),
)
predictions, probs = self.graph_decoder.align(log_probs, encoded_len, transcript, transcript_len)
return [
self._results_to_ctmUnits(s_id, pred, prob)
for s_id, pred, prob in zip(sample_id.tolist(), predictions, probs)
]
else:
raise NotImplementedError()
@torch.no_grad()
    def predict_step(self, batch, batch_idx, dataloader_idx=0) -> List[Tuple[int, List['FrameCtmUnit']]]:
signal, signal_len, transcript, transcript_len, sample_id = batch
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self._model.forward(processed_signal=signal, processed_signal_length=signal_len)[:2]
else:
encoded, encoded_len = self._model.forward(input_signal=signal, input_signal_length=signal_len)[:2]
return self._predict_impl(encoded, encoded_len, transcript, transcript_len, sample_id)
@torch.no_grad()
def transcribe(
        self, manifest: List[str], batch_size: int = 4, num_workers: Optional[int] = None, verbose: bool = True,
    ) -> List[List['FrameCtmUnit']]:
"""
Does alignment. Use this method for debugging and prototyping.
Args:
manifest: path to dataset JSON manifest file (in NeMo format). \
Recommended length per audio file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
num_workers: (int) number of workers for DataLoader
verbose: (bool) whether to display tqdm progress bar
Returns:
            For each utterance, a list of 4-tuples (label, start_frame, length, probability), called FrameCtmUnit, \
in the same order as in the manifest.
"""
hypotheses = []
# Model's mode and device
mode = self._model.training
device = next(self._model.parameters()).device
dither_value = self._model.preprocessor.featurizer.dither
pad_to_value = self._model.preprocessor.featurizer.pad_to
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
try:
self._model.preprocessor.featurizer.dither = 0.0
self._model.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self._model.eval()
# Freeze the encoder and decoder modules
self._model.encoder.freeze()
self._model.decoder.freeze()
if hasattr(self._model, "joint"):
self._model.joint.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
config = {
'manifest_filepath': manifest,
'batch_size': batch_size,
'num_workers': num_workers,
}
temporary_datalayer = self._model._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Aligning", disable=not verbose):
test_batch[0] = test_batch[0].to(device)
test_batch[1] = test_batch[1].to(device)
hypotheses += [unit for i, unit in self.predict_step(test_batch, 0)]
del test_batch
finally:
# set mode back to its original value
self._model.train(mode=mode)
self._model.preprocessor.featurizer.dither = dither_value
self._model.preprocessor.featurizer.pad_to = pad_to_value
logging.set_verbosity(logging_level)
if mode is True:
self._model.encoder.unfreeze()
self._model.decoder.unfreeze()
if hasattr(self._model, "joint"):
self._model.joint.unfreeze()
return hypotheses
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
raise RuntimeError("This module cannot be used in training.")
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
raise RuntimeError("This module cannot be used in validation.")
def setup_test_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
raise RuntimeError("This module cannot be used in testing.")
|
NeMo-main
|
nemo/collections/asr/models/k2_aligner_model.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import joblib
import numpy as np
import torch
from omegaconf import DictConfig, open_dict
from pytorch_lightning import Trainer
from nemo.collections.asr.models.asr_model import ASRModel
from nemo.collections.asr.models.hybrid_rnnt_ctc_models import EncDecHybridRNNTCTCModel
from nemo.collections.asr.parts.utils.asr_confidence_utils import (
ConfidenceConfig,
ConfidenceMeasureConfig,
get_confidence_aggregation_bank,
get_confidence_measure_bank,
)
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.core.classes import ModelPT
from nemo.utils import model_utils
# frozen is required to allow hashing of this class and use it
# as a dictionary key when running confidence tuning
@dataclass(frozen=True)
class ConfidenceSpec:
exclude_blank: bool
aggregation: str
confidence_type: str
alpha: float
def to_confidence_config(self) -> ConfidenceConfig:
"""Converts confidence spec to the confidence config.
        Internally, the tuning procedure uses these "spec" objects as they
        are more closely aligned with how things are implemented. But when it's time
        to save the models or call transcribe, we need to use the proper
object of type ``ConfidenceConfig``.
"""
if self.confidence_type == 'max_prob':
name = 'max_prob'
entropy_type = 'tsallis' # can be any
entropy_norm = 'lin' # can be any
else:
name, entropy_type, entropy_norm = self.confidence_type.split("_")
return ConfidenceConfig(
exclude_blank=self.exclude_blank,
aggregation=self.aggregation,
measure_cfg=ConfidenceMeasureConfig(
name=name, entropy_type=entropy_type, alpha=self.alpha, entropy_norm=entropy_norm,
),
)
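# Illustrative example (editorial addition, not part of the original file): shows how a tuning-time
# ConfidenceSpec maps onto the ConfidenceConfig that would actually be stored with a model. The
# particular spec values below are arbitrary.
def _example_confidence_spec_to_config() -> ConfidenceConfig:
    spec = ConfidenceSpec(exclude_blank=True, aggregation="mean", confidence_type="entropy_tsallis_exp", alpha=0.5)
    # to_confidence_config splits this into name="entropy", entropy_type="tsallis", entropy_norm="exp"
    return spec.to_confidence_config()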
def get_filtered_logprobs(hypothesis: Hypothesis, exclude_blank: bool) -> torch.Tensor:
"""Returns logprobs from the hypothesis object with optional blanks filter.
This function supports both CTC and Transducer hypotheses. Will place the
logprobs on GPU if it's available.
Args:
hypothesis: generated hypothesis as returned from the transcribe
method of the ASR model.
exclude_blank: whether to filter out all ``<blank>`` tokens.
Returns:
torch.Tensor: of shape [S, V], where S is (filtered) sequence length and
V is the vocabulary size.
"""
if isinstance(hypothesis.alignments, list): # Transducer
filtered_logprobs = []
for alignment in hypothesis.alignments:
for align_elem in alignment:
if not exclude_blank:
filtered_logprobs.append(align_elem[0])
elif align_elem[1].item() != align_elem[0].shape[-1] - 1:
filtered_logprobs.append(align_elem[0])
if not filtered_logprobs: # for the edge-case of all blanks
filtered_logprobs.append(align_elem[0])
filtered_logprobs = torch.stack(filtered_logprobs)
if torch.cuda.is_available(): # by default logprobs are placed on cpu in nemo
filtered_logprobs = filtered_logprobs.cuda()
else: # CTC
logprobs = hypothesis.y_sequence
if torch.cuda.is_available(): # by default logprobs are placed on cpu in nemo
logprobs = logprobs.cuda()
if exclude_blank: # filtering blanks
labels = logprobs.argmax(dim=-1)
filtered_logprobs = logprobs[labels != logprobs.shape[1] - 1]
if filtered_logprobs.shape[0] == 0: # for the edge-case of all blanks
filtered_logprobs = logprobs[:1]
else:
filtered_logprobs = logprobs
# need to make sure logprobs are always normalized, so checking if they sum up to 1
if not torch.allclose(filtered_logprobs[0].exp().sum(), torch.tensor(1.0)):
filtered_logprobs = torch.log_softmax(filtered_logprobs, dim=1)
return filtered_logprobs
def compute_confidence(hypothesis: Hypothesis, confidence_cfg: ConfidenceConfig) -> float:
"""Computes confidence score of the full utterance from a given hypothesis.
This is essentially a re-implementation of the built-in confidence
computation in NeMo. The difference is that we aggregate full-utterance
scores, while core functionality only supports word and token level
aggregations.
Args:
hypothesis: generated hypothesis as returned from the transcribe
method of the ASR model.
confidence_cfg: confidence config specifying what kind of
measure/aggregation should be used.
Returns:
float: confidence score.
"""
filtered_logprobs = get_filtered_logprobs(hypothesis, confidence_cfg.exclude_blank)
vocab_size = filtered_logprobs.shape[1]
aggr_func = get_confidence_aggregation_bank()[confidence_cfg.aggregation]
if confidence_cfg.measure_cfg.name == "max_prob":
conf_type = "max_prob"
alpha = 1.0
else:
conf_type = f"entropy_{confidence_cfg.measure_cfg.entropy_type}_{confidence_cfg.measure_cfg.entropy_norm}"
alpha = confidence_cfg.measure_cfg.alpha
conf_func = get_confidence_measure_bank()[conf_type]
conf_value = aggr_func(conf_func(filtered_logprobs, v=vocab_size, t=alpha)).cpu().item()
return conf_value
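# Minimal synthetic check (editorial addition, not part of the original file): builds a fake CTC-style
# Hypothesis whose y_sequence holds per-frame log-probabilities and scores it with a max_prob/mean
# confidence config. Shapes and values are arbitrary and only meant to show the expected call pattern.
def _example_compute_confidence() -> float:
    fake_logprobs = torch.log_softmax(torch.randn(20, 33), dim=-1)  # [S, V]
    fake_hypothesis = Hypothesis(score=0.0, y_sequence=fake_logprobs)
    confidence_cfg = ConfidenceConfig(
        exclude_blank=False, aggregation="mean", measure_cfg=ConfidenceMeasureConfig(name="max_prob"),
    )
    return compute_confidence(fake_hypothesis, confidence_cfg)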
class ConfidenceEnsembleModel(ModelPT):
"""Implementation of the confidence ensemble model.
See https://arxiv.org/abs/2306.15824 for details.
.. note::
        Currently this class only supports the `transcribe` method, as it requires
full-utterance confidence scores to operate.
"""
def __init__(
self, cfg: DictConfig, trainer: 'Trainer' = None,
):
super().__init__(cfg=cfg, trainer=trainer)
# either we load all models from ``load_models`` cfg parameter
# or all of them are specified in the config as modelX alongside the num_models key
#
# ideally, we'd like to directly store all models in a list, but that
# is not currently supported by the submodule logic
# so to access all the models, we do something like
#
# for model_idx in range(self.num_models):
# model = getattr(self, f"model{model_idx}")
if 'num_models' in self.cfg:
self.num_models = self.cfg.num_models
for idx in range(self.num_models):
cfg_field = f"model{idx}"
model_cfg = self.cfg[cfg_field]
model_class = model_utils.import_class_by_path(model_cfg['target'])
self.register_nemo_submodule(
name=cfg_field, config_field=cfg_field, model=model_class(model_cfg, trainer=trainer),
)
else:
self.num_models = len(cfg.load_models)
with open_dict(self.cfg):
self.cfg.num_models = self.num_models
for idx, model in enumerate(cfg.load_models):
cfg_field = f"model{idx}"
if model.endswith(".nemo"):
self.register_nemo_submodule(
name=cfg_field,
config_field=cfg_field,
model=ASRModel.restore_from(model, trainer=trainer, map_location="cpu"),
)
else:
self.register_nemo_submodule(
cfg_field, config_field=cfg_field, model=ASRModel.from_pretrained(model, map_location="cpu"),
)
# registering model selection block - this is expected to be a joblib-saved
# pretrained sklearn pipeline containing standardization + logistic regression
# trained to predict "most-confident" model index from the confidence scores of all models
model_selection_block_path = self.register_artifact("model_selection_block", cfg.model_selection_block)
self.model_selection_block = joblib.load(model_selection_block_path)
self.confidence_cfg = ConfidenceConfig(**self.cfg.confidence)
# making sure each model has correct temperature setting in the decoder strategy
for model_idx in range(self.num_models):
model = getattr(self, f"model{model_idx}")
            # for now we assume users are directly responsible for matching
# decoder type when building ensemble with inference type
# TODO: add automatic checks for errors
if isinstance(model, EncDecHybridRNNTCTCModel):
self.update_decoding_parameters(model.cfg.decoding)
model.change_decoding_strategy(model.cfg.decoding, decoder_type="rnnt")
self.update_decoding_parameters(model.cfg.aux_ctc.decoding)
model.change_decoding_strategy(model.cfg.aux_ctc.decoding, decoder_type="ctc")
else:
self.update_decoding_parameters(model.cfg.decoding)
model.change_decoding_strategy(model.cfg.decoding)
def update_decoding_parameters(self, decoding_cfg: DictConfig):
"""Updating temperature/preserve_alignment parameters of the config."""
with open_dict(decoding_cfg):
decoding_cfg.temperature = self.cfg.temperature
decoding_cfg.preserve_alignments = True
def setup_training_data(self, train_data_config: Union[DictConfig, Dict]):
"""Pass-through to the ensemble models.
Note that training is not actually supported for this class!
"""
for model_idx in range(self.num_models):
getattr(self, f"model{model_idx}").setup_training_data(train_data_config)
def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]):
"""Pass-through to the ensemble models."""
for model_idx in range(self.num_models):
getattr(self, f"model{model_idx}").setup_validation_data(val_data_config)
def change_attention_model(
self, self_attention_model: str = None, att_context_size: List[int] = None, update_config: bool = True
):
"""Pass-through to the ensemble models."""
for model_idx in range(self.num_models):
getattr(self, f"model{model_idx}").change_attention_model(
self_attention_model, att_context_size, update_config
)
def change_decoding_strategy(self, decoding_cfg: Optional[DictConfig] = None, decoder_type: str = None):
"""Pass-through to the ensemble models.
The only change here is that we always require expected temperature
to be set as well as ``decoding_cfg.preserve_alignments = True``
"""
self.update_decoding_parameters(decoding_cfg)
for model_idx in range(self.num_models):
model = getattr(self, f"model{model_idx}")
if isinstance(model, EncDecHybridRNNTCTCModel):
model.change_decoding_strategy(decoding_cfg, decoder_type=decoder_type)
else:
model.change_decoding_strategy(decoding_cfg)
@torch.no_grad()
def transcribe(
self,
paths2audio_files: List[str],
batch_size: int = 4,
return_hypotheses: bool = False,
num_workers: int = 0,
channel_selector: Optional[ChannelSelectorType] = None,
augmentor: DictConfig = None,
verbose: bool = True,
**kwargs, # any other model specific parameters are passed directly
) -> List[str]:
"""Confidence-ensemble transcribe method.
Consists of the following steps:
1. Run all models (TODO: in parallel)
2. Compute confidence for each model
3. Use logistic regression to pick the "most confident" model
4. Return the output of that model
"""
confidences = []
all_transcriptions = []
        # always require hypotheses to be returned
        # TODO: make sure to return text only if return_hypotheses was False originally
return_hypotheses = True
for model_idx in range(self.num_models):
model = getattr(self, f"model{model_idx}")
transcriptions = model.transcribe(
paths2audio_files=paths2audio_files,
batch_size=batch_size,
return_hypotheses=return_hypotheses,
num_workers=num_workers,
channel_selector=channel_selector,
augmentor=augmentor,
verbose=verbose,
**kwargs,
)
if isinstance(transcriptions, tuple): # transducers return a tuple
transcriptions = transcriptions[0]
model_confidences = []
for transcription in transcriptions:
model_confidences.append(compute_confidence(transcription, self.confidence_cfg))
confidences.append(model_confidences)
all_transcriptions.append(transcriptions)
# transposing with zip(*list)
features = np.array(list(zip(*confidences)))
model_indices = self.model_selection_block.predict(features)
final_transcriptions = []
        for transcription_idx in range(len(all_transcriptions[0])):
            final_transcriptions.append(all_transcriptions[model_indices[transcription_idx]][transcription_idx])
return final_transcriptions
def list_available_models(self):
return []
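# ---------------------------------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original module). A confidence
# ensemble is normally built offline and saved as a single .nemo checkpoint; afterwards it is restored
# and used like any other ASR model, but only through `transcribe`. The checkpoint and audio paths
# below are placeholders.
if __name__ == "__main__":
    ensemble = ConfidenceEnsembleModel.restore_from("confidence_ensemble.nemo", map_location="cpu")
    predictions = ensemble.transcribe(paths2audio_files=["/path/to/audio.wav"], batch_size=1)
    print(predictions[0])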
|
NeMo-main
|
nemo/collections/asr/models/confidence_ensemble.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import tempfile
from abc import abstractmethod
from math import ceil, floor
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import Trainer
from torchmetrics import Accuracy
from torchmetrics.regression import MeanAbsoluteError, MeanSquaredError
from nemo.collections.asr.data import audio_to_label_dataset, feature_to_label_dataset
from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.collections.common.losses import CrossEntropyLoss, MSELoss
from nemo.collections.common.metrics import TopKClassificationAccuracy
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import *
from nemo.utils import logging, model_utils
from nemo.utils.cast_utils import cast_all
__all__ = ['EncDecClassificationModel', 'EncDecRegressionModel']
class _EncDecBaseModel(ASRModel, ExportableEncDecModel):
"""Encoder decoder Classification models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
# Global_rank and local_rank is set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
# Convert config to a DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
# Convert config to support Hydra 1.0+ instantiation
cfg = model_utils.maybe_update_config_version(cfg)
self.is_regression_task = cfg.get('is_regression_task', False)
# Change labels if needed
self._update_decoder_config(cfg.labels, cfg.decoder)
super().__init__(cfg=cfg, trainer=trainer)
if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = ASRModel.from_config_dict(self._cfg.spec_augment)
else:
self.spec_augmentation = None
if hasattr(self._cfg, 'crop_or_pad_augment') and self._cfg.crop_or_pad_augment is not None:
self.crop_or_pad = ASRModel.from_config_dict(self._cfg.crop_or_pad_augment)
else:
self.crop_or_pad = None
self.preprocessor = self._setup_preprocessor()
self.encoder = self._setup_encoder()
self.decoder = self._setup_decoder()
self.loss = self._setup_loss()
self._setup_metrics()
@abstractmethod
def _setup_preprocessor(self):
"""
Setup preprocessor for audio data
Returns: Preprocessor
"""
pass
@abstractmethod
def _setup_encoder(self):
"""
Setup encoder for the Encoder-Decoder network
Returns: Encoder
"""
pass
@abstractmethod
def _setup_decoder(self):
"""
Setup decoder for the Encoder-Decoder network
Returns: Decoder
"""
pass
@abstractmethod
def _setup_loss(self):
"""
Setup loss function for training
Returns: Loss function
"""
pass
@abstractmethod
def _setup_metrics(self):
"""
Setup metrics to be tracked in addition to loss
Returns: void
"""
pass
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
audio_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), audio_eltype, optional=True),
"input_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),
"processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
@abstractmethod
def output_types(self) -> Optional[Dict[str, NeuralType]]:
pass
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None
):
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
        if not (has_input_signal ^ has_processed_signal):
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_length`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
# Crop or pad is always applied
if self.crop_or_pad is not None:
processed_signal, processed_signal_length = self.crop_or_pad(
input_signal=processed_signal, length=processed_signal_length
)
# Spec augment is not applied during evaluation/testing
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
logits = self.decoder(encoder_output=encoded)
return logits
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
# preserve config
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=DictConfig(train_data_config))
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if (
self._train_dl is not None
and hasattr(self._train_dl, 'dataset')
and isinstance(self._train_dl.dataset, torch.utils.data.IterableDataset)
):
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
)
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=DictConfig(val_data_config))
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]], use_feat: bool = False):
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
if use_feat and hasattr(self, '_setup_feature_label_dataloader'):
self._test_dl = self._setup_feature_label_dataloader(config=DictConfig(test_data_config))
else:
self._test_dl = self._setup_dataloader_from_config(config=DictConfig(test_data_config))
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
def _setup_dataloader_from_config(self, config: DictConfig):
OmegaConf.set_struct(config, False)
config.is_regression_task = self.is_regression_task
OmegaConf.set_struct(config, True)
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
featurizer = WaveformFeaturizer(
sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor
)
shuffle = config['shuffle']
# Instantiate tarred dataset loader or normal dataset loader
if config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
logging.warning(
"Could not load dataset as `manifest_filepath` is None or "
f"`tarred_audio_filepaths` is None. Provided config : {config}"
)
return None
if 'vad_stream' in config and config['vad_stream']:
logging.warning("VAD inference does not support tarred dataset now")
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
dataset = audio_to_label_dataset.get_tarred_classification_label_dataset(
featurizer=featurizer,
config=config,
shuffle_n=shuffle_n,
global_rank=self.global_rank,
world_size=self.world_size,
)
shuffle = False
batch_size = config['batch_size']
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` is None. Provided config : {config}")
return None
if 'vad_stream' in config and config['vad_stream']:
logging.info("Perform streaming frame-level VAD")
dataset = audio_to_label_dataset.get_speech_label_dataset(featurizer=featurizer, config=config)
batch_size = 1
collate_fn = dataset.vad_frame_seq_collate_fn
else:
dataset = audio_to_label_dataset.get_classification_label_dataset(featurizer=featurizer, config=config)
batch_size = config['batch_size']
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def _setup_feature_label_dataloader(self, config: DictConfig) -> torch.utils.data.DataLoader:
"""
setup dataloader for VAD inference with audio features as input
"""
OmegaConf.set_struct(config, False)
config.is_regression_task = self.is_regression_task
OmegaConf.set_struct(config, True)
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` is None. Provided config : {config}")
return None
dataset = feature_to_label_dataset.get_feature_label_dataset(config=config, augmentor=augmentor)
if 'vad_stream' in config and config['vad_stream']:
collate_func = dataset._vad_segment_collate_fn
batch_size = 1
shuffle = False
else:
collate_func = dataset._collate_fn
batch_size = config['batch_size']
shuffle = config['shuffle']
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=collate_func,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
@torch.no_grad()
def transcribe(self, paths2audio_files: List[str], batch_size: int = 4, logprobs=False) -> List[str]:
"""
Generate class labels for provided audio files. Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is approximately 1 second.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
logprobs: (bool) pass True to get log probabilities instead of class labels.
Returns:
A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
return []
# We will store transcriptions here
labels = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
if hasattr(self.preprocessor.featurizer, 'dither'):
dither_value = self.preprocessor.featurizer.dither
if hasattr(self.preprocessor.featurizer, 'pad_to'):
pad_to_value = self.preprocessor.featurizer.pad_to
try:
if hasattr(self.preprocessor.featurizer, 'dither'):
self.preprocessor.featurizer.dither = 0.0
if hasattr(self.preprocessor.featurizer, 'pad_to'):
self.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self.eval()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Work in tmp directory - will store manifest file there
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in paths2audio_files:
label = 0.0 if self.is_regression_task else self.cfg.labels[0]
entry = {'audio_filepath': audio_file, 'duration': 100000.0, 'label': label}
fp.write(json.dumps(entry) + '\n')
config = {'paths2audio_files': paths2audio_files, 'batch_size': batch_size, 'temp_dir': tmpdir}
temporary_datalayer = self._setup_transcribe_dataloader(config)
for test_batch in temporary_datalayer:
logits = self.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
if logprobs:
# dump log probs per file
for idx in range(logits.shape[0]):
lg = logits[idx]
labels.append(lg.cpu().numpy())
else:
labels_k = []
top_ks = self._accuracy.top_k
for top_k_i in top_ks:
# replace top k value with current top k
self._accuracy.top_k = top_k_i
labels_k_i = self._accuracy.top_k_predicted_labels(logits)
labels_k_i = labels_k_i.cpu()
labels_k.append(labels_k_i)
# convenience: if only one top_k, pop out the nested list
if len(top_ks) == 1:
labels_k = labels_k[0]
labels += labels_k
                        # reset top k to original value
self._accuracy.top_k = top_ks
del test_batch
finally:
# set mode back to its original value
self.train(mode=mode)
if hasattr(self.preprocessor.featurizer, 'dither'):
self.preprocessor.featurizer.dither = dither_value
if hasattr(self.preprocessor.featurizer, 'pad_to'):
self.preprocessor.featurizer.pad_to = pad_to_value
logging.set_verbosity(logging_level)
return labels
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
            config: A python dictionary which contains the following keys:
                paths2audio_files: (a list) of paths to audio files
                batch_size: (int) batch size to use during inference
                temp_dir: (str) path to a temporary directory where the manifest generated by ``transcribe`` is stored
Returns:
A pytorch DataLoader for the given audio file(s).
"""
dl_config = {
'manifest_filepath': os.path.join(config['temp_dir'], 'manifest.json'),
'sample_rate': self.preprocessor._sample_rate,
'labels': self.cfg.labels,
'batch_size': min(config['batch_size'], len(config['paths2audio_files'])),
'trim_silence': False,
'shuffle': False,
}
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
@abstractmethod
def _update_decoder_config(self, labels, cfg):
pass
class EncDecClassificationModel(_EncDecBaseModel):
"""Encoder decoder Classification models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if cfg.get("is_regression_task", False):
raise ValueError(f"EndDecClassificationModel requires the flag is_regression_task to be set as false")
super().__init__(cfg=cfg, trainer=trainer)
def _setup_preprocessor(self):
return EncDecClassificationModel.from_config_dict(self._cfg.preprocessor)
def _setup_encoder(self):
return EncDecClassificationModel.from_config_dict(self._cfg.encoder)
def _setup_decoder(self):
return EncDecClassificationModel.from_config_dict(self._cfg.decoder)
def _setup_loss(self):
return CrossEntropyLoss()
def _setup_metrics(self):
self._accuracy = TopKClassificationAccuracy(dist_sync_on_step=True)
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="vad_multilingual_marblenet",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/vad_multilingual_marblenet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/vad_multilingual_marblenet/versions/1.10.0/files/vad_multilingual_marblenet.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="vad_telephony_marblenet",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:vad_telephony_marblenet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/vad_telephony_marblenet/versions/1.0.0rc1/files/vad_telephony_marblenet.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="vad_marblenet",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:vad_marblenet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/vad_marblenet/versions/1.0.0rc1/files/vad_marblenet.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="commandrecognition_en_matchboxnet3x1x64_v1",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:commandrecognition_en_matchboxnet3x1x64_v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/commandrecognition_en_matchboxnet3x1x64_v1/versions/1.0.0rc1/files/commandrecognition_en_matchboxnet3x1x64_v1.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="commandrecognition_en_matchboxnet3x2x64_v1",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:commandrecognition_en_matchboxnet3x2x64_v1",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/commandrecognition_en_matchboxnet3x2x64_v1/versions/1.0.0rc1/files/commandrecognition_en_matchboxnet3x2x64_v1.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="commandrecognition_en_matchboxnet3x1x64_v2",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:commandrecognition_en_matchboxnet3x1x64_v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/commandrecognition_en_matchboxnet3x1x64_v2/versions/1.0.0rc1/files/commandrecognition_en_matchboxnet3x1x64_v2.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="commandrecognition_en_matchboxnet3x2x64_v2",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:commandrecognition_en_matchboxnet3x2x64_v2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/commandrecognition_en_matchboxnet3x2x64_v2/versions/1.0.0rc1/files/commandrecognition_en_matchboxnet3x2x64_v2.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="commandrecognition_en_matchboxnet3x1x64_v2_subset_task",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:commandrecognition_en_matchboxnet3x1x64_v2_subset_task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/commandrecognition_en_matchboxnet3x1x64_v2_subset_task/versions/1.0.0rc1/files/commandrecognition_en_matchboxnet3x1x64_v2_subset_task.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="commandrecognition_en_matchboxnet3x2x64_v2_subset_task",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:commandrecognition_en_matchboxnet3x2x64_v2_subset_task",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/commandrecognition_en_matchboxnet3x2x64_v2_subset_task/versions/1.0.0rc1/files/commandrecognition_en_matchboxnet3x2x64_v2_subset_task.nemo",
)
results.append(model)
return results
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"outputs": NeuralType(('B', 'D'), LogitsType())}
# PTL-specific methods
def training_step(self, batch, batch_nb):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
self.log('train_loss', loss_value)
self.log('learning_rate', self._optimizer.param_groups[0]['lr'])
self.log('global_step', self.trainer.global_step)
self._accuracy(logits=logits, labels=labels)
topk_scores = self._accuracy.compute()
self._accuracy.reset()
for top_k, score in zip(self._accuracy.top_k, topk_scores):
self.log('training_batch_accuracy_top_{}'.format(top_k), score)
return {
'loss': loss_value,
}
def validation_step(self, batch, batch_idx, dataloader_idx=0):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
acc = self._accuracy(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
loss = {
'val_loss': loss_value,
'val_correct_counts': correct_counts,
'val_total_counts': total_counts,
'val_acc': acc,
}
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(loss)
else:
self.validation_step_outputs.append(loss)
return loss
def test_step(self, batch, batch_idx, dataloader_idx=0):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
acc = self._accuracy(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
loss = {
'test_loss': loss_value,
'test_correct_counts': correct_counts,
'test_total_counts': total_counts,
'test_acc': acc,
}
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append(loss)
else:
self.test_step_outputs.append(loss)
return loss
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['val_correct_counts'] for x in outputs]).sum(axis=0)
total_counts = torch.stack([x['val_total_counts'] for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
self._accuracy.reset()
tensorboard_log = {'val_loss': val_loss_mean}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log['val_epoch_top@{}'.format(top_k)] = score
return {'log': tensorboard_log}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['test_correct_counts'].unsqueeze(0) for x in outputs]).sum(axis=0)
total_counts = torch.stack([x['test_total_counts'].unsqueeze(0) for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
self._accuracy.reset()
tensorboard_log = {'test_loss': test_loss_mean}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log['test_epoch_top@{}'.format(top_k)] = score
return {'log': tensorboard_log}
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None
):
logits = super().forward(
input_signal=input_signal,
input_signal_length=input_signal_length,
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
)
return logits
def change_labels(self, new_labels: List[str]):
"""
        Changes the labels used by the decoder module. Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example,
        you would use it if you want to reuse a pretrained encoder when fine-tuning on data from another dataset.
        If new_labels == self.decoder.vocabulary then nothing will be changed.
        Args:
            new_labels: list with new labels. Must contain at least 2 elements. Typically, \
                this is the set of labels for the dataset.
Returns: None
"""
if new_labels is not None and not isinstance(new_labels, ListConfig):
new_labels = ListConfig(new_labels)
if self._cfg.labels == new_labels:
logging.warning(
f"Old labels ({self._cfg.labels}) and new labels ({new_labels}) match. Not changing anything"
)
else:
if new_labels is None or len(new_labels) == 0:
raise ValueError(f'New labels must be non-empty list of labels. But I got: {new_labels}')
# Update config
self._cfg.labels = new_labels
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
self._update_decoder_config(new_labels, new_decoder_config)
del self.decoder
self.decoder = EncDecClassificationModel.from_config_dict(new_decoder_config)
OmegaConf.set_struct(self._cfg.decoder, False)
self._cfg.decoder = new_decoder_config
OmegaConf.set_struct(self._cfg.decoder, True)
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
self._cfg.train_ds.labels = new_labels
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
self._cfg.validation_ds.labels = new_labels
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
self._cfg.test_ds.labels = new_labels
logging.info(f"Changed decoder output to {self.decoder.num_classes} labels.")
def _update_decoder_config(self, labels, cfg):
"""
Update the number of classes in the decoder based on labels provided.
Args:
labels: The current labels of the model
cfg: The config of the decoder which will be updated.
"""
OmegaConf.set_struct(cfg, False)
if 'params' in cfg:
cfg.params.num_classes = len(labels)
else:
cfg.num_classes = len(labels)
OmegaConf.set_struct(cfg, True)
class EncDecRegressionModel(_EncDecBaseModel):
"""Encoder decoder class for speech regression models.
Model class creates training, validation methods for setting up data
performing model forward pass.
"""
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if not cfg.get('is_regression_task', False):
raise ValueError(f"EndDecRegressionModel requires the flag is_regression_task to be set as true")
super().__init__(cfg=cfg, trainer=trainer)
def _setup_preprocessor(self):
return EncDecRegressionModel.from_config_dict(self._cfg.preprocessor)
def _setup_encoder(self):
return EncDecRegressionModel.from_config_dict(self._cfg.encoder)
def _setup_decoder(self):
return EncDecRegressionModel.from_config_dict(self._cfg.decoder)
def _setup_loss(self):
return MSELoss()
def _setup_metrics(self):
self._mse = MeanSquaredError()
self._mae = MeanAbsoluteError()
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"preds": NeuralType(tuple('B'), RegressionValuesType())}
@typecheck()
def forward(self, input_signal, input_signal_length):
logits = super().forward(input_signal=input_signal, input_signal_length=input_signal_length)
return logits.view(-1)
# PTL-specific methods
def training_step(self, batch, batch_idx):
audio_signal, audio_signal_len, targets, targets_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss = self.loss(preds=logits, labels=targets)
train_mse = self._mse(preds=logits, target=targets)
train_mae = self._mae(preds=logits, target=targets)
self.log_dict(
{
'train_loss': loss,
'train_mse': train_mse,
'train_mae': train_mae,
'learning_rate': self._optimizer.param_groups[0]['lr'],
},
)
return {'loss': loss}
def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):
audio_signal, audio_signal_len, targets, targets_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(preds=logits, labels=targets)
val_mse = self._mse(preds=logits, target=targets)
val_mae = self._mae(preds=logits, target=targets)
return {'val_loss': loss_value, 'val_mse': val_mse, 'val_mae': val_mae}
def test_step(self, batch, batch_idx, dataloader_idx: int = 0):
logs = self.validation_step(batch, batch_idx, dataloader_idx)
return {'test_loss': logs['val_loss'], 'test_mse': logs['val_mse'], 'test_mae': logs['val_mae']}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
val_mse = self._mse.compute()
self._mse.reset()
val_mae = self._mae.compute()
self._mae.reset()
tensorboard_logs = {'val_loss': val_loss_mean, 'val_mse': val_mse, 'val_mae': val_mae}
return {'val_loss': val_loss_mean, 'val_mse': val_mse, 'val_mae': val_mae, 'log': tensorboard_logs}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
test_mse = self._mse.compute()
self._mse.reset()
test_mae = self._mae.compute()
self._mae.reset()
tensorboard_logs = {'test_loss': test_loss_mean, 'test_mse': test_mse, 'test_mae': test_mae}
return {'test_loss': test_loss_mean, 'test_mse': test_mse, 'test_mae': test_mae, 'log': tensorboard_logs}
@torch.no_grad()
def transcribe(self, paths2audio_files: List[str], batch_size: int = 4) -> List[float]:
"""
Generate regression predictions for the provided audio files. Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is approximately 1 second.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
Returns:
A list of predictions in the same order as paths2audio_files
"""
predictions = super().transcribe(paths2audio_files, batch_size, logprobs=True)
return [float(pred) for pred in predictions]
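# A minimal inference sketch for the regression model (comments only; the checkpoint and file paths are placeholders).
#
#   model = EncDecRegressionModel.restore_from("my_regression_model.nemo")  # hypothetical checkpoint
#   preds = model.transcribe(paths2audio_files=["clip1.wav", "clip2.wav"], batch_size=4)
#   # `preds` is a list of floats, one prediction per input file, in the same order as the inputs.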
def _update_decoder_config(self, labels, cfg):
OmegaConf.set_struct(cfg, False)
if 'params' in cfg:
cfg.params.num_classes = 1
else:
cfg.num_classes = 1
OmegaConf.set_struct(cfg, True)
class EncDecFrameClassificationModel(EncDecClassificationModel):
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"outputs": NeuralType(('B', 'T', 'C'), LogitsType())}
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.num_classes = len(cfg.labels)
self.eval_loop_cnt = 0
self.ratio_threshold = cfg.get('ratio_threshold', 0.2)
super().__init__(cfg=cfg, trainer=trainer)
self.decoder.output_types = self.output_types
self.decoder.output_types_for_export = self.output_types
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
results = []
model = PretrainedModelInfo(
pretrained_model_name="vad_multilingual_frame_marblenet",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/vad_multilingual_frame_marblenet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/vad_multilingual_frame_marblenet/versions/1.20.0/files/vad_multilingual_frame_marblenet.nemo",
)
results.append(model)
return results
def _setup_metrics(self):
self._accuracy = TopKClassificationAccuracy(dist_sync_on_step=True)
self._macro_accuracy = Accuracy(num_classes=self.num_classes, average='macro', task="multiclass")
def _setup_loss(self):
if "loss" in self.cfg:
weight = self.cfg.loss.get("weight", None)
if weight in [None, "none", "None"]:
weight = [1.0] * self.num_classes
logging.info(f"Using cross-entropy with weights: {weight}")
else:
weight = [1.0] * self.num_classes
return CrossEntropyLoss(logits_ndim=3, weight=weight)
def _setup_dataloader_from_config(self, config: DictConfig):
OmegaConf.set_struct(config, False)
config.is_regression_task = self.is_regression_task
OmegaConf.set_struct(config, True)
shuffle = config.get('shuffle', False)
if config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
raise ValueError(
"Could not load dataset as `manifest_filepath` is None or "
f"`tarred_audio_filepaths` is None. Provided cfg : {config}"
)
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
dataset = audio_to_label_dataset.get_tarred_audio_multi_label_dataset(
cfg=config, shuffle_n=shuffle_n, global_rank=self.global_rank, world_size=self.world_size,
)
shuffle = False
if hasattr(dataset, 'collate_fn'):
collate_func = dataset.collate_fn
else:
collate_func = dataset.datasets[0].collate_fn
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
raise ValueError(f"Could not load dataset as `manifest_filepath` is None. Provided cfg : {config}")
dataset = audio_to_label_dataset.get_audio_multi_label_dataset(config)
collate_func = dataset.collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config.get("batch_size", 1),
collate_fn=collate_func,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def _setup_feature_label_dataloader(self, config: DictConfig) -> torch.utils.data.DataLoader:
"""
setup dataloader for VAD inference with audio features as input
"""
OmegaConf.set_struct(config, False)
config.is_regression_task = self.is_regression_task
OmegaConf.set_struct(config, True)
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` is None. Provided config : {config}")
return None
dataset = feature_to_label_dataset.get_feature_multi_label_dataset(config=config, augmentor=augmentor)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config.get("batch_size", 1),
collate_fn=dataset.collate_fn,
drop_last=config.get('drop_last', False),
shuffle=config.get('shuffle', False),
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def get_label_masks(self, labels, labels_len):
mask = torch.arange(labels.size(1))[None, :].to(labels.device) < labels_len[:, None]
return mask.to(labels.device, dtype=bool)
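# Worked example (comment only): for labels of shape [2, 5] with labels_len = [3, 5], get_label_masks returns
#   [[True, True, True, False, False],
#    [True, True, True, True,  True ]]
# i.e. positions beyond each sequence's true length are masked out before computing the loss and metrics.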
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None
):
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if not (has_input_signal ^ has_processed_signal):
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_length`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
# Crop or pad is always applied
if self.crop_or_pad is not None:
processed_signal, processed_signal_length = self.crop_or_pad(
input_signal=processed_signal, length=processed_signal_length
)
# Spec augment is not applied during evaluation/testing
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
logits = self.decoder(encoded.transpose(1, 2))
return logits
# PTL-specific methods
def training_step(self, batch, batch_idx):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
labels, labels_len = self.reshape_labels(logits, labels, audio_signal_len, labels_len)
masks = self.get_label_masks(labels, labels_len)
loss_value = self.loss(logits=logits, labels=labels, loss_mask=masks)
tensorboard_logs = {
'train_loss': loss_value,
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': torch.tensor(self.trainer.global_step, dtype=torch.float32),
}
metric_logits, metric_labels = self.get_metric_logits_labels(logits, labels, masks)
self._accuracy(logits=metric_logits, labels=metric_labels)
topk_scores = self._accuracy.compute()
self._accuracy.reset()
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_logs[f'training_batch_accuracy_top@{top_k}'] = score
return {'loss': loss_value, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx, dataloader_idx: int = 0, tag: str = 'val'):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
labels, labels_len = self.reshape_labels(logits, labels, audio_signal_len, labels_len)
masks = self.get_label_masks(labels, labels_len)
loss_value = self.loss(logits=logits, labels=labels, loss_mask=masks)
metric_logits, metric_labels = self.get_metric_logits_labels(logits, labels, masks)
acc = self._accuracy(logits=metric_logits, labels=metric_labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
self._macro_accuracy.update(preds=metric_logits, target=metric_labels)
stats = self._macro_accuracy._final_state()
return {
f'{tag}_loss': loss_value,
f'{tag}_correct_counts': correct_counts,
f'{tag}_total_counts': total_counts,
f'{tag}_acc_micro': acc,
f'{tag}_acc_stats': stats,
}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0, tag: str = 'val'):
val_loss_mean = torch.stack([x[f'{tag}_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x[f'{tag}_correct_counts'] for x in outputs]).sum(axis=0)
total_counts = torch.stack([x[f'{tag}_total_counts'] for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
self._macro_accuracy.tp = torch.stack([x[f'{tag}_acc_stats'][0] for x in outputs]).sum(axis=0)
self._macro_accuracy.fp = torch.stack([x[f'{tag}_acc_stats'][1] for x in outputs]).sum(axis=0)
self._macro_accuracy.tn = torch.stack([x[f'{tag}_acc_stats'][2] for x in outputs]).sum(axis=0)
self._macro_accuracy.fn = torch.stack([x[f'{tag}_acc_stats'][3] for x in outputs]).sum(axis=0)
macro_accuracy_score = self._macro_accuracy.compute()
self._accuracy.reset()
self._macro_accuracy.reset()
tensorboard_log = {
f'{tag}_loss': val_loss_mean,
f'{tag}_acc_macro': macro_accuracy_score,
}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log[f'{tag}_acc_micro_top@{top_k}'] = score
self.log_dict(tensorboard_log, sync_dist=True)
return tensorboard_log
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self.validation_step(batch, batch_idx, dataloader_idx, tag='test')
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
return self.multi_validation_epoch_end(outputs, dataloader_idx, tag='test')
def reshape_labels(self, logits, labels, logits_len, labels_len):
"""
Reshape labels to match the logits shape. For example, each label may cover a 40ms frame, while each frame prediction from the
model covers 20ms. If labels are shorter than logits, labels are repeated; otherwise labels are folded and a per-frame maximum (amax)
is applied to obtain the label of each frame. When the lengths of labels and logits are not factors of each other, labels are truncated
or padded with zeros. The ratio_threshold=0.2 is used to decide whether to pad or truncate labels; the exact value of 0.2 is not
important, since in real cases the ratio is very close to either ceil(ratio) or floor(ratio). We use 0.2 here for easier unit-testing.
This implementation does not support frame lengths and label lengths that are not multiples of each other.
Args:
logits: logits tensor with shape [B, T1, C]
labels: labels tensor with shape [B, T2]
logits_len: logits length tensor with shape [B]
labels_len: labels length tensor with shape [B]
Returns:
labels: labels tensor with shape [B, T1]
labels_len: labels length tensor with shape [B]
"""
logits_max_len = logits.size(1)
labels_max_len = labels.size(1)
batch_size = logits.size(0)
if logits_max_len < labels_max_len:
ratio = labels_max_len // logits_max_len
res = labels_max_len % logits_max_len
if ceil(ratio) - ratio < self.ratio_threshold: # e.g., ratio is 1.99
# pad labels with zeros until labels_max_len is a multiple of logits_max_len
labels = labels.cpu().tolist()
if len(labels) % ceil(ratio) != 0:
labels += [0] * (ceil(ratio) - len(labels) % ceil(ratio))
labels = torch.tensor(labels).long().to(logits.device)
labels = labels.view(-1, ceil(ratio)).amax(1)
return self.reshape_labels(logits, labels, logits_len, labels_len)
else:
# truncate additional labels until labels_max_len is a multiple of logits_max_len
if res > 0:
labels = labels[:, :-res]
mask = labels_len > (labels_max_len - res)
labels_len = labels_len - mask * (labels_len - (labels_max_len - res))
labels = labels.view(batch_size, ratio, -1).amax(1)
labels_len = torch.div(labels_len, ratio, rounding_mode="floor")
labels_len = torch.min(torch.cat([logits_len[:, None], labels_len[:, None]], dim=1), dim=1)[0]
return labels.contiguous(), labels_len.contiguous()
elif logits_max_len > labels_max_len:
ratio = logits_max_len / labels_max_len
res = logits_max_len % labels_max_len
if ceil(ratio) - ratio < self.ratio_threshold: # e.g., ratio is 1.99
# repeat labels for ceil(ratio) times, and DROP additional labels based on logits_max_len
labels = labels.repeat_interleave(ceil(ratio), dim=1).long()
labels = labels[:, :logits_max_len]
labels_len = labels_len * ceil(ratio)
mask = labels_len > logits_max_len
labels_len = labels_len - mask * (labels_len - logits_max_len)
else: # e.g., ratio is 2.01
# repeat labels for floor(ratio) times, and ADD padding labels based on logits_max_len
labels = labels.repeat_interleave(floor(ratio), dim=1).long()
labels_len = labels_len * floor(ratio)
if res > 0:
labels = torch.cat([labels, labels[:, -res:]], dim=1)
# no need to update `labels_len` since we ignore additional "res" padded labels
labels_len = torch.min(torch.cat([logits_len[:, None], labels_len[:, None]], dim=1), dim=1)[0]
return labels.contiguous(), labels_len.contiguous()
else:
labels_len = torch.min(torch.cat([logits_len[:, None], labels_len[:, None]], dim=1), dim=1)[0]
return labels, labels_len
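# Worked example for the "logits longer than labels" branch (comment only, made-up sizes):
# with logits of shape [B, 8, C] and labels of shape [B, 4], ratio = 8 / 4 = 2.0 and ceil(ratio) - ratio = 0,
# which is below ratio_threshold, so labels are expanded with labels.repeat_interleave(2, dim=1) to length 8,
# labels_len is doubled and then clamped to logits_len. With a ratio such as 2.3, labels are instead repeated
# floor(ratio) times and the remaining frames are filled by padding with the trailing labels.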
def get_metric_logits_labels(self, logits, labels, masks):
"""
Computes valid logits and labels for metric computation.
Args:
logits: tensor of shape [B, T, C]
labels: tensor of shape [B, T]
masks: tensor of shape [B, T]
Returns:
logits of shape [N, C]
labels of shape [N,]
"""
C = logits.size(2)
logits = logits.view(-1, C) # [BxT, C]
labels = labels.view(-1).contiguous() # [BxT,]
masks = masks.view(-1) # [BxT,]
idx = masks.nonzero() # [BxT, 1]
logits = logits.gather(dim=0, index=idx.repeat(1, C))  # keep only the masked (valid) frames across all C classes
labels = labels.gather(dim=0, index=idx.view(-1))
return logits, labels
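# Worked example (comment only): with B=1, T=3, C=2 and masks = [[True, True, False]], idx selects the first
# two frames, so the method returns logits of shape [2, 2] and labels of shape [2]; only valid (unmasked)
# frames reach the accuracy metrics.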
def forward_for_export(
self, input, length=None, cache_last_channel=None, cache_last_time=None, cache_last_channel_len=None
):
"""
This forward is used when we need to export the model to ONNX format.
Inputs cache_last_channel and cache_last_time are needed to be passed for exporting streaming models.
Args:
input: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps.
length: Vector of length B, that contains the individual lengths of the audio sequences.
cache_last_channel: Tensor of shape [N, B, T, H] which contains the cache for last channel layers
cache_last_time: Tensor of shape [N, B, H, T] which contains the cache for last time layers
N is the number of such layers which need caching, B is batch size, H is the hidden size of activations,
and T is the length of the cache
Returns:
the output of the model
"""
enc_fun = getattr(self.input_module, 'forward_for_export', self.input_module.forward)
if cache_last_channel is None:
encoder_output = enc_fun(audio_signal=input, length=length)
if isinstance(encoder_output, tuple):
encoder_output = encoder_output[0]
else:
encoder_output, length, cache_last_channel, cache_last_time, cache_last_channel_len = enc_fun(
audio_signal=input,
length=length,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
)
dec_fun = getattr(self.output_module, 'forward_for_export', self.output_module.forward)
ret = dec_fun(hidden_states=encoder_output.transpose(1, 2))
if isinstance(ret, tuple):
ret = ret[0]
if cache_last_channel is not None:
ret = (ret, length, cache_last_channel, cache_last_time, cache_last_channel_len)
return cast_all(ret, from_dtype=torch.float16, to_dtype=torch.float32)
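# A hedged export sketch (comments only; the file names are placeholders). Models implementing
# forward_for_export can typically be exported through NeMo's Exportable interface, e.g.:
#
#   model = EncDecFrameClassificationModel.restore_from("frame_vad.nemo")  # hypothetical checkpoint
#   model.export("frame_vad.onnx")  # routes through forward_for_export defined above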
|
NeMo-main
|
nemo/collections/asr/models/classification_models.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import ceil
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import DALIOutputs
from nemo.collections.asr.parts.mixins import ASRModuleMixin
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.mixins import AccessMixin, set_access_cfg
from nemo.core.neural_types import (
AcousticEncodedRepresentation,
AudioSignal,
LabelsType,
LengthsType,
NeuralType,
SpectrogramType,
)
from nemo.utils import logging
__all__ = ['SpeechEncDecSelfSupervisedModel']
class SpeechEncDecSelfSupervisedModel(ModelPT, ASRModuleMixin, AccessMixin):
"""Base class for encoder-decoder models used for self-supervised encoder pre-training"""
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="ssl_en_conformer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:ssl_en_conformer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ssl_en_conformer_large/versions/1.10.1/files/ssl_en_conformer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="ssl_en_conformer_xlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:ssl_en_conformer_xlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ssl_en_conformer_xlarge/versions/1.10.0/files/ssl_en_conformer_xlarge.nemo",
)
results.append(model)
return results
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
# Global_rank and local_rank is set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.world_size
super().__init__(cfg=cfg, trainer=trainer)
self.preprocessor = SpeechEncDecSelfSupervisedModel.from_config_dict(self._cfg.preprocessor)
self.encoder = SpeechEncDecSelfSupervisedModel.from_config_dict(self._cfg.encoder)
self.decoder_losses = None
if "loss_list" in self._cfg:
self.decoder_losses = {}
self.loss_alphas = {}
self.start_step = {}
self.output_from_layer = {}
self.transpose_encoded = {}
self.targets_from_loss = {}
self.decoder_losses_active = {}
# need to be separate for moduledict
for decoder_loss_name, decoder_loss_cfg in self._cfg.loss_list.items():
if not decoder_loss_cfg.get("is_active", True): # active by default
continue
new_decoder_loss = {
'decoder': SpeechEncDecSelfSupervisedModel.from_config_dict(decoder_loss_cfg.decoder),
'loss': SpeechEncDecSelfSupervisedModel.from_config_dict(decoder_loss_cfg.loss),
}
new_decoder_loss = nn.ModuleDict(new_decoder_loss)
self.decoder_losses[decoder_loss_name] = new_decoder_loss
self.loss_alphas[decoder_loss_name] = decoder_loss_cfg.get("loss_alpha", 1.0)
self.output_from_layer[decoder_loss_name] = decoder_loss_cfg.get("output_from_layer", None)
self.targets_from_loss[decoder_loss_name] = decoder_loss_cfg.get("targets_from_loss", None)
self.start_step[decoder_loss_name] = decoder_loss_cfg.get("start_step", 0)
self.transpose_encoded[decoder_loss_name] = decoder_loss_cfg.get("transpose_encoded", False)
self.decoder_losses_active[decoder_loss_name] = True
self.decoder_losses = nn.ModuleDict(self.decoder_losses)
else:
self.decoder_ssl = SpeechEncDecSelfSupervisedModel.from_config_dict(self._cfg.decoder)
self.loss = SpeechEncDecSelfSupervisedModel.from_config_dict(self._cfg.loss)
self.spec_augmentation = SpeechEncDecSelfSupervisedModel.from_config_dict(self._cfg.spec_augment)
# dropout for features/spectrograms (applied before masking)
self.dropout_features = (
torch.nn.Dropout(self._cfg.dropout_features) if "dropout_features" in self._cfg else None
)
# dropout for targets (applied before quantization)
self.dropout_features_q = (
torch.nn.Dropout(self._cfg.dropout_features_q) if "dropout_features_q" in self._cfg else None
)
# Feature penalty for preprocessor encodings (for Wav2Vec training)
if "feature_penalty" in self._cfg:
self.feat_pen, self.pen_factor = 0.0, self._cfg.feature_penalty
else:
self.feat_pen, self.pen_factor = None, None
if "access" in self._cfg:
set_access_cfg(self._cfg.access)
self.apply_masking = True
def _setup_dataloader_from_config(self, config: Optional[Dict]):
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
# Automatically inject args from model config to dataloader config
audio_to_text_dataset.inject_dataloader_value_from_model_config(self.cfg, config, key='sample_rate')
shuffle = config['shuffle']
device = 'gpu' if torch.cuda.is_available() else 'cpu'
if config.get('use_dali', False):
device_id = self.local_rank if device == 'gpu' else None
dataset = audio_to_text_dataset.get_dali_char_dataset(
config=config,
shuffle=shuffle,
device_id=device_id,
global_rank=self.global_rank,
world_size=self.world_size,
preprocessor_cfg=self._cfg.preprocessor,
)
return dataset
# Instantiate tarred dataset loader or normal dataset loader
if config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
logging.warning(
"Could not load dataset as `manifest_filepath` was None or "
f"`tarred_audio_filepaths` is None. Provided config : {config}"
)
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
dataset = audio_to_text_dataset.get_tarred_dataset(
config=config,
shuffle_n=shuffle_n,
global_rank=self.global_rank,
world_size=self.world_size,
augmentor=augmentor,
)
shuffle = False
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
dataset = audio_to_text_dataset.get_char_dataset(config=config, augmentor=augmentor)
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the training data loader via a Dict-like object.
Args:
train_data_config: A config that contains the information regarding construction
of an ASR Training dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
# preserve config
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if (
self._train_dl is not None
and hasattr(self._train_dl, 'dataset')
and isinstance(self._train_dl.dataset, torch.utils.data.IterableDataset)
):
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
)
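# A hedged sketch of a training-data config for this model (comments only). The keys below are typical for
# manifest-based audio datasets and are illustrative rather than exhaustive; the accepted keys depend on the
# dataset class selected by the config.
#
#   train_cfg = {
#       "manifest_filepath": "train_manifest.json",  # placeholder path
#       "sample_rate": 16000,
#       "batch_size": 16,
#       "shuffle": True,
#       "num_workers": 4,
#       "pin_memory": True,
#   }
#   ssl_model.setup_training_data(train_data_config=train_cfg)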
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the validation data loader via a Dict-like object.
Args:
val_data_config: A config that contains the information regarding construction
of an ASR Validation dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if (
self._validation_dl is not None
and hasattr(self._validation_dl, 'dataset')
and isinstance(self._validation_dl.dataset, torch.utils.data.IterableDataset)
):
# We also need to check if limit_val_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # validation batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if isinstance(self._trainer.limit_val_batches, float):
self._trainer.limit_val_batches = int(
self._trainer.limit_val_batches
* ceil((len(self._validation_dl.dataset) / self.world_size) / val_data_config['batch_size'])
)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
input_signal_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), input_signal_eltype, optional=True),
"input_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),
"processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"targets": NeuralType(('B', 'T'), LabelsType(), optional=True),
"target_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"spectrograms": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"spec_masks": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"encoded": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_len": NeuralType(tuple('B'), LengthsType()),
}
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None,
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 4 elements -
1) Processed spectrograms of shape [B, D, T].
2) Masks applied to spectrograms of shape [B, D, T].
3) The encoded features tensor of shape [B, D, T].
4) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
"""
# Reset access registry
if self.is_access_enabled():
self.reset_registry()
# Check for special flag for validation step
if hasattr(self, '_in_validation_step'):
in_validation_step = self._in_validation_step
else:
in_validation_step = False
# reset module registry from AccessMixin
if (
(self.training or in_validation_step)
and self.decoder_losses is not None
and self.output_from_layer is not None
and len(self.output_from_layer) > 0
):
layer_names = list(self.output_from_layer.values())
register_layer = any([name is not None for name in layer_names])
if register_layer:
self.access_cfg['save_encoder_tensors'] = True
self.set_access_enabled(access_enabled=True)
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if not (has_input_signal ^ has_processed_signal):
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_len`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
if self.pen_factor:
self.feat_pen = processed_signal.float().pow(2).mean() * self.pen_factor
spectrograms = processed_signal.detach().clone()
if self.dropout_features:
processed_signal = self.dropout_features(processed_signal)
if self.dropout_features_q:
spectrograms = self.dropout_features_q(spectrograms)
if self.apply_masking:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
masked_spectrograms = processed_signal.detach()
spec_masks = torch.logical_and(masked_spectrograms < 1e-5, masked_spectrograms > -1e-5).float()
for idx, proc_len in enumerate(processed_signal_length):
spec_masks[idx, :, proc_len:] = 0.0
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
return spectrograms, spec_masks, encoded, encoded_len
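# Comment-only usage sketch of the SSL forward pass (shapes follow the docstring above; values are placeholders):
#
#   audio = torch.randn(4, 16000)            # [B, T] raw audio
#   audio_len = torch.full((4,), 16000)      # [B] lengths
#   spectrograms, spec_masks, encoded, encoded_len = ssl_model(
#       input_signal=audio, input_signal_length=audio_len
#   )
#   # spectrograms / spec_masks: [B, D, T_spec]; encoded: [B, D, T_enc]; encoded_len: [B]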
def decoder_loss_step(self, spectrograms, spec_masks, encoded, encoded_len, targets=None, target_lengths=None):
"""
Forward pass through all decoders and calculate corresponding losses.
Args:
spectrograms: Processed spectrograms of shape [B, D, T].
spec_masks: Masks applied to spectrograms of shape [B, D, T].
encoded: The encoded features tensor of shape [B, D, T].
encoded_len: The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
targets: Optional target labels of shape [B, T]
target_lengths: Optional target label lengths of shape [B]
Returns:
A tuple of 2 elements -
1) Total sum of losses weighted by corresponding loss_alphas
2) Dictionary of unweighted losses
"""
loss_val_dict = {}
if self.decoder_losses is None:
if hasattr(self.decoder_ssl, "needs_labels") and self.decoder_ssl.needs_labels:
outputs = self.decoder_ssl(encoder_output=encoded, targets=targets, target_lengths=target_lengths)
else:
outputs = self.decoder_ssl(encoder_output=encoded)
if self.loss.needs_labels:
loss_value = self.loss(
spec_masks=spec_masks,
decoder_outputs=outputs,
targets=targets,
decoder_lengths=encoded_len,
target_lengths=target_lengths,
)
else:
loss_value = self.loss(spectrograms=spectrograms, spec_masks=spec_masks, decoder_outputs=outputs)
else:
loss_value = encoded.new_zeros(1)
outputs = {}
registry = self.get_module_registry(self.encoder)
for dec_loss_name, dec_loss in self.decoder_losses.items():
# loop through decoders and corresponding losses
if not self.decoder_losses_active[dec_loss_name]:
continue
if self.output_from_layer[dec_loss_name] is None:
dec_input = encoded
else:
# extract output from specified layer using AccessMixin registry
dec_input = registry[self.output_from_layer[dec_loss_name]]['encoder'][-1]
if self.transpose_encoded[dec_loss_name]:
dec_input = dec_input.transpose(-2, -1)
if self.targets_from_loss[dec_loss_name] is not None:
# extract targets from specified loss
target_loss = self.targets_from_loss[dec_loss_name]
targets = self.decoder_losses[target_loss]['loss'].target_ids
target_lengths = self.decoder_losses[target_loss]['loss'].target_lengths
if target_lengths is None:
target_lengths = encoded_len
if hasattr(dec_loss['decoder'], "needs_labels") and dec_loss['decoder'].needs_labels:
# if we are using a decoder which needs labels, provide them
outputs[dec_loss_name] = dec_loss['decoder'](
encoder_output=dec_input, targets=targets, target_lengths=target_lengths
)
else:
outputs[dec_loss_name] = dec_loss['decoder'](encoder_output=dec_input)
current_loss = dec_loss['loss']
if current_loss.needs_labels:
# if we are using a loss which needs labels, provide them
current_loss_value = current_loss(
spec_masks=spec_masks,
decoder_outputs=outputs[dec_loss_name],
targets=targets,
decoder_lengths=encoded_len,
target_lengths=target_lengths,
)
else:
current_loss_value = current_loss(
spectrograms=spectrograms,
spec_masks=spec_masks,
decoder_outputs=outputs[dec_loss_name],
decoder_lengths=encoded_len,
)
loss_value = loss_value + current_loss_value * self.loss_alphas[dec_loss_name]
loss_val_dict[dec_loss_name] = current_loss_value
return loss_value, loss_val_dict
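# In the multi-decoder case the value returned above is effectively
#   loss_value = sum(loss_alphas[name] * current_loss_value[name]) over the currently active decoders,
# where a decoder becomes active once trainer.global_step reaches its configured start_step
# (see training_step below).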
# PTL-specific methods
def training_step(self, batch, batch_nb):
signal, signal_len, targets, target_lengths = batch
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
spectrograms, spec_masks, encoded, encoded_len = self.forward(
processed_signal=signal, processed_signal_length=signal_len,
)
else:
spectrograms, spec_masks, encoded, encoded_len = self.forward(
input_signal=signal, input_signal_length=signal_len,
)
if self.decoder_losses is not None:
for dec_loss_name, dec_loss in self.decoder_losses.items():
self.decoder_losses_active[dec_loss_name] = self.trainer.global_step >= self.start_step[dec_loss_name]
loss = dec_loss['loss']
if hasattr(loss, "set_num_updates"):
loss.set_num_updates(self.trainer.global_step)
else:
if hasattr(self.loss, "set_num_updates"):
self.loss.set_num_updates(self.trainer.global_step)
loss_value, loss_val_dict = self.decoder_loss_step(
spectrograms, spec_masks, encoded, encoded_len, targets, target_lengths
)
tensorboard_logs = {
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': self.trainer.global_step,
}
for loss_name, loss_val in loss_val_dict.items():
tensorboard_logs['train_' + loss_name] = loss_val
if self.feat_pen:
loss_value += self.feat_pen
# Reset access registry
self.reset_registry()
return {'loss': loss_value, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx, dataloader_idx=0):
# Set flag to register tensors
self._in_validation_step = True
signal, signal_len, targets, target_lengths = batch
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
spectrograms, spec_masks, encoded, encoded_len = self.forward(
processed_signal=signal, processed_signal_length=signal_len,
)
else:
spectrograms, spec_masks, encoded, encoded_len = self.forward(
input_signal=signal, input_signal_length=signal_len,
)
if self.decoder_losses is not None:
for dec_loss_name, dec_loss in self.decoder_losses.items():
self.decoder_losses_active[dec_loss_name] = self.trainer.global_step >= self.start_step[dec_loss_name]
loss_value, _ = self.decoder_loss_step(spectrograms, spec_masks, encoded, encoded_len, targets, target_lengths)
if self.feat_pen:
loss_value += self.feat_pen
# reset access registry
self.reset_registry()
del self._in_validation_step
return {
'val_loss': loss_value,
}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': val_loss_mean}
return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
|
NeMo-main
|
nemo/collections/asr/models/ssl_models.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf, open_dict
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import AudioToBPEDALIDataset
from nemo.collections.asr.losses.ctc import CTCLoss
from nemo.collections.asr.metrics.wer_bpe import WERBPE, CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.parts.mixins import ASRBPEMixin
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging, model_utils
__all__ = ['EncDecCTCModelBPE']
class EncDecCTCModelBPE(EncDecCTCModel, ASRBPEMixin):
"""Encoder decoder CTC-based models with Byte Pair Encoding."""
def __init__(self, cfg: DictConfig, trainer=None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
if 'tokenizer' not in cfg:
raise ValueError("`cfg` must have `tokenizer` config to create a tokenizer !")
# Setup the tokenizer
self._setup_tokenizer(cfg.tokenizer)
# Initialize a dummy vocabulary
vocabulary = self.tokenizer.tokenizer.get_vocab()
# Set the new vocabulary
with open_dict(cfg):
# sidestepping the potential overlapping tokens issue in aggregate tokenizers
if self.tokenizer_type == "agg":
cfg.decoder.vocabulary = ListConfig(vocabulary)
else:
cfg.decoder.vocabulary = ListConfig(list(vocabulary.keys()))
# Override number of classes if placeholder provided
num_classes = cfg.decoder["num_classes"]
if num_classes < 1:
logging.info(
"\nReplacing placeholder number of classes ({}) with actual number of classes - {}".format(
num_classes, len(vocabulary)
)
)
cfg.decoder["num_classes"] = len(vocabulary)
super().__init__(cfg=cfg, trainer=trainer)
# Setup decoding objects
decoding_cfg = self.cfg.get('decoding', None)
# In case decoding config not found, use default config
if decoding_cfg is None:
decoding_cfg = OmegaConf.structured(CTCBPEDecodingConfig)
with open_dict(self.cfg):
self.cfg.decoding = decoding_cfg
self.decoding = CTCBPEDecoding(self.cfg.decoding, tokenizer=self.tokenizer)
# Setup metric with decoding strategy
self._wer = WERBPE(
decoding=self.decoding,
use_cer=self._cfg.get('use_cer', False),
dist_sync_on_step=True,
log_prediction=self._cfg.get("log_prediction", False),
)
def _setup_dataloader_from_config(self, config: Optional[Dict]):
dataset = audio_to_text_dataset.get_audio_to_text_bpe_dataset_from_config(
config=config,
local_rank=self.local_rank,
global_rank=self.global_rank,
world_size=self.world_size,
tokenizer=self.tokenizer,
preprocessor_cfg=self.cfg.get("preprocessor", None),
)
if dataset is None:
return None
if isinstance(dataset, AudioToBPEDALIDataset):
# DALI Dataset implements dataloader interface
return dataset
shuffle = config['shuffle']
if isinstance(dataset, torch.utils.data.IterableDataset):
shuffle = False
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
num_workers: (int) number of workers. Depends of the batch_size and machine. \
0 - only the main process will load batches, 1 - one worker (not main process)
Returns:
A pytorch DataLoader for the given audio file(s).
"""
if 'manifest_filepath' in config:
manifest_filepath = config['manifest_filepath']
batch_size = config['batch_size']
else:
manifest_filepath = os.path.join(config['temp_dir'], 'manifest.json')
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': manifest_filepath,
'sample_rate': self.preprocessor._sample_rate,
'batch_size': batch_size,
'shuffle': False,
'num_workers': config.get('num_workers', min(batch_size, os.cpu_count() - 1)),
'pin_memory': True,
'channel_selector': config.get('channel_selector', None),
'use_start_end_token': self.cfg.validation_ds.get('use_start_end_token', False),
}
if config.get("augmentor"):
dl_config['augmentor'] = config.get("augmentor")
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
def change_vocabulary(
self,
new_tokenizer_dir: Union[str, DictConfig],
new_tokenizer_type: str,
decoding_cfg: Optional[DictConfig] = None,
):
"""
Changes the vocabulary of the tokenizer used during the CTC decoding process.
Use this method when fine-tuning from a pre-trained model.
This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example,
you would use it if you want to reuse a pretrained encoder when fine-tuning on data in another language, or when
you need the model to learn capitalization, punctuation and/or special characters.
Args:
new_tokenizer_dir: Directory path to tokenizer or a config for a new tokenizer (if the tokenizer type is `agg`)
new_tokenizer_type: Either `agg`, `bpe` or `wpe`. `bpe` is used for SentencePiece tokenizers,
whereas `wpe` is used for `BertTokenizer`.
new_tokenizer_cfg: A config for the new tokenizer. If provided, it pre-empts the dir and type.
Returns: None
"""
if isinstance(new_tokenizer_dir, DictConfig):
if new_tokenizer_type == 'agg':
new_tokenizer_cfg = new_tokenizer_dir
else:
raise ValueError(
f'New tokenizer dir should be a string unless the tokenizer is `agg`, but this tokenizer type is: {new_tokenizer_type}'
)
else:
new_tokenizer_cfg = None
if new_tokenizer_cfg is not None:
tokenizer_cfg = new_tokenizer_cfg
else:
if not os.path.isdir(new_tokenizer_dir):
raise NotADirectoryError(
f"New tokenizer dir must be a non-empty path to a directory. But got: {new_tokenizer_dir}"
)
if new_tokenizer_type.lower() not in ('bpe', 'wpe'):
raise ValueError(f'New tokenizer type must be either `bpe` or `wpe`')
tokenizer_cfg = OmegaConf.create({'dir': new_tokenizer_dir, 'type': new_tokenizer_type})
# Setup the tokenizer
self._setup_tokenizer(tokenizer_cfg)
# Initialize a dummy vocabulary
vocabulary = self.tokenizer.tokenizer.get_vocab()
# Set the new vocabulary
decoder_config = copy.deepcopy(self.decoder.to_config_dict())
# sidestepping the potential overlapping tokens issue in aggregate tokenizers
if self.tokenizer_type == "agg":
decoder_config.vocabulary = ListConfig(vocabulary)
else:
decoder_config.vocabulary = ListConfig(list(vocabulary.keys()))
decoder_num_classes = decoder_config['num_classes']
# Override number of classes if placeholder provided
logging.info(
"\nReplacing old number of classes ({}) with new number of classes - {}".format(
decoder_num_classes, len(vocabulary)
)
)
decoder_config['num_classes'] = len(vocabulary)
del self.decoder
self.decoder = EncDecCTCModelBPE.from_config_dict(decoder_config)
del self.loss
self.loss = CTCLoss(
num_classes=self.decoder.num_classes_with_blank - 1,
zero_infinity=True,
reduction=self._cfg.get("ctc_reduction", "mean_batch"),
)
if decoding_cfg is None:
# Assume same decoding config as before
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(CTCBPEDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = CTCBPEDecoding(decoding_cfg=decoding_cfg, tokenizer=self.tokenizer)
self._wer = WERBPE(
decoding=self.decoding,
use_cer=self._cfg.get('use_cer', False),
log_prediction=self._cfg.get("log_prediction", False),
dist_sync_on_step=True,
)
# Update config
with open_dict(self.cfg.decoder):
self._cfg.decoder = decoder_config
with open_dict(self.cfg.decoding):
self._cfg.decoding = decoding_cfg
logging.info(f"Changed tokenizer to {self.decoder.vocabulary} vocabulary.")
def change_decoding_strategy(self, decoding_cfg: DictConfig):
"""
Changes decoding strategy used during CTC decoding process.
Args:
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
"""
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(CTCBPEDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = CTCBPEDecoding(decoding_cfg=decoding_cfg, tokenizer=self.tokenizer,)
self._wer = WERBPE(
decoding=self.decoding,
use_cer=self._wer.use_cer,
log_prediction=self._wer.log_prediction,
dist_sync_on_step=True,
)
self.decoder.temperature = decoding_cfg.get('temperature', 1.0)
# Update config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed decoding strategy to \n{OmegaConf.to_yaml(self.cfg.decoding)}")
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="stt_en_citrinet_256",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_256",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_citrinet_256/versions/1.0.0rc1/files/stt_en_citrinet_256.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_citrinet_512",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_512",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_citrinet_512/versions/1.0.0rc1/files/stt_en_citrinet_512.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_citrinet_1024",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_1024",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_citrinet_1024/versions/1.0.0rc1/files/stt_en_citrinet_1024.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_citrinet_256_gamma_0_25",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_256_gamma_0_25",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_citrinet_256_gamma_0_25/versions/1.0.0/files/stt_en_citrinet_256_gamma_0_25.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_citrinet_512_gamma_0_25",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_512_gamma_0_25",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_citrinet_512_gamma_0_25/versions/1.0.0/files/stt_en_citrinet_512_gamma_0_25.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_citrinet_1024_gamma_0_25",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_1024_gamma_0_25",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_citrinet_1024_gamma_0_25/versions/1.0.0/files/stt_en_citrinet_1024_gamma_0_25.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_es_citrinet_512",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_citrinet_512",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_citrinet_512/versions/1.0.0/files/stt_es_citrinet_512.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_de_citrinet_1024",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_citrinet_1024",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_de_citrinet_1024/versions/1.5.0/files/stt_de_citrinet_1024.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_citrinet_1024_gamma_0_25",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_fr_citrinet_1024_gamma_0_25",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_citrinet_1024_gamma_0_25/versions/1.5/files/stt_fr_citrinet_1024_gamma_0_25.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_no_hyphen_citrinet_1024_gamma_0_25",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_fr_citrinet_1024_gamma_0_25",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_citrinet_1024_gamma_0_25/versions/1.5/files/stt_fr_no_hyphen_citrinet_1024_gamma_0_25.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_es_citrinet_1024_gamma_0_25",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_citrinet_1024_gamma_0_25",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_citrinet_1024_gamma_0_25/versions/1.8.0/files/stt_es_citrinet_1024_gamma_0_25.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_ctc_small",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_small",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_ctc_small/versions/1.6.0/files/stt_en_conformer_ctc_small.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_ctc_medium",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_medium",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_ctc_medium/versions/1.6.0/files/stt_en_conformer_ctc_medium.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_ctc_large/versions/1.10.0/files/stt_en_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_ctc_xlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_xlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_ctc_xlarge/versions/1.10.0/files/stt_en_conformer_ctc_xlarge.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_squeezeformer_ctc_xsmall_ls",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_squeezeformer_ctc_xsmall_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_squeezeformer_ctc_xsmall_ls/versions/1.13.0/files/stt_en_squeezeformer_ctc_xsmall_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_squeezeformer_ctc_small_ls",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_squeezeformer_ctc_small_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_squeezeformer_ctc_small_ls/versions/1.13.0/files/stt_en_squeezeformer_ctc_small_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_squeezeformer_ctc_small_medium_ls",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_squeezeformer_ctc_small_medium_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_squeezeformer_ctc_small_medium_ls/versions/1.13.0/files/stt_en_squeezeformer_ctc_small_medium_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_squeezeformer_ctc_medium_ls",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_squeezeformer_ctc_medium_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_squeezeformer_ctc_medium_ls/versions/1.13.0/files/stt_en_squeezeformer_ctc_medium_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_squeezeformer_ctc_medium_large_ls",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_squeezeformer_ctc_medium_large_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_squeezeformer_ctc_medium_large_ls/versions/1.13.0/files/stt_en_squeezeformer_ctc_medium_large_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_squeezeformer_ctc_large_ls",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_squeezeformer_ctc_large_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_squeezeformer_ctc_large_ls/versions/1.13.0/files/stt_en_squeezeformer_ctc_large_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_ctc_small_ls",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_small_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_ctc_small_ls/versions/1.0.0/files/stt_en_conformer_ctc_small_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_ctc_medium_ls",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_medium_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_ctc_medium_ls/versions/1.0.0/files/stt_en_conformer_ctc_medium_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_ctc_large_ls",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_large_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_ctc_large_ls/versions/1.0.0/files/stt_en_conformer_ctc_large_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_conformer_ctc_large",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_fr_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_conformer_ctc_large/versions/1.5.1/files/stt_fr_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_no_hyphen_conformer_ctc_large",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_fr_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_conformer_ctc_large/versions/1.5.1/files/stt_fr_no_hyphen_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_de_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_de_conformer_ctc_large/versions/1.5.0/files/stt_de_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_es_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_conformer_ctc_large/versions/1.8.0/files/stt_es_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_hi_conformer_ctc_medium",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_hi_conformer_ctc_medium",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_hi_conformer_ctc_medium/versions/1.6.0/files/stt_hi_conformer_ctc_medium.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_mr_conformer_ctc_medium",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_mr_conformer_ctc_medium",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_mr_conformer_ctc_medium/versions/1.6.0/files/stt_mr_conformer_ctc_medium.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_enes_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_enes_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_enes_conformer_ctc_large/versions/1.0.0/files/stt_enes_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ca_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ca_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ca_conformer_ctc_large/versions/1.11.0/files/stt_ca_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_rw_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_rw_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_rw_conformer_ctc_large/versions/1.11.0/files/stt_rw_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_enes_conformer_ctc_large_codesw",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_enes_conformer_ctc_large_codesw",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_enes_conformer_ctc_large_codesw/versions/1.0.0/files/stt_enes_conformer_ctc_large_codesw.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_be_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_be_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_be_conformer_ctc_large/versions/1.12.0/files/stt_be_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_hr_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_hr_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_hr_conformer_ctc_large/versions/1.11.0/files/stt_hr_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_it_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_it_conformer_ctc_large/versions/1.13.0/files/stt_it_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ru_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ru_conformer_ctc_large/versions/1.13.0/files/stt_ru_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_eo_conformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_eo_conformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_eo_conformer_ctc_large/versions/1.14.0/files/stt_eo_conformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_ctc_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_ctc_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_ctc_large/versions/1.0.0/files/stt_en_fastconformer_ctc_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_ctc_xlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_ctc_xlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_ctc_xlarge/versions/1.20.0/files/stt_en_fastconformer_ctc_xlarge.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_ctc_xxlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_ctc_xxlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_ctc_xxlarge/versions/1.20.1/files/stt_en_fastconformer_ctc_xxlarge.nemo",
)
results.append(model)
return results
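# A minimal usage sketch for the checkpoints listed above: it assumes network access to NGC,
# that "stt_en_conformer_ctc_small" is still published under that name, and that "sample.wav"
# is a placeholder path to a 16 kHz mono audio file.
if __name__ == "__main__":
    asr_model = EncDecCTCModelBPE.from_pretrained(model_name="stt_en_conformer_ctc_small")
    transcriptions = asr_model.transcribe(["sample.wav"])
    print(transcriptions)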
|
NeMo-main
|
nemo/collections/asr/models/ctc_bpe_models.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.models.rnnt_bpe_models import EncDecRNNTBPEModel
from nemo.collections.asr.models.rnnt_models import EncDecRNNTModel
from nemo.collections.asr.parts.k2.classes import ASRK2Mixin
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
class EncDecK2SeqModel(EncDecCTCModel, ASRK2Mixin):
"""Encoder decoder models with various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "ctc")
if loss_type != "ctc" and loss_type != "mmi":
raise ValueError(f"Class {self.__class__.__name__} does not support `loss_type`={loss_type}")
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_vocabulary: List[str]):
"""
        Changes the vocabulary used during the CTC decoding process. Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you would
        use it if you want to reuse a pretrained encoder when fine-tuning on data in another language, or when you need the
        model to learn capitalization, punctuation and/or special characters.
If new_vocabulary == self.decoder.vocabulary then nothing will be changed.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
Returns: None
"""
super().change_vocabulary(new_vocabulary)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None,
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The greedy token predictions of the model of shape [B, T] (via argmax)
"""
log_probs, encoded_len, greedy_predictions = super().forward(
input_signal=input_signal,
input_signal_length=input_signal_length,
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
)
return self._forward_k2_post_processing(
log_probs=log_probs, encoded_length=encoded_len, greedy_predictions=greedy_predictions
)
class EncDecK2SeqModelBPE(EncDecCTCModelBPE, ASRK2Mixin):
"""Encoder decoder models with Byte Pair Encoding and various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "ctc")
if loss_type != "ctc" and loss_type != "mmi":
raise ValueError(f"Class {self.__class__.__name__} does not support `loss_type`={loss_type}")
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_tokenizer_dir: str, new_tokenizer_type: str):
"""
Changes vocabulary of the tokenizer used during CTC decoding process.
        Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you would
        use it if you want to reuse a pretrained encoder when fine-tuning on data in another language, or when you need the
        model to learn capitalization, punctuation and/or special characters.
Args:
new_tokenizer_dir: Path to the new tokenizer directory.
new_tokenizer_type: Either `bpe` or `wpe`. `bpe` is used for SentencePiece tokenizers,
whereas `wpe` is used for `BertTokenizer`.
Returns: None
"""
super().change_vocabulary(new_tokenizer_dir, new_tokenizer_type)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None,
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The greedy token predictions of the model of shape [B, T] (via argmax)
"""
log_probs, encoded_len, greedy_predictions = super().forward(
input_signal=input_signal,
input_signal_length=input_signal_length,
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
)
return self._forward_k2_post_processing(
log_probs=log_probs, encoded_length=encoded_len, greedy_predictions=greedy_predictions
)
class EncDecK2RnntSeqModel(EncDecRNNTModel, ASRK2Mixin):
"""Encoder decoder models with various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "rnnt")
criterion_type = cfg.graph_module_cfg.get("criterion_type", "ml")
if loss_type != "rnnt" or criterion_type != "ml":
raise ValueError(
f"""Class {self.__class__.__name__} does not support
`criterion_type`={criterion_type} with `loss_type`={loss_type}"""
)
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
    def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_vocabulary: List[str]):
"""
        Changes the vocabulary used during the CTC decoding process. Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you would
        use it if you want to reuse a pretrained encoder when fine-tuning on data in another language, or when you need the
        model to learn capitalization, punctuation and/or special characters.
If new_vocabulary == self.decoder.vocabulary then nothing will be changed.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
Returns: None
"""
super().change_vocabulary(new_vocabulary)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
class EncDecK2RnntSeqModelBPE(EncDecRNNTBPEModel, ASRK2Mixin):
"""Encoder decoder models with Byte Pair Encoding and various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "rnnt")
criterion_type = cfg.graph_module_cfg.get("criterion_type", "ml")
if loss_type != "rnnt" or criterion_type != "ml":
raise ValueError(
f"""Class {self.__class__.__name__} does not support
`criterion_type`={criterion_type} with `loss_type`={loss_type}"""
)
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
    def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_tokenizer_dir: str, new_tokenizer_type: str):
"""
Changes vocabulary of the tokenizer used during CTC decoding process.
        Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you would
        use it if you want to reuse a pretrained encoder when fine-tuning on data in another language, or when you need the
        model to learn capitalization, punctuation and/or special characters.
Args:
new_tokenizer_dir: Path to the new tokenizer directory.
new_tokenizer_type: Either `bpe` or `wpe`. `bpe` is used for SentencePiece tokenizers,
whereas `wpe` is used for `BertTokenizer`.
Returns: None
"""
super().change_vocabulary(new_tokenizer_dir, new_tokenizer_type)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
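# A minimal configuration sketch for the classes above: it shows only the `graph_module_cfg`
# fields that the constructors validate (`loss_type`, `criterion_type`, `backend_cfg.token_lm`).
# A full model config (preprocessor/encoder/decoder, etc.) and an installed k2 backend are
# still required to actually instantiate any of these classes.
if __name__ == "__main__":
    from omegaconf import OmegaConf
    graph_module_cfg = OmegaConf.create(
        {
            "loss_type": "ctc",  # EncDecK2SeqModel(BPE) accepts "ctc" or "mmi"
            "criterion_type": "ml",  # EncDecK2RnntSeqModel(BPE) requires "ml" together with loss_type "rnnt"
            "backend_cfg": {"token_lm": None},
        }
    )
    # Same defaulting pattern as in the constructors above:
    assert graph_module_cfg.get("loss_type", "ctc") in ("ctc", "mmi")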
|
NeMo-main
|
nemo/collections/asr/models/k2_sequence_models.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle as pkl
import shutil
import tarfile
import tempfile
from copy import deepcopy
from typing import Any, List, Optional, Union
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from tqdm import tqdm
from nemo.collections.asr.metrics.der import score_labels
from nemo.collections.asr.models.classification_models import EncDecClassificationModel
from nemo.collections.asr.models.label_models import EncDecSpeakerLabelModel
from nemo.collections.asr.parts.mixins.mixins import DiarizationMixin
from nemo.collections.asr.parts.utils.speaker_utils import (
audio_rttm_map,
get_embs_and_timestamps,
get_uniqname_from_filepath,
parse_scale_configs,
perform_clustering,
segments_manifest_to_subsegments_manifest,
validate_vad_manifest,
write_rttm2manifest,
)
from nemo.collections.asr.parts.utils.vad_utils import (
generate_overlap_vad_seq,
generate_vad_segment_table,
get_vad_stream_status,
prepare_manifest,
)
from nemo.core.classes import Model
from nemo.utils import logging, model_utils
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
__all__ = ['ClusteringDiarizer']
_MODEL_CONFIG_YAML = "model_config.yaml"
_VAD_MODEL = "vad_model.nemo"
_SPEAKER_MODEL = "speaker_model.nemo"
def get_available_model_names(class_name):
"lists available pretrained model names from NGC"
available_models = class_name.list_available_models()
return list(map(lambda x: x.pretrained_model_name, available_models))
class ClusteringDiarizer(torch.nn.Module, Model, DiarizationMixin):
"""
    Inference model class for offline speaker diarization.
    This class handles the functionality required for diarization: speech activity detection, segmentation,
    embedding extraction, clustering, resegmentation and scoring.
    All parameters are passed through the config file.
"""
def __init__(self, cfg: Union[DictConfig, Any], speaker_model=None):
super().__init__()
if isinstance(cfg, DictConfig):
cfg = model_utils.convert_model_config_to_dict_config(cfg)
# Convert config to support Hydra 1.0+ instantiation
cfg = model_utils.maybe_update_config_version(cfg)
self._cfg = cfg
# Diarizer set up
self._diarizer_params = self._cfg.diarizer
# init vad model
self.has_vad_model = False
if not self._diarizer_params.oracle_vad:
if self._cfg.diarizer.vad.model_path is not None:
self._vad_params = self._cfg.diarizer.vad.parameters
self._init_vad_model()
# init speaker model
self.multiscale_embeddings_and_timestamps = {}
self._init_speaker_model(speaker_model)
self._speaker_params = self._cfg.diarizer.speaker_embeddings.parameters
# Clustering params
self._cluster_params = self._diarizer_params.clustering.parameters
@classmethod
def list_available_models(cls):
pass
def _init_vad_model(self):
"""
Initialize VAD model with model name or path passed through config
"""
model_path = self._cfg.diarizer.vad.model_path
if model_path.endswith('.nemo'):
self._vad_model = EncDecClassificationModel.restore_from(model_path, map_location=self._cfg.device)
logging.info("VAD model loaded locally from {}".format(model_path))
else:
if model_path not in get_available_model_names(EncDecClassificationModel):
logging.warning(
"requested {} model name not available in pretrained models, instead".format(model_path)
)
model_path = "vad_telephony_marblenet"
logging.info("Loading pretrained {} model from NGC".format(model_path))
self._vad_model = EncDecClassificationModel.from_pretrained(
model_name=model_path, map_location=self._cfg.device
)
self._vad_window_length_in_sec = self._vad_params.window_length_in_sec
self._vad_shift_length_in_sec = self._vad_params.shift_length_in_sec
self.has_vad_model = True
def _init_speaker_model(self, speaker_model=None):
"""
Initialize speaker embedding model with model name or path passed through config
"""
if speaker_model is not None:
self._speaker_model = speaker_model
else:
model_path = self._cfg.diarizer.speaker_embeddings.model_path
if model_path is not None and model_path.endswith('.nemo'):
self._speaker_model = EncDecSpeakerLabelModel.restore_from(model_path, map_location=self._cfg.device)
logging.info("Speaker Model restored locally from {}".format(model_path))
elif model_path.endswith('.ckpt'):
self._speaker_model = EncDecSpeakerLabelModel.load_from_checkpoint(
model_path, map_location=self._cfg.device
)
logging.info("Speaker Model restored locally from {}".format(model_path))
else:
if model_path not in get_available_model_names(EncDecSpeakerLabelModel):
logging.warning(
"requested {} model name not available in pretrained models, instead".format(model_path)
)
model_path = "ecapa_tdnn"
logging.info("Loading pretrained {} model from NGC".format(model_path))
self._speaker_model = EncDecSpeakerLabelModel.from_pretrained(
model_name=model_path, map_location=self._cfg.device
)
self.multiscale_args_dict = parse_scale_configs(
self._diarizer_params.speaker_embeddings.parameters.window_length_in_sec,
self._diarizer_params.speaker_embeddings.parameters.shift_length_in_sec,
self._diarizer_params.speaker_embeddings.parameters.multiscale_weights,
)
def _setup_vad_test_data(self, manifest_vad_input):
vad_dl_config = {
'manifest_filepath': manifest_vad_input,
'sample_rate': self._cfg.sample_rate,
'batch_size': self._cfg.get('batch_size'),
'vad_stream': True,
'labels': ['infer',],
'window_length_in_sec': self._vad_window_length_in_sec,
'shift_length_in_sec': self._vad_shift_length_in_sec,
'trim_silence': False,
'num_workers': self._cfg.num_workers,
}
self._vad_model.setup_test_data(test_data_config=vad_dl_config)
def _setup_spkr_test_data(self, manifest_file):
spk_dl_config = {
'manifest_filepath': manifest_file,
'sample_rate': self._cfg.sample_rate,
'batch_size': self._cfg.get('batch_size'),
'trim_silence': False,
'labels': None,
'num_workers': self._cfg.num_workers,
}
self._speaker_model.setup_test_data(spk_dl_config)
def _run_vad(self, manifest_file):
"""
Run voice activity detection.
        Get the log probabilities from the VAD model and smooth them using the post-processing parameters.
        The generated frame-level predictions are then used to build a manifest file for later speaker embedding extraction.
        input:
        manifest_file (str): Manifest file containing the path to each audio file, with label set to 'infer'
"""
shutil.rmtree(self._vad_dir, ignore_errors=True)
os.makedirs(self._vad_dir)
self._vad_model.eval()
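        # Consecutive chunks of the same audio stream overlap by one window; `trunc` and `trunc_l`
        # below drop the overlapped halves of each chunk's frame predictions so that every frame is
        # written to the per-file .frame output exactly once.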
time_unit = int(self._vad_window_length_in_sec / self._vad_shift_length_in_sec)
trunc = int(time_unit / 2)
trunc_l = time_unit - trunc
all_len = 0
data = []
for line in open(manifest_file, 'r', encoding='utf-8'):
file = json.loads(line)['audio_filepath']
data.append(get_uniqname_from_filepath(file))
status = get_vad_stream_status(data)
for i, test_batch in enumerate(
tqdm(self._vad_model.test_dataloader(), desc='vad', leave=True, disable=not self.verbose)
):
test_batch = [x.to(self._vad_model.device) for x in test_batch]
with autocast():
log_probs = self._vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])
probs = torch.softmax(log_probs, dim=-1)
pred = probs[:, 1]
if status[i] == 'start':
to_save = pred[:-trunc]
elif status[i] == 'next':
to_save = pred[trunc:-trunc_l]
elif status[i] == 'end':
to_save = pred[trunc_l:]
else:
to_save = pred
all_len += len(to_save)
outpath = os.path.join(self._vad_dir, data[i] + ".frame")
with open(outpath, "a", encoding='utf-8') as fout:
for f in range(len(to_save)):
fout.write('{0:0.4f}\n'.format(to_save[f]))
del test_batch
if status[i] == 'end' or status[i] == 'single':
all_len = 0
if not self._vad_params.smoothing:
            # Shift the window by 10 ms to generate each frame and use the window's prediction as the label for that frame.
self.vad_pred_dir = self._vad_dir
frame_length_in_sec = self._vad_shift_length_in_sec
else:
# Generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
            # smoothing_method is either majority vote (median) or average (mean)
logging.info("Generating predictions with overlapping input segments")
smoothing_pred_dir = generate_overlap_vad_seq(
frame_pred_dir=self._vad_dir,
smoothing_method=self._vad_params.smoothing,
overlap=self._vad_params.overlap,
window_length_in_sec=self._vad_window_length_in_sec,
shift_length_in_sec=self._vad_shift_length_in_sec,
num_workers=self._cfg.num_workers,
)
self.vad_pred_dir = smoothing_pred_dir
frame_length_in_sec = 0.01
logging.info("Converting frame level prediction to speech/no-speech segment in start and end times format.")
vad_params = self._vad_params if isinstance(self._vad_params, (DictConfig, dict)) else self._vad_params.dict()
table_out_dir = generate_vad_segment_table(
vad_pred_dir=self.vad_pred_dir,
postprocessing_params=vad_params,
frame_length_in_sec=frame_length_in_sec,
num_workers=self._cfg.num_workers,
out_dir=self._vad_dir,
)
AUDIO_VAD_RTTM_MAP = {}
for key in self.AUDIO_RTTM_MAP:
if os.path.exists(os.path.join(table_out_dir, key + ".txt")):
AUDIO_VAD_RTTM_MAP[key] = deepcopy(self.AUDIO_RTTM_MAP[key])
AUDIO_VAD_RTTM_MAP[key]['rttm_filepath'] = os.path.join(table_out_dir, key + ".txt")
else:
logging.warning(f"no vad file found for {key} due to zero or negative duration")
write_rttm2manifest(AUDIO_VAD_RTTM_MAP, self._vad_out_file)
self._speaker_manifest_path = self._vad_out_file
def _run_segmentation(self, window: float, shift: float, scale_tag: str = ''):
self.subsegments_manifest_path = os.path.join(self._speaker_dir, f'subsegments{scale_tag}.json')
logging.info(
f"Subsegmentation for embedding extraction:{scale_tag.replace('_',' ')}, {self.subsegments_manifest_path}"
)
self.subsegments_manifest_path = segments_manifest_to_subsegments_manifest(
segments_manifest_file=self._speaker_manifest_path,
subsegments_manifest_file=self.subsegments_manifest_path,
window=window,
shift=shift,
)
return None
def _perform_speech_activity_detection(self):
"""
Checks for type of speech activity detection from config. Choices are NeMo VAD,
external vad manifest and oracle VAD (generates speech activity labels from provided RTTM files)
"""
if self.has_vad_model:
self._auto_split = True
self._split_duration = 50
manifest_vad_input = self._diarizer_params.manifest_filepath
if self._auto_split:
logging.info("Split long audio file to avoid CUDA memory issue")
logging.debug("Try smaller split_duration if you still have CUDA memory issue")
config = {
'input': manifest_vad_input,
'window_length_in_sec': self._vad_window_length_in_sec,
'split_duration': self._split_duration,
'num_workers': self._cfg.num_workers,
'out_dir': self._diarizer_params.out_dir,
}
manifest_vad_input = prepare_manifest(config)
else:
logging.warning(
"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it."
)
self._setup_vad_test_data(manifest_vad_input)
self._run_vad(manifest_vad_input)
elif self._diarizer_params.vad.external_vad_manifest is not None:
self._speaker_manifest_path = self._diarizer_params.vad.external_vad_manifest
elif self._diarizer_params.oracle_vad:
self._speaker_manifest_path = os.path.join(self._speaker_dir, 'oracle_vad_manifest.json')
self._speaker_manifest_path = write_rttm2manifest(self.AUDIO_RTTM_MAP, self._speaker_manifest_path)
else:
raise ValueError(
"Only one of diarizer.oracle_vad, vad.model_path or vad.external_vad_manifest must be passed from config"
)
validate_vad_manifest(self.AUDIO_RTTM_MAP, vad_manifest=self._speaker_manifest_path)
def _extract_embeddings(self, manifest_file: str, scale_idx: int, num_scales: int):
"""
        This method extracts speaker embeddings from the segments passed through manifest_file.
        Optionally, you may save the intermediate speaker embeddings for debugging or other uses.
"""
logging.info("Extracting embeddings for Diarization")
self._setup_spkr_test_data(manifest_file)
self.embeddings = {}
self._speaker_model.eval()
self.time_stamps = {}
all_embs = torch.empty([0])
for test_batch in tqdm(
self._speaker_model.test_dataloader(),
desc=f'[{scale_idx+1}/{num_scales}] extract embeddings',
leave=True,
disable=not self.verbose,
):
test_batch = [x.to(self._speaker_model.device) for x in test_batch]
audio_signal, audio_signal_len, labels, slices = test_batch
with autocast():
_, embs = self._speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
emb_shape = embs.shape[-1]
embs = embs.view(-1, emb_shape)
all_embs = torch.cat((all_embs, embs.cpu().detach()), dim=0)
del test_batch
with open(manifest_file, 'r', encoding='utf-8') as manifest:
for i, line in enumerate(manifest.readlines()):
line = line.strip()
dic = json.loads(line)
uniq_name = get_uniqname_from_filepath(dic['audio_filepath'])
if uniq_name in self.embeddings:
self.embeddings[uniq_name] = torch.cat((self.embeddings[uniq_name], all_embs[i].view(1, -1)))
else:
self.embeddings[uniq_name] = all_embs[i].view(1, -1)
if uniq_name not in self.time_stamps:
self.time_stamps[uniq_name] = []
start = dic['offset']
end = start + dic['duration']
self.time_stamps[uniq_name].append([start, end])
if self._speaker_params.save_embeddings:
embedding_dir = os.path.join(self._speaker_dir, 'embeddings')
if not os.path.exists(embedding_dir):
os.makedirs(embedding_dir, exist_ok=True)
prefix = get_uniqname_from_filepath(manifest_file)
name = os.path.join(embedding_dir, prefix)
self._embeddings_file = name + f'_embeddings.pkl'
pkl.dump(self.embeddings, open(self._embeddings_file, 'wb'))
logging.info("Saved embedding files to {}".format(embedding_dir))
def path2audio_files_to_manifest(self, paths2audio_files, manifest_filepath):
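        """
        Write a diarization inference manifest for the given audio files, one JSON entry per line.
        Each entry records the audio_filepath with offset 0.0, unknown duration, placeholder text '-',
        and label 'infer', matching the format consumed by audio_rttm_map() in diarize().
        """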
with open(manifest_filepath, 'w', encoding='utf-8') as fp:
for audio_file in paths2audio_files:
audio_file = audio_file.strip()
entry = {'audio_filepath': audio_file, 'offset': 0.0, 'duration': None, 'text': '-', 'label': 'infer'}
fp.write(json.dumps(entry) + '\n')
def diarize(self, paths2audio_files: List[str] = None, batch_size: int = 0):
"""
        Diarize files provided through paths2audio_files or the manifest file.
        input:
        paths2audio_files (List[str]): list of paths to audio files
batch_size (int): batch_size considered for extraction of speaker embeddings and VAD computation
"""
self._out_dir = self._diarizer_params.out_dir
self._speaker_dir = os.path.join(self._diarizer_params.out_dir, 'speaker_outputs')
if os.path.exists(self._speaker_dir):
logging.warning("Deleting previous clustering diarizer outputs.")
shutil.rmtree(self._speaker_dir, ignore_errors=True)
os.makedirs(self._speaker_dir)
if not os.path.exists(self._out_dir):
os.mkdir(self._out_dir)
self._vad_dir = os.path.join(self._out_dir, 'vad_outputs')
self._vad_out_file = os.path.join(self._vad_dir, "vad_out.json")
if batch_size:
self._cfg.batch_size = batch_size
if paths2audio_files:
if type(paths2audio_files) is list:
self._diarizer_params.manifest_filepath = os.path.join(self._out_dir, 'paths2audio_filepath.json')
self.path2audio_files_to_manifest(paths2audio_files, self._diarizer_params.manifest_filepath)
else:
raise ValueError("paths2audio_files must be of type list of paths to file containing audio file")
self.AUDIO_RTTM_MAP = audio_rttm_map(self._diarizer_params.manifest_filepath)
out_rttm_dir = os.path.join(self._out_dir, 'pred_rttms')
os.makedirs(out_rttm_dir, exist_ok=True)
# Speech Activity Detection
self._perform_speech_activity_detection()
# Segmentation
scales = self.multiscale_args_dict['scale_dict'].items()
for scale_idx, (window, shift) in scales:
# Segmentation for the current scale (scale_idx)
self._run_segmentation(window, shift, scale_tag=f'_scale{scale_idx}')
# Embedding Extraction for the current scale (scale_idx)
self._extract_embeddings(self.subsegments_manifest_path, scale_idx, len(scales))
self.multiscale_embeddings_and_timestamps[scale_idx] = [self.embeddings, self.time_stamps]
embs_and_timestamps = get_embs_and_timestamps(
self.multiscale_embeddings_and_timestamps, self.multiscale_args_dict
)
# Clustering
all_reference, all_hypothesis = perform_clustering(
embs_and_timestamps=embs_and_timestamps,
AUDIO_RTTM_MAP=self.AUDIO_RTTM_MAP,
out_rttm_dir=out_rttm_dir,
clustering_params=self._cluster_params,
device=self._speaker_model.device,
verbose=self.verbose,
)
logging.info("Outputs are saved in {} directory".format(os.path.abspath(self._diarizer_params.out_dir)))
# Scoring
return score_labels(
self.AUDIO_RTTM_MAP,
all_reference,
all_hypothesis,
collar=self._diarizer_params.collar,
ignore_overlap=self._diarizer_params.ignore_overlap,
verbose=self.verbose,
)
@staticmethod
def __make_nemo_file_from_folder(filename, source_dir):
with tarfile.open(filename, "w:gz") as tar:
tar.add(source_dir, arcname="./")
@rank_zero_only
def save_to(self, save_path: str):
"""
        Saves model instance (weights and configuration) into a .nemo archive.
        You can use the "restore_from" method to fully restore the instance from a .nemo file.
        A .nemo file is an archive (tar.gz) with the following:
            model_config.yaml - model configuration in .yaml format. You can deserialize this into the cfg argument for the model's constructor.
            model_weights.ckpt - model checkpoint
Args:
save_path: Path to .nemo file where model instance should be saved
"""
# TODO: Why does this override the main save_to?
with tempfile.TemporaryDirectory() as tmpdir:
config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)
spkr_model = os.path.join(tmpdir, _SPEAKER_MODEL)
self.to_config_file(path2yaml_file=config_yaml)
if self.has_vad_model:
vad_model = os.path.join(tmpdir, _VAD_MODEL)
self._vad_model.save_to(vad_model)
self._speaker_model.save_to(spkr_model)
self.__make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)
@staticmethod
def __unpack_nemo_file(path2file: str, out_folder: str) -> str:
if not os.path.exists(path2file):
raise FileNotFoundError(f"{path2file} does not exist")
tar = tarfile.open(path2file, "r:gz")
tar.extractall(path=out_folder)
tar.close()
return out_folder
@classmethod
def restore_from(
cls,
restore_path: str,
override_config_path: Optional[str] = None,
map_location: Optional[torch.device] = None,
strict: bool = False,
):
# Get path where the command is executed - the artifacts will be "retrieved" there
# (original .nemo behavior)
cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
try:
cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)
os.chdir(tmpdir)
if override_config_path is None:
config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)
else:
config_yaml = override_config_path
conf = OmegaConf.load(config_yaml)
if os.path.exists(os.path.join(tmpdir, _VAD_MODEL)):
conf.diarizer.vad.model_path = os.path.join(tmpdir, _VAD_MODEL)
else:
logging.info(
                        f'Model {cls.__name__} does not contain a VAD model. A VAD model or a manifest file with '
                        f'speech segments is needed for diarization with this model.'
)
conf.diarizer.speaker_embeddings.model_path = os.path.join(tmpdir, _SPEAKER_MODEL)
conf.restore_map_location = map_location
OmegaConf.set_struct(conf, True)
instance = cls(cfg=conf)
logging.info(f'Model {cls.__name__} was successfully restored from {restore_path}.')
finally:
os.chdir(cwd)
return instance
@property
def verbose(self) -> bool:
return self._cfg.verbose
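# A minimal usage sketch for ClusteringDiarizer, driven from an OmegaConf config.
# "diar_infer.yaml" and "input_manifest.json" are placeholder paths; a real config must provide
# diarizer.manifest_filepath, diarizer.out_dir, VAD and speaker-embedding model paths (or
# oracle_vad / an external VAD manifest), and the clustering parameters referenced above.
if __name__ == "__main__":
    cfg = OmegaConf.load("diar_infer.yaml")
    cfg.diarizer.manifest_filepath = "input_manifest.json"
    cfg.diarizer.out_dir = "./diar_outputs"
    diarizer = ClusteringDiarizer(cfg=cfg)
    diarizer.diarize()  # predicted RTTMs are written to <out_dir>/pred_rttms
    diarizer.save_to("clustering_diarizer.nemo")  # optionally bundle the config and models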
|
NeMo-main
|
nemo/collections/asr/models/clustering_diarizer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
from math import ceil
from typing import Dict, List, Optional, Union
import librosa
import numpy as np
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from torchmetrics import Accuracy
from tqdm import tqdm
from nemo.collections.asr.data.audio_to_label import AudioToSpeechLabelDataset, cache_datastore_manifests
from nemo.collections.asr.data.audio_to_label_dataset import (
get_concat_tarred_speech_label_dataset,
get_tarred_speech_label_dataset,
)
from nemo.collections.asr.data.audio_to_text_dataset import convert_to_config_list
from nemo.collections.asr.models.asr_model import ExportableEncDecModel
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.collections.common.metrics import TopKClassificationAccuracy
from nemo.collections.common.parts.preprocessing.collections import ASRSpeechLabel
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import *
from nemo.utils import logging
__all__ = ['EncDecSpeakerLabelModel']
class EncDecSpeakerLabelModel(ModelPT, ExportableEncDecModel):
"""
Encoder decoder class for speaker label models.
    The model class creates training and validation methods for setting up the data
    and performing the model forward pass.
Expects config dict for
* preprocessor
* Jasper/Quartznet Encoder
* Speaker Decoder
"""
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="speakerverification_speakernet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/speakerverification_speakernet/versions/1.16.0/files/speakerverification_speakernet.nemo",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:speakerverification_speakernet",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="ecapa_tdnn",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ecapa_tdnn/versions/1.16.0/files/ecapa_tdnn.nemo",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:ecapa_tdnn",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="titanet_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/titanet_large/versions/v1/files/titanet-l.nemo",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/titanet_large",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="langid_ambernet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/langid_ambernet/versions/1.12.0/files/ambernet.nemo",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/langid_ambernet",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="titanet_small",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:titanet_small",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/titanet_small/versions/1.19.0/files/titanet-s.nemo",
)
result.append(model)
return result
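    # A minimal usage sketch: any checkpoint listed above can be restored directly, e.g.
    #   model = EncDecSpeakerLabelModel.from_pretrained("titanet_large")
    #   emb = model.get_embedding("speaker.wav")  # "speaker.wav" is a placeholder path
    # get_embedding() and verify_speakers() defined below operate on such restored models.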
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.world_size = 1
self.cal_labels_occurrence_train = False
self.labels_occurrence = None
self.labels = None
num_classes = cfg.decoder.num_classes
if 'loss' in cfg:
if 'weight' in cfg.loss:
if cfg.loss.weight == 'auto':
weight = num_classes * [1]
self.cal_labels_occurrence_train = True
else:
weight = cfg.loss.weight
else:
weight = None # weight is None for angular loss and CE loss if it's not specified.
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
super().__init__(cfg=cfg, trainer=trainer)
if self.labels_occurrence:
            # Goal is to give more weight to the classes with fewer samples so as to match the ones with higher frequencies
weight = [sum(self.labels_occurrence) / (len(self.labels_occurrence) * i) for i in self.labels_occurrence]
if 'loss' in cfg:
cfg_eval_loss = copy.deepcopy(cfg.loss)
if 'angular' in cfg.loss._target_:
OmegaConf.set_struct(cfg, True)
with open_dict(cfg):
cfg.decoder.angular = True
if 'weight' in cfg.loss:
cfg.loss.weight = weight
cfg_eval_loss.weight = None
# May need a general check for arguments of loss
self.loss = instantiate(cfg.loss)
self.eval_loss = instantiate(cfg_eval_loss)
else:
tmp_loss_cfg = OmegaConf.create(
{"_target_": "nemo.collections.common.losses.cross_entropy.CrossEntropyLoss"}
)
self.loss = instantiate(tmp_loss_cfg)
self.eval_loss = instantiate(tmp_loss_cfg)
self._accuracy = TopKClassificationAccuracy(top_k=[1])
self.preprocessor = EncDecSpeakerLabelModel.from_config_dict(cfg.preprocessor)
self.encoder = EncDecSpeakerLabelModel.from_config_dict(cfg.encoder)
self.decoder = EncDecSpeakerLabelModel.from_config_dict(cfg.decoder)
self._macro_accuracy = Accuracy(num_classes=num_classes, top_k=1, average='macro', task='multiclass')
if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = EncDecSpeakerLabelModel.from_config_dict(self._cfg.spec_augment)
else:
self.spec_augmentation = None
@staticmethod
def extract_labels(data_layer_config):
labels = set()
manifest_filepath = data_layer_config.get('manifest_filepath', None)
if manifest_filepath is None:
logging.warning("No manifest_filepath was provided, no labels got extracted!")
return None
manifest_filepaths = convert_to_config_list(data_layer_config['manifest_filepath'])
for manifest_filepath in itertools.chain.from_iterable(manifest_filepaths):
cache_datastore_manifests(manifest_filepaths=manifest_filepath)
collection = ASRSpeechLabel(
manifests_files=manifest_filepath,
min_duration=data_layer_config.get("min_duration", None),
max_duration=data_layer_config.get("max_duration", None),
index_by_file_id=True,
)
labels.update(collection.uniq_labels)
labels = list(sorted(labels))
logging.warning(f"Total number of {len(labels)} found in all the manifest files.")
return labels
def __setup_dataloader_from_config(self, config: Optional[Dict]):
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
featurizer = WaveformFeaturizer(
sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor
)
shuffle = config.get('shuffle', False)
if config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
logging.warning(
"Could not load dataset as `manifest_filepath` was None or "
f"`tarred_audio_filepaths` is None. Provided config : {config}"
)
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
if config.get("is_concat", False):
dataset = get_concat_tarred_speech_label_dataset(
featurizer=featurizer,
config=config,
shuffle_n=shuffle_n,
global_rank=self.global_rank,
world_size=self.world_size,
)
else:
dataset = get_tarred_speech_label_dataset(
featurizer=featurizer,
config=config,
shuffle_n=shuffle_n,
global_rank=self.global_rank,
world_size=self.world_size,
)
shuffle = False
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
dataset = AudioToSpeechLabelDataset(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
featurizer=featurizer,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', False),
normalize_audio=config.get('normalize_audio', False),
cal_labels_occurrence=config.get('cal_labels_occurrence', False),
)
if dataset.labels_occurrence:
self.labels_occurrence = dataset.labels_occurrence
if hasattr(dataset, 'fixed_seq_collate_fn'):
collate_fn = dataset.fixed_seq_collate_fn
else:
collate_fn = dataset.datasets[0].fixed_seq_collate_fn
batch_size = config['batch_size']
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_layer_config: Optional[Union[DictConfig, Dict]]):
if self.cal_labels_occurrence_train:
            # Calculate labels occurrence for weighted CE loss for the train set if weight equals 'auto'
            # Note that in this case, cal_labels_occurrence in val_data_layer_config and test_data_layer_params needs to stay False
OmegaConf.set_struct(train_data_layer_config, True)
with open_dict(train_data_layer_config):
train_data_layer_config['cal_labels_occurrence'] = True
self.labels = self.extract_labels(train_data_layer_config)
train_data_layer_config['labels'] = self.labels
if 'shuffle' not in train_data_layer_config:
train_data_layer_config['shuffle'] = True
self._train_dl = self.__setup_dataloader_from_config(config=train_data_layer_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if (
self._train_dl is not None
and hasattr(self._train_dl, 'dataset')
and isinstance(self._train_dl.dataset, torch.utils.data.IterableDataset)
):
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_layer_config['batch_size'])
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_validation_data(self, val_data_layer_config: Optional[Union[DictConfig, Dict]]):
val_data_layer_config['labels'] = self.labels
self._validation_dl = self.__setup_dataloader_from_config(config=val_data_layer_config)
def setup_test_data(self, test_data_layer_params: Optional[Union[DictConfig, Dict]]):
if hasattr(self, 'dataset'):
test_data_layer_params['labels'] = self.labels
self.embedding_dir = test_data_layer_params.get('embedding_dir', './')
self._test_dl = self.__setup_dataloader_from_config(config=test_data_layer_params)
self.test_manifest = test_data_layer_params.get('manifest_filepath', None)
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
audio_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), audio_eltype),
"input_signal_length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"logits": NeuralType(('B', 'D'), LogitsType()),
"embs": NeuralType(('B', 'D'), AcousticEncodedRepresentation()),
}
def forward_for_export(self, processed_signal, processed_signal_len):
encoded, length = self.encoder(audio_signal=processed_signal, length=processed_signal_len)
logits, embs = self.decoder(encoder_output=encoded, length=length)
return logits, embs
@typecheck()
def forward(self, input_signal, input_signal_length):
processed_signal, processed_signal_len = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_len)
encoded, length = self.encoder(audio_signal=processed_signal, length=processed_signal_len)
logits, embs = self.decoder(encoder_output=encoded, length=length)
return logits, embs
# PTL-specific methods
def training_step(self, batch, batch_idx):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss = self.loss(logits=logits, labels=labels)
self.log('loss', loss)
self.log('learning_rate', self._optimizer.param_groups[0]['lr'])
self.log('global_step', self.trainer.global_step)
self._accuracy(logits=logits, labels=labels)
top_k = self._accuracy.compute()
self._accuracy.reset()
for i, top_i in enumerate(top_k):
self.log(f'training_batch_accuracy_top_{i}', top_i)
return {'loss': loss}
def evaluation_step(self, batch, batch_idx, dataloader_idx: int = 0, tag: str = 'val'):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.eval_loss(logits=logits, labels=labels)
acc_top_k = self._accuracy(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
self._macro_accuracy.update(preds=logits, target=labels)
stats = self._macro_accuracy._final_state()
return {
f'{tag}_loss': loss_value,
f'{tag}_correct_counts': correct_counts,
f'{tag}_total_counts': total_counts,
f'{tag}_acc_micro_top_k': acc_top_k,
f'{tag}_acc_macro_stats': stats,
}
def multi_evaluation_epoch_end(self, outputs, dataloader_idx: int = 0, tag: str = 'val'):
loss_mean = torch.stack([x[f'{tag}_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x[f'{tag}_correct_counts'] for x in outputs]).sum(axis=0)
total_counts = torch.stack([x[f'{tag}_total_counts'] for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
self._macro_accuracy.tp = torch.stack([x[f'{tag}_acc_macro_stats'][0] for x in outputs]).sum(axis=0)
self._macro_accuracy.fp = torch.stack([x[f'{tag}_acc_macro_stats'][1] for x in outputs]).sum(axis=0)
self._macro_accuracy.tn = torch.stack([x[f'{tag}_acc_macro_stats'][2] for x in outputs]).sum(axis=0)
self._macro_accuracy.fn = torch.stack([x[f'{tag}_acc_macro_stats'][3] for x in outputs]).sum(axis=0)
macro_accuracy_score = self._macro_accuracy.compute()
self._accuracy.reset()
self._macro_accuracy.reset()
self.log(f'{tag}_loss', loss_mean, sync_dist=True)
for top_k, score in zip(self._accuracy.top_k, topk_scores):
self.log(f'{tag}_acc_micro_top_{top_k}', score, sync_dist=True)
self.log(f'{tag}_acc_macro', macro_accuracy_score, sync_dist=True)
return {
f'{tag}_loss': loss_mean,
f'{tag}_acc_micro_top_k': topk_scores,
f'{tag}_acc_macro': macro_accuracy_score,
}
def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):
return self.evaluation_step(batch, batch_idx, dataloader_idx, 'val')
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
return self.multi_evaluation_epoch_end(outputs, dataloader_idx, 'val')
def test_step(self, batch, batch_idx, dataloader_idx: int = 0):
return self.evaluation_step(batch, batch_idx, dataloader_idx, 'test')
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
return self.multi_evaluation_epoch_end(outputs, dataloader_idx, 'test')
@torch.no_grad()
def infer_file(self, path2audio_file):
"""
Args:
path2audio_file: path to an audio wav file
Returns:
emb: speaker embeddings (Audio representations)
logits: logits corresponding of final layer
"""
audio, sr = librosa.load(path2audio_file, sr=None)
target_sr = self._cfg.train_ds.get('sample_rate', 16000)
if sr != target_sr:
audio = librosa.core.resample(audio, orig_sr=sr, target_sr=target_sr)
audio_length = audio.shape[0]
device = self.device
audio = np.array([audio])
audio_signal, audio_signal_len = (
torch.tensor(audio, device=device),
torch.tensor([audio_length], device=device),
)
mode = self.training
self.freeze()
logits, emb = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
self.train(mode=mode)
if mode is True:
self.unfreeze()
del audio_signal, audio_signal_len
return emb, logits
def get_label(self, path2audio_file):
"""
Returns label of path2audio_file from classes the model was trained on.
Args:
path2audio_file: path to audio wav file
Returns:
label: label corresponding to the trained model
"""
_, logits = self.infer_file(path2audio_file=path2audio_file)
trained_labels = self._cfg['train_ds'].get('labels', None)
if trained_labels is not None:
trained_labels = list(trained_labels)
label_id = logits.argmax(axis=1)
label = trained_labels[int(label_id[0])]
else:
logging.info("labels are not saved to model, hence only outputting the label id index")
label = logits.argmax(axis=1)
return label
def get_embedding(self, path2audio_file):
"""
Returns the speaker embeddings for a provided audio file.
Args:
path2audio_file: path to an audio wav file
Returns:
emb: speaker embeddings (Audio representations)
"""
emb, _ = self.infer_file(path2audio_file=path2audio_file)
return emb
@torch.no_grad()
def verify_speakers(self, path2audio_file1, path2audio_file2, threshold=0.7):
"""
Verify if two audio files are from the same speaker or not.
Args:
path2audio_file1: path to audio wav file of speaker 1
path2audio_file2: path to audio wav file of speaker 2
threshold: cosine similarity score used as a threshold to distinguish two embeddings (default = 0.7)
Returns:
True if both audio files are from same speaker, False otherwise
"""
embs1 = self.get_embedding(path2audio_file1).squeeze()
embs2 = self.get_embedding(path2audio_file2).squeeze()
# Length Normalize
X = embs1 / torch.linalg.norm(embs1)
Y = embs2 / torch.linalg.norm(embs2)
# Score
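# Cosine similarity of the length-normalized embeddings, mapped from [-1, 1] to [0, 1]
# so that the default `threshold` of 0.7 is interpreted on a 0-1 scale.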
similarity_score = torch.dot(X, Y) / ((torch.dot(X, X) * torch.dot(Y, Y)) ** 0.5)
similarity_score = (similarity_score + 1) / 2
# Decision
if similarity_score >= threshold:
logging.info(" two audio files are from same speaker")
return True
else:
logging.info(" two audio files are from different speakers")
return False
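# Illustrative usage sketch (not part of the original source). The checkpoint name and the
# audio file paths below are assumptions made only for the sake of example:
#
#     model = EncDecSpeakerLabelModel.from_pretrained("titanet_large")
#     same_speaker = model.verify_speakers("speaker1_utt.wav", "speaker2_utt.wav", threshold=0.7)
#     print("same speaker" if same_speaker else "different speakers")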
@torch.no_grad()
def batch_inference(self, manifest_filepath, batch_size=32, sample_rate=16000, device='cuda'):
"""
Perform batch inference on EncDecSpeakerLabelModel.
To perform inference on a single audio file, one can use infer_file, get_label or get_embedding
To map predicted labels, one can do
`arg_values = logits.argmax(axis=1)`
`pred_labels = list(map(lambda t : trained_labels[t], arg_values))`
Args:
manifest_filepath: Path to manifest file
batch_size: batch size to perform batch inference
sample_rate: sample rate of audio files in manifest file
device: compute device to perform operations.
Returns:
The variables below all follow the audio file order in the manifest file.
embs: embeddings of files provided in manifest file
logits: logits of final layer of EncDecSpeakerLabel Model
gt_labels: labels from manifest file (needed for speaker enrollment and testing)
trained_labels: Classification labels sorted in the order that they are mapped by the trained model
"""
mode = self.training
self.freeze()
self.eval()
self.to(device)
trained_labels = self._cfg['train_ds']['labels']
if trained_labels is not None:
trained_labels = list(trained_labels)
featurizer = WaveformFeaturizer(sample_rate=sample_rate)
dataset = AudioToSpeechLabelDataset(manifest_filepath=manifest_filepath, labels=None, featurizer=featurizer)
dataloader = torch.utils.data.DataLoader(
dataset=dataset, batch_size=batch_size, collate_fn=dataset.fixed_seq_collate_fn,
)
logits = []
embs = []
gt_labels = []
for test_batch in tqdm(dataloader):
if device == 'cuda':
test_batch = [x.to(device) for x in test_batch]
audio_signal, audio_signal_len, labels, _ = test_batch
logit, emb = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
logits.extend(logit.cpu().numpy())
gt_labels.extend(labels.cpu().numpy())
embs.extend(emb.cpu().numpy())
gt_labels = list(map(lambda t: dataset.id2label[t], gt_labels))
self.train(mode=mode)
if mode is True:
self.unfreeze()
logits, embs, gt_labels = np.asarray(logits), np.asarray(embs), np.asarray(gt_labels)
return embs, logits, gt_labels, trained_labels
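# The block below is an illustrative sketch (not part of the original source) showing how
# `batch_inference` and the label-mapping snippet from its docstring fit together. The
# checkpoint name and manifest path are assumptions made only for the sake of example.
if __name__ == "__main__":
    speaker_model = EncDecSpeakerLabelModel.from_pretrained("titanet_large")
    embs, logits, gt_labels, trained_labels = speaker_model.batch_inference(
        manifest_filepath="dev_manifest.json", batch_size=32, sample_rate=16000, device="cuda"
    )
    # Map argmax indices back to the class names the model was trained on
    # (trained_labels may be None if labels were not saved with the model)
    arg_values = logits.argmax(axis=1)
    pred_labels = [trained_labels[int(t)] for t in arg_values]
    print(pred_labels[:5], gt_labels[:5])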
|
NeMo-main
|
nemo/collections/asr/models/label_models.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import os
import tempfile
from math import ceil
from typing import Dict, List, Optional, Union
import editdistance
import torch
import torch.distributed as dist
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from tqdm.auto import tqdm
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import DALIOutputs
from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel
from nemo.collections.asr.parts.mixins import ASRBPEMixin
from nemo.collections.common.losses import SmoothedCrossEntropyLoss
from nemo.collections.common.metrics import GlobalAverageLossMetric
from nemo.collections.common.parts import transformer_weights_init
from nemo.core.classes.common import typecheck
from nemo.core.neural_types import (
AudioSignal,
ChannelType,
LabelsType,
LengthsType,
LogprobsType,
MaskType,
NeuralType,
SpectrogramType,
)
from nemo.utils import logging
try:
from sacrebleu import corpus_bleu
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_transformer
from nemo.collections.nlp.modules.common.transformer import BeamSearchSequenceGenerator, TransformerEncoder
NLP_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
NLP_AVAILABLE = False
logging.warning("Could not import NeMo NLP collection which is required for speech translation model.")
__all__ = ['EncDecTransfModelBPE']
def lens_to_mask(lens, max_length):
batch_size = lens.shape[0]
mask = torch.arange(max_length).repeat(batch_size, 1).to(lens.device) < lens[:, None]
return mask
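# Illustrative example of the mask produced above: for lens = [2, 4] and max_length = 5,
# lens_to_mask returns
#     [[True, True, False, False, False],
#      [True, True, True,  True,  False]]
# i.e. position t of row b is True iff t < lens[b].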
class EncDecTransfModelBPE(ASRModel, ExportableEncDecModel, ASRBPEMixin):
"""Base class for encoder decoder CTC-based models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if 'tokenizer' not in cfg:
raise ValueError("`cfg` must have `tokenizer` config to create a tokenizer !")
# Setup the tokenizer
self._setup_tokenizer(cfg.tokenizer)
super().__init__(cfg=cfg, trainer=trainer)
# Setup audio preprocessor
self.preprocessor = EncDecTransfModelBPE.from_config_dict(self.cfg.preprocessor)
# Setup audio encoder
self.encoder = EncDecTransfModelBPE.from_config_dict(self.cfg.encoder)
# Add projection layer if encoder and decoder differ in hidden size
if self.cfg.encoder['d_model'] != self.cfg.transf_decoder['hidden_size']:
self.adapter = torch.nn.Linear(self.cfg.encoder['d_model'], self.cfg.transf_decoder['hidden_size'])
else:
self.adapter = torch.nn.Identity()
transf_encoder_cfg_dict = OmegaConf.to_container(cfg.get('transf_encoder'))
# Whether to add Transformer Encoder block between Conformer and Transformer Decoder
self.use_transf_encoder = False
if transf_encoder_cfg_dict['num_layers'] > 0:
self.use_transf_encoder = True
self.transf_encoder = TransformerEncoder(
num_layers=transf_encoder_cfg_dict['num_layers'],
hidden_size=transf_encoder_cfg_dict['hidden_size'],
inner_size=transf_encoder_cfg_dict['inner_size'],
mask_future=False,
num_attention_heads=transf_encoder_cfg_dict['num_attention_heads'],
attn_score_dropout=transf_encoder_cfg_dict['attn_score_dropout'],
attn_layer_dropout=transf_encoder_cfg_dict['attn_layer_dropout'],
ffn_dropout=transf_encoder_cfg_dict['ffn_dropout'],
pre_ln=transf_encoder_cfg_dict.get('pre_ln', True),
pre_ln_final_layer_norm=transf_encoder_cfg_dict.get('pre_ln_final_layer_norm', True),
)
std_init_range = 1 / transf_encoder_cfg_dict['hidden_size'] ** 0.5
self.transf_encoder.apply(lambda module: transformer_weights_init(module, std_init_range))
transf_decoder_cfg_dict = OmegaConf.to_container(cfg.get('transf_decoder'))
# Transformer decoder
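# Pad the vocabulary size up to the next multiple of 8 so the decoder embedding and output
# projection dimensions stay divisible by 8, which is commonly required for efficient GPU
# (Tensor Core) kernels.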
vocab_size = 8 * ceil(self.tokenizer.vocab_size / 8)
transf_decoder_cfg_dict['vocab_size'] = vocab_size
library = transf_decoder_cfg_dict.pop('library', 'nemo')
model_name = transf_decoder_cfg_dict.pop('model_name', None)
pretrained = transf_decoder_cfg_dict.pop('pretrained', False)
self.transf_decoder = get_transformer(
library=library,
model_name=model_name,
pretrained=pretrained,
config_dict=transf_decoder_cfg_dict,
encoder=False,
pre_ln_final_layer_norm=transf_decoder_cfg_dict.get("pre_ln_final_layer_norm", False),
)
self.log_softmax = TokenClassifier(
hidden_size=self.transf_decoder.hidden_size,
num_classes=vocab_size,
activation=self.cfg.head.activation,
log_softmax=self.cfg.head.log_softmax,
dropout=self.cfg.head.dropout,
use_transformer_init=self.cfg.head.use_transformer_init,
)
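# Weight tying: the output projection of the classifier above shares its weight matrix with
# the decoder token embedding (see the assignment below).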
self.log_softmax.mlp.layer0.weight = self.transf_decoder.embedding.token_embedding.weight
std_init_range = 1 / self.transf_decoder.hidden_size ** 0.5
self.transf_decoder.apply(lambda module: transformer_weights_init(module, std_init_range))
self.log_softmax.apply(lambda module: transformer_weights_init(module, std_init_range))
# Beam Search decoding
self.beam_search = BeamSearchSequenceGenerator(
embedding=self.transf_decoder.embedding,
decoder=self.transf_decoder.decoder,
log_softmax=self.log_softmax,
max_sequence_length=self.transf_decoder.max_sequence_length,
beam_size=self.cfg.beam_search.beam_size,
bos=self.tokenizer.bos_id,
pad=self.tokenizer.pad_id,
eos=self.tokenizer.eos_id,
len_pen=self.cfg.beam_search.len_pen,
max_delta_length=self.cfg.beam_search.max_generation_delta,
)
# Define autoregressive CE loss
self.transf_loss = SmoothedCrossEntropyLoss(
pad_id=self.tokenizer.pad_id, label_smoothing=self.cfg.label_smoothing
)
if hasattr(self.cfg, 'spec_augment') and self.cfg.spec_augment is not None:
self.spec_augmentation = EncDecTransfModelBPE.from_config_dict(self.cfg.spec_augment)
else:
self.spec_augmentation = None
self.val_loss = GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True)
@torch.no_grad()
def translate(
self,
paths2audio_files: List[str],
batch_size: int = 4,
logprobs: bool = False,
return_hypotheses: bool = False,
) -> List[str]:
hypotheses = self.transcribe(paths2audio_files, batch_size, logprobs, return_hypotheses)
return hypotheses
@torch.no_grad()
def transcribe(
self,
paths2audio_files: List[str],
batch_size: int = 4,
logprobs: bool = False,
return_hypotheses: bool = False,
) -> List[str]:
"""
Transcribes audio files using the model's beam-search decoder. Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is between 5 and 25 seconds. \
But it is possible to pass a few hours long file if enough GPU memory is available.
batch_size: (int) batch size to use during inference.
Bigger will result in better throughput performance but would use more memory.
logprobs: (bool) pass True to get log probabilities instead of transcripts.
return_hypotheses: (bool) Whether to return Hypothesis objects instead of plain text.
Hypotheses allow postprocessing such as extracting timestamps or rescoring.
Returns:
A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
return {}
if return_hypotheses and logprobs:
raise ValueError(
"Either `return_hypotheses` or `logprobs` can be True at any given time."
"Returned hypotheses will contain the logprobs."
)
# We will store transcriptions here
hypotheses = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
dither_value = self.preprocessor.featurizer.dither
pad_to_value = self.preprocessor.featurizer.pad_to
try:
self.preprocessor.featurizer.dither = 0.0
self.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self.eval()
# Freeze the encoder and decoder modules
self.encoder.freeze()
self.transf_decoder.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Work in tmp directory - will store manifest file there
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w') as fp:
for audio_file in paths2audio_files:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': 'nothing'}
fp.write(json.dumps(entry) + '\n')
config = {'paths2audio_files': paths2audio_files, 'batch_size': batch_size, 'temp_dir': tmpdir}
temporary_datalayer = self._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing"):
log_probs, encoded_len, enc_states, enc_mask = self.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
beam_hypotheses = (
self.beam_search(
encoder_hidden_states=enc_states, encoder_input_mask=enc_mask, return_beam_scores=False
)
.detach()
.cpu()
.numpy()
)
beam_hypotheses = [self.tokenizer.ids_to_text(hyp) for hyp in beam_hypotheses]
if return_hypotheses:
# dump log probs per file; `beam_hypotheses` holds decoded text at this point, so pair
# each transcript with its per-file log probs from the forward pass above
beam_hypotheses = [
(hyp, log_probs[idx][: encoded_len[idx]].cpu()) for idx, hyp in enumerate(beam_hypotheses)
]
hypotheses += beam_hypotheses
del test_batch, log_probs, encoded_len, enc_states, enc_mask
finally:
# set mode back to its original value
self.train(mode=mode)
self.preprocessor.featurizer.dither = dither_value
self.preprocessor.featurizer.pad_to = pad_to_value
if mode is True:
self.encoder.unfreeze()
self.transf_decoder.unfreeze()
logging.set_verbosity(logging_level)
return hypotheses
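# Illustrative usage sketch (not part of the original source). The checkpoint path and audio
# file names below are assumptions made only for the sake of example:
#
#     model = EncDecTransfModelBPE.restore_from("speech_translation.nemo")
#     translations = model.translate(paths2audio_files=["utt1.wav", "utt2.wav"], batch_size=4)
#     print(translations)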
def _setup_dataloader_from_config(self, config: Optional[Dict]):
dataset = audio_to_text_dataset.get_audio_to_text_bpe_dataset_from_config(
config=config,
local_rank=self.local_rank,
global_rank=self.global_rank,
world_size=self.world_size,
tokenizer=self.tokenizer,
preprocessor_cfg=self.cfg.get("preprocessor", None),
)
if dataset is None:
return None
shuffle = config['shuffle']
if config.get('is_tarred', False):
shuffle = False
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
else:
collate_fn = dataset.datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[DictConfig]):
# create audio-only data loader
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
# Need to set this because if using an IterableDataset, the length of the
# dataloader is the total number of samples rather than the number of batches,
# and this messes up the tqdm progress bar. So we set the number of steps manually
# (to the correct number) to fix this.
if 'is_tarred' in train_data_config and train_data_config['is_tarred']:
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane,
# i.e. <= # training batches, and don't change it. Otherwise, adjust
# batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the validation data loader via a Dict-like object.
Args:
val_data_config: A config that contains the information regarding construction
of an ASR validation dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the test data loader via a Dict-like object.
Args:
test_data_config: A config that contains the information regarding construction
of an ASR test dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
input_signal_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), input_signal_eltype, optional=True),
"input_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),
"processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"transcript": NeuralType(('B', 'T'), LabelsType(), optional=True),
"transcript_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"sample_id": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"transf_log_probs": NeuralType(('B', 'T', 'D'), LogprobsType()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
"encoder_states": NeuralType(('B', 'T', 'D'), ChannelType()),
"encoder_mask": NeuralType(('B', 'T'), MaskType()),
}
@typecheck()
def forward(
self,
input_signal=None,
input_signal_length=None,
processed_signal=None,
processed_signal_length=None,
transcript=None,
transcript_length=None,
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 4 elements -
1) The transformer log probabilities tensor of shape [B, T, D] (None if no transcript is provided).
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The encoder hidden states of shape [B, T, D].
4) The encoder padding mask of shape [B, T].
"""
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if not (has_input_signal ^ has_processed_signal):
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_length`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length
)
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
enc_states = encoded.permute(0, 2, 1)
enc_states = self.adapter(enc_states)
enc_mask = lens_to_mask(encoded_len, enc_states.shape[1]).to(enc_states.dtype)
if self.use_transf_encoder:
enc_states = self.transf_encoder(encoder_states=enc_states, encoder_mask=enc_mask)
transf_log_probs = None
if transcript is not None:
dec_mask = lens_to_mask(transcript_length, transcript.shape[1]).to(transcript.dtype)
dec_states = self.transf_decoder(
input_ids=transcript, decoder_mask=dec_mask, encoder_embeddings=enc_states, encoder_mask=enc_mask
)
transf_log_probs = self.log_softmax(hidden_states=dec_states)
return transf_log_probs, encoded_len, enc_states, enc_mask
def compute_audio_loss(self, batch):
if batch is None:
return 0
signal, signal_len, transcript, transcript_len = batch
input_ids, labels = transcript[:, :-1], transcript[:, 1:]
transf_log_probs, encoded_len, enc_states, enc_mask = self.forward(
input_signal=signal,
input_signal_length=signal_len,
transcript=input_ids,
transcript_length=transcript_len,
)
transf_loss = self.transf_loss(log_probs=transf_log_probs, labels=labels)
return transf_loss
# PTL-specific methods
def training_step(self, batch, batch_nb):
audio_loss = self.compute_audio_loss(batch)
tensorboard_logs = {
'train_loss': audio_loss,
'learning_rate': self._optimizer.param_groups[0]['lr'],
}
return {'loss': audio_loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx, dataloader_idx=0, eval_mode="val"):
signal, signal_len, transcript, transcript_len = batch
input_ids, labels = transcript[:, :-1], transcript[:, 1:]
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
transf_log_probs, encoded_len, enc_states, enc_mask = self.forward(
processed_signal=signal,
processed_signal_length=signal_len,
transcript=input_ids,
transcript_length=transcript_len,
)
else:
transf_log_probs, encoded_len, enc_states, enc_mask = self.forward(
input_signal=signal,
input_signal_length=signal_len,
transcript=input_ids,
transcript_length=transcript_len,
)
beam_hypotheses = self.beam_search(
encoder_hidden_states=enc_states, encoder_input_mask=enc_mask, return_beam_scores=False
)
transf_loss = self.transf_loss(log_probs=transf_log_probs, labels=labels)
ground_truths = [self.tokenizer.ids_to_text(sent) for sent in transcript.detach().cpu().tolist()]
translations = [self.tokenizer.ids_to_text(sent) for sent in beam_hypotheses.detach().cpu().tolist()]
self.val_loss(loss=transf_loss, num_measurements=transf_log_probs.shape[0] * transf_log_probs.shape[1])
return {f'{eval_mode}_loss': transf_loss, 'translations': translations, 'ground_truths': ground_truths}
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self.validation_step(batch, batch_idx, dataloader_idx, eval_mode="test")
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0, eval_mode: str = "val"):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
if not outputs:
return
if isinstance(outputs[0], dict):
outputs = [outputs]
for output in outputs:
eval_loss = getattr(self, 'val_loss').compute()
translations = list(itertools.chain(*[x['translations'] for x in output]))
ground_truths = list(itertools.chain(*[x['ground_truths'] for x in output]))
# Gather translations and ground truths from all workers
tr_and_gt = [None for _ in range(self.world_size)]
# we also need to drop pairs where ground truth is an empty string
if self.world_size > 1:
dist.all_gather_object(
tr_and_gt, [(t, g) for (t, g) in zip(translations, ground_truths) if g.strip() != '']
)
else:
tr_and_gt[0] = [(t, g) for (t, g) in zip(translations, ground_truths) if g.strip() != '']
if self.global_rank == 0:
_translations = []
_ground_truths = []
for rank in range(0, self.world_size):
_translations += [t for (t, g) in tr_and_gt[rank]]
_ground_truths += [g for (t, g) in tr_and_gt[rank]]
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="13a")
sb_score = sacre_bleu.score * self.world_size
wer_scores, wer_words = 0, 0
for h, r in zip(_translations, _ground_truths):
wer_words += len(r.split())
wer_scores += editdistance.eval(h.split(), r.split())
wer_score = 1.0 * wer_scores * self.world_size / wer_words
else:
sb_score = 0.0
wer_score = 0.0
self.log(f"{eval_mode}_loss", eval_loss, sync_dist=True)
self.log(f"{eval_mode}_sacreBLEU", sb_score, sync_dist=True)
self.log(f"{eval_mode}_WER", wer_score, sync_dist=True)
self.val_loss.reset()
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
return self.multi_validation_epoch_end(outputs, dataloader_idx, eval_mode="test")
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
Returns:
A pytorch DataLoader for the given audio file(s).
"""
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': os.path.join(config['temp_dir'], 'manifest.json'),
'sample_rate': self.preprocessor._sample_rate,
'batch_size': batch_size,
'trim_silence': False,
'shuffle': False,
'num_workers': min(batch_size, os.cpu_count() - 1),
'pin_memory': True,
}
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
|
NeMo-main
|
nemo/collections/asr/models/transformer_bpe_models.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import tempfile
from typing import List, Optional
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from tqdm.auto import tqdm
from nemo.collections.asr.data.audio_to_text_dali import DALIOutputs
from nemo.collections.asr.losses.ctc import CTCLoss
from nemo.collections.asr.metrics.wer import WER, CTCDecoding, CTCDecodingConfig
from nemo.collections.asr.models.rnnt_models import EncDecRNNTModel
from nemo.collections.asr.parts.mixins import ASRBPEMixin, InterCTCMixin
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.classes.mixins import AccessMixin
from nemo.utils import logging, model_utils
class EncDecHybridRNNTCTCModel(EncDecRNNTModel, ASRBPEMixin, InterCTCMixin):
"""Base class for hybrid RNNT/CTC models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
super().__init__(cfg=cfg, trainer=trainer)
if 'aux_ctc' not in self.cfg:
raise ValueError(
"The config need to have a section for the CTC decoder named as aux_ctc for Hybrid models."
)
with open_dict(self.cfg.aux_ctc):
if "feat_in" not in self.cfg.aux_ctc.decoder or (
not self.cfg.aux_ctc.decoder.feat_in and hasattr(self.encoder, '_feat_out')
):
self.cfg.aux_ctc.decoder.feat_in = self.encoder._feat_out
if "feat_in" not in self.cfg.aux_ctc.decoder or not self.cfg.aux_ctc.decoder.feat_in:
raise ValueError("param feat_in of the decoder's config is not set!")
if self.cfg.aux_ctc.decoder.num_classes < 1 and self.cfg.aux_ctc.decoder.vocabulary is not None:
logging.info(
"\nReplacing placeholder number of classes ({}) with actual number of classes - {}".format(
self.cfg.aux_ctc.decoder.num_classes, len(self.cfg.aux_ctc.decoder.vocabulary)
)
)
self.cfg.aux_ctc.decoder["num_classes"] = len(self.cfg.aux_ctc.decoder.vocabulary)
self.ctc_decoder = EncDecRNNTModel.from_config_dict(self.cfg.aux_ctc.decoder)
self.ctc_loss_weight = self.cfg.aux_ctc.get("ctc_loss_weight", 0.5)
self.ctc_loss = CTCLoss(
num_classes=self.ctc_decoder.num_classes_with_blank - 1,
zero_infinity=True,
reduction=self.cfg.aux_ctc.get("ctc_reduction", "mean_batch"),
)
ctc_decoding_cfg = self.cfg.aux_ctc.get('decoding', None)
if ctc_decoding_cfg is None:
ctc_decoding_cfg = OmegaConf.structured(CTCDecodingConfig)
with open_dict(self.cfg.aux_ctc):
self.cfg.aux_ctc.decoding = ctc_decoding_cfg
self.ctc_decoding = CTCDecoding(self.cfg.aux_ctc.decoding, vocabulary=self.ctc_decoder.vocabulary)
self.ctc_wer = WER(
decoding=self.ctc_decoding,
use_cer=self.cfg.aux_ctc.get('use_cer', False),
dist_sync_on_step=True,
log_prediction=self.cfg.get("log_prediction", False),
)
# setting the RNNT decoder as the default one
self.cur_decoder = "rnnt"
# setting up interCTC loss (from InterCTCMixin)
self.setup_interctc(decoder_name='ctc_decoder', loss_name='ctc_loss', wer_name='ctc_wer')
@torch.no_grad()
def transcribe(
self,
paths2audio_files: List[str],
batch_size: int = 4,
return_hypotheses: bool = False,
partial_hypothesis: Optional[List['Hypothesis']] = None,
num_workers: int = 0,
channel_selector: Optional[ChannelSelectorType] = None,
augmentor: DictConfig = None,
verbose: bool = True,
logprobs: bool = False,
) -> (List[str], Optional[List['Hypothesis']]):
"""
Uses greedy decoding to transcribe audio files. Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is between 5 and 25 seconds. \
But it is possible to pass a few hours long file if enough GPU memory is available.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
return_hypotheses: (bool) Whether to return Hypothesis objects instead of plain text.
Hypotheses allow postprocessing such as extracting timestamps or rescoring.
num_workers: (int) number of workers for DataLoader
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
augmentor: (DictConfig): Augment audio samples during transcription if augmentor is applied.
verbose: (bool) whether to display tqdm progress bar
logprobs: (bool) whether to return CTC logits instead of hypotheses
Returns:
Returns a tuple of 2 items -
* A list of greedy transcript texts / Hypothesis
* An optional list of beam search transcript texts / Hypothesis / NBestHypothesis.
"""
if self.cur_decoder not in ["ctc", "rnnt"]:
raise ValueError(
f"{self.cur_decoder} is not supported for cur_decoder. Supported values are ['ctc', 'rnnt']"
)
if self.cur_decoder == "rnnt":
return super().transcribe(
paths2audio_files=paths2audio_files,
batch_size=batch_size,
return_hypotheses=return_hypotheses,
partial_hypothesis=partial_hypothesis,
num_workers=num_workers,
channel_selector=channel_selector,
augmentor=augmentor,
verbose=verbose,
)
if paths2audio_files is None or len(paths2audio_files) == 0:
return {}
# We will store transcriptions here
hypotheses = []
all_hypotheses = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
dither_value = self.preprocessor.featurizer.dither
pad_to_value = self.preprocessor.featurizer.pad_to
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
try:
self.preprocessor.featurizer.dither = 0.0
self.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self.eval()
# Freeze the encoder and decoder modules
self.encoder.freeze()
self.decoder.freeze()
self.joint.freeze()
if hasattr(self, 'ctc_decoder'):
self.ctc_decoder.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Work in tmp directory - will store manifest file there
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in paths2audio_files:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': ''}
fp.write(json.dumps(entry) + '\n')
config = {
'paths2audio_files': paths2audio_files,
'batch_size': batch_size,
'temp_dir': tmpdir,
'num_workers': num_workers,
'channel_selector': channel_selector,
}
if augmentor:
config['augmentor'] = augmentor
temporary_datalayer = self._setup_transcribe_dataloader(config)
logits_list = []
for test_batch in tqdm(temporary_datalayer, desc="Transcribing", disable=not verbose):
encoded, encoded_len = self.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
logits = self.ctc_decoder(encoder_output=encoded)
best_hyp, all_hyp = self.ctc_decoding.ctc_decoder_predictions_tensor(
logits, encoded_len, return_hypotheses=return_hypotheses,
)
logits = logits.cpu()
if return_hypotheses:
# dump log probs per file
for idx in range(logits.shape[0]):
best_hyp[idx].y_sequence = logits[idx][: encoded_len[idx]]
if best_hyp[idx].alignments is None:
best_hyp[idx].alignments = best_hyp[idx].y_sequence
if logprobs:
for logit, elen in zip(logits, encoded_len):
logits_list.append(logit[:elen])
del logits
hypotheses += best_hyp
if all_hyp is not None:
all_hypotheses += all_hyp
else:
all_hypotheses += best_hyp
del encoded
del test_batch
finally:
# set mode back to its original value
self.train(mode=mode)
self.preprocessor.featurizer.dither = dither_value
self.preprocessor.featurizer.pad_to = pad_to_value
logging.set_verbosity(logging_level)
if mode is True:
self.encoder.unfreeze()
self.decoder.unfreeze()
self.joint.unfreeze()
if hasattr(self, 'ctc_decoder'):
self.ctc_decoder.unfreeze()
if logprobs:
return logits_list
else:
return hypotheses, all_hypotheses
def change_vocabulary(
self,
new_vocabulary: List[str],
decoding_cfg: Optional[DictConfig] = None,
ctc_decoding_cfg: Optional[DictConfig] = None,
):
"""
Changes the vocabulary used during the RNNT decoding process. Use this method when fine-tuning a pre-trained model.
This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you
would use it to reuse a pretrained encoder when fine-tuning on data in another language, or when you need the model
to learn capitalization, punctuation and/or special characters.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
ctc_decoding_cfg: A config for CTC decoding, which is optional and can be used to change decoding type.
Returns: None
"""
super().change_vocabulary(new_vocabulary=new_vocabulary, decoding_cfg=decoding_cfg)
# set up the new vocabulary for the CTC decoder
if hasattr(self, 'ctc_decoder'):
if self.ctc_decoder.vocabulary == new_vocabulary:
logging.warning(
f"Old {self.ctc_decoder.vocabulary} and new {new_vocabulary} match. Not changing anything."
)
else:
if new_vocabulary is None or len(new_vocabulary) == 0:
raise ValueError(f'New vocabulary must be a non-empty list of characters, but got: {new_vocabulary}')
decoder_config = self.ctc_decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
new_decoder_config['vocabulary'] = new_vocabulary
new_decoder_config['num_classes'] = len(new_vocabulary)
del self.ctc_decoder
self.ctc_decoder = EncDecHybridRNNTCTCModel.from_config_dict(new_decoder_config)
del self.ctc_loss
self.ctc_loss = CTCLoss(
num_classes=self.ctc_decoder.num_classes_with_blank - 1,
zero_infinity=True,
reduction=self.cfg.aux_ctc.get("ctc_reduction", "mean_batch"),
)
if ctc_decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `ctc_decoding_cfg` passed when changing decoding strategy, using internal config")
ctc_decoding_cfg = self.cfg.aux_ctc.decoding
# Merge the provided decoding config with the full structured default config (all hyperparameters)
ctc_decoding_cls = OmegaConf.structured(CTCDecodingConfig)
ctc_decoding_cls = OmegaConf.create(OmegaConf.to_container(ctc_decoding_cls))
ctc_decoding_cfg = OmegaConf.merge(ctc_decoding_cls, ctc_decoding_cfg)
self.ctc_decoding = CTCDecoding(decoding_cfg=ctc_decoding_cfg, vocabulary=self.ctc_decoder.vocabulary)
self.ctc_wer = WER(
decoding=self.ctc_decoding,
use_cer=self.ctc_wer.use_cer,
log_prediction=self.ctc_wer.log_prediction,
dist_sync_on_step=True,
)
# Update config
with open_dict(self.cfg.aux_ctc):
self.cfg.aux_ctc.decoding = ctc_decoding_cfg
with open_dict(self.cfg.aux_ctc):
self.cfg.aux_ctc.decoder = new_decoder_config
ds_keys = ['train_ds', 'validation_ds', 'test_ds']
for key in ds_keys:
if key in self.cfg:
with open_dict(self.cfg[key]):
self.cfg[key]['labels'] = OmegaConf.create(new_vocabulary)
logging.info(f"Changed the tokenizer of the CTC decoder to {self.ctc_decoder.vocabulary} vocabulary.")
def change_decoding_strategy(self, decoding_cfg: DictConfig = None, decoder_type: str = None):
"""
Changes decoding strategy used during RNNT decoding process.
Args:
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
decoder_type: (str) Can be set to 'rnnt' or 'ctc' to switch between the corresponding decoders in a
model that has both RNN-T and CTC decoders. Defaults to None, in which case the RNN-T decoder is
used. If set to 'ctc', an error is raised if 'ctc_decoder' is not an attribute of the model.
"""
if decoder_type is None or decoder_type == 'rnnt':
self.cur_decoder = "rnnt"
return super().change_decoding_strategy(decoding_cfg=decoding_cfg)
assert decoder_type == 'ctc' and hasattr(self, 'ctc_decoder')
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.aux_ctc.decoding
# Merge the provided decoding config with the full structured default config (all hyperparameters)
decoding_cls = OmegaConf.structured(CTCDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.ctc_decoding = CTCDecoding(decoding_cfg=decoding_cfg, vocabulary=self.ctc_decoder.vocabulary)
self.ctc_wer = WER(
decoding=self.ctc_decoding,
use_cer=self.ctc_wer.use_cer,
log_prediction=self.ctc_wer.log_prediction,
dist_sync_on_step=True,
)
self.ctc_decoder.temperature = decoding_cfg.get('temperature', 1.0)
# Update config
with open_dict(self.cfg.aux_ctc):
self.cfg.aux_ctc.decoding = decoding_cfg
self.cur_decoder = "ctc"
logging.info(f"Changed decoding strategy to \n{OmegaConf.to_yaml(self.cfg.aux_ctc.decoding)}")
# PTL-specific methods
def training_step(self, batch, batch_nb):
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
if self.is_interctc_enabled():
AccessMixin.set_access_enabled(access_enabled=True)
signal, signal_len, transcript, transcript_len = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
# During training, loss must be computed, so decoder forward is necessary
decoder, target_length, states = self.decoder(targets=transcript, target_length=transcript_len)
if hasattr(self, '_trainer') and self._trainer is not None:
log_every_n_steps = self._trainer.log_every_n_steps
sample_id = self._trainer.global_step
else:
log_every_n_steps = 1
sample_id = batch_nb
if (sample_id + 1) % log_every_n_steps == 0:
compute_wer = True
else:
compute_wer = False
# If fused Joint-Loss-WER is not used
if not self.joint.fuse_loss_wer:
# Compute full joint and loss
joint = self.joint(encoder_outputs=encoded, decoder_outputs=decoder)
loss_value = self.loss(
log_probs=joint, targets=transcript, input_lengths=encoded_len, target_lengths=target_length
)
# Add auxiliary losses, if registered
loss_value = self.add_auxiliary_losses(loss_value)
tensorboard_logs = {
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': torch.tensor(self.trainer.global_step, dtype=torch.float32),
}
if compute_wer:
self.wer.update(encoded, encoded_len, transcript, transcript_len)
_, scores, words = self.wer.compute()
self.wer.reset()
tensorboard_logs.update({'training_batch_wer': scores.float() / words})
else: # If fused Joint-Loss-WER is used
# Fused joint step
loss_value, wer, _, _ = self.joint(
encoder_outputs=encoded,
decoder_outputs=decoder,
encoder_lengths=encoded_len,
transcripts=transcript,
transcript_lengths=transcript_len,
compute_wer=compute_wer,
)
# Add auxiliary losses, if registered
loss_value = self.add_auxiliary_losses(loss_value)
tensorboard_logs = {
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': torch.tensor(self.trainer.global_step, dtype=torch.float32),
}
if compute_wer:
tensorboard_logs.update({'training_batch_wer': wer})
if self.ctc_loss_weight > 0:
log_probs = self.ctc_decoder(encoder_output=encoded)
ctc_loss = self.ctc_loss(
log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len
)
tensorboard_logs['train_rnnt_loss'] = loss_value
tensorboard_logs['train_ctc_loss'] = ctc_loss
loss_value = (1 - self.ctc_loss_weight) * loss_value + self.ctc_loss_weight * ctc_loss
if compute_wer:
self.ctc_wer.update(
predictions=log_probs,
targets=transcript,
target_lengths=transcript_len,
predictions_lengths=encoded_len,
)
ctc_wer, _, _ = self.ctc_wer.compute()
self.ctc_wer.reset()
tensorboard_logs.update({'training_batch_wer_ctc': ctc_wer})
# note that we want to apply interctc independent of whether main ctc
# loss is used or not (to allow rnnt + interctc training).
# assuming ``ctc_loss_weight=0.3`` and interctc is applied to a single
# layer with weight of ``0.1``, the total loss will be
# ``loss = 0.9 * (0.3 * ctc_loss + 0.7 * rnnt_loss) + 0.1 * interctc_loss``
loss_value, additional_logs = self.add_interctc_losses(
loss_value, transcript, transcript_len, compute_wer=compute_wer
)
tensorboard_logs.update(additional_logs)
tensorboard_logs['train_loss'] = loss_value
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
# Log items
self.log_dict(tensorboard_logs)
# Preserve batch acoustic model T and language model U parameters if normalizing
if self._optim_normalize_joint_txu:
self._optim_normalize_txu = [encoded_len.max(), transcript_len.max()]
return {'loss': loss_value}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
# TODO: add support for CTC decoding
signal, signal_len, transcript, transcript_len, sample_id = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
best_hyp_text, all_hyp_text = self.decoding.rnnt_decoder_predictions_tensor(
encoder_output=encoded, encoded_lengths=encoded_len, return_hypotheses=False
)
sample_id = sample_id.cpu().detach().numpy()
return list(zip(sample_id, best_hyp_text))
def validation_pass(self, batch, batch_idx, dataloader_idx):
if self.is_interctc_enabled():
AccessMixin.set_access_enabled(access_enabled=True)
signal, signal_len, transcript, transcript_len = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
tensorboard_logs = {}
loss_value = None
# If experimental fused Joint-Loss-WER is not used
if not self.joint.fuse_loss_wer:
if self.compute_eval_loss:
decoder, target_length, states = self.decoder(targets=transcript, target_length=transcript_len)
joint = self.joint(encoder_outputs=encoded, decoder_outputs=decoder)
loss_value = self.loss(
log_probs=joint, targets=transcript, input_lengths=encoded_len, target_lengths=target_length
)
tensorboard_logs['val_loss'] = loss_value
self.wer.update(encoded, encoded_len, transcript, transcript_len)
wer, wer_num, wer_denom = self.wer.compute()
self.wer.reset()
tensorboard_logs['val_wer_num'] = wer_num
tensorboard_logs['val_wer_denom'] = wer_denom
tensorboard_logs['val_wer'] = wer
else:
# If experimental fused Joint-Loss-WER is used
compute_wer = True
if self.compute_eval_loss:
decoded, target_len, states = self.decoder(targets=transcript, target_length=transcript_len)
else:
decoded = None
target_len = transcript_len
# Fused joint step
loss_value, wer, wer_num, wer_denom = self.joint(
encoder_outputs=encoded,
decoder_outputs=decoded,
encoder_lengths=encoded_len,
transcripts=transcript,
transcript_lengths=target_len,
compute_wer=compute_wer,
)
if loss_value is not None:
tensorboard_logs['val_loss'] = loss_value
tensorboard_logs['val_wer_num'] = wer_num
tensorboard_logs['val_wer_denom'] = wer_denom
tensorboard_logs['val_wer'] = wer
log_probs = self.ctc_decoder(encoder_output=encoded)
if self.compute_eval_loss:
ctc_loss = self.ctc_loss(
log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len
)
tensorboard_logs['val_ctc_loss'] = ctc_loss
tensorboard_logs['val_rnnt_loss'] = loss_value
loss_value = (1 - self.ctc_loss_weight) * loss_value + self.ctc_loss_weight * ctc_loss
tensorboard_logs['val_loss'] = loss_value
self.ctc_wer.update(
predictions=log_probs, targets=transcript, target_lengths=transcript_len, predictions_lengths=encoded_len,
)
ctc_wer, ctc_wer_num, ctc_wer_denom = self.ctc_wer.compute()
self.ctc_wer.reset()
tensorboard_logs['val_wer_num_ctc'] = ctc_wer_num
tensorboard_logs['val_wer_denom_ctc'] = ctc_wer_denom
tensorboard_logs['val_wer_ctc'] = ctc_wer
self.log('global_step', torch.tensor(self.trainer.global_step, dtype=torch.float32))
loss_value, additional_logs = self.add_interctc_losses(
loss_value,
transcript,
transcript_len,
compute_wer=True,
compute_loss=self.compute_eval_loss,
log_wer_num_denom=True,
log_prefix="val_",
)
if self.compute_eval_loss:
# overriding total loss value. Note that the previous
# rnnt + ctc loss is available in metrics as "val_final_loss" now
tensorboard_logs['val_loss'] = loss_value
tensorboard_logs.update(additional_logs)
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
return tensorboard_logs
def validation_step(self, batch, batch_idx, dataloader_idx=0):
tensorboard_logs = self.validation_pass(batch, batch_idx, dataloader_idx)
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(tensorboard_logs)
else:
self.validation_step_outputs.append(tensorboard_logs)
return tensorboard_logs
def test_step(self, batch, batch_idx, dataloader_idx=0):
logs = self.validation_pass(batch, batch_idx, dataloader_idx=dataloader_idx)
test_logs = {name.replace("val_", "test_"): value for name, value in logs.items()}
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append(test_logs)
else:
self.test_step_outputs.append(test_logs)
return test_logs
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
if self.compute_eval_loss:
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
val_loss_log = {'val_loss': val_loss_mean}
else:
val_loss_log = {}
wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {**val_loss_log, 'val_wer': wer_num.float() / wer_denom}
if self.ctc_loss_weight > 0:
ctc_wer_num = torch.stack([x['val_wer_num_ctc'] for x in outputs]).sum()
ctc_wer_denom = torch.stack([x['val_wer_denom_ctc'] for x in outputs]).sum()
tensorboard_logs['val_wer_ctc'] = ctc_wer_num.float() / ctc_wer_denom
metrics = {**val_loss_log, 'log': tensorboard_logs}
self.finalize_interctc_metrics(metrics, outputs, prefix="val_")
return metrics
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
if self.compute_eval_loss:
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
test_loss_log = {'test_loss': test_loss_mean}
else:
test_loss_log = {}
wer_num = torch.stack([x['test_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['test_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {**test_loss_log, 'test_wer': wer_num.float() / wer_denom}
if self.ctc_loss_weight > 0:
ctc_wer_num = torch.stack([x['test_wer_num_ctc'] for x in outputs]).sum()
ctc_wer_denom = torch.stack([x['test_wer_denom_ctc'] for x in outputs]).sum()
tensorboard_logs['test_wer_ctc'] = ctc_wer_num.float() / ctc_wer_denom
metrics = {**test_loss_log, 'log': tensorboard_logs}
self.finalize_interctc_metrics(metrics, outputs, prefix="test_")
return metrics
# EncDecRNNTModel is exported in 2 parts
def list_export_subnets(self):
if self.cur_decoder == 'rnnt':
return ['encoder', 'decoder_joint']
else:
return ['self']
@property
def output_module(self):
if self.cur_decoder == 'rnnt':
return self.decoder
else:
return self.ctc_decoder
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models that can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
return results
|
NeMo-main
|
nemo/collections/asr/models/hybrid_rnnt_ctc_models.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.models.asr_model import ASRModel
from nemo.collections.asr.models.audio_to_audio_model import AudioToAudioModel
from nemo.collections.asr.models.classification_models import EncDecClassificationModel, EncDecFrameClassificationModel
from nemo.collections.asr.models.clustering_diarizer import ClusteringDiarizer
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.models.enhancement_models import EncMaskDecAudioToAudioModel
from nemo.collections.asr.models.hybrid_rnnt_ctc_bpe_models import EncDecHybridRNNTCTCBPEModel
from nemo.collections.asr.models.hybrid_rnnt_ctc_models import EncDecHybridRNNTCTCModel
from nemo.collections.asr.models.k2_sequence_models import (
EncDecK2RnntSeqModel,
EncDecK2RnntSeqModelBPE,
EncDecK2SeqModel,
EncDecK2SeqModelBPE,
)
from nemo.collections.asr.models.label_models import EncDecSpeakerLabelModel
from nemo.collections.asr.models.msdd_models import EncDecDiarLabelModel, NeuralDiarizer
from nemo.collections.asr.models.rnnt_bpe_models import EncDecRNNTBPEModel
from nemo.collections.asr.models.rnnt_models import EncDecRNNTModel
from nemo.collections.asr.models.slu_models import SLUIntentSlotBPEModel
from nemo.collections.asr.models.ssl_models import SpeechEncDecSelfSupervisedModel
from nemo.collections.asr.models.transformer_bpe_models import EncDecTransfModelBPE
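# Illustrative note (not part of the original source): the model classes re-exported above can
# typically be loaded through the standard NeMo entry points; the checkpoint name below is an
# assumption made only for the sake of example:
#
#     from nemo.collections.asr.models import EncDecCTCModelBPE
#     asr_model = EncDecCTCModelBPE.from_pretrained("stt_en_conformer_ctc_small")
#     print(asr_model.transcribe(["sample.wav"]))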
|
NeMo-main
|
nemo/collections/asr/models/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from copy import deepcopy
from typing import Dict
import torch
from omegaconf import DictConfig
from nemo.collections.asr.models import ClusteringDiarizer
from nemo.collections.asr.parts.utils.offline_clustering import get_scale_interpolated_embs, split_input_data
from nemo.collections.asr.parts.utils.online_clustering import OnlineSpeakerClustering
from nemo.collections.asr.parts.utils.speaker_utils import (
OnlineSegmentor,
audio_rttm_map,
generate_cluster_labels,
get_embs_and_timestamps,
)
from nemo.utils import logging, model_utils
__all__ = ['OnlineClusteringDiarizer']
def timeit(method):
"""
Measure the elapsed time of the decorated function and log it together with the method name.
Args:
method: function that is being measured
Return:
`timed` function for measuring the elapsed time
"""
def timed(*args, **kwargs):
ts = time.time()
result = method(*args, **kwargs)
te = time.time()
if 'log_time' in kwargs:
name = kwargs.get('log_name', method.__name__.upper())
kwargs['log_time'][name] = int((te - ts) * 1000)
else:
logging.info('%2.2fms %r' % ((te - ts) * 1000, method.__name__))
return result
return timed
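# --- Hypothetical usage sketch for `timeit` (illustration only; not part of the original file) ---
# The decorator either logs the elapsed milliseconds, or stores them in the dict passed via
# the `log_time` keyword argument (note that the wrapper forwards all kwargs to the wrapped
# function, so the function must tolerate them):
#
#     @timeit
#     def dummy_step(x, **kwargs):
#         return x * 2
#
#     timing_log = {}
#     dummy_step(3, log_time=timing_log, log_name='DUMMY')
#     # timing_log -> {'DUMMY': <elapsed milliseconds>}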
class OnlineClusteringDiarizer(ClusteringDiarizer):
"""
A class that enables online (streaming) clustering-based diarization.
- An instance of `OnlineClusteringDiarizer` sets aside a certain amount of memory
to provide upcoming inference steps with history information.
- Two major modules are involved: `OnlineSegmentor` and `OnlineSpeakerClustering`.
OnlineSegmentor: Takes the VAD timestamps and generates segments for each scale.
OnlineSpeakerClustering: Updates the speaker labels of the entire online session
while updating the speaker labels of the streaming inputs.
- The overall diarization process is done by calling `diarize_step` function.
`diarize_step` function goes through the following steps:
(1) Segmentation (`OnlineSegmentor` class)
(2) Embedding extraction (`_extract_online_embeddings` function call)
(3) Online speaker counting and speaker clustering (`OnlineClusteringDiarizer` class)
(4) Label generation (`generate_cluster_labels` function call)
"""
def __init__(self, cfg: DictConfig):
super().__init__(cfg)
self.cfg = model_utils.convert_model_config_to_dict_config(cfg)
self._cfg_diarizer = self.cfg.diarizer
self.base_scale_index = max(self.multiscale_args_dict['scale_dict'].keys())
self.uniq_id = self._cfg_diarizer.get('uniq_id', None)
self.decimals = self._cfg_diarizer.get('decimals', 2)
self.AUDIO_RTTM_MAP = audio_rttm_map(self.cfg.diarizer.manifest_filepath)
self.sample_rate = self.cfg.sample_rate
torch.manual_seed(0)
self._out_dir = self._cfg_diarizer.out_dir
if not os.path.exists(self._out_dir):
os.mkdir(self._out_dir)
if torch.cuda.is_available():
self.cuda = True
self.device = torch.device("cuda")
else:
self.cuda = False
self.device = torch.device("cpu")
self.reset()
# Set speaker embedding model in eval mode
self._speaker_model.eval()
def _init_online_clustering_module(self, clustering_params):
"""
Initialize online speaker clustering module
Attributes:
online_clus (OnlineSpeakerClustering):
Online clustering diarizer class instance
history_n (int):
History buffer size for saving history of speaker label inference
Total number of embedding vectors saved in the buffer that is kept till the end of the session
current_n (int):
Current buffer (FIFO queue) size for calculating the speaker label inference
Total number of embedding vectors saved in the FIFO queue for clustering inference
"""
self.online_clus = OnlineSpeakerClustering(
max_num_speakers=clustering_params.max_num_speakers,
max_rp_threshold=clustering_params.max_rp_threshold,
sparse_search_volume=clustering_params.sparse_search_volume,
history_buffer_size=clustering_params.history_buffer_size,
current_buffer_size=clustering_params.current_buffer_size,
cuda=self.cuda,
)
self.history_n = clustering_params.history_buffer_size
self.current_n = clustering_params.current_buffer_size
self.max_num_speakers = self.online_clus.max_num_speakers
def _init_online_segmentor_module(self, sample_rate):
"""
Initialize an online segmentor module
Attributes:
online_segmentor (OnlineSegmentor):
online segmentation module that generates short speech segments from the VAD input
"""
self.online_segmentor = OnlineSegmentor(sample_rate)
def _init_memory_buffer(self):
"""
Variables are kept in memory for future updates
Attributes:
memory_margin (int):
The number of embeddings saved in the memory buffer.
The memory margin depends on the base scale shift: margin = (buffer length) / (base scale shift).
The memory margin is calculated automatically to keep memory usage minimal.
memory_segment_ranges (dict):
The segment range information kept in the memory buffer
memory_segment_indexes (dict):
The segment indexes kept in the memory buffer
memory_cluster_labels (Tensor):
The cluster labels inferred in the previous diarization steps
"""
self.memory_margin = 0
self.memory_segment_ranges = {key: [] for key in self.multiscale_args_dict['scale_dict'].keys()}
self.memory_segment_indexes = {key: [] for key in self.multiscale_args_dict['scale_dict'].keys()}
self.memory_cluster_labels = torch.tensor([])
def _init_temporal_major_voting_module(self, clustering_params):
"""
Variables needed for taking majority votes for speaker labels
Attributes:
use_temporal_label_major_vote (bool):
Boolean for whether to use temporal majority voting
temporal_label_major_vote_buffer_size (int):
buffer size for majority voting
base_scale_label_dict (dict):
Dictionary containing multiple speaker labels for major voting
Speaker labels from multiple steps are saved for each segment index.
"""
self.use_temporal_label_major_vote = clustering_params.get('use_temporal_label_major_vote', False)
self.temporal_label_major_vote_buffer_size = clustering_params.get('temporal_label_major_vote_buffer_size', 1)
self.base_scale_label_dict = {}
def _init_segment_variables(self):
"""
Initialize segment variables for each scale.
Note that a `uniq_id` variable is kept for the case where multiple sessions are handled.
"""
self.emb_vectors = {}
self.time_stamps = {}
self.segment_range_ts = {}
self.segment_raw_audio = {}
self.segment_indexes = {}
for scale_idx in self.multiscale_args_dict['scale_dict'].keys():
self.multiscale_embeddings_and_timestamps[scale_idx] = [None, None]
self.emb_vectors[scale_idx] = torch.tensor([])
self.time_stamps[scale_idx] = []
self.segment_range_ts[scale_idx] = []
self.segment_raw_audio[scale_idx] = []
self.segment_indexes[scale_idx] = []
def _init_buffer_frame_timestamps(self):
"""
Timing variables transferred from OnlineDiarWithASR class.
Buffer is the window region where the input signal is kept for ASR.
Frame is the window region where the actual ASR inference results are updated.
Example:
buffer_len = 5.0
frame_len = 1.0
|___Buffer___[___________]____________|
|____________[ Frame ]____________|
| <- buffer_start
|____________| <- frame_start
|_____________________________________| <- buffer_end
buffer_start = 12.0
buffer_end = 17.0
frame_start = 14.0
These timestamps and index variables are updated by OnlineDiarWithASR.
Attributes:
frame_index (int):
Integer index of frame window
frame_start (float):
The start of the frame window
buffer_start (float):
The start of the buffer window
buffer_end (float):
The end of the buffer
"""
self.frame_index = 0
self.frame_start = 0.0
self.buffer_start = 0.0
self.buffer_end = 0.0
def _transfer_timestamps_to_segmentor(self):
"""
Pass the timing information from streaming ASR buffers.
"""
self.online_segmentor.frame_start = self.frame_start
self.online_segmentor.buffer_start = self.buffer_start
self.online_segmentor.buffer_end = self.buffer_end
def reset(self):
"""
Reset all the necessary variables and initialize classes.
Attributes:
n_embed_seg_len (int):
Length of one embedding segment at the base scale, in number of audio samples
"""
self.n_embed_seg_len = int(
self.sample_rate * self.multiscale_args_dict['scale_dict'][self.base_scale_index][0]
)
self._init_segment_variables()
self._init_online_clustering_module(self._cfg_diarizer.clustering.parameters)
self._init_online_segmentor_module(self.cfg.sample_rate)
self._init_memory_buffer()
self._init_temporal_major_voting_module(self._cfg_diarizer.clustering.parameters)
self._init_buffer_frame_timestamps()
def _clear_memory(self, scale_idx: int):
"""
Calculate how many segments should be removed from memory (`memory_margin`) and
save the necessary information.
`keep_range` determines how many segments, along with their corresponding embeddings, raw audio,
and timestamps, are kept in the memory of the online diarizer instance.
Args:
scale_idx (int):
Scale index in integer type
"""
base_scale_shift = self.multiscale_args_dict['scale_dict'][self.base_scale_index][1]
self.memory_margin = int((self.buffer_end - self.buffer_start) / base_scale_shift)
scale_buffer_size = int(
len(set(self.scale_mapping_dict[scale_idx].tolist()))
/ len(set(self.scale_mapping_dict[self.base_scale_index].tolist()))
* (self.history_n + self.current_n)
)
keep_range = scale_buffer_size + self.memory_margin
self.emb_vectors[scale_idx] = self.emb_vectors[scale_idx][-keep_range:]
self.segment_raw_audio[scale_idx] = self.segment_raw_audio[scale_idx][-keep_range:]
self.segment_range_ts[scale_idx] = self.segment_range_ts[scale_idx][-keep_range:]
self.segment_indexes[scale_idx] = self.segment_indexes[scale_idx][-keep_range:]
@timeit
def _temporal_label_major_vote(self) -> torch.Tensor:
"""
Take a majority vote for every segment over temporal steps. This feature significantly reduces the error coming
from unstable speaker counting at the beginning of a session.
Returns:
maj_vote_labels (list):
List containing the major-voted speaker labels on temporal domain
"""
maj_vote_labels = []
for seg_idx in self.memory_segment_indexes[self.base_scale_index]:
if seg_idx not in self.base_scale_label_dict:
self.base_scale_label_dict[seg_idx] = [self.memory_cluster_labels[seg_idx]]
else:
while len(self.base_scale_label_dict[seg_idx]) > self.temporal_label_major_vote_buffer_size:
self.base_scale_label_dict[seg_idx].pop(0)
self.base_scale_label_dict[seg_idx].append(self.memory_cluster_labels[seg_idx])
maj_vote_labels.append(torch.mode(torch.tensor(self.base_scale_label_dict[seg_idx]))[0].item())
return maj_vote_labels
def save_history_data(self, scale_idx: int, total_cluster_labels: torch.Tensor, is_online: bool) -> torch.Tensor:
"""
Save the temporary input to the class memory buffer.
- Clustering is done for (hist_N + curr_N) number of embeddings.
- Thus, we need to remove the clustering results on the embedding memory.
- If self.diar.history_buffer_seg_end is not None, that indicates streaming diarization system
is starting to save embeddings to its memory. Thus, the new incoming clustering label should be separated.
- If `is_online = True`, old embeddings outside the window are removed to save GPU memory.
Args:
scale_idx (int):
Scale index in integer
total_cluster_labels (Tensor):
The speaker labels from the beginning of the session to the current position
is_online (bool):
Boolean variable that indicates whether the system is currently in online mode or not
Returns:
cluster_label_hyp (Tensor):
Majority voted speaker labels over multiple inferences
"""
total_cluster_labels = total_cluster_labels.tolist()
if not is_online:
self.memory_segment_ranges[scale_idx] = deepcopy(self.segment_range_ts[scale_idx])
self.memory_segment_indexes[scale_idx] = deepcopy(self.segment_indexes[scale_idx])
if scale_idx == self.base_scale_index:
self.memory_cluster_labels = deepcopy(total_cluster_labels)
# Only if there are newly obtained embeddings, update ranges and embeddings.
elif self.segment_indexes[scale_idx][-1] > self.memory_segment_indexes[scale_idx][-1]:
# Get the global index of the first segment we want to keep in the buffer
global_stt_idx = max(max(self.memory_segment_indexes[scale_idx]) - self.memory_margin, 0)
# Convert global index global_stt_idx to buffer index buffer_stt_idx
segment_indexes_mat = torch.tensor(self.segment_indexes[scale_idx])
buffer_stt_idx = torch.where(segment_indexes_mat == global_stt_idx)[0][0]
self.memory_segment_ranges[scale_idx][global_stt_idx:] = deepcopy(
self.segment_range_ts[scale_idx][buffer_stt_idx:]
)
self.memory_segment_indexes[scale_idx][global_stt_idx:] = deepcopy(
self.segment_indexes[scale_idx][buffer_stt_idx:]
)
if scale_idx == self.base_scale_index:
self.memory_cluster_labels[global_stt_idx:] = deepcopy(total_cluster_labels[global_stt_idx:])
if len(self.memory_cluster_labels) != len(self.memory_segment_ranges[scale_idx]):
raise ValueError(
"self.memory_cluster_labels and self.memory_segment_ranges should always have the same length, "
f"but they have {len(self.memory_cluster_labels)} and {len(self.memory_segment_ranges[scale_idx])}."
)
# Remove unnecessary old values
self._clear_memory(scale_idx)
if not (
len(self.emb_vectors[scale_idx])
== len(self.segment_raw_audio[scale_idx])
== len(self.segment_indexes[scale_idx])
== len(self.segment_range_ts[scale_idx])
):
raise ValueError(
"self.emb_vectors, self.segment_raw_audio, self.segment_indexes, and self.segment_range_ts "
"should always have the same length, "
f"but they have {len(self.emb_vectors[scale_idx])}, {len(self.segment_raw_audio[scale_idx])}, "
f"{len(self.segment_indexes[scale_idx])}, and {len(self.segment_range_ts[scale_idx])}, respectively."
)
if self.use_temporal_label_major_vote:
cluster_label_hyp = self._temporal_label_major_vote()
else:
cluster_label_hyp = self.memory_cluster_labels
return cluster_label_hyp
@timeit
@torch.no_grad()
def _run_embedding_extractor(self, audio_signal: torch.Tensor) -> torch.Tensor:
"""
Call `forward` function of the speaker embedding model.
Args:
audio_signal (Tensor):
Torch tensor containing time-series signal
Returns:
Speaker embedding vectors for the given time-series input `audio_signal`.
"""
audio_signal = torch.stack(audio_signal).float().to(self.device)
audio_signal_lens = torch.tensor([self.n_embed_seg_len for k in range(audio_signal.shape[0])]).to(self.device)
_, torch_embs = self._speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_lens)
return torch_embs
@timeit
def _extract_online_embeddings(
self, audio_signal: torch.Tensor, segment_ranges: torch.Tensor, embeddings
) -> torch.Tensor:
"""
Incrementally extract speaker embeddings based on `audio_signal` and `segment_ranges` variables.
Unlike offline speaker diarization, speaker embedding and subsegment ranges are not saved to disk.
Measures the mismatch between `segment_ranges` and `embeddings`, then extracts the necessary number of
speaker embeddings.
Args:
audio_signal (Tensor):
Torch tensor containing time-series audio signal
embeddings (Tensor):
Previously existing Torch tensor containing speaker embedding vector
segment_ranges (Tensor):
Torch tensor containing the start and end of each segment
Returns:
embeddings (Tensor):
Concatenated speaker embedding vectors that match segment range information in `segment_ranges`.
"""
stt_idx = 0 if embeddings is None else embeddings.shape[0]
end_idx = len(segment_ranges)
if end_idx > stt_idx:
torch_embs = self._run_embedding_extractor(audio_signal[stt_idx:end_idx])
if embeddings is None or embeddings.shape[0] == 0:
embeddings = torch_embs
else:
embeddings = torch.vstack((embeddings[:stt_idx, :], torch_embs))
elif end_idx < stt_idx:
embeddings = embeddings[: len(segment_ranges)]
if len(segment_ranges) != embeddings.shape[0]:
raise ValueError("Segment ranges and embeddings shapes do not match.")
return embeddings
@timeit
def _perform_online_clustering(
self, uniq_embs_and_timestamps: Dict[str, torch.Tensor], cuda=False,
) -> torch.Tensor:
"""
Launch online clustering for `uniq_embs_and_timestamps` input variable.
Args:
uniq_embs_and_timestamps (dict):
Dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
cuda (bool):
Boolean indicator for CUDA usage
"""
device = torch.device("cuda") if cuda else torch.device("cpu")
# Get base-scale (the highest index) information from uniq_embs_and_timestamps.
embeddings_in_scales, timestamps_in_scales = split_input_data(
embeddings_in_scales=uniq_embs_and_timestamps['embeddings'],
timestamps_in_scales=uniq_embs_and_timestamps['timestamps'],
multiscale_segment_counts=uniq_embs_and_timestamps['multiscale_segment_counts'],
)
curr_emb, self.scale_mapping_dict = get_scale_interpolated_embs(
multiscale_weights=uniq_embs_and_timestamps['multiscale_weights'],
embeddings_in_scales=embeddings_in_scales,
timestamps_in_scales=timestamps_in_scales,
device=device,
)
base_segment_indexes = torch.tensor(self.segment_indexes[self.base_scale_index]).to(curr_emb.device)
merged_clus_labels = self.online_clus.forward_infer(
curr_emb=curr_emb, base_segment_indexes=base_segment_indexes, frame_index=self.frame_index, cuda=cuda,
)
# Update history data
for scale_idx, (window, shift) in self.multiscale_args_dict['scale_dict'].items():
cluster_label_hyp = self.save_history_data(scale_idx, merged_clus_labels, self.online_clus.is_online)
return cluster_label_hyp
def _get_interim_output(self) -> torch.Tensor:
"""
In case the buffer is not filled or there is no speech activity in the input, generate a temporary output.
Returns:
diar_hyp (Tensor): Speaker labels based on the previously saved segments and speaker labels
"""
if len(self.memory_cluster_labels) == 0 or self.buffer_start < 0:
diar_hyp, _ = generate_cluster_labels([[0.0, self.total_buffer_in_secs]], [0])
else:
diar_hyp, _ = generate_cluster_labels(
self.memory_segment_ranges[self.base_scale_index], self.memory_cluster_labels
)
return diar_hyp
@timeit
def diarize_step(self, audio_buffer: torch.Tensor, vad_timestamps: torch.Tensor) -> torch.Tensor:
"""
A function for a unit diarization step. Each diarization step goes through the following steps:
1. Segmentation:
Using `OnlineSegmentor` class, call `run_online_segmentation` method to get the segments.
2. Embedding Extraction:
Extract multiscale embeddings from the extracted speech segments.
3. Online Clustering & Counting
Perform online speaker clustering by using `OnlineSpeakerClustering` class.
4. Generate speaker labels:
Generate start and end timestamps of speaker labels based on the diarization results.
c.f.) Also see method `diarize` in `ClusteringDiarizer` class.
Args:
audio_buffer (Tensor):
Tensor variable containing the time series signal at the current frame
Dimensions: (Number of audio time-series samples) x 1
vad_timestamps (Tensor):
List containing VAD timestamps.
Dimensions: (Number of segments) x 2
Example:
>>> vad_timestamps = torch.Tensor([[0.05, 2.52], [3.12, 6.85]])
Returns:
diar_hyp (Tensor):
Speaker label hypothesis from the start of the session to the current position
"""
self._transfer_timestamps_to_segmentor()
# In case buffer is not filled or there is no speech activity in the input
if self.buffer_start < 0 or len(vad_timestamps) == 0:
return self._get_interim_output()
# Segmentation: (c.f. see `diarize` function in ClusteringDiarizer class)
for scale_idx, (window, shift) in self.multiscale_args_dict['scale_dict'].items():
# Step 1: Get subsegments for embedding extraction.
audio_sigs, segment_ranges, range_inds = self.online_segmentor.run_online_segmentation(
audio_buffer=audio_buffer,
vad_timestamps=vad_timestamps,
segment_raw_audio=self.segment_raw_audio[scale_idx],
segment_range_ts=self.segment_range_ts[scale_idx],
segment_indexes=self.segment_indexes[scale_idx],
window=window,
shift=shift,
)
self.segment_raw_audio[scale_idx] = audio_sigs
self.segment_range_ts[scale_idx] = segment_ranges
self.segment_indexes[scale_idx] = range_inds
# Step 2-1: Extract speaker embeddings from the extracted subsegment timestamps.
embeddings = self._extract_online_embeddings(
audio_signal=self.segment_raw_audio[scale_idx],
segment_ranges=self.segment_range_ts[scale_idx],
embeddings=self.emb_vectors[scale_idx],
)
# Step 2-2: Save the embeddings and segmentation timestamps in memory
self.emb_vectors[scale_idx] = embeddings
self.multiscale_embeddings_and_timestamps[scale_idx] = [
{self.uniq_id: embeddings},
{self.uniq_id: segment_ranges},
]
embs_and_timestamps = get_embs_and_timestamps(
self.multiscale_embeddings_and_timestamps, self.multiscale_args_dict
)
# Step 3 - Clustering: Perform an online version of clustering algorithm
cluster_label_hyp = self._perform_online_clustering(embs_and_timestamps[self.uniq_id], cuda=self.cuda,)
# Step 4: Generate RTTM style diarization labels from segment ranges and cluster labels
diar_hyp, _ = generate_cluster_labels(self.memory_segment_ranges[self.base_scale_index], cluster_label_hyp)
return diar_hyp
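# --- Hypothetical streaming loop (illustration only; not part of the original file) ---
# A minimal sketch of how `diarize_step` is meant to be driven, assuming a diarizer
# config `cfg` prepared as for `ClusteringDiarizer` and an external component
# (e.g., OnlineDiarWithASR) that supplies audio buffers, VAD timestamps, and the
# buffer/frame timing fields documented above. All names below are placeholders.
#
#     diarizer = OnlineClusteringDiarizer(cfg)
#     for audio_buffer, vad_timestamps, timing in stream_of_chunks:
#         diarizer.frame_index = timing.frame_index
#         diarizer.frame_start = timing.frame_start
#         diarizer.buffer_start = timing.buffer_start
#         diarizer.buffer_end = timing.buffer_end
#         diar_hyp = diarizer.diarize_step(audio_buffer, vad_timestamps)
#         # diar_hyp: speaker labels from the session start to the current position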
|
NeMo-main
|
nemo/collections/asr/models/online_diarizer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List, Union
import hydra
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.asr.metrics.audio import AudioMetricWrapper
from nemo.core.classes import ModelPT
from nemo.utils import logging, model_utils
__all__ = ['AudioToAudioModel']
class AudioToAudioModel(ModelPT, ABC):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self._setup_loss()
def _setup_loss(self):
"""Setup loss for this model.
"""
self.loss = AudioToAudioModel.from_config_dict(self._cfg.loss)
def _get_num_dataloaders(self, tag: str = 'val'):
if tag == 'val':
num_dataloaders = len(self._validation_dl) if isinstance(self._validation_dl, List) else 1
elif tag == 'test':
num_dataloaders = len(self._test_dl) if isinstance(self._test_dl, List) else 1
else:
raise ValueError(f'Unexpected tag {tag}.')
return num_dataloaders
def _setup_metrics(self, tag: str = 'val'):
"""Setup metrics for this model for all available dataloaders.
When using multiple DataLoaders, it is recommended to initialize separate modular
metric instances for each DataLoader and use them separately.
Reference:
- https://torchmetrics.readthedocs.io/en/stable/pages/lightning.html#common-pitfalls
"""
# Number of currently configured dataloaders
num_dataloaders = self._get_num_dataloaders(tag)
logging.debug('Found %d dataloaders for %s', num_dataloaders, tag)
if hasattr(self, 'metrics'):
if tag in self.metrics and len(self.metrics[tag]) == num_dataloaders:
# Exact number of metrics have already been configured, nothing else to do
logging.debug('Found %d metrics for tag %s, not necessary to initialize again', num_dataloaders, tag)
return
if 'metrics' not in self._cfg or tag not in self._cfg['metrics']:
# Metrics are not available in the configuration, nothing to do
logging.debug('No metrics configured for %s in model.metrics.%s', tag, tag)
return
metrics_cfg = self._cfg['metrics'][tag]
if 'loss' in metrics_cfg:
raise ValueError(
f'Loss is automatically included in the metrics, it should not be specified in model.metrics.{tag}.'
)
# Initialize metrics
if not hasattr(self, 'metrics'):
self.metrics = torch.nn.ModuleDict()
# Setup metrics for each dataloader
self.metrics[tag] = torch.nn.ModuleList()
for dataloader_idx in range(num_dataloaders):
metrics_dataloader_idx = torch.nn.ModuleDict(
{
name: AudioMetricWrapper(
metric=hydra.utils.instantiate(cfg),
channel=cfg.get('channel'),
metric_using_batch_averaging=cfg.get('metric_using_batch_averaging'),
)
for name, cfg in metrics_cfg.items()
}
)
self.metrics[tag].append(metrics_dataloader_idx.to(self.device))
logging.info(
'Setup metrics for %s, dataloader %d: %s', tag, dataloader_idx, ', '.join(metrics_dataloader_idx)
)
@abstractmethod
def evaluation_step(self, batch, batch_idx, dataloader_idx: int = 0, tag: str = 'val'):
pass
def on_validation_start(self):
self._setup_metrics('val')
return super().on_validation_start()
def on_test_start(self):
self._setup_metrics('test')
return super().on_test_start()
def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):
return self.evaluation_step(batch, batch_idx, dataloader_idx, 'val')
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self.evaluation_step(batch, batch_idx, dataloader_idx, 'test')
def multi_evaluation_epoch_end(self, outputs, dataloader_idx: int = 0, tag: str = 'val'):
# Handle loss
loss_mean = torch.stack([x[f'{tag}_loss'] for x in outputs]).mean()
output_dict = {f'{tag}_loss': loss_mean}
tensorboard_logs = {f'{tag}_loss': loss_mean}
# Handle metrics for this tag and dataloader_idx
if hasattr(self, 'metrics') and tag in self.metrics:
for name, metric in self.metrics[tag][dataloader_idx].items():
# Compute & reset the metric
value = metric.compute()
metric.reset()
# Store for logs
tensorboard_logs[f'{tag}_{name}'] = value
output_dict['log'] = tensorboard_logs
return output_dict
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
return self.multi_evaluation_epoch_end(outputs, dataloader_idx, 'val')
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
return self.multi_evaluation_epoch_end(outputs, dataloader_idx, 'test')
@abstractmethod
def process(
self, paths2audio_files: List[str], output_dir: str, batch_size: int = 4
) -> List[Union[str, List[str]]]:
"""
Takes paths to audio files and returns a list of paths to the processed
audio files.
Args:
paths2audio_files: paths to audio files to be processed
output_dir: directory to save processed files
batch_size: batch size for inference
Returns:
Paths to processed audio signals.
"""
pass
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
# recursively walk the subclasses to generate pretrained model info
list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)
return list_of_models
def setup_optimization_flags(self):
"""
Utility method that must be explicitly called by the subclass in order to support optional optimization flags.
This method is the only valid place to access self.cfg before DDP training occurs.
The subclass may choose not to support this method; therefore, all variables here must be checked via hasattr().
"""
# Skip update if nan/inf grads appear on any rank.
self._skip_nan_grad = False
if "skip_nan_grad" in self._cfg and self._cfg["skip_nan_grad"]:
self._skip_nan_grad = self._cfg["skip_nan_grad"]
def on_after_backward(self):
"""
Zero out the gradients if any of them is NaN or Inf.
"""
super().on_after_backward()
if hasattr(self, '_skip_nan_grad') and self._skip_nan_grad:
device = next(self.parameters()).device
valid_gradients = torch.tensor([1], device=device, dtype=torch.float32)
# valid_gradients = True
for param_name, param in self.named_parameters():
if param.grad is not None:
is_not_nan_or_inf = not (torch.isnan(param.grad).any() or torch.isinf(param.grad).any())
if not is_not_nan_or_inf:
valid_gradients = valid_gradients * 0
break
if torch.distributed.is_initialized():
torch.distributed.all_reduce(valid_gradients, op=torch.distributed.ReduceOp.MIN)
if valid_gradients < 1:
logging.warning(f'detected inf or nan values in gradients! Setting gradients to zero.')
self.zero_grad()
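# --- Hypothetical metrics configuration (illustration only; not part of the original file) ---
# `_setup_metrics` above builds one `AudioMetricWrapper` per dataloader from the entries
# under `model.metrics.<tag>`, instantiating each metric with Hydra via its `_target_`.
# A minimal sketch of such an entry (the torchmetrics class name is an assumption,
# not something this file prescribes):
#
#     metrics:
#       val:
#         sisdr:
#           _target_: torchmetrics.audio.ScaleInvariantSignalDistortionRatio
#
# Per the code above, optional `channel` and `metric_using_batch_averaging` keys are
# also read from the same entry and forwarded to `AudioMetricWrapper`.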
|
NeMo-main
|
nemo/collections/asr/models/audio_to_audio_model.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import tempfile
from math import ceil
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from tqdm.auto import tqdm
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import AudioToCharDALIDataset, DALIOutputs
from nemo.collections.asr.losses.ctc import CTCLoss
from nemo.collections.asr.metrics.wer import WER, CTCDecoding, CTCDecodingConfig
from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel
from nemo.collections.asr.parts.mixins import ASRModuleMixin, InterCTCMixin
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.mixins import AccessMixin
from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, LogprobsType, NeuralType, SpectrogramType
from nemo.utils import logging
__all__ = ['EncDecCTCModel']
class EncDecCTCModel(ASRModel, ExportableEncDecModel, ASRModuleMixin, InterCTCMixin):
"""Base class for encoder decoder CTC-based models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
# Global_rank and local_rank are set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.world_size
super().__init__(cfg=cfg, trainer=trainer)
self.preprocessor = EncDecCTCModel.from_config_dict(self._cfg.preprocessor)
self.encoder = EncDecCTCModel.from_config_dict(self._cfg.encoder)
with open_dict(self._cfg):
if "feat_in" not in self._cfg.decoder or (
not self._cfg.decoder.feat_in and hasattr(self.encoder, '_feat_out')
):
self._cfg.decoder.feat_in = self.encoder._feat_out
if "feat_in" not in self._cfg.decoder or not self._cfg.decoder.feat_in:
raise ValueError("param feat_in of the decoder's config is not set!")
if self.cfg.decoder.num_classes < 1 and self.cfg.decoder.vocabulary is not None:
logging.info(
"\nReplacing placeholder number of classes ({}) with actual number of classes - {}".format(
self.cfg.decoder.num_classes, len(self.cfg.decoder.vocabulary)
)
)
cfg.decoder["num_classes"] = len(self.cfg.decoder.vocabulary)
self.decoder = EncDecCTCModel.from_config_dict(self._cfg.decoder)
self.loss = CTCLoss(
num_classes=self.decoder.num_classes_with_blank - 1,
zero_infinity=True,
reduction=self._cfg.get("ctc_reduction", "mean_batch"),
)
if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = EncDecCTCModel.from_config_dict(self._cfg.spec_augment)
else:
self.spec_augmentation = None
# Setup decoding objects
decoding_cfg = self.cfg.get('decoding', None)
# In case decoding config not found, use default config
if decoding_cfg is None:
decoding_cfg = OmegaConf.structured(CTCDecodingConfig)
with open_dict(self.cfg):
self.cfg.decoding = decoding_cfg
self.decoding = CTCDecoding(self.cfg.decoding, vocabulary=OmegaConf.to_container(self.decoder.vocabulary))
# Setup metric with decoding strategy
self._wer = WER(
decoding=self.decoding,
use_cer=self._cfg.get('use_cer', False),
dist_sync_on_step=True,
log_prediction=self._cfg.get("log_prediction", False),
)
# Setup optional Optimization flags
self.setup_optimization_flags()
# setting up interCTC loss (from InterCTCMixin)
self.setup_interctc(decoder_name='decoder', loss_name='loss', wer_name='_wer')
# Adapter modules setup (from ASRAdapterModelMixin)
self.setup_adapters()
@torch.no_grad()
def transcribe(
self,
paths2audio_files: List[str],
batch_size: int = 4,
logprobs: bool = False,
return_hypotheses: bool = False,
num_workers: int = 0,
channel_selector: Optional[ChannelSelectorType] = None,
augmentor: DictConfig = None,
verbose: bool = True,
) -> List[str]:
"""
If you modify this function, please remember to update transcribe_partial_audio() in
nemo/collections/asr/parts/utils/transcribe_utils.py.
Uses greedy decoding to transcribe audio files. Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is between 5 and 25 seconds. \
But it is possible to pass a few hours long file if enough GPU memory is available.
batch_size: (int) batch size to use during inference.
Bigger will result in better throughput performance but would use more memory.
logprobs: (bool) pass True to get log probabilities instead of transcripts.
return_hypotheses: (bool) Either return hypotheses or text
With hypotheses can do some postprocessing like getting timestamp or rescoring
num_workers: (int) number of workers for DataLoader
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`.
augmentor: (DictConfig): Augment audio samples during transcription if augmentor is applied.
verbose: (bool) whether to display tqdm progress bar
Returns:
A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
return []
if return_hypotheses and logprobs:
raise ValueError(
"Either `return_hypotheses` or `logprobs` can be True at any given time."
"Returned hypotheses will contain the logprobs."
)
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
# We will store transcriptions here
hypotheses = []
all_hypotheses = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
dither_value = self.preprocessor.featurizer.dither
pad_to_value = self.preprocessor.featurizer.pad_to
try:
self.preprocessor.featurizer.dither = 0.0
self.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self.eval()
# Freeze the encoder and decoder modules
self.encoder.freeze()
self.decoder.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Work in tmp directory - will store manifest file there
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in paths2audio_files:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': ''}
fp.write(json.dumps(entry) + '\n')
config = {
'paths2audio_files': paths2audio_files,
'batch_size': batch_size,
'temp_dir': tmpdir,
'num_workers': num_workers,
'channel_selector': channel_selector,
}
if augmentor:
config['augmentor'] = augmentor
temporary_datalayer = self._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing", disable=not verbose):
logits, logits_len, greedy_predictions = self.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
if logprobs:
# dump log probs per file
for idx in range(logits.shape[0]):
lg = logits[idx][: logits_len[idx]]
hypotheses.append(lg.cpu().numpy())
else:
current_hypotheses, all_hyp = self.decoding.ctc_decoder_predictions_tensor(
logits, decoder_lengths=logits_len, return_hypotheses=return_hypotheses,
)
logits = logits.cpu()
if return_hypotheses:
# dump log probs per file
for idx in range(logits.shape[0]):
current_hypotheses[idx].y_sequence = logits[idx][: logits_len[idx]]
if current_hypotheses[idx].alignments is None:
current_hypotheses[idx].alignments = current_hypotheses[idx].y_sequence
if all_hyp is None:
hypotheses += current_hypotheses
else:
hypotheses += all_hyp
del greedy_predictions
del logits
del test_batch
finally:
# set mode back to its original value
self.train(mode=mode)
self.preprocessor.featurizer.dither = dither_value
self.preprocessor.featurizer.pad_to = pad_to_value
if mode is True:
self.encoder.unfreeze()
self.decoder.unfreeze()
logging.set_verbosity(logging_level)
return hypotheses
def change_vocabulary(self, new_vocabulary: List[str], decoding_cfg: Optional[DictConfig] = None):
"""
Changes vocabulary used during CTC decoding process. Use this method when fine-tuning from a pre-trained model.
This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you would
use it if you want to use a pretrained encoder when fine-tuning on data in another language, or when you need the
model to learn capitalization, punctuation and/or special characters.
If new_vocabulary == self.decoder.vocabulary then nothing will be changed.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
Returns: None
"""
if self.decoder.vocabulary == new_vocabulary:
logging.warning(f"Old {self.decoder.vocabulary} and new {new_vocabulary} match. Not changing anything.")
else:
if new_vocabulary is None or len(new_vocabulary) == 0:
raise ValueError(f'New vocabulary must be a non-empty list of chars, but got: {new_vocabulary}')
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
new_decoder_config['vocabulary'] = new_vocabulary
new_decoder_config['num_classes'] = len(new_vocabulary)
del self.decoder
self.decoder = EncDecCTCModel.from_config_dict(new_decoder_config)
del self.loss
self.loss = CTCLoss(
num_classes=self.decoder.num_classes_with_blank - 1,
zero_infinity=True,
reduction=self._cfg.get("ctc_reduction", "mean_batch"),
)
if decoding_cfg is None:
# Assume same decoding config as before
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(CTCDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = CTCDecoding(
decoding_cfg=decoding_cfg, vocabulary=OmegaConf.to_container(self.decoder.vocabulary)
)
self._wer = WER(
decoding=self.decoding,
use_cer=self._cfg.get('use_cer', False),
dist_sync_on_step=True,
log_prediction=self._cfg.get("log_prediction", False),
)
# Update config
with open_dict(self.cfg.decoder):
self._cfg.decoder = new_decoder_config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
ds_keys = ['train_ds', 'validation_ds', 'test_ds']
for key in ds_keys:
if key in self.cfg:
with open_dict(self.cfg[key]):
self.cfg[key]['labels'] = OmegaConf.create(new_vocabulary)
logging.info(f"Changed decoder to output to {self.decoder.vocabulary} vocabulary.")
def change_decoding_strategy(self, decoding_cfg: DictConfig):
"""
Changes decoding strategy used during CTC decoding process.
Args:
decoding_cfg: A config for the decoding strategy, which is optional. If the decoding type
needs to be changed (from, say, greedy to beam decoding), the config can be passed here.
"""
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(CTCDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = CTCDecoding(
decoding_cfg=decoding_cfg, vocabulary=OmegaConf.to_container(self.decoder.vocabulary)
)
self._wer = WER(
decoding=self.decoding,
use_cer=self._wer.use_cer,
log_prediction=self._wer.log_prediction,
dist_sync_on_step=True,
)
self.decoder.temperature = decoding_cfg.get('temperature', 1.0)
# Update config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed decoding strategy to \n{OmegaConf.to_yaml(self.cfg.decoding)}")
def _setup_dataloader_from_config(self, config: Optional[Dict]):
# Automatically inject args from model config to dataloader config
audio_to_text_dataset.inject_dataloader_value_from_model_config(self.cfg, config, key='sample_rate')
audio_to_text_dataset.inject_dataloader_value_from_model_config(self.cfg, config, key='labels')
dataset = audio_to_text_dataset.get_audio_to_text_char_dataset_from_config(
config=config,
local_rank=self.local_rank,
global_rank=self.global_rank,
world_size=self.world_size,
preprocessor_cfg=self._cfg.get("preprocessor", None),
)
if dataset is None:
return None
if isinstance(dataset, AudioToCharDALIDataset):
# DALI Dataset implements dataloader interface
return dataset
shuffle = config['shuffle']
if isinstance(dataset, torch.utils.data.IterableDataset):
shuffle = False
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the training data loader via a Dict-like object.
Args:
train_data_config: A config that contains the information regarding construction
of an ASR Training dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
# preserve config
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if (
self._train_dl is not None
and hasattr(self._train_dl, 'dataset')
and isinstance(self._train_dl.dataset, torch.utils.data.IterableDataset)
):
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the validation data loader via a Dict-like object.
Args:
val_data_config: A config that contains the information regarding construction
of an ASR validation dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the test data loader via a Dict-like object.
Args:
test_data_config: A config that contains the information regarding construction
of an ASR test dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
input_signal_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), input_signal_eltype, optional=True),
"input_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),
"processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"sample_id": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"outputs": NeuralType(('B', 'T', 'D'), LogprobsType()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
"greedy_predictions": NeuralType(('B', 'T'), LabelsType()),
}
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The greedy token predictions of the model of shape [B, T] (via argmax)
"""
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if not (has_input_signal ^ has_processed_signal):
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_len`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoder_output = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
encoded = encoder_output[0]
encoded_len = encoder_output[1]
log_probs = self.decoder(encoder_output=encoded)
greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)
return (
log_probs,
encoded_len,
greedy_predictions,
)
# PTL-specific methods
def training_step(self, batch, batch_nb):
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
if self.is_interctc_enabled():
AccessMixin.set_access_enabled(access_enabled=True)
signal, signal_len, transcript, transcript_len = batch
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
log_probs, encoded_len, predictions = self.forward(
processed_signal=signal, processed_signal_length=signal_len
)
else:
log_probs, encoded_len, predictions = self.forward(input_signal=signal, input_signal_length=signal_len)
if hasattr(self, '_trainer') and self._trainer is not None:
log_every_n_steps = self._trainer.log_every_n_steps
else:
log_every_n_steps = 1
loss_value = self.loss(
log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len
)
# Add auxiliary losses, if registered
loss_value = self.add_auxiliary_losses(loss_value)
# only computing WER when requested in the logs (same as done for final-layer WER below)
loss_value, tensorboard_logs = self.add_interctc_losses(
loss_value, transcript, transcript_len, compute_wer=((batch_nb + 1) % log_every_n_steps == 0)
)
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
tensorboard_logs.update(
{
'train_loss': loss_value,
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': torch.tensor(self.trainer.global_step, dtype=torch.float32),
}
)
if (batch_nb + 1) % log_every_n_steps == 0:
self._wer.update(
predictions=log_probs,
targets=transcript,
target_lengths=transcript_len,
predictions_lengths=encoded_len,
)
wer, _, _ = self._wer.compute()
self._wer.reset()
tensorboard_logs.update({'training_batch_wer': wer})
return {'loss': loss_value, 'log': tensorboard_logs}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
signal, signal_len, transcript, transcript_len, sample_id = batch
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
log_probs, encoded_len, predictions = self.forward(
processed_signal=signal, processed_signal_length=signal_len
)
else:
log_probs, encoded_len, predictions = self.forward(input_signal=signal, input_signal_length=signal_len)
transcribed_texts, _ = self._wer.decoding.ctc_decoder_predictions_tensor(
decoder_outputs=log_probs, decoder_lengths=encoded_len, return_hypotheses=False,
)
sample_id = sample_id.cpu().detach().numpy()
return list(zip(sample_id, transcribed_texts))
def validation_pass(self, batch, batch_idx, dataloader_idx=0):
if self.is_interctc_enabled():
AccessMixin.set_access_enabled(access_enabled=True)
signal, signal_len, transcript, transcript_len = batch
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
log_probs, encoded_len, predictions = self.forward(
processed_signal=signal, processed_signal_length=signal_len
)
else:
log_probs, encoded_len, predictions = self.forward(input_signal=signal, input_signal_length=signal_len)
loss_value = self.loss(
log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len
)
loss_value, metrics = self.add_interctc_losses(
loss_value, transcript, transcript_len, compute_wer=True, log_wer_num_denom=True, log_prefix="val_",
)
self._wer.update(
predictions=log_probs, targets=transcript, target_lengths=transcript_len, predictions_lengths=encoded_len
)
wer, wer_num, wer_denom = self._wer.compute()
self._wer.reset()
metrics.update({'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom, 'val_wer': wer})
self.log('global_step', torch.tensor(self.trainer.global_step, dtype=torch.float32))
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
return metrics
def validation_step(self, batch, batch_idx, dataloader_idx=0):
metrics = self.validation_pass(batch, batch_idx, dataloader_idx)
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(metrics)
else:
self.validation_step_outputs.append(metrics)
return metrics
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
metrics = super().multi_validation_epoch_end(outputs, dataloader_idx)
self.finalize_interctc_metrics(metrics, outputs, prefix="val_")
return metrics
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
metrics = super().multi_test_epoch_end(outputs, dataloader_idx)
self.finalize_interctc_metrics(metrics, outputs, prefix="test_")
return metrics
def test_step(self, batch, batch_idx, dataloader_idx=0):
logs = self.validation_pass(batch, batch_idx, dataloader_idx=dataloader_idx)
test_logs = {name.replace("val_", "test_"): value for name, value in logs.items()}
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append(test_logs)
else:
self.test_step_outputs.append(test_logs)
return test_logs
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
num_workers: (int) number of workers. Depends on the batch_size and machine. \
0 - only the main process will load batches, 1 - one worker (not main process)
Returns:
A pytorch DataLoader for the given audio file(s).
"""
if 'manifest_filepath' in config:
manifest_filepath = config['manifest_filepath']
batch_size = config['batch_size']
else:
manifest_filepath = os.path.join(config['temp_dir'], 'manifest.json')
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': manifest_filepath,
'sample_rate': self.preprocessor._sample_rate,
'labels': OmegaConf.to_container(self.decoder.vocabulary),
'batch_size': batch_size,
'trim_silence': False,
'shuffle': False,
'num_workers': config.get('num_workers', min(batch_size, os.cpu_count() - 1)),
'pin_memory': True,
'channel_selector': config.get('channel_selector', None),
}
if config.get("augmentor"):
dl_config['augmentor'] = config.get("augmentor")
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="QuartzNet15x5Base-En",
description="QuartzNet15x5 model trained on six datasets: LibriSpeech, Mozilla Common Voice (validated clips from en_1488h_2019-12-10), WSJ, Fisher, Switchboard, and NSC Singapore English. It was trained with Apex/Amp optimization level O1 for 600 epochs. The model achieves a WER of 3.79% on LibriSpeech dev-clean, and a WER of 10.05% on dev-other. Please visit https://ngc.nvidia.com/catalog/models/nvidia:nemospeechmodels for further details.",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/QuartzNet15x5Base-En.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_quartznet15x5/versions/1.0.0rc1/files/stt_en_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_jasper10x5dr",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_jasper10x5dr",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_jasper10x5dr/versions/1.0.0rc1/files/stt_en_jasper10x5dr.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ca_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ca_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ca_quartznet15x5/versions/1.0.0rc1/files/stt_ca_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_it_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_it_quartznet15x5/versions/1.0.0rc1/files/stt_it_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_quartznet15x5/versions/1.0.0rc1/files/stt_fr_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_es_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_quartznet15x5/versions/1.0.0rc1/files/stt_es_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_de_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_de_quartznet15x5/versions/1.0.0rc1/files/stt_de_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_pl_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_pl_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_pl_quartznet15x5/versions/1.0.0rc1/files/stt_pl_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ru_quartznet15x5",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_quartznet15x5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ru_quartznet15x5/versions/1.0.0rc1/files/stt_ru_quartznet15x5.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_zh_citrinet_512",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_zh_citrinet_512",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_zh_citrinet_512/versions/1.0.0rc1/files/stt_zh_citrinet_512.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_zh_citrinet_1024_gamma_0_25",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_zh_citrinet_1024_gamma_0_25",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_zh_citrinet_1024_gamma_0_25/versions/1.0.0/files/stt_zh_citrinet_1024_gamma_0_25.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="asr_talknet_aligner",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:asr_talknet_aligner",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/asr_talknet_aligner/versions/1.0.0rc1/files/qn5x5_libri_tts_phonemes.nemo",
)
results.append(model)
return results
|
NeMo-main
|
nemo/collections/asr/models/ctc_models.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import AudioToBPEDALIDataset
from nemo.collections.asr.losses.rnnt import RNNTLoss
from nemo.collections.asr.metrics.rnnt_wer_bpe import RNNTBPEWER, RNNTBPEDecoding, RNNTBPEDecodingConfig
from nemo.collections.asr.models.rnnt_models import EncDecRNNTModel
from nemo.collections.asr.parts.mixins import ASRBPEMixin
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging, model_utils
class EncDecRNNTBPEModel(EncDecRNNTModel, ASRBPEMixin):
"""Base class for encoder decoder RNNT-based models with subword tokenization."""
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
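Example (a minimal, illustrative sketch; any entry returned by this method can be restored):
.. code-block:: python
    from nemo.collections.asr.models import EncDecRNNTBPEModel
    # Inspect the catalog and restore one of the listed checkpoints from NGC.
    for info in EncDecRNNTBPEModel.list_available_models():
        print(info.pretrained_model_name, "->", info.location)
    model = EncDecRNNTBPEModel.from_pretrained(model_name="stt_en_conformer_transducer_large")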
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="stt_en_contextnet_256",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_256",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_contextnet_256/versions/1.6.0/files/stt_en_contextnet_256.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_contextnet_512",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_512",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_contextnet_512/versions/1.6.0/files/stt_en_contextnet_512.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_contextnet_1024",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_1024",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_contextnet_1024/versions/1.9.0/files/stt_en_contextnet_1024.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_contextnet_256_mls",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_256_mls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_contextnet_256_mls/versions/1.0.0/files/stt_en_contextnet_256_mls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_contextnet_512_mls",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_512_mls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_contextnet_512_mls/versions/1.0.0/files/stt_en_contextnet_512_mls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_contextnet_1024_mls",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_1024_mls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_contextnet_1024_mls/versions/1.0.0/files/stt_en_contextnet_1024_mls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_transducer_small",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_small",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_transducer_small/versions/1.6.0/files/stt_en_conformer_transducer_small.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_transducer_medium",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_medium",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_transducer_medium/versions/1.6.0/files/stt_en_conformer_transducer_medium.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_transducer_large/versions/1.10.0/files/stt_en_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_transducer_large_ls",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_large_ls",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_transducer_large_ls/versions/1.8.0/files/stt_en_conformer_transducer_large_ls.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_transducer_xlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_xlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_transducer_xlarge/versions/1.10.0/files/stt_en_conformer_transducer_xlarge.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_conformer_transducer_xxlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_xxlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_conformer_transducer_xxlarge/versions/1.8.0/files/stt_en_conformer_transducer_xxlarge.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_de_contextnet_1024",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_contextnet_1024",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_de_contextnet_1024/versions/1.4.0/files/stt_de_contextnet_1024.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_contextnet_1024",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_contextnet_1024",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_contextnet_1024/versions/1.5/files/stt_fr_contextnet_1024.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_es_contextnet_1024",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_contextnet_1024",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_contextnet_1024/versions/1.8.0/files/stt_es_contextnet_1024.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_de_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_de_conformer_transducer_large/versions/1.5.0/files/stt_de_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_conformer_transducer_large",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_fr_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_conformer_transducer_large/versions/1.5/files/stt_fr_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_es_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_conformer_transducer_large/versions/1.8.0/files/stt_es_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_enes_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_enes_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_enes_conformer_transducer_large/versions/1.0.0/files/stt_enes_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_enes_contextnet_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_enes_contextnet_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_enes_contextnet_large/versions/1.0.0/files/stt_enes_contextnet_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ca_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ca_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ca_conformer_transducer_large/versions/1.11.0/files/stt_ca_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_rw_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_rw_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_rw_conformer_transducer_large/versions/1.11.0/files/stt_rw_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_enes_conformer_transducer_large_codesw",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_enes_conformer_transducer_large_codesw",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_enes_conformer_transducer_large_codesw/versions/1.0.0/files/stt_enes_conformer_transducer_large_codesw.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_kab_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_kab_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_kab_conformer_transducer_large/versions/1.12.0/files/stt_kab_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_be_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_be_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_be_conformer_transducer_large/versions/1.12.0/files/stt_be_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_hr_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_hr_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_hr_conformer_transducer_large/versions/1.11.0/files/stt_hr_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_it_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_it_conformer_transducer_large/versions/1.13.0/files/stt_it_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ru_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ru_conformer_transducer_large/versions/1.13.0/files/stt_ru_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_eo_conformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_eo_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_eo_conformer_transducer_large/versions/1.14.0/files/stt_eo_conformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_transducer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_transducer_large/versions/1.0.0/files/stt_en_fastconformer_transducer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_transducer_xlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_transducer_xlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_transducer_xlarge/versions/1.20.1/files/stt_en_fastconformer_transducer_xlarge.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_transducer_xxlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_transducer_xxlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_transducer_xxlarge/versions/1.20.1/files/stt_en_fastconformer_transducer_xxlarge.nemo",
)
results.append(model)
return results
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# Tokenizer is necessary for this model
if 'tokenizer' not in cfg:
raise ValueError("`cfg` must have `tokenizer` config to create a tokenizer !")
if not isinstance(cfg, DictConfig):
cfg = OmegaConf.create(cfg)
# Setup the tokenizer
self._setup_tokenizer(cfg.tokenizer)
# Initialize a dummy vocabulary
vocabulary = self.tokenizer.tokenizer.get_vocab()
# Set the new vocabulary
with open_dict(cfg):
cfg.labels = ListConfig(list(vocabulary))
with open_dict(cfg.decoder):
cfg.decoder.vocab_size = len(vocabulary)
with open_dict(cfg.joint):
cfg.joint.num_classes = len(vocabulary)
cfg.joint.vocabulary = ListConfig(list(vocabulary))
cfg.joint.jointnet.encoder_hidden = cfg.model_defaults.enc_hidden
cfg.joint.jointnet.pred_hidden = cfg.model_defaults.pred_hidden
super().__init__(cfg=cfg, trainer=trainer)
# Setup decoding object
self.decoding = RNNTBPEDecoding(
decoding_cfg=self.cfg.decoding, decoder=self.decoder, joint=self.joint, tokenizer=self.tokenizer,
)
# Setup wer object
self.wer = RNNTBPEWER(
decoding=self.decoding,
batch_dim_index=0,
use_cer=self._cfg.get('use_cer', False),
log_prediction=self._cfg.get('log_prediction', True),
dist_sync_on_step=True,
)
# Setup fused Joint step if flag is set
if self.joint.fuse_loss_wer:
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
def change_vocabulary(
self,
new_tokenizer_dir: Union[str, DictConfig],
new_tokenizer_type: str,
decoding_cfg: Optional[DictConfig] = None,
):
"""
Changes the vocabulary used during RNNT decoding. Use this method when fine-tuning from a pre-trained model.
This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you would
use it if you want to reuse a pretrained encoder when fine-tuning on data in another language, or when you need the
model to learn capitalization, punctuation and/or special characters.
Args:
new_tokenizer_dir: Directory path to tokenizer or a config for a new tokenizer (if the tokenizer type is `agg`)
new_tokenizer_type: Type of tokenizer. Can be either `agg`, `bpe` or `wpe`.
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
Returns: None
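Example (a minimal sketch; the tokenizer directory below is a hypothetical path to a tokenizer prepared with NeMo's tokenizer construction tooling):
.. code-block:: python
    # Assumes `model` is an instance of this class restored via `from_pretrained` or `restore_from`.
    model.change_vocabulary(
        new_tokenizer_dir="/path/to/new_tokenizer_dir",  # hypothetical directory
        new_tokenizer_type="bpe",
    )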
"""
if isinstance(new_tokenizer_dir, DictConfig):
if new_tokenizer_type == 'agg':
new_tokenizer_cfg = new_tokenizer_dir
else:
raise ValueError(
f'New tokenizer dir should be a string unless the tokenizer is `agg`, but this tokenizer type is: {new_tokenizer_type}'
)
else:
new_tokenizer_cfg = None
if new_tokenizer_cfg is not None:
tokenizer_cfg = new_tokenizer_cfg
else:
if not os.path.isdir(new_tokenizer_dir):
raise NotADirectoryError(
f'New tokenizer dir must be non-empty path to a directory. But I got: {new_tokenizer_dir}'
)
if new_tokenizer_type.lower() not in ('bpe', 'wpe'):
raise ValueError(f'New tokenizer type must be either `bpe` or `wpe`')
tokenizer_cfg = OmegaConf.create({'dir': new_tokenizer_dir, 'type': new_tokenizer_type})
# Setup the tokenizer
self._setup_tokenizer(tokenizer_cfg)
# Initialize a dummy vocabulary
vocabulary = self.tokenizer.tokenizer.get_vocab()
joint_config = self.joint.to_config_dict()
new_joint_config = copy.deepcopy(joint_config)
if self.tokenizer_type == "agg":
new_joint_config["vocabulary"] = ListConfig(vocabulary)
else:
new_joint_config["vocabulary"] = ListConfig(list(vocabulary.keys()))
new_joint_config['num_classes'] = len(vocabulary)
del self.joint
self.joint = EncDecRNNTBPEModel.from_config_dict(new_joint_config)
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
new_decoder_config.vocab_size = len(vocabulary)
del self.decoder
self.decoder = EncDecRNNTBPEModel.from_config_dict(new_decoder_config)
del self.loss
self.loss = RNNTLoss(num_classes=self.joint.num_classes_with_blank - 1)
if decoding_cfg is None:
# Assume same decoding config as before
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(RNNTBPEDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = RNNTBPEDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, tokenizer=self.tokenizer,
)
self.wer = RNNTBPEWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer or (
self.decoding.joint_fused_batch_size is not None and self.decoding.joint_fused_batch_size > 0
):
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
# Update config
with open_dict(self.cfg.joint):
self.cfg.joint = new_joint_config
with open_dict(self.cfg.decoder):
self.cfg.decoder = new_decoder_config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed decoder to output to {self.joint.vocabulary} vocabulary.")
def change_decoding_strategy(self, decoding_cfg: DictConfig):
"""
Changes decoding strategy used during RNNT decoding process.
Args:
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
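Example (a minimal sketch; the strategy and beam size are illustrative):
.. code-block:: python
    from omegaconf import OmegaConf
    # Switch from greedy decoding to beam search with a small beam.
    decoding_cfg = OmegaConf.create({"strategy": "beam", "beam": {"beam_size": 4}})
    model.change_decoding_strategy(decoding_cfg)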
"""
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(RNNTBPEDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = RNNTBPEDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, tokenizer=self.tokenizer,
)
self.wer = RNNTBPEWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer or (
self.decoding.joint_fused_batch_size is not None and self.decoding.joint_fused_batch_size > 0
):
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
self.joint.temperature = decoding_cfg.get('temperature', 1.0)
# Update config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed decoding strategy to \n{OmegaConf.to_yaml(self.cfg.decoding)}")
def _setup_dataloader_from_config(self, config: Optional[Dict]):
dataset = audio_to_text_dataset.get_audio_to_text_bpe_dataset_from_config(
config=config,
local_rank=self.local_rank,
global_rank=self.global_rank,
world_size=self.world_size,
tokenizer=self.tokenizer,
preprocessor_cfg=self.cfg.get("preprocessor", None),
)
if dataset is None:
return None
if isinstance(dataset, AudioToBPEDALIDataset):
# DALI Dataset implements dataloader interface
return dataset
shuffle = config['shuffle']
if isinstance(dataset, torch.utils.data.IterableDataset):
shuffle = False
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
Returns:
A pytorch DataLoader for the given audio file(s).
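Example of a minimal ``config`` dictionary (file names and directory are illustrative; ``transcribe()`` normally writes the manifest into ``temp_dir`` before calling this method):
.. code-block:: python
    config = {
        'paths2audio_files': ['audio_1.wav', 'audio_2.wav'],  # illustrative paths
        'batch_size': 4,
        'temp_dir': '/tmp/transcribe_workdir',  # must already contain manifest.json
    }
    dataloader = model._setup_transcribe_dataloader(config)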
"""
if 'manifest_filepath' in config:
manifest_filepath = config['manifest_filepath']
batch_size = config['batch_size']
else:
manifest_filepath = os.path.join(config['temp_dir'], 'manifest.json')
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': manifest_filepath,
'sample_rate': self.preprocessor._sample_rate,
'batch_size': batch_size,
'shuffle': False,
'num_workers': config.get('num_workers', min(batch_size, os.cpu_count() - 1)),
'pin_memory': True,
'channel_selector': config.get('channel_selector', None),
'use_start_end_token': self.cfg.validation_ds.get('use_start_end_token', False),
}
if config.get("augmentor"):
dl_config['augmentor'] = config.get("augmentor")
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
|
NeMo-main
|
nemo/collections/asr/models/rnnt_bpe_models.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import tempfile
from math import ceil, isclose
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from tqdm.auto import tqdm
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import AudioToCharDALIDataset, DALIOutputs
from nemo.collections.asr.losses.rnnt import RNNTLoss, resolve_rnnt_default_loss_name
from nemo.collections.asr.metrics.rnnt_wer import RNNTWER, RNNTDecoding, RNNTDecodingConfig
from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel
from nemo.collections.asr.modules.rnnt import RNNTDecoderJoint
from nemo.collections.asr.parts.mixins import ASRModuleMixin
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.mixins import AccessMixin
from nemo.core.neural_types import AcousticEncodedRepresentation, AudioSignal, LengthsType, NeuralType, SpectrogramType
from nemo.utils import logging
class EncDecRNNTModel(ASRModel, ASRModuleMixin, ExportableEncDecModel):
"""Base class for encoder decoder RNNT-based models."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
# Global_rank and local_rank is set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.world_size
super().__init__(cfg=cfg, trainer=trainer)
# Initialize components
self.preprocessor = EncDecRNNTModel.from_config_dict(self.cfg.preprocessor)
self.encoder = EncDecRNNTModel.from_config_dict(self.cfg.encoder)
# Update config values required by components dynamically
with open_dict(self.cfg.decoder):
self.cfg.decoder.vocab_size = len(self.cfg.labels)
with open_dict(self.cfg.joint):
self.cfg.joint.num_classes = len(self.cfg.labels)
self.cfg.joint.vocabulary = self.cfg.labels
self.cfg.joint.jointnet.encoder_hidden = self.cfg.model_defaults.enc_hidden
self.cfg.joint.jointnet.pred_hidden = self.cfg.model_defaults.pred_hidden
self.decoder = EncDecRNNTModel.from_config_dict(self.cfg.decoder)
self.joint = EncDecRNNTModel.from_config_dict(self.cfg.joint)
# Setup RNNT Loss
loss_name, loss_kwargs = self.extract_rnnt_loss_cfg(self.cfg.get("loss", None))
num_classes = self.joint.num_classes_with_blank - 1 # for standard RNNT and multi-blank
if loss_name == 'tdt':
num_classes = num_classes - self.joint.num_extra_outputs
self.loss = RNNTLoss(
num_classes=num_classes,
loss_name=loss_name,
loss_kwargs=loss_kwargs,
reduction=self.cfg.get("rnnt_reduction", "mean_batch"),
)
if hasattr(self.cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = EncDecRNNTModel.from_config_dict(self.cfg.spec_augment)
else:
self.spec_augmentation = None
# Setup decoding objects
self.decoding = RNNTDecoding(
decoding_cfg=self.cfg.decoding, decoder=self.decoder, joint=self.joint, vocabulary=self.joint.vocabulary,
)
# Setup WER calculation
self.wer = RNNTWER(
decoding=self.decoding,
batch_dim_index=0,
use_cer=self._cfg.get('use_cer', False),
log_prediction=self._cfg.get('log_prediction', True),
dist_sync_on_step=True,
)
# Whether to compute loss during evaluation
if 'compute_eval_loss' in self.cfg:
self.compute_eval_loss = self.cfg.compute_eval_loss
else:
self.compute_eval_loss = True
# Setup fused Joint step if flag is set
if self.joint.fuse_loss_wer or (
self.decoding.joint_fused_batch_size is not None and self.decoding.joint_fused_batch_size > 0
):
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
# Setup optimization normalization (if provided in config)
self.setup_optim_normalization()
# Setup optional Optimization flags
self.setup_optimization_flags()
# Setup encoder adapters (from ASRAdapterModelMixin)
self.setup_adapters()
def setup_optim_normalization(self):
"""
Helper method to setup normalization of certain parts of the model prior to the optimization step.
Supported pre-optimization normalizations are as follows:
.. code-block:: yaml
# Variational Noise injection
model:
variational_noise:
std: 0.0
start_step: 0
# Joint - Length normalization
model:
normalize_joint_txu: false
# Encoder Network - gradient normalization
model:
normalize_encoder_norm: false
# Decoder / Prediction Network - gradient normalization
model:
normalize_decoder_norm: false
# Joint - gradient normalization
model:
normalize_joint_norm: false
"""
# setting up the variational noise for the decoder
if hasattr(self.cfg, 'variational_noise'):
self._optim_variational_noise_std = self.cfg['variational_noise'].get('std', 0)
self._optim_variational_noise_start = self.cfg['variational_noise'].get('start_step', 0)
else:
self._optim_variational_noise_std = 0
self._optim_variational_noise_start = 0
# Setup normalized gradients for model joint by T x U scaling factor (joint length normalization)
self._optim_normalize_joint_txu = self.cfg.get('normalize_joint_txu', False)
self._optim_normalize_txu = None
# Setup normalized encoder norm for model
self._optim_normalize_encoder_norm = self.cfg.get('normalize_encoder_norm', False)
# Setup normalized decoder norm for model
self._optim_normalize_decoder_norm = self.cfg.get('normalize_decoder_norm', False)
# Setup normalized joint norm for model
self._optim_normalize_joint_norm = self.cfg.get('normalize_joint_norm', False)
def extract_rnnt_loss_cfg(self, cfg: Optional[DictConfig]):
"""
Helper method to extract the rnnt loss name, and potentially its kwargs
to be passed.
Args:
cfg: Should contain `loss_name` as a string which is resolved to a RNNT loss name.
If the default should be used, then `default` can be used.
Optionally, one can pass additional kwargs to the loss function. The subdict
should have a keyname as follows : `{loss_name}_kwargs`.
Note that whichever loss_name is selected, that corresponding kwargs will be
selected. For the "default" case, the "{resolved_default}_kwargs" will be used.
Examples:
.. code-block:: yaml
loss_name: "default"
warprnnt_numba_kwargs:
kwargs2: some_other_val
Returns:
A tuple, the resolved loss name as well as its kwargs (if found).
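Example (a minimal sketch; the kwargs below are illustrative):
.. code-block:: python
    from omegaconf import DictConfig
    # "default" resolves to the best available RNNT loss backend; the matching
    # `<loss_name>_kwargs` sub-dict (if any) is returned as the kwargs.
    cfg = DictConfig({"loss_name": "default", "warprnnt_numba_kwargs": {"fastemit_lambda": 0.001}})
    loss_name, loss_kwargs = model.extract_rnnt_loss_cfg(cfg)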
"""
if cfg is None:
cfg = DictConfig({})
loss_name = cfg.get("loss_name", "default")
if loss_name == "default":
loss_name = resolve_rnnt_default_loss_name()
loss_kwargs = cfg.get(f"{loss_name}_kwargs", None)
logging.info(f"Using RNNT Loss : {loss_name}\n" f"Loss {loss_name}_kwargs: {loss_kwargs}")
return loss_name, loss_kwargs
@torch.no_grad()
def transcribe(
self,
paths2audio_files: List[str],
batch_size: int = 4,
return_hypotheses: bool = False,
partial_hypothesis: Optional[List['Hypothesis']] = None,
num_workers: int = 0,
channel_selector: Optional[ChannelSelectorType] = None,
augmentor: DictConfig = None,
verbose: bool = True,
) -> Tuple[List[str], Optional[List['Hypothesis']]]:
"""
Uses greedy decoding to transcribe audio files. Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is between 5 and 25 seconds. \
However, it is possible to pass a file that is a few hours long if enough GPU memory is available.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
return_hypotheses: (bool) Whether to return Hypothesis objects instead of plain text.
With hypotheses, post-processing such as extracting timestamps or rescoring is possible.
num_workers: (int) number of workers for DataLoader
channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
augmentor: (DictConfig): Augment audio samples during transcription if augmentor is applied.
verbose: (bool) whether to display tqdm progress bar
Returns:
Returns a tuple of 2 items -
* A list of greedy transcript texts / Hypothesis
* An optional list of beam search transcript texts / Hypothesis / NBestHypothesis.
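Example (a minimal sketch; the audio paths are illustrative and ``model`` is assumed to be an instance of this class):
.. code-block:: python
    transcriptions, _ = model.transcribe(
        paths2audio_files=["sample_1.wav", "sample_2.wav"],  # illustrative paths
        batch_size=2,
    )
    print(transcriptions)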
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
return {}
# We will store transcriptions here
hypotheses = []
all_hypotheses = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
dither_value = self.preprocessor.featurizer.dither
pad_to_value = self.preprocessor.featurizer.pad_to
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
try:
self.preprocessor.featurizer.dither = 0.0
self.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self.eval()
# Freeze the encoder and decoder modules
self.encoder.freeze()
self.decoder.freeze()
self.joint.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Work in tmp directory - will store manifest file there
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in paths2audio_files:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': ''}
fp.write(json.dumps(entry) + '\n')
config = {
'paths2audio_files': paths2audio_files,
'batch_size': batch_size,
'temp_dir': tmpdir,
'num_workers': num_workers,
'channel_selector': channel_selector,
}
if augmentor:
config['augmentor'] = augmentor
temporary_datalayer = self._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing", disable=(not verbose)):
encoded, encoded_len = self.forward(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
best_hyp, all_hyp = self.decoding.rnnt_decoder_predictions_tensor(
encoded,
encoded_len,
return_hypotheses=return_hypotheses,
partial_hypotheses=partial_hypothesis,
)
hypotheses += best_hyp
if all_hyp is not None:
all_hypotheses += all_hyp
else:
all_hypotheses += best_hyp
del encoded
del test_batch
finally:
# set mode back to its original value
self.train(mode=mode)
self.preprocessor.featurizer.dither = dither_value
self.preprocessor.featurizer.pad_to = pad_to_value
logging.set_verbosity(logging_level)
if mode is True:
self.encoder.unfreeze()
self.decoder.unfreeze()
self.joint.unfreeze()
return hypotheses, all_hypotheses
def change_vocabulary(self, new_vocabulary: List[str], decoding_cfg: Optional[DictConfig] = None):
"""
Changes the vocabulary used during RNNT decoding. Use this method when fine-tuning a pre-trained model.
This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you would
use it if you want to reuse a pretrained encoder when fine-tuning on data in another language, or when you need the
model to learn capitalization, punctuation and/or special characters.
Args:
new_vocabulary: list with the new vocabulary. Must contain at least 2 elements. Typically, \
this is the target alphabet.
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
Returns: None
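Example (a minimal sketch; the character set is illustrative):
.. code-block:: python
    # Replace the decoding vocabulary with a new character set.
    new_vocabulary = [" ", "a", "b", "c", "'"]  # illustrative alphabet
    model.change_vocabulary(new_vocabulary=new_vocabulary)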
"""
if self.joint.vocabulary == new_vocabulary:
logging.warning(f"Old {self.joint.vocabulary} and new {new_vocabulary} match. Not changing anything.")
else:
if new_vocabulary is None or len(new_vocabulary) == 0:
raise ValueError(f'New vocabulary must be non-empty list of chars. But I got: {new_vocabulary}')
joint_config = self.joint.to_config_dict()
new_joint_config = copy.deepcopy(joint_config)
new_joint_config['vocabulary'] = new_vocabulary
new_joint_config['num_classes'] = len(new_vocabulary)
del self.joint
self.joint = EncDecRNNTModel.from_config_dict(new_joint_config)
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
new_decoder_config.vocab_size = len(new_vocabulary)
del self.decoder
self.decoder = EncDecRNNTModel.from_config_dict(new_decoder_config)
del self.loss
loss_name, loss_kwargs = self.extract_rnnt_loss_cfg(self.cfg.get('loss', None))
self.loss = RNNTLoss(
num_classes=self.joint.num_classes_with_blank - 1, loss_name=loss_name, loss_kwargs=loss_kwargs
)
if decoding_cfg is None:
# Assume same decoding config as before
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(RNNTDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = RNNTDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, vocabulary=self.joint.vocabulary,
)
self.wer = RNNTWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer or (
self.decoding.joint_fused_batch_size is not None and self.decoding.joint_fused_batch_size > 0
):
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
# Update config
with open_dict(self.cfg.joint):
self.cfg.joint = new_joint_config
with open_dict(self.cfg.decoder):
self.cfg.decoder = new_decoder_config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
ds_keys = ['train_ds', 'validation_ds', 'test_ds']
for key in ds_keys:
if key in self.cfg:
with open_dict(self.cfg[key]):
self.cfg[key]['labels'] = OmegaConf.create(new_vocabulary)
logging.info(f"Changed decoder to output to {self.joint.vocabulary} vocabulary.")
def change_decoding_strategy(self, decoding_cfg: DictConfig):
"""
Changes decoding strategy used during RNNT decoding process.
Args:
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
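Example (a minimal sketch; switches to the batched greedy decoder):
.. code-block:: python
    from omegaconf import OmegaConf
    decoding_cfg = OmegaConf.create({"strategy": "greedy_batch"})
    model.change_decoding_strategy(decoding_cfg)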
"""
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(RNNTDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = RNNTDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, vocabulary=self.joint.vocabulary,
)
self.wer = RNNTWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer or (
self.decoding.joint_fused_batch_size is not None and self.decoding.joint_fused_batch_size > 0
):
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
self.joint.temperature = decoding_cfg.get('temperature', 1.0)
# Update config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed decoding strategy to \n{OmegaConf.to_yaml(self.cfg.decoding)}")
def _setup_dataloader_from_config(self, config: Optional[Dict]):
# Automatically inject args from model config to dataloader config
audio_to_text_dataset.inject_dataloader_value_from_model_config(self.cfg, config, key='sample_rate')
audio_to_text_dataset.inject_dataloader_value_from_model_config(self.cfg, config, key='labels')
dataset = audio_to_text_dataset.get_audio_to_text_char_dataset_from_config(
config=config,
local_rank=self.local_rank,
global_rank=self.global_rank,
world_size=self.world_size,
preprocessor_cfg=self._cfg.get("preprocessor", None),
)
if dataset is None:
return None
if isinstance(dataset, AudioToCharDALIDataset):
# DALI Dataset implements dataloader interface
return dataset
shuffle = config['shuffle']
if isinstance(dataset, torch.utils.data.IterableDataset):
shuffle = False
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the training data loader via a Dict-like object.
Args:
train_data_config: A config that contains the information regarding construction
of an ASR Training dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
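Example of a minimal ``train_data_config`` (the manifest path and values are illustrative):
.. code-block:: python
    train_data_config = {
        'manifest_filepath': '/path/to/train_manifest.json',  # illustrative path
        'sample_rate': 16000,
        'labels': model.cfg.labels,
        'batch_size': 32,
        'shuffle': True,
    }
    model.setup_training_data(train_data_config=train_data_config)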
"""
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
# preserve config
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if (
self._train_dl is not None
and hasattr(self._train_dl, 'dataset')
and isinstance(self._train_dl.dataset, torch.utils.data.IterableDataset)
):
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the validation data loader via a Dict-like object.
Args:
val_data_config: A config that contains the information regarding construction
of an ASR Validation dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the test data loader via a Dict-like object.
Args:
test_data_config: A config that contains the information regarding construction
of an ASR Test dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
input_signal_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), input_signal_eltype, optional=True),
"input_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),
"processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None
):
"""
Forward pass of the model. Note that for RNNT Models, the forward pass of the model is a 3 step process,
and this method only performs the first step - forward of the acoustic model.
Please refer to the `training_step` in order to see the full `forward` step for training - which
performs the forward of the acoustic model, the prediction network and then the joint network.
Finally, it computes the loss and possibly compute the detokenized text via the `decoding` step.
Please refer to the `validation_step` in order to see the full `forward` step for inference - which
performs the forward of the acoustic model, the prediction network and then the joint network.
Finally, it computes the decoded tokens via the `decoding` step and possibly compute the batch metrics.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 2 elements -
1) The encoded acoustic features of shape [B, D, T] (the output of the encoder).
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
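Example (a minimal sketch; assumes ``model`` is an instance of this class and uses dummy audio):
.. code-block:: python
    import torch
    model.eval()
    audio = torch.randn(1, 32000)          # ~2 s of dummy audio at 16 kHz (illustrative)
    audio_len = torch.tensor([32000])
    with torch.no_grad():
        encoded, encoded_len = model(input_signal=audio, input_signal_length=audio_len)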
"""
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if (has_input_signal ^ has_processed_signal) is False:
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_len`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
# Spec augment is not applied during evaluation/testing
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
return encoded, encoded_len
# PTL-specific methods
def training_step(self, batch, batch_nb):
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
signal, signal_len, transcript, transcript_len = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
# During training, loss must be computed, so decoder forward is necessary
decoder, target_length, states = self.decoder(targets=transcript, target_length=transcript_len)
if hasattr(self, '_trainer') and self._trainer is not None:
log_every_n_steps = self._trainer.log_every_n_steps
sample_id = self._trainer.global_step
else:
log_every_n_steps = 1
sample_id = batch_nb
# If experimental fused Joint-Loss-WER is not used
if not self.joint.fuse_loss_wer:
# Compute full joint and loss
joint = self.joint(encoder_outputs=encoded, decoder_outputs=decoder)
loss_value = self.loss(
log_probs=joint, targets=transcript, input_lengths=encoded_len, target_lengths=target_length
)
# Add auxiliary losses, if registered
loss_value = self.add_auxiliary_losses(loss_value)
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
tensorboard_logs = {
'train_loss': loss_value,
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': torch.tensor(self.trainer.global_step, dtype=torch.float32),
}
if (sample_id + 1) % log_every_n_steps == 0:
self.wer.update(encoded, encoded_len, transcript, transcript_len)
_, scores, words = self.wer.compute()
self.wer.reset()
tensorboard_logs.update({'training_batch_wer': scores.float() / words})
else:
# If experimental fused Joint-Loss-WER is used
if (sample_id + 1) % log_every_n_steps == 0:
compute_wer = True
else:
compute_wer = False
# Fused joint step
loss_value, wer, _, _ = self.joint(
encoder_outputs=encoded,
decoder_outputs=decoder,
encoder_lengths=encoded_len,
transcripts=transcript,
transcript_lengths=transcript_len,
compute_wer=compute_wer,
)
# Add auxiliary losses, if registered
loss_value = self.add_auxiliary_losses(loss_value)
# Reset access registry
if AccessMixin.is_access_enabled():
AccessMixin.reset_registry(self)
tensorboard_logs = {
'train_loss': loss_value,
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': torch.tensor(self.trainer.global_step, dtype=torch.float32),
}
if compute_wer:
tensorboard_logs.update({'training_batch_wer': wer})
# Log items
self.log_dict(tensorboard_logs)
# Preserve batch acoustic model T and language model U parameters if normalizing
if self._optim_normalize_joint_txu:
self._optim_normalize_txu = [encoded_len.max(), transcript_len.max()]
return {'loss': loss_value}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
signal, signal_len, transcript, transcript_len, sample_id = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
best_hyp_text, all_hyp_text = self.decoding.rnnt_decoder_predictions_tensor(
encoder_output=encoded, encoded_lengths=encoded_len, return_hypotheses=False
)
sample_id = sample_id.cpu().detach().numpy()
return list(zip(sample_id, best_hyp_text))
def validation_step(self, batch, batch_idx, dataloader_idx=0):
signal, signal_len, transcript, transcript_len = batch
# forward() only performs encoder forward
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
encoded, encoded_len = self.forward(processed_signal=signal, processed_signal_length=signal_len)
else:
encoded, encoded_len = self.forward(input_signal=signal, input_signal_length=signal_len)
del signal
tensorboard_logs = {}
# If experimental fused Joint-Loss-WER is not used
if not self.joint.fuse_loss_wer:
if self.compute_eval_loss:
decoder, target_length, states = self.decoder(targets=transcript, target_length=transcript_len)
joint = self.joint(encoder_outputs=encoded, decoder_outputs=decoder)
loss_value = self.loss(
log_probs=joint, targets=transcript, input_lengths=encoded_len, target_lengths=target_length
)
tensorboard_logs['val_loss'] = loss_value
self.wer.update(encoded, encoded_len, transcript, transcript_len)
wer, wer_num, wer_denom = self.wer.compute()
self.wer.reset()
tensorboard_logs['val_wer_num'] = wer_num
tensorboard_logs['val_wer_denom'] = wer_denom
tensorboard_logs['val_wer'] = wer
else:
# If experimental fused Joint-Loss-WER is used
compute_wer = True
if self.compute_eval_loss:
decoded, target_len, states = self.decoder(targets=transcript, target_length=transcript_len)
else:
decoded = None
target_len = transcript_len
# Fused joint step
loss_value, wer, wer_num, wer_denom = self.joint(
encoder_outputs=encoded,
decoder_outputs=decoded,
encoder_lengths=encoded_len,
transcripts=transcript,
transcript_lengths=target_len,
compute_wer=compute_wer,
)
if loss_value is not None:
tensorboard_logs['val_loss'] = loss_value
tensorboard_logs['val_wer_num'] = wer_num
tensorboard_logs['val_wer_denom'] = wer_denom
tensorboard_logs['val_wer'] = wer
self.log('global_step', torch.tensor(self.trainer.global_step, dtype=torch.float32))
return tensorboard_logs
def test_step(self, batch, batch_idx, dataloader_idx=0):
logs = self.validation_step(batch, batch_idx, dataloader_idx=dataloader_idx)
test_logs = {
'test_wer_num': logs['val_wer_num'],
'test_wer_denom': logs['val_wer_denom'],
# 'test_wer': logs['val_wer'],
}
if 'val_loss' in logs:
test_logs['test_loss'] = logs['val_loss']
return test_logs
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
if self.compute_eval_loss:
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
val_loss_log = {'val_loss': val_loss_mean}
else:
val_loss_log = {}
wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {**val_loss_log, 'val_wer': wer_num.float() / wer_denom}
return {**val_loss_log, 'log': tensorboard_logs}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
if self.compute_eval_loss:
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
test_loss_log = {'test_loss': test_loss_mean}
else:
test_loss_log = {}
wer_num = torch.stack([x['test_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['test_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {**test_loss_log, 'test_wer': wer_num.float() / wer_denom}
return {**test_loss_log, 'log': tensorboard_logs}
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
Returns:
A pytorch DataLoader for the given audio file(s).
"""
if 'manifest_filepath' in config:
manifest_filepath = config['manifest_filepath']
batch_size = config['batch_size']
else:
manifest_filepath = os.path.join(config['temp_dir'], 'manifest.json')
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': manifest_filepath,
'sample_rate': self.preprocessor._sample_rate,
'labels': self.joint.vocabulary,
'batch_size': batch_size,
'trim_silence': False,
'shuffle': False,
'num_workers': config.get('num_workers', min(batch_size, os.cpu_count() - 1)),
'pin_memory': True,
}
if config.get("augmentor"):
dl_config['augmentor'] = config.get("augmentor")
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
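    # Editor's note (illustrative, not part of the upstream file): the `config` dict consumed by
    # `_setup_transcribe_dataloader` above typically looks like the sketch below; the file names
    # and temp directory are placeholders.
    #   config = {
    #       'paths2audio_files': ['utt1.wav', 'utt2.wav'],
    #       'batch_size': 4,
    #       'temp_dir': '/tmp/tmp_manifest_dir',   # holds the generated manifest.json
    #       'num_workers': 2,
    #   }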
def on_after_backward(self):
super().on_after_backward()
if self._optim_variational_noise_std > 0 and self.global_step >= self._optim_variational_noise_start:
for param_name, param in self.decoder.named_parameters():
if param.grad is not None:
noise = torch.normal(
mean=0.0,
std=self._optim_variational_noise_std,
size=param.size(),
device=param.device,
dtype=param.dtype,
)
param.grad.data.add_(noise)
if self._optim_normalize_joint_txu:
T, U = self._optim_normalize_txu
if T is not None and U is not None:
for param_name, param in self.encoder.named_parameters():
if param.grad is not None:
param.grad.data.div_(U)
for param_name, param in self.decoder.named_parameters():
if param.grad is not None:
param.grad.data.div_(T)
if self._optim_normalize_encoder_norm:
for param_name, param in self.encoder.named_parameters():
if param.grad is not None:
norm = param.grad.norm()
param.grad.data.div_(norm)
if self._optim_normalize_decoder_norm:
for param_name, param in self.decoder.named_parameters():
if param.grad is not None:
norm = param.grad.norm()
param.grad.data.div_(norm)
if self._optim_normalize_joint_norm:
for param_name, param in self.joint.named_parameters():
if param.grad is not None:
norm = param.grad.norm()
param.grad.data.div_(norm)
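    # Editor's note: `on_after_backward` above applies optional gradient tricks after every backward
    # pass: (1) Gaussian "variational noise" is added to the prediction-network (decoder) gradients
    # once `global_step` passes the configured start step, a common RNN-T regularizer; (2) when the
    # corresponding flags are set, encoder/decoder gradients are rescaled by the batch-maximum U/T
    # lengths, and encoder/decoder/joint gradients can each be normalized by their own norms.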
# EncDecRNNTModel is exported in 2 parts
def list_export_subnets(self):
return ['encoder', 'decoder_joint']
# for export
@property
def decoder_joint(self):
return RNNTDecoderJoint(self.decoder, self.joint)
def set_export_config(self, args):
if 'decoder_type' in args:
if hasattr(self, 'change_decoding_strategy'):
self.change_decoding_strategy(decoder_type=args['decoder_type'])
else:
raise Exception("Model does not have decoder type option")
super().set_export_config(args)
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="stt_zh_conformer_transducer_large",
description="For details about this model, please visit https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_zh_conformer_transducer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_zh_conformer_transducer_large/versions/1.8.0/files/stt_zh_conformer_transducer_large.nemo",
)
results.append(model)
return results
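# --- Editor-added usage sketch (not part of the upstream NeMo file) ---
# A minimal end-to-end example, assuming network access to NGC and that the standard
# `from_pretrained`/`transcribe` helpers of the base ASRModel are available in this NeMo
# version; "audio.wav" is a placeholder for a real 16 kHz mono recording.
if __name__ == "__main__":
    rnnt_model = EncDecRNNTModel.from_pretrained(model_name="stt_zh_conformer_transducer_large")
    outputs = rnnt_model.transcribe(paths2audio_files=["audio.wav"], batch_size=4)
    # Depending on the NeMo version, `outputs` is either a list of transcripts or a
    # (best_hypotheses, all_hypotheses) tuple; print whatever comes back.
    print(outputs)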
|
NeMo-main
|
nemo/collections/asr/models/rnnt_models.py
|
# ! /usr/bin/python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
from math import ceil
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from tqdm.auto import tqdm
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import DALIOutputs
from nemo.collections.asr.metrics.wer_bpe import WERBPE, CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel
from nemo.collections.asr.parts.mixins import ASRBPEMixin, ASRModuleMixin
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
from nemo.collections.asr.parts.utils.slu_utils import SequenceGenerator, SequenceGeneratorConfig, get_seq_mask
from nemo.collections.common.losses import SmoothedNLLLoss
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, LogprobsType, NeuralType, SpectrogramType
from nemo.utils import logging, model_utils
__all__ = ["SLUIntentSlotBPEModel"]
class SLUIntentSlotBPEModel(ASRModel, ExportableEncDecModel, ASRModuleMixin, ASRBPEMixin):
"""Model for end-to-end speech intent classification and slot filling, which is formulated as a speech-to-sequence task"""
def __init__(self, cfg: DictConfig, trainer=None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
if 'tokenizer' not in cfg:
raise ValueError("`cfg` must have `tokenizer` config to create a tokenizer !")
# Setup the tokenizer
self._setup_tokenizer(cfg.tokenizer)
super().__init__(cfg=cfg, trainer=trainer)
self.preprocessor = self.from_config_dict(self.cfg.preprocessor)
self.encoder = self.from_config_dict(self.cfg.encoder)
self.decoder = self.from_config_dict(self.cfg.decoder)
if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:
self.spec_augmentation = self.from_config_dict(self._cfg.spec_augment)
else:
self.spec_augmentation = None
# Setup optional Optimization flags
self.setup_optimization_flags()
# Adapter modules setup (from ASRAdapterModelMixin)
self.setup_adapters()
self.vocabulary = self.tokenizer.tokenizer.get_vocab()
vocab_size = len(self.vocabulary)
# Create embedding layer
self.cfg.embedding["vocab_size"] = vocab_size
self.embedding = self.from_config_dict(self.cfg.embedding)
# Create token classifier
self.cfg.classifier["num_classes"] = vocab_size
self.classifier = self.from_config_dict(self.cfg.classifier)
self.loss = SmoothedNLLLoss(label_smoothing=self.cfg.loss.label_smoothing)
self.sequence_generator = SequenceGenerator(
cfg=self.cfg.sequence_generator,
embedding=self.embedding,
decoder=self.decoder,
log_softmax=self.classifier,
tokenizer=self.tokenizer,
)
# Setup decoding objects
decoding_cfg = self.cfg.get('decoding', None)
# In case decoding config not found, use default config
if decoding_cfg is None:
decoding_cfg = OmegaConf.structured(CTCBPEDecodingConfig)
with open_dict(self.cfg):
self.cfg.decoding = decoding_cfg
self.decoding = CTCBPEDecoding(self.cfg.decoding, tokenizer=self.tokenizer)
# Setup metric with decoding strategy
self._wer = WERBPE(
decoding=self.decoding,
use_cer=self._cfg.get('use_cer', False),
dist_sync_on_step=True,
log_prediction=self._cfg.get("log_prediction", False),
fold_consecutive=False,
)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
input_signal_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), input_signal_eltype, optional=True),
"input_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"target_semantics": NeuralType(('B', 'T'), input_signal_eltype, optional=True),
"target_semantics_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"processed_signal": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),
"processed_signal_length": NeuralType(tuple('B'), LengthsType(), optional=True),
"sample_id": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"log_probs": NeuralType(('B', 'T', 'D'), LogprobsType(), optional=True),
"lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"greedy_predictions": NeuralType(('B', 'T'), LabelsType(), optional=True),
}
def set_decoding_strategy(self, cfg: SequenceGeneratorConfig):
cfg.max_sequence_length = self.sequence_generator.generator.max_seq_length
self.sequence_generator = SequenceGenerator(cfg, self.embedding, self.decoder, self.classifier, self.tokenizer)
@typecheck()
def forward(
self,
input_signal=None,
input_signal_length=None,
target_semantics=None,
target_semantics_length=None,
processed_signal=None,
processed_signal_length=None,
):
"""
Forward pass of the model.
Params:
input_signal: Tensor that represents a batch of raw audio signals, of shape [B, T]. T here represents
timesteps, with 1 second of audio represented as `self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio sequences.
target_semantics: Tensor that represents a batch of semantic tokens, of shape [B, L].
target_semantics_length: Vector of length B, that contains the individual lengths of the semantic sequences.
processed_signal: Tensor that represents a batch of processed audio signals, of shape (B, D, T) that has
undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the processed audio
sequences.
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the output sequence after decoder, of shape [B].
3) The token predictions of the model of shape [B, T].
"""
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if (has_input_signal ^ has_processed_signal) == False:
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_len`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
encoded = encoded.transpose(1, 2) # BxDxT -> BxTxD
encoded_mask = get_seq_mask(encoded, encoded_len)
if target_semantics is None: # in inference-only mode
predictions = self.sequence_generator(encoded, encoded_mask)
return None, None, predictions
bos_semantics_tokens = target_semantics[:, :-1]
bos_semantics = self.embedding(bos_semantics_tokens)
bos_semantics_mask = get_seq_mask(bos_semantics, target_semantics_length - 1)
decoded = self.decoder(
encoder_states=encoded,
encoder_mask=encoded_mask,
decoder_states=bos_semantics,
decoder_mask=bos_semantics_mask,
)
log_probs = self.classifier(decoded)
predictions = log_probs.argmax(dim=-1, keepdim=False)
pred_len = self.sequence_generator.get_seq_length(predictions)
return log_probs, pred_len, predictions
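    # Editor's note: during training the decoder is teacher-forced. `target_semantics[:, :-1]` keeps
    # the BOS token and drops the final token to form the decoder input, while the loss in
    # `training_step` below compares against `semantics[:, 1:]` (BOS dropped), so decoder input and
    # target are the same sequence shifted by one position.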
# PTL-specific methods
def training_step(self, batch, batch_nb):
if len(batch) == 4:
signal, signal_len, semantics, semantics_len = batch
else:
signal, signal_len, semantics, semantics_len, sample_id = batch
log_probs, pred_len, predictions = self.forward(
input_signal=signal,
input_signal_length=signal_len,
target_semantics=semantics,
target_semantics_length=semantics_len,
)
eos_semantics = semantics[:, 1:]
eos_semantics_len = semantics_len - 1 # subtract 1 for eos tokens
loss_value = self.loss(log_probs=log_probs, labels=eos_semantics, lengths=eos_semantics_len)
tensorboard_logs = {'train_loss': loss_value.item()}
if len(self._optimizer.param_groups) == 1:
tensorboard_logs['learning_rate'] = self._optimizer.param_groups[0]['lr']
else:
for i, group in enumerate(self._optimizer.param_groups):
tensorboard_logs[f'learning_rate_g{i}'] = group['lr']
if hasattr(self, '_trainer') and self._trainer is not None:
log_every_n_steps = self._trainer.log_every_n_steps
else:
log_every_n_steps = 1
if (batch_nb + 1) % log_every_n_steps == 0:
self._wer.update(
predictions=predictions,
targets=eos_semantics,
predictions_lengths=pred_len,
target_lengths=eos_semantics_len,
)
wer, _, _ = self._wer.compute()
self._wer.reset()
tensorboard_logs.update({'training_batch_wer': wer})
return {'loss': loss_value, 'log': tensorboard_logs}
def predict(
self, input_signal, input_signal_length, processed_signal=None, processed_signal_length=None, dataloader_idx=0
) -> List[str]:
has_input_signal = input_signal is not None and input_signal_length is not None
has_processed_signal = processed_signal is not None and processed_signal_length is not None
if (has_input_signal ^ has_processed_signal) == False:
raise ValueError(
f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive "
" with ``processed_signal`` and ``processed_signal_len`` arguments."
)
if not has_processed_signal:
processed_signal, processed_signal_length = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
if self.spec_augmentation is not None and self.training:
processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_length)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)
encoded = encoded.transpose(1, 2) # BxDxT -> BxTxD
encoded_mask = get_seq_mask(encoded, encoded_len)
pred_tokens = self.sequence_generator(encoded, encoded_mask)
predictions = self.sequence_generator.decode_semantics_from_tokens(pred_tokens)
return predictions
def validation_step(self, batch, batch_idx, dataloader_idx=0):
if len(batch) == 4:
signal, signal_len, semantics, semantics_len = batch
else:
signal, signal_len, semantics, semantics_len, sample_id = batch
if isinstance(batch, DALIOutputs) and batch.has_processed_signal:
log_probs, pred_len, predictions = self.forward(
processed_signal=signal,
processed_signal_length=signal_len,
target_semantics=semantics,
target_semantics_length=semantics_len,
)
else:
log_probs, pred_len, predictions = self.forward(
input_signal=signal,
input_signal_length=signal_len,
target_semantics=semantics,
target_semantics_length=semantics_len,
)
eos_semantics = semantics[:, 1:]
eos_semantics_len = semantics_len - 1 # subtract 1 for bos&eos tokens
loss_value = self.loss(log_probs=log_probs, labels=eos_semantics, lengths=eos_semantics_len)
self._wer.update(
predictions=predictions,
targets=eos_semantics,
predictions_lengths=pred_len,
target_lengths=eos_semantics_len,
)
wer, wer_num, wer_denom = self._wer.compute()
self._wer.reset()
return {
'val_loss': loss_value,
'val_wer_num': wer_num,
'val_wer_denom': wer_denom,
'val_wer': wer,
}
def test_step(self, batch, batch_idx, dataloader_idx=0):
logs = self.validation_step(batch, batch_idx, dataloader_idx=dataloader_idx)
test_logs = {
'test_loss': logs['val_loss'],
'test_wer_num': logs['val_wer_num'],
'test_wer_denom': logs['val_wer_denom'],
'test_wer': logs['val_wer'],
}
return test_logs
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
def _setup_dataloader_from_config(self, config: Optional[Dict]):
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
shuffle = config['shuffle']
device = 'gpu' if torch.cuda.is_available() else 'cpu'
if config.get('use_dali', False):
device_id = self.local_rank if device == 'gpu' else None
dataset = audio_to_text_dataset.get_dali_bpe_dataset(
config=config,
tokenizer=self.tokenizer,
shuffle=shuffle,
device_id=device_id,
global_rank=self.global_rank,
world_size=self.world_size,
preprocessor_cfg=self._cfg.preprocessor,
)
return dataset
# Instantiate tarred dataset loader or normal dataset loader
if config.get('is_tarred', False):
if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
'manifest_filepath' in config and config['manifest_filepath'] is None
):
logging.warning(
"Could not load dataset as `manifest_filepath` was None or "
f"`tarred_audio_filepaths` is None. Provided config : {config}"
)
return None
shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
dataset = audio_to_text_dataset.get_tarred_dataset(
config=config,
tokenizer=self.tokenizer,
shuffle_n=shuffle_n,
global_rank=self.global_rank,
world_size=self.world_size,
augmentor=augmentor,
)
shuffle = False
else:
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
dataset = audio_to_text_dataset.get_bpe_dataset(
config=config, tokenizer=self.tokenizer, augmentor=augmentor
)
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the training data loader via a Dict-like object.
Args:
train_data_config: A config that contains the information regarding construction
of an ASR Training dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
# preserve config
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
# Need to set this because if using an IterableDataset, the length of the dataloader is the total number
# of samples rather than the number of batches, and this messes up the tqdm progress bar.
# So we set the number of steps manually (to the correct number) to fix this.
if (
self._train_dl is not None
and hasattr(self._train_dl, 'dataset')
and isinstance(self._train_dl.dataset, torch.utils.data.IterableDataset)
):
# We also need to check if limit_train_batches is already set.
# If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,
# and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).
if self._trainer is not None and isinstance(self._trainer.limit_train_batches, float):
self._trainer.limit_train_batches = int(
self._trainer.limit_train_batches
* ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])
)
elif self._trainer is None:
logging.warning(
"Model Trainer was not set before constructing the dataset, incorrect number of "
"training batches will be used. Please set the trainer and rebuild the dataset."
)
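    # Editor's note: a quick worked example of the `limit_train_batches` rescaling above (numbers
    # are hypothetical): with 100_000 samples, world_size=4 and batch_size=32,
    # ceil((100_000 / 4) / 32) = 782 steps per epoch, so limit_train_batches=0.25 becomes
    # int(0.25 * 782) = 195 batches.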
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the validation data loader via a Dict-like object.
Args:
val_data_config: A config that contains the information regarding construction
                of an ASR validation dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the test data loader via a Dict-like object.
Args:
test_data_config: A config that contains the information regarding construction
                of an ASR test dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`
- :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`
- :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`
"""
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
            num_workers: (int) number of workers. Depends on the batch_size and machine. \
0 - only the main process will load batches, 1 - one worker (not main process)
Returns:
A pytorch DataLoader for the given audio file(s).
"""
if 'manifest_filepath' in config:
manifest_filepath = config['manifest_filepath']
batch_size = config['batch_size']
else:
manifest_filepath = os.path.join(config['temp_dir'], 'manifest.json')
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': manifest_filepath,
'sample_rate': self.preprocessor._sample_rate,
'batch_size': batch_size,
'shuffle': False,
'num_workers': config.get('num_workers', min(batch_size, os.cpu_count() - 1)),
'pin_memory': True,
'use_start_end_token': self.cfg.validation_ds.get('use_start_end_token', False),
}
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
@torch.no_grad()
def transcribe(
self,
paths2audio_files: List[str],
batch_size: int = 4,
logprobs: bool = False,
return_hypotheses: bool = False,
num_workers: int = 0,
verbose: bool = True,
) -> List[str]:
"""
Uses greedy decoding to transcribe audio files into SLU semantics.
Use this method for debugging and prototyping.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is between 5 and 25 seconds. \
But it is possible to pass a few hours long file if enough GPU memory is available.
batch_size: (int) batch size to use during inference.
Bigger will result in better throughput performance but would use more memory.
logprobs: (bool) pass True to get log probabilities instead of transcripts.
            return_hypotheses: (bool) Either return hypotheses or text.
                With hypotheses you can do some postprocessing, such as getting timestamps or rescoring.
num_workers: (int) number of workers for DataLoader
verbose: (bool) whether to display tqdm progress bar
Returns:
A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
            return []
if return_hypotheses and logprobs:
raise ValueError(
"Either `return_hypotheses` or `logprobs` can be True at any given time."
"Returned hypotheses will contain the logprobs."
)
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
# We will store transcriptions here
hypotheses = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
dither_value = self.preprocessor.featurizer.dither
pad_to_value = self.preprocessor.featurizer.pad_to
try:
self.preprocessor.featurizer.dither = 0.0
self.preprocessor.featurizer.pad_to = 0
# Switch model to evaluation mode
self.eval()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Work in tmp directory - will store manifest file there
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in paths2audio_files:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': ''}
fp.write(json.dumps(entry) + '\n')
config = {
'paths2audio_files': paths2audio_files,
'batch_size': batch_size,
'temp_dir': tmpdir,
'num_workers': num_workers,
}
temporary_datalayer = self._setup_transcribe_dataloader(config)
for test_batch in tqdm(temporary_datalayer, desc="Transcribing", disable=not verbose):
predictions = self.predict(
input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)
)
hypotheses += predictions
del predictions
del test_batch
finally:
# set mode back to its original value
self.train(mode=mode)
self.preprocessor.featurizer.dither = dither_value
self.preprocessor.featurizer.pad_to = pad_to_value
logging.set_verbosity(logging_level)
return hypotheses
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="slu_conformer_transformer_large_slurp",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:slu_conformer_transformer_large_slurp",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/slu_conformer_transformer_large_slurp/versions/1.13.0/files/slu_conformer_transformer_large_slurp.nemo",
)
        results.append(model)
        return results
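# --- Editor-added usage sketch (not part of the upstream NeMo file) ---
# Minimal example of SLU inference with the `transcribe()` method defined above; it assumes
# NGC access for `from_pretrained` and uses a placeholder audio path.
if __name__ == "__main__":
    slu_model = SLUIntentSlotBPEModel.from_pretrained("slu_conformer_transformer_large_slurp")
    semantics = slu_model.transcribe(paths2audio_files=["utterance.wav"], batch_size=4)
    print(semantics[0])  # a serialized intent/slot string produced by greedy decoding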
|
NeMo-main
|
nemo/collections/asr/models/slu_models.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
from typing import Dict, List, Optional, Union
import librosa
import soundfile as sf
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from tqdm import tqdm
from nemo.collections.asr.data import audio_to_audio_dataset
from nemo.collections.asr.data.audio_to_text_dataset import inject_dataloader_value_from_model_config
from nemo.collections.asr.models.audio_to_audio_model import AudioToAudioModel
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import AudioSignal, LengthsType, NeuralType
from nemo.utils import logging
__all__ = ['EncMaskDecAudioToAudioModel']
class EncMaskDecAudioToAudioModel(AudioToAudioModel):
"""Class for encoder-mask-decoder audio processing models.
The model consists of the following blocks:
- encoder: transforms input multi-channel audio signal into an encoded representation (analysis transform)
- mask_estimator: estimates a mask used by signal processor
- mask_processor: mask-based signal processor, combines the encoded input and the estimated mask
- decoder: transforms processor output into the time domain (synthesis transform)
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
# Global_rank and local_rank is set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.world_size
super().__init__(cfg=cfg, trainer=trainer)
self.sample_rate = self._cfg.sample_rate
# Setup processing modules
self.encoder = EncMaskDecAudioToAudioModel.from_config_dict(self._cfg.encoder)
self.mask_estimator = EncMaskDecAudioToAudioModel.from_config_dict(self._cfg.mask_estimator)
self.mask_processor = EncMaskDecAudioToAudioModel.from_config_dict(self._cfg.mask_processor)
self.decoder = EncMaskDecAudioToAudioModel.from_config_dict(self._cfg.decoder)
if 'mixture_consistency' in self._cfg:
self.mixture_consistency = EncMaskDecAudioToAudioModel.from_config_dict(self._cfg.mixture_consistency)
else:
self.mixture_consistency = None
# Future enhancement:
# If subclasses need to modify the config before calling super()
# Check ASRBPE* classes do with their mixin
# Setup optional Optimization flags
self.setup_optimization_flags()
@torch.no_grad()
def process(
self,
paths2audio_files: List[str],
output_dir: str,
batch_size: int = 1,
num_workers: Optional[int] = None,
input_channel_selector: Optional[ChannelSelectorType] = None,
) -> List[str]:
"""
Process audio files provided in paths2audio_files.
Processed signals will be saved in output_dir.
Args:
paths2audio_files: (a list) of paths to audio files. \
Recommended length per file is between 5 and 25 seconds. \
But it is possible to pass a few hours long file if enough GPU memory is available.
            output_dir: Output directory where the processed signals will be saved.
batch_size: (int) batch size to use during inference.
Bigger will result in better throughput performance but would use more memory.
num_workers: Number of workers for the dataloader
input_channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to `None`. Defaults to `None`.
        Returns:
            Paths to the processed audio files, saved in output_dir.
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
            return []
if num_workers is None:
num_workers = min(batch_size, os.cpu_count() - 1)
# Output
paths2processed_files = []
# Model's mode and device
mode = self.training
device = next(self.parameters()).device
try:
# Switch model to evaluation mode
self.eval()
# Freeze weights
self.freeze()
logging_level = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
# Processing
with tempfile.TemporaryDirectory() as tmpdir:
# Save temporary manifest
temporary_manifest_filepath = os.path.join(tmpdir, 'manifest.json')
with open(temporary_manifest_filepath, 'w', encoding='utf-8') as fp:
for audio_file in paths2audio_files:
entry = {'input_filepath': audio_file, 'duration': librosa.get_duration(filename=audio_file)}
fp.write(json.dumps(entry) + '\n')
config = {
'manifest_filepath': temporary_manifest_filepath,
'input_key': 'input_filepath',
'input_channel_selector': input_channel_selector,
'batch_size': min(batch_size, len(paths2audio_files)),
'num_workers': num_workers,
}
# Create output dir if necessary
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# DataLoader for the input files
temporary_dataloader = self._setup_process_dataloader(config)
# Indexing of the original files, used to form the output file name
file_idx = 0
# Process batches
for test_batch in tqdm(temporary_dataloader, desc="Processing"):
input_signal = test_batch[0]
input_length = test_batch[1]
# Expand channel dimension, if necessary
# For consistency, the model uses multi-channel format, even if the channel dimension is 1
if input_signal.ndim == 2:
input_signal = input_signal.unsqueeze(1)
processed_batch, _ = self.forward(
input_signal=input_signal.to(device), input_length=input_length.to(device)
)
for example_idx in range(processed_batch.size(0)):
# This assumes the data loader is not shuffling files
file_name = os.path.basename(paths2audio_files[file_idx])
# Prepare output file
output_file = os.path.join(output_dir, f'processed_{file_name}')
# Crop the output signal to the actual length
output_signal = processed_batch[example_idx, :, : input_length[example_idx]].cpu().numpy()
# Write audio
sf.write(output_file, output_signal.T, self.sample_rate, 'float')
# Update the file counter
file_idx += 1
# Save processed file
paths2processed_files.append(output_file)
del test_batch
del processed_batch
finally:
# set mode back to its original value
self.train(mode=mode)
if mode is True:
self.unfreeze()
logging.set_verbosity(logging_level)
return paths2processed_files
def _setup_dataloader_from_config(self, config: Optional[Dict]):
is_concat = config.get('is_concat', False)
if is_concat:
raise NotImplementedError('Concat not implemented')
# TODO: Consider moving `inject` from `audio_to_text_dataset` to a utility module?
# Automatically inject args from model config to dataloader config
inject_dataloader_value_from_model_config(self.cfg, config, key='sample_rate')
# Instantiate tarred dataset loader or normal dataset loader
if config.get('is_tarred', False):
raise NotImplementedError('Tarred datasets not supported')
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
dataset = audio_to_audio_dataset.get_audio_to_target_dataset(config=config)
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=config['shuffle'],
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the training data loader via a Dict-like object.
Args:
train_data_config: A config that contains the information regarding construction
of a training dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_audio.AudioToTargetDataset`
"""
if 'shuffle' not in train_data_config:
train_data_config['shuffle'] = True
# preserve config
self._update_dataset_config(dataset_name='train', config=train_data_config)
self._train_dl = self._setup_dataloader_from_config(config=train_data_config)
if 'is_tarred' in train_data_config and train_data_config['is_tarred']:
raise NotImplementedError('Tarred datasets not supported')
def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the validation data loader via a Dict-like object.
Args:
val_data_config: A config that contains the information regarding construction
of a validation dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_audio.AudioToTargetDataset`
"""
if 'shuffle' not in val_data_config:
val_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
"""
Sets up the test data loader via a Dict-like object.
Args:
test_data_config: A config that contains the information regarding construction
of a test dataset.
Supported Datasets:
- :class:`~nemo.collections.asr.data.audio_to_audio.AudioToTargetDataset`
"""
if 'shuffle' not in test_data_config:
test_data_config['shuffle'] = False
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
self._test_dl = self._setup_dataloader_from_config(config=test_data_config)
def _setup_process_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""Prepare a dataloader for processing files.
Args:
config: A python dictionary which contains the following keys:
manifest_filepath: path to a manifest file
input_key: key with audio filepaths in the manifest
input_channel_selector: Optional, used to select a subset of channels from input audio files
batch_size: batch size for the dataloader
num_workers: number of workers for the dataloader
Returns:
A pytorch DataLoader for the given manifest filepath.
"""
dl_config = {
'manifest_filepath': config['manifest_filepath'],
'sample_rate': self.sample_rate,
'input_key': config['input_key'],
'input_channel_selector': config.get('input_channel_selector', None),
'target_key': None,
'target_channel_selector': None,
'batch_size': config['batch_size'],
'shuffle': False,
'num_workers': config.get('num_workers', min(config['batch_size'], os.cpu_count() - 1)),
'pin_memory': True,
}
temporary_dataloader = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_dataloader
@property
def input_types(self) -> Dict[str, NeuralType]:
return {
"input_signal": NeuralType(
('B', 'C', 'T'), AudioSignal(freq=self.sample_rate)
), # multi-channel format, channel dimension can be 1 for single-channel audio
"input_length": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
return {
"output_signal": NeuralType(
('B', 'C', 'T'), AudioSignal(freq=self.sample_rate)
), # multi-channel format, channel dimension can be 1 for single-channel audio
"output_length": NeuralType(tuple('B'), LengthsType(), optional=True),
}
def match_batch_length(self, input: torch.Tensor, batch_length: int):
"""Trim or pad the output to match the batch length.
Args:
input: tensor with shape (B, C, T)
batch_length: int
Returns:
Tensor with shape (B, C, T), where T matches the
batch length.
"""
input_length = input.size(-1)
pad_length = batch_length - input_length
pad = (0, pad_length)
# pad with zeros or crop
return torch.nn.functional.pad(input, pad, 'constant', 0)
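    # Editor's note: `torch.nn.functional.pad` with a negative pad width trims the tensor, so the
    # single call above both pads (pad_length > 0) and crops (pad_length < 0). For example,
    # F.pad(torch.ones(1, 1, 10), (0, -3)) has shape (1, 1, 7), while (0, 3) gives (1, 1, 13).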
@typecheck()
def forward(self, input_signal, input_length=None):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
                of shape [B, T] or [B, C, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
            input_length: Vector of length B, that contains the individual lengths of the audio
                sequences.
        Returns:
            The processed signal of shape [B, C, T] and its length of shape [B].
"""
batch_length = input_signal.size(-1)
# Encoder
encoded, encoded_length = self.encoder(input=input_signal, input_length=input_length)
# Mask estimator
mask, _ = self.mask_estimator(input=encoded, input_length=encoded_length)
# Mask-based processor in the encoded domain
processed, processed_length = self.mask_processor(input=encoded, input_length=encoded_length, mask=mask)
# Mixture consistency
if self.mixture_consistency is not None:
processed = self.mixture_consistency(mixture=encoded, estimate=processed)
# Decoder
processed, processed_length = self.decoder(input=processed, input_length=processed_length)
# Trim or pad the estimated signal to match input length
processed = self.match_batch_length(input=processed, batch_length=batch_length)
return processed, processed_length
# PTL-specific methods
def training_step(self, batch, batch_idx):
input_signal, input_length, target_signal, target_length = batch
# Expand channel dimension, if necessary
# For consistency, the model uses multi-channel format, even if the channel dimension is 1
if input_signal.ndim == 2:
input_signal = input_signal.unsqueeze(1)
if target_signal.ndim == 2:
target_signal = target_signal.unsqueeze(1)
processed_signal, _ = self.forward(input_signal=input_signal, input_length=input_length)
loss_value = self.loss(estimate=processed_signal, target=target_signal, input_length=input_length)
tensorboard_logs = {
'train_loss': loss_value,
'learning_rate': self._optimizer.param_groups[0]['lr'],
'global_step': torch.tensor(self.trainer.global_step, dtype=torch.float32),
}
return {'loss': loss_value, 'log': tensorboard_logs}
def evaluation_step(self, batch, batch_idx, dataloader_idx: int = 0, tag: str = 'val'):
input_signal, input_length, target_signal, target_length = batch
# Expand channel dimension, if necessary
# For consistency, the model uses multi-channel format, even if the channel dimension is 1
if input_signal.ndim == 2:
input_signal = input_signal.unsqueeze(1)
if target_signal.ndim == 2:
target_signal = target_signal.unsqueeze(1)
processed_signal, _ = self.forward(input_signal=input_signal, input_length=input_length)
# Prepare output
loss_value = self.loss(estimate=processed_signal, target=target_signal, input_length=input_length)
output_dict = {f'{tag}_loss': loss_value}
# Update metrics
if hasattr(self, 'metrics') and tag in self.metrics:
# Update metrics for this (tag, dataloader_idx)
for name, metric in self.metrics[tag][dataloader_idx].items():
metric.update(preds=processed_signal, target=target_signal, input_length=input_length)
# Log global step
self.log('global_step', torch.tensor(self.trainer.global_step, dtype=torch.float32), sync_dist=True)
return output_dict
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
return results
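# --- Editor-added usage sketch (not part of the upstream NeMo file) ---
# `list_available_models` above publishes no NGC checkpoints, so this example restores a
# locally trained model; "enhancer.nemo" and "noisy.wav" are placeholder paths.
if __name__ == "__main__":
    model = EncMaskDecAudioToAudioModel.restore_from("enhancer.nemo")
    processed_paths = model.process(
        paths2audio_files=["noisy.wav"],
        output_dir="enhanced",  # processed files are written here as processed_<name>
        batch_size=1,
    )
    print(processed_paths)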
|
NeMo-main
|
nemo/collections/asr/models/enhancement_models.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from typing import Dict, List, Optional, Union
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from nemo.collections.asr.data import audio_to_text_dataset
from nemo.collections.asr.data.audio_to_text_dali import AudioToBPEDALIDataset
from nemo.collections.asr.losses.ctc import CTCLoss
from nemo.collections.asr.losses.rnnt import RNNTLoss
from nemo.collections.asr.metrics.rnnt_wer_bpe import RNNTBPEWER, RNNTBPEDecoding, RNNTBPEDecodingConfig
from nemo.collections.asr.metrics.wer_bpe import WERBPE, CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.models.hybrid_rnnt_ctc_models import EncDecHybridRNNTCTCModel
from nemo.collections.asr.parts.mixins import ASRBPEMixin
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging, model_utils
class EncDecHybridRNNTCTCBPEModel(EncDecHybridRNNTCTCModel, ASRBPEMixin):
"""Base class for encoder decoder RNNT-based models with auxiliary CTC decoder/loss and subword tokenization."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# Tokenizer is necessary for this model
if 'tokenizer' not in cfg:
raise ValueError("`cfg` must have `tokenizer` config to create a tokenizer !")
if not isinstance(cfg, DictConfig):
cfg = OmegaConf.create(cfg)
# Setup the tokenizer
self._setup_tokenizer(cfg.tokenizer)
# Initialize a dummy vocabulary
vocabulary = self.tokenizer.tokenizer.get_vocab()
# Set the new vocabulary
with open_dict(cfg):
cfg.labels = ListConfig(list(vocabulary))
with open_dict(cfg.decoder):
cfg.decoder.vocab_size = len(vocabulary)
with open_dict(cfg.joint):
cfg.joint.num_classes = len(vocabulary)
cfg.joint.vocabulary = ListConfig(list(vocabulary))
cfg.joint.jointnet.encoder_hidden = cfg.model_defaults.enc_hidden
cfg.joint.jointnet.pred_hidden = cfg.model_defaults.pred_hidden
# setup auxiliary CTC decoder
if 'aux_ctc' not in cfg:
raise ValueError(
"The config need to have a section for the CTC decoder named as aux_ctc for Hybrid models."
)
with open_dict(cfg):
if self.tokenizer_type == "agg":
cfg.aux_ctc.decoder.vocabulary = ListConfig(vocabulary)
else:
cfg.aux_ctc.decoder.vocabulary = ListConfig(list(vocabulary.keys()))
if cfg.aux_ctc.decoder["num_classes"] < 1:
logging.info(
"\nReplacing placholder number of classes ({}) with actual number of classes - {}".format(
cfg.aux_ctc.decoder["num_classes"], len(vocabulary)
)
)
cfg.aux_ctc.decoder["num_classes"] = len(vocabulary)
super().__init__(cfg=cfg, trainer=trainer)
# Setup decoding object
self.decoding = RNNTBPEDecoding(
decoding_cfg=self.cfg.decoding, decoder=self.decoder, joint=self.joint, tokenizer=self.tokenizer,
)
# Setup wer object
self.wer = RNNTBPEWER(
decoding=self.decoding,
batch_dim_index=0,
use_cer=self.cfg.get('use_cer', False),
log_prediction=self.cfg.get('log_prediction', True),
dist_sync_on_step=True,
)
# Setup fused Joint step if flag is set
if self.joint.fuse_loss_wer:
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
# Setup CTC decoding
ctc_decoding_cfg = self.cfg.aux_ctc.get('decoding', None)
if ctc_decoding_cfg is None:
ctc_decoding_cfg = OmegaConf.structured(CTCBPEDecodingConfig)
with open_dict(self.cfg.aux_ctc):
self.cfg.aux_ctc.decoding = ctc_decoding_cfg
self.ctc_decoding = CTCBPEDecoding(self.cfg.aux_ctc.decoding, tokenizer=self.tokenizer)
# Setup CTC WER
self.ctc_wer = WERBPE(
decoding=self.ctc_decoding,
use_cer=self.cfg.aux_ctc.get('use_cer', False),
dist_sync_on_step=True,
log_prediction=self.cfg.get("log_prediction", False),
)
# setting the RNNT decoder as the default one
self.cur_decoder = "rnnt"
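    # Editor's note: the hybrid model keeps two decoding paths (the RNN-T decoder/joint and the
    # auxiliary CTC decoder). `cur_decoder` records which one is currently active; it is set to
    # "ctc" by `change_decoding_strategy(decoder_type="ctc")` further below.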
def _setup_dataloader_from_config(self, config: Optional[Dict]):
dataset = audio_to_text_dataset.get_audio_to_text_bpe_dataset_from_config(
config=config,
local_rank=self.local_rank,
global_rank=self.global_rank,
world_size=self.world_size,
tokenizer=self.tokenizer,
preprocessor_cfg=self.cfg.get("preprocessor", None),
)
if dataset is None:
return None
if isinstance(dataset, AudioToBPEDALIDataset):
# DALI Dataset implements dataloader interface
return dataset
shuffle = config['shuffle']
if isinstance(dataset, torch.utils.data.IterableDataset):
shuffle = False
if hasattr(dataset, 'collate_fn'):
collate_fn = dataset.collate_fn
elif hasattr(dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = dataset.datasets[0].datasets[0].collate_fn
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=config['batch_size'],
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
config: A python dictionary which contains the following keys:
paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \
Recommended length per file is between 5 and 25 seconds.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
temp_dir: (str) A temporary directory where the audio manifest is temporarily
stored.
            num_workers: (int) number of workers. Depends on the batch_size and machine. \
0 - only the main process will load batches, 1 - one worker (not main process)
Returns:
A pytorch DataLoader for the given audio file(s).
"""
if 'manifest_filepath' in config:
manifest_filepath = config['manifest_filepath']
batch_size = config['batch_size']
else:
manifest_filepath = os.path.join(config['temp_dir'], 'manifest.json')
batch_size = min(config['batch_size'], len(config['paths2audio_files']))
dl_config = {
'manifest_filepath': manifest_filepath,
'sample_rate': self.preprocessor._sample_rate,
'batch_size': batch_size,
'shuffle': False,
'num_workers': config.get('num_workers', min(batch_size, os.cpu_count() - 1)),
'pin_memory': True,
'channel_selector': config.get('channel_selector', None),
'use_start_end_token': self.cfg.validation_ds.get('use_start_end_token', False),
}
if config.get("augmentor"):
dl_config['augmentor'] = config.get("augmentor")
temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))
return temporary_datalayer
def change_vocabulary(
self,
new_tokenizer_dir: Union[str, DictConfig],
new_tokenizer_type: str,
decoding_cfg: Optional[DictConfig] = None,
ctc_decoding_cfg: Optional[DictConfig] = None,
):
"""
        Changes the vocabulary used during RNNT decoding. Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example,
        you would use it with a pretrained encoder when fine-tuning on data in another language, or when you need the
        model to learn capitalization, punctuation and/or special characters.
Args:
new_tokenizer_dir: Directory path to tokenizer or a config for a new tokenizer (if the tokenizer type is `agg`)
new_tokenizer_type: Type of tokenizer. Can be either `agg`, `bpe` or `wpe`.
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
ctc_decoding_cfg: A config for auxiliary CTC decoding, which is optional and can be used to change the decoding type.
Returns: None
"""
if isinstance(new_tokenizer_dir, DictConfig):
if new_tokenizer_type == 'agg':
new_tokenizer_cfg = new_tokenizer_dir
else:
raise ValueError(
f'New tokenizer dir should be a string unless the tokenizer is `agg`, but this tokenizer type is: {new_tokenizer_type}'
)
else:
new_tokenizer_cfg = None
if new_tokenizer_cfg is not None:
tokenizer_cfg = new_tokenizer_cfg
else:
if not os.path.isdir(new_tokenizer_dir):
raise NotADirectoryError(
f'New tokenizer dir must be non-empty path to a directory. But I got: {new_tokenizer_dir}'
)
if new_tokenizer_type.lower() not in ('bpe', 'wpe'):
raise ValueError(f'New tokenizer type must be either `bpe` or `wpe`')
tokenizer_cfg = OmegaConf.create({'dir': new_tokenizer_dir, 'type': new_tokenizer_type})
# Setup the tokenizer
self._setup_tokenizer(tokenizer_cfg)
# Initialize a dummy vocabulary
vocabulary = self.tokenizer.tokenizer.get_vocab()
joint_config = self.joint.to_config_dict()
new_joint_config = copy.deepcopy(joint_config)
if self.tokenizer_type == "agg":
new_joint_config["vocabulary"] = ListConfig(vocabulary)
else:
new_joint_config["vocabulary"] = ListConfig(list(vocabulary.keys()))
new_joint_config['num_classes'] = len(vocabulary)
del self.joint
self.joint = EncDecHybridRNNTCTCBPEModel.from_config_dict(new_joint_config)
decoder_config = self.decoder.to_config_dict()
new_decoder_config = copy.deepcopy(decoder_config)
new_decoder_config.vocab_size = len(vocabulary)
del self.decoder
self.decoder = EncDecHybridRNNTCTCBPEModel.from_config_dict(new_decoder_config)
del self.loss
self.loss = RNNTLoss(num_classes=self.joint.num_classes_with_blank - 1)
if decoding_cfg is None:
# Assume same decoding config as before
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(RNNTBPEDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = RNNTBPEDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, tokenizer=self.tokenizer,
)
self.wer = RNNTBPEWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer or (
self.decoding.joint_fused_batch_size is not None and self.decoding.joint_fused_batch_size > 0
):
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
# Update config
with open_dict(self.cfg.joint):
self.cfg.joint = new_joint_config
with open_dict(self.cfg.decoder):
self.cfg.decoder = new_decoder_config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed tokenizer of the RNNT decoder to {self.joint.vocabulary} vocabulary.")
# set up the new tokenizer for the CTC decoder
if hasattr(self, 'ctc_decoder'):
ctc_decoder_config = copy.deepcopy(self.ctc_decoder.to_config_dict())
# sidestepping the potential overlapping tokens issue in aggregate tokenizers
if self.tokenizer_type == "agg":
ctc_decoder_config.vocabulary = ListConfig(vocabulary)
else:
ctc_decoder_config.vocabulary = ListConfig(list(vocabulary.keys()))
decoder_num_classes = ctc_decoder_config['num_classes']
# Override number of classes if placeholder provided
logging.info(
"\nReplacing old number of classes ({}) with new number of classes - {}".format(
decoder_num_classes, len(vocabulary)
)
)
ctc_decoder_config['num_classes'] = len(vocabulary)
del self.ctc_decoder
self.ctc_decoder = EncDecHybridRNNTCTCBPEModel.from_config_dict(ctc_decoder_config)
del self.ctc_loss
self.ctc_loss = CTCLoss(
num_classes=self.ctc_decoder.num_classes_with_blank - 1,
zero_infinity=True,
reduction=self.cfg.aux_ctc.get("ctc_reduction", "mean_batch"),
)
if ctc_decoding_cfg is None:
# Assume same decoding config as before
ctc_decoding_cfg = self.cfg.aux_ctc.decoding
# Assert the decoding config with all hyper parameters
ctc_decoding_cls = OmegaConf.structured(CTCBPEDecodingConfig)
ctc_decoding_cls = OmegaConf.create(OmegaConf.to_container(ctc_decoding_cls))
ctc_decoding_cfg = OmegaConf.merge(ctc_decoding_cls, ctc_decoding_cfg)
self.ctc_decoding = CTCBPEDecoding(decoding_cfg=ctc_decoding_cfg, tokenizer=self.tokenizer)
self.ctc_wer = WERBPE(
decoding=self.ctc_decoding,
use_cer=self.cfg.aux_ctc.get('use_cer', False),
log_prediction=self.cfg.get("log_prediction", False),
dist_sync_on_step=True,
)
# Update config
with open_dict(self.cfg.aux_ctc):
self.cfg.aux_ctc.decoder = ctc_decoder_config
with open_dict(self.cfg.aux_ctc):
self.cfg.aux_ctc.decoding = ctc_decoding_cfg
logging.info(f"Changed tokenizer of the CTC decoder to {self.ctc_decoder.vocabulary} vocabulary.")
def change_decoding_strategy(self, decoding_cfg: DictConfig = None, decoder_type: str = None):
"""
Changes decoding strategy used during RNNT decoding process.
Args:
decoding_cfg: A config for the decoder, which is optional. If the decoding type
needs to be changed (from say Greedy to Beam decoding etc), the config can be passed here.
            decoder_type: (str) Can be set to 'rnnt' or 'ctc' to switch between the appropriate decoders in a
                model that has both RNN-T and CTC decoders. Defaults to None, in which case the RNN-T decoder is
                used. If set to 'ctc', an error is raised if 'ctc_decoder' is not an attribute of the model.
"""
if decoder_type is None or decoder_type == 'rnnt':
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(RNNTBPEDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.decoding = RNNTBPEDecoding(
decoding_cfg=decoding_cfg, decoder=self.decoder, joint=self.joint, tokenizer=self.tokenizer,
)
self.wer = RNNTBPEWER(
decoding=self.decoding,
batch_dim_index=self.wer.batch_dim_index,
use_cer=self.wer.use_cer,
log_prediction=self.wer.log_prediction,
dist_sync_on_step=True,
)
# Setup fused Joint step
if self.joint.fuse_loss_wer or (
self.decoding.joint_fused_batch_size is not None and self.decoding.joint_fused_batch_size > 0
):
self.joint.set_loss(self.loss)
self.joint.set_wer(self.wer)
self.joint.temperature = decoding_cfg.get('temperature', 1.0)
# Update config
with open_dict(self.cfg.decoding):
self.cfg.decoding = decoding_cfg
logging.info(f"Changed decoding strategy of the RNNT decoder to \n{OmegaConf.to_yaml(self.cfg.decoding)}")
elif decoder_type == 'ctc':
if not hasattr(self, 'ctc_decoding'):
raise ValueError("The model does not have the ctc_decoding module and does not support ctc decoding.")
if decoding_cfg is None:
# Assume same decoding config as before
logging.info("No `decoding_cfg` passed when changing decoding strategy, using internal config")
decoding_cfg = self.cfg.aux_ctc.decoding
# Assert the decoding config with all hyper parameters
decoding_cls = OmegaConf.structured(CTCBPEDecodingConfig)
decoding_cls = OmegaConf.create(OmegaConf.to_container(decoding_cls))
decoding_cfg = OmegaConf.merge(decoding_cls, decoding_cfg)
self.ctc_decoding = CTCBPEDecoding(decoding_cfg=decoding_cfg, tokenizer=self.tokenizer)
self.ctc_wer = WERBPE(
decoding=self.ctc_decoding,
use_cer=self.ctc_wer.use_cer,
log_prediction=self.ctc_wer.log_prediction,
dist_sync_on_step=True,
)
self.ctc_decoder.temperature = decoding_cfg.get('temperature', 1.0)
# Update config
with open_dict(self.cfg.aux_ctc.decoding):
self.cfg.aux_ctc.decoding = decoding_cfg
self.cur_decoder = "ctc"
logging.info(
f"Changed decoding strategy of the CTC decoder to \n{OmegaConf.to_yaml(self.cfg.aux_ctc.decoding)}"
)
else:
raise ValueError(f"decoder_type={decoder_type} is not supported. Supported values: [ctc,rnnt]")
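    # Usage sketch (illustrative comment, not part of the original file): switching the active
    # decoder of a hybrid model at inference time while reusing the stored decoding configs.
    #
    #     model.change_decoding_strategy(decoding_cfg=None, decoder_type="ctc")   # use the auxiliary CTC decoder
    #     model.change_decoding_strategy(decoding_cfg=None, decoder_type="rnnt")  # back to the RNN-T decoder
    #
    # Passing `decoding_cfg=None` falls back to `self.cfg.decoding` (RNN-T) or
    # `self.cfg.aux_ctc.decoding` (CTC), exactly as handled above.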
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_en_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_de_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_de_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_de_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_it_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_it_fastconformer_hybrid_large_pc/versions/1.20.0/files/stt_it_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_es_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_es_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_hr_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_hr_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_hr_fastconformer_hybrid_large_pc/versions/1.21.0/files/FastConformer-Hybrid-Transducer-CTC-BPE-v256-averaged.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ua_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ua_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ua_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_ua_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_pl_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_pl_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_pl_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_pl_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_by_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_by_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_by_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_by_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_ru_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ru_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_ru_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_fr_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_fr_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_multilingual_fastconformer_hybrid_large_pc_blend_eu",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_multilingual_fastconformer_hybrid_large_pc_blend_eu",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_multilingual_fastconformer_hybrid_large_pc_blend_eu/versions/1.21.0/files/stt_multilingual_fastconformer_hybrid_large_pc_blend_eu.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_multilingual_fastconformer_hybrid_large_pc",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_multilingual_fastconformer_hybrid_large_pc",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_multilingual_fastconformer_hybrid_large_pc/versions/1.21.0/files/stt_multilingual_fastconformer_hybrid_large_pc.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_hybrid_large_streaming_80ms",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_80ms",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_hybrid_large_streaming_80ms/versions/1.20.0/files/stt_en_fastconformer_hybrid_large_streaming_80ms.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_hybrid_large_streaming_480ms",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_480ms",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_hybrid_large_streaming_480ms/versions/1.20.0/files/stt_en_fastconformer_hybrid_large_streaming_480ms.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_hybrid_large_streaming_1040ms",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_1040ms",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_hybrid_large_streaming_1040ms/versions/1.20.0/files/stt_en_fastconformer_hybrid_large_streaming_1040ms.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="stt_en_fastconformer_hybrid_large_streaming_multi",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_multi",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_fastconformer_hybrid_large_streaming_multi/versions/1.20.0/files/stt_en_fastconformer_hybrid_large_streaming_multi.nemo",
)
results.append(model)
return results
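# Usage sketch (illustrative, not part of the original file): load one of the pretrained
# checkpoints listed in `list_available_models()` and transcribe a hypothetical audio file;
# "sample.wav" is a placeholder path.
if __name__ == "__main__":
    model = EncDecHybridRNNTCTCBPEModel.from_pretrained("stt_en_fastconformer_hybrid_large_pc")
    # Switch from the default RNN-T decoder to the auxiliary CTC decoder.
    model.change_decoding_strategy(decoding_cfg=None, decoder_type="ctc")
    print(model.transcribe(["sample.wav"], batch_size=1))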
|
NeMo-main
|
nemo/collections/asr/models/hybrid_rnnt_ctc_bpe_models.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from abc import ABC, abstractmethod
from typing import List
import torch
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.mixins import AccessMixin
from nemo.core.utils.neural_type_utils import get_io_names
from nemo.utils import logging, model_utils
from nemo.utils.cast_utils import cast_all
__all__ = ['ASRModel']
class ASRModel(ModelPT, ABC):
@abstractmethod
def transcribe(self, paths2audio_files: List[str], batch_size: int = 4, verbose: bool = True) -> List[str]:
"""
        Takes paths to audio files and returns text transcriptions.
        Args:
            paths2audio_files: paths to the audio fragments to be transcribed
            batch_size: (int) batch size to use during inference
            verbose: (bool) whether to display a tqdm progress bar
Returns:
transcription texts
"""
pass
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {'val_loss': val_loss_mean, 'val_wer': wer_num / wer_denom}
return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
wer_num = torch.stack([x['test_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['test_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {'test_loss': val_loss_mean, 'test_wer': wer_num / wer_denom}
return {'test_loss': val_loss_mean, 'log': tensorboard_logs}
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
# recursively walk the subclasses to generate pretrained model info
list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)
return list_of_models
def add_auxiliary_losses(self, loss: torch.Tensor, reset_registry: bool = False) -> torch.Tensor:
"""
Utility method to enable calculation of auxiliary losses for ASR training.
Args:
loss: The output loss value prior to addition with auxiliary losses.
reset_registry: Bool, whether to reset the AccessMixin registry after adding auxiliary losses.
Returns:
Loss tensor used for back propagation.
"""
# Add adapter auxiliary losses, if registered
if AccessMixin.is_access_enabled():
registry = AccessMixin.get_module_registry(self)
log_dict = {}
for loss_key, loss_registry in registry.items():
# Add auxiliary loss to total loss
if 'adapter_loss' in loss_registry:
loss_list = loss_registry['adapter_loss']
loss_value = sum(loss_list)
loss += loss_value
# Log current loss name and value
keys = loss_key.split(".")
key = "/".join(keys)
key = "adapter_loss/" + key
log_dict[key] = loss_value.detach()
if len(log_dict) > 0:
self.log_dict(log_dict)
if reset_registry:
AccessMixin.reset_registry(self)
# return total loss
return loss
def setup_optimization_flags(self):
"""
Utility method that must be explicitly called by the subclass in order to support optional optimization flags.
        This method is the only valid place to access self.cfg before DDP training occurs.
        The subclass may choose not to support this method, therefore all variables here must be checked via hasattr().
"""
# Skip update if nan/inf grads appear on any rank.
self._skip_nan_grad = False
if "skip_nan_grad" in self._cfg and self._cfg["skip_nan_grad"]:
self._skip_nan_grad = self._cfg["skip_nan_grad"]
def on_after_backward(self):
"""
        Zero out the gradients if any of them contains NaN or Inf values.
"""
super().on_after_backward()
if hasattr(self, '_skip_nan_grad') and self._skip_nan_grad:
device = next(self.parameters()).device
valid_gradients = torch.tensor([1], device=device, dtype=torch.float32)
# valid_gradients = True
for param_name, param in self.named_parameters():
if param.grad is not None:
is_not_nan_or_inf = not (torch.isnan(param.grad).any() or torch.isinf(param.grad).any())
if not is_not_nan_or_inf:
valid_gradients = valid_gradients * 0
break
if torch.distributed.is_initialized():
torch.distributed.all_reduce(valid_gradients, op=torch.distributed.ReduceOp.MIN)
if valid_gradients < 1:
logging.warning(f'detected inf or nan values in gradients! Setting gradients to zero.')
self.zero_grad()
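# Configuration sketch (illustrative comment, not part of the original file): `skip_nan_grad`
# is read from the model config by `setup_optimization_flags()`, so enabling the behaviour
# above is a config-level change, e.g. in the training YAML:
#
#     model:
#       skip_nan_grad: true
#
# When enabled, `on_after_backward` zeroes the gradients on every rank whenever any rank
# observes a NaN/Inf gradient (synchronized through the all-reduce MIN above).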
class ExportableEncDecModel(Exportable):
"""
    Simple utility mix-in to export models that consist of an encoder/decoder pair
plus pre/post processor, but have to be exported as encoder/decoder pair only
(covers most ASR classes)
"""
@property
def input_module(self):
return self.encoder
@property
def output_module(self):
return self.decoder
@property
def output_names(self):
otypes = self.output_module.output_types
if getattr(self.input_module, 'export_cache_support', False):
in_types = self.input_module.output_types
otypes = {n: t for (n, t) in list(otypes.items())[:1]}
for (n, t) in list(in_types.items())[1:]:
otypes[n] = t
return get_io_names(otypes, self.disabled_deployment_output_names)
def forward_for_export(
self, input, length=None, cache_last_channel=None, cache_last_time=None, cache_last_channel_len=None
):
"""
This forward is used when we need to export the model to ONNX format.
        Inputs cache_last_channel and cache_last_time need to be passed for exporting streaming models.
Args:
input: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps.
length: Vector of length B, that contains the individual lengths of the audio sequences.
cache_last_channel: Tensor of shape [N, B, T, H] which contains the cache for last channel layers
cache_last_time: Tensor of shape [N, B, H, T] which contains the cache for last time layers
N is the number of such layers which need caching, B is batch size, H is the hidden size of activations,
and T is the length of the cache
Returns:
the output of the model
"""
enc_fun = getattr(self.input_module, 'forward_for_export', self.input_module.forward)
if cache_last_channel is None:
encoder_output = enc_fun(audio_signal=input, length=length)
if isinstance(encoder_output, tuple):
encoder_output = encoder_output[0]
else:
encoder_output, length, cache_last_channel, cache_last_time, cache_last_channel_len = enc_fun(
audio_signal=input,
length=length,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
)
dec_fun = getattr(self.output_module, 'forward_for_export', self.output_module.forward)
ret = dec_fun(encoder_output=encoder_output)
if isinstance(ret, tuple):
ret = ret[0]
if cache_last_channel is not None:
ret = (ret, length, cache_last_channel, cache_last_time, cache_last_channel_len)
return cast_all(ret, from_dtype=torch.float16, to_dtype=torch.float32)
@property
def disabled_deployment_input_names(self):
return self.encoder.disabled_deployment_input_names
@property
def disabled_deployment_output_names(self):
return self.encoder.disabled_deployment_output_names
def set_export_config(self, args):
if 'cache_support' in args:
enable = bool(args['cache_support'])
self.encoder.export_cache_support = enable
logging.info(f"Caching support enabled: {enable}")
self.encoder.setup_streaming_params()
super().set_export_config(args)
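# Export sketch (illustrative, not part of the original file): enabling cache-aware inputs and
# outputs before export. The checkpoint name and output filename are assumptions; any
# cache-aware streaming Conformer model should behave similarly.
if __name__ == "__main__":
    model = ASRModel.from_pretrained("stt_en_fastconformer_hybrid_large_streaming_80ms")
    model.eval()
    # Adds cache_last_channel / cache_last_time tensors to the exported encoder I/O.
    model.set_export_config({"cache_support": "1"})
    model.export("asr_model.onnx")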
|
NeMo-main
|
nemo/collections/asr/models/asr_model.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import torch
from omegaconf import MISSING, DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from torch.nn.utils.rnn import pad_sequence
from nemo.collections.asr.data.audio_to_text_dali import DALIOutputs
from nemo.collections.asr.data.audio_to_text_dataset import get_audio_to_text_bpe_dataset_from_config
from nemo.collections.asr.data.text_to_text import (
TextOrAudioToTextBatch,
TextToTextBatch,
TextToTextDataset,
TextToTextIterableDataset,
)
from nemo.collections.asr.models.asr_model import ASRModel
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.collections.asr.models.hybrid_rnnt_ctc_bpe_models import EncDecHybridRNNTCTCBPEModel
from nemo.collections.asr.models.rnnt_bpe_models import EncDecRNNTBPEModel
from nemo.collections.asr.modules.conformer_encoder import ConformerEncoder
from nemo.collections.asr.parts.preprocessing.features import clean_spectrogram_batch, normalize_batch
from nemo.collections.asr.parts.submodules.batchnorm import replace_bn_with_fused_bn_all
from nemo.collections.common.data import ConcatDataset, ConcatMapDataset
from nemo.collections.tts.models import FastPitchModel, SpectrogramEnhancerModel
from nemo.core.classes import Dataset, typecheck
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
from nemo.utils.enum import PrettyStrEnum
from nemo.utils.exceptions import NeMoBaseException
def _fuse_bn_in_conformer(asr_model: ASRModel):
"""
    Replaces BatchNorm with Fused BatchNorm in the Conformer encoder and fixes the model config in place.
    Expects the `encoder` module to exist and to be of type ConformerEncoder.
"""
logging.info("Replacing BatchNorm with Fused BatchNorm")
if not hasattr(asr_model, "encoder"):
raise NotImplementedError("No encoder found in ASR Model, replacement not supported")
if not isinstance(asr_model.encoder, ConformerEncoder):
raise NotImplementedError(f"Unsupported encoder type: {type(asr_model.encoder)}")
replace_bn_with_fused_bn_all(asr_model.encoder)
if "conv_norm_type" not in asr_model.cfg.encoder:
# old CTC models from NGC don't have such param
logging.warning("conv_norm_type not in encoder config, adding parameter")
with open_dict(asr_model.cfg):
asr_model.cfg.encoder.conv_norm_type = "fused_batch_norm"
else:
asr_model.cfg.encoder.conv_norm_type = "fused_batch_norm"
@dataclass
class TextDataConfig:
"""
Text dataset subconfig for text-only dataset
"""
manifest_filepath: Any = MISSING # actual Union[str, List[str]], but this type is not supported by OmegaConf
speakers_filepath: Any = MISSING
min_words: int = 1
max_words: int = 45 # 45 - recommended value, ~16.7 sec for LibriSpeech
tokenizer_workers: int = 1
asr_tts_sampling_technique: Optional[str] = None
asr_tts_sampling_temperature: Optional[int] = None
asr_tts_sampling_probabilities: Optional[List[float]] = None
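# Config sketch (illustrative comment, not part of the original file): the fields of
# `TextDataConfig` map onto the `text_data` sub-section of the training dataset config,
# e.g. in YAML (all paths below are hypothetical placeholders):
#
#     train_ds:
#       text_data:
#         manifest_filepath: /data/text_only_manifest.json
#         speakers_filepath: /data/speakers.txt
#         min_words: 1
#         max_words: 45
#         tokenizer_workers: 1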
class ASRWithTTSModel(ASRModel):
"""
    Hybrid ASR-TTS model: a transparent wrapper for an ASR model
    with a frozen pretrained text-to-spectrogram model, which allows using text-only data for training/finetuning.
    Text-only data can be mixed with audio-text pairs.
"""
asr_model: Union[EncDecRNNTBPEModel, EncDecCTCModelBPE, EncDecHybridRNNTCTCBPEModel]
tts_model: FastPitchModel
enhancer_model: Optional[SpectrogramEnhancerModel]
class ASRModelTypes(PrettyStrEnum):
"""
Supported ASR types, needed for training from scratch
"""
RNNT_BPE = "rnnt_bpe"
CTC_BPE = "ctc_bpe"
HYBRID_RNNT_CTC_BPE = "hybrid_rnnt_ctc_bpe"
@classmethod
def from_asr_model(cls, model: Any):
if isinstance(model, EncDecRNNTBPEModel):
return cls.RNNT_BPE
if isinstance(model, EncDecCTCModelBPE):
return cls.CTC_BPE
if isinstance(model, EncDecHybridRNNTCTCBPEModel):
return cls.HYBRID_RNNT_CTC_BPE
raise ValueError(f"Unsupported model type: {type(model)}")
def get_asr_cls(self):
if self == self.RNNT_BPE:
return EncDecRNNTBPEModel
if self == self.CTC_BPE:
return EncDecCTCModelBPE
if self == self.HYBRID_RNNT_CTC_BPE:
return EncDecHybridRNNTCTCBPEModel
raise NotImplementedError(f"Not implemented for value {self.value}")
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
return []
@classmethod
def _check_config(cls, cfg: DictConfig):
"""
Check that all required fields are present in config
Structured configs are not compatible with model serialization, so we check fields manually
"""
expected_fields = [
# asr
"asr_model",
"asr_model_path",
"asr_model_fuse_bn",
"asr_model_type",
# tts
"tts_model",
"tts_model_path",
# enhancer
"enhancer_model_path",
"enhancer_model",
]
for field in expected_fields:
if field not in cfg:
raise NeMoBaseException(f"Field {field} is required in config (possibly should be None/null)")
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self._full_init_guard = False
self._check_config(cfg) # check all required keys are in config
# setup datasets and optimizer after model is fully initialized
# since it's done automatically, remove options from config
cfg = copy.deepcopy(cfg) # copy to avoid modifying original config
with open_dict(cfg):
train_ds_cfg = cfg.pop("train_ds", None)
validation_ds_cfg = cfg.pop("validation_ds", None)
test_ds_cfg = cfg.pop("test_ds", None)
optim_cfg = cfg.pop("optim", None)
super().__init__(cfg, trainer=trainer)
# tts model
if cfg.tts_model is not None:
self.register_nemo_submodule("tts_model", config_field="tts_model", model=FastPitchModel(cfg.tts_model))
else:
if cfg.tts_model_path is None:
raise NeMoBaseException("Either tts_model or tts_model_path should be provided")
self.register_nemo_submodule(
"tts_model",
config_field="tts_model",
model=FastPitchModel.restore_from(f"{cfg.tts_model_path}", map_location=torch.device("cpu")),
)
self.tts_model.freeze() # tts model should be always frozen
if cfg.asr_model is not None:
self.asr_model_type = self.ASRModelTypes(cfg.asr_model_type) # convert to enum
self.register_nemo_submodule(
"asr_model", config_field="asr_model", model=self.asr_model_type.get_asr_cls()(cfg.asr_model)
)
else:
if cfg.asr_model_path is None:
raise NeMoBaseException("Either asr_model or asr_model_path should be provided")
self.register_nemo_submodule(
"asr_model",
config_field="asr_model",
model=ASRModel.restore_from(f"{cfg.asr_model_path}", map_location=torch.device("cpu")),
)
self.asr_model_type = self.ASRModelTypes.from_asr_model(self.asr_model)
self.cfg.asr_model_type = f"{self.asr_model_type}" # save to config
# replace BatchNorm with FusedBatchNorm
if cfg.asr_model_fuse_bn:
_fuse_bn_in_conformer(self.asr_model)
self.cfg.asr_model_fuse_bn = False # no need to fuse anymore
if cfg.enhancer_model is not None:
self.register_nemo_submodule(
"enhancer_model", config_field="enhancer_model", model=SpectrogramEnhancerModel(cfg.enhancer_model)
)
elif cfg.enhancer_model_path is not None:
self.register_nemo_submodule(
"enhancer_model",
config_field="enhancer_model",
model=SpectrogramEnhancerModel.restore_from(cfg.enhancer_model_path, map_location=torch.device("cpu")),
)
else:
self.enhancer_model = None
self._full_init_guard = True
# initialize optimizer and datasets, asr/tts models are initialized here
if optim_cfg:
with open_dict(self.cfg):
self.cfg.optim = optim_cfg
self.setup_optimization(optim_config=optim_cfg)
if train_ds_cfg:
with open_dict(self.cfg):
self.cfg.train_ds = train_ds_cfg
self.setup_training_data(train_data_config=train_ds_cfg)
if validation_ds_cfg:
with open_dict(self.cfg):
self.cfg.validation_ds = validation_ds_cfg
self.setup_multiple_validation_data(val_data_config=validation_ds_cfg)
if test_ds_cfg:
with open_dict(self.cfg):
self.cfg.test_ds = test_ds_cfg
self.setup_test_data(test_data_config=test_ds_cfg)
@classmethod
def from_asr_config(
cls,
asr_cfg: DictConfig,
asr_model_type: Union[str, ASRModelTypes],
tts_model_path: Union[str, Path],
enhancer_model_path: Optional[Union[str, Path]] = None,
trainer: Trainer = None,
):
"""
Method to construct model from ASR config for training from scratch
"""
model_type = cls.ASRModelTypes(asr_model_type)
cfg = DictConfig(
dict(
asr_model_path=None,
asr_model=None,
asr_model_type=f"{model_type}",
asr_model_fuse_bn=False, # for training from scratch always should be False
tts_model_path=f"{tts_model_path}",
tts_model=None,
enhancer_model_path=f"{enhancer_model_path}" if enhancer_model_path is not None else None,
enhancer_model=None,
train_ds=None,
validation_ds=None,
test_ds=None,
optim=None,
)
)
asr_cfg = copy.deepcopy(asr_cfg) # copy not to affect original config
with open_dict(asr_cfg):
for subconfig_path in ["train_ds", "validation_ds", "test_ds", "optim"]:
if subconfig_path in asr_cfg:
cfg[subconfig_path] = asr_cfg.pop(subconfig_path)
cfg.asr_model = asr_cfg
return cls(cfg=cfg, trainer=trainer)
@classmethod
def from_pretrained_models(
cls,
asr_model_path: Union[str, Path],
tts_model_path: Union[str, Path],
enhancer_model_path: Optional[Union[str, Path]] = None,
asr_model_fuse_bn: bool = False,
cfg: Optional[DictConfig] = None,
trainer: Optional[Trainer] = None,
):
"""
Load model from pretrained ASR and TTS models
Args:
asr_model_path: path to .nemo ASR model checkpoint
tts_model_path: path to .nemo TTS model checkpoint
enhancer_model_path: path to .nemo enhancer model checkpoint
asr_model_fuse_bn: automatically fuse batchnorm layers in ASR model
cfg: optional config for hybrid model
trainer: Pytorch-Lightning trainer
Returns:
ASRWithTTSModel instance
"""
if cfg is None:
cfg = DictConfig(
dict(
asr_model_path=f"{asr_model_path}",
asr_model=None,
tts_model_path=f"{tts_model_path}",
tts_model=None,
enhancer_model_path=f"{enhancer_model_path}" if enhancer_model_path is not None else None,
enhancer_model=None,
asr_model_type=None,
asr_model_fuse_bn=asr_model_fuse_bn,
train_ds=None,
validation_ds=None,
test_ds=None,
optim=None,
)
)
else:
cfg = copy.deepcopy(cfg) # copy to avoid modifying original config
cfg.tts_model_path = f"{tts_model_path}"
cfg.asr_model_path = f"{asr_model_path}"
cfg.enhancer_model_path = f"{enhancer_model_path}" if enhancer_model_path is not None else None
return ASRWithTTSModel(cfg, trainer=trainer)
def __setattr__(self, name, value):
# pytorch-lightning magic, allows to call *_step on asr_model
if name == "_current_fx_name" and self._full_init_guard:
self.asr_model._current_fx_name = value # need to make logging inside asr_model work
return super().__setattr__(name, value)
def setup_optimization(
self, optim_config: Optional[Union[DictConfig, Dict]] = None, optim_kwargs: Optional[Dict[str, Any]] = None,
):
"""
        Set up the optimizer and scheduler, and ensure the TTS model is frozen.
        The optimizer and scheduler are also attached to the ASR model, to allow `training_step` on the ASR model.
"""
self.tts_model.freeze()
optimizer, scheduler = super().setup_optimization(optim_config=optim_config, optim_kwargs=optim_kwargs)
# set ASR model optimizer/scheduler to allow training_step on asr_model
self.asr_model._optimizer = optimizer
self.asr_model._scheduler = scheduler
return optimizer, scheduler
def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]):
"""Setup validation data for ASR model"""
return self.asr_model.setup_validation_data(val_data_config)
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
"""Validation epoch end hook for ASR model"""
return self.asr_model.multi_validation_epoch_end(outputs=outputs, dataloader_idx=dataloader_idx)
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
"""Test epoch end hook for ASR model"""
return self.asr_model.multi_test_epoch_end(outputs=outputs, dataloader_idx=dataloader_idx)
def transcribe(self, paths2audio_files: List[str], batch_size: int = 4, verbose: bool = True) -> List[str]:
"""Transcribe audio data using ASR model"""
return self.asr_model.transcribe(paths2audio_files=paths2audio_files, batch_size=batch_size, verbose=verbose)
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict]):
"""Setup multiple validation data for ASR model"""
self.asr_model.setup_multiple_validation_data(val_data_config)
def setup_test_data(self, test_data_config: Union[DictConfig, Dict]):
"""Setup test data for ASR model"""
self.asr_model.setup_test_data(test_data_config)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
"""Setup multiple test data for ASR Model"""
return self.asr_model.setup_multiple_test_data(test_data_config)
def save_asr_model_to(self, save_path: str):
"""Save ASR model separately"""
return self.asr_model.save_to(save_path=save_path)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
"""Validation step, forward to ASR model"""
loss = self.asr_model.validation_step(batch=batch, batch_idx=batch_idx, dataloader_idx=dataloader_idx)
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(loss)
else:
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""Validation epoch end hook, forward to ASR model"""
return self.asr_model.on_validation_epoch_end()
def on_test_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""Test epoch end hook, forward to ASR model"""
return self.asr_model.on_test_epoch_end()
    def val_dataloader(self):
        """Get validation dataloader from ASR model"""
return self.asr_model.val_dataloader()
def unfreeze(self) -> None:
"""Unfreeze the ASR model, keep TTS model frozen."""
super().unfreeze()
self.tts_model.freeze() # tts model should be always frozen
def on_fit_start(self):
"""Call asr_model on_fit_start hook, ensure TTS model is frozen"""
self.asr_model.on_fit_start()
self.tts_model.freeze()
def train(self, mode: bool = True):
"""Train mode, ensure TTS model is frozen"""
super().train(mode)
self.tts_model.eval()
return self
def _get_tts_spectrogram(
self, tts_texts: torch.Tensor, speakers: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Get TTS spectrogram from text and speaker ids"""
with torch.no_grad():
spectrogram, spectrogram_len, *_ = self.tts_model(text=tts_texts, durs=None, pitch=None, speaker=speakers)
if self.enhancer_model is not None:
# apply enhancer
with typecheck.disable_checks():
                    # spectrogram_len is of TokenDurationType, the enhancer requires LengthsType
# TODO: fix FastPitch model to return LengthsType
spectrogram = self.enhancer_model.forward(input_spectrograms=spectrogram, lengths=spectrogram_len)
spectrogram, *_ = normalize_batch(spectrogram, spectrogram_len, self.asr_model.cfg.preprocessor.normalize)
return spectrogram, spectrogram_len
def _get_batch_spect(self, batch: Union[TextToTextBatch, TextOrAudioToTextBatch, tuple]):
"""Get batch with spectrograms from text-only, audio-text or mixed batch data"""
if isinstance(batch, TextToTextBatch):
spectrogram, spectrogram_len = self._get_tts_spectrogram(batch.tts_texts, batch.speakers)
transcript = batch.transcripts
transcript_len = batch.transcript_lengths
elif isinstance(batch, TextOrAudioToTextBatch):
tts_spectrogram, tts_spectrogram_len = self._get_tts_spectrogram(batch.tts_texts, batch.speakers)
asr_spectrogram, asr_spectrogram_len = self.asr_model.preprocessor(
input_signal=batch.audio_signals, length=batch.audio_signal_lengths,
)
spectrogram = pad_sequence(
[
x.squeeze(0)
for x in itertools.chain(
torch.tensor_split(tts_spectrogram.transpose(1, 2), tts_spectrogram.size(0)),
torch.tensor_split(asr_spectrogram.transpose(1, 2), asr_spectrogram.size(0)),
)
],
batch_first=True,
padding_value=0.0,
).transpose(1, 2)
spectrogram_len = torch.cat([tts_spectrogram_len, asr_spectrogram_len], dim=0)
transcript = batch.transcripts
transcript_len = batch.transcript_lengths
else:
audio_signal, audio_signal_len, transcript, transcript_len, *_ = batch # audio batch: 4 or 5 elements
spectrogram, spectrogram_len = self.asr_model.preprocessor(
input_signal=audio_signal, length=audio_signal_len
)
spectrogram = clean_spectrogram_batch(spectrogram, spectrogram_len)
return spectrogram.detach(), spectrogram_len.detach(), transcript, transcript_len
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
"""
Setup training data from config: text-only, audio-text or mixed data.
"""
if train_data_config is None:
logging.warning("No training data")
return
self._update_dataset_config(dataset_name='train', config=train_data_config)
asr_dataset = get_audio_to_text_bpe_dataset_from_config(
train_data_config,
local_rank=self.local_rank,
global_rank=self.global_rank,
world_size=self.world_size,
tokenizer=self.asr_model.tokenizer,
preprocessor_cfg=self.asr_model.cfg.get("preprocessor", None),
)
dataset_iterable = True
if asr_dataset is not None and isinstance(asr_dataset, Dataset):
# asr_dataset is map-style, for mixing datasets use map-style text-to-text dataset
dataset_iterable = False
if train_data_config.get("text_data") is not None:
tts_dataset = self._setup_text_dataset_from_config(train_data_config, iterable=dataset_iterable)
else:
tts_dataset = None
if tts_dataset and asr_dataset:
text_data_config: TextDataConfig = cast(
TextDataConfig, OmegaConf.merge(OmegaConf.structured(TextDataConfig), train_data_config.text_data)
)
concat_kwargs = dict()
if text_data_config.asr_tts_sampling_technique is not None:
concat_kwargs["sampling_technique"] = text_data_config.asr_tts_sampling_technique
if text_data_config.asr_tts_sampling_temperature is not None:
concat_kwargs["sampling_temperature"] = text_data_config.asr_tts_sampling_temperature
if text_data_config.asr_tts_sampling_probabilities:
concat_kwargs["sampling_probabilities"] = text_data_config.asr_tts_sampling_probabilities
if dataset_iterable:
dataset = ConcatDataset(datasets=[asr_dataset, tts_dataset], **concat_kwargs)
else:
dataset = ConcatMapDataset(datasets=[asr_dataset, tts_dataset], **concat_kwargs)
else:
dataset = tts_dataset or asr_dataset
if dataset is None:
return
if tts_dataset:
collate_fn = tts_dataset.collate_fn
else:
if hasattr(asr_dataset, 'collate_fn'):
collate_fn = asr_dataset.collate_fn
elif hasattr(asr_dataset.datasets[0], 'collate_fn'):
# support datasets that are lists of entries
collate_fn = asr_dataset.datasets[0].collate_fn
else:
# support datasets that are lists of lists
collate_fn = asr_dataset.datasets[0].datasets[0].collate_fn
shuffle = train_data_config.get("shuffle", True) and not dataset_iterable
self._train_dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=train_data_config['batch_size'],
collate_fn=collate_fn,
drop_last=train_data_config.get('drop_last', False),
shuffle=shuffle,
num_workers=train_data_config.get('num_workers', 0),
pin_memory=train_data_config.get('pin_memory', False),
)
def _setup_text_dataset_from_config(
self, train_data_config: DictConfig, iterable=True
) -> Union[TextToTextDataset, TextToTextIterableDataset]:
"""
Construct text-to-text (text-only) dataset from config.
Args:
train_data_config: config
            iterable: construct iterable-style dataset if True, otherwise map-style
Returns:
text-to-text dataset of TextToTextDataset or TextToTextIterableDataset type
"""
text_data_config: TextDataConfig = cast(
TextDataConfig, OmegaConf.merge(OmegaConf.structured(TextDataConfig), train_data_config.text_data)
)
if iterable:
textonly_ds = TextToTextIterableDataset(
manifest_filepath=text_data_config.manifest_filepath,
speakers_filepath=text_data_config.speakers_filepath,
asr_tokenizer=self.asr_model.tokenizer,
asr_use_start_end_token=train_data_config.get("use_start_end_token", False),
tts_parser=self.tts_model.parser,
tts_text_pad_id=self.tts_model.vocab.pad,
tts_text_normalizer=self.tts_model.normalizer,
tts_text_normalizer_call_kwargs=self.tts_model.text_normalizer_call_kwargs,
min_words=text_data_config.min_words,
max_words=text_data_config.max_words,
tokenizer_workers=text_data_config.tokenizer_workers,
num_parts=self.world_size,
current_part_index=self.global_rank,
)
else:
textonly_ds = TextToTextDataset(
manifest_filepath=text_data_config.manifest_filepath,
speakers_filepath=text_data_config.speakers_filepath,
asr_tokenizer=self.asr_model.tokenizer,
asr_use_start_end_token=train_data_config.get("use_start_end_token", False),
tts_parser=self.tts_model.parser,
tts_text_pad_id=self.tts_model.vocab.pad,
tts_text_normalizer=self.tts_model.normalizer,
tts_text_normalizer_call_kwargs=self.tts_model.text_normalizer_call_kwargs,
min_words=text_data_config.min_words,
max_words=text_data_config.max_words,
tokenizer_workers=text_data_config.tokenizer_workers,
)
return textonly_ds
def training_step(self, batch: Union[TextOrAudioToTextBatch, TextToTextBatch, DALIOutputs, tuple], batch_nb: int):
"""
Training step for ASR-TTS model.
- construct spectrogram for the batch (from text - using TTS model, from audio - using ASR preprocessor)
- call training_step on ASR model
"""
assert not self.tts_model.training
if isinstance(batch, DALIOutputs):
return self.asr_model.training_step(batch=batch, batch_nb=batch_nb)
with torch.no_grad():
spectrogram, spectrogram_len, transcript, transcript_len = self._get_batch_spect(batch)
# TODO: maybe support precomputed without DALIOutputs
return self.asr_model.training_step(
batch=DALIOutputs(
dict(
processed_signal=spectrogram,
processed_signal_len=spectrogram_len,
transcript=transcript,
transcript_len=transcript_len,
)
),
batch_nb=batch_nb,
)
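# Usage sketch (illustrative, not part of the original file): assembling the hybrid model from
# pretrained checkpoints. All file paths below are hypothetical placeholders.
if __name__ == "__main__":
    model = ASRWithTTSModel.from_pretrained_models(
        asr_model_path="asr_model.nemo",
        tts_model_path="fastpitch.nemo",
        enhancer_model_path=None,
    )
    # After fine-tuning with text-only data, the wrapped ASR model can be saved on its own.
    model.save_asr_model_to("asr_model_finetuned.nemo")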
|
NeMo-main
|
nemo/collections/asr/models/hybrid_asr_tts_models.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import pickle as pkl
import tempfile
from collections import OrderedDict
from pathlib import Path
from statistics import mode
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, open_dict
from pyannote.core import Annotation
from pyannote.metrics.diarization import DiarizationErrorRate
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.utilities import rank_zero_only
from tqdm import tqdm
from nemo.collections.asr.data.audio_to_diar_label import AudioToSpeechMSDDInferDataset, AudioToSpeechMSDDTrainDataset
from nemo.collections.asr.metrics.der import score_labels
from nemo.collections.asr.metrics.multi_binary_acc import MultiBinaryAccuracy
from nemo.collections.asr.models import ClusteringDiarizer
from nemo.collections.asr.models.asr_model import ExportableEncDecModel
from nemo.collections.asr.models.clustering_diarizer import (
_MODEL_CONFIG_YAML,
_SPEAKER_MODEL,
_VAD_MODEL,
get_available_model_names,
)
from nemo.collections.asr.models.configs.diarizer_config import NeuralDiarizerInferenceConfig
from nemo.collections.asr.models.label_models import EncDecSpeakerLabelModel
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.asr.parts.utils.speaker_utils import (
audio_rttm_map,
get_embs_and_timestamps,
get_id_tup_dict,
get_scale_mapping_argmat,
get_uniq_id_list_from_manifest,
labels_to_pyannote_object,
make_rttm_with_overlap,
parse_scale_configs,
rttm_to_labels,
)
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import AudioSignal, LengthsType, NeuralType
from nemo.core.neural_types.elements import ProbsType
from nemo.utils import logging
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
__all__ = ['EncDecDiarLabelModel', 'ClusterEmbedding', 'NeuralDiarizer']
class EncDecDiarLabelModel(ModelPT, ExportableEncDecModel):
"""
    Encoder-decoder class for the multiscale diarization decoder (MSDD). This model class creates training and
    validation methods for setting up the data and performing the model forward pass.
This model class expects config dict for:
* preprocessor
* msdd_model
* speaker_model
"""
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="diar_msdd_telephonic",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/diar_msdd_telephonic/versions/1.0.1/files/diar_msdd_telephonic.nemo",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:diar_msdd_telephonic",
)
result.append(model)
return result
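    # Usage sketch (illustrative comment, not part of the original file): the checkpoint listed
    # above can be fetched directly by name via the ModelPT API, e.g.
    #
    #     msdd_model = EncDecDiarLabelModel.from_pretrained("diar_msdd_telephonic")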
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""
Initialize an MSDD model and the specified speaker embedding model. In this init function, training and validation datasets are prepared.
"""
self._trainer = trainer if trainer else None
self.cfg_msdd_model = cfg
if self._trainer:
self._init_segmentation_info()
self.world_size = trainer.num_nodes * trainer.num_devices
self.emb_batch_size = self.cfg_msdd_model.emb_batch_size
self.pairwise_infer = False
else:
self.world_size = 1
self.pairwise_infer = True
super().__init__(cfg=self.cfg_msdd_model, trainer=trainer)
window_length_in_sec = self.cfg_msdd_model.diarizer.speaker_embeddings.parameters.window_length_in_sec
if isinstance(window_length_in_sec, int) or len(window_length_in_sec) <= 1:
raise ValueError("window_length_in_sec should be a list containing multiple segment (window) lengths")
else:
self.cfg_msdd_model.scale_n = len(window_length_in_sec)
self.cfg_msdd_model.msdd_module.scale_n = self.cfg_msdd_model.scale_n
self.scale_n = self.cfg_msdd_model.scale_n
self.preprocessor = EncDecSpeakerLabelModel.from_config_dict(self.cfg_msdd_model.preprocessor)
self.frame_per_sec = int(1 / self.preprocessor._cfg.window_stride)
self.msdd = EncDecDiarLabelModel.from_config_dict(self.cfg_msdd_model.msdd_module)
if trainer is not None:
self._init_speaker_model()
self.add_speaker_model_config(cfg)
else:
self.msdd._speaker_model = EncDecSpeakerLabelModel.from_config_dict(cfg.speaker_model_cfg)
# Call `self.save_hyperparameters` in modelPT.py again since cfg should contain speaker model's config.
self.save_hyperparameters("cfg")
self.loss = instantiate(self.cfg_msdd_model.loss)
self._accuracy_test = MultiBinaryAccuracy()
self._accuracy_train = MultiBinaryAccuracy()
self._accuracy_valid = MultiBinaryAccuracy()
def add_speaker_model_config(self, cfg):
"""
Add config dictionary of the speaker model to the model's config dictionary. This is required to
save and load speaker model with MSDD model.
Args:
            cfg (DictConfig): DictConfig type variable that contains hyperparameters of MSDD model.
"""
with open_dict(cfg):
cfg_cp = copy.copy(self.msdd._speaker_model.cfg)
cfg.speaker_model_cfg = cfg_cp
del cfg.speaker_model_cfg.train_ds
del cfg.speaker_model_cfg.validation_ds
def _init_segmentation_info(self):
"""Initialize segmentation settings: window, shift and multiscale weights.
"""
self._diarizer_params = self.cfg_msdd_model.diarizer
self.multiscale_args_dict = parse_scale_configs(
self._diarizer_params.speaker_embeddings.parameters.window_length_in_sec,
self._diarizer_params.speaker_embeddings.parameters.shift_length_in_sec,
self._diarizer_params.speaker_embeddings.parameters.multiscale_weights,
)
def _init_speaker_model(self):
"""
Initialize speaker embedding model with model name or path passed through config. Note that speaker embedding model is loaded to
`self.msdd` to enable multi-gpu and multi-node training. In addition, speaker embedding model is also saved with msdd model when
`.ckpt` files are saved.
"""
model_path = self.cfg_msdd_model.diarizer.speaker_embeddings.model_path
self._diarizer_params = self.cfg_msdd_model.diarizer
if not torch.cuda.is_available():
rank_id = torch.device('cpu')
elif self._trainer:
rank_id = torch.device(self._trainer.global_rank)
else:
rank_id = None
if model_path is not None and model_path.endswith('.nemo'):
self.msdd._speaker_model = EncDecSpeakerLabelModel.restore_from(model_path, map_location=rank_id)
logging.info("Speaker Model restored locally from {}".format(model_path))
elif model_path.endswith('.ckpt'):
            # keep the speaker model under `self.msdd` so downstream code and checkpoint saving see it
            self.msdd._speaker_model = EncDecSpeakerLabelModel.load_from_checkpoint(model_path, map_location=rank_id)
logging.info("Speaker Model restored locally from {}".format(model_path))
else:
if model_path not in get_available_model_names(EncDecSpeakerLabelModel):
                logging.warning(
                    "requested model name {} is not available in pretrained models, using titanet_large instead".format(model_path)
                )
model_path = "titanet_large"
logging.info("Loading pretrained {} model from NGC".format(model_path))
self.msdd._speaker_model = EncDecSpeakerLabelModel.from_pretrained(
model_name=model_path, map_location=rank_id
)
self._speaker_params = self.cfg_msdd_model.diarizer.speaker_embeddings.parameters
def __setup_dataloader_from_config(self, config):
featurizer = WaveformFeaturizer(
sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=None
)
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
dataset = AudioToSpeechMSDDTrainDataset(
manifest_filepath=config.manifest_filepath,
emb_dir=config.emb_dir,
multiscale_args_dict=self.multiscale_args_dict,
soft_label_thres=config.soft_label_thres,
featurizer=featurizer,
window_stride=self.cfg_msdd_model.preprocessor.window_stride,
emb_batch_size=config.emb_batch_size,
pairwise_infer=False,
global_rank=self._trainer.global_rank,
)
self.data_collection = dataset.collection
collate_ds = dataset
collate_fn = collate_ds.msdd_train_collate_fn
batch_size = config['batch_size']
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=False,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def __setup_dataloader_from_config_infer(
self, config: DictConfig, emb_dict: dict, emb_seq: dict, clus_label_dict: dict, pairwise_infer=False
):
shuffle = config.get('shuffle', False)
if 'manifest_filepath' in config and config['manifest_filepath'] is None:
logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
return None
dataset = AudioToSpeechMSDDInferDataset(
manifest_filepath=config['manifest_filepath'],
emb_dict=emb_dict,
clus_label_dict=clus_label_dict,
emb_seq=emb_seq,
soft_label_thres=config.soft_label_thres,
seq_eval_mode=config.seq_eval_mode,
window_stride=self._cfg.preprocessor.window_stride,
use_single_scale_clus=False,
pairwise_infer=pairwise_infer,
)
self.data_collection = dataset.collection
collate_ds = dataset
collate_fn = collate_ds.msdd_infer_collate_fn
batch_size = config['batch_size']
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=collate_fn,
drop_last=config.get('drop_last', False),
shuffle=shuffle,
num_workers=config.get('num_workers', 0),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):
self._train_dl = self.__setup_dataloader_from_config(config=train_data_config,)
def setup_validation_data(self, val_data_layer_config: Optional[Union[DictConfig, Dict]]):
self._validation_dl = self.__setup_dataloader_from_config(config=val_data_layer_config,)
def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):
if self.pairwise_infer:
self._test_dl = self.__setup_dataloader_from_config_infer(
config=test_data_config,
emb_dict=self.emb_sess_test_dict,
emb_seq=self.emb_seq_test,
clus_label_dict=self.clus_test_label_dict,
pairwise_infer=self.pairwise_infer,
)
def setup_multiple_test_data(self, test_data_config):
"""
        MSDD does not use the multiple_test_data template. This function is a placeholder to prevent errors.
"""
return None
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
audio_eltype = AudioSignal()
return {
"features": NeuralType(('B', 'T'), audio_eltype),
"feature_length": NeuralType(('B',), LengthsType()),
"ms_seg_timestamps": NeuralType(('B', 'C', 'T', 'D'), LengthsType()),
"ms_seg_counts": NeuralType(('B', 'C'), LengthsType()),
"clus_label_index": NeuralType(('B', 'T'), LengthsType()),
"scale_mapping": NeuralType(('B', 'C', 'T'), LengthsType()),
"targets": NeuralType(('B', 'T', 'C'), ProbsType()),
}
@property
def output_types(self) -> Dict[str, NeuralType]:
return OrderedDict(
{
"probs": NeuralType(('B', 'T', 'C'), ProbsType()),
"scale_weights": NeuralType(('B', 'T', 'C', 'D'), ProbsType()),
}
)
def get_ms_emb_seq(
self, embs: torch.Tensor, scale_mapping: torch.Tensor, ms_seg_counts: torch.Tensor
) -> torch.Tensor:
"""
Reshape the given tensor and organize the embedding sequence based on the original sequence counts.
Repeat the embeddings according to the scale_mapping information so that the final embedding sequence has
the identical length for all scales.
Args:
embs (Tensor):
Merged embeddings without zero-padding in the batch. See `ms_seg_counts` for details.
Shape: (Total number of segments in the batch, emb_dim)
scale_mapping (Tensor):
The element at the m-th row and the n-th column of the scale mapping matrix indicates the (m+1)-th scale
segment index which has the closest center distance with (n+1)-th segment in the base scale.
Example:
scale_mapping_argmat[2][101] = 85
                In the above example, the 86-th segment in the 3rd scale (python index 2) is mapped to the
                102-th segment in the base scale. Longer segments are therefore repeated more often, since
                multiple base-scale segments (the base scale has the shortest length) fall into the range of a
                single longer segment. At the same time, each row contains N indices, where N is the number of
                segments in the base scale (i.e., the finest scale).
Shape: (batch_size, scale_n, self.diar_window_length)
ms_seg_counts (Tensor):
Cumulative sum of the number of segments in each scale. This information is needed to reconstruct
the multi-scale input matrix during forward propagating.
Example: `batch_size=3, scale_n=6, emb_dim=192`
ms_seg_counts =
[[8, 9, 12, 16, 25, 51],
[11, 13, 14, 17, 25, 51],
[ 9, 9, 11, 16, 23, 50]]
In this function, `ms_seg_counts` is used to get the actual length of each embedding sequence without
zero-padding.
Returns:
ms_emb_seq (Tensor):
Multi-scale embedding sequence that is mapped, matched and repeated. The longer scales are less repeated,
while shorter scales are more frequently repeated following the scale mapping tensor.
"""
scale_n, batch_size = scale_mapping[0].shape[0], scale_mapping.shape[0]
split_emb_tup = torch.split(embs, ms_seg_counts.view(-1).tolist(), dim=0)
batch_emb_list = [split_emb_tup[i : i + scale_n] for i in range(0, len(split_emb_tup), scale_n)]
ms_emb_seq_list = []
for batch_idx in range(batch_size):
feats_list = []
for scale_index in range(scale_n):
repeat_mat = scale_mapping[batch_idx][scale_index]
feats_list.append(batch_emb_list[batch_idx][scale_index][repeat_mat, :])
repp = torch.stack(feats_list).permute(1, 0, 2)
ms_emb_seq_list.append(repp)
ms_emb_seq = torch.stack(ms_emb_seq_list)
return ms_emb_seq
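    # Shape sketch (illustrative comment, not part of the original file): `feats_list` holds one
    # (T, emb_dim) tensor per scale, where T is the number of base-scale segments (the row length
    # of `scale_mapping`). After stacking over scales and over the batch, the returned tensor is
    #
    #     ms_emb_seq.shape == (batch_size, T, scale_n, emb_dim)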
@torch.no_grad()
def get_cluster_avg_embs_model(
self, embs: torch.Tensor, clus_label_index: torch.Tensor, ms_seg_counts: torch.Tensor, scale_mapping
) -> torch.Tensor:
"""
Calculate the cluster-average speaker embedding based on the ground-truth speaker labels (i.e., cluster labels).
Args:
embs (Tensor):
Merged embeddings without zero-padding in the batch. See `ms_seg_counts` for details.
Shape: (Total number of segments in the batch, emb_dim)
clus_label_index (Tensor):
Merged ground-truth cluster labels from all scales with zero-padding. Each scale's index can be
retrieved by using segment index in `ms_seg_counts`.
Shape: (batch_size, maximum total segment count among the samples in the batch)
ms_seg_counts (Tensor):
Cumulative sum of the number of segments in each scale. This information is needed to reconstruct
multi-scale input tensors during forward propagating.
Example: `batch_size=3, scale_n=6, emb_dim=192`
ms_seg_counts =
[[8, 9, 12, 16, 25, 51],
[11, 13, 14, 17, 25, 51],
[ 9, 9, 11, 16, 23, 50]]
Counts of merged segments: (121, 131, 118)
embs has shape of (370, 192)
clus_label_index has shape of (3, 131)
Shape: (batch_size, scale_n)
Returns:
ms_avg_embs (Tensor):
Multi-scale cluster-average speaker embedding vectors. These embedding vectors are used as reference for
each speaker to predict the speaker label for the given multi-scale embedding sequences.
Shape: (batch_size, scale_n, emb_dim, self.num_spks_per_model)
"""
scale_n, batch_size = scale_mapping[0].shape[0], scale_mapping.shape[0]
split_emb_tup = torch.split(embs, ms_seg_counts.view(-1).tolist(), dim=0)
batch_emb_list = [split_emb_tup[i : i + scale_n] for i in range(0, len(split_emb_tup), scale_n)]
ms_avg_embs_list = []
for batch_idx in range(batch_size):
oracle_clus_idx = clus_label_index[batch_idx]
max_seq_len = sum(ms_seg_counts[batch_idx])
clus_label_index_batch = torch.split(oracle_clus_idx[:max_seq_len], ms_seg_counts[batch_idx].tolist())
session_avg_emb_set_list = []
for scale_index in range(scale_n):
spk_set_list = []
for idx in range(self.cfg_msdd_model.max_num_of_spks):
_where = (clus_label_index_batch[scale_index] == idx).clone().detach()
if not torch.any(_where):
avg_emb = torch.zeros(self.msdd._speaker_model._cfg.decoder.emb_sizes).to(embs.device)
else:
avg_emb = torch.mean(batch_emb_list[batch_idx][scale_index][_where], dim=0)
spk_set_list.append(avg_emb)
session_avg_emb_set_list.append(torch.stack(spk_set_list))
session_avg_emb_set = torch.stack(session_avg_emb_set_list)
ms_avg_embs_list.append(session_avg_emb_set)
ms_avg_embs = torch.stack(ms_avg_embs_list).permute(0, 1, 3, 2)
ms_avg_embs = ms_avg_embs.float().detach().to(embs.device)
assert (
not ms_avg_embs.requires_grad
), "ms_avg_embs.requires_grad = True. ms_avg_embs should be detached from the torch graph."
return ms_avg_embs
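    # Illustrative sketch (assumed toy values, not from the original source): cluster-average
    # embedding for a single scale, where segment embeddings are averaged per cluster label using
    # a boolean mask, mirroring the per-speaker averaging in the loop above.
    #
    #   import torch
    #   scale_embs = torch.randn(4, 192)                   # 4 segments at this scale
    #   labels = torch.tensor([0, 0, 1, 1])                # ground-truth cluster labels
    #   avg_spk0 = scale_embs[labels == 0].mean(dim=0)     # (192,)
    #   avg_spk1 = scale_embs[labels == 1].mean(dim=0)     # (192,)
    #   avg_embs = torch.stack([avg_spk0, avg_spk1])       # (num_spks, emb_dim)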
@torch.no_grad()
def get_ms_mel_feat(
self,
processed_signal: torch.Tensor,
processed_signal_len: torch.Tensor,
ms_seg_timestamps: torch.Tensor,
ms_seg_counts: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Load acoustic feature from audio segments for each scale and save it into a torch.tensor matrix.
        In addition, create variables containing the multiscale subsegmentation information.
        Note: `self.emb_batch_size` determines the number of embedding tensors attached to the computational graph.
        If `self.emb_batch_size` is greater than 0, speaker embedding models are simultaneously trained. Due to the
        constraint of GPU memory size, only a subset of embedding tensors can be attached to the computational graph.
By default, the graph-attached embeddings are selected randomly by `torch.randperm`. Default value of
`self.emb_batch_size` is 0.
Args:
processed_signal (Tensor):
Zero-padded Feature input.
Shape: (batch_size, feat_dim, the longest feature sequence length)
processed_signal_len (Tensor):
                The actual length of the feature input without zero-padding.
Shape: (batch_size,)
ms_seg_timestamps (Tensor):
Timestamps of the base-scale segments.
Shape: (batch_size, scale_n, number of base-scale segments, self.num_spks_per_model)
ms_seg_counts (Tensor):
Cumulative sum of the number of segments in each scale. This information is needed to reconstruct
                the multi-scale input matrix during forward propagation.
Shape: (batch_size, scale_n)
Returns:
ms_mel_feat (Tensor):
                Feature input stream split into segments of equal length.
                Shape: (total number of segments, feat_dim, self.frame_per_sec * the longest scale length)
ms_mel_feat_len (Tensor):
The actual length of feature without zero-padding.
Shape: (total number of segments,)
seq_len (Tensor):
The length of the input embedding sequences.
Shape: (total number of segments,)
detach_ids (tuple):
                Tuple containing the attached embedding indices followed by the detached embedding indices.
"""
device = processed_signal.device
_emb_batch_size = min(self.emb_batch_size, ms_seg_counts.sum().item())
feat_dim = self.preprocessor._cfg.features
max_sample_count = int(self.multiscale_args_dict["scale_dict"][0][0] * self.frame_per_sec)
ms_mel_feat_len_list, sequence_lengths_list, ms_mel_feat_list = [], [], []
total_seg_count = torch.sum(ms_seg_counts)
batch_size = processed_signal.shape[0]
for batch_idx in range(batch_size):
for scale_idx in range(self.scale_n):
scale_seg_num = ms_seg_counts[batch_idx][scale_idx]
for k, (stt, end) in enumerate(ms_seg_timestamps[batch_idx][scale_idx][:scale_seg_num]):
stt, end = int(stt.detach().item()), int(end.detach().item())
end = min(end, stt + max_sample_count)
_features = torch.zeros(feat_dim, max_sample_count).to(torch.float32).to(device)
_features[:, : (end - stt)] = processed_signal[batch_idx][:, stt:end]
ms_mel_feat_list.append(_features)
ms_mel_feat_len_list.append(end - stt)
sequence_lengths_list.append(ms_seg_counts[batch_idx][-1])
ms_mel_feat = torch.stack(ms_mel_feat_list).to(device)
ms_mel_feat_len = torch.tensor(ms_mel_feat_len_list).to(device)
seq_len = torch.tensor(sequence_lengths_list).to(device)
if _emb_batch_size == 0:
attached, _emb_batch_size = torch.tensor([]), 0
detached = torch.arange(total_seg_count)
else:
torch.manual_seed(self._trainer.current_epoch)
attached = torch.randperm(total_seg_count)[:_emb_batch_size]
detached = torch.randperm(total_seg_count)[_emb_batch_size:]
detach_ids = (attached, detached)
return ms_mel_feat, ms_mel_feat_len, seq_len, detach_ids
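    # Illustrative sketch (simplified; the logic above draws two separate permutations): how
    # segment indices are split into a graph-attached subset and a detached subset.
    #
    #   import torch
    #   total_seg_count, emb_batch_size = 10, 3
    #   perm = torch.randperm(total_seg_count)
    #   attached, detached = perm[:emb_batch_size], perm[emb_batch_size:]
    #   # gradients later flow only through embeddings computed for `attached` indices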
def forward_infer(self, input_signal, input_signal_length, emb_vectors, targets):
"""
Wrapper function for inference case.
"""
preds, scale_weights = self.msdd(
ms_emb_seq=input_signal, length=input_signal_length, ms_avg_embs=emb_vectors, targets=targets
)
return preds, scale_weights
@typecheck()
def forward(
self, features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets
):
processed_signal, processed_signal_len = self.msdd._speaker_model.preprocessor(
input_signal=features, length=feature_length
)
audio_signal, audio_signal_len, sequence_lengths, detach_ids = self.get_ms_mel_feat(
processed_signal, processed_signal_len, ms_seg_timestamps, ms_seg_counts
)
# For detached embeddings
with torch.no_grad():
self.msdd._speaker_model.eval()
logits, embs_d = self.msdd._speaker_model.forward_for_export(
processed_signal=audio_signal[detach_ids[1]], processed_signal_len=audio_signal_len[detach_ids[1]]
)
embs = torch.zeros(audio_signal.shape[0], embs_d.shape[1]).to(embs_d.device)
embs[detach_ids[1], :] = embs_d.detach()
# For attached embeddings
self.msdd._speaker_model.train()
if len(detach_ids[0]) > 1:
logits, embs_a = self.msdd._speaker_model.forward_for_export(
processed_signal=audio_signal[detach_ids[0]], processed_signal_len=audio_signal_len[detach_ids[0]]
)
embs[detach_ids[0], :] = embs_a
ms_emb_seq = self.get_ms_emb_seq(embs, scale_mapping, ms_seg_counts)
ms_avg_embs = self.get_cluster_avg_embs_model(embs, clus_label_index, ms_seg_counts, scale_mapping)
preds, scale_weights = self.msdd(
ms_emb_seq=ms_emb_seq, length=sequence_lengths, ms_avg_embs=ms_avg_embs, targets=targets
)
return preds, scale_weights
def training_step(self, batch: list, batch_idx: int):
features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets = batch
sequence_lengths = torch.tensor([x[-1] for x in ms_seg_counts.detach()])
preds, _ = self.forward(
features=features,
feature_length=feature_length,
ms_seg_timestamps=ms_seg_timestamps,
ms_seg_counts=ms_seg_counts,
clus_label_index=clus_label_index,
scale_mapping=scale_mapping,
targets=targets,
)
loss = self.loss(probs=preds, labels=targets, signal_lengths=sequence_lengths)
self._accuracy_train(preds, targets, sequence_lengths)
torch.cuda.empty_cache()
f1_acc = self._accuracy_train.compute()
self.log('loss', loss, sync_dist=True)
self.log('learning_rate', self._optimizer.param_groups[0]['lr'], sync_dist=True)
self.log('train_f1_acc', f1_acc, sync_dist=True)
self._accuracy_train.reset()
return {'loss': loss}
def validation_step(self, batch: list, batch_idx: int, dataloader_idx: int = 0):
features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets = batch
sequence_lengths = torch.tensor([x[-1] for x in ms_seg_counts])
preds, _ = self.forward(
features=features,
feature_length=feature_length,
ms_seg_timestamps=ms_seg_timestamps,
ms_seg_counts=ms_seg_counts,
clus_label_index=clus_label_index,
scale_mapping=scale_mapping,
targets=targets,
)
loss = self.loss(probs=preds, labels=targets, signal_lengths=sequence_lengths)
self._accuracy_valid(preds, targets, sequence_lengths)
f1_acc = self._accuracy_valid.compute()
self.log('val_loss', loss, sync_dist=True)
self.log('val_f1_acc', f1_acc, sync_dist=True)
return {
'val_loss': loss,
'val_f1_acc': f1_acc,
}
def multi_validation_epoch_end(self, outputs: list, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
f1_acc = self._accuracy_valid.compute()
self._accuracy_valid.reset()
self.log('val_loss', val_loss_mean, sync_dist=True)
self.log('val_f1_acc', f1_acc, sync_dist=True)
return {
'val_loss': val_loss_mean,
'val_f1_acc': f1_acc,
}
def multi_test_epoch_end(self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0):
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
f1_acc = self._accuracy_test.compute()
self._accuracy_test.reset()
self.log('test_f1_acc', f1_acc, sync_dist=True)
return {
'test_loss': test_loss_mean,
'test_f1_acc': f1_acc,
}
def compute_accuracies(self):
"""
Calculate F1 score and accuracy of the predicted sigmoid values.
Returns:
f1_score (float):
F1 score of the estimated diarized speaker label sequences.
simple_acc (float):
Accuracy of predicted speaker labels: (total # of correct labels)/(total # of sigmoid values)
"""
f1_score = self._accuracy_test.compute()
num_correct = torch.sum(self._accuracy_test.true.bool())
total_count = torch.prod(torch.tensor(self._accuracy_test.targets.shape))
simple_acc = num_correct / total_count
return f1_score, simple_acc
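    # Illustrative sketch (toy tensors, bypassing the metric objects above): the "simple accuracy"
    # is the fraction of binarized sigmoid outputs that match the binary targets.
    #
    #   import torch
    #   preds = (torch.tensor([[0.9, 0.1], [0.2, 0.8]]) > 0.5).int()
    #   targets = torch.tensor([[1, 0], [1, 1]])
    #   simple_acc = (preds == targets).float().mean()     # 3 of 4 correct -> 0.75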
class ClusterEmbedding(torch.nn.Module):
"""
    This class is built for calculating cluster-average embeddings and segmentation, and for loading/saving the estimated cluster labels.
    The methods in this class are used for the inference of MSDD models.
Args:
cfg_diar_infer (DictConfig):
Config dictionary from diarization inference YAML file
cfg_msdd_model (DictConfig):
Config dictionary from MSDD model checkpoint file
Class Variables:
self.cfg_diar_infer (DictConfig):
Config dictionary from diarization inference YAML file
cfg_msdd_model (DictConfig):
Config dictionary from MSDD model checkpoint file
self._speaker_model (class `EncDecSpeakerLabelModel`):
This is a placeholder for class instance of `EncDecSpeakerLabelModel`
self.scale_window_length_list (list):
List containing the window lengths (i.e., scale length) of each scale.
self.scale_n (int):
Number of scales for multi-scale clustering diarizer
self.base_scale_index (int):
The index of the base-scale which is the shortest scale among the given multiple scales
"""
def __init__(
self, cfg_diar_infer: DictConfig, cfg_msdd_model: DictConfig, speaker_model: Optional[EncDecSpeakerLabelModel]
):
super().__init__()
self.cfg_diar_infer = cfg_diar_infer
self._cfg_msdd = cfg_msdd_model
self._speaker_model = speaker_model
self.scale_window_length_list = list(
self.cfg_diar_infer.diarizer.speaker_embeddings.parameters.window_length_in_sec
)
self.scale_n = len(self.scale_window_length_list)
self.base_scale_index = len(self.scale_window_length_list) - 1
self.clus_diar_model = ClusteringDiarizer(cfg=self.cfg_diar_infer, speaker_model=self._speaker_model)
def prepare_cluster_embs_infer(self):
"""
Launch clustering diarizer to prepare embedding vectors and clustering results.
"""
self.max_num_speakers = self.cfg_diar_infer.diarizer.clustering.parameters.max_num_speakers
self.emb_sess_test_dict, self.emb_seq_test, self.clus_test_label_dict, _ = self.run_clustering_diarizer(
self._cfg_msdd.test_ds.manifest_filepath, self._cfg_msdd.test_ds.emb_dir
)
def assign_labels_to_longer_segs(self, base_clus_label_dict: Dict, session_scale_mapping_dict: Dict):
"""
        In a multi-scale speaker diarization system, the clustering result is solely based on the base scale (the shortest scale).
        To calculate cluster-average speaker embeddings for the scales that are longer than the base scale, this function assigns
        the base-scale clustering results to the longer scales by measuring the distance between subsegment timestamps in the
        base scale and the non-base scales.
Args:
base_clus_label_dict (dict):
Dictionary containing clustering results for base-scale segments. Indexed by `uniq_id` string.
session_scale_mapping_dict (dict):
Dictionary containing multiscale mapping information for each session. Indexed by `uniq_id` string.
Returns:
all_scale_clus_label_dict (dict):
Dictionary containing clustering labels of all scales. Indexed by scale_index in integer format.
"""
all_scale_clus_label_dict = {scale_index: {} for scale_index in range(self.scale_n)}
for uniq_id, uniq_scale_mapping_dict in session_scale_mapping_dict.items():
base_scale_clus_label = np.array([x[-1] for x in base_clus_label_dict[uniq_id]])
all_scale_clus_label_dict[self.base_scale_index][uniq_id] = base_scale_clus_label
for scale_index in range(self.scale_n - 1):
new_clus_label = []
assert (
uniq_scale_mapping_dict[scale_index].shape[0] == base_scale_clus_label.shape[0]
), "The number of base scale labels does not match the segment numbers in uniq_scale_mapping_dict"
max_index = max(uniq_scale_mapping_dict[scale_index])
for seg_idx in range(max_index + 1):
if seg_idx in uniq_scale_mapping_dict[scale_index]:
seg_clus_label = mode(base_scale_clus_label[uniq_scale_mapping_dict[scale_index] == seg_idx])
else:
seg_clus_label = 0 if len(new_clus_label) == 0 else new_clus_label[-1]
new_clus_label.append(seg_clus_label)
all_scale_clus_label_dict[scale_index][uniq_id] = new_clus_label
return all_scale_clus_label_dict
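    # Illustrative sketch (assumed toy mapping): base-scale labels are transferred to a longer
    # scale by taking the most frequent base-scale label among the base segments mapped to each
    # longer-scale segment. The `mode` call is assumed to be a majority-vote helper such as the one
    # used above.
    #
    #   from statistics import mode
    #   base_labels = [0, 0, 1, 1, 1]
    #   mapping = [0, 0, 0, 1, 1]                                               # longer scale has 2 segments
    #   seg0_label = mode([l for l, m in zip(base_labels, mapping) if m == 0])  # -> 0
    #   seg1_label = mode([l for l, m in zip(base_labels, mapping) if m == 1])  # -> 1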
def get_base_clus_label_dict(self, clus_labels: List[str], emb_scale_seq_dict: Dict[int, dict]):
"""
Retrieve base scale clustering labels from `emb_scale_seq_dict`.
Args:
clus_labels (list):
List containing cluster results generated by clustering diarizer.
emb_scale_seq_dict (dict):
Dictionary containing multiscale embedding input sequences.
Returns:
base_clus_label_dict (dict):
Dictionary containing start and end of base scale segments and its cluster label. Indexed by `uniq_id`.
emb_dim (int):
Embedding dimension in integer.
"""
base_clus_label_dict = {key: [] for key in emb_scale_seq_dict[self.base_scale_index].keys()}
for line in clus_labels:
uniq_id = line.split()[0]
label = int(line.split()[-1].split('_')[-1])
stt, end = [round(float(x), 2) for x in line.split()[1:3]]
base_clus_label_dict[uniq_id].append([stt, end, label])
emb_dim = emb_scale_seq_dict[0][uniq_id][0].shape[0]
return base_clus_label_dict, emb_dim
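    # Illustrative sketch (assumed label-line format inferred from the parsing above): each
    # clustering label line looks like "<uniq_id> <start> <end> <...>_<cluster_index>".
    #
    #   line = "en_0638 0.00 1.50 speaker_1"
    #   uniq_id = line.split()[0]                                     # 'en_0638'
    #   label = int(line.split()[-1].split('_')[-1])                  # 1
    #   stt, end = [round(float(x), 2) for x in line.split()[1:3]]    # 0.0, 1.5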
def get_cluster_avg_embs(
self, emb_scale_seq_dict: Dict, clus_labels: List, speaker_mapping_dict: Dict, session_scale_mapping_dict: Dict
):
"""
MSDD requires cluster-average speaker embedding vectors for each scale. This function calculates an average embedding vector for each cluster (speaker)
and each scale.
Args:
emb_scale_seq_dict (dict):
Dictionary containing embedding sequence for each scale. Keys are scale index in integer.
clus_labels (list):
Clustering results from clustering diarizer including all the sessions provided in input manifest files.
speaker_mapping_dict (dict):
                Speaker mapping dictionary in case RTTM files are provided. This is a mapping between integer-based speaker indices and
speaker ID tokens in RTTM files.
Example:
{'en_0638': {'speaker_0': 'en_0638_A', 'speaker_1': 'en_0638_B'},
'en_4065': {'speaker_0': 'en_4065_B', 'speaker_1': 'en_4065_A'}, ...,}
session_scale_mapping_dict (dict):
Dictionary containing multiscale mapping information for each session. Indexed by `uniq_id` string.
Returns:
emb_sess_avg_dict (dict):
Dictionary containing speaker mapping information and cluster-average speaker embedding vector.
Each session-level dictionary is indexed by scale index in integer.
output_clus_label_dict (dict):
                Subsegmentation timestamps (float) and clustering results (int). Indexed by `uniq_id` keys.
"""
self.scale_n = len(emb_scale_seq_dict.keys())
emb_sess_avg_dict = {
scale_index: {key: [] for key in emb_scale_seq_dict[self.scale_n - 1].keys()}
for scale_index in emb_scale_seq_dict.keys()
}
output_clus_label_dict, emb_dim = self.get_base_clus_label_dict(clus_labels, emb_scale_seq_dict)
all_scale_clus_label_dict = self.assign_labels_to_longer_segs(
output_clus_label_dict, session_scale_mapping_dict
)
for scale_index in emb_scale_seq_dict.keys():
for uniq_id, _emb_tensor in emb_scale_seq_dict[scale_index].items():
if type(_emb_tensor) == list:
emb_tensor = torch.tensor(np.array(_emb_tensor))
else:
emb_tensor = _emb_tensor
clus_label_list = all_scale_clus_label_dict[scale_index][uniq_id]
spk_set = set(clus_label_list)
# Create a label array which identifies clustering result for each segment.
label_array = torch.Tensor(clus_label_list)
avg_embs = torch.zeros(emb_dim, self.max_num_speakers)
for spk_idx in spk_set:
selected_embs = emb_tensor[label_array == spk_idx]
avg_embs[:, spk_idx] = torch.mean(selected_embs, dim=0)
if speaker_mapping_dict is not None:
inv_map = {clus_key: rttm_key for rttm_key, clus_key in speaker_mapping_dict[uniq_id].items()}
else:
inv_map = None
emb_sess_avg_dict[scale_index][uniq_id] = {'mapping': inv_map, 'avg_embs': avg_embs}
return emb_sess_avg_dict, output_clus_label_dict
def run_clustering_diarizer(self, manifest_filepath: str, emb_dir: str):
"""
        If no pre-existing data is provided, run the clustering diarizer from scratch. This will create scale-wise speaker embedding
        sequences, cluster-average embeddings, scale mapping and base-scale clustering labels. Note that the speaker embedding `state_dict`
is loaded from the `state_dict` in the provided MSDD checkpoint.
Args:
manifest_filepath (str):
Input manifest file for creating audio-to-RTTM mapping.
emb_dir (str):
Output directory where embedding files and timestamp files are saved.
Returns:
emb_sess_avg_dict (dict):
Dictionary containing cluster-average embeddings for each session.
emb_scale_seq_dict (dict):
Dictionary containing embedding tensors which are indexed by scale numbers.
base_clus_label_dict (dict):
Dictionary containing clustering results. Clustering results are cluster labels for the base scale segments.
"""
self.cfg_diar_infer.diarizer.manifest_filepath = manifest_filepath
self.cfg_diar_infer.diarizer.out_dir = emb_dir
# Run ClusteringDiarizer which includes system VAD or oracle VAD.
self._out_dir = self.clus_diar_model._diarizer_params.out_dir
self.out_rttm_dir = os.path.join(self._out_dir, 'pred_rttms')
os.makedirs(self.out_rttm_dir, exist_ok=True)
self.clus_diar_model._cluster_params = self.cfg_diar_infer.diarizer.clustering.parameters
self.clus_diar_model.multiscale_args_dict[
"multiscale_weights"
] = self.cfg_diar_infer.diarizer.speaker_embeddings.parameters.multiscale_weights
self.clus_diar_model._diarizer_params.speaker_embeddings.parameters = (
self.cfg_diar_infer.diarizer.speaker_embeddings.parameters
)
cluster_params = self.clus_diar_model._cluster_params
cluster_params = dict(cluster_params) if isinstance(cluster_params, DictConfig) else cluster_params.dict()
clustering_params_str = json.dumps(cluster_params, indent=4)
logging.info(f"Multiscale Weights: {self.clus_diar_model.multiscale_args_dict['multiscale_weights']}")
logging.info(f"Clustering Parameters: {clustering_params_str}")
scores = self.clus_diar_model.diarize(batch_size=self.cfg_diar_infer.batch_size)
# If RTTM (ground-truth diarization annotation) files do not exist, scores is None.
if scores is not None:
metric, speaker_mapping_dict, _ = scores
else:
metric, speaker_mapping_dict = None, None
# Get the mapping between segments in different scales.
self._embs_and_timestamps = get_embs_and_timestamps(
self.clus_diar_model.multiscale_embeddings_and_timestamps, self.clus_diar_model.multiscale_args_dict
)
session_scale_mapping_dict = self.get_scale_map(self._embs_and_timestamps)
emb_scale_seq_dict = self.load_emb_scale_seq_dict(emb_dir)
clus_labels = self.load_clustering_labels(emb_dir)
emb_sess_avg_dict, base_clus_label_dict = self.get_cluster_avg_embs(
emb_scale_seq_dict, clus_labels, speaker_mapping_dict, session_scale_mapping_dict
)
emb_scale_seq_dict['session_scale_mapping'] = session_scale_mapping_dict
return emb_sess_avg_dict, emb_scale_seq_dict, base_clus_label_dict, metric
def get_scale_map(self, embs_and_timestamps):
"""
Save multiscale mapping data into dictionary format.
Args:
embs_and_timestamps (dict):
Dictionary containing embedding tensors and timestamp tensors. Indexed by `uniq_id` string.
Returns:
session_scale_mapping_dict (dict):
Dictionary containing multiscale mapping information for each session. Indexed by `uniq_id` string.
"""
session_scale_mapping_dict = {}
for uniq_id, uniq_embs_and_timestamps in embs_and_timestamps.items():
scale_mapping_dict = get_scale_mapping_argmat(uniq_embs_and_timestamps)
session_scale_mapping_dict[uniq_id] = scale_mapping_dict
return session_scale_mapping_dict
def check_clustering_labels(self, out_dir):
"""
        Check whether the loaded clustering label file includes clustering results for all sessions.
This function is used for inference mode of MSDD.
Args:
out_dir (str):
Path to the directory where clustering result files are saved.
Returns:
file_exists (bool):
Boolean that indicates whether clustering result file exists.
clus_label_path (str):
Path to the clustering label output file.
"""
clus_label_path = os.path.join(
out_dir, 'speaker_outputs', f'subsegments_scale{self.base_scale_index}_cluster.label'
)
file_exists = os.path.exists(clus_label_path)
if not file_exists:
logging.info(f"Clustering label file {clus_label_path} does not exist.")
return file_exists, clus_label_path
def load_clustering_labels(self, out_dir):
"""
Load clustering labels generated by clustering diarizer. This function is used for inference mode of MSDD.
Args:
out_dir (str):
Path to the directory where clustering result files are saved.
Returns:
            clus_labels (list):
                List containing clustering results in string format.
"""
file_exists, clus_label_path = self.check_clustering_labels(out_dir)
logging.info(f"Loading cluster label file from {clus_label_path}")
with open(clus_label_path) as f:
clus_labels = f.readlines()
return clus_labels
def load_emb_scale_seq_dict(self, out_dir):
"""
Load saved embeddings generated by clustering diarizer. This function is used for inference mode of MSDD.
Args:
out_dir (str):
Path to the directory where embedding pickle files are saved.
Returns:
emb_scale_seq_dict (dict):
Dictionary containing embedding tensors which are indexed by scale numbers.
"""
window_len_list = list(self.cfg_diar_infer.diarizer.speaker_embeddings.parameters.window_length_in_sec)
emb_scale_seq_dict = {scale_index: None for scale_index in range(len(window_len_list))}
for scale_index in range(len(window_len_list)):
pickle_path = os.path.join(
out_dir, 'speaker_outputs', 'embeddings', f'subsegments_scale{scale_index}_embeddings.pkl'
)
logging.info(f"Loading embedding pickle file of scale:{scale_index} at {pickle_path}")
with open(pickle_path, "rb") as input_file:
emb_dict = pkl.load(input_file)
for key, val in emb_dict.items():
emb_dict[key] = val
emb_scale_seq_dict[scale_index] = emb_dict
return emb_scale_seq_dict
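    # Illustrative sketch (assumed directory layout, mirroring the path construction above): each
    # scale's embeddings are stored as a pickle of {uniq_id: embedding sequence}.
    #
    #   import os, pickle as pkl
    #   pickle_path = os.path.join(out_dir, 'speaker_outputs', 'embeddings',
    #                              'subsegments_scale0_embeddings.pkl')
    #   with open(pickle_path, "rb") as f:
    #       emb_dict = pkl.load(f)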
class NeuralDiarizer(LightningModule):
"""
    Class for inference based on the multiscale diarization decoder (MSDD). MSDD requires initializing clustering results from a
    clustering diarizer. The overlap-aware diarizer requires separate RTTM generation and evaluation modules to check the effect of
    overlap detection in speaker diarization.
"""
def __init__(self, cfg: Union[DictConfig, NeuralDiarizerInferenceConfig]):
super().__init__()
self._cfg = cfg
# Parameter settings for MSDD model
self.use_speaker_model_from_ckpt = cfg.diarizer.msdd_model.parameters.get('use_speaker_model_from_ckpt', True)
self.use_clus_as_main = cfg.diarizer.msdd_model.parameters.get('use_clus_as_main', False)
self.max_overlap_spks = cfg.diarizer.msdd_model.parameters.get('max_overlap_spks', 2)
self.num_spks_per_model = cfg.diarizer.msdd_model.parameters.get('num_spks_per_model', 2)
self.use_adaptive_thres = cfg.diarizer.msdd_model.parameters.get('use_adaptive_thres', True)
self.max_pred_length = cfg.diarizer.msdd_model.parameters.get('max_pred_length', 0)
self.diar_eval_settings = cfg.diarizer.msdd_model.parameters.get(
'diar_eval_settings', [(0.25, True), (0.25, False), (0.0, False)]
)
self._init_msdd_model(cfg)
self.diar_window_length = cfg.diarizer.msdd_model.parameters.diar_window_length
self.msdd_model.cfg = self.transfer_diar_params_to_model_params(self.msdd_model, cfg)
# Initialize clustering and embedding preparation instance (as a diarization encoder).
self.clustering_embedding = ClusterEmbedding(
cfg_diar_infer=cfg, cfg_msdd_model=self.msdd_model.cfg, speaker_model=self._speaker_model
)
# Parameters for creating diarization results from MSDD outputs.
self.clustering_max_spks = self.msdd_model._cfg.max_num_of_spks
self.overlap_infer_spk_limit = cfg.diarizer.msdd_model.parameters.get(
'overlap_infer_spk_limit', self.clustering_max_spks
)
def transfer_diar_params_to_model_params(self, msdd_model, cfg):
"""
Transfer the parameters that are needed for MSDD inference from the diarization inference config files
to MSDD model config `msdd_model.cfg`.
"""
msdd_model.cfg.diarizer.out_dir = cfg.diarizer.out_dir
msdd_model.cfg.test_ds.manifest_filepath = cfg.diarizer.manifest_filepath
msdd_model.cfg.test_ds.emb_dir = cfg.diarizer.out_dir
msdd_model.cfg.test_ds.batch_size = cfg.diarizer.msdd_model.parameters.infer_batch_size
msdd_model.cfg.test_ds.seq_eval_mode = cfg.diarizer.msdd_model.parameters.seq_eval_mode
msdd_model._cfg.max_num_of_spks = cfg.diarizer.clustering.parameters.max_num_speakers
return msdd_model.cfg
@rank_zero_only
def save_to(self, save_path: str):
"""
Saves model instances (weights and configuration) into EFF archive.
You can use "restore_from" method to fully restore instance from .nemo file.
.nemo file is an archive (tar.gz) with the following:
            model_config.yaml - model configuration in .yaml format. You can deserialize this into the cfg argument for the model's constructor
            model_weights.ckpt - model checkpoint
Args:
save_path: Path to .nemo file where model instance should be saved
"""
self.clus_diar = self.clustering_embedding.clus_diar_model
_NEURAL_DIAR_MODEL = "msdd_model.nemo"
with tempfile.TemporaryDirectory() as tmpdir:
config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)
spkr_model = os.path.join(tmpdir, _SPEAKER_MODEL)
neural_diar_model = os.path.join(tmpdir, _NEURAL_DIAR_MODEL)
self.clus_diar.to_config_file(path2yaml_file=config_yaml)
if self.clus_diar.has_vad_model:
vad_model = os.path.join(tmpdir, _VAD_MODEL)
self.clus_diar._vad_model.save_to(vad_model)
self.clus_diar._speaker_model.save_to(spkr_model)
self.msdd_model.save_to(neural_diar_model)
self.clus_diar.__make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)
def extract_standalone_speaker_model(self, prefix: str = 'msdd._speaker_model.') -> EncDecSpeakerLabelModel:
"""
        The MSDD model file contains both a speaker embedding model and the MSDD model. This function extracts the standalone
        speaker model and loads its weights so that it can be used separately by the clustering diarizer.
        Args:
            prefix (str):
                Prefix of the speaker embedding submodule names in the MSDD model state dict.
        Returns:
            _speaker_model (EncDecSpeakerLabelModel):
                Standalone speaker model initialized with the weights extracted from the MSDD checkpoint.
"""
model_state_dict = self.msdd_model.state_dict()
spk_emb_module_names = []
for name in model_state_dict.keys():
if prefix in name:
spk_emb_module_names.append(name)
spk_emb_state_dict = {}
for name in spk_emb_module_names:
org_name = name.replace(prefix, '')
spk_emb_state_dict[org_name] = model_state_dict[name]
_speaker_model = EncDecSpeakerLabelModel.from_config_dict(self.msdd_model.cfg.speaker_model_cfg)
_speaker_model.load_state_dict(spk_emb_state_dict)
return _speaker_model
def _init_msdd_model(self, cfg: Union[DictConfig, NeuralDiarizerInferenceConfig]):
"""
        Initialize the MSDD model with the provided config. Load either from a `.nemo` file or a `.ckpt` checkpoint file.
"""
model_path = cfg.diarizer.msdd_model.model_path
if model_path.endswith('.nemo'):
logging.info(f"Using local nemo file from {model_path}")
self.msdd_model = EncDecDiarLabelModel.restore_from(restore_path=model_path, map_location=cfg.device)
elif model_path.endswith('.ckpt'):
logging.info(f"Using local checkpoint from {model_path}")
self.msdd_model = EncDecDiarLabelModel.load_from_checkpoint(
checkpoint_path=model_path, map_location=cfg.device
)
else:
if model_path not in get_available_model_names(EncDecDiarLabelModel):
logging.warning(f"requested {model_path} model name not available in pretrained models, instead")
logging.info("Loading pretrained {} model from NGC".format(model_path))
self.msdd_model = EncDecDiarLabelModel.from_pretrained(model_name=model_path, map_location=cfg.device)
# Load speaker embedding model state_dict which is loaded from the MSDD checkpoint.
if self.use_speaker_model_from_ckpt:
self._speaker_model = self.extract_standalone_speaker_model()
else:
self._speaker_model = None
def get_pred_mat(self, data_list: List[Union[Tuple[int], List[torch.Tensor]]]) -> torch.Tensor:
"""
        This module puts together the pairwise, two-speaker predicted results to form a finalized matrix with shape
        `(total_len, n_est_spks)`. The pairwise results are eventually averaged. For example, in the 4-speaker case (speakers 1, 2, 3, 4),
        the sum of the pairwise results (1, 2), (1, 3), (1, 4) is divided by 3 to take the average of the sigmoid values for speaker 1.
Args:
data_list (list):
List containing data points from `test_data_collection` variable. `data_list` has sublists `data` as follows:
data[0]: `target_spks` tuple
Examples: (0, 1, 2)
                data[1]: Tensor containing estimated sigmoid values.
[[0.0264, 0.9995],
[0.0112, 1.0000],
...,
[1.0000, 0.0512]]
Returns:
sum_pred (Tensor):
Tensor containing the averaged sigmoid values for each speaker.
"""
all_tups = tuple()
for data in data_list:
all_tups += data[0]
n_est_spks = len(set(all_tups))
digit_map = dict(zip(sorted(set(all_tups)), range(n_est_spks)))
total_len = max([sess[1].shape[1] for sess in data_list])
sum_pred = torch.zeros(total_len, n_est_spks)
for (_dim_tup, pred_mat) in data_list:
dim_tup = [digit_map[x] for x in _dim_tup]
if len(pred_mat.shape) == 3:
pred_mat = pred_mat.squeeze(0)
if n_est_spks <= self.num_spks_per_model:
sum_pred = pred_mat
else:
_end = pred_mat.shape[0]
sum_pred[:_end, dim_tup] += pred_mat.cpu().float()
sum_pred = sum_pred / (n_est_spks - 1)
return sum_pred
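    # Illustrative sketch (toy values): pairwise two-speaker predictions are scattered into a
    # session-level matrix and averaged over the (n_est_spks - 1) pairs that touch each speaker.
    #
    #   import torch
    #   n_est_spks, total_len = 3, 4
    #   sum_pred = torch.zeros(total_len, n_est_spks)
    #   pairs = {(0, 1): torch.rand(total_len, 2), (0, 2): torch.rand(total_len, 2),
    #            (1, 2): torch.rand(total_len, 2)}
    #   for dims, mat in pairs.items():
    #       sum_pred[:, list(dims)] += mat
    #   sum_pred /= (n_est_spks - 1)                       # each speaker appears in 2 pairs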
def get_integrated_preds_list(
self, uniq_id_list: List[str], test_data_collection: List[Any], preds_list: List[torch.Tensor]
) -> List[torch.Tensor]:
"""
Merge multiple sequence inference outputs into a session level result.
Args:
uniq_id_list (list):
List containing `uniq_id` values.
test_data_collection (collections.DiarizationLabelEntity):
                Class instance containing session information such as targeted speaker indices, audio filepaths and RTTM filepaths.
preds_list (list):
List containing tensors filled with sigmoid values.
Returns:
output_list (list):
List containing session-level estimated prediction matrix.
"""
session_dict = get_id_tup_dict(uniq_id_list, test_data_collection, preds_list)
output_dict = {uniq_id: [] for uniq_id in uniq_id_list}
for uniq_id, data_list in session_dict.items():
sum_pred = self.get_pred_mat(data_list)
output_dict[uniq_id] = sum_pred.unsqueeze(0)
output_list = [output_dict[uniq_id] for uniq_id in uniq_id_list]
return output_list
def get_emb_clus_infer(self, cluster_embeddings):
"""Assign dictionaries containing the clustering results from the class instance `cluster_embeddings`.
"""
self.msdd_model.emb_sess_test_dict = cluster_embeddings.emb_sess_test_dict
self.msdd_model.clus_test_label_dict = cluster_embeddings.clus_test_label_dict
self.msdd_model.emb_seq_test = cluster_embeddings.emb_seq_test
@torch.no_grad()
def diarize(self) -> Optional[List[Optional[List[Tuple[DiarizationErrorRate, Dict]]]]]:
"""
        Launch the diarization pipeline, which starts from VAD (or oracle VAD timestamp generation), initial clustering and the multiscale diarization decoder (MSDD).
Note that the result of MSDD can include multiple speakers at the same time. Therefore, RTTM output of MSDD needs to be based on `make_rttm_with_overlap()`
function that can generate overlapping timestamps. `self.run_overlap_aware_eval()` function performs DER evaluation.
"""
self.clustering_embedding.prepare_cluster_embs_infer()
self.msdd_model.pairwise_infer = True
self.get_emb_clus_infer(self.clustering_embedding)
preds_list, targets_list, signal_lengths_list = self.run_pairwise_diarization()
thresholds = list(self._cfg.diarizer.msdd_model.parameters.sigmoid_threshold)
return [self.run_overlap_aware_eval(preds_list, threshold) for threshold in thresholds]
def get_range_average(
self, signals: torch.Tensor, emb_vectors: torch.Tensor, diar_window_index: int, test_data_collection: List[Any]
) -> Tuple[torch.Tensor, torch.Tensor, int]:
"""
This function is only used when `split_infer=True`. This module calculates cluster-average embeddings for the given short range.
The range length is set by `self.diar_window_length`, and each cluster-average is only calculated for the specified range.
        Note that if the specified range does not contain some speakers (e.g., the range contains speakers 1 and 3) compared to the global speaker set
        (e.g., speakers 1, 2, 3, 4), then the missing speakers (e.g., speakers 2 and 4) are assigned zero-filled cluster-average speaker embeddings.
Args:
signals (Tensor):
Zero-padded Input multi-scale embedding sequences.
Shape: (length, scale_n, emb_vectors, emb_dim)
emb_vectors (Tensor):
Cluster-average multi-scale embedding vectors.
Shape: (length, scale_n, emb_vectors, emb_dim)
diar_window_index (int):
                Index of the split diarization window.
            test_data_collection (collections.DiarizationLabelEntity):
                Class instance containing session information such as targeted speaker indices, audio filepath and RTTM filepath.
Returns:
return emb_vectors_split (Tensor):
Cluster-average speaker embedding vectors for each scale.
emb_seq (Tensor):
Zero-padded multi-scale embedding sequences.
seq_len (int):
Length of the sequence determined by `self.diar_window_length` variable.
"""
emb_vectors_split = torch.zeros_like(emb_vectors)
uniq_id = os.path.splitext(os.path.basename(test_data_collection.audio_file))[0]
clus_label_tensor = torch.tensor([x[-1] for x in self.msdd_model.clus_test_label_dict[uniq_id]])
for spk_idx in range(len(test_data_collection.target_spks)):
stt, end = (
diar_window_index * self.diar_window_length,
min((diar_window_index + 1) * self.diar_window_length, clus_label_tensor.shape[0]),
)
seq_len = end - stt
if stt < clus_label_tensor.shape[0]:
target_clus_label_tensor = clus_label_tensor[stt:end]
emb_seq, seg_length = (
signals[stt:end, :, :],
min(
self.diar_window_length,
clus_label_tensor.shape[0] - diar_window_index * self.diar_window_length,
),
)
target_clus_label_bool = target_clus_label_tensor == test_data_collection.target_spks[spk_idx]
# There are cases where there is no corresponding speaker in split range, so any(target_clus_label_bool) could be False.
if any(target_clus_label_bool):
emb_vectors_split[:, :, spk_idx] = torch.mean(emb_seq[target_clus_label_bool], dim=0)
# In case when the loop reaches the end of the sequence
if seq_len < self.diar_window_length:
emb_seq = torch.cat(
[
emb_seq,
torch.zeros(self.diar_window_length - seq_len, emb_seq.shape[1], emb_seq.shape[2]).to(
signals.device
),
],
dim=0,
)
else:
emb_seq = torch.zeros(self.diar_window_length, emb_vectors.shape[0], emb_vectors.shape[1]).to(
signals.device
)
seq_len = 0
return emb_vectors_split, emb_seq, seq_len
def get_range_clus_avg_emb(
        self, test_batch: List[torch.Tensor], _test_data_collection: List[Any], device: torch.device = torch.device('cpu')
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
This function is only used when `get_range_average` function is called. This module calculates cluster-average embeddings for
the given short range. The range length is set by `self.diar_window_length`, and each cluster-average is only calculated for the specified range.
Args:
test_batch: (list)
List containing embedding sequences, length of embedding sequences, ground truth labels (if exists) and initializing embedding vectors.
test_data_collection: (list)
List containing test-set dataloader contents. test_data_collection includes wav file path, RTTM file path, clustered speaker indices.
Returns:
sess_emb_vectors (Tensor):
Tensor of cluster-average speaker embedding vectors.
Shape: (batch_size, scale_n, emb_dim, 2*num_of_spks)
sess_emb_seq (Tensor):
Tensor of input multi-scale embedding sequences.
Shape: (batch_size, length, scale_n, emb_dim)
sess_sig_lengths (Tensor):
                Tensor of the actual sequence length without zero-padding.
Shape: (batch_size)
"""
_signals, signal_lengths, _targets, _emb_vectors = test_batch
sess_emb_vectors, sess_emb_seq, sess_sig_lengths = [], [], []
split_count = torch.ceil(torch.tensor(_signals.shape[1] / self.diar_window_length)).int()
self.max_pred_length = max(self.max_pred_length, self.diar_window_length * split_count)
for k in range(_signals.shape[0]):
signals, emb_vectors, test_data_collection = _signals[k], _emb_vectors[k], _test_data_collection[k]
for diar_window_index in range(split_count):
emb_vectors_split, emb_seq, seq_len = self.get_range_average(
signals, emb_vectors, diar_window_index, test_data_collection
)
sess_emb_vectors.append(emb_vectors_split)
sess_emb_seq.append(emb_seq)
sess_sig_lengths.append(seq_len)
sess_emb_vectors = torch.stack(sess_emb_vectors).to(device)
sess_emb_seq = torch.stack(sess_emb_seq).to(device)
sess_sig_lengths = torch.tensor(sess_sig_lengths).to(device)
return sess_emb_vectors, sess_emb_seq, sess_sig_lengths
def diar_infer(
self, test_batch: List[torch.Tensor], test_data_collection: List[Any]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Launch forward_infer() function by feeding the session-wise embedding sequences to get pairwise speaker prediction values.
        If split_infer is True, the input audio clips are broken into short sequences, then cluster-average embeddings are calculated
        for inference. Split-infer might yield improved results if calculating the cluster average over a shorter time span
        helps speaker assignment.
Args:
test_batch: (list)
List containing embedding sequences, length of embedding sequences, ground truth labels (if exists) and initializing embedding vectors.
test_data_collection: (list)
List containing test-set dataloader contents. test_data_collection includes wav file path, RTTM file path, clustered speaker indices.
Returns:
preds (Tensor):
Tensor containing predicted values which are generated from MSDD model.
targets (Tensor):
Tensor containing binary ground-truth values.
signal_lengths (Tensor):
                The actual session length (number of steps = number of base-scale segments) without zero-padding.
"""
signals, signal_lengths, _targets, emb_vectors = test_batch
if self._cfg.diarizer.msdd_model.parameters.split_infer:
split_count = torch.ceil(torch.tensor(signals.shape[1] / self.diar_window_length)).int()
sess_emb_vectors, sess_emb_seq, sess_sig_lengths = self.get_range_clus_avg_emb(
test_batch, test_data_collection, device=self.msdd_model.device
)
with autocast():
_preds, scale_weights = self.msdd_model.forward_infer(
input_signal=sess_emb_seq,
input_signal_length=sess_sig_lengths,
emb_vectors=sess_emb_vectors,
targets=None,
)
_preds = _preds.reshape(len(signal_lengths), split_count * self.diar_window_length, -1)
_preds = _preds[:, : signals.shape[1], :]
else:
with autocast():
_preds, scale_weights = self.msdd_model.forward_infer(
input_signal=signals, input_signal_length=signal_lengths, emb_vectors=emb_vectors, targets=None
)
self.max_pred_length = max(_preds.shape[1], self.max_pred_length)
preds = torch.zeros(_preds.shape[0], self.max_pred_length, _preds.shape[2])
targets = torch.zeros(_preds.shape[0], self.max_pred_length, _preds.shape[2])
preds[:, : _preds.shape[1], :] = _preds
return preds, targets, signal_lengths
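    # Illustrative sketch (assumed sizes): with split_infer, per-window predictions are stitched
    # back into a session-level tensor and trimmed to the original sequence length, mirroring the
    # reshape-and-slice step above.
    #
    #   import torch
    #   batch_size, diar_window, split_count, n_spks, seq_len = 2, 50, 3, 2, 130
    #   window_preds = torch.rand(batch_size * split_count, diar_window, n_spks)
    #   preds = window_preds.reshape(batch_size, split_count * diar_window, n_spks)
    #   preds = preds[:, :seq_len, :]                      # (2, 130, 2)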
@torch.no_grad()
def run_pairwise_diarization(self) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]:
"""
        Set up the parameters needed for batch inference and run batch inference. Note that each sample is a pairwise speaker input.
The pairwise inference results are reconstructed to make session-wise prediction results.
Returns:
integrated_preds_list: (list)
List containing the session-wise speaker predictions in torch.tensor format.
targets_list: (list)
List containing the ground-truth labels in matrix format filled with 0 or 1.
signal_lengths_list: (list)
List containing the actual length of each sequence in session.
"""
self.out_rttm_dir = self.clustering_embedding.out_rttm_dir
self.msdd_model.setup_test_data(self.msdd_model.cfg.test_ds)
self.msdd_model.eval()
cumul_sample_count = [0]
preds_list, targets_list, signal_lengths_list = [], [], []
uniq_id_list = get_uniq_id_list_from_manifest(self.msdd_model.cfg.test_ds.manifest_filepath)
test_data_collection = [d for d in self.msdd_model.data_collection]
for sidx, test_batch in enumerate(tqdm(self.msdd_model.test_dataloader())):
signals, signal_lengths, _targets, emb_vectors = test_batch
cumul_sample_count.append(cumul_sample_count[-1] + signal_lengths.shape[0])
preds, targets, signal_lengths = self.diar_infer(
test_batch, test_data_collection[cumul_sample_count[-2] : cumul_sample_count[-1]]
)
if self._cfg.diarizer.msdd_model.parameters.seq_eval_mode:
self.msdd_model._accuracy_test(preds, targets, signal_lengths)
preds_list.extend(list(torch.split(preds, 1)))
targets_list.extend(list(torch.split(targets, 1)))
signal_lengths_list.extend(list(torch.split(signal_lengths, 1)))
if self._cfg.diarizer.msdd_model.parameters.seq_eval_mode:
f1_score, simple_acc = self.msdd_model.compute_accuracies()
logging.info(f"Test Inference F1 score. {f1_score:.4f}, simple Acc. {simple_acc:.4f}")
integrated_preds_list = self.get_integrated_preds_list(uniq_id_list, test_data_collection, preds_list)
return integrated_preds_list, targets_list, signal_lengths_list
def run_overlap_aware_eval(
self, preds_list: List[torch.Tensor], threshold: float
) -> List[Optional[Tuple[DiarizationErrorRate, Dict]]]:
"""
Based on the predicted sigmoid values, render RTTM files then evaluate the overlap-aware diarization results.
Args:
preds_list: (list)
List containing predicted pairwise speaker labels.
threshold: (float)
A floating-point threshold value that determines overlapped speech detection.
                - If threshold is 1.0, no overlapping speech is detected and only the major speaker is detected.
- If threshold is 0.0, all speakers are considered active at any time step.
"""
logging.info(
f" [Threshold: {threshold:.4f}] [use_clus_as_main={self.use_clus_as_main}] [diar_window={self.diar_window_length}]"
)
outputs = []
manifest_filepath = self.msdd_model.cfg.test_ds.manifest_filepath
rttm_map = audio_rttm_map(manifest_filepath)
for k, (collar, ignore_overlap) in enumerate(self.diar_eval_settings):
all_reference, all_hypothesis = make_rttm_with_overlap(
manifest_filepath,
self.msdd_model.clus_test_label_dict,
preds_list,
threshold=threshold,
infer_overlap=True,
use_clus_as_main=self.use_clus_as_main,
overlap_infer_spk_limit=self.overlap_infer_spk_limit,
use_adaptive_thres=self.use_adaptive_thres,
max_overlap_spks=self.max_overlap_spks,
out_rttm_dir=self.out_rttm_dir,
)
output = score_labels(
rttm_map,
all_reference,
all_hypothesis,
collar=collar,
ignore_overlap=ignore_overlap,
verbose=self._cfg.verbose,
)
outputs.append(output)
logging.info(f" \n")
return outputs
@classmethod
def from_pretrained(
cls,
model_name: str,
vad_model_name: str = 'vad_multilingual_marblenet',
map_location: Optional[str] = None,
verbose: bool = False,
):
"""
Instantiate a `NeuralDiarizer` to run Speaker Diarization.
Args:
model_name (str): Path/Name of the neural diarization model to load.
vad_model_name (str): Path/Name of the voice activity detection (VAD) model to load.
map_location (str): Optional str to map the instantiated model to a device (cpu, cuda).
By default, (None), it will select a GPU if available, falling back to CPU otherwise.
verbose (bool): Enable verbose logging when loading models/running diarization.
Returns:
`NeuralDiarizer`
"""
logging.setLevel(logging.INFO if verbose else logging.WARNING)
cfg = NeuralDiarizerInferenceConfig.init_config(
diar_model_path=model_name, vad_model_path=vad_model_name, map_location=map_location, verbose=verbose,
)
return cls(cfg)
def __call__(
self,
audio_filepath: str,
batch_size: int = 64,
num_workers: int = 1,
max_speakers: Optional[int] = None,
num_speakers: Optional[int] = None,
out_dir: Optional[str] = None,
verbose: bool = False,
) -> Union[Annotation, List[Annotation]]:
"""
Run the `NeuralDiarizer` inference pipeline.
Args:
audio_filepath (str, list): Audio path to run speaker diarization on.
max_speakers (int): If known, the max number of speakers in the file(s).
num_speakers (int): If known, the exact number of speakers in the file(s).
batch_size (int): Batch size when running inference.
num_workers (int): Number of workers to use in data-loading.
out_dir (str): Path to store intermediate files during inference (default temp directory).
Returns:
`pyannote.Annotation` for each audio path, containing speaker labels and segment timestamps.
"""
if out_dir:
os.makedirs(out_dir, exist_ok=True)
with tempfile.TemporaryDirectory(dir=out_dir) as tmpdir:
manifest_path = os.path.join(tmpdir, 'manifest.json')
meta = [
{
'audio_filepath': audio_filepath,
'offset': 0,
'duration': None,
'label': 'infer',
'text': '-',
'num_speakers': num_speakers,
'rttm_filepath': None,
'uem_filepath': None,
}
]
with open(manifest_path, 'w') as f:
f.write('\n'.join(json.dumps(x) for x in meta))
self._initialize_configs(
manifest_path=manifest_path,
max_speakers=max_speakers,
num_speakers=num_speakers,
tmpdir=tmpdir,
batch_size=batch_size,
num_workers=num_workers,
verbose=verbose,
)
self.msdd_model.cfg.test_ds.manifest_filepath = manifest_path
self.diarize()
pred_labels_clus = rttm_to_labels(f'{tmpdir}/pred_rttms/{Path(audio_filepath).stem}.rttm')
return labels_to_pyannote_object(pred_labels_clus)
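    # Illustrative usage sketch (the audio path is an example; the pretrained model name is an
    # assumption and may not be available in every environment):
    #
    #   diar_model = NeuralDiarizer.from_pretrained(model_name="diar_msdd_telephonic")
    #   annotation = diar_model("/path/to/audio.wav", batch_size=16, num_workers=0)
    #   # `annotation` is a pyannote Annotation with speaker labels and segment timestamps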
def _initialize_configs(
self,
manifest_path: str,
max_speakers: Optional[int],
num_speakers: Optional[int],
tmpdir: tempfile.TemporaryDirectory,
batch_size: int,
num_workers: int,
verbose: bool,
) -> None:
self._cfg.batch_size = batch_size
self._cfg.num_workers = num_workers
self._cfg.diarizer.manifest_filepath = manifest_path
self._cfg.diarizer.out_dir = tmpdir
self._cfg.verbose = verbose
self._cfg.diarizer.clustering.parameters.oracle_num_speakers = num_speakers is not None
if max_speakers:
self._cfg.diarizer.clustering.parameters.max_num_speakers = max_speakers
self.transfer_diar_params_to_model_params(self.msdd_model, self._cfg)
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
return EncDecDiarLabelModel.list_available_models()
|
NeMo-main
|
nemo/collections/asr/models/msdd_models.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from omegaconf import MISSING
import nemo.core.classes.dataset
from nemo.collections.asr.modules.audio_preprocessing import (
AudioToMFCCPreprocessorConfig,
CropOrPadSpectrogramAugmentationConfig,
SpectrogramAugmentationConfig,
)
from nemo.collections.asr.modules.conv_asr import ConvASRDecoderClassificationConfig, ConvASREncoderConfig
from nemo.core.config import modelPT as model_cfg
@dataclass
class EncDecClassificationDatasetConfig(nemo.core.classes.dataset.DatasetConfig):
manifest_filepath: Optional[str] = None
sample_rate: int = MISSING
labels: List[str] = MISSING
trim_silence: bool = False
# Tarred dataset support
is_tarred: bool = False
tarred_audio_filepaths: Optional[str] = None
tarred_shard_strategy: str = "scatter"
shuffle_n: int = 0
# Optional
int_values: Optional[int] = None
augmentor: Optional[Dict[str, Any]] = None
max_duration: Optional[float] = None
min_duration: Optional[float] = None
cal_labels_occurrence: Optional[bool] = False
# VAD Optional
vad_stream: Optional[bool] = None
window_length_in_sec: float = 0.31
shift_length_in_sec: float = 0.01
normalize_audio: bool = False
is_regression_task: bool = False
# bucketing params
bucketing_strategy: str = "synced_randomized"
bucketing_batch_size: Optional[Any] = None
bucketing_weights: Optional[List[int]] = None
@dataclass
class EncDecClassificationConfig(model_cfg.ModelConfig):
# Model global arguments
sample_rate: int = 16000
repeat: int = 1
dropout: float = 0.0
separable: bool = True
kernel_size_factor: float = 1.0
labels: List[str] = MISSING
timesteps: int = MISSING
# Dataset configs
train_ds: EncDecClassificationDatasetConfig = EncDecClassificationDatasetConfig(
manifest_filepath=None, shuffle=True, trim_silence=False
)
validation_ds: EncDecClassificationDatasetConfig = EncDecClassificationDatasetConfig(
manifest_filepath=None, shuffle=False
)
test_ds: EncDecClassificationDatasetConfig = EncDecClassificationDatasetConfig(
manifest_filepath=None, shuffle=False
)
# Optimizer / Scheduler config
optim: Optional[model_cfg.OptimConfig] = model_cfg.OptimConfig(sched=model_cfg.SchedConfig())
# Model component configs
preprocessor: AudioToMFCCPreprocessorConfig = AudioToMFCCPreprocessorConfig()
spec_augment: Optional[SpectrogramAugmentationConfig] = SpectrogramAugmentationConfig()
crop_or_pad_augment: Optional[CropOrPadSpectrogramAugmentationConfig] = CropOrPadSpectrogramAugmentationConfig(
audio_length=timesteps
)
encoder: ConvASREncoderConfig = ConvASREncoderConfig()
decoder: ConvASRDecoderClassificationConfig = ConvASRDecoderClassificationConfig()
@dataclass
class EncDecClassificationModelConfig(model_cfg.NemoConfig):
model: EncDecClassificationConfig = EncDecClassificationConfig()
|
NeMo-main
|
nemo/collections/asr/models/configs/classification_models_config.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from nemo.collections.asr.models.configs.asr_models_config import EncDecCTCConfig
from nemo.collections.asr.parts.k2.classes import GraphModuleConfig as BackendConfig
from nemo.core.config.modelPT import NemoConfig
@dataclass
class GraphModuleConfig:
criterion_type: str = "ml"
loss_type: str = "ctc"
split_batch_size: int = 0
dec_type: str = "topo"
transcribe_training: bool = True
backend_cfg: BackendConfig = BackendConfig()
@dataclass
class EncDecK2SeqConfig(EncDecCTCConfig):
graph_module_cfg: GraphModuleConfig = GraphModuleConfig()
@dataclass
class EncDecK2SeqModelConfig(NemoConfig):
model: EncDecK2SeqConfig = EncDecK2SeqConfig()
|
NeMo-main
|
nemo/collections/asr/models/configs/k2_sequence_models_config.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from omegaconf import MISSING
import nemo.core.classes.dataset
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.modules.audio_preprocessing import (
AudioToMelSpectrogramPreprocessorConfig,
SpectrogramAugmentationConfig,
)
from nemo.collections.asr.modules.conv_asr import ConvASRDecoderConfig, ConvASREncoderConfig
from nemo.core.config import modelPT as model_cfg
@dataclass
class ASRDatasetConfig(nemo.core.classes.dataset.DatasetConfig):
manifest_filepath: Optional[Any] = None
sample_rate: int = MISSING
labels: List[str] = MISSING
trim_silence: bool = False
# Tarred dataset support
is_tarred: bool = False
tarred_audio_filepaths: Optional[Any] = None
tarred_shard_strategy: str = "scatter"
shard_manifests: bool = False
shuffle_n: int = 0
# Optional
int_values: Optional[int] = None
augmentor: Optional[Dict[str, Any]] = None
max_duration: Optional[float] = None
min_duration: Optional[float] = None
max_utts: int = 0
blank_index: int = -1
unk_index: int = -1
normalize: bool = False
trim: bool = True
parser: Optional[str] = 'en'
eos_id: Optional[int] = None
bos_id: Optional[int] = None
pad_id: int = 0
use_start_end_token: bool = False
return_sample_id: Optional[bool] = False
# bucketing params
bucketing_strategy: str = "synced_randomized"
bucketing_batch_size: Optional[Any] = None
bucketing_weights: Optional[List[int]] = None
@dataclass
class EncDecCTCConfig(model_cfg.ModelConfig):
# Model global arguments
sample_rate: int = 16000
repeat: int = 1
dropout: float = 0.0
separable: bool = False
labels: List[str] = MISSING
# Dataset configs
train_ds: ASRDatasetConfig = ASRDatasetConfig(manifest_filepath=None, shuffle=True)
validation_ds: ASRDatasetConfig = ASRDatasetConfig(manifest_filepath=None, shuffle=False)
test_ds: ASRDatasetConfig = ASRDatasetConfig(manifest_filepath=None, shuffle=False)
# Optimizer / Scheduler config
optim: Optional[model_cfg.OptimConfig] = model_cfg.OptimConfig(sched=model_cfg.SchedConfig())
# Model component configs
preprocessor: AudioToMelSpectrogramPreprocessorConfig = AudioToMelSpectrogramPreprocessorConfig()
spec_augment: Optional[SpectrogramAugmentationConfig] = SpectrogramAugmentationConfig()
encoder: ConvASREncoderConfig = ConvASREncoderConfig()
decoder: ConvASRDecoderConfig = ConvASRDecoderConfig()
decoding: CTCDecodingConfig = CTCDecodingConfig()
@dataclass
class EncDecCTCModelConfig(model_cfg.NemoConfig):
model: EncDecCTCConfig = EncDecCTCConfig()
@dataclass
class CacheAwareStreamingConfig:
chunk_size: int = 0 # the size of each chunk at each step, it can be a list of two integers to specify different chunk sizes for the first step and others
shift_size: int = 0 # the size of the shift in each step, it can be a list of two integers to specify different shift sizes for the first step and others
cache_drop_size: int = 0 # the number of steps to drop from the cache
last_channel_cache_size: int = 0 # the size of the needed cache for last channel layers
valid_out_len: int = 0 # the number of the steps in the final output which are valid (have the same value as in the offline mode)
pre_encode_cache_size: int = 0 # the size of the needed cache for the pre-encoding part of the model to avoid caching inside the pre-encoding layers
drop_extra_pre_encoded: int = 0 # the number of steps to get dropped after the pre-encoding layer
last_channel_num: int = 0 # number of the last channel layers (like MHA layers) which need caching in the model
last_time_num: int = 0 # number of the last time layers (like convolutions) which need caching in the model
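# Illustrative sketch (assumed values, not from the original source): constructing a cache-aware
# streaming config where every step uses the same chunk and shift sizes.
#
#   streaming_cfg = CacheAwareStreamingConfig(
#       chunk_size=16, shift_size=16, last_channel_cache_size=64, valid_out_len=16
#   )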
|
NeMo-main
|
nemo/collections/asr/models/configs/asr_models_config.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.models.configs.asr_models_config import (
ASRDatasetConfig,
CacheAwareStreamingConfig,
EncDecCTCConfig,
EncDecCTCModelConfig,
)
from nemo.collections.asr.models.configs.classification_models_config import (
EncDecClassificationConfig,
EncDecClassificationDatasetConfig,
EncDecClassificationModelConfig,
)
from nemo.collections.asr.models.configs.diarizer_config import NeuralDiarizerInferenceConfig
from nemo.collections.asr.models.configs.matchboxnet_config import (
EncDecClassificationModelConfigBuilder,
MatchboxNetModelConfig,
MatchboxNetVADModelConfig,
)
from nemo.collections.asr.models.configs.quartznet_config import (
EncDecCTCModelConfigBuilder,
JasperModelConfig,
QuartzNetModelConfig,
)
from nemo.collections.asr.modules.audio_preprocessing import (
AudioToMelSpectrogramPreprocessorConfig,
AudioToMFCCPreprocessorConfig,
CropOrPadSpectrogramAugmentationConfig,
SpectrogramAugmentationConfig,
)
from nemo.collections.asr.modules.conv_asr import (
ConvASRDecoderClassificationConfig,
ConvASRDecoderConfig,
ConvASREncoderConfig,
JasperEncoderConfig,
)
|
NeMo-main
|
nemo/collections/asr/models/configs/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional, Tuple, Union
@dataclass
class DiarizerComponentConfig:
"""Dataclass to imitate HydraConfig dict when accessing parameters."""
def get(self, name: str, default: Optional[Any] = None):
return getattr(self, name, default)
def __iter__(self):
for key in asdict(self):
yield key
def dict(self) -> Dict:
return asdict(self)
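# Minimal usage sketch (illustrative only): thanks to DiarizerComponentConfig, the dataclass
# configs in this file can be read like a Hydra/OmegaConf dict, i.e. `.get()` with a default,
# key iteration, and `.dict()` conversion. The subclass below is hypothetical.
def _example_component_config_access():
    @dataclass
    class _DemoParams(DiarizerComponentConfig):
        threshold: float = 0.5
    params = _DemoParams()
    assert params.get("threshold") == 0.5  # attribute access via .get()
    assert params.get("missing", 1.0) == 1.0  # default is returned for absent keys
    assert list(params) == ["threshold"]  # iteration yields field names
    assert params.dict() == {"threshold": 0.5}  # plain-dict view of the config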
@dataclass
class ASRDiarizerCTCDecoderParams:
pretrained_language_model: Optional[str] = None # KenLM model file: .arpa model file or .bin binary file.
beam_width: int = 32
alpha: float = 0.5
beta: float = 2.5
@dataclass
class ASRRealigningLMParams:
# Provide a KenLM language model in .arpa format.
arpa_language_model: Optional[str] = None
# Min number of words for the left context.
min_number_of_words: int = 3
# Max number of words for the right context.
max_number_of_words: int = 10
# The threshold for the difference between two log probability values from two hypotheses.
logprob_diff_threshold: float = 1.2
@dataclass
class ASRDiarizerParams(DiarizerComponentConfig):
    # If True, speech segmentation for diarization is based on word timestamps from ASR inference.
asr_based_vad: bool = False
# Threshold (in sec) that caps the gap between two words when generating VAD timestamps using ASR based VAD.
asr_based_vad_threshold: float = 1.0
# Batch size can be dependent on each ASR model. Default batch sizes are applied if set to null.
asr_batch_size: Optional[int] = None
    # Native decoder delay. Set to null to use the default value for each ASR model.
decoder_delay_in_sec: Optional[float] = None
    # Offset to set a reference point from the start of the word. The recommended range of values is [-0.05, 0.2].
word_ts_anchor_offset: Optional[float] = None
# Select which part of the word timestamp we want to use. The options are: 'start', 'end', 'mid'.
word_ts_anchor_pos: str = "start"
# Fix the word timestamp using VAD output. You must provide a VAD model to use this feature.
fix_word_ts_with_VAD: bool = False
# If True, use colored text to distinguish speakers in the output transcript.
colored_text: bool = False
# If True, the start and end time of each speaker turn is printed in the output transcript.
print_time: bool = True
    # If True, lines in the output transcript are broken to fit the fixed line width (default is 90 chars).
break_lines: bool = False
@dataclass
class ASRDiarizerConfig(DiarizerComponentConfig):
model_path: Optional[str] = "stt_en_conformer_ctc_large"
parameters: ASRDiarizerParams = ASRDiarizerParams()
ctc_decoder_parameters: ASRDiarizerCTCDecoderParams = ASRDiarizerCTCDecoderParams()
realigning_lm_parameters: ASRRealigningLMParams = ASRRealigningLMParams()
@dataclass
class VADParams(DiarizerComponentConfig):
window_length_in_sec: float = 0.15 # Window length in sec for VAD context input
    shift_length_in_sec: float = 0.01  # Shift length in sec for generating frame-level VAD predictions
    smoothing: Union[str, bool] = "median"  # False or the type of smoothing method (e.g., median)
    overlap: float = 0.5  # Overlap ratio for the overlapped mean/median smoothing filter
    onset: float = 0.1  # Onset threshold for detecting the beginning of speech
    offset: float = 0.1  # Offset threshold for detecting the end of speech
    pad_onset: float = 0.1  # Duration (in sec) added before each speech segment
    pad_offset: float = 0  # Duration (in sec) added after each speech segment
    min_duration_on: float = 0  # Threshold for short non-speech deletion
    min_duration_off: float = 0.2  # Threshold for short speech segment deletion
filter_speech_first: bool = True
@dataclass
class VADConfig(DiarizerComponentConfig):
model_path: str = "vad_multilingual_marblenet" # .nemo local model path or pretrained VAD model name
external_vad_manifest: Optional[str] = None
parameters: VADParams = VADParams()
@dataclass
class SpeakerEmbeddingsParams(DiarizerComponentConfig):
    # Window length(s) in sec (floating-point number): either a single number or a list, e.g., 1.5 or [1.5,1.0,0.5]
window_length_in_sec: Tuple[float] = (1.5, 1.25, 1.0, 0.75, 0.5)
    # Shift length(s) in sec (floating-point number): either a single number or a list, e.g., 0.75 or [0.75,0.5,0.25]
shift_length_in_sec: Tuple[float] = (0.75, 0.625, 0.5, 0.375, 0.25)
    # Weight for each scale: None (for a single scale) or a list with one entry per window/shift scale, e.g., [0.33,0.33,0.33]
multiscale_weights: Tuple[float] = (1, 1, 1, 1, 1)
    # Save speaker embeddings in pickle format. Set to True if the clustering result is used by other models, such as MSDD.
save_embeddings: bool = True
@dataclass
class SpeakerEmbeddingsConfig(DiarizerComponentConfig):
# .nemo local model path or pretrained model name (titanet_large, ecapa_tdnn or speakerverification_speakernet)
model_path: Optional[str] = None
parameters: SpeakerEmbeddingsParams = SpeakerEmbeddingsParams()
@dataclass
class ClusteringParams(DiarizerComponentConfig):
# If True, use num of speakers value provided in manifest file.
oracle_num_speakers: bool = False
# Max number of speakers for each recording. If an oracle number of speakers is passed, this value is ignored.
max_num_speakers: int = 8
# If the number of segments is lower than this number, enhanced speaker counting is activated.
enhanced_count_thres: int = 80
# Determines the range of p-value search: 0 < p <= max_rp_threshold.
max_rp_threshold: float = 0.25
    # The higher the number, the more p-values are examined, at the cost of longer search time.
sparse_search_volume: int = 30
# If True, take a majority vote on multiple p-values to estimate the number of speakers.
maj_vote_spk_count: bool = False
@dataclass
class ClusteringConfig(DiarizerComponentConfig):
parameters: ClusteringParams = ClusteringParams()
@dataclass
class MSDDParams(DiarizerComponentConfig):
    # If True, use the speaker embedding model stored in the checkpoint; otherwise, use the speaker embedding model provided in the config.
use_speaker_model_from_ckpt: bool = True
# Batch size for MSDD inference.
infer_batch_size: int = 25
    # Sigmoid threshold for generating binarized speaker labels. The smaller the value, the more generous the overlap detection.
sigmoid_threshold: Tuple[float] = (0.7,)
    # If True, use the oracle number of speakers and evaluate the F1 score for the given speaker sequences. Default is False.
seq_eval_mode: bool = False
    # If True, break the input audio clip into short sequences and calculate cluster-average embeddings for inference.
split_infer: bool = True
    # The length of each short split sequence when split_infer is True.
diar_window_length: int = 50
    # If the estimated number of speakers is larger than this number, overlapped speech is not estimated.
overlap_infer_spk_limit: int = 5
@dataclass
class MSDDConfig(DiarizerComponentConfig):
model_path: Optional[str] = "diar_msdd_telephonic"
parameters: MSDDParams = MSDDParams()
@dataclass
class DiarizerConfig(DiarizerComponentConfig):
manifest_filepath: Optional[str] = None
out_dir: Optional[str] = None
oracle_vad: bool = False # If True, uses RTTM files provided in the manifest file to get VAD timestamps
collar: float = 0.25 # Collar value for scoring
ignore_overlap: bool = True # Consider or ignore overlap segments while scoring
vad: VADConfig = VADConfig()
speaker_embeddings: SpeakerEmbeddingsConfig = SpeakerEmbeddingsConfig()
clustering: ClusteringConfig = ClusteringConfig()
msdd_model: MSDDConfig = MSDDConfig()
asr: ASRDiarizerConfig = ASRDiarizerConfig()
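# Illustrative sketch (not part of the original file): a diarizer config that scores against
# reference RTTMs using oracle VAD. The paths are placeholders.
def _example_oracle_vad_diarizer_cfg() -> DiarizerConfig:
    return DiarizerConfig(
        manifest_filepath="<path/to/manifest.json>",  # placeholder
        out_dir="<path/to/output_dir>",  # placeholder
        oracle_vad=True,  # use RTTM-derived speech segments instead of a VAD model
        collar=0.25,  # scoring collar, as in the default above
        ignore_overlap=True,  # ignore overlapped segments while scoring
    )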
@dataclass
class NeuralDiarizerInferenceConfig(DiarizerComponentConfig):
diarizer: DiarizerConfig = DiarizerConfig()
device: str = "cpu"
verbose: bool = False
batch_size: int = 64
num_workers: int = 1
sample_rate: int = 16000
name: str = ""
@classmethod
def init_config(cls, diar_model_path: str, vad_model_path: str, map_location: str, verbose: bool):
return NeuralDiarizerInferenceConfig(
DiarizerConfig(
vad=VADConfig(model_path=vad_model_path), msdd_model=MSDDConfig(model_path=diar_model_path),
),
device=map_location,
verbose=verbose,
)
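# Usage sketch (assumption, not from the original file): building an inference config from
# pretrained model names via the classmethod above. The names mirror the defaults used
# elsewhere in this file.
def _example_neural_diarizer_inference_cfg() -> NeuralDiarizerInferenceConfig:
    return NeuralDiarizerInferenceConfig.init_config(
        diar_model_path="diar_msdd_telephonic",
        vad_model_path="vad_multilingual_marblenet",
        map_location="cpu",
        verbose=False,
    )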
|
NeMo-main
|
nemo/collections/asr/models/configs/diarizer_config.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from nemo.collections.asr.parts.k2.classes import GraphModuleConfig
@dataclass
class AlignerCTCConfig:
prob_suppress_index: int = -1
prob_suppress_value: float = 1.0
@dataclass
class AlignerRNNTConfig:
predictor_window_size: int = 0
predictor_step_size: int = 1
@dataclass
class AlignerWrapperModelConfig:
alignment_type: str = "forced"
word_output: bool = True
cpu_decoding: bool = False
decode_batch_size: int = 0
ctc_cfg: AlignerCTCConfig = AlignerCTCConfig()
rnnt_cfg: AlignerRNNTConfig = AlignerRNNTConfig()
@dataclass
class K2AlignerWrapperModelConfig(AlignerWrapperModelConfig):
decoder_module_cfg: GraphModuleConfig = GraphModuleConfig()
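# Illustrative sketch (not part of the original file): a forced-alignment wrapper config with
# token-level output and CPU decoding. The field values are examples only.
def _example_aligner_wrapper_cfg() -> AlignerWrapperModelConfig:
    return AlignerWrapperModelConfig(
        alignment_type="forced",  # forced alignment, as in the default
        word_output=False,  # emit token-level instead of word-level alignments
        cpu_decoding=True,  # run decoding on CPU
        decode_batch_size=8,  # hypothetical decoding batch size
    )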
|
NeMo-main
|
nemo/collections/asr/models/configs/aligner_config.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Callable, List, Optional
from omegaconf import MISSING
from nemo.collections.asr.models.configs import asr_models_config as ctc_cfg
from nemo.collections.asr.modules.audio_preprocessing import (
AudioToMelSpectrogramPreprocessorConfig,
SpectrogramAugmentationConfig,
)
from nemo.collections.asr.modules.conv_asr import ConvASRDecoderConfig, ConvASREncoderConfig, JasperEncoderConfig
from nemo.core.config import modelPT as model_cfg
# fmt: off
def qn_15x5():
config = [
JasperEncoderConfig(filters=256, repeat=1, kernel=[33], stride=[2], dilation=[1], dropout=0.0,
residual=False, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[33], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[33], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[33], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[39], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[39], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[39], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[51], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[51], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[51], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[63], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[63], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[63], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[75], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[75], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[75], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=1, kernel=[87], stride=[1], dilation=[2], dropout=0.0,
residual=False, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=1024, repeat=1, kernel=[1], stride=[1], dilation=[1], dropout=0.0,
residual=False, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False)
]
return config
def jasper_10x5_dr():
config = [
JasperEncoderConfig(filters=256, repeat=1, kernel=[11], stride=[2], dilation=[1], dropout=0.2,
residual=False, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[11], stride=[1], dilation=[1], dropout=0.2,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=256, repeat=5, kernel=[11], stride=[1], dilation=[1], dropout=0.2,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=384, repeat=5, kernel=[13], stride=[1], dilation=[1], dropout=0.2,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=384, repeat=5, kernel=[13], stride=[1], dilation=[1], dropout=0.2,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[17], stride=[1], dilation=[1], dropout=0.2,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=512, repeat=5, kernel=[17], stride=[1], dilation=[1], dropout=0.2,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=640, repeat=5, kernel=[21], stride=[1], dilation=[1], dropout=0.3,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=640, repeat=5, kernel=[21], stride=[1], dilation=[1], dropout=0.3,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=768, repeat=5, kernel=[25], stride=[1], dilation=[1], dropout=0.3,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=768, repeat=5, kernel=[25], stride=[1], dilation=[1], dropout=0.3,
residual=True, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=True, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=896, repeat=1, kernel=[29], stride=[1], dilation=[2], dropout=0.4,
residual=False, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=1024, repeat=1, kernel=[1], stride=[1], dilation=[1], dropout=0.4,
residual=False, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False)
]
return config
# fmt: on
@dataclass
class JasperModelConfig(ctc_cfg.EncDecCTCConfig):
# Model global arguments
sample_rate: int = 16000
repeat: int = 1
dropout: float = 0.0
separable: bool = False
labels: List[str] = MISSING
# Dataset configs
train_ds: ctc_cfg.ASRDatasetConfig = ctc_cfg.ASRDatasetConfig(
manifest_filepath=None, shuffle=True, trim_silence=True
)
validation_ds: ctc_cfg.ASRDatasetConfig = ctc_cfg.ASRDatasetConfig(manifest_filepath=None, shuffle=False)
test_ds: ctc_cfg.ASRDatasetConfig = ctc_cfg.ASRDatasetConfig(manifest_filepath=None, shuffle=False)
# Optimizer / Scheduler config
optim: Optional[model_cfg.OptimConfig] = model_cfg.OptimConfig(sched=model_cfg.SchedConfig())
# Model general component configs
preprocessor: AudioToMelSpectrogramPreprocessorConfig = AudioToMelSpectrogramPreprocessorConfig()
spec_augment: Optional[SpectrogramAugmentationConfig] = SpectrogramAugmentationConfig()
encoder: ConvASREncoderConfig = ConvASREncoderConfig(activation="relu")
decoder: ConvASRDecoderConfig = ConvASRDecoderConfig()
@dataclass
class QuartzNetModelConfig(JasperModelConfig):
separable: bool = True
class EncDecCTCModelConfigBuilder(model_cfg.ModelConfigBuilder):
VALID_CONFIGS = ['quartznet_15x5', 'quartznet_15x5_zh', 'jasper_10x5dr']
def __init__(self, name: str = 'quartznet_15x5', encoder_cfg_func: Optional[Callable[[], List[Any]]] = None):
if name not in EncDecCTCModelConfigBuilder.VALID_CONFIGS:
raise ValueError("`name` must be one of : \n" f"{EncDecCTCModelConfigBuilder.VALID_CONFIGS}")
self.name = name
if 'quartznet_15x5' in name:
if encoder_cfg_func is None:
encoder_cfg_func = qn_15x5
model_cfg = QuartzNetModelConfig(
repeat=5,
separable=True,
spec_augment=SpectrogramAugmentationConfig(rect_masks=5, rect_freq=50, rect_time=120),
encoder=ConvASREncoderConfig(jasper=encoder_cfg_func(), activation="relu"),
decoder=ConvASRDecoderConfig(),
)
elif 'jasper_10x5' in name:
if encoder_cfg_func is None:
encoder_cfg_func = jasper_10x5_dr
model_cfg = JasperModelConfig(
repeat=5,
separable=False,
spec_augment=SpectrogramAugmentationConfig(rect_masks=5, rect_freq=50, rect_time=120),
encoder=ConvASREncoderConfig(jasper=encoder_cfg_func(), activation="relu"),
decoder=ConvASRDecoderConfig(),
)
else:
raise ValueError(f"Invalid config name submitted to {self.__class__.__name__}")
super(EncDecCTCModelConfigBuilder, self).__init__(model_cfg)
self.model_cfg: ctc_cfg.EncDecCTCConfig = model_cfg # enable type hinting
if 'zh' in name:
self.set_dataset_normalize(normalize=False)
def set_labels(self, labels: List[str]):
self.model_cfg.labels = labels
def set_separable(self, separable: bool):
self.model_cfg.separable = separable
def set_repeat(self, repeat: int):
self.model_cfg.repeat = repeat
def set_sample_rate(self, sample_rate: int):
self.model_cfg.sample_rate = sample_rate
def set_dropout(self, dropout: float = 0.0):
self.model_cfg.dropout = dropout
def set_dataset_normalize(self, normalize: bool):
self.model_cfg.train_ds.normalize = normalize
self.model_cfg.validation_ds.normalize = normalize
self.model_cfg.test_ds.normalize = normalize
    # Note: Autocomplete for users won't work without these overrides,
    # but in practice it is not needed since Python will infer the types at runtime
# def set_train_ds(self, cfg: Optional[ctc_cfg.ASRDatasetConfig] = None):
# super().set_train_ds(cfg)
#
# def set_validation_ds(self, cfg: Optional[ctc_cfg.ASRDatasetConfig] = None):
# super().set_validation_ds(cfg)
#
# def set_test_ds(self, cfg: Optional[ctc_cfg.ASRDatasetConfig] = None):
# super().set_test_ds(cfg)
def _finalize_cfg(self):
# propagate labels
self.model_cfg.train_ds.labels = self.model_cfg.labels
self.model_cfg.validation_ds.labels = self.model_cfg.labels
self.model_cfg.test_ds.labels = self.model_cfg.labels
self.model_cfg.decoder.vocabulary = self.model_cfg.labels
# propagate num classes
self.model_cfg.decoder.num_classes = len(self.model_cfg.labels)
# propagate sample rate
self.model_cfg.sample_rate = self.model_cfg.sample_rate
self.model_cfg.preprocessor.sample_rate = self.model_cfg.sample_rate
self.model_cfg.train_ds.sample_rate = self.model_cfg.sample_rate
self.model_cfg.validation_ds.sample_rate = self.model_cfg.sample_rate
self.model_cfg.test_ds.sample_rate = self.model_cfg.sample_rate
# propagate filters
self.model_cfg.encoder.feat_in = self.model_cfg.preprocessor.features
self.model_cfg.decoder.feat_in = self.model_cfg.encoder.jasper[-1].filters
# propagate separable
for layer in self.model_cfg.encoder.jasper[:-1]: # type: JasperEncoderConfig
layer.separable = self.model_cfg.separable
# propagate repeat
for layer in self.model_cfg.encoder.jasper[1:-2]: # type: JasperEncoderConfig
layer.repeat = self.model_cfg.repeat
# propagate dropout
for layer in self.model_cfg.encoder.jasper: # type: JasperEncoderConfig
layer.dropout = self.model_cfg.dropout
def build(self) -> ctc_cfg.EncDecCTCConfig:
return super().build()
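# Usage sketch (assumption, not part of the original file): the builder above is typically
# driven by picking a named preset, setting the vocabulary, and calling `build()` to obtain a
# finalized EncDecCTCConfig. The label set is a made-up example.
def _example_build_quartznet_cfg() -> ctc_cfg.EncDecCTCConfig:
    builder = EncDecCTCModelConfigBuilder(name='quartznet_15x5')
    builder.set_labels([" ", "a", "b", "c"])  # hypothetical character vocabulary
    builder.set_sample_rate(16000)
    return builder.build()  # runs _finalize_cfg() to propagate labels, rates, filters, etc.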
|
NeMo-main
|
nemo/collections/asr/models/configs/quartznet_config.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Callable, List, Optional
from omegaconf import MISSING
from nemo.collections.asr.models.configs import classification_models_config as clf_cfg
from nemo.collections.asr.modules.audio_preprocessing import (
AudioToMFCCPreprocessorConfig,
CropOrPadSpectrogramAugmentationConfig,
SpectrogramAugmentationConfig,
)
from nemo.collections.asr.modules.conv_asr import (
ConvASRDecoderClassificationConfig,
ConvASREncoderConfig,
JasperEncoderConfig,
)
from nemo.core.config import modelPT as model_cfg
# fmt: off
def matchboxnet_3x1x64():
config = [
JasperEncoderConfig(filters=128, repeat=1, kernel=[11], stride=[1], dilation=[1], dropout=0.0,
residual=False, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=64, repeat=1, kernel=[13], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=64, repeat=1, kernel=[15], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=64, repeat=1, kernel=[17], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=128, repeat=1, kernel=[29], stride=[1], dilation=[2], dropout=0.0,
residual=False, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=128, repeat=1, kernel=[1], stride=[1], dilation=[1], dropout=0.0,
residual=False, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False)
]
return config
def matchboxnet_3x1x64_vad():
config = [
JasperEncoderConfig(filters=128, repeat=1, kernel=[11], stride=[1], dilation=[1], dropout=0.0,
residual=False, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=64, repeat=1, kernel=[13], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=64, repeat=1, kernel=[15], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=64, repeat=1, kernel=[17], stride=[1], dilation=[1], dropout=0.0,
residual=True, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=128, repeat=1, kernel=[29], stride=[1], dilation=[2], dropout=0.0,
residual=False, groups=1, separable=True, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False),
JasperEncoderConfig(filters=128, repeat=1, kernel=[1], stride=[1], dilation=[1], dropout=0.0,
residual=False, groups=1, separable=False, heads=-1, residual_mode='add',
residual_dense=False, se=False, se_reduction_ratio=8, se_context_size=-1,
se_interpolation_mode='nearest', kernel_size_factor=1.0, stride_last=False)
]
return config
# fmt: on
@dataclass
class MatchboxNetModelConfig(clf_cfg.EncDecClassificationConfig):
# Model global arguments
sample_rate: int = 16000
repeat: int = 1
dropout: float = 0.0
separable: bool = True
kernel_size_factor: float = 1.0
timesteps: int = 128
labels: List[str] = MISSING
# Dataset configs
train_ds: clf_cfg.EncDecClassificationDatasetConfig = clf_cfg.EncDecClassificationDatasetConfig(
manifest_filepath=None, shuffle=True, trim_silence=False
)
validation_ds: clf_cfg.EncDecClassificationDatasetConfig = clf_cfg.EncDecClassificationDatasetConfig(
manifest_filepath=None, shuffle=False
)
test_ds: clf_cfg.EncDecClassificationDatasetConfig = clf_cfg.EncDecClassificationDatasetConfig(
manifest_filepath=None, shuffle=False
)
# Optimizer / Scheduler config
optim: Optional[model_cfg.OptimConfig] = model_cfg.OptimConfig(sched=model_cfg.SchedConfig())
# Model general component configs
preprocessor: AudioToMFCCPreprocessorConfig = AudioToMFCCPreprocessorConfig(window_size=0.025)
spec_augment: Optional[SpectrogramAugmentationConfig] = SpectrogramAugmentationConfig(
freq_masks=2, time_masks=2, freq_width=15, time_width=25, rect_masks=5, rect_time=25, rect_freq=15
)
crop_or_pad_augment: Optional[CropOrPadSpectrogramAugmentationConfig] = CropOrPadSpectrogramAugmentationConfig(
audio_length=128
)
encoder: ConvASREncoderConfig = ConvASREncoderConfig(activation="relu")
decoder: ConvASRDecoderClassificationConfig = ConvASRDecoderClassificationConfig()
@dataclass
class MatchboxNetVADModelConfig(MatchboxNetModelConfig):
timesteps: int = 64
labels: List[str] = field(default_factory=lambda: ['background', 'speech'])
crop_or_pad_augment: Optional[CropOrPadSpectrogramAugmentationConfig] = None
class EncDecClassificationModelConfigBuilder(model_cfg.ModelConfigBuilder):
VALID_CONFIGS = ['matchboxnet_3x1x64', 'matchboxnet_3x1x64_vad']
def __init__(self, name: str = 'matchboxnet_3x1x64', encoder_cfg_func: Optional[Callable[[], List[Any]]] = None):
if name not in EncDecClassificationModelConfigBuilder.VALID_CONFIGS:
raise ValueError("`name` must be one of : \n" f"{EncDecClassificationModelConfigBuilder.VALID_CONFIGS}")
self.name = name
if 'matchboxnet_3x1x64_vad' in name:
if encoder_cfg_func is None:
encoder_cfg_func = matchboxnet_3x1x64_vad
model_cfg = MatchboxNetVADModelConfig(
repeat=1,
separable=True,
encoder=ConvASREncoderConfig(jasper=encoder_cfg_func(), activation="relu"),
decoder=ConvASRDecoderClassificationConfig(),
)
elif 'matchboxnet_3x1x64' in name:
if encoder_cfg_func is None:
encoder_cfg_func = matchboxnet_3x1x64
model_cfg = MatchboxNetModelConfig(
repeat=1,
separable=False,
spec_augment=SpectrogramAugmentationConfig(rect_masks=5, rect_freq=50, rect_time=120),
encoder=ConvASREncoderConfig(jasper=encoder_cfg_func(), activation="relu"),
decoder=ConvASRDecoderClassificationConfig(),
)
else:
raise ValueError(f"Invalid config name submitted to {self.__class__.__name__}")
super(EncDecClassificationModelConfigBuilder, self).__init__(model_cfg)
self.model_cfg: clf_cfg.EncDecClassificationConfig = model_cfg # enable type hinting
def set_labels(self, labels: List[str]):
self.model_cfg.labels = labels
def set_separable(self, separable: bool):
self.model_cfg.separable = separable
def set_repeat(self, repeat: int):
self.model_cfg.repeat = repeat
def set_sample_rate(self, sample_rate: int):
self.model_cfg.sample_rate = sample_rate
def set_dropout(self, dropout: float = 0.0):
self.model_cfg.dropout = dropout
def set_timesteps(self, timesteps: int):
self.model_cfg.timesteps = timesteps
def set_is_regression_task(self, is_regression_task: bool):
self.model_cfg.is_regression_task = is_regression_task
    # Note: Autocomplete for users won't work without these overrides,
    # but in practice it is not needed since Python will infer the types at runtime
# def set_train_ds(self, cfg: Optional[clf_cfg.EncDecClassificationDatasetConfig] = None):
# super().set_train_ds(cfg)
#
# def set_validation_ds(self, cfg: Optional[clf_cfg.EncDecClassificationDatasetConfig] = None):
# super().set_validation_ds(cfg)
#
# def set_test_ds(self, cfg: Optional[clf_cfg.EncDecClassificationDatasetConfig] = None):
# super().set_test_ds(cfg)
def _finalize_cfg(self):
# propagate labels
self.model_cfg.train_ds.labels = self.model_cfg.labels
self.model_cfg.validation_ds.labels = self.model_cfg.labels
self.model_cfg.test_ds.labels = self.model_cfg.labels
self.model_cfg.decoder.vocabulary = self.model_cfg.labels
# propagate num classes
self.model_cfg.decoder.num_classes = len(self.model_cfg.labels)
# propagate sample rate
self.model_cfg.sample_rate = self.model_cfg.sample_rate
self.model_cfg.preprocessor.sample_rate = self.model_cfg.sample_rate
self.model_cfg.train_ds.sample_rate = self.model_cfg.sample_rate
self.model_cfg.validation_ds.sample_rate = self.model_cfg.sample_rate
self.model_cfg.test_ds.sample_rate = self.model_cfg.sample_rate
# propagate filters
self.model_cfg.encoder.feat_in = self.model_cfg.preprocessor.features
self.model_cfg.decoder.feat_in = self.model_cfg.encoder.jasper[-1].filters
        # propagate timesteps
if self.model_cfg.crop_or_pad_augment is not None:
self.model_cfg.crop_or_pad_augment.audio_length = self.model_cfg.timesteps
# propagate separable
for layer in self.model_cfg.encoder.jasper[:-1]: # type: JasperEncoderConfig
layer.separable = self.model_cfg.separable
# propagate repeat
for layer in self.model_cfg.encoder.jasper[1:-2]: # type: JasperEncoderConfig
layer.repeat = self.model_cfg.repeat
# propagate dropout
for layer in self.model_cfg.encoder.jasper: # type: JasperEncoderConfig
layer.dropout = self.model_cfg.dropout
def build(self) -> clf_cfg.EncDecClassificationConfig:
return super().build()
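# Usage sketch (assumption, not part of the original file): building a MatchboxNet VAD config
# with the builder above. The labels follow the MatchboxNetVADModelConfig defaults.
def _example_build_matchboxnet_vad_cfg() -> clf_cfg.EncDecClassificationConfig:
    builder = EncDecClassificationModelConfigBuilder(name='matchboxnet_3x1x64_vad')
    builder.set_labels(['background', 'speech'])
    builder.set_timesteps(64)
    return builder.build()  # runs _finalize_cfg() to propagate labels, timesteps, filters, etc.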
|
NeMo-main
|
nemo/collections/asr/models/configs/matchboxnet_config.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/parts/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 Ryan Leary
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file contains code artifacts adapted from https://github.com/ryanleary/patter
"""
ALIAS FILE for backward compatibility
"""
from nemo.collections.asr.parts.preprocessing.features import *
|
NeMo-main
|
nemo/collections/asr/parts/features.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
from omegaconf import DictConfig, open_dict
from nemo.core.classes.mixins.adapter_mixins import AdapterModelPTMixin, AdapterModuleMixin
from nemo.utils import logging, logging_mode
class ASRAdapterModelMixin(AdapterModelPTMixin):
""" ASR Adapter Mixin that can augment any Encoder module with Adapter module support.
This mixin class should be used only with a top level ModelPT subclass, that includes an `encoder` submodule.
This mixin class adds several utility methods which are propagated to the `encoder`.
    An Adapter module is any PyTorch nn.Module that possesses a few properties:
    - Its input and output dimensions are the same, while the hidden dimension need not be the same.
- The final layer of the Adapter module is zero-initialized, so that the residual connection to the adapter
yields the original output.
This mixin adds the following instance variables to the class this inherits it:
- `adapter_layer`: A torch.nn.ModuleDict(), whose keys are the names of the adapter (globally unique),
and values are the Adapter nn.Module().
- `adapter_cfg`: A OmegaConf DictConfig object that holds the config of the adapters that are initialized.
- `adapter_global_cfg_key`: A str representing a key in the model config that can be provided by the user.
The value resolves to `global_cfg`, and can be overridden via `model.cfg.adapters.global_cfg.*`.
**Note**: This module **is** responsible for maintaining its config. At the ModelPT level, it will access and
write Adapter config information to `self.cfg.adapters`.
"""
def setup_adapters(self):
"""
Utility method that is called in the ASR ModelPT-implementation constructor, so as to restore any
adapters that were previously added.
This method should be called just once at constructor time.
"""
supports_adapters = False
# At least the encoder must extend AdapterModuleMixin
if hasattr(self, 'encoder') and isinstance(self.encoder, AdapterModuleMixin):
supports_adapters |= True
if hasattr(self, 'decoder') and isinstance(self.decoder, AdapterModuleMixin):
supports_adapters |= True
if hasattr(self, 'joint') and isinstance(self.joint, AdapterModuleMixin):
supports_adapters |= True
# If adapters are supported, setup the adapter config + any modules (pre-existing adapter modules)
if supports_adapters:
super().setup_adapters()
def add_adapter(self, name: str, cfg: DictConfig):
"""
Add an Adapter module to this model.
Args:
name: A globally unique name for the adapter. Will be used to access, enable and disable adapters.
            cfg: A DictConfig that contains at the bare minimum `_target_` to instantiate a new Adapter module.
"""
# setup the config for adapters
super().add_adapter(name=name, cfg=cfg)
# Resolve module name and adapter name
module_name, _ = self.resolve_adapter_module_name_(name)
# Use + as a splitter, in order to share one name across multiple modules
if '+' in module_name:
module_names = module_name.split('+')
else:
module_names = [module_name]
# Update the model.cfg with information about the new adapter from cfg
with open_dict(self.cfg):
for module_name in module_names:
# Check if encoder adapters should be added
if module_name in ('', 'encoder'):
# Dispatch the call to the encoder.
self.encoder.add_adapter(name=name, cfg=cfg)
# Check if decoder adapters should be added
if module_name == 'decoder':
# Dispatch call to the decoder.
self.decoder.add_adapter(name=name, cfg=cfg)
# Check if joint adapters should be added;
# Note: We need additional check if joint even exists in model (for CTC models)
if hasattr(self, 'joint') and module_name == 'joint':
# Dispatch call to the joint.
self.joint.add_adapter(name=name, cfg=cfg)
def is_adapter_available(self) -> bool:
"""
Checks if any Adapter module has been instantiated.
Returns:
            bool, determining if any Adapter module has been instantiated. Returns True if any adapter exists
            (whether enabled or disabled), and False only if no adapters exist.
"""
config_contains_adapter = super().is_adapter_available()
# Forward the method call to the individual modules
if hasattr(self, 'encoder') and isinstance(self.encoder, AdapterModuleMixin):
config_contains_adapter |= self.encoder.is_adapter_available()
if hasattr(self, 'decoder') and isinstance(self.decoder, AdapterModuleMixin):
config_contains_adapter |= self.decoder.is_adapter_available()
if hasattr(self, 'joint') and isinstance(self.joint, AdapterModuleMixin):
config_contains_adapter |= self.joint.is_adapter_available()
return config_contains_adapter
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
"""
        Updates the internal adapter config, determining whether an adapter (or all adapters) is either
enabled or disabled.
A common user pattern would be to disable all adapters (either after adding them, or restoring a model
with pre-existing adapters) and then simply enable one of the adapters.
.. code::
model.set_enabled_adapters(enabled=False)
model.set_enabled_adapters(name=<some adapter name>, enabled=True)
Args:
name: Optional str. If a str name is given, the config will be updated to the value of `enabled`.
If no name is given, then all adapters will be enabled/disabled.
enabled: Bool, determines if the adapter(s) will be enabled/disabled.
"""
super().set_enabled_adapters(name=name, enabled=enabled)
# Resolve the module name and adapter name
if name is not None:
module_name, _ = self.resolve_adapter_module_name_(name)
else:
module_name = None
# Use + as a splitter, in order to share one name across multiple modules
if module_name is not None and '+' in module_name:
module_names = module_name.split('+')
else:
module_names = [module_name]
for module_name in module_names:
# Check if encoder adapters should be used
# Dispatch the call to the encoder.
if name is None or module_name in ('', 'encoder'):
if self.encoder.is_adapter_available():
self.encoder.set_enabled_adapters(name=name, enabled=enabled)
# Dispatch the call to the decoder.
if name is None or module_name == 'decoder':
if self.decoder.is_adapter_available():
self.decoder.set_enabled_adapters(name=name, enabled=enabled)
# Dispatch the call to the joint.
# Note: We need additional check for joint, since it may not exist (CTC models).
if name is None or module_name == 'joint':
if hasattr(self, 'joint') and self.joint.is_adapter_available():
self.joint.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
"""
Returns a list of all enabled adapters.
Returns:
A list of str names of each enabled adapter(s).
"""
enabled_adapters = super().get_enabled_adapters()
# Check if encoder adapters should be used or are enabled
if hasattr(self, 'encoder') and isinstance(self.encoder, AdapterModuleMixin):
enabled_adapters.extend(self.encoder.get_enabled_adapters())
if hasattr(self, 'decoder') and isinstance(self.decoder, AdapterModuleMixin):
enabled_adapters.extend(self.decoder.get_enabled_adapters())
if hasattr(self, 'joint') and isinstance(self.joint, AdapterModuleMixin):
enabled_adapters.extend(self.joint.get_enabled_adapters())
        enabled_adapters = sorted(set(enabled_adapters))
return enabled_adapters
def check_valid_model_with_adapter_support_(self):
"""
Utility method to test if the subclass of this mixin is an appropriate subclass of ModelPT itself.
"""
# Obtain the global adapter config if possible, otherwise use sensible defaults.
global_cfg = self._get_global_cfg()
# Test whether the encoder supports adapters
use_encoder_adapter = global_cfg.get('check_encoder_adapter', True)
if use_encoder_adapter:
if not hasattr(self, 'encoder'):
logging.warning(
"Cannot add adapter to this object as it does not have an `encoder` sub-module!",
mode=logging_mode.ONCE,
)
if hasattr(self, 'encoder') and not isinstance(self.encoder, AdapterModuleMixin):
logging.warning(
f'{self.encoder.__class__.__name__} does not implement `AdapterModuleMixin`',
mode=logging_mode.ONCE,
)
# Test whether the decoder supports adapters
use_decoder_adapter = global_cfg.get('check_decoder_adapter', True)
if use_decoder_adapter:
if not hasattr(self, 'decoder'):
logging.warning(
"Cannot add adapter to this object as it does not have an `decoder` sub-module!",
mode=logging_mode.ONCE,
)
if hasattr(self, 'decoder') and not isinstance(self.decoder, AdapterModuleMixin):
logging.warning(
f'{self.decoder.__class__.__name__} does not implement `AdapterModuleMixin`',
mode=logging_mode.ONCE,
)
# Test whether the joint supports adapters
use_joint_adapter = global_cfg.get('check_joint_adapter', True)
if use_joint_adapter:
# Joint is only for RNNT models, skip assertion that it must always exist.
if hasattr(self, 'joint') and not isinstance(self.joint, AdapterModuleMixin):
logging.warning(
f'{self.joint.__class__.__name__} does not implement `AdapterModuleMixin`', mode=logging_mode.ONCE
)
def resolve_adapter_module_name_(self, name: str) -> Tuple[str, str]:
"""
Utility method to resolve a given global/module adapter name to its components.
Always returns a tuple representing (module_name, adapter_name). ":" is used as the
delimiter for denoting the module name vs the adapter name.
Will attempt to also resolve a given adapter_name alone back to (module_name, adapter_name)
if the metadata config exists for access.
Args:
name: A global adapter, or a module adapter name (with structure module_name:adapter_name).
Returns:
A tuple representing (module_name, adapter_name). If a global adapter is provided,
module_name is set to ''.
"""
module_name, adapter_name = super().resolve_adapter_module_name_(name)
# Use + as a splitter, in order to share one name across multiple modules
if '+' in module_name:
module_names = module_name.split('+')
else:
module_names = [module_name]
# resolve name and module only for valid modules
valid_module_names = self.adapter_module_names
for mod_name in module_names:
if mod_name not in valid_module_names:
raise ValueError(f"Provided module name `{mod_name}` is not in valid list : {valid_module_names}")
return (module_name, adapter_name)
def _get_global_cfg(self):
"""
Utility method, to either extract or construct the global config inside adapters config.
"""
global_config = DictConfig({})
if 'adapters' in self.cfg and self.adapter_global_cfg_key in self.cfg.adapters:
global_config = self.adapter_cfg[self.adapter_global_cfg_key]
return global_config
@property
def adapter_module_names(self) -> List[str]:
valid_module_names = ['', 'encoder', 'decoder', 'joint']
return valid_module_names
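# Usage sketch (assumption, not part of the original file): a model inheriting this mixin can
# register an adapter on a specific module by prefixing the adapter name with the module name.
# The adapter `_target_` path and its parameters below are hypothetical; any DictConfig that
# resolves to a valid adapter nn.Module would work.
def _example_add_encoder_adapter(model: ASRAdapterModelMixin):
    adapter_cfg = DictConfig(
        {
            '_target_': 'nemo.collections.common.parts.adapter_modules.LinearAdapter',  # assumed path
            'in_features': 512,  # must match the encoder hidden size (example value)
            'dim': 32,  # adapter bottleneck dimension (example value)
        }
    )
    model.add_adapter(name='encoder:my_adapter', cfg=adapter_cfg)
    model.set_enabled_adapters(enabled=False)  # disable all adapters
    model.set_enabled_adapters(name='my_adapter', enabled=True)  # enable only the new one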
|
NeMo-main
|
nemo/collections/asr/parts/mixins/asr_adapter_mixins.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from typing import List
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
import nemo.collections.asr.models as asr_models
from nemo.collections.asr.parts.mixins.asr_adapter_mixins import ASRAdapterModelMixin
from nemo.collections.asr.parts.mixins.streaming import StreamingEncoder
from nemo.collections.asr.parts.utils import asr_module_utils
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.collections.common import tokenizers
from nemo.utils import logging
class ASRBPEMixin(ABC):
""" ASR BPE Mixin class that sets up a Tokenizer via a config
This mixin class adds the method `_setup_tokenizer(...)`, which can be used by ASR models
which depend on subword tokenization.
The setup_tokenizer method adds the following parameters to the class -
- tokenizer_cfg: The resolved config supplied to the tokenizer (with `dir` and `type` arguments).
- tokenizer_dir: The directory path to the tokenizer vocabulary + additional metadata.
- tokenizer_type: The type of the tokenizer. Currently supports `bpe` and `wpe`, as well as `agg`.
- vocab_path: Resolved path to the vocabulary text file.
In addition to these variables, the method will also instantiate and preserve a tokenizer
(subclass of TokenizerSpec) if successful, and assign it to self.tokenizer.
The mixin also supports aggregate tokenizers, which consist of ordinary, monolingual tokenizers.
    If a conversion between a monolingual and an aggregate tokenizer (or vice versa) is detected,
all registered artifacts will be cleaned up.
"""
# this will be used in configs and nemo artifacts
AGGREGATE_TOKENIZERS_DICT_PREFIX = 'langs'
def _setup_tokenizer(self, tokenizer_cfg: DictConfig):
tokenizer_type = tokenizer_cfg.get('type')
if tokenizer_type is None:
raise ValueError("`tokenizer.type` cannot be None")
elif tokenizer_type.lower() == 'agg':
self._setup_aggregate_tokenizer(tokenizer_cfg)
else:
self._setup_monolingual_tokenizer(tokenizer_cfg)
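    # Usage sketch (assumption, not from the original file): for a monolingual SentencePiece
    # tokenizer, the config passed to `_setup_tokenizer` typically only needs a directory and
    # a type, e.g.:
    #     tokenizer_cfg = DictConfig({'dir': '<path/to/tokenizer_dir>', 'type': 'bpe'})
    #     self._setup_tokenizer(tokenizer_cfg)
    # For an aggregate tokenizer, `type` is 'agg' and per-language sub-configs live under the
    # `langs` key (see AGGREGATE_TOKENIZERS_DICT_PREFIX above).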
def _setup_monolingual_tokenizer(self, tokenizer_cfg: DictConfig):
# Prevent tokenizer parallelism (unless user has explicitly set it)
if 'TOKENIZERS_PARALLELISM' not in os.environ:
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
self.tokenizer_cfg = OmegaConf.to_container(tokenizer_cfg, resolve=True) # type: dict
self.tokenizer_dir = self.tokenizer_cfg.pop('dir') # Remove tokenizer directory
self.tokenizer_type = self.tokenizer_cfg.pop('type').lower() # Remove tokenizer_type
self.hf_tokenizer_kwargs = self.tokenizer_cfg.pop("hf_kwargs", {}) # Remove HF tokenizer kwargs
# just in case the previous tokenizer was an aggregate
self._cleanup_aggregate_config_and_artifacts_if_needed()
# Preserve config
if hasattr(self, 'cfg') and 'tokenizer' in self.cfg:
self.cfg.tokenizer.dir = self.tokenizer_dir
self.cfg.tokenizer.type = self.tokenizer_type
if 'hf_kwargs' in tokenizer_cfg:
with open_dict(self.cfg.tokenizer):
self.cfg.tokenizer.hf_kwargs = tokenizer_cfg.get('hf_kwargs')
if self.tokenizer_type not in ['bpe', 'wpe']:
raise ValueError(
"`tokenizer.type` must be either `bpe` for SentencePiece tokenizer or "
"`wpe` for BERT based tokenizer"
)
if self.tokenizer_type == 'bpe':
# This is a BPE Tokenizer
if 'model_path' in self.tokenizer_cfg:
model_path = self.tokenizer_cfg.get('model_path')
else:
model_path = os.path.join(self.tokenizer_dir, 'tokenizer.model')
model_path = self.register_artifact('tokenizer.model_path', model_path)
self.model_path = model_path
if 'special_tokens' in self.tokenizer_cfg:
special_tokens = self.tokenizer_cfg['special_tokens']
if special_tokens is not None:
raise ValueError("`special_tokens` are no longer supported for SentencePiece based tokenizers.")
# Update special tokens
self.tokenizer = tokenizers.SentencePieceTokenizer(model_path=model_path)
if 'vocab_path' in self.tokenizer_cfg:
vocab_path = self.tokenizer_cfg.get('vocab_path')
else:
vocab_path = os.path.join(self.tokenizer_dir, 'vocab.txt')
vocab_path = self.register_artifact('tokenizer.vocab_path', vocab_path)
self.vocab_path = vocab_path
try:
if 'spe_tokenizer_vocab' in self.tokenizer_cfg:
spe_vocab_path = self.tokenizer_cfg.get('spe_tokenizer_vocab')
else:
spe_vocab_path = os.path.join(self.tokenizer_dir, 'tokenizer.vocab')
spe_vocab_path = self.register_artifact('tokenizer.spe_tokenizer_vocab', spe_vocab_path)
self.spe_vocab_path = spe_vocab_path
except FileNotFoundError:
# fallback case for older checkpoints that did not preserve the tokenizer.vocab
self.spe_vocab_path = None
vocabulary = {}
for i in range(self.tokenizer.vocab_size):
piece = self.tokenizer.ids_to_tokens([i])
piece = piece[0]
vocabulary[piece] = i + 1
# wrapper method to get vocabulary conveniently
def get_vocab():
return vocabulary
# attach utility values to the tokenizer wrapper
self.tokenizer.tokenizer.vocab_size = len(vocabulary)
self.tokenizer.tokenizer.get_vocab = get_vocab
self.tokenizer.tokenizer.all_special_tokens = self.tokenizer.special_token_to_id
else:
# This is a WPE Tokenizer
# If path from previous registration exists, remove it
if 'vocab_path' in self.tokenizer_cfg:
vocab_path = self.tokenizer_cfg.get('vocab_path')
else:
vocab_path = os.path.join(self.tokenizer_dir, 'vocab.txt')
vocab_path = self.register_artifact('tokenizer.vocab_path', vocab_path)
self.vocab_path = vocab_path
# If path from previous registration exists, remove it
if 'vocab_path' in self.tokenizer_cfg:
self.tokenizer_cfg.pop('vocab_path')
self.tokenizer = tokenizers.AutoTokenizer(
pretrained_model_name='bert-base-cased',
vocab_file=self.vocab_path,
mask_token=self.hf_tokenizer_kwargs.get('mask_token', None),
bos_token=self.hf_tokenizer_kwargs.get('bos_token', None),
eos_token=self.hf_tokenizer_kwargs.get('eos_token', None),
pad_token=self.hf_tokenizer_kwargs.get('pad_token', None),
sep_token=self.hf_tokenizer_kwargs.get('sep_token', None),
cls_token=self.hf_tokenizer_kwargs.get('cls_token', None),
unk_token=self.hf_tokenizer_kwargs.get('unk_token', None),
use_fast=self.hf_tokenizer_kwargs.get('use_fast', False),
)
logging.info(
"Tokenizer {} initialized with {} tokens".format(
self.tokenizer.__class__.__name__, self.tokenizer.vocab_size
)
)
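# Illustrative sketch (values are placeholders, not taken from a shipped NeMo config): the
# monolingual branch above expects a `tokenizer` config section roughly of this shape.
#
#   tokenizer:
#     dir: /path/to/tokenizer_dir   # hypothetical dir containing tokenizer.model / vocab.txt
#     type: bpe                     # 'bpe' (SentencePiece) or 'wpe' (BERT WordPiece)
#     hf_kwargs:                    # optional, only consumed by the 'wpe' branch
#       use_fast: false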
def _setup_aggregate_tokenizer(self, tokenizer_cfg: DictConfig):
# Prevent tokenizer parallelism (unless user has explicitly set it)
if 'TOKENIZERS_PARALLELISM' not in os.environ:
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
self.tokenizer_cfg = OmegaConf.to_container(tokenizer_cfg, resolve=True) # type: dict
# the aggregate tokenizer does not have one tokenizer_dir but multiple ones
self.tokenizer_dir = None
self.tokenizer_cfg.pop('dir', None) # Remove tokenizer directory, if any
# Remove tokenizer_type -- obviously if we are here, the type is 'agg'
self.tokenizer_type = self.tokenizer_cfg.pop('type').lower()
# the aggregate tokenizer should not have these
self.hf_tokenizer_kwargs = {}
self.tokenizer_cfg.pop("hf_kwargs", {}) # Remove HF tokenizer kwargs, if any
logging.info('_setup_tokenizer: detected an aggregate tokenizer')
# need to de-register any monolingual config items if they exist
self._cleanup_monolingual_and_aggregate_config_and_artifacts_if_needed()
# overwrite tokenizer type
if hasattr(self, 'cfg') and 'tokenizer' in self.cfg:
self.cfg.tokenizer.type = self.tokenizer_type
tokenizers_dict = {}
# init each of the monolingual tokenizers found in the config and assemble into AggregateTokenizer
for lang, tokenizer_config in self.tokenizer_cfg[self.AGGREGATE_TOKENIZERS_DICT_PREFIX].items():
(tokenizer, model_path, vocab_path, spe_vocab_path,) = self._make_tokenizer(tokenizer_config, lang)
tokenizers_dict[lang] = tokenizer
if hasattr(self, 'cfg'):
with open_dict(self.cfg.tokenizer):
self.cfg.tokenizer[self.AGGREGATE_TOKENIZERS_DICT_PREFIX][lang]['dir'] = self.tokenizer_cfg[
self.AGGREGATE_TOKENIZERS_DICT_PREFIX
][lang]['dir']
self.cfg.tokenizer[self.AGGREGATE_TOKENIZERS_DICT_PREFIX][lang]['type'] = self.tokenizer_cfg[
self.AGGREGATE_TOKENIZERS_DICT_PREFIX
][lang]['type']
self.tokenizer = tokenizers.AggregateTokenizer(tokenizers_dict)
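# Illustrative sketch (language codes and paths are placeholders): an aggregate tokenizer
# config declares `type: agg` and one monolingual sub-config per language under the key
# given by self.AGGREGATE_TOKENIZERS_DICT_PREFIX.
#
#   tokenizer:
#     type: agg
#     <AGGREGATE_TOKENIZERS_DICT_PREFIX>:
#       en:
#         dir: /path/to/en_tokenizer   # hypothetical
#         type: bpe
#       es:
#         dir: /path/to/es_tokenizer   # hypothetical
#         type: bpe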
def _make_tokenizer(self, tokenizer_cfg: DictConfig, lang=None):
tokenizer_type = tokenizer_cfg.get('type').lower()
tokenizer_dir = tokenizer_cfg.get('dir')
if tokenizer_type not in ['bpe', 'wpe']:
raise ValueError(
'`tokenizer.type` must be either `bpe` for SentencePiece tokenizer or `wpe` for BERT based tokenizer'
)
# defaults
model_path = None
vocab_path = None
spe_vocab_path = None
if tokenizer_type == 'bpe':
# This is a BPE Tokenizer
if 'model_path' in tokenizer_cfg:
model_path = tokenizer_cfg.get('model_path')
else:
model_path = os.path.join(tokenizer_dir, 'tokenizer.model')
model_path = self.register_artifact(
'tokenizer.' + self.AGGREGATE_TOKENIZERS_DICT_PREFIX + '.' + lang + '.model_path', model_path
)
if 'special_tokens' in tokenizer_cfg:
special_tokens = tokenizer_cfg['special_tokens']
if special_tokens is not None:
raise ValueError('`special_tokens` are no longer supported for SentencePiece based tokenizers.')
# Update special tokens
tokenizer = tokenizers.SentencePieceTokenizer(model_path=model_path)
if 'vocab_path' in tokenizer_cfg:
vocab_path = tokenizer_cfg.get('vocab_path')
else:
vocab_path = os.path.join(tokenizer_dir, 'vocab.txt')
vocab_path = self.register_artifact(
'tokenizer.' + self.AGGREGATE_TOKENIZERS_DICT_PREFIX + '.' + lang + '.vocab_path', vocab_path
)
try:
if 'spe_tokenizer_vocab' in tokenizer_cfg:
spe_vocab_path = tokenizer_cfg.get('spe_tokenizer_vocab')
else:
spe_vocab_path = os.path.join(tokenizer_dir, 'tokenizer.vocab')
spe_vocab_path = self.register_artifact(
'tokenizer.' + self.AGGREGATE_TOKENIZERS_DICT_PREFIX + '.' + lang + '.spe_tokenizer_vocab',
spe_vocab_path,
)
except FileNotFoundError:
# fallback case for older checkpoints that did not preserve the tokenizer.vocab
spe_vocab_path = None
vocabulary = {}
for i in range(tokenizer.vocab_size):
piece = tokenizer.ids_to_tokens([i])
piece = piece[0]
vocabulary[piece] = i + 1
# wrapper method to get vocabulary conveniently
def get_vocab():
return vocabulary
# attach utility values to the tokenizer wrapper
tokenizer.tokenizer.vocab_size = len(vocabulary)
tokenizer.tokenizer.get_vocab = get_vocab
tokenizer.tokenizer.all_special_tokens = tokenizer.special_token_to_id
else:
# This is a WPE Tokenizer
# If path from previous registration exists, remove it
if 'vocab_path' in tokenizer_cfg:
vocab_path = tokenizer_cfg.get('vocab_path')
else:
vocab_path = os.path.join(tokenizer_dir, 'vocab.txt')
vocab_path = self.register_artifact(
'tokenizer.' + self.AGGREGATE_TOKENIZERS_DICT_PREFIX + '.' + lang + '.vocab_path', vocab_path
)
# If path from previous registration exists, remove it
if 'vocab_path' in tokenizer_cfg:
tokenizer_cfg.pop('vocab_path')
hf_tokenizer_kwargs = tokenizer_cfg.get('hf_kwargs', {})
tokenizer = tokenizers.AutoTokenizer(
pretrained_model_name='bert-base-cased',
vocab_file=vocab_path,
mask_token=hf_tokenizer_kwargs.get('mask_token', None),
bos_token=hf_tokenizer_kwargs.get('bos_token', None),
eos_token=hf_tokenizer_kwargs.get('eos_token', None),
pad_token=hf_tokenizer_kwargs.get('pad_token', None),
sep_token=hf_tokenizer_kwargs.get('sep_token', None),
cls_token=hf_tokenizer_kwargs.get('cls_token', None),
unk_token=hf_tokenizer_kwargs.get('unk_token', None),
use_fast=hf_tokenizer_kwargs.get('use_fast', False),
)
logging.info(
'Tokenizer {} initialized with {} tokens'.format(tokenizer.__class__.__name__, tokenizer.vocab_size)
)
return tokenizer, model_path, vocab_path, spe_vocab_path
def _cleanup_monolingual_and_aggregate_config_and_artifacts_if_needed(self):
"""
Cleans up any monolingual and some aggregate config items and artifacts.
We need to do this when we switch from a monolingual tokenizer to an aggregate one
or go between aggregate tokenizers which could have a different number of languages.
"""
if hasattr(self, 'cfg'):
with open_dict(self.cfg.tokenizer):
self.cfg.tokenizer.pop('dir', None)
self.cfg.tokenizer.pop('model_path', None)
self.cfg.tokenizer.pop('vocab_path', None)
self.cfg.tokenizer.pop('spe_tokenizer_vocab', None)
self.cfg.tokenizer.pop('hf_kwargs', None)
# need to de-register any monolingual artifacts if they exist
if hasattr(self, 'artifacts'):
self.artifacts.pop('tokenizer.model_path', None)
self.artifacts.pop('tokenizer.vocab_path', None)
self.artifacts.pop('tokenizer.spe_tokenizer_vocab', None)
# just in case we are replacing one aggregate tokenizer with another one, we better
# clean up the old aggregate artifacts as well
for akey in list(self.artifacts.keys()):
if akey.startswith('tokenizer.' + self.AGGREGATE_TOKENIZERS_DICT_PREFIX + '.'):
self.artifacts.pop(akey)
def _cleanup_aggregate_config_and_artifacts_if_needed(self):
"""
Cleans up any aggregate config items and artifacts.
We need to do this when we switch from an aggregate tokenizer to a monolingual one.
"""
if hasattr(self, 'cfg'):
with open_dict(self.cfg.tokenizer):
self.cfg.tokenizer.pop(self.AGGREGATE_TOKENIZERS_DICT_PREFIX, None)
# clean up the old aggregate artifacts as well
if hasattr(self, 'artifacts'):
for akey in list(self.artifacts.keys()):
if akey.startswith('tokenizer.' + self.AGGREGATE_TOKENIZERS_DICT_PREFIX + '.'):
self.artifacts.pop(akey)
class ASRModuleMixin(ASRAdapterModelMixin):
"""
ASRModuleMixin is a mixin class added to ASR models in order to add methods that are specific
to a particular instantiation of a module inside of an ASRModel.
Each method should first check that the module is present within the subclass, and support additional
functionality if the corresponding module is present.
"""
def change_conv_asr_se_context_window(self, context_window: int, update_config: bool = True):
"""
Update the context window of the SqueezeExcitation module if the provided model contains an
`encoder` which is an instance of `ConvASREncoder`.
Args:
context_window: An integer representing the number of input timeframes that will be used
to compute the context. Each timeframe corresponds to a single window stride of the
STFT features.
Say the window_stride = 0.01s, then a context window of 128 represents 128 * 0.01 s
of context to compute the Squeeze step.
update_config: Whether to update the config or not with the new context window.
"""
asr_module_utils.change_conv_asr_se_context_window(
self, context_window=context_window, update_config=update_config
)
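# Hypothetical usage sketch (not part of this mixin): for a model whose encoder is a
# ConvASREncoder with SqueezeExcitation blocks and a 0.01 s window stride, the call below
# would switch SE to roughly 2.56 s of context. `model` is assumed, not defined here.
#
#   model.change_conv_asr_se_context_window(context_window=256, update_config=True)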
def change_attention_model(
self, self_attention_model: str = None, att_context_size: List[int] = None, update_config: bool = True
):
"""
Update the self_attention_model if function is available in encoder.
Args:
self_attention_model (str): type of the attention layer and positional encoding
'rel_pos': relative positional embedding and Transformer-XL
'rel_pos_local_attn': relative positional embedding and Transformer-XL with local attention using
overlapping windows. Attention context is determined by att_context_size parameter.
'abs_pos': absolute positional embedding and Transformer
If None is provided, the self_attention_model isn't changed. Defaults to None.
att_context_size (List[int]): List of 2 ints corresponding to left and right attention context sizes,
or None to keep as it is. Defaults to None.
update_config (bool): Whether to update the config or not with the new attention model.
Defaults to True.
"""
if self_attention_model is None and att_context_size is None:
return
if not hasattr(self, 'encoder'):
logging.info(
"Could not change the self_attention_model in encoder "
"since the model provided does not contain an `encoder` module in its config."
)
return
if not hasattr(self.encoder, "change_attention_model"):
logging.info("Model encoder doesn't have a change_attention_model method ")
return
self.encoder.change_attention_model(self_attention_model, att_context_size, update_config, self.device)
if update_config:
with open_dict(self.cfg):
self.cfg.encoder.self_attention_model = self_attention_model
self.cfg.encoder.att_context_size = att_context_size
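# Hypothetical usage sketch: switch a cache-aware Conformer encoder to local attention with
# 128 frames of left and right context. `model` is assumed, not defined here.
#
#   model.change_attention_model(
#       self_attention_model="rel_pos_local_attn", att_context_size=[128, 128]
#   )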
def change_subsampling_conv_chunking_factor(
self, subsampling_conv_chunking_factor: int, update_config: bool = True
):
"""
Update the subsampling_conv_chunking_factor (int) if the function is available in the encoder.
Default is 1 (auto).
Set it to -1 (disabled) or to a specific value (a power of 2) if you hit OOM in the conv subsampling layers.
Args:
subsampling_conv_chunking_factor (int): the new chunking factor to use in the conv subsampling layers.
"""
if not hasattr(self, 'encoder'):
logging.info(
"Could not call the change_subsampling_conv_chunking_factor method in encoder "
"since the model provided does not contain an `encoder` module in its config."
)
return
if not hasattr(self.encoder, "change_subsampling_conv_chunking_factor"):
logging.info("Model encoder doesn't have a change_subsampling_conv_chunking_factor method ")
return
self.encoder.change_subsampling_conv_chunking_factor(subsampling_conv_chunking_factor)
if update_config:
with open_dict(self.cfg):
self.cfg.encoder.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor
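# Hypothetical usage sketch: force a power-of-2 chunking factor (or -1 to disable chunking)
# when the conv subsampling layers run out of memory on long inputs. `model` is assumed.
#
#   model.change_subsampling_conv_chunking_factor(subsampling_conv_chunking_factor=4)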
def conformer_stream_step(
self,
processed_signal: torch.Tensor,
processed_signal_length: torch.Tensor = None,
cache_last_channel: torch.Tensor = None,
cache_last_time: torch.Tensor = None,
cache_last_channel_len: torch.Tensor = None,
keep_all_outputs: bool = True,
previous_hypotheses: List[Hypothesis] = None,
previous_pred_out: torch.Tensor = None,
drop_extra_pre_encoded: int = None,
return_transcription: bool = True,
return_log_probs: bool = False,
):
"""
It simulates a forward step with caching for streaming purposes.
It supports the ASR models where their encoder supports streaming like Conformer.
Args:
processed_signal: the input audio signals
processed_signal_length: the length of the audios
cache_last_channel: the cache tensor for last channel layers like MHA
cache_last_channel_len: lengths for cache_last_channel
cache_last_time: the cache tensor for last time layers like convolutions
keep_all_outputs: if set to True, the extra outputs specified by encoder.streaming_cfg.valid_out_len are not dropped
previous_hypotheses: the hypotheses from the previous step for RNNT models
previous_pred_out: the predicted outputs from the previous step for CTC models
drop_extra_pre_encoded: number of steps to drop from the beginning of the outputs after the downsampling module. This can be used if extra paddings are added on the left side of the input.
return_transcription: whether to decode and return the transcriptions. It cannot be disabled for Transducer models.
return_log_probs: whether to return the log probs, only valid for CTC models
Returns:
greedy_predictions: the greedy predictions from the decoder
all_hyp_or_transcribed_texts: the decoder hypotheses for Transducer models and the transcriptions for CTC models
cache_last_channel_next: the updated tensor cache for last channel layers to be used for next streaming step
cache_last_time_next: the updated tensor cache for last time layers to be used for next streaming step
cache_last_channel_next_len: the updated lengths for cache_last_channel
best_hyp: the best hypotheses for the Transducer models
log_probs: the logits tensor of current streaming chunk, only returned when return_log_probs=True
encoded_len: the length of the output log_probs + history chunk log_probs, only returned when return_log_probs=True
"""
if not isinstance(self, asr_models.EncDecRNNTModel) and not isinstance(self, asr_models.EncDecCTCModel):
raise NotImplementedError(f"stream_step does not support {type(self)}!")
if not isinstance(self.encoder, StreamingEncoder):
raise NotImplementedError(f"Encoder of this model does not support streaming!")
if isinstance(self, asr_models.EncDecRNNTModel) and return_transcription is False:
logging.info(
"return_transcription can not be False for Transducer models as decoder returns the transcriptions too."
)
if not isinstance(self, asr_models.EncDecCTCModel) and return_log_probs is True:
logging.info("return_log_probs can only be True for CTC models.")
(
encoded,
encoded_len,
cache_last_channel_next,
cache_last_time_next,
cache_last_channel_next_len,
) = self.encoder.cache_aware_stream_step(
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
keep_all_outputs=keep_all_outputs,
drop_extra_pre_encoded=drop_extra_pre_encoded,
)
if isinstance(self, asr_models.EncDecCTCModel) or (
isinstance(self, asr_models.EncDecHybridRNNTCTCModel) and self.cur_decoder == "ctc"
):
if hasattr(self, "ctc_decoder"):
decoding = self.ctc_decoding
decoder = self.ctc_decoder
else:
decoding = self.decoding
decoder = self.decoder
log_probs = decoder(encoder_output=encoded)
predictions_tensor = log_probs.argmax(dim=-1, keepdim=False)
# Concatenate the previous predictions with the current one to have the full predictions.
# We drop the extra predictions for each sample by using the lengths returned by the encoder (encoded_len)
# Then create a list of the predictions for the batch. The predictions can have different lengths because of the paddings.
greedy_predictions = []
if return_transcription:
all_hyp_or_transcribed_texts = []
else:
all_hyp_or_transcribed_texts = None
for preds_idx, preds in enumerate(predictions_tensor):
if encoded_len is None:
preds_cur = predictions_tensor[preds_idx]
else:
preds_cur = predictions_tensor[preds_idx, : encoded_len[preds_idx]]
if previous_pred_out is not None:
greedy_predictions_concat = torch.cat((previous_pred_out[preds_idx], preds_cur), dim=-1)
encoded_len[preds_idx] += len(previous_pred_out[preds_idx])
else:
greedy_predictions_concat = preds_cur
greedy_predictions.append(greedy_predictions_concat)
# TODO: make decoding more efficient by avoiding the decoding process from the beginning
if return_transcription:
decoded_out = decoding.ctc_decoder_predictions_tensor(
decoder_outputs=greedy_predictions_concat.unsqueeze(0),
decoder_lengths=encoded_len[preds_idx : preds_idx + 1],
return_hypotheses=False,
)
all_hyp_or_transcribed_texts.append(decoded_out[0][0])
best_hyp = None
else:
best_hyp, all_hyp_or_transcribed_texts = self.decoding.rnnt_decoder_predictions_tensor(
encoder_output=encoded,
encoded_lengths=encoded_len,
return_hypotheses=True,
partial_hypotheses=previous_hypotheses,
)
greedy_predictions = [hyp.y_sequence for hyp in best_hyp]
if all_hyp_or_transcribed_texts is None:
all_hyp_or_transcribed_texts = best_hyp
result = [
greedy_predictions,
all_hyp_or_transcribed_texts,
cache_last_channel_next,
cache_last_time_next,
cache_last_channel_next_len,
best_hyp,
]
if return_log_probs:
result.append(log_probs)
result.append(encoded_len)
return tuple(result)
@torch.no_grad()
def transcribe_simulate_cache_aware_streaming(
self,
paths2audio_files: List[str],
batch_size: int = 4,
logprobs: bool = False,
return_hypotheses: bool = False,
online_normalization: bool = False,
):
"""
Args:
paths2audio_files: (a list) of paths to audio files.
batch_size: (int) batch size to use during inference.
Bigger will result in better throughput performance but would use more memory.
logprobs: (bool) pass True to get log probabilities instead of transcripts.
return_hypotheses: (bool) Either return hypotheses or text
With hypotheses, one can do postprocessing such as getting timestamps or rescoring
online_normalization: (bool) Perform normalization on the run per chunk.
Returns:
A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files
"""
if paths2audio_files is None or len(paths2audio_files) == 0:
return {}
if return_hypotheses and logprobs:
raise ValueError(
"Either `return_hypotheses` or `logprobs` can be True at any given time."
"Returned hypotheses will contain the logprobs."
)
if not isinstance(self, asr_models.EncDecCTCModel):
raise NotImplementedError(f"simulate streaming does not support {type(self)}!")
if not isinstance(self.encoder, StreamingEncoder):
raise NotImplementedError(f"Encoder of this model does not support streaming!")
data_loader = self._setup_streaming_transcribe_dataloader(paths2audio_files, batch_size, online_normalization)
total_log_probs = []
total_texts = []
for streaming_buffer in data_loader:
streaming_buffer_iter = iter(streaming_buffer)
batch_size = len(streaming_buffer.streams_length)
cache_last_channel, cache_last_time, cache_last_channel_len = self.encoder.get_initial_cache_state(
batch_size=batch_size
)
previous_hypotheses = None
pred_out_stream = None
encoded_len = None
transcribed_texts = None
batch_log_probs = []
for step_num, (chunk_audio, chunk_lengths) in enumerate(streaming_buffer_iter):
drop_extra_pre_encoded = self.encoder.streaming_cfg.drop_extra_pre_encoded if step_num != 0 else 0
with torch.inference_mode():
result = self.conformer_stream_step(
processed_signal=chunk_audio,
processed_signal_length=chunk_lengths,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
keep_all_outputs=streaming_buffer.is_buffer_empty(),
previous_hypotheses=previous_hypotheses,
previous_pred_out=pred_out_stream,
drop_extra_pre_encoded=drop_extra_pre_encoded,
return_transcription=True,
return_log_probs=logprobs or return_hypotheses,
)
if logprobs or return_hypotheses:
(
pred_out_stream,
transcribed_texts,
cache_last_channel,
cache_last_time,
cache_last_channel_len,
previous_hypotheses,
cur_chunk_log_probs,
encoded_len,
) = result
batch_log_probs.append(cur_chunk_log_probs.cpu())
else:
(
pred_out_stream,
transcribed_texts,
cache_last_channel,
cache_last_time,
cache_last_channel_len,
previous_hypotheses,
) = result
if logprobs or return_hypotheses:
# concatenate chunk log probs on T dim
batch_log_probs = torch.cat(batch_log_probs, axis=1)
for log_probs, log_prob_len in zip(batch_log_probs, encoded_len):
total_log_probs.append(log_probs[0:log_prob_len])
if transcribed_texts is None:
total_texts += [''] * batch_size
else:
total_texts += transcribed_texts
if logprobs:
return total_log_probs
if not return_hypotheses:
return total_texts
hyps = []
for log_probs, text in zip(total_log_probs, total_texts):
hyps.append(Hypothesis(y_sequence=log_probs, text=text, score=0.0, dec_state=None))
return hyps
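# Hypothetical usage sketch: simulate cache-aware streaming over a few files with a
# CTC model. The file paths are placeholders; `asr_model` is assumed, not defined here.
#
#   texts = asr_model.transcribe_simulate_cache_aware_streaming(
#       paths2audio_files=["a.wav", "b.wav"], batch_size=2, online_normalization=True
#   )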
def _setup_streaming_transcribe_dataloader(
self, paths2audio_files: List[str], batch_size: int, online_normalization=False
):
"""
Setup function for a temporary data loader which wraps the provided audio file.
Args:
paths2audio_files: (a list) of paths to audio files.
batch_size: (int) batch size to use during inference. \
Bigger will result in better throughput performance but would use more memory.
online_normalization: whether to do online normalization
Returns:
a new batch streaming buffer
"""
from nemo.collections.asr.parts.utils.streaming_utils import CacheAwareStreamingAudioBuffer
streaming_buffer = CacheAwareStreamingAudioBuffer(model=self, online_normalization=online_normalization)
for sample_idx, sample in enumerate(paths2audio_files):
processed_signal, processed_signal_length, stream_id = streaming_buffer.append_audio_file(
sample, stream_id=-1
)
logging.info(f'Added this sample to the buffer: {sample}')
if (sample_idx + 1) % batch_size == 0 or sample_idx == len(paths2audio_files) - 1:
logging.info(f"Starting to stream samples {sample_idx - len(streaming_buffer) + 1} to {sample_idx}...")
yield streaming_buffer
streaming_buffer.reset_buffer()
class DiarizationMixin(ABC):
@abstractmethod
def diarize(self, paths2audio_files: List[str], batch_size: int = 1) -> List[str]:
"""
Takes paths to audio files and returns speaker labels
Args:
paths2audio_files: paths to audio fragment to be transcribed
Returns:
Speaker labels
"""
pass
|
NeMo-main
|
nemo/collections/asr/parts/mixins/mixins.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.parts.mixins.asr_adapter_mixins import ASRAdapterModelMixin
from nemo.collections.asr.parts.mixins.interctc_mixin import InterCTCMixin
from nemo.collections.asr.parts.mixins.mixins import (
ASRAdapterModelMixin,
ASRBPEMixin,
ASRModuleMixin,
DiarizationMixin,
)
|
NeMo-main
|
nemo/collections/asr/parts/mixins/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Tuple
import torch
from nemo.core.classes.mixins import AccessMixin
class InterCTCMixin:
"""Adds utilities for computing interCTC loss from https://arxiv.org/abs/2102.03216.
To use, make sure encoder accesses ``interctc['capture_layers']``
property in the AccessMixin and registers ``interctc/layer_output_X`` and
``interctc/layer_length_X`` for all layers that we want to get loss from.
Additionally, specify the following config parameters to set up loss::
interctc:
# can use different values
loss_weights: [0.3]
apply_at_layers: [8]
Then call
* ``self.setup_interctc(ctc_decoder_name, ctc_loss_name, ctc_wer_name)``
in the init method
* ``self.add_interctc_losses`` after computing regular loss.
* ``self.finalize_interctc_metrics(metrics, outputs, prefix="val_")``
in the `multi_validation_epoch_end` method.
* ``self.finalize_interctc_metrics(metrics, outputs, prefix="test_")``
in the `multi_test_epoch_end` method.
"""
def _process_config_values(self, loss_weights: List[float], apply_at_layers: List[int]):
self.set_interctc_param('intermediate_loss_weights', loss_weights)
self.set_interctc_param('apply_at_layers', apply_at_layers)
self.set_interctc_param('main_loss_weight', 1.0 - sum(loss_weights))
if self.get_interctc_param('main_loss_weight') <= 0.0:
raise ValueError(
"Make sure that sum of intermediate loss weights is < 1.0. "
"Note that we don't do any normalization and assign "
"remaining weight to the regular model loss. "
"E.g., if interctc.loss_weights = [0.1, 0.3], regular "
"loss will have weight of 0.6"
)
self.set_interctc_param('enabled', len(loss_weights) > 0)
if len(apply_at_layers) != len(loss_weights):
raise ValueError('Length of interctc.apply_at_layers has to match interctc.loss_weights')
# setting up config for AccessMixin that will be checked in encoders to
# log the layers we need
AccessMixin.update_access_cfg({'interctc': {'capture_layers': apply_at_layers}})
def setup_interctc(self, decoder_name, loss_name, wer_name):
"""Sets up all interctc-specific parameters and checks config consistency.
Caller has to specify names of attributes to perform CTC-specific WER,
decoder and loss computation. They will be looked up in the class
state with ``getattr``.
The reason we get the names and look up object later is because those
objects might change without re-calling the setup of this class. So
we always want to look up the most up-to-date object instead of
"caching" it here.
"""
# registering all parameters in a dictionary to avoid conflicts with
# main class's names
self._interctc_params = {}
interctc_config = self.cfg.get("interctc")
if interctc_config is not None:
# if interctc is in the config, we want to check that it indeed defines
# the required keys and nothing else - that's automatically done by
# matching with keyword arguments in self._process_config_values
self._process_config_values(**interctc_config)
self._interctc_params['decoder_name'] = decoder_name
self._interctc_params['loss_name'] = loss_name
self._interctc_params['wer_name'] = wer_name
else:
self.set_interctc_param('enabled', False)
def get_interctc_param(self, param_name):
"""Either directly get parameter from ``self._interctc_params`` or
call getattr with the corresponding name.
"""
if param_name in ['decoder', 'loss', 'wer']:
return getattr(self, self._interctc_params[param_name + "_name"])
return self._interctc_params[param_name]
def set_interctc_param(self, param_name, param_value):
"""Setting the parameter to the ``self._interctc_params`` dictionary.
Raises an error if trying to set decoder, loss or wer as those should
always come from the main class.
"""
if param_name in ['decoder', 'loss', 'wer']:
raise ValueError(
'Cannot set "decoder", "loss" or "wer" as parameters. '
'They are always looked up in the main class state.'
)
self._interctc_params[param_name] = param_value
def _verify_setup_was_called(self):
"""Can be used to verify if setup_interctc was called."""
if not hasattr(self, '_interctc_params'):
raise RuntimeError(
'self.setup_interctc(ctc_decoder_name, ctc_loss_name, ctc_wer_name) has to be '
'called before InterCTC loss can be used!'
)
def is_interctc_enabled(self) -> bool:
"""Returns whether interCTC loss is enabled."""
self._verify_setup_was_called()
return self.get_interctc_param('enabled')
def set_interctc_enabled(self, enabled: bool):
"""Can be used to enable/disable InterCTC manually."""
self._verify_setup_was_called()
if enabled: # checking if proper config parameters were specified
if len(self.get_interctc_param('intermediate_loss_weights')) == 0:
raise RuntimeError(
'InterCTC cannot be enabled since interctc.loss_weights was not specified in the config.'
)
if len(self.get_interctc_param('apply_at_layers')) != len(
self.get_interctc_param('intermediate_loss_weights')
):
raise RuntimeError(
'InterCTC cannot be enabled, since length of "loss_weights" does not match "apply_at_layers".'
)
self.set_interctc_param('enabled', enabled)
def finalize_interctc_metrics(self, metrics: Dict, outputs: List[Dict], prefix: str):
"""Finalizes InterCTC WER and loss metrics for logging purposes.
Should be called inside ``multi_validation_epoch_end`` (with ``prefix="val_"``) or
``multi_test_epoch_end`` (with ``prefix="test_"``).
Note that ``metrics`` dictionary is going to be updated in-place.
"""
if self.is_interctc_enabled():
for layer_idx in self.get_interctc_param('apply_at_layers'):
# assuming that if the first batch logged the metrics, then all batches did
if f"{prefix}inter_ctc_loss_l{layer_idx}" in outputs[0]:
loss = torch.stack([x[f"{prefix}inter_ctc_loss_l{layer_idx}"] for x in outputs]).mean()
metrics["log"][f"{prefix}inter_ctc_loss_l{layer_idx}"] = loss
if f"{prefix}inter_wer_num_l{layer_idx}" in outputs[0]:
wer_num = torch.stack([x[f"{prefix}inter_wer_num_l{layer_idx}"] for x in outputs]).sum()
wer_denom = torch.stack([x[f"{prefix}inter_wer_denom_l{layer_idx}"] for x in outputs]).sum()
metrics["log"][f"{prefix}inter_wer_l{layer_idx}"] = wer_num / wer_denom
if f"{prefix}final_loss" in outputs[0]:
metrics["log"][f"{prefix}final_loss"] = torch.stack([x[f"{prefix}final_loss"] for x in outputs]).mean()
def get_captured_interctc_tensors(self) -> List[Tuple[torch.Tensor, torch.Tensor]]:
"""Returns a list of captured tensors from encoder: tuples of (output, length).
Will additionally apply ``ctc_decoder`` to the outputs.
"""
if not self.is_interctc_enabled():
return []
# note that we have a loop here, because tensors can be defined from
# submodules of encoder (e.g., that's the case in Jasper)
total_registry = {}
for module_registry in AccessMixin.get_module_registry(self.encoder).values():
for key in module_registry:
if key.startswith("interctc/") and key in total_registry:
raise RuntimeError(f"layer {key} has been logged multiple times!")
total_registry.update(module_registry)
# if intermediate_loss_weights was set, the encoder has to register
# interctc/layer_output_X and interctc/layer_length_X tensors.
# We need to apply decoder to each of them and compute CTC loss.
captured_tensors = []
for layer_idx in self.get_interctc_param('apply_at_layers'):
try:
layer_outputs = total_registry[f"interctc/layer_output_{layer_idx}"]
layer_lengths = total_registry[f"interctc/layer_length_{layer_idx}"]
except KeyError:
raise RuntimeError(
f"Intermediate layer {layer_idx} was not captured! "
"Check if length of model.encoder.captured_layer_outputs matches "
"length of model.intermediate_loss_weights properties."
)
if len(layer_outputs) > 1 or len(layer_lengths) > 1:
raise RuntimeError(
"Make sure encoder.forward is called exactly one time before interCTC loss is computed."
)
captured_tensors.append(
(self.get_interctc_param('decoder')(encoder_output=layer_outputs[0]), layer_lengths[0])
)
return captured_tensors
def add_interctc_losses(
self,
loss_value: torch.Tensor,
transcript: torch.Tensor,
transcript_len: torch.Tensor,
compute_wer: bool,
compute_loss: bool = True,
log_wer_num_denom: bool = False,
log_prefix: str = "",
) -> Tuple[Optional[torch.Tensor], Dict]:
"""Adding interCTC losses if required.
Will also register loss/wer metrics in the returned dictionary.
Args:
loss_value (torch.Tensor): regular loss tensor (will add interCTC loss to it).
transcript (torch.Tensor): current utterance transcript.
transcript_len (torch.Tensor): current utterance transcript length.
compute_wer (bool): whether to compute WER for the current utterance.
Should typically be True for validation/test and only True for
training if current batch WER should be logged.
compute_loss (bool): whether to compute loss for the current utterance.
Should always be True in training and almost always True in
validation, unless all other losses are disabled as well.
Defaults to True.
log_wer_num_denom (bool): if True, will additionally log WER num/denom
in the returned metrics dictionary. Should always be True for
validation/test to allow correct metrics aggregation. Should
always be False for training. Defaults to False.
log_prefix (str): prefix added to all log values. Should be ``""`` for
training and ``"val_"`` for validation. Defaults to "".
Returns:
tuple[Optional[torch.Tensor], Dict]: tuple of new loss tensor and dictionary with logged metrics.
"""
if not self.is_interctc_enabled() or not AccessMixin.is_access_enabled():
return loss_value, {}
metrics = {}
if compute_loss:
metrics[f"{log_prefix}final_loss"] = loss_value
else:
loss_value = None
captured_tensors = self.get_captured_interctc_tensors()
if compute_loss:
loss_value *= self.get_interctc_param('main_loss_weight')
for layer_idx, intermediate_result, loss_weight in zip(
self.get_interctc_param('apply_at_layers'),
captured_tensors,
self.get_interctc_param('intermediate_loss_weights'),
):
if compute_loss:
inter_loss_value = self.get_interctc_param('loss')(
log_probs=intermediate_result[0],
targets=transcript,
target_lengths=transcript_len,
input_lengths=intermediate_result[1],
)
metrics[f"{log_prefix}inter_ctc_loss_l{layer_idx}"] = inter_loss_value.detach()
loss_value += inter_loss_value * loss_weight
if compute_wer:
self.get_interctc_param('wer').update(
predictions=intermediate_result[0],
targets=transcript,
target_lengths=transcript_len,
predictions_lengths=intermediate_result[1],
)
wer, wer_num, wer_denom = self.get_interctc_param('wer').compute()
self.get_interctc_param('wer').reset()
metrics.update({f'{log_prefix}inter_wer_l{layer_idx}': wer})
if log_wer_num_denom:
metrics.update(
{
f'{log_prefix}inter_wer_num_l{layer_idx}': wer_num,
f'{log_prefix}inter_wer_denom_l{layer_idx}': wer_denom,
}
)
# return total loss and dictionary of metrics
return loss_value, metrics
|
NeMo-main
|
nemo/collections/asr/parts/mixins/interctc_mixin.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import torch
class StreamingEncoder(ABC):
@abstractmethod
def setup_streaming_params(
self, max_look_ahead: int = 10000,
):
"""
This function sets the needed values and parameters to perform streaming. The configuration (CacheAwareStreamingConfig) needs to be stored in self.streaming_cfg.
The streaming configuration is needed to simulate streaming inference.
"""
pass
@abstractmethod
def get_initial_cache_state(self, batch_size, dtype, device, max_dim):
pass
@staticmethod
def to_numpy(tensor):
if tensor is None:
return None
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
def cache_aware_stream_step(
self,
processed_signal,
processed_signal_length=None,
cache_last_channel=None,
cache_last_time=None,
cache_last_channel_len=None,
keep_all_outputs=True,
drop_extra_pre_encoded=None,
):
if self.streaming_cfg is None:
self.setup_streaming_params()
if drop_extra_pre_encoded is not None:
prev_drop_extra_pre_encoded = self.streaming_cfg.drop_extra_pre_encoded
self.streaming_cfg.drop_extra_pre_encoded = drop_extra_pre_encoded
else:
prev_drop_extra_pre_encoded = None
if processed_signal_length is None:
processed_signal_length = processed_signal.new_full((processed_signal.size(0),), processed_signal.size(-1))
encoder_output = self(
audio_signal=processed_signal,
length=processed_signal_length,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
)
encoder_output = self.streaming_post_process(encoder_output, keep_all_outputs=keep_all_outputs)
if prev_drop_extra_pre_encoded is not None:
self.streaming_cfg.drop_extra_pre_encoded = prev_drop_extra_pre_encoded
return encoder_output
|
NeMo-main
|
nemo/collections/asr/parts/mixins/streaming.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.asr.parts.k2.utils import make_non_pad_mask
class GradExpNormalize(torch.autograd.Function):
"""Function for fast gradient normalization.
Typical use case is normalization for mle loss.
"""
@staticmethod
def forward(
ctx, log_probs: torch.Tensor, input_lengths: torch.Tensor, reduction: str = "mean",
):
mask = make_non_pad_mask(input_lengths, log_probs.shape[1])
probs = log_probs.exp()
norm_probs = torch.zeros_like(log_probs)
norm_probs[mask] += probs[mask]
if reduction == "mean":
norm_probs /= norm_probs.shape[0]
ctx.save_for_backward(norm_probs)
return log_probs
@staticmethod
def backward(ctx, grad_output: torch.Tensor):
return grad_output - grad_output.sum(-1).unsqueeze(-1) * ctx.saved_tensors[0], None, None
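# Hypothetical usage sketch: re-route gradients of a (B, T, V) log-prob tensor through the
# normalization above while leaving the forward values untouched. Tensor names are assumed.
#
#   log_probs = GradExpNormalize.apply(log_probs, input_lengths, "mean")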
class GradInsert(torch.autograd.Function):
"""Function to attach a pre-computed gradient to a tensor.
Typical use case is gradient computation before calling loss.backward().
"""
@staticmethod
def forward(
ctx, input_tensor: torch.Tensor, output_tensor: torch.Tensor, grad: torch.Tensor, mask: torch.Tensor,
):
assert input_tensor.requires_grad
assert not output_tensor.requires_grad and not grad.requires_grad
ctx.save_for_backward(grad, mask)
return output_tensor
@staticmethod
def backward(ctx, grad_output: torch.Tensor):
saved_grad, mask = ctx.saved_tensors
# TODO (alaptev): make it work for grad_output with arbitrary shape
padded_grad_output = torch.zeros(saved_grad.shape[0], dtype=grad_output.dtype, device=grad_output.device)
padded_grad_output[mask] = grad_output
return (padded_grad_output * saved_grad.T).T, None, None, None
class PartialGrad(torch.nn.Module):
"""Module for partial gradient computation.
Useful when computing loss on batch splits to save memory.
"""
def __init__(self, func: torch.nn.Module):
super().__init__()
self.func = func
def forward(
self,
input_tensor: torch.Tensor,
targets: torch.Tensor,
input_lengths: torch.Tensor,
target_lengths: torch.Tensor,
):
# break the gradient chain
loc_tensor = input_tensor.detach()
loc_tensor.requires_grad_(True)
new_tensor, mask = self.func(loc_tensor, targets, input_lengths, target_lengths)
loc_new_tensor = new_tensor.detach()
new_tensor.sum().backward()
grad = loc_tensor.grad
return GradInsert.apply(input_tensor, loc_new_tensor, grad, mask), mask
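# Hypothetical usage sketch: `loss_fn` is an assumed torch.nn.Module whose forward returns
# (per-utterance values, mask); wrapping it lets the gradient w.r.t. the input be computed
# on a batch split and re-attached through GradInsert.
#
#   partial_loss = PartialGrad(loss_fn)
#   values, mask = partial_loss(log_probs, targets, input_lengths, target_lengths)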
|
NeMo-main
|
nemo/collections/asr/parts/k2/grad_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import nullcontext
from typing import Union
import torch
import torch.nn.functional as F
from nemo.collections.asr.parts.k2.graph_transducer import GraphRnntLoss, force_float32_context
from nemo.core.utils.k2_guard import k2
from nemo.utils.enum import PrettyStrEnum
class GraphWTransducerLoss(GraphRnntLoss):
"""
W-Transducer loss: RNN-T loss modification for training RNN-T model for the case
when some text at the beginning/end of the utterance is missing.
The resulting model behaves like the RNN-T model (no modification for decoding is required).
For details see "Powerful and Extensible WFST Framework for RNN-Transducer Losses" paper
https://ieeexplore.ieee.org/document/10096679
"""
class LastBlankMode(PrettyStrEnum):
ALLOW_IGNORE = "allow_ignore"
FORCE_FINAL = "force_final"
def __init__(
self,
blank: int,
eps_weight: float = 0.0,
last_blank_mode: Union[LastBlankMode, str] = LastBlankMode.FORCE_FINAL,
use_grid_implementation=True,
connect_composed=False,
double_scores=False,
cast_to_float32=False,
):
"""
Init method
Args:
blank: blank label index
eps_weight: weight of epsilon transitions, 0 means no penalty (default)
last_blank_mode: allow to skip last blank in the prediction (default) or force it
use_grid_implementation: Whether to use the grid implementation (Grid-Transducer).
connect_composed: Connect graph after composing unit and temporal schemas
(only for Compose-Transducer). `connect` operation is slow, it is useful for visualization,
but not necessary for loss computation.
double_scores: Use calculation of loss in double precision (float64) in the lattice.
Does not significantly affect memory usage since the lattice is ~V/2 times smaller than the joint tensor.
cast_to_float32: Force cast joint tensor to float32 before log-softmax calculation.
"""
super().__init__(
blank=blank,
use_grid_implementation=use_grid_implementation,
connect_composed=connect_composed,
double_scores=double_scores,
cast_to_float32=cast_to_float32,
)
self.eps_weight = eps_weight
self.last_blank_mode = self.LastBlankMode(last_blank_mode)
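# Hypothetical usage sketch: called like NeMo's RNNTLoss on a joint tensor of shape
# (B, T, U+1, V); returns per-utterance negative log-likelihood scores. Tensor names and
# shapes are illustrative only.
#
#   loss_fn = GraphWTransducerLoss(blank=0, eps_weight=0.0, last_blank_mode="force_final")
#   scores = loss_fn(acts=joint_logits, labels=targets, act_lens=frame_lens, label_lens=text_lens)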
def get_unit_schema(self, units_tensor: torch.Tensor, vocab_size: int) -> "k2.Fsa":
"""
Get unit schema (target text) graph for W-Transducer loss (Compose-Transducer).
Forward arcs represent text labels.
Example graph: text [1, 2], blank=0. Eps ids: 3, 4.
graph::
3:3:0 0:0:1 0:0:2
+-------+ +-------+ +-------+
v | v | v |
+-----------+ 1:1:0 +-----------+ 2:2:1 +-----------+ -1:-1:-1 #===#
| 0 | -------> | 1 | -------> | 2 | ---------> H 3 H
+-----------+ +-----------+ +-----------+ #===#
^ 0:0:0 | ^ 4:4:2 |
+-------+ +-------+
Args:
units_tensor: 1d tensor with text units
vocab_size: number of total labels (vocab size including blank)
Returns:
unit schema graph (k2.Fsa).
Labels: <unit>:<unit>:<unit_position> (k2.Fsa: labels, aux_labels, unit_positions)
"""
blank_id = self.blank
start_eps_id = vocab_size
end_eps_id = vocab_size + 1
device = units_tensor.device
text_len = units_tensor.shape[0]
# arcs: scr, dest, label, score
arcs = torch.zeros(((text_len + 1) * 2 + 2, 4), dtype=torch.int32, device=device)
text_indices = torch.arange(0, text_len + 1, dtype=torch.int32, device=device)
# eps
arcs[0, 2] = start_eps_id
# blank labels
arcs[1:-1:2, 0] = text_indices # from state
arcs[1:-1:2, 1] = text_indices # to state
arcs[1:-1:2, 2] = blank_id
# text labels
arcs[2:-1:2, 0] = text_indices # from state
arcs[2:-1:2, 1] = text_indices + 1 # to state
arcs[2:-2:2, 2] = units_tensor # labels: text
arcs[-1] = arcs[-2]
arcs[-2, 1] = text_len
arcs[-2, 2] = end_eps_id
arcs[-1, 2] = -1 # last transition to final state, ilabel=-1 (special for k2)
olabels = arcs[:, 2].detach().clone() # same as ilabels
fsa_text = k2.Fsa(arcs, olabels)
fsa_text.unit_positions = torch.zeros_like(olabels)
fsa_text.unit_positions[1:-1] = text_indices.expand(2, -1).transpose(0, 1).flatten()
fsa_text.unit_positions[-1] = -1
return fsa_text
def get_temporal_schema(self, num_frames: int, vocab_size: int, device: torch.device) -> "k2.Fsa":
"""
Get temporal schema graph for W-Transducer loss (Compose-Transducer).
Example graph: blank=0, num_frames=3, vocab_size=3, last_blank_mode="force_final".
Labels: <unit>:<frame_index>. <unit> is a unit from vocab + special eps ids `vocab_size`, `vocab_size+1`.
graph for force_final::
4:0
+--------------------------------------------+
| 4:1 |
| +--------------------+ |
1:0 | 1:1 | 1:2 | |
+-----+ | +-----+ | +-----+ | |
v | | v | | v | v v
+--------------+ 0:0 +------------+ 0:1 +------------+ 0:2 +---+ -1:-1 #===#
| 0 | ----> | 1 | -----> | 2 | -----> | 3 | -------> H 4 H
+--------------+ +------------+ +------------+ +---+ #===#
^ 2:0 | | | ^ 2:1 | ^ ^ 2:2 | ^
+-----+ | | +-----+ | +-----+ |
| | 3:0 | |
| +------------------+ 3:0 |
+-------------------------------------------+
Args:
num_frames: length of the sequence (in frames)
vocab_size: number of labels (including blank)
device: device for tensor to construct
Returns:
temporal schema graph (k2.Fsa).
Labels: <unit>:<frame_index>. <unit> is a unit from vocab + special units (e.g., additional eps).
"""
blank_id = self.blank
start_eps_id = vocab_size
end_eps_id = vocab_size + 1
num_eps = 2
num_sequence_arcs = num_frames * vocab_size + (num_frames - 1) * num_eps + 1
fsa_temporal_arcs = torch.zeros((num_sequence_arcs, 4), dtype=torch.int32, device=device)
sequence_states = torch.arange(0, num_frames, dtype=torch.int32, device=device)
sequence_states_next = sequence_states + 1
# for every state - vocab_size+1 arcs, [0, 1, ..., vocab_size-1, eps, 0, 1, ..., vocab_size-1, eps, ...]
start_states = sequence_states.expand(vocab_size + num_eps, num_frames).transpose(0, 1).flatten()
# self-loops - all, make forward arcs later
fsa_temporal_arcs[:num_sequence_arcs, 0] = start_states[:-1] # from
fsa_temporal_arcs[:num_sequence_arcs, 1] = start_states[:-1] # to
fsa_temporal_arcs[:num_sequence_arcs, 2] = (
torch.arange(0, vocab_size + num_eps, dtype=torch.int32, device=device)
.expand(num_frames, vocab_size + num_eps)
.flatten()[:-1]
)
# forward arcs
fsa_temporal_arcs[blank_id : num_sequence_arcs : vocab_size + num_eps, 1] = sequence_states_next # blanks
# eps arcs
fsa_temporal_arcs[start_eps_id : num_sequence_arcs : vocab_size + num_eps, 0] = 0
fsa_temporal_arcs[start_eps_id : num_sequence_arcs : vocab_size + num_eps, 1] = sequence_states + 1
fsa_temporal_arcs[end_eps_id : num_sequence_arcs : vocab_size + num_eps, 0] = sequence_states[:-1]
fsa_temporal_arcs[end_eps_id : num_sequence_arcs : vocab_size + num_eps, 1] = (
num_frames - 1 if self.last_blank_mode == self.LastBlankMode.FORCE_FINAL else num_frames
)
# transition to last final state
fsa_temporal_arcs[-1, :3] = torch.tensor((num_frames, num_frames + 1, -1), dtype=torch.int32, device=device)
# need to sort arcs
_, indices = torch.sort(fsa_temporal_arcs[:, 0], dim=0)
fsa_temporal_arcs = fsa_temporal_arcs[indices]
# output symbols: position in the sequence, same as start states for arcs
olabels = fsa_temporal_arcs[:, 0].detach().clone()
olabels[-1] = -1 # transition to the last final state
fsa_temporal = k2.Fsa(fsa_temporal_arcs, olabels)
fsa_temporal = k2.arc_sort(fsa_temporal) # need for compose
return fsa_temporal
def get_grid(self, units_tensor: torch.Tensor, num_frames: int, vocab_size: int) -> "k2.Fsa":
"""
Construct W-Transducer lattice directly (Grid-Transducer).
Args:
units_tensor: 1d tensor with text units
num_frames: length of the sequence (number of frames)
vocab_size: number of total labels (vocab size including blank)
Returns:
transducer lattice (k2.Fsa).
Labels: <unit>:<frame_index>:<unit_position> (k2.Fsa: labels, aux_labels, unit_positions)
"""
blank_id = self.blank
eps_id = vocab_size # beyond vocabulary
text_length = units_tensor.shape[0]
device = units_tensor.device
num_grid_states = num_frames * (text_length + 1)
num_forward_arcs_base = (num_frames - 1) * (text_length + 1)
num_forward_arcs_additional = (num_frames - 1) * 2
num_forward_arcs = num_forward_arcs_base + num_forward_arcs_additional
num_text_arcs = text_length * num_frames
arcs = torch.zeros((num_forward_arcs + num_text_arcs + 2, 4), dtype=torch.int32, device=device)
# blank transitions
# i, i+<text_len + 1>, 0 <blank>, i / <text_len+1>, i % <text_len + 1>
from_states = torch.arange(num_forward_arcs_base, device=device)
to_states = from_states + (text_length + 1)
arcs[:num_forward_arcs_base, 0] = from_states
arcs[:num_forward_arcs_base, 1] = to_states
arcs[:num_forward_arcs_base, 2] = blank_id
from_states = torch.cat(
[
torch.arange(num_frames - 1, device=device) * (text_length + 1),
text_length + torch.arange(num_frames - 1, device=device) * (text_length + 1),
]
)
to_states = from_states + (text_length + 1)
arcs[num_forward_arcs_base : num_forward_arcs_base + (num_frames - 1) * 2, 0] = from_states
arcs[num_forward_arcs_base : num_forward_arcs_base + (num_frames - 1) * 2, 1] = to_states
arcs[num_forward_arcs_base : num_forward_arcs_base + (num_frames - 1), 2] = eps_id
arcs[num_forward_arcs_base + (num_frames - 1) : num_forward_arcs_base + (num_frames - 1) * 2, 2] = eps_id + 1
arcs[num_forward_arcs_base : num_forward_arcs_base + (num_frames - 1), 0] = 0
arcs[num_forward_arcs_base + (num_frames - 1) : num_forward_arcs_base + (num_frames - 1) * 2, 1] = (
num_grid_states - 1
) # if other mode - fix later
# last eps ark - after relabel
# text arcs
from_states = (
torch.arange(num_grid_states, dtype=torch.int32, device=device)
.reshape(num_frames, text_length + 1)[:, :-1]
.flatten()
)
to_states = from_states + 1
ilabels = units_tensor.expand(num_frames, -1).flatten()
arcs[num_forward_arcs:-2, 0] = from_states
arcs[num_forward_arcs:-2, 1] = to_states
arcs[num_forward_arcs:-2, 2] = ilabels
# last 2 states
arcs[-2, :3] = torch.tensor((num_grid_states - 1, num_grid_states, blank_id), dtype=torch.int32, device=device)
arcs[-1, :3] = torch.tensor((num_grid_states, num_grid_states + 1, -1), dtype=torch.int32, device=device)
# sequence indices, time indices
olabels = torch.div(arcs[:, 0], (text_length + 1), rounding_mode="floor") # arcs[:, 0] // (text_length + 1)
unit_positions = arcs[:, 0] % (text_length + 1)
# last state: final
olabels[-1] = -1
unit_positions[-1] = -1
# relabel
# instead of using top sort (extremely expensive) k2.top_sort(rnnt_graph)
arcs[:-2, 0] = self.relabel_states(arcs[:-2, 0], text_length + 1, num_frames)
arcs[:-3, 1] = self.relabel_states(arcs[:-3, 1], text_length + 1, num_frames)
if self.last_blank_mode == self.LastBlankMode.ALLOW_IGNORE:
arcs[
num_forward_arcs_base + (num_frames - 1) : num_forward_arcs_base + (num_frames - 1) * 2, 1
] = num_grid_states
# sort by start state - required in k2
# TODO: maybe it is more optimal to avoid sort, construct arcs in ascending order
_, indices = torch.sort(arcs[:, 0], dim=0)
arcs = arcs[indices]
olabels = olabels[indices]
unit_positions = unit_positions[indices]
rnnt_graph = k2.Fsa(arcs, olabels)
rnnt_graph.unit_positions = unit_positions
return rnnt_graph
def forward(
self, acts: torch.Tensor, labels: torch.Tensor, act_lens: torch.Tensor, label_lens: torch.Tensor,
):
"""
Forward method is similar to RNN-T Graph-Transducer forward method,
but we need to assign eps weight to eps-transitions.
"""
# argument names are consistent with NeMo, see RNNTLoss.forward:
# self._loss(acts=log_probs, labels=targets, act_lens=input_lengths, label_lens=target_lengths)
logits, targets, logits_lengths, target_lengths = acts, labels, act_lens, label_lens
# logits: B x Time x Text+1 x C
vocab_size = logits.shape[-1]
target_fsas_vec = self.get_graphs_batched(logits_lengths, targets, target_lengths, vocab_size)
cast_context = force_float32_context() if self.cast_to_float32 else nullcontext()
with cast_context:
log_probs = F.log_softmax(logits, dim=-1)
with torch.no_grad():
indices = self.get_logits_indices(target_fsas_vec, logits.shape)
# transition to the last state + eps-transitions
# use 0 index (for valid index_select) and manually assign score after index_select for this case
indices[target_fsas_vec.labels == -1] = 0
indices[target_fsas_vec.labels >= vocab_size] = 0 # eps
# NB: do not assign scores -> modify, k2 will not update all scores correctly (modify -> assign)
scores = log_probs.flatten().index_select(-1, indices)
# fix weights for the arcs to the last state + eps-transitions
scores[target_fsas_vec.labels == -1] = 0
scores[target_fsas_vec.labels >= vocab_size] = self.eps_weight # eps
target_fsas_vec.scores = scores
scores = -1 * target_fsas_vec.get_tot_scores(use_double_scores=self.double_scores, log_semiring=True)
return scores
|
NeMo-main
|
nemo/collections/asr/parts/k2/w_transducer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import lru_cache
from typing import List, Optional, Union
import torch
from nemo.core.utils.k2_guard import k2 # import k2 from guard module
def build_topo(name: str, tokens: List[int], blank_num: int, with_self_loops: bool = True) -> 'k2.Fsa':
"""Helper function to build a topology.
It allows to build topologies with a non-zero blank ID.
Args:
name:
The topology name. Choices: default, compact, shared_blank, minimal
tokens:
A list of tokens, e.g., phones, characters, etc.
blank_num:
Blank number. Must be in tokens
with_self_loops:
Whether to add token-to-epsilon self-loops to a topology
Returns:
Returns a topology FST.
"""
if name == "default":
ans = build_default_topo(tokens, with_self_loops)
elif name == "compact":
ans = build_compact_topo(tokens, with_self_loops)
elif name == "shared_blank":
ans = build_shared_blank_topo(tokens, with_self_loops)
elif name == "minimal":
ans = build_minimal_topo(tokens)
else:
raise ValueError(f"Unknown topo name: {name}")
if blank_num != 0:
labels = ans.labels
blank_mask = labels == 0
labels[(labels != -1) & (labels <= blank_num)] -= 1
labels[blank_mask] = blank_num
ans.labels = labels # force update ans.labels property to notify FSA about modifications, required by k2
ans = k2.arc_sort(ans)
return ans
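# Hypothetical usage sketch (requires k2; token IDs are illustrative): build a compact CTC
# topology for a 4-token vocabulary whose blank has been relabelled to ID 2.
#
#   topo = build_topo("compact", tokens=[0, 1, 2, 3], blank_num=2, with_self_loops=True)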
def build_default_topo(tokens: List[int], with_self_loops: bool = True) -> 'k2.Fsa':
"""Build the default CTC topology.
Zero is assumed to be the ID of the blank symbol.
"""
assert -1 not in tokens, "We assume -1 is ID of the final transition"
assert 0 in tokens, "We assume 0 is the ID of the blank symbol"
num_states = len(tokens)
final_state = num_states
arcs = "" if with_self_loops else f"0 0 0 0 0.0\n"
for i in range(num_states):
for j in range(num_states):
if i == j:
if with_self_loops:
arcs += f"{i} {i} {tokens[i]} 0 0.0\n"
else:
arcs += f"{i} {j} {tokens[j]} {tokens[j]} 0.0\n"
arcs += f"{i} {final_state} -1 -1 0.0\n"
arcs += f"{final_state}"
ans = k2.Fsa.from_str(arcs, num_aux_labels=1)
ans = k2.arc_sort(ans)
return ans
def build_compact_topo(tokens: List[int], with_self_loops: bool = True) -> 'k2.Fsa':
"""Build the compact CTC topology.
Zero is assumed to be the ID of the blank symbol.
See https://arxiv.org/abs/2110.03098
"""
assert -1 not in tokens, "We assume -1 is ID of the final transition"
assert 0 in tokens, "We assume 0 is the ID of the blank symbol"
eps_num = tokens[-1] + 1
selfloops_shift = int(with_self_loops)
num_states = len(tokens) + selfloops_shift
final_state = num_states
arcs = ""
for i in range(selfloops_shift, num_states):
arcs += f"0 {i} {tokens[i - selfloops_shift]} {tokens[i - selfloops_shift]} 0.0\n"
arcs += f"0 {final_state} -1 -1 0.0\n"
for i in range(1, num_states):
arcs += f"{i} 0 {eps_num} 0 0.0\n"
if with_self_loops:
arcs += f"{i} {i} {tokens[i - selfloops_shift]} 0 0.0\n"
arcs += f"{final_state}"
ans = k2.Fsa.from_str(arcs, num_aux_labels=1)
ans = k2.arc_sort(ans)
return ans
def build_shared_blank_topo(tokens: List[int], with_self_loops: bool = True) -> 'k2.Fsa':
"""Build the shared blank CTC topology.
Zero is assumed to be the ID of the blank symbol.
See https://github.com/k2-fsa/k2/issues/746#issuecomment-856421616
"""
assert -1 not in tokens, "We assume -1 is ID of the final transition"
assert 0 in tokens, "We assume 0 is the ID of the blank symbol"
tokens = tokens.copy()
tokens.remove(0)
num_tokens = len(tokens)
start = 0
final = num_tokens + 1
arcs = []
arcs.append([start, start, 0, 0, 0])
arcs.append([start, final, -1, -1, 0])
arcs.append([final])
for i, p in enumerate(tokens):
i += 1
arcs.append([start, start, p, p, 0])
arcs.append([start, i, p, p, 0])
arcs.append([i, start, p, 0, 0])
if with_self_loops:
arcs.append([i, i, p, 0, 0])
arcs = sorted(arcs, key=lambda arc: arc[0])
arcs = [[str(i) for i in arc] for arc in arcs]
arcs = [" ".join(arc) for arc in arcs]
arcs = "\n".join(arcs)
ans = k2.Fsa.from_str(arcs, num_aux_labels=1)
ans = k2.arc_sort(ans)
return ans
def build_minimal_topo(tokens: List[int]) -> 'k2.Fsa':
"""Build the minimal topology.
Zero is assumed to be the ID of the blank symbol.
See https://arxiv.org/abs/2110.03098
"""
assert -1 not in tokens, "We assume -1 is ID of the final transition"
assert 0 in tokens, "We assume 0 is the ID of the blank symbol"
num_tokens = len(tokens)
final_state = 1
arcs = ""
for i in range(num_tokens):
arcs += f"0 0 {tokens[i]} {tokens[i]} 0.0\n"
arcs += f"0 {final_state} -1 -1 0.0\n"
arcs += f"{final_state}"
ans = k2.Fsa.from_str(arcs, num_aux_labels=1)
ans = k2.arc_sort(ans)
return ans
class RnntEmissionAdapterBuilder(object):
"""Builder class for RNNT Emission Adapters.
    An Emission Adapter is an FSA used to emulate the desired temporal properties of an Emissions FSA on top of a trivial Emissions FSA.
    Temporal properties are emulated by <epsilon>-arcs with zero log-weight.
    These additional arcs do not contribute to the lattice scores and can be easily removed from the best path.
    k2 does not have Emissions FSAs. Instead, it has DenseFsaVec, which is not a real FSA.
    Thus, Emission Adapters should be composed with Supervision FSAs.
    IMPORTANT: <epsilon>-outputs are expected to be present in the DenseFsaVec.
    These RNNT adapters only re-route <blank> (emulating <blank> hopping over the U dimension).
    Redundant non-<blank> emissions are not removed by these adapters.
    At initialization, the builder expects a list of tokens, the <blank> ID, and the <epsilon> ID.
When called, the builder returns adapters according to the provided text lengths.
"""
def __init__(self, tokens: List[int], blank_num: int, eps_num: Optional[int] = None):
assert -1 not in tokens, "We assume -1 is ID of the final transition"
assert blank_num in tokens, "The blank ID must be in tokens"
        assert eps_num is None or eps_num not in tokens, "The epsilon ID must not be in tokens"
self.tokens = tokens
self.blank_num = blank_num
self.eps_num = self.tokens[-1] + 1 if eps_num is None else eps_num
def __call__(self, adapter_lengths: Union[torch.Tensor, List[int]]) -> 'k2.Fsa':
        # convert adapter_lengths to a list beforehand: iterating over a tensor yields 0-d tensors,
        # which lru_cache hashes by identity, so every lookup would be a cache miss even though "i" behaves like an int
return k2.create_fsa_vec([self._build_single_adapter(i) for i in adapter_lengths.tolist()])
@lru_cache(maxsize=1024)
def _build_single_adapter(self, adapter_length: int) -> 'k2.Fsa':
assert adapter_length >= 1, "`adapter_length` cannot be less than one"
first_eps_state = adapter_length + 1
final_state = adapter_length * 2 + 1
arcs = ""
for i in range(adapter_length):
for j in range(len(self.tokens)):
if j != self.blank_num:
arcs += f"{i} {i + 1} {self.tokens[j]} 0.0\n"
arcs += f"{i} {first_eps_state} {self.blank_num} 0.0\n"
arcs += f"{adapter_length} {first_eps_state} {self.blank_num} 0.0\n"
for i in range(first_eps_state, final_state):
arcs += f"{i} {i + 1 if i < final_state - 1 else 0} {self.eps_num} 0.0\n"
arcs += f"{i} {final_state} -1 0.0\n"
arcs += f"{final_state}"
return k2.arc_sort(k2.Fsa.from_str(arcs, acceptor=True))
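# Illustrative usage sketch (not part of the original module): builds RNNT emission adapters
# for a batch of two utterances with text lengths 3 and 5. The token IDs and the blank choice
# are arbitrary example values; `torch` and `k2` are assumed to be imported at the top of this
# file, as in the class above.
def _example_build_adapters() -> 'k2.Fsa':
    builder = RnntEmissionAdapterBuilder(tokens=[0, 1, 2, 3], blank_num=0)
    adapter_lengths = torch.tensor([3, 5], dtype=torch.int32)
    # Returns an FsaVec with one adapter per utterance; each adapter re-routes <blank>
    # emissions through <epsilon>-arcs as described in the class docstring.
    return builder(adapter_lengths)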
|
NeMo-main
|
nemo/collections/asr/parts/k2/topologies.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2020, Xiaomi CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import Any, Optional, Tuple, Union
import torch
from omegaconf import DictConfig
from nemo.collections.asr.parts.k2.classes import GraphIntersectDenseConfig
from nemo.collections.asr.parts.k2.loss_mixins import CtcK2Mixin
from nemo.collections.asr.parts.k2.ml_loss import MLLoss
from nemo.collections.asr.parts.k2.utils import (
create_sparse_wrapped,
get_tot_objf_and_finite_mask,
invert_permutation,
load_graph,
)
from nemo.core.utils.k2_guard import k2 # import k2 from guard module
from nemo.utils import logging
class MAPLoss(MLLoss):
"""
Maximum a Posteriori Probability criterion.
It implements Lattice-Free Maximum Mutual Information (LF-MMI) and LF-boosted-MMI (LF-bMMI) losses.
Based on https://github.com/k2-fsa/snowfall/blob/master/snowfall/objectives/mmi.py
cfg takes precedence over all optional parameters
We keep explicit parameter setting to be able to create an instance without the need of a config.
"""
@abstractmethod
def __init__(
self,
num_classes: int,
blank: int,
reduction: str,
cfg: Optional[DictConfig] = None,
topo_type: str = "default",
topo_with_self_loops: bool = True,
token_lm: Optional[Union['k2.Fsa', str]] = None,
intersect_pruned: bool = False,
intersect_conf: GraphIntersectDenseConfig = GraphIntersectDenseConfig(),
boost_coeff: float = 0.0,
):
super().__init__(
num_classes=num_classes,
blank=blank,
reduction=reduction,
cfg=cfg,
topo_type=topo_type,
topo_with_self_loops=topo_with_self_loops,
)
if cfg is not None:
token_lm = cfg.get("token_lm", token_lm)
intersect_pruned = cfg.get("intersect_pruned", intersect_pruned)
intersect_conf = cfg.get("intersect_conf", intersect_conf)
boost_coeff = cfg.get("boost_coeff", boost_coeff)
self.boost_coeff = boost_coeff
self._intersect_calc_scores_impl = (
self._intersect_calc_scores_impl_pruned if intersect_pruned else self._intersect_calc_scores_impl_exact_opt
)
self.intersect_conf = intersect_conf
self.graph_compiler = None # expected to be initialized in .update_graph(...)
if token_lm is None:
logging.warning(
f"""token_lm is empty.
Trainable token_lm is not supported yet.
Please call .update_graph(token_lm) before using."""
)
else:
self.lm_graph = load_graph(token_lm) if isinstance(token_lm, str) else token_lm
if self.lm_graph is None:
raise ValueError(f"""lm_graph is empty.""")
else:
self.update_graph(self.lm_graph)
@abstractmethod
def update_graph(self, graph: 'k2.Fsa'):
# expected to be set in child classes
raise NotImplementedError
def _intersect_calc_scores_impl_exact_opt(
self, dense_fsa_vec: 'k2.DenseFsaVec', num_graphs: 'k2.Fsa', den_graph: 'k2.Fsa', return_lats: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor, Optional['k2.Fsa'], Optional['k2.Fsa']]:
"""Inner intersection method.
Does joint (simultaneous) exact intersection of dense_fsa_vec against num_graphs and den_graph.
        Optionally returns the numerator and the denominator lattices.
"""
device = dense_fsa_vec.device
assert device == num_graphs.device and device == den_graph.device
num_fsas = num_graphs.shape[0]
assert dense_fsa_vec.dim0() == num_fsas
den_graph = den_graph.clone()
num_graphs = num_graphs.clone()
num_den_graphs = k2.cat([num_graphs, den_graph])
# NOTE: The a_to_b_map in k2.intersect_dense must be sorted
# so the following reorders num_den_graphs.
# [0, 1, 2, ... ]
num_graphs_indexes = torch.arange(num_fsas, dtype=torch.int32)
# [num_fsas, num_fsas, num_fsas, ... ]
den_graph_indexes = torch.tensor([num_fsas] * num_fsas, dtype=torch.int32)
# [0, num_fsas, 1, num_fsas, 2, num_fsas, ... ]
num_den_graphs_indexes = torch.stack([num_graphs_indexes, den_graph_indexes]).t().reshape(-1).to(device)
num_den_reordered_graphs = k2.index_fsa(num_den_graphs, num_den_graphs_indexes)
# [[0, 1, 2, ...]]
a_to_b_map = torch.arange(num_fsas, dtype=torch.int32).reshape(1, -1)
# [[0, 1, 2, ...]] -> [0, 0, 1, 1, 2, 2, ... ]
a_to_b_map = a_to_b_map.repeat(2, 1).t().reshape(-1).to(device)
num_den_lats = k2.intersect_dense(
a_fsas=num_den_reordered_graphs,
b_fsas=dense_fsa_vec,
output_beam=self.intersect_conf.output_beam,
a_to_b_map=a_to_b_map,
seqframe_idx_name="seqframe_idx" if return_lats else None,
)
num_den_tot_scores = num_den_lats.get_tot_scores(log_semiring=True, use_double_scores=False)
num_tot_scores = num_den_tot_scores[::2]
den_tot_scores = num_den_tot_scores[1::2]
if return_lats:
lat_slice = torch.arange(num_fsas, dtype=torch.int32).to(device) * 2
return (
num_tot_scores,
den_tot_scores,
k2.index_fsa(num_den_lats, lat_slice),
k2.index_fsa(num_den_lats, lat_slice + 1),
)
else:
return num_tot_scores, den_tot_scores, None, None
def _intersect_calc_scores_impl_pruned(
self, dense_fsa_vec: 'k2.DenseFsaVec', num_graphs: 'k2.Fsa', den_graph: 'k2.Fsa', return_lats: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor, Optional['k2.Fsa'], Optional['k2.Fsa']]:
"""Inner intersection method.
Does exact intersection of dense_fsa_vec against num_graphs and pruned intersection against den_graph.
        Optionally returns the numerator and the denominator lattices.
"""
device = dense_fsa_vec.device
assert device == num_graphs.device and device == den_graph.device
num_fsas = num_graphs.shape[0]
assert dense_fsa_vec.dim0() == num_fsas
num_lats = k2.intersect_dense(
a_fsas=num_graphs,
b_fsas=dense_fsa_vec,
output_beam=self.intersect_conf.output_beam,
seqframe_idx_name="seqframe_idx" if return_lats else None,
)
den_lats = k2.intersect_dense_pruned(
a_fsas=den_graph,
b_fsas=dense_fsa_vec,
search_beam=self.intersect_conf.search_beam,
output_beam=self.intersect_conf.output_beam,
min_active_states=self.intersect_conf.min_active_states,
max_active_states=self.intersect_conf.max_active_states,
seqframe_idx_name="seqframe_idx" if return_lats else None,
)
num_tot_scores = num_lats.get_tot_scores(log_semiring=True, use_double_scores=False)
den_tot_scores = den_lats.get_tot_scores(log_semiring=True, use_double_scores=False)
if return_lats:
return num_tot_scores, den_tot_scores, num_lats, den_lats
else:
return num_tot_scores, den_tot_scores, None, None
def _intersect_calc_scores(
self, emissions_graphs: 'k2.DenseFsaVec', supervision_graphs: Any, supervisions: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Intersects emissions_graphs with supervision_graphs and calculates lattice scores.
This version implicitly assumes supervision_graphs to be a pair of the numerator and the denominator FSAs.
It can also calculate accuracy between the numerator and the denominator lattices to use it as additional loss.
Can be overridden.
"""
boosted = self.boost_coeff != 0.0
num_tot_scores, den_tot_scores, num_lats, den_lats = self._intersect_calc_scores_impl(
emissions_graphs, supervision_graphs[0], supervision_graphs[1], boosted
)
inverted_batch_order = invert_permutation(supervisions[:, 0].to(dtype=torch.long))
self.__batch_order = None
tot_scores = (num_tot_scores - den_tot_scores)[inverted_batch_order]
mmi_tot_scores, mmi_valid_mask = get_tot_objf_and_finite_mask(tot_scores, self.reduction)
if boosted:
assert num_lats is not None and den_lats is not None
size = (
emissions_graphs.dim0(),
emissions_graphs.scores.shape[0],
emissions_graphs.scores.shape[1] - 1,
)
            row_ids = emissions_graphs.dense_fsa_vec.shape().row_ids(1)  # frame -> sequence index of the DenseFsaVec
num_sparse = create_sparse_wrapped(
indices=[k2.index_select(row_ids, num_lats.seqframe_idx), num_lats.seqframe_idx, num_lats.phones,],
values=num_lats.get_arc_post(False, True).exp(),
size=size,
min_col_index=0,
)
del num_lats
den_sparse = create_sparse_wrapped(
indices=[k2.index_select(row_ids, den_lats.seqframe_idx), den_lats.seqframe_idx, den_lats.phones,],
values=den_lats.get_arc_post(False, True).exp(),
size=size,
min_col_index=0,
)
del den_lats
acc_loss = torch.sparse.sum((num_sparse - den_sparse).coalesce().abs(), (1, 2)).to_dense()
del num_sparse, den_sparse
acc_tot_scores, acc_valid_mask = get_tot_objf_and_finite_mask(acc_loss, self.reduction)
valid_mask = mmi_valid_mask & acc_valid_mask
total_loss = (
(self.boost_coeff * acc_tot_scores[inverted_batch_order][valid_mask] - mmi_tot_scores[valid_mask])
if self.reduction == "none"
else self.boost_coeff * acc_tot_scores - mmi_tot_scores
)
else:
valid_mask = mmi_valid_mask
total_loss = -mmi_tot_scores[valid_mask] if self.reduction == "none" else -mmi_tot_scores
return total_loss, valid_mask
class CtcMmiLoss(MAPLoss, CtcK2Mixin):
"""MMI loss with custom CTC topologies.
Available topologies:
- `default`, with or without self-loops
- `compact`, with or without self-loops
- `shared_blank`, with or without self-loops
- `minimal`, without self-loops
cfg takes precedence over all optional parameters
We keep explicit parameter setting to be able to create an instance without the need of a config.
"""
def __init__(
self,
num_classes: int,
blank: int,
reduction: str,
cfg: Optional[DictConfig] = None,
topo_type: str = "default",
topo_with_self_loops: bool = True,
token_lm: Optional[Union['k2.Fsa', str]] = None,
intersect_pruned: bool = False,
intersect_conf: GraphIntersectDenseConfig = GraphIntersectDenseConfig(),
boost_coeff: float = 0.0,
):
super().__init__(
num_classes=num_classes,
blank=blank,
reduction=reduction,
cfg=cfg,
topo_type=topo_type,
topo_with_self_loops=topo_with_self_loops,
token_lm=token_lm,
intersect_pruned=intersect_pruned,
intersect_conf=intersect_conf,
boost_coeff=boost_coeff,
)
def update_graph(self, graph: 'k2.Fsa'):
self.lm_graph = graph
lm_graph = self.lm_graph.clone()
if hasattr(lm_graph, "aux_labels"):
delattr(lm_graph, "aux_labels")
labels = lm_graph.labels
if labels.max() != self.num_classes - 1:
raise ValueError(f"lm_graph is not compatible with the num_classes: {labels.unique()}, {self.num_classes}")
from nemo.collections.asr.parts.k2.graph_compilers import MmiGraphCompiler as compiler
self.graph_compiler = compiler(
self.num_classes, self.blank, self.topo_type, self.topo_with_self_loops, aux_graph=lm_graph
)
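# Illustrative usage sketch (not part of the original module): builds a trivial token-level LM
# (a single-state acceptor over the non-blank tokens) and instantiates CtcMmiLoss with it.
# The sizes and the LM below are arbitrary example values; a real setup would load a proper
# token LM, e.g. with `load_graph`.
def _example_ctc_mmi_loss() -> 'CtcMmiLoss':
    num_classes, blank = 3, 0
    lm_arcs = "0 0 1 0.0\n0 0 2 0.0\n0 1 -1 0.0\n1"  # labels 1..num_classes-1 plus the final arc
    token_lm = k2.arc_sort(k2.Fsa.from_str(lm_arcs, acceptor=True))
    return CtcMmiLoss(num_classes=num_classes, blank=blank, reduction="mean", token_lm=token_lm)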
|
NeMo-main
|
nemo/collections/asr/parts/k2/map_loss.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/parts/k2/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from contextlib import nullcontext
from typing import ContextManager
import torch
import torch.nn.functional as F
from nemo.core.classes.loss import Loss
from nemo.core.utils.k2_guard import k2
def force_float32_context() -> ContextManager:
"""Get context manager to force float32 precision in autocast mode."""
if torch.is_autocast_enabled():
return torch.cuda.amp.autocast(dtype=torch.float32)
return nullcontext()
class GraphTransducerLossBase(Loss):
"""
Base class for graph transducer losses.
Implementation of the approach described in "Powerful and Extensible WFST Framework for RNN-Transducer Losses"
https://ieeexplore.ieee.org/document/10096679
Compose-Transducer: compose the unit (target text) and temporal schemas (graphs) into lattice.
Subclass should implement `get_unit_schema` and `get_temporal_schema` methods.
Grid-Transducer: construct the RNN-T lattice (grid) directly in code.
Subclass should implement `get_grid` method.
"""
def __init__(
self, use_grid_implementation: bool, connect_composed=False, double_scores=False, cast_to_float32=False
):
"""
Args:
use_grid_implementation: Whether to use the grid implementation (Grid-Transducer).
connect_composed: Connect graph after composing unit and temporal schemas (only for Compose-Transducer).
                The `connect` operation is slow; it is useful for visualization but not necessary for loss computation.
double_scores: Use calculation of loss in double precision (float64) in the lattice.
Does not significantly affect memory usage since the lattice is ~V/2 times smaller
than the joint tensor.
cast_to_float32: Force cast joint tensor to float32 before log-softmax calculation.
"""
super().__init__()
self.use_grid_implementation = use_grid_implementation
self.connect_composed = connect_composed
self.double_scores = double_scores
self.cast_to_float32 = cast_to_float32
@abc.abstractmethod
def get_unit_schema(self, units_tensor: torch.Tensor, vocab_size: int) -> "k2.Fsa":
"""
Get unit schema (target text) graph for Compose-Transducer.
Args:
units_tensor: tensor with target text
vocab_size: number of labels (including blank). Needed to construct additional eps-arcs (in some cases).
Returns:
unit schema graph (k2.Fsa).
Labels: <unit>:<unit>:<unit_position> (k2.Fsa: labels, aux_labels, unit_positions)
"""
pass
@abc.abstractmethod
def get_temporal_schema(self, num_frames: int, vocab_size: int, device: torch.device) -> "k2.Fsa":
"""
Get temporal schema graph for Compose-Transducer.
Args:
num_frames: length of the sequence (in frames)
vocab_size: number of labels (including blank)
device: device for tensor to construct
Returns:
temporal schema graph (k2.Fsa).
Labels: <unit>:<frame_index>. <unit> is a unit from vocab + special units (e.g., additional eps).
"""
pass
@abc.abstractmethod
def get_grid(self, units_tensor: torch.Tensor, num_frames: int, vocab_size: int) -> "k2.Fsa":
"""
Construct the transducer lattice (grid) directly for Grid-Transducer.
Args:
units_tensor: tensor with target text
num_frames: length of the sequence (in frames)
vocab_size: number of labels (including blank)
Returns:
transducer lattice (k2.Fsa).
Labels: <unit>:<frame_index>:<unit_position> (k2.Fsa: labels, aux_labels, unit_positions)
"""
pass
def get_composed_lattice(self, units_tensor: torch.Tensor, num_frames: int, vocab_size: int) -> "k2.Fsa":
"""
Get composed lattice (unit and temporal schemas) for Compose-Transducer. Useful for visualization.
Should be equivalent to the lattice from `get_grid` method.
Args:
units_tensor: tensor with target text
num_frames: length of the sequence (in frames)
vocab_size: vocab size (including blank)
Returns:
composed lattice (k2.Fsa) from unit and temporal schemas
"""
fsa_text = self.get_unit_schema(units_tensor, vocab_size)
fsa_temporal = self.get_temporal_schema(num_frames, vocab_size, units_tensor.device)
composed = k2.compose(fsa_text, fsa_temporal, treat_epsilons_specially=False)
if self.connect_composed:
composed = k2.connect(composed)
return composed
def get_graphs_batched(
self, logits_lengths: torch.Tensor, targets: torch.Tensor, target_lengths: torch.Tensor, vocab_size: int
) -> "k2.Fsa":
"""
Get batched lattice (grid or composed) for the batch of sequences.
Args:
logits_lengths: tensor with lengths of logits
targets: tensor with target units
target_lengths: tensor with lengths of targets
vocab_size: vocab size (including blank)
Returns:
batched lattice - FsaVec (k2.Fsa)
"""
batch_size = logits_lengths.shape[0]
with torch.no_grad():
if self.use_grid_implementation:
return k2.create_fsa_vec(
[
self.get_grid(
units_tensor=targets[i, : target_lengths[i].item()],
num_frames=logits_lengths[i].item(),
vocab_size=vocab_size,
)
for i in range(batch_size)
]
)
# composed version
text_fsas = [
self.get_unit_schema(units_tensor=targets[i, : target_lengths[i].item()], vocab_size=vocab_size,)
for i in range(batch_size)
]
temporal_fsas = [
self.get_temporal_schema(
num_frames=logits_lengths[i].item(), vocab_size=vocab_size, device=targets.device
)
for i in range(batch_size)
]
target_fsas_vec = k2.compose(
k2.create_fsa_vec(text_fsas), k2.create_fsa_vec(temporal_fsas), treat_epsilons_specially=False
)
if self.connect_composed:
k2.connect(target_fsas_vec)
return target_fsas_vec
def get_logits_indices(self, target_fsas_vec: k2.Fsa, logits_shape: torch.Size) -> torch.Tensor:
"""
Get indices of flatten logits for each arc in the lattices.
Args:
target_fsas_vec: batch of target FSAs with lattices
logits_shape: shape of the logits tensor
Returns:
1d tensor with indices
"""
# logits_shape: B x Time x Text+1 x Labels
batch_size = logits_shape[0]
device = target_fsas_vec.device
scores_to_batch_i = torch.repeat_interleave(
torch.arange(batch_size, device=device, dtype=torch.int64),
torch.tensor(
[target_fsas_vec.arcs.index(0, i)[0].values().shape[0] for i in range(batch_size)], device=device,
),
)
indices = (
scores_to_batch_i * logits_shape[1] * logits_shape[2] * logits_shape[3] # Batch
+ target_fsas_vec.aux_labels.to(torch.int64) * logits_shape[2] * logits_shape[3] # Time indices
+ target_fsas_vec.unit_positions.to(torch.int64) * logits_shape[3] # Units (text) indices
+ target_fsas_vec.labels.to(torch.int64) # Labels
)
return indices
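# Small illustrative check (not part of the original module): the flat-index arithmetic in
# `get_logits_indices` mirrors row-major flattening of a (B, T, U+1, V) logits tensor, i.e.
# logits.flatten()[b*T*(U+1)*V + t*(U+1)*V + u*V + v] == logits[b, t, u, v].
# The shapes below are arbitrary example values.
def _example_flat_index_arithmetic() -> bool:
    B, T, U1, V = 2, 3, 4, 5
    logits = torch.arange(B * T * U1 * V).reshape(B, T, U1, V)
    b, t, u, v = 1, 2, 3, 4
    flat_index = b * T * U1 * V + t * U1 * V + u * V + v
    return bool(logits.flatten()[flat_index] == logits[b, t, u, v])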
class GraphRnntLoss(GraphTransducerLossBase):
"""
RNN-T loss implementation based on WFST according
to "Powerful and Extensible WFST Framework for RNN-Transducer Losses"
https://ieeexplore.ieee.org/document/10096679
"""
def __init__(
self,
blank: int,
use_grid_implementation=True,
connect_composed=False,
double_scores=False,
cast_to_float32=False,
):
"""
Init method
Args:
blank: blank label index
use_grid_implementation: Whether to use the grid implementation (Grid-Transducer).
connect_composed: Connect graph after composing unit and temporal schemas (only for Compose-Transducer).
                The `connect` operation is slow; it is useful for visualization but not necessary for loss computation.
double_scores: Use calculation of loss in double precision (float64) in the lattice.
Does not significantly affect memory usage since the lattice is ~V/2 times smaller than the joint tensor.
cast_to_float32: Force cast joint tensor to float32 before log-softmax calculation.
"""
super().__init__(
use_grid_implementation=use_grid_implementation,
connect_composed=connect_composed,
double_scores=double_scores,
cast_to_float32=cast_to_float32,
)
self.blank = blank
def get_unit_schema(self, units_tensor: torch.Tensor, vocab_size: int) -> "k2.Fsa":
"""
Get unit schema (target text) graph for RNN-T loss (Compose-Transducer).
Forward arcs represent text labels.
Example graph: text [1, 2], blank=0.
graph::
0:0:0 0:0:1 0:0:2
+-------+ +-------+ +-------+
v | v | v |
+-----------+ 1:1:0 +-----------+ 2:2:1 +-----------+ -1:-1:-1 #===#
| 0 | -------> | 1 | -------> | 2 | ---------> H 3 H
+-----------+ +-----------+ +-----------+ #===#
Args:
units_tensor: 1d tensor with text units
vocab_size: number of total labels (vocab size including blank)
Returns:
unit schema graph (k2.Fsa).
Labels: <unit>:<unit>:<unit_position> (k2.Fsa: labels, aux_labels, unit_positions)
"""
blank_id = self.blank
device = units_tensor.device
text_len = units_tensor.shape[0]
# arcs
# text_len + 1 states, in every state - self-loops (blank) and forward (text label / last forward -1)
arcs = torch.zeros(((text_len + 1) * 2, 4), dtype=torch.int32, device=device)
text_indices = torch.arange(0, text_len + 1, dtype=torch.int32, device=device)
# blank labels
arcs[::2, 0] = text_indices # from state
arcs[::2, 1] = text_indices # to state
arcs[::2, 2] = blank_id
# text labels
arcs[1::2, 0] = text_indices # from state
arcs[1::2, 1] = text_indices + 1 # to state
arcs[1:-1:2, 2] = units_tensor # labels: text
arcs[-1, 2] = -1 # last transition to final state, ilabel=-1 (special for k2)
olabels = arcs[:, 2].detach().clone() # same as ilabels
fsa_text = k2.Fsa(arcs, olabels)
fsa_text.unit_positions = text_indices.expand(2, -1).transpose(0, 1).flatten()
fsa_text.unit_positions[-1] = -1 # last transition to final state
return fsa_text
def get_temporal_schema(self, num_frames: int, vocab_size: int, device: torch.device) -> "k2.Fsa":
"""
Get temporal schema graph for RNN-T loss (Compose-Transducer).
Forward arc - blank, self-loops - all labels excluding blank
Example graph: blank=0, num_frames=3, vocab_size=3.
Labels: <unit>:<frame_index>. <unit> is a unit from vocab.
graph::
1:0 1:1 1:2
+-----+ +-----+ +-----+
v | v | v |
+---------+ 0:0 +---------+ 0:1 +---------+ 0:2 +---+ -1:-1 #===#
| 0 | -----> | 1 | -----> | 2 | -----> | 3 | -------> H 4 H
+---------+ +---------+ +---------+ +---+ #===#
^ 2:0 | ^ 2:1 | ^ 2:2 |
+-----+ +-----+ +-----+
Args:
num_frames: length of the sequence (in frames)
vocab_size: number of labels (including blank)
device: device for tensor to construct
Returns:
temporal schema graph (k2.Fsa).
Labels: <unit>:<frame_index>. <unit> is a unit from vocab.
"""
blank_id = self.blank
fsa_temporal_arcs = torch.zeros((num_frames * vocab_size + 1, 4), dtype=torch.int32, device=device)
sequence_states = torch.arange(0, num_frames, dtype=torch.int32, device=device)
# for every state - vocab_size arcs, [0, 1, ..., vocab_size-1, 0, 1, ..., vocab_size-1, ...]
start_states = sequence_states.expand(vocab_size, num_frames).transpose(0, 1).flatten()
# first: make all arcs - self-loops
fsa_temporal_arcs[:-1, 0] = start_states # from
fsa_temporal_arcs[:-1, 1] = start_states # to
fsa_temporal_arcs[:-1, 2] = (
torch.arange(0, vocab_size, dtype=torch.int32, device=device).expand(num_frames, vocab_size).flatten()
)
# blank-arcs: forward
fsa_temporal_arcs[blank_id:-1:vocab_size, 1] = sequence_states + 1 # blanks
# transition to last final state
fsa_temporal_arcs[-1, :3] = torch.tensor((num_frames, num_frames + 1, -1), dtype=torch.int32, device=device)
# output symbols: position in the sequence, same as start states for arcs
olabels = fsa_temporal_arcs[:, 0].detach().clone()
olabels[-1] = -1 # last arc to final state
fsa_temporal = k2.Fsa(fsa_temporal_arcs, olabels)
fsa_temporal = k2.arc_sort(fsa_temporal) # need for compose
return fsa_temporal
@staticmethod
def relabel_states(states: torch.Tensor, n: int, m: int) -> torch.Tensor:
"""
Relabel states to be in topological order: by diagonals
Args:
states: tensor with states
n: number of rows
m: number of columns
Returns:
tensor with relabeled states (same shape as `states`)
"""
i = states % n
j = torch.div(states, n, rounding_mode='floor') # states // n, torch.div to avoid pytorch warnings
min_mn = min(m, n)
max_mn = max(m, n)
diag = i + j
anti_diag = m + n - 1 - diag
max_idx = n * m - 1
cur_diag_idx = i if m > n else m - j - 1
states = (
diag.lt(min_mn) * ((diag * (diag + 1) >> 1) + i)
+ torch.logical_and(diag.ge(min_mn), diag.lt(max_mn))
* ((min_mn * (min_mn + 1) >> 1) + (diag - min_mn) * min_mn + cur_diag_idx)
+ diag.ge(max_mn) * (max_idx - (anti_diag * (anti_diag + 1) >> 1) + m - j)
)
return states
def get_grid(self, units_tensor: torch.Tensor, num_frames: int, vocab_size: int) -> "k2.Fsa":
"""
Construct the RNN-T lattice directly (Grid-Transducer).
Args:
units_tensor: 1d tensor with text units
num_frames: length of the sequence (number of frames)
vocab_size: number of total labels (vocab size including blank)
Returns:
transducer lattice (k2.Fsa).
Labels: <unit>:<frame_index>:<unit_position> (k2.Fsa: labels, aux_labels, unit_positions)
"""
blank_id = self.blank
text_length = units_tensor.shape[0]
device = units_tensor.device
num_grid_states = num_frames * (text_length + 1)
num_forward_arcs = (num_frames - 1) * (text_length + 1)
num_text_arcs = text_length * num_frames
arcs = torch.zeros((num_forward_arcs + num_text_arcs + 2, 4), dtype=torch.int32, device=device)
# blank transitions
# i, i+<text_len + 1>, 0 <blank>, i / <text_len+1>, i % <text_len + 1>
from_states = torch.arange(num_forward_arcs, device=device)
to_states = from_states + (text_length + 1)
arcs[:num_forward_arcs, 0] = from_states
arcs[:num_forward_arcs, 1] = to_states
arcs[:num_forward_arcs, 2] = blank_id
# text arcs
from_states = (
torch.arange(num_grid_states, dtype=torch.int32, device=device)
.reshape(num_frames, text_length + 1)[:, :-1]
.flatten()
)
to_states = from_states + 1
ilabels = units_tensor.expand(num_frames, -1).flatten()
arcs[num_forward_arcs:-2, 0] = from_states
arcs[num_forward_arcs:-2, 1] = to_states
arcs[num_forward_arcs:-2, 2] = ilabels
# last 2 states
arcs[-2, :3] = torch.tensor((num_grid_states - 1, num_grid_states, blank_id), dtype=torch.int32, device=device)
arcs[-1, :3] = torch.tensor((num_grid_states, num_grid_states + 1, -1), dtype=torch.int32, device=device)
# sequence indices, time indices
olabels = torch.div(arcs[:, 0], (text_length + 1), rounding_mode="floor") # arcs[:, 0] // (text_length + 1)
unit_positions = arcs[:, 0] % (text_length + 1)
# last state: final
olabels[-1] = -1
unit_positions[-1] = -1
# relabel
# instead of using top sort (extremely expensive) k2.top_sort(rnnt_graph)
arcs[:-2, 0] = self.relabel_states(arcs[:-2, 0], text_length + 1, num_frames)
arcs[:-3, 1] = self.relabel_states(arcs[:-3, 1], text_length + 1, num_frames)
# sort by start state - required in k2
# TODO: maybe it is more optimal to avoid sort, construct arcs in ascending order
_, indices = torch.sort(arcs[:, 0], dim=0)
sorted_arcs = arcs[indices]
olabels = olabels[indices]
unit_positions = unit_positions[indices]
rnnt_graph = k2.Fsa(sorted_arcs, olabels)
rnnt_graph.unit_positions = unit_positions
return rnnt_graph
def forward(
self, acts: torch.Tensor, labels: torch.Tensor, act_lens: torch.Tensor, label_lens: torch.Tensor,
) -> torch.Tensor:
"""
Compute forward method for RNN-T.
Args:
acts: activations (joint tensor). NB: raw logits, not after log-softmax
labels: target labels
act_lens: lengths of activations
label_lens: length of labels sequences
Returns:
batch of RNN-T scores (loss)
"""
# argument names are consistent with NeMo, see RNNTLoss.forward:
# self._loss(acts=log_probs, labels=targets, act_lens=input_lengths, label_lens=target_lengths)
logits, targets, logits_lengths, target_lengths = acts, labels, act_lens, label_lens
# logits: B x Time x Text+1 x C
vocab_size = logits.shape[-1]
target_fsas_vec = self.get_graphs_batched(logits_lengths, targets, target_lengths, vocab_size)
cast_context = force_float32_context() if self.cast_to_float32 else nullcontext()
with cast_context:
log_probs = F.log_softmax(logits, dim=-1)
with torch.no_grad():
indices = self.get_logits_indices(target_fsas_vec, logits.shape)
# transition to the last state
# use 0 index (for valid index_select) and manually assign score after index_select for this case
indices[target_fsas_vec.labels == -1] = 0
            # NB: modify the scores tensor first and assign it to the FSA afterwards (modify -> assign);
            # assigning first and then modifying in-place would not update all scores correctly in k2
scores = log_probs.flatten().index_select(-1, indices)
# fix weights for the arcs to the last state
scores[target_fsas_vec.labels == -1] = 0
target_fsas_vec.scores = scores
scores = -1 * target_fsas_vec.get_tot_scores(use_double_scores=self.double_scores, log_semiring=True)
return scores
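# Illustrative usage sketch (not part of the original module): computes the graph-based RNN-T
# loss for a random batch. The shapes and the blank ID are arbitrary example values; `acts`
# are raw joint-network logits of shape (B, T, U+1, V), as expected by `forward`.
def _example_graph_rnnt_loss() -> torch.Tensor:
    B, T, U, V = 2, 8, 4, 6  # batch, frames, max target length, vocab size (incl. blank)
    loss_fn = GraphRnntLoss(blank=0, use_grid_implementation=True)
    acts = torch.randn(B, T, U + 1, V, requires_grad=True)
    labels = torch.randint(low=1, high=V, size=(B, U))  # non-blank targets
    act_lens = torch.tensor([T, T - 2])
    label_lens = torch.tensor([U, U - 1])
    return loss_fn(acts=acts, labels=labels, act_lens=act_lens, label_lens=label_lens)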
|
NeMo-main
|
nemo/collections/asr/parts/k2/graph_transducer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import List, Optional, Tuple
import torch
from nemo.collections.asr.parts.k2.grad_utils import GradExpNormalize
from nemo.collections.asr.parts.k2.utils import (
create_supervision,
get_arc_weights,
get_uniform_rnnt_prune_ranges,
make_non_pad_mask,
make_non_pad_mask_3d,
prep_padded_densefsavec,
)
from nemo.core.utils.k2_guard import k2 # import k2 from guard module
class CtcK2Mixin(ABC):
"""k2 Mixin class that simplifies the construction of various k2-based CTC-like losses.
It does the following:
- Prepares and adapts the input tensors (method _prepare_log_probs_and_targets).
- Creates Emissions graphs (method _prepare_emissions_graphs).
- Extracts the labels and probabilities of the best lattice path (method _extract_labels_and_probabilities).
"""
def _prepare_log_probs_and_targets(
self,
log_probs: torch.Tensor,
input_lengths: torch.Tensor,
targets: Optional[torch.Tensor] = None,
target_lengths: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
"""Creates k2-style supervisions and shifts targets by one if the <blank> number is not zero.
"""
assert log_probs.size(-1) == self.num_classes
supervisions = create_supervision(input_lengths)
# shift targets to make output epsilon ID zero
return (
log_probs,
supervisions,
torch.where(targets < self.blank, targets + 1, targets) if targets is not None else None,
target_lengths,
)
def _prepare_emissions_graphs(self, log_probs: torch.Tensor, supervisions: torch.Tensor) -> 'k2.DenseFsaVec':
"""Creates DenseFsaVec, padding it with <epsilon> frames if the topology is `compact`.
In particular, every second frame of the DenseFsaVec is the <epsilon> frame.
<epsilon> frame is a frame with <epsilon> log-probability zero and every other log-probability is -inf.
"""
return (
prep_padded_densefsavec(log_probs, supervisions)
if self.pad_fsavec
else k2.DenseFsaVec(log_probs, supervisions)
)
def _maybe_normalize_gradients(self, log_probs: torch.Tensor, input_lengths: torch.Tensor) -> torch.Tensor:
"""PyTorch is doing the log-softmax normalization as part of the CTC computation.
More: https://github.com/k2-fsa/k2/issues/575
"""
return GradExpNormalize.apply(log_probs, input_lengths, "mean" if self.reduction != "sum" else "none")
def _extract_labels_and_probabilities(
self, shortest_path_fsas: 'k2.Fsa', return_ilabels: bool = False, output_aligned: bool = True
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""Extracts the labels and probabilities of the best lattice path,
dropping <epsilon> arcs and restoring the targets shift, if needed.
"""
shortest_paths = []
probs = []
# direct iterating does not work as expected
for i in range(shortest_path_fsas.shape[0]):
shortest_path_fsa = shortest_path_fsas[i]
# suppose that artificial input epsilon numbers >= self.num_classes
non_eps_mask = (shortest_path_fsa.labels != -1) & (shortest_path_fsa.labels < self.num_classes)
if return_ilabels:
labels = shortest_path_fsa.labels[non_eps_mask]
else:
labels = shortest_path_fsa.aux_labels[non_eps_mask]
if self.blank != 0:
# suppose output epsilon number == 0
# since the input epsilons were removed, we treat all remaining epsilons as blanks
labels[labels == 0] = self.blank
labels[(labels > 0) & (labels < self.blank)] -= 1
labels = labels.to(dtype=torch.long)
if not return_ilabels and not output_aligned:
labels = labels[labels != self.blank]
shortest_paths.append(labels)
probs.append(get_arc_weights(shortest_path_fsa)[non_eps_mask].exp().to(device=shortest_path_fsas.device))
return shortest_paths, probs
class RnntK2Mixin(CtcK2Mixin):
"""k2 Mixin class that simplifies the construction of various k2-based RNNT-like losses. Inherits CtcK2Mixin.
It does the following:
- Prepares and adapts the input tensors.
- Creates Emissions graphs.
- Extracts the labels and probabilities of the best lattice path (method _extract_labels_and_probabilities).
"""
def _prepare_log_probs_and_targets(
self,
log_probs: torch.Tensor,
input_lengths: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Before calling super()._prepare_log_probs_and_targets, this method reshapes the log_probs tensor
from (B, T, U+1, D) to (B, T', D) where T' = T*(U+1), shifts paddings along T and U towards the end of T',
and recomputes input_lengths.
It also calculates indices on which <epsilon> steps should be applied to the log_probs tensor to emulate
<blank> arcs shift of the Emissions graph for the pruned RNNT variant.
"""
assert len(log_probs.size()) == 4 # B T U D
B, T, U, D = log_probs.size()
TU = T * U
# save step indices if, as we assume, decoder output pruning has been applied
if self.predictor_window_size > 0 and self.predictor_window_size < target_lengths.max():
window_size_with_blank = self.predictor_window_size + 1
ranges_begin = get_uniform_rnnt_prune_ranges(
input_lengths, target_lengths, window_size_with_blank, self.predictor_step_size, T, True
)
step_sizes = ranges_begin[:, 1:] - ranges_begin[:, :-1]
raw_step_indices = torch.where(step_sizes > 0)
if self.predictor_step_size > 1:
raw_step_indices = torch.repeat_interleave(
torch.stack(raw_step_indices).T, step_sizes[raw_step_indices], dim=0
).T
raw_step_indices = (raw_step_indices[0], raw_step_indices[1])
unique, count = torch.unique(raw_step_indices[0], return_counts=True)
shift_mask = raw_step_indices[0].unsqueeze(0).repeat(len(unique), 1) == unique.unsqueeze(-1)
step_indices = (
raw_step_indices[0],
(
torch.arange(ranges_begin.size(1)).unsqueeze(0).repeat(ranges_begin.size(0), 1)
* window_size_with_blank
)[(raw_step_indices[0], raw_step_indices[1] + 1)]
+ torch.cumsum(shift_mask, 1)[shift_mask]
- 1,
)
max_count = count.max()
max_count_vec = torch.full((B,), max_count)
max_count_vec[unique] -= count
pad_indices_row = torch.repeat_interleave(torch.arange(B), max_count_vec)
pad_unique = torch.unique(pad_indices_row)
pad_shift_mask = pad_indices_row.unsqueeze(0).repeat(len(pad_unique), 1) == pad_unique.unsqueeze(-1)
pad_indices = (
pad_indices_row,
T * window_size_with_blank + max_count - torch.cumsum(pad_shift_mask, 1)[pad_shift_mask],
)
self.__step_indices = (
torch.cat((step_indices[0], pad_indices[0])),
torch.cat((step_indices[1], pad_indices[1])),
)
self.__supervisions_add = max_count - max_count_vec
else:
self.__step_indices = None
self.__supervisions_add = None
# reshape 4D log_probs to 3D with respect to target_lengths
non_pad_mask_true = make_non_pad_mask_3d(input_lengths, target_lengths + 1, T, U).flatten(1)
input_lengths = non_pad_mask_true.sum(1)
non_pad_mask_fake = make_non_pad_mask(input_lengths, TU).flatten()
non_pad_mask_true = non_pad_mask_true.flatten()
rearranged_indices = torch.arange(TU * B, device=log_probs.device)
rearranged_indices_buffer = rearranged_indices.clone()
rearranged_indices[non_pad_mask_fake] = rearranged_indices_buffer[non_pad_mask_true]
rearranged_indices[~non_pad_mask_fake] = rearranged_indices_buffer[~non_pad_mask_true]
log_probs = log_probs.reshape(-1, D)[rearranged_indices].view(B, -1, D)
return super()._prepare_log_probs_and_targets(log_probs, input_lengths, targets, target_lengths)
def _prepare_emissions_graphs(self, log_probs: torch.Tensor, supervisions: torch.Tensor) -> 'k2.DenseFsaVec':
"""Overrides super()._prepare_emissions_graphs.
Creates DenseFsaVec, adding <epsilon> outputs to the end of the D dimension.
If pruning is used, this method also pads the DenseFsaVec with <epsilon> frames
according to the <epsilon> steps, calculated before.
<epsilon> frame is a frame with <epsilon> log-probability zero and every other log-probability is -inf.
"""
if self.__step_indices is None or self.__supervisions_add is None:
log_probs_eps = torch.cat(
(log_probs, torch.zeros((log_probs.size(0), log_probs.size(1), 1), device=log_probs.device)), dim=2
)
else:
mask = torch.zeros(
(log_probs.size(0), log_probs.size(1) + int(len(self.__step_indices[0]) / log_probs.size(0))),
dtype=torch.bool,
)
mask[self.__step_indices] = True
log_probs_eps = torch.zeros((mask.size(0), mask.size(1), log_probs.size(2) + 1), device=log_probs.device)
log_probs_eps[mask] = torch.tensor(
[torch.finfo(torch.float32).min] * log_probs.size(2) + [0], device=log_probs.device
)
log_probs_eps[~mask] = torch.cat(
(log_probs, torch.zeros((log_probs.size(0), log_probs.size(1), 1), device=log_probs.device)), dim=2
).view(-1, log_probs.size(-1) + 1)
input_lengths = supervisions[:, -1] + self.__supervisions_add[supervisions[:, 0].to(dtype=torch.long)]
if not torch.all(input_lengths[:-1] - input_lengths[1:] >= 0):
# have to reorder supervisions inplace
order = torch.argsort(input_lengths, descending=True)
# the second column is assumed to be zero
supervisions[:, 0] = supervisions[order, 0]
supervisions[:, -1] = input_lengths[order]
else:
supervisions[:, -1] = input_lengths
self.__step_indices = None
self.__supervisions_add = None
return k2.DenseFsaVec(log_probs_eps, supervisions)
def _maybe_normalize_gradients(self, log_probs: torch.Tensor, input_lengths: torch.Tensor) -> torch.Tensor:
"""Not required for RNNT.
"""
return log_probs
|
NeMo-main
|
nemo/collections/asr/parts/k2/loss_mixins.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2020, Xiaomi CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import struct
from pickle import UnpicklingError
from typing import List, Optional, Tuple, Union
import torch
from nemo.core.utils.k2_guard import k2 # import k2 from guard module
from nemo.utils import logging
def create_supervision(input_lengths: torch.Tensor) -> torch.Tensor:
"""Creates a special supervisions tensor from input lengths.
These supervisions are required for some k2 methods.
"""
supervisions = torch.stack(
(torch.tensor(range(input_lengths.shape[0])), torch.zeros(input_lengths.shape[0]), input_lengths.cpu(),), 1,
).to(dtype=torch.int32)
# the duration column has to be sorted in decreasing order
return supervisions[torch.argsort(supervisions[:, -1], descending=True)]
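# Illustrative example (not part of the original module): for input lengths [3, 5] the result
# holds rows of (sequence_index, start_frame, duration), sorted by duration in decreasing
# order, i.e. tensor([[1, 0, 5], [0, 0, 3]], dtype=torch.int32).
def _example_create_supervision() -> torch.Tensor:
    return create_supervision(torch.tensor([3, 5]))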
def invert_permutation(indices: torch.Tensor) -> torch.Tensor:
"""Produces a tensor of reverse permutation for a given indices.
Based on https://github.com/k2-fsa/snowfall/blob/master/snowfall/common.py
"""
ans = torch.zeros(indices.shape, device=indices.device, dtype=indices.dtype)
ans[indices.to(dtype=torch.long)] = torch.arange(0, indices.shape[0], device=indices.device, dtype=indices.dtype)
return ans
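# Illustrative example (not part of the original module): invert_permutation returns the
# inverse permutation, e.g. [2, 0, 1] -> [1, 2, 0].
def _example_invert_permutation() -> torch.Tensor:
    return invert_permutation(torch.tensor([2, 0, 1], dtype=torch.int32))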
def make_non_pad_mask(input_lengths: torch.Tensor, seq_len: int):
"""Converts input_lengths to a non-padding mask. The mask is 2D.
"""
batch_size = input_lengths.shape[0]
seq_range = torch.arange(0, seq_len, device=input_lengths.device)
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, seq_len)
seq_length_expand = input_lengths.clone().detach().to(seq_range_expand.device).unsqueeze(-1)
mask = seq_range_expand < seq_length_expand
return mask
def make_non_pad_mask_3d(
lengths_x: torch.Tensor, lengths_y: torch.Tensor, max_length_x: int, max_length_y: int
) -> torch.Tensor:
"""Converts two orthogonal input_lengths to a non-padding mask. The mask is 3D.
"""
assert lengths_x.size() == lengths_y.size()
return make_non_pad_mask(lengths_x, max_length_x).unsqueeze(2) & make_non_pad_mask(
lengths_y, max_length_y
).unsqueeze(1)
def ragged_to_tensor_2axes_simple(rt: k2.RaggedTensor) -> Optional[torch.Tensor]:
"""Converts k2.RaggedTensor to torch.Tensor if the RaggedTensor is shallow (has two axes).
"""
rt_list = rt.tolist()
result_list = []
for e in rt_list:
if len(e) == 0:
result_list.append(0)
elif len(e) == 1:
result_list.append(e[0])
else:
return None
return torch.tensor(result_list, dtype=torch.int32)
def load_graph(graph_path: str) -> 'k2.Fsa':
"""Fsa graph loading helper function. Loads graphs stored in different formats.
"""
if os.path.exists(graph_path):
errors = []
try:
graph_dict = torch.load(graph_path, map_location="cpu")
graph = k2.Fsa.from_dict(graph_dict)
return graph
except UnpicklingError as e:
errors.append(e)
with open(graph_path, "rt", encoding="utf-8") as f:
graph_txt = f.read()
# order from the most frequent case to the least
for func, acceptor in [(k2.Fsa.from_openfst, False), (k2.Fsa.from_str, True), (k2.Fsa.from_str, False)]:
try:
graph = func(graph_txt, acceptor=acceptor)
return graph
except (TypeError, ValueError, RuntimeError) as e:
errors.append(e)
raise Exception(errors)
else:
logging.warning(f"""No such file: '{graph_path}'""")
return None
def intersect_with_self_loops(base_graph: 'k2.Fsa', aux_graph: 'k2.Fsa') -> 'k2.Fsa':
"""Intersection helper function.
"""
assert hasattr(base_graph, "aux_labels")
assert not hasattr(aux_graph, "aux_labels")
aux_graph_with_self_loops = k2.arc_sort(k2.add_epsilon_self_loops(aux_graph)).to(base_graph.device)
result = k2.intersect(k2.arc_sort(base_graph), aux_graph_with_self_loops, treat_epsilons_specially=False)
setattr(result, "phones", result.labels)
return result
def compose_with_self_loops(base_graph: 'k2.Fsa', aux_graph: 'k2.Fsa') -> 'k2.Fsa':
"""Composition helper function.
"""
aux_graph_with_self_loops = k2.arc_sort(k2.add_epsilon_self_loops(aux_graph)).to(base_graph.device)
return k2.compose(base_graph, aux_graph_with_self_loops, treat_epsilons_specially=False, inner_labels="phones")
def create_sparse_wrapped(
indices: List[torch.Tensor],
values: torch.Tensor,
size: Optional[Union[Tuple[int, int], Tuple[int, int, int]]] = None,
min_col_index: Optional[int] = None,
) -> torch.Tensor:
"""Wraps up k2.create_sparse to create 2- or 3-dimensional sparse tensors.
"""
assert size is None or len(indices) == len(size)
if len(indices) == 2:
return k2.create_sparse(
rows=indices[0], cols=indices[1], values=values, size=size, min_col_index=min_col_index,
)
elif len(indices) == 3:
assert indices[0].ndim == indices[1].ndim == indices[2].ndim == 1
assert indices[0].numel() == indices[1].numel() == indices[2].numel() == values.numel()
if min_col_index is not None:
assert isinstance(min_col_index, int)
kept_indices = indices[-1] >= min_col_index
indices = [i[kept_indices] for i in indices]
values = values[kept_indices]
if size is not None:
return torch.sparse_coo_tensor(
torch.stack(indices), values, size=size, device=values.device, requires_grad=values.requires_grad,
)
else:
return torch.sparse_coo_tensor(
torch.stack(indices), values, device=values.device, requires_grad=values.requires_grad,
)
else:
raise ValueError(f"len(indices) = {len(indices)}")
def prep_padded_densefsavec(log_softmax: torch.Tensor, supervisions: torch.Tensor) -> 'k2.DenseFsaVec':
"""Performs special epsilon-padding required for composition with some of the topologies.
"""
log_softmax_eps = torch.cat(
[
log_softmax,
torch.full((log_softmax.shape[0], log_softmax.shape[1], 1), -float("inf"), device=log_softmax.device,),
],
axis=-1,
)
log_softmax_padded = torch.zeros(
(log_softmax_eps.shape[0], log_softmax_eps.shape[1] * 2, log_softmax_eps.shape[2],), device=log_softmax.device,
)
log_softmax_padded[:, ::2] = log_softmax_eps
supervisions_padded = supervisions.clone()
supervisions_padded[:, 2] *= 2
dense_log_softmax_padded = k2.DenseFsaVec(log_softmax_padded, supervisions_padded)
return dense_log_softmax_padded
def shift_labels_inpl(lattices: List['k2.Fsa'], shift: int):
"""Shifts lattice labels and aux_labels by a given number.
This is an in-place operation, if the lattice is on GPU.
"""
for lattice in lattices:
mask = lattice.labels > 0
lattice.labels[mask] += shift
if hasattr(lattice, "aux_labels"):
mask = lattice.aux_labels > 0
lattice.aux_labels[mask] += shift
return reset_properties_fsa(lattices)
def reset_properties_fsa(graph: 'k2.Fsa'):
"""Resets properties of a graph.
In-place (does not create a new graph) if the graph is on GPU.
Use this every time you alter a graph in-place.
See https://github.com/k2-fsa/k2/issues/978 for more information."""
graph.__dict__["_properties"] = None
# CPU graphs need to be sorted e.g. for intersection
if graph.device == torch.device("cpu"):
graph = k2.arc_sort(graph)
return graph
def add_self_loops(graph: 'k2.Fsa', label: int = 0, mode: str = "auto"):
"""Adds self-loops with given label to a graph.
Supported modes are ``input``, ``output``, and ``auto``,
Where ``input`` leaves aux_labels zeroes, if present, ``output`` leaves labels zeroes"""
assert mode in ("input", "output", "auto"), "Supported modes are ``input``, ``output``, and ``auto``: {mode}"
assert mode != "output" or hasattr(graph, "aux_labels"), "Graph must have aux_labels for mode ``output``"
new_graph, arc_map = k2.add_epsilon_self_loops(graph, ret_arc_map=True)
if mode != "output":
new_graph.labels[arc_map == -1] = label
if mode != "input" and hasattr(graph, "aux_labels"):
new_graph.aux_labels[arc_map == -1] = label
return reset_properties_fsa(new_graph)
def get_arc_weights(graph: 'k2.Fsa') -> torch.Tensor:
"""Returns 1d torch.Tensor with arc weights of a given graph.
"""
if len(graph.shape) > 2:
raise NotImplementedError("FsaVec is not supported at the moment.")
weights_int = graph.arcs.values()[:, -1].tolist()
weights_float = struct.unpack('%sf' % len(weights_int), struct.pack('%si' % len(weights_int), *weights_int))
return torch.Tensor(weights_float)
def get_tot_objf_and_finite_mask(tot_scores: torch.Tensor, reduction: str) -> Tuple[torch.Tensor, torch.Tensor]:
"""Figures out the total score(log-prob) over all successful supervision segments
(i.e. those for which the total score wasn't -infinity).
Args:
tot_scores: a Torch tensor of shape (num_segments,) containing total scores
from forward-backward
reduction: a reduction type ('mean', 'sum' or 'none')
Returns:
Returns a tuple of 2 scalar tensors: (tot_score, finite_mask)
where finite_mask is a tensor containing successful segment mask.
Based on get_tot_objf_and_num_frames
from https://github.com/k2-fsa/snowfall/blob/master/snowfall/objectives/common.py
"""
finite_mask = ~torch.isnan(tot_scores) & torch.ne(tot_scores, -float("inf"))
if reduction == "mean":
tot_scores = tot_scores[finite_mask].mean()
elif reduction == "sum":
tot_scores = tot_scores[finite_mask].sum()
return tot_scores, finite_mask
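# Illustrative example (not part of the original module): segments with a -inf total score are
# masked out before reduction, so the mean below is taken over the two finite scores only.
def _example_tot_objf() -> Tuple[torch.Tensor, torch.Tensor]:
    tot_scores = torch.tensor([-1.5, float("-inf"), -2.5])
    return get_tot_objf_and_finite_mask(tot_scores, reduction="mean")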
def get_uniform_rnnt_prune_ranges(
encoded_lengths: torch.Tensor,
target_lengths: torch.Tensor,
window_size_with_blank: int,
step: int = 1,
max_seq_len: Optional[int] = None,
begin_only: bool = False,
) -> torch.Tensor:
"""Creates the pruning ranges for the Encoder and Predictor of RNNT.
The ranges are similar to https://k2-fsa.github.io/k2/python_api/api.html#k2.get_rnnt_prune_ranges
    but they are constructed under the assumption of a uniform distribution of token activations across time frames
and without any posterior knowledge.
"""
assert window_size_with_blank > 1
assert step >= 1
assert window_size_with_blank > step
assert len(encoded_lengths) == len(target_lengths)
ranges_begin = torch.zeros(
(
len(encoded_lengths),
encoded_lengths.max() if max_seq_len is None else max(max_seq_len, encoded_lengths.max()),
),
dtype=torch.long,
)
for i in (target_lengths >= window_size_with_blank).nonzero(as_tuple=True)[0]:
encoded_len = encoded_lengths[i]
ranges_begin_raw = torch.arange(int((target_lengths[i] - window_size_with_blank) / step + 2)) * step
ranges_begin_raw[-1] = target_lengths[i] - window_size_with_blank + 1
ranges_begin[i, :encoded_len] = torch.nn.functional.interpolate(
ranges_begin_raw.reshape(1, 1, -1).to(dtype=torch.float), encoded_len, mode="nearest-exact"
).to(dtype=torch.long)
ranges_begin[i, encoded_len:] = ranges_begin[i, encoded_len - 1]
return (
ranges_begin
if begin_only
else ranges_begin.unsqueeze(-1).repeat(1, 1, window_size_with_blank) + torch.arange(window_size_with_blank)
)
def apply_rnnt_prune_ranges(
encoder_outputs: torch.Tensor, decoder_outputs: torch.Tensor, ranges: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Prepares pruned encoder and decoder outputs according to the prune ranges.
Based on k2.do_rnnt_pruning(...)
"""
B, T, window_size_with_blank = ranges.size()
D1 = encoder_outputs.size(-1)
_, U, D2 = decoder_outputs.size()
assert B == encoder_outputs.size(0)
assert T == encoder_outputs.size(1)
assert B == decoder_outputs.size(0)
encoder_outputs_pruned = encoder_outputs.unsqueeze(2).expand((B, T, window_size_with_blank, D1))
decoder_outputs_pruned = torch.gather(
decoder_outputs.unsqueeze(1).expand((B, T, U, D2)),
dim=2,
index=ranges.reshape((B, T, window_size_with_blank, 1)).expand((B, T, window_size_with_blank, D2)),
)
return encoder_outputs_pruned, decoder_outputs_pruned
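# Illustrative usage sketch (not part of the original module): builds uniform RNNT prune ranges
# for a toy batch and gathers the corresponding encoder/decoder slices. All sizes below are
# arbitrary example values.
def _example_uniform_pruning() -> Tuple[torch.Tensor, torch.Tensor]:
    B, T, D = 2, 6, 8
    encoded_lengths = torch.tensor([6, 4])
    target_lengths = torch.tensor([6, 3])  # max target length U = 6
    ranges = get_uniform_rnnt_prune_ranges(encoded_lengths, target_lengths, window_size_with_blank=3)
    encoder_outputs = torch.randn(B, T, D)
    decoder_outputs = torch.randn(B, 7, D)  # U + 1 = 7 prediction-network frames
    # The pruned outputs have shape (B, T, window_size_with_blank, D).
    return apply_rnnt_prune_ranges(encoder_outputs, decoder_outputs, ranges)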
|
NeMo-main
|
nemo/collections/asr/parts/k2/utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2020, Xiaomi CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import Any, Optional, Tuple
import torch
from omegaconf import DictConfig
from nemo.collections.asr.parts.k2.graph_compilers import CtcTopologyCompiler, RnntTopologyCompiler
from nemo.collections.asr.parts.k2.loss_mixins import CtcK2Mixin, RnntK2Mixin
from nemo.collections.asr.parts.k2.utils import get_tot_objf_and_finite_mask, invert_permutation
from nemo.core.utils.k2_guard import k2 # import k2 from guard module
class MLLoss(torch.nn.Module):
"""
Maximum Likelihood criterion.
It implements Connectionist Temporal Classification (CTC) loss,
but can be extended to support other loss functions (ASG, HMM, RNNT, ...).
Based on https://github.com/k2-fsa/snowfall/blob/master/snowfall/objectives/ctc.py
cfg takes precedence over all optional parameters
We keep explicit parameter setting to be able to create an instance without the need of a config.
"""
@abstractmethod
def __init__(
self,
num_classes: int,
blank: int,
reduction: str,
cfg: Optional[DictConfig] = None,
topo_type: str = "default",
topo_with_self_loops: bool = True,
):
super().__init__()
if cfg is not None:
topo_type = cfg.get("topo_type", topo_type)
topo_with_self_loops = cfg.get("topo_with_self_loops", topo_with_self_loops)
self.blank = blank
self.num_classes = num_classes
self.reduction = reduction
self.topo_type = topo_type
self.topo_with_self_loops = topo_with_self_loops
self.pad_fsavec = topo_type == "compact"
self.graph_compiler = None # expected to be initialized in child classes
def _prepare_graphs_for_intersection(
self,
log_probs: torch.Tensor,
targets: torch.Tensor,
input_lengths: torch.Tensor,
target_lengths: torch.Tensor,
) -> Tuple['k2.DenseFsaVec', Any, torch.Tensor]:
"""Converts input tensors to FST graphs:
log_probs to supervision_graphs (DenseFsaVec)
targets to supervision_graphs
Can be overridden.
"""
log_probs, supervisions, targets, target_lengths = self._prepare_log_probs_and_targets(
log_probs, input_lengths, targets, target_lengths
)
log_probs = self._maybe_normalize_gradients(log_probs, supervisions[:, -1].to(dtype=torch.long))
emissions_graphs = self._prepare_emissions_graphs(log_probs, supervisions)
del log_probs
if emissions_graphs.device != self.graph_compiler.device:
self.graph_compiler.to(emissions_graphs.device)
order = supervisions[:, 0].to(dtype=torch.long)
supervision_graphs = self.graph_compiler.compile(targets[order], target_lengths[order])
return emissions_graphs, supervision_graphs, supervisions
def _intersect_calc_scores(
self, emissions_graphs: 'k2.DenseFsaVec', supervision_graphs: Any, supervisions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Intersects emissions_graphs with supervision_graphs and calculates lattice scores.
Can be overridden.
"""
lats = k2.intersect_dense(supervision_graphs, emissions_graphs, torch.finfo(torch.float32).max / 10)
del emissions_graphs
num_tot_scores = lats.get_tot_scores(log_semiring=True, use_double_scores=False)
del lats
tot_scores = num_tot_scores[invert_permutation(supervisions[:, 0].to(dtype=torch.long))]
tot_scores, valid_mask = get_tot_objf_and_finite_mask(tot_scores, self.reduction)
return -tot_scores[valid_mask] if self.reduction == "none" else -tot_scores, valid_mask
def forward(
self,
log_probs: torch.Tensor,
targets: torch.Tensor,
input_lengths: torch.Tensor,
target_lengths: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
assert self.graph_compiler is not None
emissions_graphs, supervision_graphs, supervisions = self._prepare_graphs_for_intersection(
log_probs, targets, input_lengths, target_lengths
)
scores, mask = self._intersect_calc_scores(emissions_graphs, supervision_graphs, supervisions)
return scores, mask
class CtcLoss(MLLoss, CtcK2Mixin):
"""Regular CTC loss with custom topologies.
Available topologies:
- `default`, with or without self-loops
- `compact`, with or without self-loops
- `shared_blank`, with or without self-loops
- `minimal`, without self-loops
cfg takes precedence over all optional parameters
We keep explicit parameter setting to be able to create an instance without the need of a config.
"""
def __init__(
self,
num_classes: int,
blank: int,
reduction: str,
cfg: Optional[DictConfig] = None,
topo_type: str = "default",
topo_with_self_loops: bool = True,
):
super().__init__(
num_classes=num_classes,
blank=blank,
reduction=reduction,
cfg=cfg,
topo_type=topo_type,
topo_with_self_loops=topo_with_self_loops,
)
self.graph_compiler = CtcTopologyCompiler(
self.num_classes, self.blank, self.topo_type, self.topo_with_self_loops
)
class RnntLoss(MLLoss, RnntK2Mixin):
"""RNNT loss with the `minimal` topology.
If predictor_window_size is not provided, this loss works as regular RNNT.
With predictor_window_size provided, it applies uniform pruning when compiling Emission FSAs
to reduce memory and compute consumption.
cfg takes precedence over all optional parameters
We keep explicit parameter setting to be able to create an instance without the need of a config.
"""
def __init__(
self,
num_classes: int,
blank: int,
reduction: str,
cfg: Optional[DictConfig] = None,
topo_type: str = "minimal",
topo_with_self_loops: bool = True,
predictor_window_size: int = 0,
predictor_step_size: int = 1,
):
super().__init__(
num_classes=num_classes,
blank=blank,
reduction=reduction,
cfg=cfg,
topo_type=topo_type,
topo_with_self_loops=topo_with_self_loops,
)
if cfg is not None:
topo_type = cfg.get("topo_type", topo_type)
predictor_window_size = cfg.get("predictor_window_size", predictor_window_size)
predictor_step_size = cfg.get("predictor_step_size", predictor_step_size)
if topo_type != "minimal":
raise NotImplementedError(f"Only topo_type=`minimal` is supported at the moment.")
self.predictor_window_size = predictor_window_size
self.predictor_step_size = predictor_step_size
self.graph_compiler = RnntTopologyCompiler(
self.num_classes,
self.blank,
self.topo_type,
self.topo_with_self_loops,
max_adapter_length=self.predictor_window_size,
)
def forward(
self,
log_probs: torch.Tensor,
targets: torch.Tensor,
input_lengths: torch.Tensor,
target_lengths: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
assert self.predictor_window_size == 0 or log_probs.size(2) <= self.predictor_window_size + 1
return super().forward(
log_probs=log_probs, targets=targets, input_lengths=input_lengths, target_lengths=target_lengths
)
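# Editorial usage sketch (not part of the original NeMo file): constructing RnntLoss with
# uniform pruning enabled. The window value is an illustrative assumption; once set, the
# log_probs passed to forward() must already be restricted to at most
# predictor_window_size + 1 predictor frames, as enforced by the assert above.
def _example_pruned_rnnt_k2_loss(num_classes: int, blank: int, window: int = 16) -> RnntLoss:
    return RnntLoss(
        num_classes=num_classes,
        blank=blank,
        reduction="mean",
        topo_type="minimal",
        predictor_window_size=window,
        predictor_step_size=1,
    )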
|
NeMo-main
|
nemo/collections/asr/parts/k2/ml_loss.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig
from nemo.collections.asr.parts.k2.classes import GraphIntersectDenseConfig
from nemo.collections.asr.parts.k2.loss_mixins import CtcK2Mixin, RnntK2Mixin
from nemo.collections.asr.parts.k2.utils import invert_permutation, load_graph
from nemo.core.utils.k2_guard import k2  # import k2 from guard module
from nemo.utils import logging
class BaseDecoder(object):
"""Base graph decoder with topology for decoding graph.
Typically uses the same parameters as for the corresponding loss function.
Can do decoding and forced alignment.
cfg takes precedence over all optional parameters
We keep explicit parameter setting to be able to create an instance without the need of a config.
"""
@abstractmethod
def __init__(
self,
num_classes: int,
blank: int,
cfg: Optional[DictConfig] = None,
intersect_pruned: bool = False,
intersect_conf: GraphIntersectDenseConfig = GraphIntersectDenseConfig(),
topo_type: str = "default",
topo_with_self_loops: bool = True,
device: torch.device = torch.device("cpu"),
):
if cfg is not None:
intersect_pruned = cfg.get("intersect_pruned", intersect_pruned)
intersect_conf = cfg.get("intersect_conf", intersect_conf)
topo_type = cfg.get("topo_type", topo_type)
topo_with_self_loops = cfg.get("topo_with_self_loops", topo_with_self_loops)
self.num_classes = num_classes
self.blank = blank
self.intersect_pruned = intersect_pruned
self.device = device
self.topo_type = topo_type
self.topo_with_self_loops = topo_with_self_loops
self.pad_fsavec = self.topo_type == "ctc_compact"
self.intersect_conf = intersect_conf
self.graph_compiler = None # expected to be initialized in child classes
self.base_graph = None # expected to be initialized in child classes
self.decoding_graph = None
def to(self, device: torch.device):
if self.graph_compiler.device != device:
self.graph_compiler.to(device)
if self.base_graph.device != device:
self.base_graph = self.base_graph.to(device)
if self.decoding_graph is not None and self.decoding_graph.device != device:
self.decoding_graph = self.decoding_graph.to(device)
self.device = device
def update_graph(self, graph: 'k2.Fsa'):
raise NotImplementedError
def _decode_impl(
self,
log_probs: torch.Tensor,
supervisions: torch.Tensor,
return_lattices: bool = False,
return_ilabels: bool = False,
output_aligned: bool = True,
) -> Union['k2.Fsa', Tuple[List[torch.Tensor], List[torch.Tensor]]]:
if self.decoding_graph is None:
self.decoding_graph = self.base_graph
if log_probs.device != self.device:
self.to(log_probs.device)
emissions_graphs = self._prepare_emissions_graphs(log_probs, supervisions)
if self.intersect_pruned:
lats = k2.intersect_dense_pruned(
a_fsas=self.decoding_graph,
b_fsas=emissions_graphs,
search_beam=self.intersect_conf.search_beam,
output_beam=self.intersect_conf.output_beam,
min_active_states=self.intersect_conf.min_active_states,
max_active_states=self.intersect_conf.max_active_states,
)
else:
indices = torch.zeros(emissions_graphs.dim0(), dtype=torch.int32, device=self.device)
dec_graphs = (
k2.index_fsa(self.decoding_graph, indices)
if self.decoding_graph.shape[0] == 1
else self.decoding_graph
)
lats = k2.intersect_dense(dec_graphs, emissions_graphs, self.intersect_conf.output_beam)
self.decoding_graph = None
order = supervisions[:, 0]
if return_lattices:
lats = k2.index_fsa(lats, invert_permutation(order).to(device=log_probs.device))
if self.blank != 0:
# change only ilabels
# suppose self.blank == self.num_classes - 1
lats.labels = torch.where(lats.labels == 0, self.blank, lats.labels - 1)
return lats
else:
shortest_path_fsas = k2.index_fsa(
k2.shortest_path(lats, True), invert_permutation(order).to(device=log_probs.device),
)
return self._extract_labels_and_probabilities(shortest_path_fsas, return_ilabels, output_aligned)
def decode(
self,
log_probs: torch.Tensor,
log_probs_length: torch.Tensor,
return_lattices: bool = False,
return_ilabels: bool = False,
output_aligned: bool = True,
) -> Union['k2.Fsa', Tuple[List[torch.Tensor], List[torch.Tensor]]]:
log_probs, supervisions, _, _ = self._prepare_log_probs_and_targets(log_probs, log_probs_length, None, None)
return self._decode_impl(
log_probs,
supervisions,
return_lattices=return_lattices,
return_ilabels=return_ilabels,
output_aligned=output_aligned,
)
def align(
self,
log_probs: torch.Tensor,
log_probs_length: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
return_lattices: bool = False,
return_ilabels: bool = False,
output_aligned: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
log_probs, supervisions, targets, target_lengths = self._prepare_log_probs_and_targets(
log_probs, log_probs_length, targets, target_lengths
)
order = supervisions[:, 0].to(dtype=torch.long)
self.decoding_graph = self.graph_compiler.compile(targets[order], target_lengths[order])
return self._decode_impl(
log_probs,
supervisions,
return_lattices=return_lattices,
return_ilabels=return_ilabels,
output_aligned=output_aligned,
)
class CtcDecoder(BaseDecoder, CtcK2Mixin):
"""Regular CTC graph decoder with custom topologies.
Available topologies:
- `default`, with or without self-loops
- `compact`, with or without self-loops
- `shared_blank`, with or without self-loops
- `minimal`, without self-loops
Can do decoding and forced alignment.
"""
def __init__(
self,
num_classes: int,
blank: int,
cfg: Optional[DictConfig] = None,
intersect_pruned: bool = False,
intersect_conf: GraphIntersectDenseConfig = GraphIntersectDenseConfig(),
topo_type: str = "default",
topo_with_self_loops: bool = True,
device: torch.device = torch.device("cpu"),
):
super().__init__(
num_classes, blank, cfg, intersect_pruned, intersect_conf, topo_type, topo_with_self_loops, device
)
from nemo.collections.asr.parts.k2.graph_compilers import CtcTopologyCompiler
self.graph_compiler = CtcTopologyCompiler(
self.num_classes, self.blank, self.topo_type, self.topo_with_self_loops, self.device
)
self.base_graph = k2.create_fsa_vec([self.graph_compiler.ctc_topo_inv.invert()]).to(self.device)
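# Editorial usage sketch (not part of the original NeMo file): decoding with the CTC graph
# decoder above. log_probs and their lengths are assumed to come from a CTC model whose
# label convention matches num_classes/blank; intersect_conf keeps the dataclass defaults.
def _example_ctc_graph_decode(
    log_probs: torch.Tensor, log_probs_length: torch.Tensor, num_classes: int, blank: int
):
    decoder = CtcDecoder(num_classes=num_classes, blank=blank, device=log_probs.device)
    # With return_lattices=False the decoder returns two lists of tensors
    # (labels and, per _extract_labels_and_probabilities in the mixin, their probabilities).
    labels, probs = decoder.decode(log_probs, log_probs_length, return_ilabels=False, output_aligned=True)
    return labels, probs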
class RnntAligner(BaseDecoder, RnntK2Mixin):
"""RNNT graph decoder with the `minimal` topology.
If predictor_window_size is not provided, this decoder works as a Viterbi over regular RNNT lattice.
With predictor_window_size provided, it applies uniform pruning when compiling Emission FSAs
to reduce memory and compute consumption.
Can only do forced alignment.
"""
def __init__(
self,
num_classes: int,
blank: int,
cfg: Optional[DictConfig] = None,
intersect_pruned: bool = False,
intersect_conf: GraphIntersectDenseConfig = GraphIntersectDenseConfig(),
topo_type: str = "default",
topo_with_self_loops: bool = True,
predictor_window_size: int = 0,
predictor_step_size: int = 1,
device: torch.device = torch.device("cpu"),
):
if cfg is not None:
topo_type = cfg.get("topo_type", topo_type)
predictor_window_size = cfg.get("predictor_window_size", predictor_window_size)
predictor_step_size = cfg.get("predictor_step_size", predictor_step_size)
if topo_type != "minimal":
raise NotImplementedError(f"Only topo_type=`minimal` is supported at the moment.")
super().__init__(
num_classes, blank, cfg, intersect_pruned, intersect_conf, topo_type, topo_with_self_loops, device
)
self.predictor_window_size = predictor_window_size
self.predictor_step_size = predictor_step_size
from nemo.collections.asr.parts.k2.graph_compilers import RnntTopologyCompiler
self.graph_compiler = RnntTopologyCompiler(
self.num_classes,
self.blank,
self.topo_type,
self.topo_with_self_loops,
self.device,
max_adapter_length=self.predictor_window_size,
)
self.base_graph = self.graph_compiler.base_graph
def decode(
self,
log_probs: torch.Tensor,
log_probs_length: torch.Tensor,
return_lattices: bool = False,
return_ilabels: bool = False,
output_aligned: bool = True,
) -> Union['k2.Fsa', Tuple[List[torch.Tensor], List[torch.Tensor]]]:
raise NotImplementedError("RNNT decoding is not implemented. Only .align(...) method is supported.")
def align(
self,
log_probs: torch.Tensor,
log_probs_length: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
return_lattices: bool = False,
return_ilabels: bool = False,
output_aligned: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
assert self.predictor_window_size == 0 or log_probs.size(2) <= self.predictor_window_size + 1
return super().align(
log_probs,
log_probs_length,
targets,
target_lengths,
return_lattices=return_lattices,
return_ilabels=return_ilabels,
output_aligned=output_aligned,
)
class TokenLMDecoder(BaseDecoder):
"""Graph decoder with token_lm-based decoding graph.
Available topologies:
- `default`, with or without self-loops
- `compact`, with or without self-loops
- `shared_blank`, with or without self-loops
- `minimal`, without self-loops
Can do decoding and forced alignment.
cfg takes precedence over all optional parameters
We keep explicit parameter setting to be able to create an instance without the need of a config.
"""
def __init__(
self,
num_classes: int,
blank: int,
cfg: Optional[DictConfig] = None,
token_lm: Optional[Union['k2.Fsa', str]] = None,
intersect_pruned: bool = False,
intersect_conf: GraphIntersectDenseConfig = GraphIntersectDenseConfig(),
topo_type: str = "default",
topo_with_self_loops: bool = True,
device: torch.device = torch.device("cpu"),
):
super().__init__(
num_classes, blank, cfg, intersect_pruned, intersect_conf, topo_type, topo_with_self_loops, device
)
if cfg is not None:
token_lm = cfg.get("token_lm", token_lm)
if token_lm is not None:
self.token_lm = load_graph(token_lm) if isinstance(token_lm, str) else token_lm
if self.token_lm is not None:
self.update_graph(self.token_lm)
else:
logging.warning(
f"""token_lm was set to None. Use this for debug
purposes only or call .update_graph(token_lm) before using."""
)
else:
logging.warning(
f"""token_lm was set to None. Use this for debug
purposes only or call .update_graph(token_lm) before using."""
)
self.token_lm = None
def update_graph(self, graph: 'k2.Fsa'):
self.token_lm = graph
token_lm = self.token_lm.clone()
if hasattr(token_lm, "aux_labels"):
delattr(token_lm, "aux_labels")
labels = token_lm.labels
if labels.max() != self.num_classes - 1:
raise ValueError(f"token_lm is not compatible with the num_classes: {labels.unique()}, {self.num_classes}")
from nemo.collections.asr.parts.k2.graph_compilers import CtcNumGraphCompiler
self.graph_compiler = CtcNumGraphCompiler(
self.num_classes, self.blank, self.topo_type, self.topo_with_self_loops, self.device, token_lm
)
self.base_graph = k2.create_fsa_vec([self.graph_compiler.base_graph]).to(self.device)
|
NeMo-main
|
nemo/collections/asr/parts/k2/graph_decoders.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from dataclasses import dataclass
from typing import Any, Optional, Tuple
import torch
from omegaconf import DictConfig
from nemo.utils import logging
@dataclass
class GraphIntersectDenseConfig:
"""Graph dense intersection config.
"""
search_beam: float = 20.0
output_beam: float = 10.0
min_active_states: int = 30
max_active_states: int = 10000
@dataclass
class GraphModuleConfig:
"""Config for graph modules.
Typically used with graph losses and decoders.
"""
topo_type: str = "default"
topo_with_self_loops: bool = True
token_lm: Optional[Any] = None
intersect_pruned: bool = False
intersect_conf: GraphIntersectDenseConfig = GraphIntersectDenseConfig()
boost_coeff: float = 0.0
predictor_window_size: int = 0
predictor_step_size: int = 1
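# Editorial usage sketch (not part of the original NeMo file): how these dataclasses might
# be instantiated and tweaked in Python before being handed to a graph loss or decoder.
# The values below are illustrative, not recommended settings.
def _example_graph_module_config() -> GraphModuleConfig:
    intersect_conf = GraphIntersectDenseConfig(search_beam=15.0, output_beam=8.0)
    return GraphModuleConfig(
        topo_type="default",
        topo_with_self_loops=True,
        intersect_pruned=False,
        intersect_conf=intersect_conf,
        predictor_window_size=0,
    )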
class ASRK2Mixin(ABC):
"""k2 Mixin class that simplifies the construction of various models with k2-based losses.
It does the following:
- Sets up the graph loss and decoder (methods _init_k2 and update_k2_modules).
- Registers external graphs, if needed.
- Augments forward(...) with optional graph decoding to get accurate predictions.
"""
def _init_k2(self):
"""
k2-related initialization implementation.
This method is expected to run after the __init__ which sets self._cfg
self._cfg is expected to have the attribute graph_module_cfg
"""
if not hasattr(self, "_cfg"):
raise ValueError("self._cfg must be set before calling _init_k2().")
if not hasattr(self._cfg, "graph_module_cfg") or self._cfg.graph_module_cfg is None:
raise ValueError("self._cfg.graph_module_cfg must be set and cannot be None.")
self.graph_module_cfg = self._cfg.graph_module_cfg
# register token_lm for MAPLoss
criterion_type = self.graph_module_cfg.get("criterion_type", "ml")
self.use_graph_lm = criterion_type == "map"
if self.use_graph_lm:
token_lm_path = self.graph_module_cfg.backend_cfg.get("token_lm", None)
if token_lm_path is None:
raise ValueError(
f"graph_module_cfg.backend_cfg.token_lm is empty. It must be set for criterion_type == `{criterion_type}`"
)
token_lm_path = self.register_artifact('graph_module_cfg.backend_cfg.token_lm', token_lm_path)
self.graph_module_cfg.backend_cfg["token_lm"] = token_lm_path
self.update_k2_modules(self.graph_module_cfg)
def update_k2_modules(self, input_cfg: DictConfig):
"""
Helper function to initialize or update k2 loss and transcribe_decoder.
Args:
input_cfg: DictConfig to take new parameters from. Schema is expected as in
nemo.collections.asr.models.configs.k2_sequence_models_config.GraphModuleConfig
"""
del self.loss
if hasattr(self, "transcribe_decoder"):
del self.transcribe_decoder
if hasattr(self, "joint"):
# RNNT
num_classes = self.joint.num_classes_with_blank - 1
else:
# CTC, MMI, ...
num_classes = self.decoder.num_classes_with_blank - 1
remove_consecutive = input_cfg.backend_cfg.get("topo_with_self_loops", True) and input_cfg.backend_cfg.get(
"topo_type", "default"
) not in ["forced_blank", "identity",]
self._wer.remove_consecutive = remove_consecutive
from nemo.collections.asr.losses.lattice_losses import LatticeLoss
self.loss = LatticeLoss(
num_classes=num_classes,
reduction=self._cfg.get("ctc_reduction", "mean_batch"),
backend="k2",
criterion_type=input_cfg.get("criterion_type", "ml"),
loss_type=input_cfg.get("loss_type", "ctc"),
split_batch_size=input_cfg.get("split_batch_size", 0),
graph_module_cfg=input_cfg.backend_cfg,
)
criterion_type = self.loss.criterion_type
self.use_graph_lm = criterion_type == "map"
transcribe_training = input_cfg.get("transcribe_training", False)
if transcribe_training and criterion_type == "ml":
logging.warning(
f"""You do not need to use transcribe_training=`{transcribe_training}`
with criterion_type=`{criterion_type}`. transcribe_training will be set to False."""
)
transcribe_training = False
self.transcribe_training = transcribe_training
if self.use_graph_lm:
from nemo.collections.asr.modules.graph_decoder import ViterbiDecoderWithGraph
self.transcribe_decoder = ViterbiDecoderWithGraph(
num_classes=num_classes,
backend="k2",
dec_type="token_lm",
return_type="1best",
return_ilabels=True,
output_aligned=True,
split_batch_size=input_cfg.get("split_batch_size", 0),
graph_module_cfg=input_cfg.backend_cfg,
)
def _forward_k2_post_processing(
self, log_probs: torch.Tensor, encoded_length: torch.Tensor, greedy_predictions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
k2-related post-processing part of .forward()
Args:
log_probs: The log probabilities tensor of shape [B, T, D].
encoded_length: The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
greedy_predictions: The greedy token predictions of the model of shape [B, T]
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The greedy token predictions of the model of shape [B, T] (via argmax)
"""
# greedy_predictions from .forward() are incorrect for criterion_type=`map`
# getting correct greedy_predictions, if needed
if self.use_graph_lm and (not self.training or self.transcribe_training):
greedy_predictions, encoded_length, _ = self.transcribe_decoder.forward(
log_probs=log_probs, log_probs_length=encoded_length
)
return log_probs, encoded_length, greedy_predictions
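# Editorial usage sketch (not part of the original NeMo file): an illustrative
# graph_module_cfg layout consistent with the keys read by _init_k2 and update_k2_modules
# above. The authoritative schema lives in
# nemo.collections.asr.models.configs.k2_sequence_models_config; this dict is an
# assumption for demonstration only.
def _example_graph_module_cfg() -> DictConfig:
    from omegaconf import OmegaConf

    return OmegaConf.create(
        {
            "criterion_type": "ml",  # "map" additionally requires backend_cfg.token_lm
            "loss_type": "ctc",
            "split_batch_size": 0,
            "transcribe_training": False,
            "backend_cfg": {"topo_type": "default", "topo_with_self_loops": True},
        }
    )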
|
NeMo-main
|
nemo/collections/asr/parts/k2/classes.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2020, Xiaomi CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from nemo.collections.asr.parts.k2.utils import add_self_loops, compose_with_self_loops, intersect_with_self_loops
from nemo.core.utils.k2_guard import k2 # import k2 from guard module
class CtcTopologyCompiler(object):
"""Default graph compiler.
It applies its topology to the input token sequence to compile the supervision graph.
Based on https://github.com/k2-fsa/snowfall/blob/master/snowfall/training/ctc_graph.py
"""
def __init__(
self,
num_classes: int,
blank: int,
topo_type: str = "default",
topo_with_self_loops: bool = True,
device: torch.device = torch.device("cpu"),
):
self.topo_type = topo_type
self.device = device
from nemo.collections.asr.parts.k2.topologies import build_topo
self.base_graph = k2.arc_sort(build_topo(topo_type, list(range(num_classes)), blank, topo_with_self_loops)).to(
self.device
)
self.ctc_topo_inv = k2.arc_sort(self.base_graph.invert())
def to(self, device: torch.device):
self.ctc_topo_inv = self.ctc_topo_inv.to(device)
if self.base_graph is not None:
self.base_graph = self.base_graph.to(device)
self.device = device
def compile(self, targets: torch.Tensor, target_lengths: torch.Tensor) -> 'k2.Fsa':
token_ids_list = [t[:l].tolist() for t, l in zip(targets, target_lengths)]
label_graph = k2.linear_fsa(token_ids_list).to(self.device)
label_graph.aux_labels = label_graph.labels.clone()
supervision_graphs = compose_with_self_loops(self.base_graph, label_graph)
supervision_graphs = k2.arc_sort(supervision_graphs).to(self.device)
# make sure the gradient is not accumulated
supervision_graphs.requires_grad_(False)
return supervision_graphs
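# Editorial usage sketch (not part of the original NeMo file): compiling supervision
# graphs for a small batch of token sequences. It assumes k2 is installed; the
# num_classes/blank values and token ids are illustrative assumptions whose exact
# convention is defined by build_topo.
def _example_compile_ctc_supervisions(num_classes: int = 5, blank: int = 0) -> 'k2.Fsa':
    compiler = CtcTopologyCompiler(num_classes=num_classes, blank=blank)
    targets = torch.tensor([[1, 2, 3], [2, 4, 0]], dtype=torch.long)
    target_lengths = torch.tensor([3, 2], dtype=torch.long)
    return compiler.compile(targets, target_lengths)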
class CtcNumGraphCompiler(CtcTopologyCompiler):
"""Graph compiler with auxiliary graph to compose with the topology.
The supervision graph contains the auxiliary graph information.
"""
def __init__(
self,
num_classes: int,
blank: int,
topo_type: str = "default",
topo_with_self_loops: bool = True,
device: torch.device = torch.device("cpu"),
aux_graph: Optional['k2.Fsa'] = None,
):
super().__init__(num_classes, blank, topo_type, topo_with_self_loops, device)
if aux_graph is None:
self.decoding_graph = k2.create_fsa_vec([self.ctc_topo_inv.invert()]).to(self.device)
else:
self.base_graph = intersect_with_self_loops(self.ctc_topo_inv, aux_graph).invert_()
self.base_graph = k2.arc_sort(self.base_graph).to(self.device)
def compile(
self, targets: torch.Tensor, target_lengths: torch.Tensor, aux_graph: Optional['k2.Fsa'] = None,
) -> 'k2.Fsa':
if aux_graph is None and self.base_graph is None:
raise ValueError(
f"At least one of aux_graph and self.base_graph must be set: {aux_graph}, {self.base_graph}"
)
elif aux_graph is not None:
self.base_graph = intersect_with_self_loops(self.ctc_topo_inv, aux_graph).invert()
self.base_graph = k2.arc_sort(self.base_graph).to(self.device)
return super().compile(targets, target_lengths)
class MmiGraphCompiler(CtcNumGraphCompiler):
"""Graph compiler for MMI loss.
The decoding graph is a composition of the auxiliary graph and the topology.
It is returned along with the supervision graph on every compile() call.
"""
def __init__(
self,
num_classes: int,
blank: int,
topo_type: str = "default",
topo_with_self_loops: bool = True,
device: torch.device = torch.device("cpu"),
aux_graph: Optional['k2.Fsa'] = None,
):
super().__init__(num_classes, blank, topo_type, topo_with_self_loops, device, aux_graph)
if aux_graph is None:
self.decoding_graph = k2.create_fsa_vec([self.ctc_topo_inv.invert()]).to(self.device)
else:
self.decoding_graph = k2.create_fsa_vec([self.base_graph.detach()]).to(self.device)
def to(self, device: torch.device):
if self.decoding_graph is not None:
self.decoding_graph = self.decoding_graph.to(device)
super().to(device)
def compile(
self, targets: torch.Tensor, target_lengths: torch.Tensor, aux_graph: Optional['k2.Fsa'] = None,
) -> Tuple['k2.Fsa', 'k2.Fsa']:
supervision_graphs = super().compile(targets, target_lengths, aux_graph)
if aux_graph is None and self.decoding_graph is None:
raise ValueError(
f"At least one of aux_graph and self.decoding_graph must be set: {aux_graph}, {self.decoding_graph}"
)
elif aux_graph is not None:
self.decoding_graph = k2.create_fsa_vec([self.base_graph.detach()]).to(self.device)
return supervision_graphs, self.decoding_graph
class RnntTopologyCompiler(CtcTopologyCompiler):
"""Default graph compiler for RNNT loss.
Each supervision graph is composed with the corresponding RNNT emission adapter.
If max_adapter_length is provided, the maximum adapter length is limited.
Note:
The actual number of classes is `num_classes` + 1 with <eps> as the class 0.
Warning:
It is currently not recommended to use topologies other than "minimal".
"""
def __init__(
self,
num_classes: int,
blank: int,
topo_type: str = "minimal",
topo_with_self_loops: bool = True,
device: torch.device = torch.device("cpu"),
max_adapter_length: int = 0,
):
if topo_type == "compact":
raise NotImplementedError(f"This compiler does not support topo_type==`compact`.")
super().__init__(num_classes, blank, topo_type, topo_with_self_loops, device)
from nemo.collections.asr.parts.k2.topologies import RnntEmissionAdapterBuilder
self.max_adapter_length = max_adapter_length
self._builder = RnntEmissionAdapterBuilder(list(range(num_classes)), blank, num_classes)
def compile(self, targets: torch.Tensor, target_lengths: torch.Tensor) -> 'k2.Fsa':
supervision_graphs = add_self_loops(super().compile(targets, target_lengths), self._builder.eps_num, "input")
adapters = self._builder(
torch.where(target_lengths > self.max_adapter_length, self.max_adapter_length, target_lengths)
if self.max_adapter_length > 0 and self.max_adapter_length < target_lengths.max()
else target_lengths
).to(device=self.device)
return k2.intersect(adapters, supervision_graphs, treat_epsilons_specially=False)
|
NeMo-main
|
nemo/collections/asr/parts/k2/graph_compilers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import texterrors
import torch
from omegaconf import open_dict
from nemo.collections.asr.models import ASRModel, EncDecRNNTModel
from nemo.collections.asr.parts.utils.confidence_metrics import (
auc_nt,
auc_pr,
auc_roc,
auc_yc,
ece,
nce,
save_confidence_hist,
save_custom_confidence_curve,
save_nt_curve,
save_pr_curve,
save_roc_curve,
)
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
def get_correct_marks(r: Union[List[int], List[str]], h: Union[List[int], List[str]]) -> List[bool]:
"""Get correct marks by aligning the reference text with a hypothesis.
This method considers only insertions and substitutions as incorrect marks.
"""
return [
a == b
for a, b in zip(*(texterrors.align_texts([str(rr) for rr in r], [str(hh) for hh in h], False)[:-1]))
if b != "<eps>"
]
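# Editorial usage sketch (not part of the original NeMo file): marking which hypothesis
# words are correct against a reference. It relies on texterrors being installed; the
# word lists are made up for illustration.
def _example_correct_marks() -> List[bool]:
    reference = ["the", "cat", "sat"]
    hypothesis = ["the", "bat", "sat"]
    # One boolean per hypothesis token that is not an alignment gap ("<eps>").
    return get_correct_marks(reference, hypothesis)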
def get_token_targets_with_confidence(hyp: Hypothesis) -> List[Tuple[str, float]]:
return [(y, c) for y, c in zip(hyp.y_sequence, hyp.token_confidence)]
def get_word_targets_with_confidence(hyp: Hypothesis) -> List[Tuple[str, float]]:
return [(y, c) for y, c in zip(hyp.words, hyp.word_confidence)]
def run_confidence_benchmark(
model: ASRModel,
target_level: str,
filepaths: List[str],
reference_texts: List[str],
batch_size: int = 8,
num_workers: int = 4,
plot_dir: Optional[Union[str, Path]] = None,
autocast: Optional = None,
):
"""Run benchmark and plot histograms and curves, if plot_dir is provided.
Returns:
Dictionary with benchmark results of the following scheme:
`level: (auc_roc, auc_pr, auc_nt, nce, ece, auc_yc, std_yc, max_yc)` with `level` being 'token' or 'word'.
"""
draw_plot = plot_dir is not None
if isinstance(plot_dir, str):
plot_dir = Path(plot_dir)
is_rnnt = isinstance(model, EncDecRNNTModel)
# setup autocast if necessary
if autocast is None:
@contextlib.contextmanager
def autocast():
yield
# transcribe audio
with autocast():
with torch.no_grad():
transcriptions = model.transcribe(
paths2audio_files=filepaths, batch_size=batch_size, return_hypotheses=True, num_workers=num_workers
)
if is_rnnt:
transcriptions = transcriptions[0]
levels = []
if target_level != "word":
levels.append("token")
if target_level != "token":
levels.append("word")
results = {}
for level in levels:
if level == "token":
targets_with_confidence = [get_token_targets_with_confidence(tran) for tran in transcriptions]
correct_marks = [
get_correct_marks(model.tokenizer.text_to_ids(r), model.tokenizer.text_to_ids(h.text))
for r, h in zip(reference_texts, transcriptions)
]
else: # "word"
targets_with_confidence = [get_word_targets_with_confidence(tran) for tran in transcriptions]
correct_marks = [get_correct_marks(r.split(), h.words) for r, h in zip(reference_texts, transcriptions)]
y_true, y_score = np.array(
[[f, p[1]] for cm, twc in zip(correct_marks, targets_with_confidence) for f, p in zip(cm, twc)]
).T
# output scheme: yc.mean(), yc.max(), yc.std() or yc.mean(), yc.max(), yc.std(), (thresholds, yc)
result_yc = auc_yc(y_true, y_score, return_std_maximum=True, return_curve=draw_plot)
# output scheme: ece or ece, (thresholds, ece_curve)
results_ece = ece(y_true, y_score, return_curve=draw_plot)
results[level] = [
auc_roc(y_true, y_score),
auc_pr(y_true, y_score),
auc_nt(y_true, y_score),
nce(y_true, y_score),
results_ece if isinstance(results_ece, float) else results_ece[0],
] + list(result_yc[:3])
if draw_plot:
os.makedirs(plot_dir, exist_ok=True)
mask_correct = y_true == 1
y_score_correct = y_score[mask_correct]
y_score_incorrect = y_score[~mask_correct]
# histogram of the correct distribution
save_confidence_hist(y_score_correct, plot_dir, level + "_" + "hist_correct")
# histogram of the incorrect distribution
save_confidence_hist(y_score_incorrect, plot_dir, level + "_" + "hist_incorrect")
# AUC-ROC curve
save_roc_curve(y_true, y_score, plot_dir, level + "_" + "roc")
# AUC-PR curve
save_pr_curve(y_true, y_score, plot_dir, level + "_" + "pr")
# AUC-NT curve
save_nt_curve(y_true, y_score, plot_dir, level + "_" + "nt")
# AUC-YC curve
yc_thresholds, yc_values = result_yc[-1]
save_custom_confidence_curve(
yc_thresholds,
yc_values,
plot_dir,
level + "_" + "yc",
"Threshold",
"True positive rate − False Positive Rate",
)
# ECE curve
ece_thresholds, ece_values = results_ece[-1]
ece_values /= max(ece_values)
save_custom_confidence_curve(
ece_thresholds, ece_values, plot_dir, level + "_" + "ece", "Threshold", "|Accuracy − Confidence score|"
)
return results
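# Editorial usage sketch (not part of the original NeMo file): running the benchmark for
# word-level confidence on a handful of utterances. The model, file paths, and reference
# texts are assumed to be supplied by the caller; plots are written only when plot_dir is
# provided.
def _example_confidence_benchmark(model: ASRModel, filepaths: List[str], reference_texts: List[str]):
    results = run_confidence_benchmark(
        model, target_level="word", filepaths=filepaths, reference_texts=reference_texts, plot_dir=None
    )
    # results["word"] follows the scheme documented above:
    # (auc_roc, auc_pr, auc_nt, nce, ece, auc_yc, std_yc, max_yc)
    return results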
def apply_confidence_parameters(decoding_cfg, hp):
"""Apply parameters from a parameter grid to a decoding config.
Returns:
Updated decoding config.
"""
new_decoding_cfg = copy.deepcopy(decoding_cfg)
confidence_cfg_fields = ("aggregation", "exclude_blank")
confidence_measure_cfg_fields = ("name", "alpha", "entropy_type", "entropy_norm")
with open_dict(new_decoding_cfg):
for p, v in hp.items():
if p in confidence_cfg_fields:
new_decoding_cfg.confidence_cfg[p] = v
elif p in confidence_measure_cfg_fields:
new_decoding_cfg.confidence_cfg.measure_cfg[p] = v
return new_decoding_cfg
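# Editorial usage sketch (not part of the original NeMo file): applying one point of a
# hyper-parameter grid to a decoding config. The config structure below is an illustrative
# assumption containing only the fields touched by apply_confidence_parameters; real
# decoding configs carry many more entries.
def _example_apply_confidence_parameters():
    from omegaconf import OmegaConf

    decoding_cfg = OmegaConf.create(
        {"confidence_cfg": {"aggregation": "prod", "exclude_blank": True, "measure_cfg": {"name": "entropy"}}}
    )
    hp = {"aggregation": "mean", "exclude_blank": False, "alpha": 0.5}
    return apply_confidence_parameters(decoding_cfg, hp)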
|
NeMo-main
|
nemo/collections/asr/parts/utils/asr_confidence_benchmarking_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
__all__ = ['Swish']
class Swish(nn.SiLU):
"""
Swish activation function introduced in 'https://arxiv.org/abs/1710.05941'
Mathematically identical to SiLU. See note in nn.SiLU for references.
"""
|
NeMo-main
|
nemo/collections/asr/parts/utils/activations.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import numpy as np
import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.collections.asr.parts.mixins.streaming import StreamingEncoder
from nemo.collections.asr.parts.preprocessing.features import normalize_batch
from nemo.collections.asr.parts.utils.audio_utils import get_samples
from nemo.core.classes import IterableDataset
from nemo.core.neural_types import LengthsType, MelSpectrogramType, NeuralType
# Minimum number of tokens required to assign a LCS merge step, otherwise ignore and
# select all i-1 and ith buffer tokens to merge.
MIN_MERGE_SUBSEQUENCE_LEN = 1
def print_alignment(alignment):
"""
Print an alignment matrix of the shape (m + 1, n + 1)
Args:
alignment: An integer alignment matrix of shape (m + 1, n + 1)
"""
m = len(alignment)
if m > 0:
n = len(alignment[0])
for i in range(m):
for j in range(n):
if j == 0:
print(f"{i:4d} |", end=" ")
print(f"{alignment[i][j]}", end=" ")
print()
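# Editorial usage sketch (not part of the original NeMo file): printing a tiny alignment
# matrix. The 3x4 matrix below is made up purely for illustration.
def _example_print_alignment():
    alignment = [
        [0, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 2, 0],
    ]
    print_alignment(alignment)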
def write_lcs_alignment_to_pickle(alignment, filepath, extras=None):
"""
Writes out the LCS alignment to a file, along with any extras provided.
Args:
alignment: An alignment matrix of shape [m + 1, n + 1]
filepath: str filepath
extras: Optional dictionary of items to preserve.
"""
if extras is None:
extras = {}
extras['alignment'] = alignment
torch.save(extras, filepath)
def longest_common_subsequence_merge(X, Y, filepath=None):
"""
Longest Common Subsequence merge algorithm for aligning two consecutive buffers.
Base alignment construction algorithm is Longest Common Subsequence (referred to as LCS hereafter).
The LCS Merge algorithm looks at two chunks i-1 and i, determines the aligned overlap at the
end of i-1 and the beginning of the ith chunk, and then clips the subsegment of the ith chunk.
The assumption is that the two chunks are consecutive chunks, and there exists at least a small acoustic overlap.
It is a sub-word token merge algorithm, operating on the abstract notion of integer ids representing the subword ids.
It is independent of text or character encoding.
Since the algorithm is merge based, and depends on consecutive buffers, the very first buffer is processed using
the "middle tokens" algorithm.
It requires a delay of some number of tokens such that:
lcs_delay = math.floor(((total_buffer_in_secs - chunk_len_in_sec)) / model_stride_in_secs)
Total cost of the model is O(m_{i-1} * n_{i}) where (m, n) represents the number of subword ids of the buffer.
Args:
X: The subset of the previous chunk i-1, sliced such X = X[-(lcs_delay * max_steps_per_timestep):]
Therefore there can be at most lcs_delay * max_steps_per_timestep symbols for X, preserving computation.
Y: The entire current chunk i.
filepath: Optional filepath to save the LCS alignment matrix for later introspection.
Returns:
A tuple containing -
- i: Start index of alignment along the i-1 chunk.
- j: Start index of alignment along the ith chunk.
- slice_len: number of tokens to slice off from the ith chunk.
The LCS alignment matrix itself (shape m + 1, n + 1)
"""
# LCSuff is the table with zero
# value initially in each cell
m = len(X)
n = len(Y)
LCSuff = [[0 for k in range(n + 1)] for l in range(m + 1)]
# To store the length of
# longest common substring
result = 0
result_idx = [0, 0, 0] # Contains (i, j, slice_len)
# Following steps to build
# LCSuff[m+1][n+1] in bottom up fashion
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
LCSuff[i][j] = 0
elif X[i - 1] == Y[j - 1]:
LCSuff[i][j] = LCSuff[i - 1][j - 1] + 1
if result <= LCSuff[i][j]:
result = LCSuff[i][j] # max(result, LCSuff[i][j])
result_idx = [i, j, result]
else:
LCSuff[i][j] = 0
# Check if perfect alignment was found or not
# Perfect alignment is found if :
# Longest common subsequence extends to the final row of the old buffer
# This means that there exists a diagonal LCS backtracking to the beginning of the new buffer
i, j = result_idx[0:2]
is_complete_merge = i == m
# Perfect alignment was found, slice eagerly
if is_complete_merge:
length = result_idx[-1]
# In case the LCS was incomplete - missing a few tokens at the beginning
# Perform backtrack to find the origin point of the slice (j) and how many tokens should be sliced
while length >= 0 and i > 0 and j > 0:
# Alignment exists at the required diagonal
if LCSuff[i - 1][j - 1] > 0:
length -= 1
i, j = i - 1, j - 1
else:
# End of longest alignment
i, j, length = i - 1, j - 1, length - 1
break
else:
# Expand hypothesis to catch partial mismatch
# There are 3 steps for partial mismatch in alignment
# 1) Backward search for leftmost LCS
# 2) Greedy expansion of leftmost LCS to the right
# 3) Backtrack final leftmost expanded LCS to find origin point of slice
# (1) Backward search for Leftmost LCS
# This is required for cases where multiple common subsequences exist
# We only need to select the leftmost one - since that corresponds
# to the last potential subsequence that matched with the new buffer.
# If we just chose the LCS (and not the leftmost LCS), then we can potentially
# slice off major sections of text which are repeated between two overlapping buffers.
# backward linear search for leftmost j with longest subsequence
max_j = 0
max_j_idx = n
i_partial = m # Starting index of i for partial merge
j_partial = -1 # Index holder of j for partial merge
j_skip = 0 # Number of tokens that were skipped along the diagonal
slice_count = 0 # Number of tokens that should be sliced
# Select leftmost LCS
for i_idx in range(m, -1, -1): # start from last timestep of old buffer
for j_idx in range(0, n + 1): # start from first token from new buffer
# Select the longest LCSuff, while minimizing the index of j (token index for new buffer)
if LCSuff[i_idx][j_idx] > max_j and j_idx <= max_j_idx:
max_j = LCSuff[i_idx][j_idx]
max_j_idx = j_idx
# Update the starting indices of the partial merge
i_partial = i_idx
j_partial = j_idx
# EARLY EXIT (if max subsequence length <= MIN merge length)
# Important case where there is long silence
# The end of one buffer will have many blank tokens, the beginning of new buffer may have many blank tokens
# As such, LCS will potentially be from the region of actual tokens.
# This can be detected as the max length of the suffix in LCS
# If this max length of the leftmost suffix is less than some margin, avoid slicing altogether.
if max_j <= MIN_MERGE_SUBSEQUENCE_LEN:
# If the number of partial tokens to be deleted is less than the minimum,
# don't delete any tokens at all.
i = i_partial
j = 0
result_idx[-1] = 0
else:
# Some valid long partial alignment was found
# (2) Expand this alignment along the diagonal *downwards* towards the end of the old buffer
# such that i_partial = m + 1.
# This is a common case where due to LSTM state or reduced buffer size, the alignment breaks
# in the middle but there are common subsequences between old and new buffers towards the end
# We can expand the current leftmost LCS in a diagonal manner downwards to include such potential
# merge regions.
# Expand current partial subsequence with co-located tokens
i_temp = i_partial + 1 # diagonal next i
j_temp = j_partial + 1 # diagonal next j
j_exp = 0 # number of tokens to expand along the diagonal
j_skip = 0 # how many diagonals didn't have the token. Incremented by 1 for every row i
for i_idx in range(i_temp, m + 1): # walk from i_partial + 1 => m + 1
j_any_skip = 0 # If the diagonal element at this location is not found, set to 1
# j_any_skip expands the search space one place to the right
# This allows 1 diagonal misalignment per timestep i (and expands the search for the next timestep)
# walk along the diagonal corresponding to i_idx, plus allowing diagonal skips to occur
# diagonal elements may not be aligned due to ASR model predicting
# incorrect token in between correct tokens
for j_idx in range(j_temp, j_temp + j_skip + 1):
if j_idx < n + 1:
if LCSuff[i_idx][j_idx] == 0:
j_any_skip = 1
else:
j_exp = 1 + j_skip + j_any_skip
# If the diagonal element existed, don't expand the search space,
# otherwise expand the search space 1 token to the right
j_skip += j_any_skip
# Move one step to the right for the next diagonal j corresponding to i
j_temp += 1
# reset j_skip, augment j_partial with expansions
j_skip = 0
j_partial += j_exp
# (3) Given new leftmost j_partial with expansions, backtrack the partial alignments
# counting how many diagonal skips occurred to compute slice length
# as well as starting point of slice.
# Partial backward trace to find start of slice
while i_partial > 0 and j_partial > 0:
if LCSuff[i_partial][j_partial] == 0:
# diagonal skip occurred, move j to left 1 extra time
j_partial -= 1
j_skip += 1
if j_partial > 0:
# If there are more steps to be taken to the left, slice off the current j
# Then loop for next (i, j) diagonal to the upper left
slice_count += 1
i_partial -= 1
j_partial -= 1
# Recompute total slice length as slice count along diagonal
# plus the number of diagonal skips
i = max(0, i_partial)
j = max(0, j_partial)
result_idx[-1] = slice_count + j_skip
# Set the value of i and j
result_idx[0] = i
result_idx[1] = j
if filepath is not None:
extras = {
"is_complete_merge": is_complete_merge,
"X": X,
"Y": Y,
"slice_idx": result_idx,
}
write_lcs_alignment_to_pickle(LCSuff, filepath=filepath, extras=extras)
print("Wrote alignemnt to :", filepath)
return result_idx, LCSuff
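# Editorial usage sketch (not part of the original NeMo file): running the LCS merge on
# two small, made-up subword id sequences. result_idx is (i, j, slice_len): the start of
# the alignment in the old buffer, the start in the new buffer, and how many tokens of the
# new buffer to slice off before concatenation.
def _example_lcs_merge():
    old_tail = [7, 8, 9, 10, 11]
    new_chunk = [9, 10, 11, 12, 13]
    result_idx, lcs_matrix = longest_common_subsequence_merge(old_tail, new_chunk)
    return result_idx, lcs_matrix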
def lcs_alignment_merge_buffer(buffer, data, delay, model, max_steps_per_timestep: int = 5, filepath: str = None):
"""
Merges the new text from the current frame with the previous text contained in the buffer.
The alignment is based on a Longest Common Subsequence algorithm, with some additional heuristics leveraging
the notion that the chunk size is >= the context window. In case this assumption is violated, the results of the merge
will be incorrect (or at least obtain worse WER overall).
"""
# If delay timesteps is 0, that means no future context was used. Simply concatenate the buffer with new data.
if delay < 1:
buffer += data
return buffer
# If buffer is empty, simply concatenate the buffer and data.
if len(buffer) == 0:
buffer += data
return buffer
# Prepare a subset of the buffer that will be LCS Merged with new data
search_size = int(delay * max_steps_per_timestep)
buffer_slice = buffer[-search_size:]
# Perform LCS Merge
lcs_idx, lcs_alignment = longest_common_subsequence_merge(buffer_slice, data, filepath=filepath)
# Slice off new data
# i, j, slice_len = lcs_idx
slice_idx = lcs_idx[1] + lcs_idx[-1] # slice = j + slice_len
data = data[slice_idx:]
# Concat data to buffer
buffer += data
return buffer
def inplace_buffer_merge(buffer, data, timesteps, model):
"""
Merges the new text from the current frame with the previous text contained in the buffer.
The alignment is based on a Longest Common Subsequence algorithm, with some additional heuristics leveraging
the notion that the chunk size is >= the context window. In case this assumption is violated, the results of the merge
will be incorrect (or at least obtain worse WER overall).
"""
# If delay timesteps is 0, that means no future context was used. Simply concatenate the buffer with new data.
if timesteps < 1:
buffer += data
return buffer
# If buffer is empty, simply concatenate the buffer and data.
if len(buffer) == 0:
buffer += data
return buffer
# Concat data to buffer
buffer += data
return buffer
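# Editorial usage sketch (not part of the original NeMo file): merging a new chunk of token
# ids into a running transcript buffer with the LCS-based merge. The delay and
# max_steps_per_timestep values are illustrative; `model` is not used by the merge itself
# in the code above, so None is passed here.
def _example_buffer_merge():
    buffer = [5, 6, 7, 8, 9, 10, 11]
    new_data = [10, 11, 12, 13]
    return lcs_alignment_merge_buffer(buffer, new_data, delay=2, model=None, max_steps_per_timestep=5)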
class StreamingFeatureBufferer:
"""
Class to append each feature frame to a buffer and return an array of buffers.
This class is designed to perform a real-life streaming decoding where only a single chunk
is provided at each step of a streaming pipeline.
"""
def __init__(self, asr_model, chunk_size, buffer_size):
'''
Args:
asr_model:
Reference to the asr model instance for which the feature needs to be created
chunk_size (float):
Duration of the new chunk of audio
buffer_size (float):
Size of the total audio in seconds maintained in the buffer
'''
self.NORM_CONSTANT = 1e-5
if hasattr(asr_model.preprocessor, 'log') and asr_model.preprocessor.log:
self.ZERO_LEVEL_SPEC_DB_VAL = -16.635 # Log-Melspectrogram value for zero signal
else:
self.ZERO_LEVEL_SPEC_DB_VAL = 0.0
self.asr_model = asr_model
self.sr = asr_model.cfg.sample_rate
self.model_normalize_type = asr_model.cfg.preprocessor.normalize
self.chunk_size = chunk_size
timestep_duration = asr_model.cfg.preprocessor.window_stride
self.n_chunk_look_back = int(timestep_duration * self.sr)
self.n_chunk_samples = int(chunk_size * self.sr)
self.buffer_size = buffer_size
total_buffer_len = int(buffer_size / timestep_duration)
self.n_feat = asr_model.cfg.preprocessor.features
self.sample_buffer = torch.zeros(int(self.buffer_size * self.sr))
self.buffer = torch.ones([self.n_feat, total_buffer_len], dtype=torch.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
self.feature_chunk_len = int(chunk_size / timestep_duration)
self.feature_buffer_len = total_buffer_len
self.reset()
cfg = copy.deepcopy(asr_model.cfg)
OmegaConf.set_struct(cfg.preprocessor, False)
cfg.preprocessor.dither = 0.0
cfg.preprocessor.pad_to = 0
cfg.preprocessor.normalize = "None"
self.raw_preprocessor = EncDecCTCModelBPE.from_config_dict(cfg.preprocessor)
self.raw_preprocessor.to(asr_model.device)
def reset(self):
'''
Reset frame_history and decoder's state
'''
self.buffer = torch.ones(self.buffer.shape, dtype=torch.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
self.frame_buffers = []
self.sample_buffer = torch.zeros(int(self.buffer_size * self.sr))
self.feature_buffer = (
torch.ones([self.n_feat, self.feature_buffer_len], dtype=torch.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
)
def _add_chunk_to_buffer(self, chunk):
"""
Add time-series audio signal to `sample_buffer`
Args:
chunk (Tensor):
Tensor filled with time-series audio signal
"""
self.sample_buffer[: -self.n_chunk_samples] = self.sample_buffer[self.n_chunk_samples :].clone()
self.sample_buffer[-self.n_chunk_samples :] = chunk.clone()
def _update_feature_buffer(self, feat_chunk):
"""
Add an extracted feature to `feature_buffer`
"""
self.feature_buffer[:, : -self.feature_chunk_len] = self.feature_buffer[:, self.feature_chunk_len :].clone()
self.feature_buffer[:, -self.feature_chunk_len :] = feat_chunk.clone()
def get_raw_feature_buffer(self):
return self.feature_buffer
def get_normalized_feature_buffer(self):
normalized_buffer, _, _ = normalize_batch(
x=self.feature_buffer.unsqueeze(0),
seq_len=torch.tensor([len(self.feature_buffer)]),
normalize_type=self.model_normalize_type,
)
return normalized_buffer.squeeze(0)
def _convert_buffer_to_features(self):
"""
Extract features from the time-series audio buffer `sample_buffer`.
"""
# samples for conversion to features.
# Add look_back to have context for the first feature
samples = self.sample_buffer[: -(self.n_chunk_samples + self.n_chunk_look_back)]
device = self.asr_model.device
audio_signal = samples.unsqueeze_(0).to(device)
audio_signal_len = torch.Tensor([samples.shape[1]]).to(device)
features, features_len = self.raw_preprocessor(input_signal=audio_signal, length=audio_signal_len,)
features = features.squeeze()
self._update_feature_buffer(features[:, -self.feature_chunk_len :])
def update_feature_buffer(self, chunk):
"""
Update time-series signal `chunk` to the buffer then generate features out of the
signal in the audio buffer.
Args:
chunk (Tensor):
Tensor filled with time-series audio signal
"""
if len(chunk) > self.n_chunk_samples:
raise ValueError(f"chunk should be of length {self.n_chunk_samples} or less")
if len(chunk) < self.n_chunk_samples:
temp_chunk = torch.zeros(self.n_chunk_samples, dtype=torch.float32)
temp_chunk[: chunk.shape[0]] = chunk
chunk = temp_chunk
self._add_chunk_to_buffer(chunk)
self._convert_buffer_to_features()
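# Editorial usage sketch (not part of the original NeMo file): streaming chunks of raw
# audio into the bufferer. asr_model is assumed to be a loaded NeMo CTC/RNNT model; the
# chunk/buffer durations are illustrative.
def _example_streaming_feature_bufferer(asr_model, audio_chunks):
    bufferer = StreamingFeatureBufferer(asr_model, chunk_size=0.16, buffer_size=4.0)
    features = None
    for chunk in audio_chunks:  # each chunk: 1-D float tensor with at most chunk_size * sample_rate samples
        bufferer.update_feature_buffer(chunk)
        features = bufferer.get_normalized_feature_buffer()
    return features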
class AudioFeatureIterator(IterableDataset):
def __init__(self, samples, frame_len, preprocessor, device):
self._samples = samples
self._frame_len = frame_len
self._start = 0
self.output = True
self.count = 0
timestep_duration = preprocessor._cfg['window_stride']
self._feature_frame_len = frame_len / timestep_duration
audio_signal = torch.from_numpy(self._samples).unsqueeze_(0).to(device)
audio_signal_len = torch.Tensor([self._samples.shape[0]]).to(device)
self._features, self._features_len = preprocessor(input_signal=audio_signal, length=audio_signal_len,)
self._features = self._features.squeeze()
def __iter__(self):
return self
def __next__(self):
if not self.output:
raise StopIteration
last = int(self._start + self._feature_frame_len)
if last <= self._features_len[0]:
frame = self._features[:, self._start : last].cpu()
self._start = last
else:
frame = np.zeros([self._features.shape[0], int(self._feature_frame_len)], dtype='float32')
samp_len = self._features_len[0] - self._start
frame[:, 0:samp_len] = self._features[:, self._start : self._features_len[0]].cpu()
self.output = False
self.count += 1
return frame
def speech_collate_fn(batch):
"""collate batch of audio sig, audio len, tokens, tokens len
Args:
batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
LongTensor): A tuple of tuples of signal, signal lengths,
encoded tokens, and encoded tokens length. This collate func
assumes the signals are 1d torch tensors (i.e. mono audio).
"""
_, audio_lengths = zip(*batch)
max_audio_len = 0
has_audio = audio_lengths[0] is not None
if has_audio:
max_audio_len = max(audio_lengths).item()
audio_signal = []
for sig, sig_len in batch:
if has_audio:
sig_len = sig_len.item()
if sig_len < max_audio_len:
pad = (0, max_audio_len - sig_len)
sig = torch.nn.functional.pad(sig, pad)
audio_signal.append(sig)
if has_audio:
audio_signal = torch.stack(audio_signal)
audio_lengths = torch.stack(audio_lengths)
else:
audio_signal, audio_lengths = None, None
return audio_signal, audio_lengths
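# Editorial usage sketch (not part of the original NeMo file): collating two mono signals
# of different lengths into a padded batch. The signal contents and lengths are made up
# for illustration.
def _example_speech_collate():
    batch = [
        (torch.randn(16000), torch.tensor(16000)),
        (torch.randn(12000), torch.tensor(12000)),
    ]
    audio_signal, audio_lengths = speech_collate_fn(batch)
    return audio_signal.shape, audio_lengths  # -> torch.Size([2, 16000]), tensor([16000, 12000])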
# simple data layer to pass buffered frames of audio samples
class AudioBuffersDataLayer(IterableDataset):
@property
def output_types(self):
return {
"processed_signal": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"processed_length": NeuralType(tuple('B'), LengthsType()),
}
def __init__(self):
super().__init__()
def __iter__(self):
return self
def __next__(self):
if self._buf_count == len(self.signal):
raise StopIteration
self._buf_count += 1
return (
torch.as_tensor(self.signal[self._buf_count - 1], dtype=torch.float32),
torch.as_tensor(self.signal_shape[1], dtype=torch.int64),
)
def set_signal(self, signals):
self.signal = signals
self.signal_shape = self.signal[0].shape
self._buf_count = 0
def __len__(self):
return 1
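# Editorial usage sketch (not part of the original NeMo file): feeding two pre-computed
# feature buffers through the data layer. The feature shapes are illustrative; in practice
# the buffers come from FeatureFrameBufferer below.
def _example_audio_buffers_data_layer():
    layer = AudioBuffersDataLayer()
    buffers = [np.zeros((80, 400), dtype=np.float32), np.zeros((80, 400), dtype=np.float32)]
    layer.set_signal(buffers)
    processed_signal, processed_length = next(iter(layer))
    return processed_signal.shape, processed_length  # -> torch.Size([80, 400]), tensor(400)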
class FeatureFrameBufferer:
"""
Class to append each feature frame to a buffer and return
an array of buffers.
"""
def __init__(self, asr_model, frame_len=1.6, batch_size=4, total_buffer=4.0):
'''
Args:
frame_len: frame's duration, seconds
frame_overlap: duration of overlaps before and after current frame, seconds
offset: number of symbols to drop for smooth streaming
'''
if hasattr(asr_model.preprocessor, 'log') and asr_model.preprocessor.log:
self.ZERO_LEVEL_SPEC_DB_VAL = -16.635 # Log-Melspectrogram value for zero signal
else:
self.ZERO_LEVEL_SPEC_DB_VAL = 0.0
self.asr_model = asr_model
self.sr = asr_model._cfg.sample_rate
self.frame_len = frame_len
timestep_duration = asr_model._cfg.preprocessor.window_stride
self.n_frame_len = int(frame_len / timestep_duration)
total_buffer_len = int(total_buffer / timestep_duration)
self.n_feat = asr_model._cfg.preprocessor.features
self.buffer = np.ones([self.n_feat, total_buffer_len], dtype=np.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
self.batch_size = batch_size
self.signal_end = False
self.frame_reader = None
self.feature_buffer_len = total_buffer_len
self.feature_buffer = (
np.ones([self.n_feat, self.feature_buffer_len], dtype=np.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
)
self.frame_buffers = []
self.buffered_features_size = 0
self.reset()
self.buffered_len = 0
def reset(self):
'''
Reset frame_history and decoder's state
'''
self.buffer = np.ones(shape=self.buffer.shape, dtype=np.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
self.prev_char = ''
self.unmerged = []
self.frame_buffers = []
self.buffered_len = 0
self.feature_buffer = (
np.ones([self.n_feat, self.feature_buffer_len], dtype=np.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
)
def get_batch_frames(self):
if self.signal_end:
return []
batch_frames = []
for frame in self.frame_reader:
batch_frames.append(np.copy(frame))
if len(batch_frames) == self.batch_size:
return batch_frames
self.signal_end = True
return batch_frames
def get_frame_buffers(self, frames):
# Build buffers for each frame
self.frame_buffers = []
for frame in frames:
self.buffer[:, : -self.n_frame_len] = self.buffer[:, self.n_frame_len :]
self.buffer[:, -self.n_frame_len :] = frame
self.buffered_len += frame.shape[1]
self.frame_buffers.append(np.copy(self.buffer))
return self.frame_buffers
def set_frame_reader(self, frame_reader):
self.frame_reader = frame_reader
self.signal_end = False
def _update_feature_buffer(self, feat_frame):
self.feature_buffer[:, : -feat_frame.shape[1]] = self.feature_buffer[:, feat_frame.shape[1] :]
self.feature_buffer[:, -feat_frame.shape[1] :] = feat_frame
self.buffered_features_size += feat_frame.shape[1]
def get_norm_consts_per_frame(self, batch_frames):
norm_consts = []
for i, frame in enumerate(batch_frames):
self._update_feature_buffer(frame)
mean_from_buffer = np.mean(self.feature_buffer, axis=1)
stdev_from_buffer = np.std(self.feature_buffer, axis=1)
norm_consts.append((mean_from_buffer.reshape(self.n_feat, 1), stdev_from_buffer.reshape(self.n_feat, 1)))
return norm_consts
def normalize_frame_buffers(self, frame_buffers, norm_consts):
CONSTANT = 1e-5
for i, frame_buffer in enumerate(frame_buffers):
frame_buffers[i] = (frame_buffer - norm_consts[i][0]) / (norm_consts[i][1] + CONSTANT)
def get_buffers_batch(self):
batch_frames = self.get_batch_frames()
while len(batch_frames) > 0:
frame_buffers = self.get_frame_buffers(batch_frames)
norm_consts = self.get_norm_consts_per_frame(batch_frames)
if len(frame_buffers) == 0:
continue
self.normalize_frame_buffers(frame_buffers, norm_consts)
return frame_buffers
return []
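# Illustrative arithmetic for the buffering above (assuming a typical 0.01 s preprocessor window_stride):
#   n_frame_len      = frame_len / window_stride    = 1.6 / 0.01 = 160 feature frames per chunk
#   total_buffer_len = total_buffer / window_stride = 4.0 / 0.01 = 400 feature frames of rolling context
# Each call to get_frame_buffers() shifts the rolling buffer left by n_frame_len frames and writes the
# newest chunk into its right end, so every returned buffer holds the chunk plus its surrounding context.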
class FrameBatchASR:
"""
Class for streaming frame-based ASR.
1) Use the reset() method to reset the decoder's state.
2) Call transcribe(tokens_per_chunk, delay) to run ASR over the buffered, contiguous signal frames.
"""
def __init__(
self, asr_model, frame_len=1.6, total_buffer=4.0, batch_size=4,
):
'''
Args:
asr_model: An ASR model used for buffered inference
frame_len: frame's duration, seconds
total_buffer: total duration of the audio buffer (chunk plus context), seconds
batch_size: number of frame buffers processed per forward pass
'''
self.frame_bufferer = FeatureFrameBufferer(
asr_model=asr_model, frame_len=frame_len, batch_size=batch_size, total_buffer=total_buffer
)
self.asr_model = asr_model
self.decoder = asr_model.decoder
self.batch_size = batch_size
self.all_logits = []
self.all_preds = []
self.unmerged = []
if hasattr(asr_model.decoder, "vocabulary"):
self.blank_id = len(asr_model.decoder.vocabulary)
else:
self.blank_id = len(asr_model.joint.vocabulary)
self.tokenizer = asr_model.tokenizer
self.toks_unmerged = []
self.frame_buffers = []
self.reset()
cfg = copy.deepcopy(asr_model._cfg)
self.cfg = cfg
self.frame_len = frame_len
OmegaConf.set_struct(cfg.preprocessor, False)
# some changes for streaming scenario
cfg.preprocessor.dither = 0.0
cfg.preprocessor.pad_to = 0
cfg.preprocessor.normalize = "None"
self.raw_preprocessor = EncDecCTCModelBPE.from_config_dict(cfg.preprocessor)
self.raw_preprocessor.to(asr_model.device)
self.preprocessor = self.raw_preprocessor
def reset(self):
"""
Reset frame_history and decoder's state
"""
self.prev_char = ''
self.unmerged = []
self.data_layer = AudioBuffersDataLayer()
self.data_loader = DataLoader(self.data_layer, batch_size=self.batch_size, collate_fn=speech_collate_fn)
self.all_logits = []
self.all_preds = []
self.toks_unmerged = []
self.frame_buffers = []
self.frame_bufferer.reset()
def read_audio_file(self, audio_filepath: str, delay, model_stride_in_secs):
samples = get_samples(audio_filepath)
samples = np.pad(samples, (0, int(delay * model_stride_in_secs * self.asr_model._cfg.sample_rate)))
frame_reader = AudioFeatureIterator(samples, self.frame_len, self.raw_preprocessor, self.asr_model.device)
self.set_frame_reader(frame_reader)
def set_frame_reader(self, frame_reader):
self.frame_bufferer.set_frame_reader(frame_reader)
@torch.no_grad()
def infer_logits(self, keep_logits=False):
frame_buffers = self.frame_bufferer.get_buffers_batch()
while len(frame_buffers) > 0:
self.frame_buffers += frame_buffers[:]
self.data_layer.set_signal(frame_buffers[:])
self._get_batch_preds(keep_logits)
frame_buffers = self.frame_bufferer.get_buffers_batch()
@torch.no_grad()
def _get_batch_preds(self, keep_logits=False):
device = self.asr_model.device
for batch in iter(self.data_loader):
feat_signal, feat_signal_len = batch
feat_signal, feat_signal_len = feat_signal.to(device), feat_signal_len.to(device)
forward_outs = self.asr_model(processed_signal=feat_signal, processed_signal_length=feat_signal_len)
if len(forward_outs) == 2: # hybrid ctc rnnt model
encoded, encoded_len = forward_outs
log_probs = self.asr_model.ctc_decoder(encoder_output=encoded)
predictions = log_probs.argmax(dim=-1, keepdim=False)
else:
log_probs, encoded_len, predictions = forward_outs
preds = torch.unbind(predictions)
for pred in preds:
self.all_preds.append(pred.cpu().numpy())
if keep_logits:
log_probs = torch.unbind(log_probs)
for log_prob in log_probs:
self.all_logits.append(log_prob.cpu())
else:
del log_probs
del encoded_len
del predictions
def transcribe(self, tokens_per_chunk: int, delay: int, keep_logits=False):
self.infer_logits(keep_logits)
self.unmerged = []
for pred in self.all_preds:
decoded = pred.tolist()
self.unmerged += decoded[len(decoded) - 1 - delay : len(decoded) - 1 - delay + tokens_per_chunk]
hypothesis = self.greedy_merge(self.unmerged)
if not keep_logits:
return hypothesis
all_logits = []
for log_prob in self.all_logits:
T = log_prob.shape[0]
log_prob = log_prob[T - 1 - delay : T - 1 - delay + tokens_per_chunk, :]
all_logits.append(log_prob)
all_logits = torch.concat(all_logits, 0)
return hypothesis, all_logits
def greedy_merge(self, preds):
decoded_prediction = []
previous = self.blank_id
for p in preds:
if (p != previous or previous == self.blank_id) and p != self.blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = self.tokenizer.ids_to_text(decoded_prediction)
return hypothesis
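# A minimal usage sketch (illustrative, not part of the original file) of FrameBatchASR for buffered
# CTC inference. `asr_model` is assumed to be an already-loaded CTC model, and `model_stride_in_secs`
# (e.g. 0.04 s for a 4x-subsampling encoder) is an assumption of this example, not a value read from
# the model automatically.
def _example_frame_batch_asr(asr_model, audio_file, chunk_len=1.6, total_buffer=4.0, model_stride_in_secs=0.04):
    import math

    tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs)
    # "middle token" delay: decode the tokens that sit in the middle of the buffered context
    mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs)
    frame_asr = FrameBatchASR(asr_model, frame_len=chunk_len, total_buffer=total_buffer, batch_size=4)
    frame_asr.read_audio_file(audio_file, mid_delay, model_stride_in_secs)
    return frame_asr.transcribe(tokens_per_chunk, mid_delay)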
class BatchedFeatureFrameBufferer(FeatureFrameBufferer):
"""
Batched variant of FeatureFrameBufferer where batch dimension is the independent audio samples.
"""
def __init__(self, asr_model, frame_len=1.6, batch_size=4, total_buffer=4.0):
'''
Args:
asr_model: An ASR model whose preprocessor config defines the feature geometry
frame_len: frame's duration, seconds
batch_size: number of independent audio streams held in the batch dimension
total_buffer: total duration of the audio buffer (chunk plus context), seconds
'''
super().__init__(asr_model, frame_len=frame_len, batch_size=batch_size, total_buffer=total_buffer)
# OVERRIDES OF BASE CLASS
timestep_duration = asr_model._cfg.preprocessor.window_stride
total_buffer_len = int(total_buffer / timestep_duration)
self.buffer = (
np.ones([batch_size, self.n_feat, total_buffer_len], dtype=np.float32) * self.ZERO_LEVEL_SPEC_DB_VAL
)
# Preserve list of buffers and indices, one for every sample
self.all_frame_reader = [None for _ in range(self.batch_size)]
self.signal_end = [False for _ in range(self.batch_size)]
self.signal_end_index = [None for _ in range(self.batch_size)]
self.buffer_number = 0 # preserve number of buffers returned since reset.
self.reset()
del self.buffered_len
del self.buffered_features_size
def reset(self):
'''
Reset frame_history and decoder's state
'''
super().reset()
self.feature_buffer = (
np.ones([self.batch_size, self.n_feat, self.feature_buffer_len], dtype=np.float32)
* self.ZERO_LEVEL_SPEC_DB_VAL
)
self.all_frame_reader = [None for _ in range(self.batch_size)]
self.signal_end = [False for _ in range(self.batch_size)]
self.signal_end_index = [None for _ in range(self.batch_size)]
self.buffer_number = 0
def get_batch_frames(self):
# Exit if all buffers of all samples have been processed
if all(self.signal_end):
return []
# Otherwise sequentially process frames of each sample one by one.
batch_frames = []
for idx, frame_reader in enumerate(self.all_frame_reader):
try:
frame = next(frame_reader)
frame = np.copy(frame)
batch_frames.append(frame)
except StopIteration:
# If this sample has finished all of its buffers
# Set its signal_end flag, and assign it the id of which buffer index
# did it finish the sample (if not previously set)
# This will let the alignment module know which sample in the batch finished
# at which index.
batch_frames.append(None)
self.signal_end[idx] = True
if self.signal_end_index[idx] is None:
self.signal_end_index[idx] = self.buffer_number
self.buffer_number += 1
return batch_frames
def get_frame_buffers(self, frames):
# Build buffers for each frame
self.frame_buffers = []
# Loop over all buffers of all samples
for idx in range(self.batch_size):
frame = frames[idx]
# If the sample has a buffer, then process it as usual
if frame is not None:
self.buffer[idx, :, : -self.n_frame_len] = self.buffer[idx, :, self.n_frame_len :]
self.buffer[idx, :, -self.n_frame_len :] = frame
# self.buffered_len += frame.shape[1]
# Wrap the buffer at index idx into an outer list
self.frame_buffers.append([np.copy(self.buffer[idx])])
else:
# If the buffer does not exist, the sample has finished processing
# set the entire buffer for that sample to 0
self.buffer[idx, :, :] *= 0.0
self.frame_buffers.append([np.copy(self.buffer[idx])])
return self.frame_buffers
def set_frame_reader(self, frame_reader, idx):
self.all_frame_reader[idx] = frame_reader
self.signal_end[idx] = False
self.signal_end_index[idx] = None
def _update_feature_buffer(self, feat_frame, idx):
# Update the feature buffer for given sample, or reset if the sample has finished processing
if feat_frame is not None:
self.feature_buffer[idx, :, : -feat_frame.shape[1]] = self.feature_buffer[idx, :, feat_frame.shape[1] :]
self.feature_buffer[idx, :, -feat_frame.shape[1] :] = feat_frame
# self.buffered_features_size += feat_frame.shape[1]
else:
self.feature_buffer[idx, :, :] *= 0.0
def get_norm_consts_per_frame(self, batch_frames):
for idx, frame in enumerate(batch_frames):
self._update_feature_buffer(frame, idx)
mean_from_buffer = np.mean(self.feature_buffer, axis=2, keepdims=True) # [B, self.n_feat, 1]
stdev_from_buffer = np.std(self.feature_buffer, axis=2, keepdims=True) # [B, self.n_feat, 1]
return (mean_from_buffer, stdev_from_buffer)
def normalize_frame_buffers(self, frame_buffers, norm_consts):
CONSTANT = 1e-8
for i in range(len(frame_buffers)):
frame_buffers[i] = (frame_buffers[i] - norm_consts[0][i]) / (norm_consts[1][i] + CONSTANT)
def get_buffers_batch(self):
batch_frames = self.get_batch_frames()
while len(batch_frames) > 0:
# while there exists at least one sample that has not been processed yet
frame_buffers = self.get_frame_buffers(batch_frames)
norm_consts = self.get_norm_consts_per_frame(batch_frames)
self.normalize_frame_buffers(frame_buffers, norm_consts)
return frame_buffers
return []
class BatchedFrameASRRNNT(FrameBatchASR):
"""
Batched implementation of FrameBatchASR for RNNT models, where the batch dimension is independent audio samples.
"""
def __init__(
self,
asr_model,
frame_len=1.6,
total_buffer=4.0,
batch_size=32,
max_steps_per_timestep: int = 5,
stateful_decoding: bool = False,
):
'''
Args:
asr_model: An RNNT model.
frame_len: frame's duration, seconds.
total_buffer: duration of total audio chunk size, in seconds.
batch_size: Number of independent audio samples to process at each step.
max_steps_per_timestep: Maximum number of tokens (u) to process per acoustic timestep (t).
stateful_decoding: Boolean whether to enable stateful decoding for preservation of state across buffers.
'''
super().__init__(asr_model, frame_len=frame_len, total_buffer=total_buffer, batch_size=batch_size)
# OVERRIDES OF THE BASE CLASS
self.max_steps_per_timestep = max_steps_per_timestep
self.stateful_decoding = stateful_decoding
self.all_alignments = [[] for _ in range(self.batch_size)]
self.all_preds = [[] for _ in range(self.batch_size)]
self.all_timestamps = [[] for _ in range(self.batch_size)]
self.previous_hypotheses = None
self.batch_index_map = {
idx: idx for idx in range(self.batch_size)
} # pointer from global batch id : local sub-batch id
try:
self.eos_id = self.asr_model.tokenizer.eos_id
except Exception:
self.eos_id = -1
print("Performing Stateful decoding :", self.stateful_decoding)
# OVERRIDES
self.frame_bufferer = BatchedFeatureFrameBufferer(
asr_model=asr_model, frame_len=frame_len, batch_size=batch_size, total_buffer=total_buffer
)
self.reset()
def reset(self):
"""
Reset frame_history and decoder's state
"""
super().reset()
self.all_alignments = [[] for _ in range(self.batch_size)]
self.all_preds = [[] for _ in range(self.batch_size)]
self.all_timestamps = [[] for _ in range(self.batch_size)]
self.previous_hypotheses = None
self.batch_index_map = {idx: idx for idx in range(self.batch_size)}
self.data_layer = [AudioBuffersDataLayer() for _ in range(self.batch_size)]
self.data_loader = [
DataLoader(self.data_layer[idx], batch_size=1, collate_fn=speech_collate_fn)
for idx in range(self.batch_size)
]
def read_audio_file(self, audio_filepath: list, delay, model_stride_in_secs):
assert len(audio_filepath) == self.batch_size
# Read in a batch of audio files, one by one
for idx in range(self.batch_size):
samples = get_samples(audio_filepath[idx])
samples = np.pad(samples, (0, int(delay * model_stride_in_secs * self.asr_model._cfg.sample_rate)))
frame_reader = AudioFeatureIterator(samples, self.frame_len, self.raw_preprocessor, self.asr_model.device)
self.set_frame_reader(frame_reader, idx)
def set_frame_reader(self, frame_reader, idx):
self.frame_bufferer.set_frame_reader(frame_reader, idx)
@torch.no_grad()
def infer_logits(self):
frame_buffers = self.frame_bufferer.get_buffers_batch()
while len(frame_buffers) > 0:
# While at least 1 sample has a buffer left to process
self.frame_buffers += frame_buffers[:]
for idx, buffer in enumerate(frame_buffers):
self.data_layer[idx].set_signal(buffer[:])
self._get_batch_preds()
frame_buffers = self.frame_bufferer.get_buffers_batch()
@torch.no_grad()
def _get_batch_preds(self):
"""
Perform dynamic batch size decoding of frame buffers of all samples.
Steps:
- Load all data loaders of every sample
- For all samples, determine if signal has finished.
- If so, skip calculation of mel-specs.
- If not, compute mel spec and length
- Perform Encoder forward over this sub-batch of samples. Maintain the indices of samples that were processed.
- If performing stateful decoding, prior to decoder forward, remove the states of samples that were not processed.
- Perform Decoder + Joint forward for samples that were processed.
- For all output RNNT alignment matrix of the joint do:
- If signal has ended previously (this was last buffer of padding), skip alignment
- Otherwise, recalculate global index of this sample from the sub-batch index, and preserve alignment.
- Same for preds
- Update indices of sub-batch with global index map.
- Redo steps until all samples were processed (sub-batch size == 0).
"""
device = self.asr_model.device
data_iters = [iter(data_loader) for data_loader in self.data_loader]
feat_signals = []
feat_signal_lens = []
new_batch_keys = []
# while not all(self.frame_bufferer.signal_end):
for idx in range(self.batch_size):
if self.frame_bufferer.signal_end[idx]:
continue
batch = next(data_iters[idx])
feat_signal, feat_signal_len = batch
feat_signal, feat_signal_len = feat_signal.to(device), feat_signal_len.to(device)
feat_signals.append(feat_signal)
feat_signal_lens.append(feat_signal_len)
# preserve batch indices
new_batch_keys.append(idx)
if len(feat_signals) == 0:
return
feat_signal = torch.cat(feat_signals, 0)
feat_signal_len = torch.cat(feat_signal_lens, 0)
del feat_signals, feat_signal_lens
encoded, encoded_len = self.asr_model(processed_signal=feat_signal, processed_signal_length=feat_signal_len)
# filter out partial hypotheses from older batch subset
if self.stateful_decoding and self.previous_hypotheses is not None:
new_prev_hypothesis = []
for new_batch_idx, global_index_key in enumerate(new_batch_keys):
old_pos = self.batch_index_map[global_index_key]
new_prev_hypothesis.append(self.previous_hypotheses[old_pos])
self.previous_hypotheses = new_prev_hypothesis
best_hyp, _ = self.asr_model.decoding.rnnt_decoder_predictions_tensor(
encoded, encoded_len, return_hypotheses=True, partial_hypotheses=self.previous_hypotheses
)
if self.stateful_decoding:
# preserve last state from hypothesis of new batch indices
self.previous_hypotheses = best_hyp
for idx, hyp in enumerate(best_hyp):
global_index_key = new_batch_keys[idx] # get index of this sample in the global batch
has_signal_ended = self.frame_bufferer.signal_end[global_index_key]
if not has_signal_ended:
self.all_alignments[global_index_key].append(hyp.alignments)
preds = [hyp.y_sequence for hyp in best_hyp]
for idx, pred in enumerate(preds):
global_index_key = new_batch_keys[idx] # get index of this sample in the global batch
has_signal_ended = self.frame_bufferer.signal_end[global_index_key]
if not has_signal_ended:
self.all_preds[global_index_key].append(pred.cpu().numpy())
timestamps = [hyp.timestep for hyp in best_hyp]
for idx, timestep in enumerate(timestamps):
global_index_key = new_batch_keys[idx] # get index of this sample in the global batch
has_signal_ended = self.frame_bufferer.signal_end[global_index_key]
if not has_signal_ended:
self.all_timestamps[global_index_key].append(timestep)
if self.stateful_decoding:
# State resetting is being done on sub-batch only, global index information is not being updated
reset_states = self.asr_model.decoder.initialize_state(encoded)
for idx, pred in enumerate(preds):
if len(pred) > 0 and pred[-1] == self.eos_id:
# reset states :
self.previous_hypotheses[idx].y_sequence = self.previous_hypotheses[idx].y_sequence[:-1]
self.previous_hypotheses[idx].dec_state = self.asr_model.decoder.batch_select_state(
reset_states, idx
)
# Position map update
if len(new_batch_keys) != len(self.batch_index_map):
for new_batch_idx, global_index_key in enumerate(new_batch_keys):
self.batch_index_map[global_index_key] = new_batch_idx # let index point from global pos -> local pos
del encoded, encoded_len
del best_hyp, pred
def transcribe(
self, tokens_per_chunk: int, delay: int,
):
"""
Performs "middle token" alignment prediction using the buffered audio chunk.
"""
self.infer_logits()
self.unmerged = [[] for _ in range(self.batch_size)]
for idx, alignments in enumerate(self.all_alignments):
signal_end_idx = self.frame_bufferer.signal_end_index[idx]
if signal_end_idx is None:
raise ValueError("Signal did not end")
for a_idx, alignment in enumerate(alignments):
if delay == len(alignment): # chunk size = buffer size
offset = 0
else: # all other cases
offset = 1
alignment = alignment[
len(alignment) - offset - delay : len(alignment) - offset - delay + tokens_per_chunk
]
ids, toks = self._alignment_decoder(alignment, self.asr_model.tokenizer, self.blank_id)
if len(ids) > 0 and a_idx < signal_end_idx:
self.unmerged[idx] = inplace_buffer_merge(self.unmerged[idx], ids, delay, model=self.asr_model,)
output = []
for idx in range(self.batch_size):
output.append(self.greedy_merge(self.unmerged[idx]))
return output
def _alignment_decoder(self, alignments, tokenizer, blank_id):
s = []
ids = []
for t in range(len(alignments)):
for u in range(len(alignments[t])):
_, token_id = alignments[t][u] # (logprob, token_id)
token_id = int(token_id)
if token_id != blank_id:
token = tokenizer.ids_to_tokens([token_id])[0]
s.append(token)
ids.append(token_id)
else:
# blank token
pass
return ids, s
def greedy_merge(self, preds):
decoded_prediction = [p for p in preds]
hypothesis = self.asr_model.tokenizer.ids_to_text(decoded_prediction)
return hypothesis
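# Illustrative sketch (not part of the original file): BatchedFrameASRRNNT reads exactly `batch_size`
# audio files at once and returns one hypothesis per file. The chunk/stride values below are assumptions
# of this example.
def _example_batched_rnnt_buffered(asr_model, audio_files, chunk_len=1.6, total_buffer=4.0, model_stride_in_secs=0.04):
    import math

    frame_asr = BatchedFrameASRRNNT(
        asr_model, frame_len=chunk_len, total_buffer=total_buffer, batch_size=len(audio_files)
    )
    tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs)
    mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs)
    frame_asr.read_audio_file(audio_files, mid_delay, model_stride_in_secs)
    return frame_asr.transcribe(tokens_per_chunk, mid_delay)  # list with one transcript per file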
class LongestCommonSubsequenceBatchedFrameASRRNNT(BatchedFrameASRRNNT):
"""
Implements a token alignment algorithm for text alignment instead of middle token alignment.
For more detail, read the docstring of longest_common_subsequence_merge().
"""
def __init__(
self,
asr_model,
frame_len=1.6,
total_buffer=4.0,
batch_size=4,
max_steps_per_timestep: int = 5,
stateful_decoding: bool = False,
alignment_basepath: str = None,
):
'''
Args:
asr_model: An RNNT model.
frame_len: frame's duration, seconds.
total_buffer: duration of total audio chunk size, in seconds.
batch_size: Number of independent audio samples to process at each step.
max_steps_per_timestep: Maximum number of tokens (u) to process per acoustic timestep (t).
stateful_decoding: Boolean whether to enable stateful decoding for preservation of state across buffers.
alignment_basepath: Str path to a directory where alignments from LCS will be preserved for later analysis.
'''
super().__init__(asr_model, frame_len, total_buffer, batch_size, max_steps_per_timestep, stateful_decoding)
self.sample_offset = 0
self.lcs_delay = -1
self.alignment_basepath = alignment_basepath
def transcribe(
self, tokens_per_chunk: int, delay: int,
):
if self.lcs_delay < 0:
raise ValueError(
"Please set LCS Delay valus as `(buffer_duration - chunk_duration) / model_stride_in_secs`"
)
self.infer_logits()
self.unmerged = [[] for _ in range(self.batch_size)]
for idx, alignments in enumerate(self.all_alignments):
signal_end_idx = self.frame_bufferer.signal_end_index[idx]
if signal_end_idx is None:
raise ValueError("Signal did not end")
for a_idx, alignment in enumerate(alignments):
# Middle token first chunk
if a_idx == 0:
# len(alignment) - 1 - delay + tokens_per_chunk
alignment = alignment[len(alignment) - 1 - delay :]
ids, toks = self._alignment_decoder(alignment, self.asr_model.tokenizer, self.blank_id)
if len(ids) > 0:
self.unmerged[idx] = inplace_buffer_merge(
self.unmerged[idx], ids, delay, model=self.asr_model,
)
else:
ids, toks = self._alignment_decoder(alignment, self.asr_model.tokenizer, self.blank_id)
if len(ids) > 0 and a_idx < signal_end_idx:
if self.alignment_basepath is not None:
basepath = self.alignment_basepath
sample_offset = self.sample_offset + idx
alignment_offset = a_idx
path = os.path.join(basepath, str(sample_offset))
os.makedirs(path, exist_ok=True)
path = os.path.join(path, "alignment_" + str(alignment_offset) + '.pt')
filepath = path
else:
filepath = None
self.unmerged[idx] = lcs_alignment_merge_buffer(
self.unmerged[idx],
ids,
self.lcs_delay,
model=self.asr_model,
max_steps_per_timestep=self.max_steps_per_timestep,
filepath=filepath,
)
output = []
for idx in range(self.batch_size):
output.append(self.greedy_merge(self.unmerged[idx]))
return output
class CacheAwareStreamingAudioBuffer:
"""
A buffer to be used for cache-aware streaming. It can load one or multiple audio files/processed signals,
split them into chunks, and return the chunks one by one. It can be used to simulate streaming of a single
audio file or of a batch of audio files.
"""
def __init__(self, model, online_normalization=None, pad_and_drop_preencoded=False):
'''
Args:
model: An ASR model.
online_normalization (bool): whether to perform online normalization per chunk or normalize the whole audio before chunking
pad_and_drop_preencoded (bool): if true pad first audio chunk and always drop preencoded
'''
self.model = model
self.buffer = None
self.buffer_idx = 0
self.streams_length = None
self.step = 0
self.pad_and_drop_preencoded = pad_and_drop_preencoded
self.online_normalization = online_normalization
if not isinstance(model.encoder, StreamingEncoder):
raise ValueError(
"The model's encoder is not inherited from StreamingEncoder, and likely not to support streaming!"
)
if model.encoder.streaming_cfg is None:
model.encoder.setup_streaming_params()
self.streaming_cfg = model.encoder.streaming_cfg
self.input_features = model.encoder._feat_in
self.preprocessor = self.extract_preprocessor()
if hasattr(model.encoder, "pre_encode") and hasattr(model.encoder.pre_encode, "get_sampling_frames"):
self.sampling_frames = model.encoder.pre_encode.get_sampling_frames()
else:
self.sampling_frames = None
def __iter__(self):
while True:
if self.buffer_idx >= self.buffer.size(-1):
return
if self.buffer_idx == 0 and isinstance(self.streaming_cfg.chunk_size, list):
if self.pad_and_drop_preencoded:
chunk_size = self.streaming_cfg.chunk_size[1]
else:
chunk_size = self.streaming_cfg.chunk_size[0]
else:
chunk_size = (
self.streaming_cfg.chunk_size[1]
if isinstance(self.streaming_cfg.chunk_size, list)
else self.streaming_cfg.chunk_size
)
if self.buffer_idx == 0 and isinstance(self.streaming_cfg.shift_size, list):
if self.pad_and_drop_preencoded:
shift_size = self.streaming_cfg.shift_size[1]
else:
shift_size = self.streaming_cfg.shift_size[0]
else:
shift_size = (
self.streaming_cfg.shift_size[1]
if isinstance(self.streaming_cfg.shift_size, list)
else self.streaming_cfg.shift_size
)
audio_chunk = self.buffer[:, :, self.buffer_idx : self.buffer_idx + chunk_size]
if self.sampling_frames is not None:
# checking to make sure the audio chunk has enough frames to produce at least one output after downsampling
if self.buffer_idx == 0 and isinstance(self.sampling_frames, list):
cur_sampling_frames = self.sampling_frames[0]
else:
cur_sampling_frames = (
self.sampling_frames[1] if isinstance(self.sampling_frames, list) else self.sampling_frames
)
if audio_chunk.size(-1) < cur_sampling_frames:
return
# Add the cache needed for the pre-encoder part of the model to the chunk.
# If there are not enough frames to be used as the pre-encoding cache, zeros are added.
zeros_pads = None
if self.buffer_idx == 0 and isinstance(self.streaming_cfg.pre_encode_cache_size, list):
if self.pad_and_drop_preencoded:
cache_pre_encode_num_frames = self.streaming_cfg.pre_encode_cache_size[1]
else:
cache_pre_encode_num_frames = self.streaming_cfg.pre_encode_cache_size[0]
cache_pre_encode = torch.zeros(
(audio_chunk.size(0), self.input_features, cache_pre_encode_num_frames),
device=audio_chunk.device,
dtype=audio_chunk.dtype,
)
else:
if isinstance(self.streaming_cfg.pre_encode_cache_size, list):
pre_encode_cache_size = self.streaming_cfg.pre_encode_cache_size[1]
else:
pre_encode_cache_size = self.streaming_cfg.pre_encode_cache_size
start_pre_encode_cache = self.buffer_idx - pre_encode_cache_size
if start_pre_encode_cache < 0:
start_pre_encode_cache = 0
cache_pre_encode = self.buffer[:, :, start_pre_encode_cache : self.buffer_idx]
if cache_pre_encode.size(-1) < pre_encode_cache_size:
zeros_pads = torch.zeros(
(
audio_chunk.size(0),
audio_chunk.size(-2),
pre_encode_cache_size - cache_pre_encode.size(-1),
),
device=audio_chunk.device,
dtype=audio_chunk.dtype,
)
added_len = cache_pre_encode.size(-1)
audio_chunk = torch.cat((cache_pre_encode, audio_chunk), dim=-1)
if self.online_normalization:
audio_chunk, x_mean, x_std = normalize_batch(
x=audio_chunk,
seq_len=torch.tensor([audio_chunk.size(-1)] * audio_chunk.size(0)),
normalize_type=self.model_normalize_type,
)
if zeros_pads is not None:
# TODO: check here when zero_pads is not None and added_len is already non-zero
audio_chunk = torch.cat((zeros_pads, audio_chunk), dim=-1)
added_len += zeros_pads.size(-1)
max_chunk_lengths = self.streams_length - self.buffer_idx
max_chunk_lengths = max_chunk_lengths + added_len
chunk_lengths = torch.clamp(max_chunk_lengths, min=0, max=audio_chunk.size(-1))
self.buffer_idx += shift_size
self.step += 1
yield audio_chunk, chunk_lengths
def is_buffer_empty(self):
if self.buffer_idx >= self.buffer.size(-1):
return True
else:
return False
def __len__(self):
return len(self.buffer)
def reset_buffer(self):
self.buffer = None
self.buffer_idx = 0
self.streams_length = None
self.step = 0
def reset_buffer_pointer(self):
self.buffer_idx = 0
self.step = 0
def extract_preprocessor(self):
cfg = copy.deepcopy(self.model._cfg)
self.model_normalize_type = cfg.preprocessor.normalize
OmegaConf.set_struct(cfg.preprocessor, False)
cfg.preprocessor.dither = 0.0
cfg.preprocessor.pad_to = 0
if self.online_normalization:
cfg.preprocessor.normalize = "None"
preprocessor = self.model.from_config_dict(cfg.preprocessor)
return preprocessor.to(self.get_model_device())
def append_audio_file(self, audio_filepath, stream_id=-1):
audio = get_samples(audio_filepath)
processed_signal, processed_signal_length, stream_id = self.append_audio(audio, stream_id)
return processed_signal, processed_signal_length, stream_id
def append_audio(self, audio, stream_id=-1):
processed_signal, processed_signal_length = self.preprocess_audio(audio)
processed_signal, processed_signal_length, stream_id = self.append_processed_signal(
processed_signal, stream_id
)
return processed_signal, processed_signal_length, stream_id
def append_processed_signal(self, processed_signal, stream_id=-1):
processed_signal_length = torch.tensor(processed_signal.size(-1), device=processed_signal.device)
if stream_id >= 0 and (self.streams_length is not None and stream_id >= len(self.streams_length)):
raise ValueError("Not valid stream_id!")
if self.buffer is None:
if stream_id >= 0:
raise ValueError("stream_id can not be specified when there is no stream.")
self.buffer = processed_signal
self.streams_length = torch.tensor([processed_signal_length], device=processed_signal.device)
else:
if self.buffer.size(1) != processed_signal.size(1):
raise ValueError("Buffer and the processed signal have different dimensions!")
if stream_id < 0:
self.buffer = torch.nn.functional.pad(self.buffer, pad=(0, 0, 0, 0, 0, 1))
self.streams_length = torch.cat(
(self.streams_length, torch.tensor([0], device=self.streams_length.device)), dim=-1
)
stream_id = len(self.streams_length) - 1
needed_len = self.streams_length[stream_id] + processed_signal_length
if needed_len > self.buffer.size(-1):
self.buffer = torch.nn.functional.pad(self.buffer, pad=(0, needed_len - self.buffer.size(-1)))
self.buffer[
stream_id, :, self.streams_length[stream_id] : self.streams_length[stream_id] + processed_signal_length
] = processed_signal
self.streams_length[stream_id] = self.streams_length[stream_id] + processed_signal.size(-1)
if self.online_normalization:
processed_signal, x_mean, x_std = normalize_batch(
x=processed_signal,
seq_len=torch.tensor([processed_signal_length]),
normalize_type=self.model_normalize_type,
)
return processed_signal, processed_signal_length, stream_id
def get_model_device(self):
return self.model.device
def preprocess_audio(self, audio, device=None):
if device is None:
device = self.get_model_device()
audio_signal = torch.from_numpy(audio).unsqueeze_(0).to(device)
audio_signal_len = torch.Tensor([audio.shape[0]]).to(device)
processed_signal, processed_signal_length = self.preprocessor(
input_signal=audio_signal, length=audio_signal_len
)
return processed_signal, processed_signal_length
def get_all_audios(self):
processed_signal = self.buffer
if self.online_normalization:
processed_signal, x_mean, x_std = normalize_batch(
x=processed_signal,
seq_len=torch.tensor(self.streams_length),
normalize_type=self.model_normalize_type,
)
return processed_signal, self.streams_length
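# A minimal sketch (illustrative, not part of the original file) of driving CacheAwareStreamingAudioBuffer:
# audio files are appended as separate streams and the iterator yields (chunk, chunk_lengths) pairs that
# would be fed, together with the encoder cache tensors, to the model's cache-aware streaming step. The
# `model` object and the file list are assumptions of this example.
def _example_cache_aware_streaming_buffer(model, audio_files):
    streaming_buffer = CacheAwareStreamingAudioBuffer(model, online_normalization=False)
    for audio_file in audio_files:
        processed_signal, processed_signal_length, stream_id = streaming_buffer.append_audio_file(audio_file)
    for step, (chunk_audio, chunk_lengths) in enumerate(streaming_buffer):
        # each chunk already carries its pre-encode cache frames on the left side
        print(step, chunk_audio.shape, chunk_lengths)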
|
NeMo-main
|
nemo/collections/asr/parts/utils/streaming_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2007-2020 The scikit-learn developers.
# BSD 3-Clause License
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# NME-SC clustering is based on the implementation from the paper
# https://arxiv.org/pdf/2003.02405.pdf and the implementation from
# https://github.com/tango4j/Auto-Tuning-Spectral-Clustering.
from typing import List, Set, Tuple
import numpy as np
import torch
from nemo.collections.asr.parts.utils.offline_clustering import (
NMESC,
SpectralClustering,
getAffinityGraphMat,
getCosAffinityMatrix,
)
from nemo.collections.asr.parts.utils.optimization_utils import linear_sum_assignment
def get_lsa_speaker_mapping(
U_set: torch.Tensor, cmm_P: torch.Tensor, cmm_Q: torch.Tensor, PandQ: torch.Tensor
) -> torch.Tensor:
"""
Find a mapping that minimizes the matching cost between the labels P and Q.
One-hot encoding is employed to represent the sequences and calculate the cost.
Args:
U_set (list):
Whole set of the estimated speakers
cmm_P (Tensor):
Length-matched old sequence
cmm_Q (Tensor):
Length-matched new sequence
PandQ (Tensor):
Tensor containing the indices of the speakers that are in both old and new sequences
Returns:
mapping_array (np.array):
Mapped labels that minimizes the cost
"""
all_spks_labels = [[x] for x in range(len(U_set))]
common_inds: List[int] = [int(x.item()) for x in PandQ]
# Create tensors for one-hot encoding
enc_P = torch.zeros((len(cmm_P), len(all_spks_labels))).to(cmm_P.device)
enc_Q = torch.zeros((len(cmm_Q), len(all_spks_labels))).to(cmm_Q.device)
# Create one-hot encoding
enc_P[torch.arange(len(cmm_P)), cmm_P] = 1
enc_Q[torch.arange(len(cmm_Q)), cmm_Q] = 1
# Cost matrix from one-hot encoding vectors
cost = -1 * torch.matmul(enc_P.T, enc_Q).T.to(PandQ.device)
_, col_ind = linear_sum_assignment(cost)
# If the number of speakers in each vector is not the same
mapping_array = torch.arange(0, len(U_set)).to(PandQ.device)
for x in range(col_ind.shape[0]):
if x not in common_inds:
mapping_array[x] = x
else:
mapping_array[x] = col_ind[x]
return mapping_array
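# Worked mini-example (illustrative): the cost above is simply the negative co-occurrence count between
# old and new labels. For cmm_P = [0, 0, 1, 1] and cmm_Q = [1, 1, 0, 0], enc_P.T @ enc_Q = [[0, 2], [2, 0]],
# so cost = [[0, -2], [-2, 0]] and the linear sum assignment maps new label 0 -> old label 1 and new
# label 1 -> old label 0, i.e. mapping_array = [1, 0].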
def get_minimal_indices(Y_new: torch.Tensor) -> torch.Tensor:
"""
Force the unique indices of the labels to use the lowest numbers.
Example:
>>> Y_new = [3, 3, 3, 4, 4, 5]
>>> get_minimal_indices(Y_new)
Return:
[0, 0, 0, 1, 1, 2]
Args:
Y_new (Tensor):
Tensor containing cluster labels
Returns:
(Tensor): Newly mapped cluster labels with minimized indices
"""
device = Y_new.device
Y_new_enlisted = torch.unique(Y_new).sort()[0].to(torch.long).to(device)
sequence = torch.arange(torch.max(Y_new_enlisted) + 1).to(device)
sequence[Y_new_enlisted] = torch.arange(len(Y_new_enlisted)).to(device)
return sequence[Y_new]
@torch.jit.script
def stitch_cluster_labels(Y_old: torch.Tensor, Y_new: torch.Tensor) -> torch.Tensor:
"""
Run Hungarian (linear sum assignment) algorithm to find the best permutation mapping between
the cumulated labels in history and the new clustering output labels.
Args:
Y_old (Tensor):
Cumulated diarization labels. This will be concatenated with history embedding speaker label
then compared with the predicted label Y_new.
Y_new (Tensor):
Contains predicted labels for reduced history embeddings concatenated with the predicted label.
Permutation is not matched yet.
Returns:
mapping_array[Y] (Tensor):
An output tensor where the input Y_new is mapped with mapping_array.
"""
Y_new = get_minimal_indices(Y_new)
if len(Y_old) == 0:
matched_output = Y_new
else:
P_raw, Q_raw = Y_old.to(Y_new.device), Y_new
U_set = torch.unique(torch.cat([P_raw, Q_raw]))
PQ = torch.cat([P_raw, Q_raw])
a_cat_b, counts = torch.unique(PQ, return_counts=True)
# Get a union set of old P and new Q labels
PandQ = a_cat_b[torch.where(counts.gt(1))[0]]
min_len = min(P_raw.shape[0], Q_raw.shape[0])
P, Q = P_raw[:min_len], Q_raw[:min_len]
if len(U_set) == 1:
# When two speaker vectors are exactly the same: No need to encode.
mapping_array = torch.tensor([0, 0]).to(Y_new.device)
else:
# Run Hungarian algorithm if there are more than one speaker in universal set U.
mapping_array = get_lsa_speaker_mapping(U_set=U_set, cmm_P=P, cmm_Q=Q, PandQ=PandQ)
matched_output = mapping_array[Y_new]
matched_output = get_minimal_indices(matched_output)
return matched_output
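# Illustrative sketch (not part of the original file): stitching keeps the speaker identities of the
# history consistent even when a fresh clustering run permutes them. The label values below are made up
# for the example; see the worked mapping above.
def _example_stitch_cluster_labels():
    Y_old = torch.tensor([0, 0, 1, 1])
    Y_new = torch.tensor([1, 1, 0, 0, 0])  # same speakers, permuted labels, one extra segment
    return stitch_cluster_labels(Y_old, Y_new)  # expected: tensor([0, 0, 1, 1, 1])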
def calculate_removable_counts(removable_counts_mat: torch.Tensor, remain_count: int, num_clus: int) -> torch.Tensor:
"""
Calculate removable counts based on the arguments and determine how many counts should be
removed from each cluster. This function has `O(N)` (N = num_clus) time complexity to
return the desired `removable_counts_mat`.
Example:
The original input to `get_merge_quantity` function:
>>> pre_clus_labels = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2]
>>> num_to_be_removed = 3
>>> min_count_per_cluster = 2
Histogram: (`min_count_per_cluster`=2 is removed)
0 |*****
1 |***
2 |*
Inputs:
>>> removable_counts_mat = [5, 3, 1]
>>> remain_count = 6
>>> num_clus = 3
Interim results:
>>> diff_counts
[1, 2, 2]
>>> gradual_counts
[3, 4, 2]
>>> cumsum_counts
[3, 7, 9]
Return:
>>> removable_counts_mat
[2, 1, 0]
Args:
removable_counts_mat (Tensor):
Tensor containing how many vectors could be removed from each cluster
remain_count (int):
Integer value that indicates the number of vectors removed from the total set
num_clus (int):
Number of clusters in the given label sequence (cardinality of a label set)
Returns:
removable_counts_mat (Tensor):
Tensor containing the number of vectors that should be removed from each cluster
"""
device = removable_counts_mat.device
zero_padded_counts = torch.cat(
[torch.tensor([0]).to(device), removable_counts_mat.sort()[0], torch.tensor([0]).to(device)], dim=0
)
removable_count_args = removable_counts_mat.sort(descending=True)[1]
# Calculate the size difference between clusters
diff_counts = (zero_padded_counts[1:] - zero_padded_counts[:-1])[:num_clus]
gradual_counts = torch.arange(num_clus, 0, -1).to(device) * diff_counts
cumsum_counts = torch.cumsum(gradual_counts, dim=0)
remain_count_rem = remain_count
# Find how many remaining counts we can use
ind: int = 0
for ind, num in enumerate(cumsum_counts):
if remain_count < num:
break
# Subtract the common values step by step
if ind > 0:
for knd in range(ind):
removable_counts_mat[removable_count_args[: num_clus - knd]] -= diff_counts[knd]
remain_count_rem -= int(diff_counts[knd].item()) * (num_clus - knd)
assert remain_count >= 0, "remain_count should never be negative."
# Add remaining values
num_labels = remain_count_rem // (num_clus - ind)
rem_labels = remain_count_rem % (num_clus - ind)
removable_counts_mat[removable_count_args[: (num_clus - ind)]] -= num_labels
removable_counts_mat[removable_count_args[:rem_labels]] -= 1
return removable_counts_mat
def get_merge_quantity(
num_to_be_removed: int, pre_clus_labels: torch.Tensor, min_count_per_cluster: int,
) -> torch.Tensor:
"""
Determine which embeddings we need to reduce or merge in history buffer.
We want to merge or remove the embedding in the bigger cluster first.
At the same time, we keep the minimum number of embedding per cluster
with the variable named min_count_per_cluster.
Constraint:
- Each cluster should keep the number of vectors over `min_count_per_cluster`.
- In total, `num_to_be_removed` of vectors should be removed from the total buffer.
- While merging embeddings, minimize the gap between quantities between clusters.
Example:
>>> num_to_be_removed = 3
>>> pre_clus_labels = [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2]
>>> min_count_per_cluster = 2
>>> get_merge_quantity(num_to_be_removed, pre_clus_labels, min_count_per_cluster)
Return:
torch.tensor([2, 1, 0])
>>> # Sum should be equal to `num_to_be_removed` which is 3
Args:
num_to_be_removed: (int)
the quantity of the newly obtained embedding from the new stream of input.
pre_clus_labels: (Tensor)
the speaker labels of (the history_embedding_buffer_emb) + (the new embeddings to be added)
min_count_per_cluster: (int)
Minimum vector quantity for each cluster
Returns:
removable_counts_mat: (Tensor)
Tensor containing the number of vectors that should be removed from each cluster
"""
if num_to_be_removed > pre_clus_labels.shape[0] - 1:
raise ValueError(f"num_to_be_removed: {num_to_be_removed} should be less than pre_clus_labels length - 1")
remain_count = pre_clus_labels.shape[0] - num_to_be_removed
spk_freq_count = torch.bincount(pre_clus_labels)
num_clus = len(torch.unique(pre_clus_labels))
if remain_count < min_count_per_cluster * num_clus:
raise ValueError(f"The remaining embedding vectors should be more than { min_count_per_cluster * num_clus }")
# Minimum vector counts should be excluded from the removable amount
min_seg_count = torch.tensor([min_count_per_cluster] * len(spk_freq_count)).to(pre_clus_labels.device)
min_seg_count_mat = torch.stack((min_seg_count, spk_freq_count)).min(0)[0]
# Exclude minimum quantities from the removable count matrix
remain_count -= int(torch.sum(min_seg_count_mat))
removable_counts_mat = spk_freq_count - min_seg_count_mat
# Calculate removable counts from `remain_count` variable
removable_counts_mat = calculate_removable_counts(removable_counts_mat, remain_count, num_clus)
if int(removable_counts_mat.sum()) != num_to_be_removed:
raise ValueError("Sum of `removable_counts_mat` is not equal to `num_to_be_removed` variable.")
if not torch.all(removable_counts_mat >= 0) or not torch.all(spk_freq_count - min_seg_count_mat >= 0):
raise ValueError(
f"Every value in `removable_counts_mat` should be always non-negative value but got {removable_counts_mat}"
)
return removable_counts_mat
def merge_vectors(
selected_inds: torch.Tensor, emb_ndx: torch.Tensor, pre_cluster_labels: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Merge feature (embedding) vectors estimated to be the same cluster label.
Args:
selected_inds (Tensor):
Selected indices for merging
emb_ndx (Tensor):
Feature (embedding) vectors
Dimension: (original vector counts) x (feature dimension)
pre_cluster_labels (Tensor):
Original cluster labels before merging
Returns:
merged_vecs (Tensor):
Merged feature vectors that are concatenated
Dimension: (merged vector counts) x (feature dimension)
merged_clus_labels (Tensor):
Cluster labels for the merged feature vectors
Dimension: (merged vector counts)
"""
if emb_ndx.shape[0] != pre_cluster_labels.shape[0]:
raise ValueError("pre_cluster_labels and emb_ndx have mismatch in dimension")
avg_emb = torch.mean(emb_ndx[selected_inds, :], dim=0)
merged_clus_labels = pre_cluster_labels[selected_inds]
selected_inds_list: List[int] = selected_inds.tolist()
bypass_inds_list: List[int] = []
for k in range(emb_ndx.shape[0]):
if k not in selected_inds_list:
bypass_inds_list.append(k)
bypass_inds = torch.tensor(bypass_inds_list)
selected_inds = torch.tensor(selected_inds_list)
merged_vecs = torch.vstack((emb_ndx[bypass_inds], avg_emb))
merged_clus_labels = torch.hstack((pre_cluster_labels[bypass_inds], merged_clus_labels[0]))
return merged_vecs, merged_clus_labels
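# Illustrative sketch: merging rows 0 and 2 of a three-vector set averages those two rows and appends the
# average after the bypassed row, so both the vectors and the labels shrink by one. Values are made up.
def _example_merge_vectors():
    emb_ndx = torch.tensor([[1.0, 0.0], [0.0, 1.0], [3.0, 0.0]])
    pre_cluster_labels = torch.tensor([0, 1, 0])
    merged_vecs, merged_clus_labels = merge_vectors(torch.tensor([0, 2]), emb_ndx, pre_cluster_labels)
    # merged_vecs -> [[0., 1.], [2., 0.]], merged_clus_labels -> [1, 0]
    return merged_vecs, merged_clus_labels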
def get_closest_embeddings(affinity_mat: torch.Tensor, n_closest: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Get the indices of the embedding vectors we want to merge.
Example:
>>> n_closest = 2
>>> affinity_mat = [[1.0, 0.2, 0.8],
[0.2, 1.0, 0.4],
[0.8, 0.4, 1.0]]
>>> affinity_mat.sum(0)
[2.0, 1.6, 2.2]
# The closest two embedding vectors are at index 0 and 2.
Args:
affinity_mat: (Tensor)
Symmetric affinity matrix of the given embedding vector set.
n_closest (int):
The number of vectors that are expected to be removed from the set
Example:
Input: 10 vectors in a set
n_closest = 5
(5+1) vectors are merged into 1 vector
Output: 5 vectors in a set
Returns:
idx_aff_sum (torch.Tensor):
Indices of the closest `n_closest` embedding vectors
rest_inds (torch.Tensor):
Indices of the complementary set of the indices in `idx_aff_sum`
"""
comb_limit = int(affinity_mat.shape[0] - 1)
if n_closest > comb_limit:
raise ValueError(f"Got n_closest of {n_closest}: {n_closest} is bigger than comb_limit {comb_limit}")
# Take summed values over one axis
sum_cmat = affinity_mat.sum(0)
# `n_closest + 1` will become 1 embedding vector after merging
idx_aff_sum = torch.argsort(sum_cmat, descending=True)[: (n_closest + 1)]
rest_inds = torch.argsort(sum_cmat, descending=True)[(n_closest + 1) :]
return idx_aff_sum, rest_inds
def run_reducer(
pre_embs: torch.Tensor, target_spk_idx: int, merge_quantity: int, pre_clus_labels: torch.Tensor,
):
"""
Reduce the number of embedding vectors by merging the closest embedding vectors.
- This merging algorithm is based on the assumption that the closest embeddings
are the most redundant embedding vectors.
- The closest embedding vectors are chosen by selecting the highest top-N sum of
each column in a given affinity matrix.
- If merge_quantity is N, we merge (N+1) vectors into 1 embedding vector.
Thus, we reduce N embeddings in the original embedding vector set.
Example:
>>> merge_quantity = 1 # We merge 1+1 = 2 embedding vectors
>>> affinity_mat = [[1.0, 0.2, 0.8],
[0.2, 1.0, 0.4],
[0.8, 0.4, 1.0]]
>>> affinity_mat.sum(0)
[2.0, 1.6, 2.2]
The first and the third embedding vectors are merged into one embedding vector.
>>> index_mapping # (bypassed indices, merged indices)
([1], [0, 2])
Args:
pre_embs (Tensor):
Potential Embedding vectors to be merged
affinity_mat (Tensor):
The affinity matrix of the `pre_embs`
target_spk_idx (int):
The targeted speaker index for merging
merge_quantity (int):
The count of embeddings to be reduced
pre_clus_labels (list)
The original cluster (speaker) index
Returns:
merged_embs (torch.Tensor):
The merged embedding vectors.
merged_clus_labels (torch.Tensor):
The cluster (speaker) indices for the merged embedding vectors.
index_mapping (Tuple[torch.Tensor, torch.Tensor]):
A tuple containing the indices of the original embeddings that were not merged (`bypassed indices`)
and the indices of the new merged embeddings (`merged indices`).
"""
if pre_embs.shape[0] != pre_clus_labels.shape[0]:
raise ValueError("Dimension mismatch between `pre_embs` and `pre_clus_labels`.")
target_emb_index = torch.where(pre_clus_labels == target_spk_idx)[0]
org_size = target_emb_index.shape[0]
if merge_quantity > 0:
if merge_quantity > (target_emb_index.shape[0] - 1):
raise ValueError(
f"merge_quantity {merge_quantity} should not be larger than target_emb_index length: {target_emb_index.shape[0]-1}"
)
total_affinity_mat = getCosAffinityMatrix(pre_embs)
# Get the lower triangle of the affinity_mat array
affinity_mat = total_affinity_mat[:, target_emb_index][target_emb_index, :]
if affinity_mat.shape[0] != target_emb_index.shape[0]:
raise ValueError(
"Dimension mismatch between targeted speaker affinity `affinity_mat` and targeted speaker index `target_emb_index`."
)
# Get the indices of the closest embedding vectors
selected_inds, rest_inds = get_closest_embeddings(affinity_mat, merge_quantity)
spk_cluster_labels, selected_embs = pre_clus_labels[target_emb_index], pre_embs[target_emb_index]
# Note that we need to map the speaker-specific indices back through `target_emb_index`.
index_mapping = (target_emb_index[rest_inds.sort()[0]], target_emb_index[selected_inds])
# Merge the embeddings selected by `selected_inds` into a single averaged embedding
merged_embs, merged_clus_labels = merge_vectors(selected_inds, selected_embs, spk_cluster_labels)
if (org_size - merge_quantity) != merged_embs.shape[0]:
raise ValueError(
f"Reducer output {merged_embs.shape[0]} is not matched to the target quantity {org_size - merge_quantity}."
)
else:
merged_embs = pre_embs[target_emb_index]
merged_clus_labels = pre_clus_labels[target_emb_index]
index_mapping = (target_emb_index, torch.arange(0))
return merged_embs, merged_clus_labels, index_mapping
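# Illustrative sketch (not part of the original file): reducing one embedding for speaker 0 merges its
# two most similar vectors, so the returned set for that speaker shrinks by exactly `merge_quantity`.
# The embedding dimension of 192 is an assumption of this example.
def _example_run_reducer():
    pre_embs = torch.randn(6, 192)
    pre_clus_labels = torch.tensor([0, 0, 0, 0, 1, 1])
    merged_embs, merged_clus_labels, index_mapping = run_reducer(
        pre_embs=pre_embs, target_spk_idx=0, merge_quantity=1, pre_clus_labels=pre_clus_labels
    )
    # merged_embs has 3 rows (4 - 1) and merged_clus_labels is all zeros for speaker 0
    return merged_embs, merged_clus_labels, index_mapping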
def get_first_arg_index(mat: torch.Tensor, label: int) -> int:
"""
Get the index of the first element that matches the given `label`.
Args:
mat (Tensor):
Source matrix filled with indices
label (int):
Label for which we want to find the first occurring index
Returns:
(int) The first index of the given label
"""
return int(torch.where(mat == label)[0][0])
class OnlineSpeakerClustering(torch.nn.Module):
"""
Online clustering method for speaker diarization based on cosine similarity.
Regular Clustering Attributes:
max_num_speakers (int):
The upper bound for the number of speakers in each session
max_rp_threshold (float):
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.15.
enhanced_count_thres (int):
For short audio recordings, the clustering algorithm cannot
accumulate enough speaker profile information for each cluster.
Thus, function `getEnhancedSpeakerCount` employs anchor embeddings
(dummy representations) to mitigate the effect of cluster sparsity.
enhanced_count_thres = 40 is recommended.
sparse_search_volume (int):
Number of p_values we search during NME analysis.
The lower the value, the faster NME-analysis becomes;
values lower than 20 might cause poor parameter estimation.
fixed_thres (float):
A fixed threshold for finding p-closest neighbors in affinity matrix for clustering.
If fixed_thres value is provided, NME-analysis process will be skipped.
This value should be optimized on a development set to obtain a quality result.
Default is None and performs NME-analysis to estimate the threshold.
min_samples_for_nmesc (int):
The minimum number of samples required for NME clustering. This avoids
zero p_neighbour_lists. If the input has fewer segments than min_samples,
it is directed to the enhanced speaker counting mode.
sparse_search (bool):
Toggle sparse search mode. If True, limit the size of p_value_list to sparse_search_volume.
cuda (bool):
Use cuda for Eigen decomposition if cuda=True.
Additional Online Processing Attributes:
history_buffer_size (int):
- This is a buffer where diarization history is saved in the form of averaged speaker embedding vector.
- Values in the [50, 200] range are recommended; sessions with a larger number of
speakers require a bigger buffer size.
current_buffer_size (int):
- This is a buffer that processes the most recent speaker embedding vector inputs.
The current buffer is a first-in-first-out (FIFO) queue where the embeddings accepted earlier
get merged and saved to the history buffer.
- In general, the [50, 200] range is recommended and performance can be sensitive to this buffer size.
min_spk_counting_buffer_size (int):
Size of the speaker counting buffer. The number of speakers is estimated through a small buffer
and the final count is obtained by taking a majority vote.
min_frame_per_spk (int):
Below this number, the system considers the whole input segments as a single speaker.
p_update_freq (int):
Frequency (interval) of updating p_value for NMESC algorithm.
p_value_skip_frame_thres (int):
After `frame_index` passes this number, `p_value` estimation is skipped for inference speed
p_value_queue_size (int):
`p_value` buffer for major voting
use_temporal_label_major_vote (bool):
Boolean that determines whether to use temporal majority voting for the final speaker labels
temporal_label_major_vote_buffer_size (int):
Buffer size for temporal majority voting of the speaker labels
num_spk_stat (list):
List of speaker-count estimates for majority voting. The number of speakers is estimated
through majority voting over the `self.num_spk_stat` list.
p_value_hist (list):
List of p_values for major voting.
To save the computation time, p_value is estimated every `p_update_freq` frames and
saved to `self.p_value_hist`.
Attributes for counters and buffers in streaming system:
is_online (bool):
- If self.is_online is False:
FIFO queue does not push out any speaker embedding vector
- If self.is_online is True:
the FIFO queue starts pushing out speaker embedding vectors and saving them into the
history buffer.
max_embed_count (int):
The maximum number of segments the streaming system has ever seen.
This value keeps increasing as the system processes more and more segments.
memory_margin (int):
The margin that is added to keep the segmentation data in the streaming system
minimum_segments_per_buffer (int):
Maximum number of embedding vectors kept in history buffer per speaker.
Example:
history_buffer_size (history_n) = 100
max_num_speakers = 4
minimum_segments_per_buffer = 25
history_buffer_seg_end (int):
Index that indicates the boundary between history embedding sets and current processing buffer
when history embedding vectors and current input embedding vectors are concatenated into a
single matrix.
Attributes for history buffer:
history_embedding_buffer_emb (Tensor)
Tensor containing speaker embedding vectors for saving the history of the previous
speaker profile in the given audio session
history_embedding_buffer_label (Tensor)
Speaker label (cluster label) for embedding vectors saved in the history buffer
Y_fullhist (Tensor)
Tensor containing the speaker label hypothesis from start to current frame
"""
def __init__(
self,
max_num_speakers: int = 8,
max_rp_threshold: float = 0.15,
enhanced_count_thres: float = 40,
fixed_thres: float = -1.0,
sparse_search_volume: int = 10,
history_buffer_size: int = 150,
current_buffer_size: int = 150,
min_spk_counting_buffer_size: int = 3,
min_frame_per_spk: int = 15,
p_update_freq: int = 5,
p_value_skip_frame_thres: int = 50,
p_value_queue_size: int = 3,
use_temporal_label_major_vote: bool = False,
temporal_label_major_vote_buffer_size: int = 11,
cuda: bool = False,
):
super().__init__()
self.max_num_speakers = max_num_speakers
self.max_rp_threshold = max_rp_threshold
self.enhanced_count_thres = enhanced_count_thres
self.sparse_search_volume = sparse_search_volume
self.fixed_thres = fixed_thres
self.history_n = history_buffer_size
self.current_n = current_buffer_size
self.min_spk_counting_buffer_size = min_spk_counting_buffer_size
self.min_frame_per_spk = min_frame_per_spk
self.p_update_freq = p_update_freq
self.p_value_skip_frame_thres = p_value_skip_frame_thres
self.p_value_queue_size = p_value_queue_size
self.use_temporal_label_major_vote = use_temporal_label_major_vote
self.temporal_label_major_vote_buffer_size = temporal_label_major_vote_buffer_size
self.cuda = cuda
self.num_spk_stat: List[torch.Tensor] = [torch.tensor(1)]
self.p_value_hist: List[torch.Tensor] = [torch.tensor(2)]
# Initialize the counters and buffers in streaming system
self.is_online = False
self.max_embed_count = 0
self.memory_margin = 0
self.minimum_segments_per_buffer = int(self.history_n / self.max_num_speakers)
self.history_buffer_seg_end = 0
# Initialize the streaming buffer tensors
self.history_embedding_buffer_emb = torch.tensor([])
self.history_embedding_buffer_label = torch.tensor([])
self.Y_fullhist = torch.tensor([])
def onlineNMEanalysis(self, mat_in: torch.Tensor, frame_index: int) -> Tuple[int, int]:
"""
To save running time, the p-value is only estimated at the beginning of the session.
After switching to online mode, the system uses the most common estimated p-value.
Estimating the p-value requires plenty of computational resources, so estimating it
less frequently speeds up the clustering algorithm by a large margin.
Args:
mat_in (Tensor):
Tensor containing the affinity matrix for the current segments
frame_index (int):
Unique index for each segment and embedding vector
Returns:
est_num_of_spk: (int)
The estimated number of speakers.
p_hat_value: (int)
The estimated p-value from NMESC method.
"""
nmesc = NMESC(
mat_in,
max_num_speakers=self.max_num_speakers,
max_rp_threshold=self.max_rp_threshold,
sparse_search=True,
maj_vote_spk_count=False,
sparse_search_volume=self.sparse_search_volume,
fixed_thres=self.fixed_thres,
nme_mat_size=256,
parallelism=False,
device=mat_in.device,
cuda=self.cuda,
)
if len(self.p_value_hist) == 0 or (
frame_index < self.p_value_skip_frame_thres and frame_index % self.p_update_freq == 0
):
est_num_of_spk, p_hat_value = nmesc.forward()
self.p_value_hist.append(p_hat_value)
if len(self.p_value_hist) > self.p_value_queue_size:
self.p_value_hist.pop(0)
p_hat_int_list: List[int] = [int(p) for p in self.p_value_hist]
p_hat_value = torch.mode(torch.tensor(p_hat_int_list))[0].item()
output = nmesc.getEigRatio(p_hat_value)
g_p, est_num_of_spk = output[0], output[1].int()
return est_num_of_spk, p_hat_value
def speaker_counter_buffer(self, est_num_of_spk: int) -> torch.Tensor:
"""
Use a queue to avoid unstable speaker counting results.
Args:
est_num_of_spk (int):
Estimated number of speakers
Returns:
est_num_of_spk (torch.Tensor):
Estimated number of speakers from the speaker counting buffer.
"""
est_num_of_spk = torch.tensor(est_num_of_spk)
self.num_spk_stat.append(est_num_of_spk)
if len(self.num_spk_stat) > self.min_spk_counting_buffer_size:
self.num_spk_stat.pop(0)
num_spk_stat_tensor = torch.tensor([int(s) for s in self.num_spk_stat])
num_spks_bincount = torch.bincount(num_spk_stat_tensor)
est_num_of_spk = torch.argmax(num_spks_bincount)
return est_num_of_spk
def limit_frames_per_speaker(self, frame_index: int, est_num_of_spk: int) -> int:
"""
Cap the estimated number of speakers in proportion to the number of frames processed so far.
Args:
frame_index (int):
Unique index for each segment and embedding vector
est_num_of_spk (int):
Estimated number of speakers
Returns:
(int) Estimated number of speakers capped by `self.min_frame_per_spk`
"""
return min(est_num_of_spk, int(1 + frame_index // self.min_frame_per_spk))
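# Worked example (illustrative): with min_frame_per_spk = 15, after frame_index = 30 at most
# min(est_num_of_spk, 1 + 30 // 15) = min(est_num_of_spk, 3) speakers can be reported, so the
# reported speaker count is only allowed to grow as more frames are processed.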
def online_spk_num_estimation(self, mat_in: torch.Tensor, frame_index: int) -> Tuple[int, torch.Tensor]:
"""
Online version of speaker estimation involves speaker counting buffer and application of per-speaker
frame count limit.
Args:
mat_in (Tensor):
Raw affinity matrix containing similarity values of each pair of segments
frame_index (int):
Unique frame index of online processing pipeline
Returns:
est_num_of_spk (int):
Estimated number of speakers
affinity_mat (Tensor):
Affinity matrix after applying the affinity threshold with `p_hat_value`
"""
est_num_of_spk, p_hat_value = self.onlineNMEanalysis(mat_in, frame_index)
affinity_mat = getAffinityGraphMat(mat_in, p_hat_value)
raw_est_num_of_spk = self.speaker_counter_buffer(est_num_of_spk)
est_num_of_spk = self.limit_frames_per_speaker(frame_index, raw_est_num_of_spk.item())
return est_num_of_spk, affinity_mat
def prepare_embedding_update(
self, emb_in: torch.Tensor, segment_indexes_matrix: torch.Tensor
) -> Tuple[bool, int, torch.Tensor, torch.Tensor]:
"""
This function performs the following tasks:
1. Decide whether to extract more embeddings or not (by setting `is_update`)
(Only if an update is needed):
2. Calculate how many embeddings should be updated (set the `new_emb_n` variable)
3. Update the history embedding vectors and save them to `pre_embs`.
We only save the index and clustering label of each embedding.
- Case-1: The very first step
This branch handles the very first diarization loop, i.e., the very first reduction frame.
- Case-2: Number of embedding vectors is increased, therefore we need to update.
Since there are new embeddings, we push the same amount (new_emb_n)
of old embeddings to the history buffer.
We should also update `self.history_buffer_seg_end`, which is a pointer.
Update to history embeddings: emb_in[emb_idx_stt:emb_idx_end]
Update to history labels: self.Y_fullhist[label_stt:label_end]
- Case-3: Number of embedding vectors is decreased
If the number of embeddings is decreased compared to the last trial,
then skip embedding merging.
Variables:
hist_curr_boundary (int):
The current boundary between the history buffer and the current buffer.
This is the new history-current buffer boundary, while self.history_buffer_seg_end is the old one.
Thus, the new set of embedding vectors is collected from
`label_stt=self.history_buffer_seg_end` to `label_end=hist_curr_boundary`.
total_segments_processed_count (int):
The number of segments that are processed so far in integer format.
Args:
emb_in (Tensor):
Tensor containing embedding vectors
Dimensions: (number of embedding vectors) x (embedding dimension)
segment_indexes_matrix (Tensor):
Tensor containing unique segment (embedding vector) index
Returns:
is_update (bool):
Boolean indicates whether to update speaker embedding vectors.
new_emb_n (int):
The number of embedding vectors exceeding the FIFO queue size.
`new_emb_n` is also the number of embedding vectors that need to be merged into the history buffer.
pre_embs (Tensor):
Embedding vector matrix before merging.
The subset of `pre_embs` embedding vectors will be merged.
Dimensions: (number of embedding vectors) x (embedding dimension)
pre_clus_labels (Tensor):
A set of clustering labels for each embedding vector in `pre_embs`.
"""
total_segments_processed_count = int(segment_indexes_matrix[-1] + 1)
hist_curr_boundary = int(total_segments_processed_count - self.current_n)
new_emb_n: int = 0
pre_embs: torch.Tensor = torch.empty(0)
pre_clus_labels: torch.Tensor = torch.empty(0)
is_update = True
if total_segments_processed_count > self.max_embed_count:
# Case-1: The very first step
if len(self.history_embedding_buffer_emb) == 0:
new_emb_n = total_segments_processed_count - (self.current_n + self.history_n)
hist_curr_boundary_emb_idx = get_first_arg_index(segment_indexes_matrix, hist_curr_boundary)
pre_embs = emb_in[:hist_curr_boundary_emb_idx]
pre_clus_labels = self.Y_fullhist[:hist_curr_boundary]
# Case-2: Number of embedding vectors is increased, need to update history and its label
else:
# Calculate the number of new embedding vectors: `new_emb_n`
label_stt, label_end = self.history_buffer_seg_end, hist_curr_boundary
new_emb_n = label_end - label_stt
# Add embedding vectors to `pre_embs` so that we can merge it with reducer function.
emb_idx_stt = int(get_first_arg_index(segment_indexes_matrix, label_stt))
emb_idx_end = int(get_first_arg_index(segment_indexes_matrix, label_end))
pre_embs = torch.vstack((self.history_embedding_buffer_emb, emb_in[emb_idx_stt:emb_idx_end]))
# Update labels for `pre_embs`
pre_clus_labels = torch.hstack(
(self.history_embedding_buffer_label, self.Y_fullhist[label_stt:label_end])
)
if new_emb_n > self.current_n:
raise ValueError(
"new_emb_n should be less than or equal to current buffer size (self.current_n)."
f" Getting too many segments: {new_emb_n} for the given current buffer size {self.current_n}."
" Please either (1) increase buffer size or (2) use longer segment lengths to get less number of segments."
)
elif new_emb_n <= 0:
raise ValueError("Segment counting error. `new_emb_n` should be a positve integer number.")
if pre_embs.shape[0] != pre_clus_labels.shape[0]:
raise ValueError(
"`pre_embs` and `pre_clus_labels` should have the same length, "
f"but got {pre_embs.shape[0]} and {pre_clus_labels.shape[0]} respectively."
)
# Case-3: Number of embedding vectors is not increased.
else:
# There will be no embedding update, so new_emb_n is 0, pre_embs and pre_clus_labels are empty.
is_update = False
# Update the history buffer index for the next step
self.history_buffer_seg_end = hist_curr_boundary
self.max_embed_count = max(total_segments_processed_count, self.max_embed_count)
return is_update, new_emb_n, pre_embs, pre_clus_labels
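# Illustrative example (not part of the original code, hypothetical values): with
# history_n = 20, current_n = 10 and a last segment index of 34, we get
# total_segments_processed_count = 35 and hist_curr_boundary = 35 - 10 = 25.
# If history_buffer_seg_end was 22 on the previous step, then new_emb_n = 25 - 22 = 3
# embeddings are moved from the FIFO queue into the history buffer and merged.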
def make_constant_length_emb(self, emb_in: torch.Tensor, base_segment_indexes: torch.Tensor) -> torch.Tensor:
"""
This function deals with edge cases where the number of segments decreases and the number of embeddings falls
short of the number of labels.
- The ASR decoder occasionally returns fewer words than in the previous frame.
- In this case, we obtain fewer embedding vectors for a short period of time. To match the pre-defined
length, the last embedding vector is repeated to fill the gap.
- The repeated embedding is soon replaced by actual embeddings once the system receives new frames.
Args:
emb_in (Tensor):
If self.is_online is False:
`pre_embs` contains only current speaker embedding inputs, which is FIFO queue
If self.is_online is True:
`pre_embs` contains history buffer and FIFO queue
base_segment_indexes (Tensor):
Tensor containing unique segment (embedding vector) index
Returns:
emb_curr (Tensor):
Length preserved speaker embedding vectors
"""
curr_clustered_segments = torch.where(base_segment_indexes >= self.history_buffer_seg_end)[0]
# Check if the current buffer result is falling short compared to `self.current_n`.
if emb_in[curr_clustered_segments].shape[0] < self.current_n:
delta_count = self.current_n - emb_in[curr_clustered_segments].shape[0]
fill_in_emb = torch.tile(emb_in[curr_clustered_segments][-1], (delta_count, 1))
emb_curr = torch.vstack((emb_in[curr_clustered_segments], fill_in_emb))
else:
emb_curr = emb_in[curr_clustered_segments]
return emb_curr
def update_speaker_history_buffer(
self, emb_in: torch.Tensor, base_segment_indexes: torch.Tensor
) -> Tuple[torch.Tensor, bool]:
"""
Merge the given embedding vectors based on the calculated affinity matrix.
If `is_update` is True, update the history buffer.
Args:
emb_in (Tensor):
If self.is_online is False:
`emb` contains only current speaker embedding inputs, which is FIFO queue
If self.is_online is True:
`emb` contains history buffer and FIFO queue
base_segment_indexes (Tensor):
Tensor containing unique segment (embedding vector) index
Returns:
history_embedding_buffer_emb (Tensor):
Matrix containing merged embedding vectors of the previous frames.
This matrix is referred to as "history buffer" in this class.
is_update (bool):
Boolean indicates whether to update speaker
Example:
at the frame index where `is_online` turns to True:
|------hist-buffer------|-----FIFO-queue-----|
self.history_n = 20
self.current_n = 10
Step (1)
|-----------------------|ABCDEF--------------|
If we get two more segments, "XY", as shown below:
history buffer = 20
current buffer = 12
Step (2)
|-----------------------|ABCDEF--------------XY|
|---------emb_in-------|
The newly accepted embeddings go through a FIFO queue (first come, first merge)
history buffer = 22
current buffer = 10
Step (3)
|-----------------------AB|CDEF--------------XY|
|---------pre_embs--------|
After merging (reducing) the embedding set gets back to the original size:
history buffer = 20
current buffer = 10
Step (4)
|======================|CDEF--------------XY|
|-----hist_emb_buff----|
After clustering, `self.Y_fullhist` is updated as:
|0000000000011111111111|11110000110010010011|
`self.Y_fullhist` stores the speaker labels of all segments processed so far.
self.history_buffer_seg_end (int):
The total number of segments that have been merged from the beginning of the session.
(=`hist_curr_boundary`)
"""
is_update, new_emb_n, pre_embs, pre_clus_labels = self.prepare_embedding_update(emb_in, base_segment_indexes)
# Update the history/current_buffer boundary cursor
total_emb, total_cluster_labels = [], []
if is_update:
# Calculate how many embedding vectors should be reduced per speaker
class_target_vol = get_merge_quantity(
num_to_be_removed=new_emb_n,
pre_clus_labels=pre_clus_labels,
min_count_per_cluster=self.minimum_segments_per_buffer,
)
# Merge the segments in the history buffer
for spk_idx, target_num in enumerate(list(class_target_vol)):
merged_embs, merged_clus_labels, _ = run_reducer(
pre_embs=pre_embs,
target_spk_idx=spk_idx,
merge_quantity=target_num,
pre_clus_labels=pre_clus_labels,
)
total_emb.append(merged_embs)
total_cluster_labels.append(merged_clus_labels)
# Update the speaker history buffer
self.history_embedding_buffer_emb = torch.vstack(total_emb)
self.history_embedding_buffer_label = torch.hstack(total_cluster_labels)
if self.history_embedding_buffer_emb.shape[0] != self.history_n:
raise ValueError("History embedding size is not maintained correctly.")
if len(self.history_embedding_buffer_label) != self.history_n:
raise ValueError("History label size is not maintained correctly.")
else:
total_emb.append(self.history_embedding_buffer_emb)
total_cluster_labels.append(self.history_embedding_buffer_label)
# `emb_curr` is the most recent set of embeddings (the current FIFO queue).
emb_curr = self.make_constant_length_emb(emb_in, base_segment_indexes)
total_emb.append(emb_curr)
# Before performing clustering, we attach the `current_n` estimated speaker labels
# from the previous clustering result.
total_cluster_labels.append(self.Y_fullhist[-self.current_n :])
history_and_current_emb = torch.vstack(total_emb)
history_and_current_labels = torch.hstack(total_cluster_labels)
if history_and_current_emb.shape[0] != len(history_and_current_labels):
raise ValueError("`history_and_current_emb` has a mismatch in length with `history_and_current_labels`.")
return history_and_current_emb, is_update
def get_reduced_mat(self, emb_in: torch.Tensor, base_segment_indexes: torch.Tensor) -> Tuple[torch.Tensor, bool]:
"""
Choose whether to add embeddings to the memory or not.
The processing buffer has a size of (self.current_n + self.history_n).
Case-1: If margin_seg_n > 0, we have more embedding vectors than the processing buffer can hold.
- `is_online` is set to `True`.
- Reduce the number of embedding vectors by merging the closest ones
(call the `update_speaker_history_buffer` function).
Case-2: If margin_seg_n <= 0, we can accept more embedding vectors and have yet to fill the processing buffer.
- `is_online` is set to `False`.
- `merged_emb` is simply the raw input `emb_in`.
- `add_new` is `True`, since we are adding more embedding vectors to `merged_emb`.
Args:
emb_in (Tensor):
If self.is_online is False:
`emb` contains only current speaker embedding inputs
base_segment_indexes (Tensor):
Tensor containing unique segment (embedding vector) index
Returns:
merged_emb (Tensor):
Matrix containing merged embedding vectors of the previous frames.
This matrix is referred to as "history buffer" in this class.
If self.is_online is False:
`merged_emb` contains only current speaker embedding inputs
If self.is_online is True:
`merged_emb` is a concatenated matrix with history embedding and current embedding inputs
add_new (bool):
Boolean that indicates whether there is a new set of segments. Depending on the VAD timestamps,
the number of subsegments can occasionally decrease. If `add_new=True`, then the newly
acquired cluster labels are added.
"""
margin_seg_n = emb_in.shape[0] - (self.current_n + self.history_n)
if len(self.Y_fullhist) == 0 and margin_seg_n > 0:
raise ValueError(
"The number of incoming embedding vectors is larger than the total processing buffer size."
"Please either (1) increase the history and current buffer size (2) or use longer segment lengths to reduce number of segments."
)
if margin_seg_n > 0:
self.is_online = True
merged_emb, add_new = self.update_speaker_history_buffer(
emb_in=emb_in, base_segment_indexes=base_segment_indexes
)
else:
self.is_online = False
merged_emb = emb_in
add_new = True
return merged_emb, add_new
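# Illustrative example (not part of the original code, hypothetical values): with
# history_n = 150 and current_n = 150, an input of 280 embeddings gives
# margin_seg_n = -20, so the raw embeddings are returned unchanged (offline mode);
# an input of 320 embeddings gives margin_seg_n = 20, which switches the clusterer
# to online mode and triggers the history-buffer reduction above.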
def match_labels(self, Y_merged: torch.Tensor, add_new: bool) -> torch.Tensor:
"""
This function matches the newly generated clustering label sequence with the existing speaker labels in the history buffer.
`self.history_buffer_seg_end` is an integer index indicating up to which point of `self.Y_fullhist` the history embeddings cover.
If the embedding reduction is done correctly, we should discard the first `self.history_n` labels and take
the labels from `self.history_n` to `len(Y_merged)` from the new clustering output `Y_merged`.
Args:
Y_merged (Tensor):
The newly generated clustering label sequence that may have different permutations with the existing
speaker labels in the history buffer.
add_new (bool):
This variable indicates whether there is a new set of segments. Depending on the VAD timestamps,
the number of subsegments can occasionally decrease. If `add_new=True`, then the newly
acquired cluster labels are added.
Returns:
Y_out (Tensor):
Permutation-matched speaker labels based on history buffer
"""
if self.is_online:
# Online clustering mode with history buffer
Y_old = torch.hstack((self.history_embedding_buffer_label, self.Y_fullhist[self.history_buffer_seg_end :]))
# Stitch the old history and new cluster labels
Y_matched = stitch_cluster_labels(Y_old=Y_old, Y_new=Y_merged).to(Y_merged.device)
if add_new:
if Y_matched[self.history_n :].shape[0] != self.current_n:
raise ValueError("Update point sync is not correct.")
# Concatenate the newly generated speaker labels
Y_out = torch.hstack((self.Y_fullhist[: self.history_buffer_seg_end], Y_matched[self.history_n :]))
self.Y_fullhist = Y_out
else:
# Do not update cumulative labels since there are no new segments.
Y_out = self.Y_fullhist
else:
# If no memory is used, offline clustering is applied.
Y_out = stitch_cluster_labels(Y_old=self.Y_fullhist, Y_new=Y_merged).to(Y_merged.device)
self.Y_fullhist = Y_out
return Y_out
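# Illustrative example (not part of the original code): if the previous labels were
# [0, 0, 1, 1] and the new clustering returns the permuted sequence [1, 1, 0, 0] for
# the same segments, stitch_cluster_labels is expected to map it back to [0, 0, 1, 1]
# so that speaker identities stay consistent across frames.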
def forward(
self,
curr_emb,
base_segment_indexes,
max_num_speakers: int,
max_rp_threshold: float,
enhanced_count_thres: int,
sparse_search_volume: int,
frame_index: int,
cuda: bool = False,
) -> torch.Tensor:
"""
Wrapper function for torch.jit.script compatibility.
NOTE: jit scripted classes only contain the methods which are included in the computation graph in the forward pass.
"""
Y = self.forward_infer(
curr_emb=curr_emb,
base_segment_indexes=base_segment_indexes,
max_num_speakers=max_num_speakers,
max_rp_threshold=max_rp_threshold,
enhanced_count_thres=enhanced_count_thres,
sparse_search_volume=sparse_search_volume,
frame_index=frame_index,
cuda=cuda,
)
return Y
def forward_infer(
self,
curr_emb: torch.Tensor,
base_segment_indexes: torch.Tensor,
max_num_speakers: int = 4,
max_rp_threshold: float = 0.15,
enhanced_count_thres: int = 40,
sparse_search_volume: int = 10,
fixed_thres: float = -1.0,
frame_index: int = 0,
cuda: bool = False,
) -> torch.Tensor:
"""
Perform speaker clustering in online mode. The embedding vector set `emb` is expected to contain
history embeddings in order to count the number of speakers.
Args:
curr_emb (Tensor):
Current embedding vector input.
base_segment_indexes (Tensor):
Tensor containing unique segment (embedding vector) index
max_num_speakers (int):
Maximum number of speakers to be detected during online diarization session
max_rp_threshold (float):
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.15.
frame_index (int):
Unique index for each segment (also each embedding vector)
cuda (bool):
Boolean that determines whether CUDA is used or not
Returns:
Y (Tensor):
Speaker labels for history embeddings and current embedding inputs
"""
self.max_num_speakers = max_num_speakers
self.max_rp_threshold = max_rp_threshold
self.enhanced_count_thres = enhanced_count_thres
self.sparse_search_volume = sparse_search_volume
self.fixed_thres = fixed_thres
# Merge the closest embeddings and reduce the size of the embedding count.
if cuda and (curr_emb.device == torch.device("cpu") or base_segment_indexes.device == torch.device("cpu")):
raise ValueError(f"CUDA is enabled but the input {curr_emb} or {base_segment_indexes} is not on the GPU.")
merged_embs, add_new = self.get_reduced_mat(emb_in=curr_emb, base_segment_indexes=base_segment_indexes,)
# Perform clustering on the embedding matrix containing the history buffer and the current FIFO buffer embeddings
if merged_embs.shape[0] == 1:
Y = torch.zeros((1,), dtype=torch.int32)
else:
mat = getCosAffinityMatrix(merged_embs)
est_num_of_spk, affinity_mat = self.online_spk_num_estimation(mat, frame_index)
spectral_model = SpectralClustering(n_clusters=est_num_of_spk, cuda=cuda, device=merged_embs.device)
Y = spectral_model.forward(affinity_mat).to(merged_embs.device)
# Match the permutation of the newly obtained speaker labels and the previous labels
merged_clus_labels = self.match_labels(Y_merged=Y, add_new=add_new)
return merged_clus_labels
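# ---------------------------------------------------------------------------
# Minimal illustration (not part of the NeMo implementation): a self-contained
# sketch of the history/FIFO bookkeeping described in `prepare_embedding_update`
# and `get_reduced_mat`. Real embeddings and the per-speaker merge step are
# replaced by simple counters, and all names and values below are hypothetical.
if __name__ == "__main__":
    history_n, current_n = 20, 10
    history_buffer_seg_end = 0  # boundary between history buffer and FIFO queue
    total_segments = 0

    for frame, new_segments in enumerate((12, 10, 5, 8, 9)):
        total_segments += new_segments
        overflow = total_segments - (history_n + current_n)
        if overflow > 0:
            hist_curr_boundary = total_segments - current_n
            if history_buffer_seg_end == 0:
                # Roughly Case-1: the very first transition to online mode.
                new_emb_n = overflow
            else:
                # Roughly Case-2: as many segments as arrived since the last boundary.
                new_emb_n = hist_curr_boundary - history_buffer_seg_end
            history_buffer_seg_end = hist_curr_boundary
            print(f"frame {frame}: online, merge {new_emb_n} segments into history buffer")
        else:
            print(f"frame {frame}: offline, buffer not full ({total_segments} segments)")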
|
NeMo-main
|
nemo/collections/asr/parts/utils/online_clustering.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The original code of Linear Sum Assignment solver is
# from: https://github.com/scipy/scipy/blob/v0.18.1/scipy/optimize/_hungarian.py
# The following is the full text of the license:
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment
# problem. Taken from scikit-learn. Based on original code by Brian Clapper,
# adapted to NumPy by Gael Varoquaux.
# Further improvements by Ben Root, Vlad Niculae and Lars Buitinck.
# Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# License: 3-clause BSD
import torch
@torch.jit.script
def unravel_index(index: int, shape: torch.Tensor):
"""
Unravel the index input to fit the given shape.
This function is needed for torch.jit.script compatibility.
Args:
index (int): The index to unravel.
shape (Tensor): The shape to unravel the index to.
Returns:
Tensor: The unraveled index.
"""
out = []
shape = torch.flip(shape, dims=(0,))
for dim in shape:
out.append(index % dim)
index = index // dim
out = torch.tensor([int(x.item()) for x in out])
return torch.flip(out, dims=(0,))
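# Illustrative example (not part of the original code):
# unravel_index(5, torch.tensor([2, 3])) returns tensor([1, 2]),
# i.e. flat position 5 in a 2 x 3 matrix corresponds to row 1, column 2.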
@torch.jit.script
class LinearSumAssignmentSolver(object):
"""
A Solver class for the linear sum assignment (LSA) problem.
Designed for torch.jit.script compatibility in NeMo.
The LSA problem is also referred to as the bipartite matching problem. An LSA problem is described
by a matrix `cost_mat`, where each cost_mat[i,j] is the cost of matching vertex i of the first partite
set (e.g. a "worker") and vertex j of the second set (e.g. a "job").
Thus, the goal of an LSA solver is to find a complete assignment of column elements to row elements with
minimal total cost. Note that the solution may not be unique; multiple assignments can
yield the same minimal cost.
An LSA solver is needed for the following tasks in NeMo:
- Permutation Invariant Loss (PIL) for diarization model training
- Label permutation matching for online speaker diarization
- Concatenated minimum-permutation Word Error Rate (cp-WER) calculation
This implementation is based on the LAP solver from scipy:
https://github.com/scipy/scipy/blob/v0.18.1/scipy/optimize/_hungarian.py
The scipy implementation comes with the following license:
Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux
Author: Brian M. Clapper, Gael Varoquaux
License: 3-clause BSD
References
1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
2. https://en.wikipedia.org/wiki/Hungarian_algorithm
3. https://github.com/scipy/scipy/blob/v0.18.1/scipy/optimize/_hungarian.py
Attributes:
cost_mat (Tensor): 2D cost matrix. The number of columns must not be smaller than the number of rows.
row_uncovered (Tensor): 1D matrix containing boolean values indicating whether a row is covered.
col_uncovered (Tensor): 1D matrix containing boolean values indicating whether a column is covered.
zero_row (Tensor): 1D matrix containing the row index of the last zero found.
zero_col (Tensor): 1D matrix containing the column index of the last zero found.
path (Tensor): 2D matrix containing the path taken through the matrix.
marked (Tensor): 2D matrix containing the marked zeros.
"""
def __init__(self, cost_matrix: torch.Tensor):
# The main cost matrix
self.cost_mat = cost_matrix
row_len, col_len = self.cost_mat.shape
# Initialize the solver state
self.zero_row = torch.tensor(0, dtype=torch.long).to(cost_matrix.device)
self.zero_col = torch.tensor(0, dtype=torch.long).to(cost_matrix.device)
# Initialize the covered matrices
self.row_uncovered = torch.ones(row_len, dtype=torch.bool).to(cost_matrix.device)
self.col_uncovered = torch.ones(col_len, dtype=torch.bool).to(cost_matrix.device)
# Initialize the path matrix and the mark matrix
self.path = torch.zeros((row_len + col_len, 2), dtype=torch.long).to(cost_matrix.device)
self.marked = torch.zeros((row_len, col_len), dtype=torch.long).to(cost_matrix.device)
def _reset_uncovered_mat(self):
"""
Clear all covered matrix cells and assign `True` to all uncovered elements.
"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
def _step1(self):
"""
Step 1
Goal: Subtract the smallest element of each row from its elements.
- All elements of the matrix are now non-negative.
- Therefore, an assignment of total cost 0 is the minimum cost assignment.
- This operation leads to at least one zero in each row.
Procedure:
- For each row of the matrix, find the smallest element and subtract it from every element in its row.
- Go to Step 2.
"""
self.cost_mat -= torch.min(self.cost_mat, dim=1)[0].unsqueeze(1)
return 2
def _step2(self):
"""
Step 2
Goal: Make sure assignment with cost sum 0 is feasible.
Procedure:
- Find a zero in the resulting cost matrix.
- If there are no marked zeros in its row or column, mark the zero.
- Repeat for each element in the matrix.
- Go to step 3.
"""
ind_out = torch.where(self.cost_mat == 0)
ind, val = list(ind_out[0]), list(ind_out[1])
for i, j in zip(ind, val):
if self.col_uncovered[j] and self.row_uncovered[i]:
self.marked[i, j] = 1
self.col_uncovered[j] = False
self.row_uncovered[i] = False
self._reset_uncovered_mat()
return 3
def _step3(self) -> int:
"""
Step 3
Goal: All zeros in the matrix must be covered by the minimum number of rows and columns.
Procedure:
- Cover each column containing a marked zero.
- If n columns are covered, the marked zeros describe a complete set of unique assignments.
In this case, Go to Step 0 (Done state)
- Otherwise, Go to Step 4.
"""
marked = self.marked == 1
self.col_uncovered[torch.any(marked, dim=0)] = False
if marked.sum() < self.cost_mat.shape[0]:
return 4 # Go to step 4
else:
return 0 # Go to step 0 (Done state)
def _step4(self, bypass: bool = False) -> int:
"""
Step 4
Goal: Find non-covered zeros and prime them, adjusting the covered rows and columns as needed.
Procedure:
- Find a non-covered zero and put a prime mark on it.
- If there is no marked zero in the row containing this primed zero, Go to Step 5.
- Otherwise, cover this row and uncover the column containing the marked zero.
- Continue in this manner until there are no uncovered zeros left.
- Save the smallest uncovered value.
- Go to Step 6.
"""
# Convert the boolean zero-mask to int so it can be combined with the cover masks
cost_mat = (self.cost_mat == 0).int()
covered_cost_mat = cost_mat * self.row_uncovered.unsqueeze(1)
covered_cost_mat *= self.col_uncovered.long()
row_len, col_len = self.cost_mat.shape
if not bypass:
while True:
urv = unravel_index(torch.argmax(covered_cost_mat).item(), torch.tensor([row_len, col_len]))
row, col = int(urv[0].item()), int(urv[1].item())
if covered_cost_mat[row, col] == 0:
return 6
else:
self.marked[row, col] = 2
# Find the first marked element in the row
mark_col = torch.argmax((self.marked[row] == 1).int())
if self.marked[row, mark_col] != 1: # No marked element in the row
self.zero_row = torch.tensor(row)
self.zero_col = torch.tensor(col)
return 5
else:
col = mark_col
self.row_uncovered[row] = False
self.col_uncovered[col] = True
covered_cost_mat[:, col] = cost_mat[:, col] * self.row_uncovered
covered_cost_mat[row] = 0
return 0
def _step5(self) -> int:
"""
Step 5
Goal: Construct a series of alternating primed and marked zeros as follows.
Procedure:
- Let Z0 represent the uncovered primed zero found in Step 4.
- Let Z1 denote the marked zero in the column of Z0 (if any).
- Let Z2 denote the primed zero in the row of Z1 (there will always be one).
- Continue until the series terminates at a primed zero that has no marked zero in its column.
- Unmark each marked zero of the series.
- Mark each primed zero of the series.
- Erase all primes and uncover every line in the matrix.
- Return to Step 3
"""
count = torch.tensor(0)
path = self.path
path[count, 0] = self.zero_row.long()
path[count, 1] = self.zero_col.long()
while True: # Unmark each marked zero of the series
# Find the first marked element in the col defined by the path (= `val`)
row = torch.argmax((self.marked[:, path[count, 1]] == 1).int())
if self.marked[row, path[count, 1]] != 1:
# Could not find one
break
else:
count += 1
path[count, 0] = row
path[count, 1] = path[count - 1, 1]
# Find the first prime element in the row defined by the first path step
col = int(torch.argmax((self.marked[path[count, 0]] == 2).int()))
if self.marked[row, col] != 2:
col = -1
count += 1
path[count, 0] = path[count - 1, 0]
path[count, 1] = col
# Convert paths
for i in range(int(count.item()) + 1):
if self.marked[path[i, 0], path[i, 1]] == 1:
self.marked[path[i, 0], path[i, 1]] = 0
else:
self.marked[path[i, 0], path[i, 1]] = 1
self._reset_uncovered_mat()
# Remove all prime markings in marked matrix
self.marked[self.marked == 2] = 0
return 3
def _step6(self) -> int:
"""
Step 6
Goal: Prepare for another iteration by modifying the cost matrix.
Procedure:
- Add the value found in Step 4 to every element of each covered row.
- Subtract it from every element of each uncovered column.
- Return to Step 4 without altering any marks, primes, or covered lines.
"""
if torch.any(self.row_uncovered) and torch.any(self.col_uncovered):
row_minval = torch.min(self.cost_mat[self.row_uncovered], dim=0)[0]
minval = torch.min(row_minval[self.col_uncovered])
self.cost_mat[~self.row_uncovered] += minval
self.cost_mat[:, self.col_uncovered] -= minval
return 4
@torch.jit.script
def linear_sum_assignment(cost_matrix: torch.Tensor, max_size: int = 100):
"""
Launch the linear sum assignment algorithm on a cost matrix.
Args:
cost_matrix (Tensor): The cost matrix of shape (N, M); if M < N, the matrix is transposed internally.
max_size (int): Maximum allowed size of either dimension of the cost matrix. Default is 100.
Returns:
row_index (Tensor): The row indices of the optimal assignments.
col_index (Tensor): The column indices of the optimal assignments.
"""
cost_matrix = cost_matrix.clone().detach()
if len(cost_matrix.shape) != 2:
raise ValueError(f"2-d tensor is expected but got a {cost_matrix.shape} tensor")
if max(cost_matrix.shape) > max_size:
raise ValueError(
f"Cost matrix size {cost_matrix.shape} is too large. The maximum supported size is {max_size}x{max_size}."
)
# The algorithm expects more columns than rows in the cost matrix.
if cost_matrix.shape[1] < cost_matrix.shape[0]:
cost_matrix = cost_matrix.T
transposed = True
else:
transposed = False
lap_solver = LinearSumAssignmentSolver(cost_matrix)
f_int: int = 0 if 0 in cost_matrix.shape else 1
# while step is not Done (step 0):
# NOTE: torch.jit.script does not support getattr with a string argument.
# Do not use getattr(lap_solver, f"_step{f_int}")()
while f_int != 0:
if f_int == 1:
f_int = lap_solver._step1()
elif f_int == 2:
f_int = lap_solver._step2()
elif f_int == 3:
f_int = lap_solver._step3()
elif f_int == 4:
f_int = lap_solver._step4()
elif f_int == 5:
f_int = lap_solver._step5()
elif f_int == 6:
f_int = lap_solver._step6()
if transposed:
marked = lap_solver.marked.T
else:
marked = lap_solver.marked
row_index, col_index = torch.where(marked == 1)
return row_index, col_index
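# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): solve a small
# assignment problem. The cost matrix is the classic example from the scipy
# documentation; the optimal matching is expected to have a total cost of 5.
if __name__ == "__main__":
    cost = torch.tensor([[4.0, 1.0, 3.0], [2.0, 0.0, 5.0], [3.0, 2.0, 2.0]])
    row_index, col_index = linear_sum_assignment(cost)
    total_cost = cost[row_index, col_index].sum()
    print(row_index, col_index, total_cost)  # expected minimum total cost: 5.0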
|
NeMo-main
|
nemo/collections/asr/parts/utils/optimization_utils.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/asr/parts/utils/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import jit
def phase_vocoder(D: np.ndarray, rate: float, phi_advance: np.ndarray, scale_buffer: np.ndarray):
"""
Optimized implementation of phase vocoder from Librosa.
Reference implementation:
- https://librosa.github.io/librosa/generated/librosa.core.phase_vocoder.html
Args:
D: Complex spectrograms of shape [d, t, complex=2].
rate: Speed rate, must be float greater than 0.
phi_advance: Precomputed phase advance buffer array of length [n_fft + 1]
scale_buffer: Precomputed numpy buffer array of length [n_fft + 1]
Returns:
Complex64 ndarray of shape [d, t / rate, complex=2]
"""
time_steps = np.arange(0, D.shape[1], rate, dtype=np.float64)
# Create an empty output array
d_stretch = np.zeros((D.shape[0], len(time_steps)), D.dtype, order='F')
# Phase accumulator; initialize to the first sample
phase_acc = np.angle(D[:, 0])
# Pad 0 columns to simplify boundary logic
D = np.pad(D, [(0, 0), (0, 2)], mode='constant')
d_stretch = _phase_vocoder_kernel(D, time_steps, phi_advance, d_stretch, phase_acc, scale_buffer)
return d_stretch
@jit(nopython=True, nogil=True)
def _phase_vocoder_kernel(D, time_steps, phi_advance, d_stretch, phase_acc, scale_buffer):
"""
Numba optimized kernel to compute the phase vocoder step.
Args:
D: Complex spectrograms of shape [d, t, complex=2].
time_steps: Numpy ndarray of linearly spaced time steps, shape = [t]
phi_advance: Precomputed phase advance buffer array of length [n_fft + 1]
d_stretch: Output complex matrix of shape [d, t / rate, complex=2]
phase_acc: Phase accumulator initialized to first sample of shape [d, complex=2]
scale_buffer: Precomputed numpy buffer array of length [n_fft + 1]
Returns:
Complex64 ndarray of shape [d, t / rate, complex=2]
"""
two_pi = 2.0 * np.pi
for (t, step) in enumerate(time_steps):
columns = D[:, int(step) : int(step + 2)]
columns_0 = columns[:, 0]
columns_1 = columns[:, 1]
# Weighting for linear magnitude interpolation
alpha = np.mod(step, 1.0)
mag = (1.0 - alpha) * np.abs(columns_0) + alpha * np.abs(columns_1)
# Store to output array
d_stretch[:, t] = mag * np.exp(1.0j * phase_acc)
# Compute phase advance
dphase = np.angle(columns_1) - np.angle(columns_0) - phi_advance
# Wrap to -pi:pi range
scale = dphase / two_pi
np.round(scale, 0, scale_buffer)
dphase = dphase - two_pi * scale_buffer
# Accumulate phase
phase_acc += phi_advance + dphase
return d_stretch
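# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): stretch a
# random complex "spectrogram" by a factor of 1.5. The `phi_advance` buffer follows
# the librosa convention of a linearly spaced per-hop phase advance; all parameter
# values below are hypothetical and only demonstrate the expected shapes.
if __name__ == "__main__":
    n_fft, hop_length, rate = 512, 128, 1.5
    num_bins, num_frames = n_fft // 2 + 1, 100
    rng = np.random.default_rng(0)
    D = rng.standard_normal((num_bins, num_frames)) + 1j * rng.standard_normal((num_bins, num_frames))
    phi_advance = np.linspace(0, np.pi * hop_length, num_bins)
    scale_buffer = np.zeros(num_bins)
    D_stretch = phase_vocoder(D, rate, phi_advance, scale_buffer)
    print(D.shape, D_stretch.shape)  # the time axis shrinks roughly by a factor of `rate`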
|
NeMo-main
|
nemo/collections/asr/parts/utils/numba_utils.py
|