# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the NVIDIA CORPORATION nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************

import torch
import random

import common.layers as layers
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu


class MelAudioLoader(torch.utils.data.Dataset):
    """
    1) loads audio, text pairs
    2) computes mel-spectrograms from audio files
    """

    def __init__(self, dataset_path, audiopaths_and_text, segment_length,
                 n_mel_channels, max_wav_value, sampling_rate, filter_length,
                 hop_length, win_length, mel_fmin, mel_fmax, args):
        self.audiopaths_and_text = load_filepaths_and_text(
            dataset_path, audiopaths_and_text)
        self.max_wav_value = max_wav_value
        self.sampling_rate = sampling_rate
        self.stft = layers.TacotronSTFT(
            filter_length, hop_length, win_length,
            n_mel_channels, sampling_rate, mel_fmin, mel_fmax)
        self.segment_length = segment_length

        # Shuffle the file list with a fixed seed so runs are reproducible.
        random.seed(1234)
        random.shuffle(self.audiopaths_and_text)

    def get_mel_audio_pair(self, filename):
        audio, sampling_rate = load_wav_to_torch(filename)

        if sampling_rate != self.stft.sampling_rate:
            raise ValueError("{} SR doesn't match target {} SR".format(
                sampling_rate, self.stft.sampling_rate))

        # Take a random fixed-length segment; zero-pad clips that are too short.
        if audio.size(0) >= self.segment_length:
            max_audio_start = audio.size(0) - self.segment_length
            audio_start = random.randint(0, max_audio_start)
            audio = audio[audio_start:audio_start + self.segment_length]
        else:
            audio = torch.nn.functional.pad(
                audio, (0, self.segment_length - audio.size(0)),
                'constant').data

        # Normalize to [-1, 1] and compute the mel-spectrogram.
        audio = audio / self.max_wav_value
        audio_norm = audio.unsqueeze(0)
        audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
        melspec = self.stft.mel_spectrogram(audio_norm)
        melspec = melspec.squeeze(0)

        return (melspec, audio, len(audio))

    def __getitem__(self, index):
        return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])

    def __len__(self):
        return len(self.audiopaths_and_text)


def batch_to_gpu(batch):
    x, y, len_y = batch
    x = to_gpu(x).float()
    y = to_gpu(y).float()
    len_y = to_gpu(torch.sum(len_y))
    return ((x, y), y, len_y)