python_code | repo_name | file_path
---|---|---|
# Copyright 2017-2018 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mxnet as mx
import mxnet.ndarray as nd
import random
import argparse
from mxnet.io import DataBatch, DataIter
import numpy as np
import horovod.mxnet as hvd
import dali
def add_data_args(parser):
def float_list(x):
return list(map(float, x.split(',')))
def int_list(x):
return list(map(int, x.split(',')))
data = parser.add_argument_group('Data')
data.add_argument('--data-train', type=str, help='the training data')
data.add_argument('--data-train-idx', type=str, default='', help='the index of training data')
data.add_argument('--data-val', type=str, help='the validation data')
data.add_argument('--data-val-idx', type=str, default='', help='the index of validation data')
data.add_argument('--data-pred', type=str, help='the image on which run inference (only for pred mode)')
data.add_argument('--data-backend', choices=('dali-gpu', 'dali-cpu', 'mxnet', 'synthetic'), default='dali-gpu',
help='set data loading & augmentation backend')
data.add_argument('--image-shape', type=int_list, default=[3, 224, 224],
help='the image shape feed into the network')
data.add_argument('--rgb-mean', type=float_list, default=[123.68, 116.779, 103.939],
help='a tuple of size 3 for the mean rgb')
data.add_argument('--rgb-std', type=float_list, default=[58.393, 57.12, 57.375],
help='a tuple of size 3 for the std rgb')
data.add_argument('--input-layout', type=str, default='NCHW', choices=('NCHW', 'NHWC'),
help='the layout of the input data')
data.add_argument('--conv-layout', type=str, default='NCHW', choices=('NCHW', 'NHWC'),
help='the layout of the data assumed by the conv operation')
data.add_argument('--batchnorm-layout', type=str, default='NCHW', choices=('NCHW', 'NHWC'),
help='the layout of the data assumed by the batchnorm operation')
data.add_argument('--pooling-layout', type=str, default='NCHW', choices=('NCHW', 'NHWC'),
help='the layout of the data assumed by the pooling operation')
data.add_argument('--num-examples', type=int, default=1281167,
help="the number of training examples (doesn't work with mxnet data backend)")
data.add_argument('--data-val-resize', type=int, default=256,
help='base length of shorter edge for validation dataset')
return data
def add_data_aug_args(parser):
aug = parser.add_argument_group(
'MXNet data backend', 'entire group applies only to mxnet data backend')
aug.add_argument('--data-mxnet-threads', type=int, default=40,
help='number of threads for data decoding for mxnet data backend')
aug.add_argument('--random-crop', type=int, default=0,
help='whether or not to randomly crop the image')
aug.add_argument('--random-mirror', type=int, default=1,
help='whether or not to randomly flip the image horizontally')
aug.add_argument('--max-random-h', type=int, default=0,
help='max change of hue, whose range is [0, 180]')
aug.add_argument('--max-random-s', type=int, default=0,
help='max change of saturation, whose range is [0, 255]')
aug.add_argument('--max-random-l', type=int, default=0,
help='max change of intensity, whose range is [0, 255]')
aug.add_argument('--min-random-aspect-ratio', type=float, default=0.75,
help='min value of aspect ratio, whose value is either None or a positive value.')
aug.add_argument('--max-random-aspect-ratio', type=float, default=1.33,
help='max value of aspect ratio. If min_random_aspect_ratio is None, '
'the aspect ratio range is [1-max_random_aspect_ratio, '
'1+max_random_aspect_ratio], otherwise it is '
'[min_random_aspect_ratio, max_random_aspect_ratio].')
aug.add_argument('--max-random-rotate-angle', type=int, default=0,
help='max angle to rotate, whose range is [0, 360]')
aug.add_argument('--max-random-shear-ratio', type=float, default=0,
help='max ratio to shear, whose range is [0, 1]')
aug.add_argument('--max-random-scale', type=float, default=1,
help='max ratio to scale')
aug.add_argument('--min-random-scale', type=float, default=1,
help='min ratio to scale; should be >= img_size/input_shape, '
'otherwise use --pad-size')
aug.add_argument('--max-random-area', type=float, default=1,
help='max area to crop in random resized crop, whose range is [0, 1]')
aug.add_argument('--min-random-area', type=float, default=0.05,
help='min area to crop in random resized crop, whose range is [0, 1]')
aug.add_argument('--min-crop-size', type=int, default=-1,
help='Crop both width and height into a random size in '
'[min_crop_size, max_crop_size]')
aug.add_argument('--max-crop-size', type=int, default=-1,
help='Crop both width and height into a random size in '
'[min_crop_size, max_crop_size]')
aug.add_argument('--brightness', type=float, default=0,
help='brightness jittering, whose range is [0, 1]')
aug.add_argument('--contrast', type=float, default=0,
help='contrast jittering, whose range is [0, 1]')
aug.add_argument('--saturation', type=float, default=0,
help='saturation jittering, whose range is [0, 1]')
aug.add_argument('--pca-noise', type=float, default=0,
help='pca noise, whose range is [0, 1]')
aug.add_argument('--random-resized-crop', type=int, default=1,
help='whether to use random resized crop')
return aug
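# Editor's usage sketch (added for illustration; not part of the original
# script). It assumes only argparse (imported above) and the two helpers
# defined in this file, and shows how a training entry point would typically
# assemble and parse the data-related flags.
def _example_build_data_args():
    parser = argparse.ArgumentParser(description='data-args usage sketch')
    add_data_args(parser)
    add_data_aug_args(parser)
    # parse an illustrative command line; flags not given keep their defaults
    args = parser.parse_args(['--data-train', 'train.rec',
                              '--image-shape', '3,224,224',
                              '--data-backend', 'mxnet'])
    assert args.image_shape == [3, 224, 224]
    return args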
def get_data_loader(args):
if args.data_backend == 'dali-gpu':
return (lambda *args, **kwargs: dali.get_rec_iter(*args, **kwargs, dali_cpu=False))
if args.data_backend == 'dali-cpu':
return (lambda *args, **kwargs: dali.get_rec_iter(*args, **kwargs, dali_cpu=True))
if args.data_backend == 'synthetic':
return get_synthetic_rec_iter
if args.data_backend == 'mxnet':
return get_rec_iter
raise ValueError('Wrong data backend')
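# Editor's note (hedged sketch): whichever backend is selected, the returned
# callable is expected to share the (args, kv) signature used below, e.g.
#   loader_fn = get_data_loader(args)
#   train_iter, val_iter = loader_fn(args, kv)
# The dali-* branches additionally fix the dali_cpu flag via the lambdas above.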
class DataGPUSplit:
def __init__(self, dataloader, ctx, dtype):
self.dataloader = dataloader
self.ctx = ctx
self.dtype = dtype
self.batch_size = dataloader.batch_size // len(ctx)
self._num_gpus = len(ctx)
def __iter__(self):
return DataGPUSplit(iter(self.dataloader), self.ctx, self.dtype)
def __next__(self):
data = next(self.dataloader)
ret = []
for i in range(len(self.ctx)):
start = i * len(data.data[0]) // len(self.ctx)
end = (i + 1) * len(data.data[0]) // len(self.ctx)
pad = max(0, min(data.pad - (len(self.ctx) - i - 1) * self.batch_size, self.batch_size))
ret.append(mx.io.DataBatch(
[data.data[0][start:end].as_in_context(self.ctx[i]).astype(self.dtype)],
[data.label[0][start:end].as_in_context(self.ctx[i])],
pad=pad))
return ret
def next(self):
return next(self)
def reset(self):
self.dataloader.reset()
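# Editor's worked example for the slicing above (illustrative arithmetic only):
# with a global batch of 8 split across 2 GPUs, self.batch_size is 4. If the
# incoming batch reports data.pad = 5 (only 3 real samples), then
#   GPU 0: pad = max(0, min(5 - 1*4, 4)) = 1   (3 real samples + 1 padded)
#   GPU 1: pad = max(0, min(5 - 0*4, 4)) = 4   (all 4 samples padded)
# so the per-GPU pads sum back to the original 5 padded samples.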
def get_rec_iter(args, kv=None):
gpus = args.gpus
if 'horovod' in args.kv_store:
rank = hvd.rank()
nworker = hvd.size()
gpus = [gpus[0]]
batch_size = args.batch_size // hvd.size()
else:
rank = kv.rank if kv else 0
nworker = kv.num_workers if kv else 1
batch_size = args.batch_size
if args.input_layout == 'NHWC':
raise ValueError('ImageRecordIter cannot handle layout {}'.format(args.input_layout))
train = DataGPUSplit(mx.io.ImageRecordIter(
path_imgrec = args.data_train,
path_imgidx = args.data_train_idx,
label_width = 1,
mean_r = args.rgb_mean[0],
mean_g = args.rgb_mean[1],
mean_b = args.rgb_mean[2],
std_r = args.rgb_std[0],
std_g = args.rgb_std[1],
std_b = args.rgb_std[2],
data_name = 'data',
label_name = 'softmax_label',
data_shape = args.image_shape,
batch_size = batch_size,
rand_crop = args.random_crop,
max_random_scale = args.max_random_scale,
random_resized_crop = args.random_resized_crop,
min_random_scale = args.min_random_scale,
max_aspect_ratio = args.max_random_aspect_ratio,
min_aspect_ratio = args.min_random_aspect_ratio,
max_random_area = args.max_random_area,
min_random_area = args.min_random_area,
min_crop_size = args.min_crop_size,
max_crop_size = args.max_crop_size,
brightness = args.brightness,
contrast = args.contrast,
saturation = args.saturation,
pca_noise = args.pca_noise,
random_h = args.max_random_h,
random_s = args.max_random_s,
random_l = args.max_random_l,
max_rotate_angle = args.max_random_rotate_angle,
max_shear_ratio = args.max_random_shear_ratio,
rand_mirror = args.random_mirror,
preprocess_threads = args.data_mxnet_threads,
shuffle = True,
num_parts = nworker,
part_index = rank,
seed = args.seed or '0',
), [mx.gpu(gpu) for gpu in gpus], args.dtype)
if args.data_val is None:
return (train, None)
val = DataGPUSplit(mx.io.ImageRecordIter(
path_imgrec = args.data_val,
path_imgidx = args.data_val_idx,
label_width = 1,
mean_r = args.rgb_mean[0],
mean_g = args.rgb_mean[1],
mean_b = args.rgb_mean[2],
std_r = args.rgb_std[0],
std_g = args.rgb_std[1],
std_b = args.rgb_std[2],
data_name = 'data',
label_name = 'softmax_label',
batch_size = batch_size,
round_batch = False,
data_shape = args.image_shape,
preprocess_threads = args.data_mxnet_threads,
rand_crop = False,
rand_mirror = False,
num_parts = nworker,
part_index = rank,
resize = args.data_val_resize,
), [mx.gpu(gpu) for gpu in gpus], args.dtype)
return (train, val)
class SyntheticDataIter(DataIter):
def __init__(self, num_classes, data_shape, max_iter, ctx, dtype):
self.batch_size = data_shape[0]
self.cur_iter = 0
self.max_iter = max_iter
self.dtype = dtype
label = np.random.randint(0, num_classes, [self.batch_size,])
data = np.random.uniform(-1, 1, data_shape)
self.data = []
self.label = []
self._num_gpus = len(ctx)
for dev in ctx:
self.data.append(mx.nd.array(data, dtype=self.dtype, ctx=dev))
self.label.append(mx.nd.array(label, dtype=self.dtype, ctx=dev))
def __iter__(self):
return self
def next(self):
self.cur_iter += 1
if self.cur_iter <= self.max_iter:
return [DataBatch(data=(data,), label=(label,), pad=0) for data, label in zip(self.data, self.label)]
else:
raise StopIteration
def __next__(self):
return self.next()
def reset(self):
self.cur_iter = 0
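# Editor's usage sketch (assumption, not part of the original file): the
# synthetic iterator replays one random batch per GPU for max_iter steps,
# which removes I/O and augmentation from throughput measurements.
# it = SyntheticDataIter(num_classes=1000, data_shape=(32, 3, 224, 224),
#                        max_iter=100, ctx=[mx.gpu(0)], dtype='float32')
# for batches in it:      # each item is a list with one DataBatch per GPU
#     pass                # feed `batches` to the training loop here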
def get_synthetic_rec_iter(args, kv=None):
gpus = args.gpus
if 'horovod' in args.kv_store:
gpus = [gpus[0]]
batch_size = args.batch_size // hvd.size()
else:
batch_size = args.batch_size
if args.input_layout == 'NCHW':
data_shape = (batch_size, *args.image_shape)
elif args.input_layout == 'NHWC':
data_shape = (batch_size, *args.image_shape[1:], args.image_shape[0])
else:
raise ValueError('Wrong input layout')
train = SyntheticDataIter(args.num_classes, data_shape,
args.num_examples // args.batch_size,
[mx.gpu(gpu) for gpu in gpus], args.dtype)
if args.data_val is None:
return (train, None)
val = SyntheticDataIter(args.num_classes, data_shape,
args.num_examples // args.batch_size,
[mx.gpu(gpu) for gpu in gpus], args.dtype)
return (train, val)
def load_image(args, path, ctx=mx.cpu()):
image = mx.image.imread(path).astype('float32')
image = mx.image.imresize(image, *args.image_shape[1:])
image = (image - nd.array(args.rgb_mean)) / nd.array(args.rgb_std)
image = image.as_in_context(ctx)
if args.input_layout == 'NCHW':
image = image.transpose((2, 0, 1))
image = image.astype(args.dtype)
if args.image_shape[0] == 4:
dim = 0 if args.input_layout == 'NCHW' else 2
image = nd.concat(image, nd.zeros((1, *image.shape[1:]), dtype=image.dtype, ctx=image.context), dim=dim)
return image
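# Editor's usage sketch (assumption): how load_image would feed the 'pred'
# mode via --data-pred. `module` stands for an already-bound MXNet Module and
# is a hypothetical name used only for illustration.
# img = load_image(args, args.data_pred, ctx=mx.gpu(0))
# batch = mx.io.DataBatch([img.expand_dims(axis=0)], None)
# module.forward(batch, is_train=False)
# prob = module.get_outputs()[0].asnumpy()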
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/data.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mxnet.io import DataIter
import time
class BenchmarkingDataIter:
def __init__(self, data_iter, benchmark_iters=None):
self.data_iter = data_iter
self.benchmark_iters = benchmark_iters
self.overall_time = 0
self.num = 0
def __iter__(self):
iter(self.data_iter)
return self
def next(self):
if self.benchmark_iters is not None and self.num >= self.benchmark_iters:
raise StopIteration
try:
start_time = time.time()
ret = self.data_iter.next()
end_time = time.time()
except StopIteration:
if self.benchmark_iters is None:
raise
self.data_iter.reset()
start_time = time.time()
ret = self.data_iter.next()
end_time = time.time()
if self.num != 0:
self.overall_time += end_time - start_time
self.num += 1
return ret
def __next__(self):
return self.next()
def __getattr__(self, attr):
return getattr(self.data_iter, attr)
def get_avg_time(self):
if self.num <= 1:
avg = float('nan')
else:
avg = self.overall_time / (self.num - 1)
return avg
def reset(self):
self.overall_time = 0
self.num = 0
self.data_iter.reset()
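# Editor's usage sketch (assumption): wrapping any DataIter to time the data
# pipeline; the first batch is excluded from the average (see get_avg_time).
# bench = BenchmarkingDataIter(train_iter, benchmark_iters=100)
# for batch in bench:
#     pass   # feed `batch` to the trainer here
# print('avg data time per batch: {:.4f}s'.format(bench.get_avg_time()))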
| DeepLearningExamples-master | MxNet/Classification/RN50v1.5/benchmarking.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import pathlib
import sys
import time
import fire
import librosa
import torch
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.text_dataset import TextDataset
from fastspeech.inferencer.fastspeech_inferencer import FastSpeechInferencer
from fastspeech.model.fastspeech import Fastspeech
from fastspeech import hparam as hp, DEFAULT_DEVICE
from fastspeech.utils.logging import tprint
from fastspeech.utils.time import TimeElapsed
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
from fastspeech.infer import get_inferencer
from fastspeech.inferencer.waveglow_inferencer import WaveGlowInferencer
MAX_FILESIZE=128
# TODO test with different speeds
def generate(hparam='infer.yaml',
text='test_sentences.txt',
results_path='results',
device=DEFAULT_DEVICE,
**kwargs):
"""The script for generating waveforms from texts with a vocoder.
By default, this script loads parameters from the default config file, fastspeech/hparams/infer.yaml.
Besides the flags, you can also override parameters from the config file on the command line. For example,
--checkpoint_path=CHECKPOINT_PATH
Path to checkpoint directory. The latest checkpoint will be loaded.
--waveglow_path=WAVEGLOW_PATH
Path to the WaveGlow checkpoint file.
--waveglow_engine_path=WAVEGLOW_ENGINE_PATH
Path to the WaveGlow engine file. It can only be used with --use_trt=True.
--batch_size=BATCH_SIZE
Batch size to use. Defaults to 1.
Refer to fastspeech/hparams/infer.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "infer.yaml".
text (str, optional): a sample text or a text file path to generate its waveform. Defaults to 'test_sentences.txt'.
results_path (str, optional): Path to output waveforms directory. Defaults to 'results'.
device (str, optional): Device to use. Defaults to "cuda" if available, otherwise "cpu".
"""
hp.set_hparam(hparam, kwargs)
if os.path.isfile(text):
f = open(text, 'r', encoding="utf-8")
texts = f.read().splitlines()
else: # single string
texts = [text]
dataset = TextDataset(texts)
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
shuffle=False,
drop_last=False)
# text to mel
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
fs_inferencer = get_inferencer(model, data_loader, device)
# set up WaveGlow
if hp.use_trt:
from fastspeech.trt.waveglow_trt_inferencer import WaveGlowTRTInferencer
wb_inferencer = WaveGlowTRTInferencer(
ckpt_file=hp.waveglow_path, engine_file=hp.waveglow_engine_path, use_fp16=hp.use_fp16)
else:
wb_inferencer = WaveGlowInferencer(
ckpt_file=hp.waveglow_path, device=device, use_fp16=hp.use_fp16)
tprint("Generating {} sentences.. ".format(len(dataset)))
with fs_inferencer, wb_inferencer:
try:
for i in range(len(data_loader)):
tprint("------------- BATCH # {} -------------".format(i))
with TimeElapsed(name="Inferece Time: E2E", format=":.6f"):
## Text-to-Mel ##
with TimeElapsed(name="Inferece Time: FastSpeech", device=device, cuda_sync=True, format=":.6f"), torch.no_grad():
outputs = fs_inferencer.infer()
texts = outputs["text"]
mels = outputs["mel"] # (b, n_mels, t)
mel_masks = outputs['mel_mask'] # (b, t)
# assert(mels.is_cuda)
# remove paddings
mel_lens = mel_masks.sum(axis=1)
max_len = mel_lens.max()
mels = mels[..., :max_len]
mel_masks = mel_masks[..., :max_len]
## Vocoder ##
with TimeElapsed(name="Inferece Time: WaveGlow", device=device, cuda_sync=True, format=":.6f"), torch.no_grad():
wavs = wb_inferencer.infer(mels)
wavs = to_cpu_numpy(wavs)
## Write wavs ##
pathlib.Path(results_path).mkdir(parents=True, exist_ok=True)
for i, (text, wav) in enumerate(zip(texts, wavs)):
tprint("TEXT #{}: \"{}\"".format(i, text))
# remove paddings in case of batch size > 1
wav_len = mel_lens[i] * hp.hop_len
wav = wav[:wav_len]
path = os.path.join(results_path, text[:MAX_FILESIZE] + ".wav")
librosa.output.write_wav(path, wav, hp.sr)
except StopIteration:
tprint("Generation has been done.")
except KeyboardInterrupt:
tprint("Generation has been canceled.")
if __name__ == '__main__':
fire.Fire(generate)
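# Example invocation (editor's sketch; paths are placeholders, flags follow
# the docstring of generate() above):
#   python generate.py --text="The quick brown fox jumps over the lazy dog." \
#       --results_path=results --waveglow_path=WAVEGLOW_CHECKPOINT --batch_size=1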
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/generate.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
def get_requirements(filename='requirements.txt'):
deps = []
with open(filename, 'r') as f:
for pkg in f.readlines():
if pkg.strip():
deps.append(pkg)
return deps
setup(
name='fastspeech',
version='0.2.2',
description='FastSpeech training and inference in PyTorch and TensorRT',
author='Dabi Ahn',
keywords='tts',
packages=find_packages(),
install_requires=get_requirements(),
python_requires='>=3',
include_package_data=True
)
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/setup.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/waveglow/__init__.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import random
import common.layers as layers
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class MelAudioLoader(torch.utils.data.Dataset):
"""
1) loads audio, text pairs
2) computes mel-spectrograms from audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
self.segment_length = args.segment_length
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def get_mel_audio_pair(self, filename):
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(
audio, (0, self.segment_length - audio.size(0)), 'constant').data
audio = audio / self.max_wav_value
audio_norm = audio.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = melspec.squeeze(0)
return (melspec, audio, len(audio))
def __getitem__(self, index):
return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])
def __len__(self):
return len(self.audiopaths_and_text)
def batch_to_gpu(batch):
x, y, len_y = batch
x = to_gpu(x).float()
y = to_gpu(y).float()
len_y = to_gpu(torch.sum(len_y))
return ((x, y), y, len_y)
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/waveglow/data_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If reverse=True it does convolution with
inverse
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor' or z.type() == 'torch.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).float()).squeeze()
z = self.conv(z)
return z, log_det_W
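# Editor's sanity-check sketch (illustration only, not part of the original):
# the forward and reverse passes should invert each other up to numerics.
# conv = Invertible1x1Conv(8)
# z = torch.randn(2, 8, 100)              # (batch, group_size, n_of_groups)
# y, log_det_W = conv(z)
# z_rec = conv(y, reverse=True)
# assert torch.allclose(z, z_rec, atol=1e-5)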
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary
difference from WaveNet is the convolutions need not be causal. There is
also no dilation size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.cond_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1)
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
self.cond_layers.append(cond_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(
res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
for i in range(self.n_layers):
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
self.cond_layers[i](spect),
torch.IntTensor([self.n_channels]))
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = res_skip_acts[:, :self.n_channels, :] + audio
skip_acts = res_skip_acts[:, self.n_channels:, :]
else:
skip_acts = res_skip_acts
if i == 0:
output = skip_acts
else:
output = skip_acts + output
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group / 2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, :self.n_early_size, :])
audio = audio[:, self.n_early_size:, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = torch.randn(spect.size(0),
self.n_remaining_channels,
spect.size(2), device=spect.device).to(spect.dtype)
audio = torch.autograd.Variable(sigma * audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
z = torch.randn(spect.size(0), self.n_early_size, spect.size(
2), device=spect.device).to(spect.dtype)
audio = torch.cat((sigma * z, audio), 1)
audio = audio.permute(
0, 2, 1).contiguous().view(
audio.size(0), -1).data
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layers = remove(WN.cond_layers)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
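# Editor's usage sketch (assumption; the hyperparameters below are
# illustrative, not taken from a shipped checkpoint):
# glow = WaveGlow(n_mel_channels=80, n_flows=12, n_group=8, n_early_every=4,
#                 n_early_size=2,
#                 WN_config=dict(n_layers=8, n_channels=256, kernel_size=3))
# glow = WaveGlow.remove_weightnorm(glow).eval()
# with torch.no_grad():
#     audio = glow.infer(torch.randn(1, 80, 100), sigma=0.6)   # (1, samples)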
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/waveglow/model.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
sys.path.append('tacotron2')
import torch
from common.layers import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, cpu_run=False, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
if cpu_run:
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length)
else:
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).cuda()
if mode == 'zeros':
mel_input = torch.zeros(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
elif mode == 'normal':
mel_input = torch.randn(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
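# Editor's usage sketch (assumption): removing WaveGlow's bias from generated
# audio. `waveglow` and `audio` are placeholders for a loaded model and its
# output waveform of shape (batch, samples).
# denoiser = Denoiser(waveglow)
# audio_denoised = denoiser(audio, strength=0.01)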
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/waveglow/denoiser.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output, clean_audio):
# clean_audio is unused;
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(
z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total # noqa: E501
return loss / (z.size(0) * z.size(1) * z.size(2))
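# Editor's note (standard normalizing-flow objective, stated for reference):
# the value returned above is the negative log-likelihood of the audio under
# the flow,
#   sum(z^2) / (2 * sigma^2) - sum(log s) - sum(log |det W|),
# averaged over batch * channels * time, so minimizing it maximizes the
# likelihood of the training audio under the spherical Gaussian prior.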
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/waveglow/loss_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
def parse_waveglow_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help)
# misc parameters
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# glow parameters
parser.add_argument('--flows', default=12, type=int,
help='Number of steps of flow')
parser.add_argument('--groups', default=8, type=int,
help='Number of samples in a group processed by the steps of flow')
parser.add_argument('--early-every', default=4, type=int,
help='Determines how often (i.e., after how many coupling layers) \
a number of channels (defined by --early-size parameter) are output\
to the loss function')
parser.add_argument('--early-size', default=2, type=int,
help='Number of channels output to the loss function')
parser.add_argument('--sigma', default=1.0, type=float,
help='Standard deviation used for sampling from Gaussian')
parser.add_argument('--segment-length', default=8000, type=int,
help='Segment length (audio samples) processed per iteration')
# wavenet parameters
wavenet = parser.add_argument_group('WaveNet parameters')
wavenet.add_argument('--wn-kernel-size', default=3, type=int,
help='Kernel size for dilated convolution in the affine coupling layer (WN)')
wavenet.add_argument('--wn-channels', default=256, type=int,
help='Number of channels in WN')
wavenet.add_argument('--wn-layers', default=8, type=int,
help='Number of layers in WN')
return parser
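# Editor's usage sketch (assumption): this parser is built on top of a parent
# parser supplied by the training entry point.
# parent = argparse.ArgumentParser(add_help=False)
# parser = parse_waveglow_args(parent)
# args = parser.parse_args(['--flows', '12', '--wn-channels', '256'])
# assert args.flows == 12 and args.wn_channels == 256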
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/waveglow/arg_parser.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import sys
import time
import fire
import torch
from tqdm import tqdm
from fastspeech import DEFAULT_DEVICE
from fastspeech import hparam as hp
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.ljspeech_dataset import LJSpeechDataset
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy, to_device_async
from fastspeech.infer import get_inferencer
from fastspeech.inferencer.waveglow_inferencer import WaveGlowInferencer
from contextlib import ExitStack
import numpy as np
try:
from apex import amp
except ImportError:
raise ImportError('apex is required; please install it.')
pp = pprint.PrettyPrinter(indent=4, width=1000)
WARMUP_ITERS = 3
def perf_inference(hparam="infer.yaml",
with_vocoder=False,
n_iters=None,
device=DEFAULT_DEVICE,
**kwargs):
"""The script for estimating inference performance.
By default, this script loads parameters from the default config file, fastspeech/hparams/infer.yaml.
Besides the flags, you can also override parameters from the config file on the command line. For example,
--dataset_path=DATASET_PATH
Path to dataset directory.
--checkpoint_path=CHECKPOINT_PATH
Path to checkpoint directory. The latest checkpoint will be loaded.
--batch_size=BATCH_SIZE
Batch size to use. Defaults to 1.
Refer to fastspeech/hparams/infer.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "infer.yaml".
with_vocoder (bool, optional): Whether or not to estimate with a vocoder. Defaults to False.
n_iters (int, optional): Number of batches to estimate. Defaults to None (an epoch).
device (str, optional): Device to use. Defaults to "cuda" if available, otherwise "cpu".
"""
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
tprint("Device count: {}".format(torch.cuda.device_count()))
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
dataset = LJSpeechDataset(root_path=hp.dataset_path,
sr=hp.sr,
n_fft=hp.n_fft,
win_len=hp.win_len,
hop_len=hp.hop_len,
n_mels=hp.num_mels,
mel_fmin=hp.mel_fmin,
mel_fmax=hp.mel_fmax,
exclude_mels=True,
sort_by_length=True if hp.use_trt and hp.trt_multi_engine else False
)
tprint("Dataset size: {}".format(len(dataset)))
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
shuffle=False if hp.use_trt and hp.trt_multi_engine else True,
drop_last=True,
)
fs_inferencer = get_inferencer(model, data_loader, device)
if with_vocoder:
if hp.use_trt:
from fastspeech.trt.waveglow_trt_inferencer import WaveGlowTRTInferencer
wb_inferencer = WaveGlowTRTInferencer(ckpt_file=hp.waveglow_path, engine_file=hp.waveglow_engine_path, use_fp16=hp.use_fp16)
else:
wb_inferencer = WaveGlowInferencer(ckpt_file=hp.waveglow_path, device=device, use_fp16=hp.use_fp16)
with fs_inferencer, wb_inferencer if with_vocoder else ExitStack():
tprint("Perf started. Batch size={}.".format(hp.batch_size))
latencies = []
throughputs = []
n_iters = min(n_iters, len(data_loader)) if n_iters else len(data_loader)
assert(n_iters > WARMUP_ITERS)
for i in tqdm(range(n_iters)):
start = time.time()
outputs = fs_inferencer.infer()
mels = outputs['mel']
mel_masks = outputs['mel_mask']
assert(mels.is_cuda)
if with_vocoder:
# remove padding
max_len = mel_masks.sum(axis=1).max()
mels = mels[..., :max_len]
mel_masks = mel_masks[..., :max_len]
with torch.no_grad():
wavs = wb_inferencer.infer(mels)
wavs = to_cpu_numpy(wavs)
else:
# include time for DtoH copy
to_cpu_numpy(mels)
to_cpu_numpy(mel_masks)
end = time.time()
if i > WARMUP_ITERS-1:
time_elapsed = end - start
generated_samples = len(mel_masks.nonzero()) * hp.hop_len
throughput = generated_samples / time_elapsed
latencies.append(time_elapsed)
throughputs.append(throughput)
latencies.sort()
avg_latency = np.mean(latencies)
std_latency = np.std(latencies)
latency_90 = max(latencies[:int(len(latencies)*0.90)]) if n_iters > 1 else 0
latency_95 = max(latencies[:int(len(latencies)*0.95)]) if n_iters > 1 else 0
latency_99 = max(latencies[:int(len(latencies)*0.99)]) if n_iters > 1 else 0
throughput = np.mean(throughputs)
rtf = throughput / (hp.sr * hp.batch_size)
tprint("Batch size\tPrecision\tAvg Latency(s)\tStd Latency(s)\tLatency 90%(s)\tLatency 95%(s)\tLatency 99%(s)\tThroughput(samples/s)\tAvg RTF\n\
{}\t{}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}\t{:.2f}".format(
hp.batch_size,
"FP16" if hp.use_fp16 else "FP32",
avg_latency,
std_latency,
latency_90,
latency_95,
latency_99,
int(throughput),
rtf))
if __name__ == '__main__':
fire.Fire(perf_inference)
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/fastspeech/perf_infer_ljspeech.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from fastspeech.utils.hparam import Hparam
import torch
# hyperparameter
HP_ROOT_PATH = os.path.join(os.path.dirname(__file__), 'hparams')
hparam = Hparam(HP_ROOT_PATH)
# device
DEFAULT_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
| DeepLearningExamples-master | CUDA-Optimized/FastSpeech/fastspeech/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pathlib
import fire
import torch
from tqdm import tqdm
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.ljspeech_dataset import LJSpeechDataset
import tacotron2.train
import tacotron2.hparams
from fastspeech import hparam as hp, DEFAULT_DEVICE
import os
import numpy as np
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
def get_tacotron2(device, is_training=False):
hparams = tacotron2.hparams.create_hparams()
model = tacotron2.train.load_model(hparams)
model.load_state_dict(torch.load(
hp.tacotron2_path, map_location=torch.device(device))["state_dict"])
if is_training:
model.train()
else:
model.eval()
return model
def get_duration(texts, text_lens, mels, mel_lens, tacotron2, device):
texts = to_device_async(texts, device)
text_lens = to_device_async(text_lens, device)
mels = to_device_async(mels, device)
mel_lens = to_device_async(mel_lens, device)
_, _, _, aligns = tacotron2.forward(
(texts, text_lens, mels, None, mel_lens))
aligns = to_cpu_numpy(aligns)
durs = torch.FloatTensor([compute_duration(align) for align in aligns])
return durs
def compute_duration(align):
"""
Warning. This code assumes the attention is monotonic.
"""
d_mel, d_text = align.shape
dur = np.array([0 for _ in range(d_text)])
for i in range(d_mel):
idx = np.argmax(align[i])
dur[idx] += 1
return dur
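# Illustrative example (hypothetical values, not from the repository): for a monotonic
# 4-frame x 3-token alignment,
#   align = np.array([[0.9, 0.05, 0.05],
#                     [0.7, 0.20, 0.10],
#                     [0.1, 0.80, 0.10],
#                     [0.1, 0.10, 0.80]])
#   compute_duration(align)  # -> array([2, 1, 1]); the durations always sum to the number of mel frames (4)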
def preprocess_aligns(
hparam="base.yaml",
device=DEFAULT_DEVICE):
""" The script for preprocessing alignments.
By default, this script loads parameters from the default config file, fastspeech/hparams/base.yaml.
--dataset_path=DATASET_PATH
Path to dataset directory.
--tacotron2_path=TACOTRON2_PATH
Path to tacotron2 checkpoint file.
--aligns_path=ALIGNS_PATH
Path to output preprocessed alignments directory.
Refer to fastspeech/hparams/base.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "base.yaml".
device (str, optional): Device to use. Defaults to "cuda" if available, otherwise "cpu".
"""
hp.set_hparam(hparam)
pathlib.Path(hp.aligns_path).mkdir(parents=True, exist_ok=True)
dataset = LJSpeechDataset(hp.dataset_path)
dataloader = PadDataLoader(
dataset, batch_size=1, shuffle=False, num_workers=32, drop_last=True)
tacotron2 = get_tacotron2(device, is_training=True)
to_device_async(tacotron2, device)
for batched in tqdm(dataloader):
names = batched['name']
texts = batched['text_encoded']
text_lens = batched['text_len']
mels = batched['mel']
mel_lens = batched['mel_len']
tprint("Processing {}.".format(', '.join(names)))
durs = get_duration(texts, text_lens, mels,
mel_lens, tacotron2, device)
for i, (name, dur) in enumerate(zip(names, durs)):
save_path = os.path.join(hp.aligns_path, name + ".align.npy")
if os.path.exists(save_path):
continue
np.save(save_path, dur)
# assert sum(duration) == len(align)
if __name__ == '__main__':
fire.Fire(preprocess_aligns)
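# A plausible invocation (illustrative; dataset/checkpoint locations come from the config
# referenced by --hparam, since this entry point only exposes the hparam and device flags):
#   python fastspeech/align_tacotron2.py --hparam=base.yaml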
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/align_tacotron2.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import fire
import torch
from torch.optim.lr_scheduler import LambdaLR
from fastspeech import DEFAULT_DEVICE
from fastspeech import hparam as hp
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.ljspeech_dataset import LJSpeechDataset
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.trainer.fastspeech_trainer import FastspeechTrainer
from fastspeech.utils.logging import tprint
try:
import apex
except ImportError:
raise ImportError('apex is required; please install NVIDIA apex.')
# import multiprocessing
# multiprocessing.set_start_method('spawn', True)
pp = pprint.PrettyPrinter(indent=4, width=1000)
def train(hparam="train.yaml",
device=DEFAULT_DEVICE,
**kwargs):
""" The FastSpeech model training script.
By default, this script loads parameters from the default config file, fastspeech/hparams/train.yaml.
In addition to the flags, you can also override parameters defined in the config file from the command line. For example,
--dataset_path=DATASET_PATH
Path to dataset directory.
--tacotron2_path=TACOTRON2_PATH
Path to tacotron2 checkpoint file.
--mels_path=MELS_PATH
Path to preprocessed mels directory.
--aligns_path=ALIGNS_PATH
Path to preprocessed alignments directory.
--log_path=LOG_PATH
Path to log directory.
--checkpoint_path=CHECKPOINT_PATH
Path to checkpoint directory. The latest checkpoint will be loaded.
--batch_size=BATCH_SIZE
Batch size to use. Defaults to 16.
Refer to fastspeech/hparams/train.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "train.yaml".
device (str, optional): Device to use. Defaults to "cuda" if available, otherwise "cpu".
"""
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
tprint("Device count: {}".format(torch.cuda.device_count()))
# model
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
# dataset
dataset = LJSpeechDataset(root_path=hp.dataset_path,
meta_file=hp.meta_file,
mels_path=hp.mels_path,
aligns_path=hp.aligns_path,
sr=hp.sr,
n_fft=hp.n_fft,
win_len=hp.win_len,
hop_len=hp.hop_len,
n_mels=hp.num_mels,
mel_fmin=hp.mel_fmin,
mel_fmax=hp.mel_fmax,
)
tprint("Dataset size: {}".format(len(dataset)))
# data loader
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
drop_last=True,
)
# optimizer
def get_optimizer(model):
optimizer = torch.optim.Adam(
model.parameters(),
lr=hp.learning_rate,
betas=(0.9, 0.98),
eps=1e-9)
return optimizer
def get_warmup_lr_scheduler(optimizer):
d_model = hp.d_model
warmup_steps = hp.warmup_steps
lr = lambda step: d_model ** -0.5 * min((step + 1) ** -0.5,
(step + 1) * warmup_steps ** -1.5) / hp.learning_rate
scheduler = LambdaLR(optimizer, lr_lambda=[lr])
return scheduler
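# Note on the schedule above: LambdaLR multiplies the base learning rate (hp.learning_rate)
# by lr(step), and the division by hp.learning_rate inside lr cancels it out, so the
# effective rate follows the Transformer "Noam" schedule:
#   lr_eff(step) = d_model**-0.5 * min((step + 1)**-0.5, (step + 1) * warmup_steps**-1.5)
# It rises linearly for warmup_steps steps and then decays as 1/sqrt(step); the peak,
# reached at step = warmup_steps, is (d_model * warmup_steps)**-0.5 (about 9.9e-4 for the
# illustrative values d_model=256, warmup_steps=4000).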
# trainer
trainer = FastspeechTrainer(data_loader,
'fastspeech',
model,
optimizer_fn=get_optimizer,
final_steps=hp.final_steps,
log_steps=hp.log_step,
ckpt_path=hp.checkpoint_path,
save_steps=hp.save_step,
log_path=hp.log_path,
lr_scheduler_fn=get_warmup_lr_scheduler,
pre_aligns=True if hp.aligns_path else False,
device=device,
use_amp=hp.use_amp,
nvprof_iter_start=hp.nvprof_iter_start,
nvprof_iter_end=hp.nvprof_iter_end,
pyprof_enabled=hp.pyprof_enabled,
)
trainer.train()
if __name__ == '__main__':
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
fire.Fire(train)
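# A plausible invocation (paths are illustrative, not repository defaults):
#   python fastspeech/train.py --dataset_path=/data/LJSpeech-1.1 \
#       --aligns_path=/data/aligns --checkpoint_path=/checkpoints/fastspeech --batch_size=16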
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/train.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import sys
import time
import fire
import torch
from tqdm import tqdm
from fastspeech import DEFAULT_DEVICE
from fastspeech import hparam as hp
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.ljspeech_dataset import LJSpeechDataset
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy, to_device_async
from fastspeech.infer import get_inferencer
from fastspeech.inferencer.waveglow_inferencer import WaveGlowInferencer
from contextlib import ExitStack
from fastspeech.dataset.text_dataset import TextDataset
import numpy as np
try:
from apex import amp
except ImportError:
raise ImportError('apex is required; please install NVIDIA apex.')
pp = pprint.PrettyPrinter(indent=4, width=1000)
SAMPLE_TEXT = "The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."
INPUT_LEN = 128
INPUT_TEXT = SAMPLE_TEXT[:INPUT_LEN]
WARMUP_ITERS = 3
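# The first WARMUP_ITERS batches are excluded from the latency/throughput statistics below,
# so one-time start-up costs (e.g. CUDA context creation and allocator warm-up) do not skew
# the measurements.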
def perf_inference(hparam="infer.yaml",
with_vocoder=False,
n_iters=None,
device=DEFAULT_DEVICE,
**kwargs):
"""The script for estimating inference performance.
By default, this script loads parameters from the default config file, fastspeech/hparams/infer.yaml.
In addition to the flags, you can also override parameters defined in the config file from the command line. For example,
--dataset_path=DATASET_PATH
Path to dataset directory.
--checkpoint_path=CHECKPOINT_PATH
Path to checkpoint directory. The latest checkpoint will be loaded.
--batch_size=BATCH_SIZE
Batch size to use. Defaults to 1.
Refer to fastspeech/hparams/infer.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "infer.yaml".
with_vocoder (bool, optional): Whether or not to estimate with a vocoder. Defaults to False.
n_iters (int, optional): Number of batches to estimate. Defaults to None (an epoch).
device (str, optional): Device to use. Defaults to "cuda" if available, otherwise "cpu".
"""
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
tprint("Device count: {}".format(torch.cuda.device_count()))
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
dataset_size = hp.batch_size * (n_iters if n_iters else 1)
tprint("Dataset size: {}".format(dataset_size))
dataset = TextDataset([INPUT_TEXT] * (dataset_size + (WARMUP_ITERS * hp.batch_size)))
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
shuffle=False if hp.use_trt and hp.trt_multi_engine else True,
drop_last=True,
)
fs_inferencer = get_inferencer(model, data_loader, device)
if with_vocoder:
if hp.use_trt:
from fastspeech.trt.waveglow_trt_inferencer import WaveGlowTRTInferencer
wb_inferencer = WaveGlowTRTInferencer(ckpt_file=hp.waveglow_path, engine_file=hp.waveglow_engine_path, use_fp16=hp.use_fp16)
else:
wb_inferencer = WaveGlowInferencer(ckpt_file=hp.waveglow_path, device=device, use_fp16=hp.use_fp16)
with fs_inferencer, wb_inferencer if with_vocoder else ExitStack():
tprint("Perf started. Batch size={}.".format(hp.batch_size))
latencies = []
throughputs = []
for i in tqdm(range(len(data_loader))):
start = time.time()
outputs = fs_inferencer.infer()
mels = outputs['mel']
mel_masks = outputs['mel_mask']
assert(mels.is_cuda)
if with_vocoder:
# remove padding
max_len = mel_masks.sum(axis=1).max()
mels = mels[..., :max_len]
mel_masks = mel_masks[..., :max_len]
with torch.no_grad():
wavs = wb_inferencer.infer(mels)
wavs = to_cpu_numpy(wavs)
else:
# include time for DtoH copy
to_cpu_numpy(mels)
to_cpu_numpy(mel_masks)
end = time.time()
if i > WARMUP_ITERS-1:
time_elapsed = end - start
generated_samples = len(mel_masks.nonzero()) * hp.hop_len
throughput = generated_samples / time_elapsed
latencies.append(time_elapsed)
throughputs.append(throughput)
latencies.sort()
avg_latency = np.mean(latencies)
std_latency = np.std(latencies)
# guard against n_iters=None (the default), which would otherwise fail the comparison
latency_90 = max(latencies[:int(len(latencies)*0.90)]) if n_iters and n_iters > 1 else 0
latency_95 = max(latencies[:int(len(latencies)*0.95)]) if n_iters and n_iters > 1 else 0
latency_99 = max(latencies[:int(len(latencies)*0.99)]) if n_iters and n_iters > 1 else 0
throughput = np.mean(throughputs)
rtf = throughput / (hp.sr * hp.batch_size)
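# RTF here is seconds of audio generated per second of wall-clock time per stream:
# throughput is in audio samples/s, and dividing by (sampling rate * batch size)
# normalizes it to a single utterance, so RTF > 1 means faster than real time.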
tprint("Batch size\tPrecision\tAvg Latency(s)\tStd Latency(s)\tLatency 90%(s)\tLatency 95%(s)\tLatency 99%(s)\tThroughput(samples/s)\tAvg RTF\n\
{}\t{}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}\t{:.2f}".format(
hp.batch_size,
"FP16" if hp.use_fp16 else "FP32",
avg_latency,
std_latency,
latency_90,
latency_95,
latency_99,
int(throughput),
rtf))
if __name__ == '__main__':
fire.Fire(perf_inference)
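# A plausible invocation (flags and paths are illustrative, not repository defaults):
#   python fastspeech/perf_infer.py --checkpoint_path=/checkpoints/fastspeech \
#       --batch_size=1 --use_fp16=True --n_iters=64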
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/perf_infer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import fire
from fastspeech import hparam as hp, DEFAULT_DEVICE
from fastspeech.dataset.ljspeech_dataset import LJSpeechDataset
from fastspeech.inferencer.fastspeech_inferencer import FastSpeechInferencer
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.data_load import PadDataLoader
from fastspeech.utils.logging import tprint
import torch
import pprint
from fastspeech.utils.time import TimeElapsed
# import multiprocessing
# multiprocessing.set_start_method('spawn', True)
pp = pprint.PrettyPrinter(indent=4, width=1000)
def infer(hparam="infer.yaml",
device=DEFAULT_DEVICE,
n_iters=1,
**kwargs):
""" The FastSpeech model inference script.
By default, this script loads parameters from the default config file, fastspeech/hparams/infer.yaml.
In addition to the flags, you can also override parameters defined in the config file from the command line. For example,
--dataset_path=DATASET_PATH
Path to dataset directory.
--checkpoint_path=CHECKPOINT_PATH
Path to checkpoint directory. The latest checkpoint will be loaded.
--batch_size=BATCH_SIZE
Batch size to use. Defaults to 1.
Refer to fastspeech/hparams/infer.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "infer.yaml".
device (str, optional): Device to use. Defaults to "cuda" if available, otherwise "cpu".
n_iters (int, optional): Number of batches to infer. Defaults to 1.
"""
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
tprint("Device count: {}".format(torch.cuda.device_count()))
# model
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
dataset = LJSpeechDataset(root_path=hp.dataset_path,
meta_file=hp.meta_file,
sr=hp.sr,
n_fft=hp.n_fft,
win_len=hp.win_len,
hop_len=hp.hop_len,
n_mels=hp.num_mels,
mel_fmin=hp.mel_fmin,
mel_fmax=hp.mel_fmax,
exclude_mels=True,
sort_by_length=True if hp.use_trt and hp.trt_multi_engine else False
)
tprint("Dataset size: {}".format(len(dataset)))
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
shuffle=False if hp.use_trt and hp.trt_multi_engine else True,
drop_last=True,
)
inferencer = get_inferencer(model, data_loader, device)
try:
n_iters = min(len(data_loader), n_iters) if n_iters else len(data_loader)
tprint("Num of iters: {}".format(n_iters))
with inferencer:
for i in range(n_iters):
tprint("------------- INFERENCE : batch #{} -------------".format(i))
with TimeElapsed(name="Inference Time", cuda_sync=True):
out_batch = inferencer.infer()
# tprint("Output:\n{}".format(pp.pformat(out_batch)))
tprint("Inference has been done.")
except KeyboardInterrupt:
tprint("Inference has been canceled.")
def get_inferencer(model, data_loader, device):
if hp.use_trt:
if hp.trt_multi_engine:
from fastspeech.trt.fastspeech_trt_multi_engine_inferencer import FastSpeechTRTMultiEngineInferencer
inferencer = FastSpeechTRTMultiEngineInferencer('fastspeech',
model,
data_loader=data_loader,
ckpt_path=hp.checkpoint_path,
trt_max_ws_size=hp.trt_max_ws_size,
trt_force_build=hp.trt_force_build,
use_fp16=hp.use_fp16,
trt_file_path_list=hp.trt_file_path_list,
trt_max_input_seq_len_list=hp.trt_max_input_seq_len_list,
trt_max_output_seq_len_list=hp.trt_max_output_seq_len_list,
)
else:
from fastspeech.trt.fastspeech_trt_inferencer import FastSpeechTRTInferencer
inferencer = FastSpeechTRTInferencer('fastspeech',
model,
data_loader=data_loader,
ckpt_path=hp.checkpoint_path,
trt_max_ws_size=hp.trt_max_ws_size,
trt_file_path=hp.trt_file_path,
use_fp16=hp.use_fp16,
trt_force_build=hp.trt_force_build,
trt_max_input_seq_len=hp.trt_max_input_seq_len,
trt_max_output_seq_len=hp.trt_max_output_seq_len,
)
else:
inferencer = FastSpeechInferencer(
'fastspeech',
model,
data_loader=data_loader,
ckpt_path=hp.checkpoint_path,
log_path=hp.log_path,
device=device,
use_fp16=hp.use_fp16)
return inferencer
if __name__ == '__main__':
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
fire.Fire(infer)
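# A plausible invocation (paths are illustrative, not repository defaults):
#   python fastspeech/infer.py --dataset_path=/data/LJSpeech-1.1 \
#       --checkpoint_path=/checkpoints/fastspeech --n_iters=1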
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/infer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return np.exp(x) / C
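# The two functions are inverses for inputs above clip_val when the same C is used.
# Illustrative example (hypothetical values):
#   x = np.array([0.1, 1.0, 10.0])
#   dynamic_range_decompression(dynamic_range_compression(x))  # -> array([ 0.1,  1. , 10. ])
# Values below clip_val (1e-5) are clipped before the log, so they do not round-trip exactly.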
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/audio.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from torch.utils.data import DataLoader
class PadDataLoader(DataLoader):
@staticmethod
def pad_collate_fn(batch):
"""
Apply zero-padding.
"""
# TODO refactor
result = dict()
for key in batch[0].keys():
# apply padding on dataset
sub_batch = [elem[key] for elem in batch]
# check diff dims
if not isinstance(sub_batch[0], np.ndarray):
# if list of float or int
assert all([type(x) == type(sub_batch[0]) for x in sub_batch[1:]]), sub_batch
if isinstance(sub_batch[0], int):
sub_batch = torch.LongTensor(sub_batch)
elif isinstance(sub_batch[0], float):
sub_batch = torch.DoubleTensor(sub_batch)
elif any(list(map(lambda x: x.shape != sub_batch[0].shape, sub_batch[1:]))):
sub_batch = torch.from_numpy(__class__.pad_zero(sub_batch))
else:
sub_batch = torch.from_numpy(np.concatenate(np.expand_dims(sub_batch, axis=0)))
result[key] = sub_batch
return result
def __init__(self, dataset, batch_size, num_workers, shuffle=True, pin_memory=True, drop_last=True):
super().__init__(dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=pin_memory,
collate_fn=self.pad_collate_fn,
drop_last=drop_last
)
@staticmethod
def pad_zero(sub_batch):
dims = [b.shape for b in sub_batch]
max_dims = list(dims[0])
for d_li in dims[1:]:
for d_idx in range(len(d_li)):
if max_dims[d_idx] < d_li[d_idx]:
max_dims[d_idx] = d_li[d_idx]
temp = np.zeros((len(sub_batch), *max_dims), dtype=sub_batch[0].dtype)
for i, b in enumerate(sub_batch):
if len(b.shape) == 1:
temp[i, :b.shape[0]] = b
elif len(b.shape) == 2:
temp[i, :b.shape[0], :b.shape[1]] = b
elif len(b.shape) == 3:
temp[i, :b.shape[0], :b.shape[1], :b.shape[2]] = b
else:
raise ValueError
return temp
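# Illustrative example of the padding behavior (hypothetical values):
#   PadDataLoader.pad_zero([np.array([1, 2, 3]), np.array([4, 5])])
#   # -> array([[1, 2, 3],
#   #           [4, 5, 0]])
# i.e. every element is zero-padded up to the per-dimension maximum across the batch.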
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/data_load.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import fire
import torch
from fastspeech import DEFAULT_DEVICE
from fastspeech import hparam as hp
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.text_dataset import TextDataset
from fastspeech.inferencer.fastspeech_inferencer import FastSpeechInferencer
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.trt.fastspeech_trt_inferencer import FastSpeechTRTInferencer
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy
from collections import OrderedDict
import sys
import numpy as np
from torch.nn import functional as F
# import multiprocessing
# multiprocessing.set_start_method('spawn', True)
pp = pprint.PrettyPrinter(indent=4, width=1000)
np.set_printoptions(threshold=sys.maxsize)
SAMPLE_TEXT = "the more you buy, the more you save."
def verify(hparam="trt.yaml",
text=SAMPLE_TEXT,
**kwargs):
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
tprint("Device count: {}".format(torch.cuda.device_count()))
outs_trt, acts_trt = infer_trt(text)
outs, acts = infer_pytorch(text)
both, pytorch, trt = join_dict(acts, acts_trt)
# print diff
print("## Diff ##\n\n")
for name, (act, act_trt) in both.items():
act = act.float()
act_trt = act_trt.float()
diff = act.reshape(-1) - act_trt.reshape(-1)
is_identical = diff.eq(0).all()
errors = diff[diff.ne(0)]
max_error = torch.max(torch.abs(errors)) if len(errors) > 0 else 0
print("# {} #\n\n[PyTorch]\n{}\n\n[TRT]: \n{}\n\n[Diff]: \n{}\n\n[Errors]: \n{}\n- identical? {}\n- {} errors out of {}\n- max: {}\n\n".format(name,
act,
act_trt,
diff,
errors,
is_identical,
len(errors),
len(diff),
max_error,
))
# print("## PyTorch ##\n\n")
# for name, act in pytorch.items():
# print("[{}]\npytorch:\n{}\n\n".format(name, act))
# print("## TRT ##\n\n")
# for name, act in trt.items():
# print("[{}]\ttrt:\n{}\n\n".format(name, act_trt))
def join_dict(acts, acts_trt):
both = dict()
left = dict()
right = dict()
for k in acts:
if k in acts_trt:
both[k] = (acts[k], acts_trt[k])
else:
left[k] = acts[k]
for k in acts_trt:
if k not in acts:
right[k] = acts_trt[k]
return both, left, right
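# Illustrative example (hypothetical values):
#   join_dict({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   # -> both={'b': (2, 3)}, pytorch-only={'a': 1}, trt-only={'c': 4}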
def infer_trt(text):
# model
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
# dataset
dataset = TextDataset([text for _ in range(hp.batch_size)])
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
drop_last=False)
# inferencer
inferencer = FastSpeechTRTInferencer('fastspeech',
model,
data_loader=data_loader,
ckpt_path=hp.checkpoint_path,
trt_max_ws_size=hp.trt_max_ws_size,
trt_file_path=hp.trt_file_path,
trt_force_build=hp.trt_force_build,
use_fp16=hp.use_fp16,
trt_max_input_seq_len=hp.trt_max_input_seq_len,
trt_max_output_seq_len=hp.trt_max_output_seq_len,
validate_accuracy=True,
)
with inferencer:
acts = dict()
outs = inferencer.infer(acts=acts)
return outs, acts
def infer_pytorch(text):
# model
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
# dataset
dataset = TextDataset([text for _ in range(hp.batch_size)])
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
drop_last=False)
# inferencer
with torch.no_grad():
inferencer = FastSpeechInferencer('fastspeech',
model,
data_loader=data_loader,
ckpt_path=hp.checkpoint_path,
device='cuda',
use_fp16=hp.use_fp16,
)
acts = dict()
outs = inferencer.infer(acts=acts,
seq_input_len=hp.trt_max_input_seq_len,
seq_output_len=hp.trt_max_output_seq_len)
return outs, acts
if __name__ == '__main__':
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
fire.Fire(verify)
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/verify_trt.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import torch
import tensorrt as trt
from fastspeech.trt import TRT_BASE_PATH, TRT_LOGGER
import fastspeech.trt.common as common
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy, to_gpu_async
from fastspeech.inferencer.waveglow_inferencer import WaveGlowInferencer
from fastspeech.inferencer.denoiser import Denoiser
import pycuda.driver as cuda
class WaveGlowTRTInferencer(object):
def __init__(self, ckpt_file, engine_file, use_fp16=False, use_denoiser=False, stride=256, n_groups=8):
self.ckpt_file = ckpt_file
self.engine_file = engine_file
self.use_fp16 = use_fp16
self.use_denoiser = use_denoiser
self.stride = stride
self.n_groups = n_groups
if self.use_denoiser:
sys.path.append('waveglow')
waveglow = torch.load(self.ckpt_file)['model']
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow.eval()
self.denoiser = Denoiser(waveglow)
self.denoiser = to_gpu_async(self.denoiser)
tprint('Using WaveGlow denoiser.')
# after initialization, we don't need WaveGlow PyTorch checkpoint
# anymore - deleting
del waveglow
torch.cuda.empty_cache()
# load engine
with open(self.engine_file, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read())
if self.engine:
tprint('TRT Engine Loaded from {} successfully.'.format(self.engine_file))
return
else:
tprint('Loading TRT Engine from {} failed.'.format(self.engine_file))
def __enter__(self):
self.context = self.engine.create_execution_context()
def __exit__(self, exception_type, exception_value, traceback):
self.context.__del__()
self.engine.__del__()
def infer(self, mels):
batch_size, _, mel_size = mels.shape
mels = mels.unsqueeze(3)
z = torch.randn(batch_size, self.n_groups, mel_size * self.stride // self.n_groups, 1)
wavs = torch.zeros(batch_size, mel_size * self.stride)
if self.use_fp16:
z = z.half()
mels = mels.half()
wavs = wavs.half()
mels = to_gpu_async(mels)
z = to_gpu_async(z)
wavs = to_gpu_async(wavs)
# create inputs/outputs buffers
input_buffers = common.create_inputs_from_torch(self.engine, [mels, z])
output_buffers = common.create_outputs_from_torch(self.engine, [wavs.shape])
# set shapes of inputs
self.context = common.set_input_shapes(self.engine, self.context, input_buffers)
# execute
stream = cuda.Stream()
bindings = [int(data.data_ptr()) for data in (input_buffers + output_buffers)]
self.context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
stream.synchronize()
wavs = output_buffers[0]
# denoise
if self.use_denoiser:
wavs = self.denoiser(wavs, strength=0.01)
return wavs.float()
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/waveglow_trt_inferencer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tensorrt as trt
TRT_BASE_PATH = os.path.dirname(__file__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
from itertools import chain
import numpy as np
import torch
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
##
# Common
##
def GiB(val):
return val * 1 << 30
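# Note: `val * 1 << 30` parses as `(val * 1) << 30`, i.e. val * 2**30 bytes.
# Illustrative example: GiB(2) == 2147483648.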
def input_binding_indices(engine):
return [i for i in range(engine.num_bindings) if engine.binding_is_input(i)]
def output_binding_indices(engine):
return [i for i in range(engine.num_bindings) if not engine.binding_is_input(i)]
def trt_input_names(engine):
return [engine.get_binding_name(i) for i in input_binding_indices(engine)]
def trt_output_names(engine):
return [engine.get_binding_name(i) for i in output_binding_indices(engine)]
def set_input_shapes(engine, context, inputs):
def is_dimension_dynamic(dim):
return dim is None or dim <= 0
def is_shape_dynamic(shape):
return any([is_dimension_dynamic(dim) for dim in shape])
for idx, tensor in enumerate(inputs):
if engine.is_shape_binding(idx) and is_shape_dynamic(context.get_shape(idx)):
context.set_shape_input(idx, tensor)
elif is_shape_dynamic(engine.get_binding_shape(idx)):
context.set_binding_shape(idx, tensor.shape)
return context
##
# Pytorch Compatibility
##
# Modified from https://github.com/NVIDIA-AI-IOT/jetbot/blob/cf3e264ae6/jetbot/tensorrt_model.py
def torch_dtype_to_trt(dtype):
if dtype == torch.bool:
return trt.bool
elif dtype == torch.int8:
return trt.int8
elif dtype == torch.int32:
return trt.int32
elif dtype == torch.float16:
return trt.float16
elif dtype == torch.float32:
return trt.float32
else:
raise TypeError('%s is not supported by tensorrt' % dtype)
def torch_dtype_from_trt(dtype):
if dtype == trt.bool:
return torch.bool
elif dtype == trt.int8:
return torch.int8
elif dtype == trt.int32:
return torch.int32
elif dtype == trt.float16:
return torch.float16
elif dtype == trt.float32:
return torch.float32
else:
raise TypeError('%s is not supported by torch' % dtype)
def torch_device_to_trt(device):
if device.type == torch.device('cuda').type:
return trt.TensorLocation.DEVICE
elif device.type == torch.device('cpu').type:
return trt.TensorLocation.HOST
else:
raise TypeError('%s is not supported by tensorrt' % device)
def torch_device_from_trt(device):
if device == trt.TensorLocation.DEVICE:
return torch.device('cuda')
elif device == trt.TensorLocation.HOST:
return torch.device('cpu')
else:
raise TypeError('%s is not supported by torch' % device)
def create_inputs_from_torch(engine, inputs_torch):
input_ids = input_binding_indices(engine)
for i, idx in enumerate(input_ids):
inputs_torch[i] = inputs_torch[i].to(torch_device_from_trt(engine.get_location(idx)))
inputs_torch[i] = inputs_torch[i].type(torch_dtype_from_trt(engine.get_binding_dtype(idx)))
return inputs_torch
def create_outputs_from_torch(engine, outputs_shapes=None):
output_ids = output_binding_indices(engine)
outputs = [None] * len(output_ids)
for i, idx in enumerate(output_ids):
dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))
shape = outputs_shapes[i] if outputs_shapes and outputs_shapes[i] else tuple(engine.get_binding_shape(idx))
device = torch_device_from_trt(engine.get_location(idx))
output = torch.empty(size=shape, dtype=dtype, device=device)
outputs[i] = output
return outputs
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/common.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ctypes
import glob
import os
import pathlib
import sys
from collections import OrderedDict
import numpy as np
import pycuda.driver as cuda
import tensorrt as trt
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorrt import Dims, ElementWiseOperation, MatrixOperation, Weights
import fastspeech.trt.common as common
from fastspeech.trt import TRT_BASE_PATH, TRT_LOGGER
from fastspeech.trt.trt_inferencer import TRTInferencer
from fastspeech.utils.logging import tprint
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.pytorch import (remove_module_in_state_dict,
to_cpu_numpy, to_gpu_async)
class FastSpeechTRTInferencer(TRTInferencer):
def __init__(self, model_name, model, data_loader, ckpt_path=None, ckpt_file=None,
trt_max_ws_size=1, trt_file_path=None, trt_force_build=False, use_fp16=False,
trt_max_input_seq_len=256, trt_max_output_seq_len=1024, validate_accuracy=False):
self.trt_max_input_seq_len = trt_max_input_seq_len
self.trt_max_output_seq_len = trt_max_output_seq_len
self.validate_accuracy = validate_accuracy
self.load_plugin(os.path.join(TRT_BASE_PATH, 'plugins/repeat/RepeatPlugin.so'))
self.load_plugin(os.path.join(TRT_BASE_PATH, 'plugins/add_pos_enc/AddPosEncPlugin.so'))
super(FastSpeechTRTInferencer, self).__init__(model_name, model, data_loader, ckpt_path, ckpt_file, trt_max_ws_size, trt_file_path, trt_force_build, use_fp16)
def build_engine(self):
engine = None
if self.trt_file_path and os.path.isfile(self.trt_file_path) and not self.trt_force_build:
with open(self.trt_file_path, 'rb') as f:
engine_str = f.read()
with trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(engine_str)
if engine:
tprint('TRT Engine Loaded from {} successfully.'.format(self.trt_file_path))
return engine
else:
tprint('Loading TRT Engine from {} failed.'.format(self.trt_file_path))
tprint('Building a TRT Engine..')
engine = self.do_build_engine()
tprint('TRT Engine Built.')
if self.trt_file_path:
with open(self.trt_file_path, 'wb') as f:
f.write(engine.serialize())
tprint('TRT Engine Saved in {}.'.format(self.trt_file_path))
return engine
def create_plugins(self):
# create "adding positional encoding" plugin
self.plugins['AddPosEncPlugin'] = self.get_plugin_creator(
'AddPosEncPlugin').create_plugin('AddPosEncPlugin', trt.PluginFieldCollection())
# create "repeat" plugin
self.plugins['RepeatPlugin'] = self.get_plugin_creator('RepeatPlugin').create_plugin('RepeatPlugin', trt.PluginFieldCollection([
trt.PluginField('maxOutputLength', np.array(
[self.trt_max_output_seq_len], dtype=np.int32), trt.PluginFieldType.INT32)
]))
def do_build_engine(self):
weights = self.model.state_dict()
weights = self.preprocess_weights(weights)
self.create_plugins()
flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(flags) as network:
builder.max_workspace_size = common.GiB(self.trt_max_ws_size)
builder.fp16_mode = self.use_fp16
# builder.strict_type_constraints = True
network = self.populate_network(network, weights, self.batch_size, self.trt_max_input_seq_len, self.trt_max_output_seq_len)
return builder.build_cuda_engine(network)
def infer(self, acts=None):
inputs = next(self.data_loader_iter)
text_encoded = inputs["text_encoded"] # (b, t)
text_pos = inputs["text_pos"] # (b, t)
text_encoded = F.pad(text_encoded, pad=(0, self.trt_max_input_seq_len - text_encoded.size(1))) # (b, t)
text_pos = F.pad(text_pos, pad=(0, self.trt_max_input_seq_len - text_pos.size(1))) # (b, t)
text_mask = text_pos.ne(0) # padded is False
# TODO: process word emb in TRT if the API allows.
with torch.no_grad():
text_encoded = self.model.word_emb(text_encoded)
if self.use_fp16:
text_encoded = text_encoded.half()
# create input/output buffers
input_buffers = common.create_inputs_from_torch(self.engine, [text_encoded, text_mask])
output_buffers = common.create_outputs_from_torch(self.engine)
# execute
# self.context.profiler = trt.Profiler()
stream = cuda.Stream()
bindings = [int(data.data_ptr()) for data in (input_buffers + output_buffers)]
self.context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# self.context.execute(batch_size=self.batch_size, bindings=bindings)
stream.synchronize()
outputs = dict()
outputs['mel'] = output_buffers[-2]
outputs['mel_mask'] = output_buffers[-1]
outputs['text'] = inputs["text_norm"]
# activations for verifying accuracy.
if acts is not None:
act_names = common.trt_output_names(self.engine)
n_acts = len(output_buffers) - 2 # exclude outputs(mel and mel_mask)
for i in range(n_acts):
acts[act_names[i]] = output_buffers[i]
return outputs
def add_activation_as_output(self, network, tensor, tensor_name):
tensor.name = tensor_name
network.mark_output(tensor=tensor)
def populate_network(self, network, weights, batch_size, trt_max_input_seq_len, trt_max_output_seq_len):
d_model = self.model.d_model
##
# Inputs
##
out_seq = network.add_input(
name="input_seq", dtype=trt.float32, shape=(batch_size, trt_max_input_seq_len, d_model)) # (b, t, d_model)
#
zeros = network.add_constant(weights=Weights(
np.zeros(shape=(batch_size, trt_max_input_seq_len, 1), dtype=np.float32)),
shape=(batch_size, trt_max_input_seq_len, 1)) # (b, t, 1)
out_zeros = zeros.get_output(0) # (b, t, 1)
seq = network.add_elementwise(input1=out_seq, input2=out_zeros, op=trt.ElementWiseOperation.SUM)
out_seq = seq.get_output(0) # (b, t, d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out_seq, "act.emb")
#
out_seq_mask = network.add_input( # paddings are False
name="input_mask", dtype=trt.bool, shape=(batch_size, trt_max_input_seq_len, 1)) # (b, t, 1)
##
# Phoneme-side FFT Blocks
##
# Positional Encoding
# The plugin also adds positional encoding to the padding positions (for better performance), whereas the PyTorch impl does not.
# This is fine because the padding values are eventually masked out in subsequent layers, so the output remains accurate.
seq = network.add_plugin_v2([out_seq], self.get_plugin('AddPosEncPlugin'))
seq.name = "phoneme_side.add_pos_enc"
out_seq = seq.get_output(0) # (b, t, d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out_seq, "act.phoneme_side.add_pos_enc")
for layer_idx in range(self.model.phoneme_side_n_layer):
out_seq = self.populate_fft(name='phoneme_side.layer_stack.{}'.format(layer_idx),
network=network,
weights=weights,
seq_tensor=out_seq,
seq_mask_tensor=out_seq_mask,
batch_size=self.batch_size,
max_seq_len=trt_max_input_seq_len,
d_model=d_model,
n_heads=self.model.phoneme_side_head,
d_k=self.model.phoneme_side.d_k,
d_v=self.model.phoneme_side.d_v,
self_attn_temp=self.model.phoneme_side.d_k**0.5,
conv_filter_size=self.model.phoneme_side_conv1d_filter_size,
conv_kernel_size=self.model.fft_conv1d_kernel,
conv_padding=self.model.fft_conv1d_padding)
if self.validate_accuracy:
self.add_activation_as_output(network, out_seq, "act.phoneme_side.seq")
out_seq, out_seq_mask, out_dur = self.populate_length_regulator(name="length_regulator",
network=network,
weights=weights,
seq_tensor=out_seq,
seq_mask_tensor=out_seq_mask,
batch_size=batch_size,
trt_max_input_seq_len=trt_max_input_seq_len,
trt_max_output_seq_len=trt_max_output_seq_len,
d_model=d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out_seq, "act.length_regulator.seq")
self.add_activation_as_output(network, out_dur, "act.length_regulator.dur")
##
# Mel-side FFT Blocks
##
# Type int to bool: out_seq_mask. TODO: remove if bool output is allowed in the plugin.
ones = network.add_constant(weights=Weights(
np.ones(shape=(batch_size, trt_max_output_seq_len, 1), dtype=np.int32)),
shape=(batch_size, trt_max_output_seq_len, 1)) # (b, t, 1)
out_ones = ones.get_output(0) # (b, t, 1)
seq_mask = network.add_elementwise(input1=out_seq_mask,
input2=out_ones,
op=ElementWiseOperation.EQUAL) # (b, t, 1)
seq_mask.name = "mel_side.seq_mask"
out_seq_mask = seq_mask.get_output(0)
# Positional Encoding
seq = network.add_plugin_v2([out_seq], self.get_plugin('AddPosEncPlugin'))
seq.name = "mel_side.add_pos_enc"
out_seq = seq.get_output(0)
if self.validate_accuracy:
self.add_activation_as_output(network, out_seq, "act.mel_side.add_pos_enc")
for layer_idx in range(self.model.mel_side_n_layer):
out_seq = self.populate_fft(name="mel_side.layer_stack.{}".format(layer_idx),
network=network,
weights=weights,
seq_tensor=out_seq,
seq_mask_tensor=out_seq_mask,
batch_size=self.batch_size,
max_seq_len=trt_max_output_seq_len,
d_model=d_model,
n_heads=self.model.mel_side_head,
d_k=self.model.mel_side.d_k,
d_v=self.model.mel_side.d_v,
self_attn_temp=self.model.mel_side.d_k**0.5,
conv_filter_size=self.model.mel_side_conv1d_filter_size,
conv_kernel_size=self.model.fft_conv1d_kernel,
conv_padding=self.model.fft_conv1d_padding)
if self.validate_accuracy:
self.add_activation_as_output(network, out_seq, "act.mel_side.seq")
##
# Linear
##
# Pytorch: self.mel_linear = nn.Linear(mel_side_output_size, n_mels, bias=True)
w = weights["mel_linear.weight"] # (n_mels, d_model)
out_w = network.add_constant(shape=(1, self.model.n_mels, d_model), weights=trt.Weights(w)).get_output(0) # (1, n_mels, d_model)
linear_w = network.add_matrix_multiply(out_seq, MatrixOperation.NONE, out_w, MatrixOperation.TRANSPOSE) # (b, t, d_model) * (1->b, d_model, n_mels) => (b, t, n_mels)
linear_w.name = "linear.w"
out_seq = linear_w.get_output(0) # (b, t, n_mels)
b = weights["mel_linear.bias"] # (n_mels,)
out_b = network.add_constant(shape=(1, 1, self.model.n_mels), weights=trt.Weights(b)).get_output(0) # (1, 1, n_mels)
linear_b = network.add_elementwise(input1=out_seq, input2=out_b, op=trt.ElementWiseOperation.SUM)
linear_b.name = "linear.b"
out_seq = linear_b.get_output(0) # (b, t, n_mels)
##
# Outputs
##
if self.validate_accuracy:
self.add_activation_as_output(network, out_seq_mask, "out.seq_mask")
self.add_activation_as_output(network, out_seq, "out.seq")
seq = network.add_shuffle(input=out_seq) # (b, t, n_mels) to (b, n_mels, t)
seq.reshape_dims = Dims((batch_size, trt_max_output_seq_len, self.model.n_mels))
seq.second_transpose = trt.Permutation([0, 2, 1])
seq.name = "trans_seq"
out_seq = seq.get_output(0)
seq_mask = network.add_shuffle(input=out_seq_mask) # (b, t, 1) to (b, t)
seq_mask.reshape_dims = Dims((batch_size, trt_max_output_seq_len))
out_seq_mask = seq_mask.get_output(0) # (b, t)
network.mark_output(tensor=out_seq) # (b, n_mels, t)
network.mark_output(tensor=out_seq_mask) # (b, t)
return network
def populate_fft(self, name, network, weights, seq_tensor, seq_mask_tensor, batch_size,
max_seq_len, d_model, n_heads, d_k, d_v, self_attn_temp,
conv_filter_size, conv_kernel_size, conv_padding):
# Self attn
out = self.populate_slf_attn("{}.slf_attn".format(name), network, weights, seq_tensor, seq_mask_tensor, batch_size,
max_seq_len, d_model, n_heads, d_k, d_v) # (b, t, d_model)
# Masking
zeros = network.add_constant(weights=Weights(
np.zeros(shape=(batch_size, max_seq_len, 1), dtype=np.float32)),
shape=(batch_size, max_seq_len, 1)) # (b, t, 1)
out_zeros = zeros.get_output(0) # (b, t, 1)
seq = network.add_select(condition=seq_mask_tensor, then_input=out, else_input=out_zeros)
seq.name = "{}.mask1".format(name)
out = seq.get_output(0) # (b, t, d_model)
# Position-wise
out = self.populate_pos_wise("{}.pos_ffn".format(name), network, weights, out,
batch_size, max_seq_len, d_model,
conv_filter_size, conv_kernel_size, conv_padding) # (b, t, d_model)
# Masking
seq = network.add_select(condition=seq_mask_tensor, then_input=out, else_input=out_zeros)
seq.name = "{}.mask2".format(name)
out = seq.get_output(0) # (b, t, d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}".format(name))
return out
def populate_slf_attn(self, name, network, weights, seq_tensor, seq_mask_tensor, batch_size,
max_seq_len, d_model, n_heads, d_k, d_v):
d_qkv = d_k + d_k + d_v
# Pytorch: x = self.linear(x)
w = weights["{}.linear.weight".format(name)] # (n_heads * d_qkv, d_model)
out_w = network.add_constant(shape=(1, n_heads * d_qkv, d_model), weights=trt.Weights(w)).get_output(0) # (1, n_heads * d_qkv, d_model)
linear_w = network.add_matrix_multiply(seq_tensor, MatrixOperation.NONE, out_w, MatrixOperation.TRANSPOSE) # (b, t, d_model) * (1->b, d_model, n_heads * d_qkv) => (b, t, n_heads * d_qkv)
linear_w.name = "{}.linear.w".format(name)
out = linear_w.get_output(0) # (b, t, n_heads * d_qkv)
b = weights["{}.linear.bias".format(name)] # (n_heads * d_qkv,)
out_b = network.add_constant(shape=(1, 1, n_heads * d_qkv), weights=trt.Weights(b)).get_output(0) # (1, 1, n_heads * d_qkv)
linear_b = network.add_elementwise(input1=out, input2=out_b, op=trt.ElementWiseOperation.SUM)
linear_b.name = "{}.linear.b".format(name)
out = linear_b.get_output(0) # (b, t, n_heads * d_qkv)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.linear".format(name))
trans1 = network.add_shuffle(input=out) # (b, t, n_heads * d_qkv) to (b, n_heads, t, d_qkv)
trans1.reshape_dims = Dims(
(batch_size, max_seq_len, n_heads, d_qkv))
trans1.second_transpose = trt.Permutation([0, 2, 1, 3])
trans1.name = "{}.trans1".format(name)
out = trans1.get_output(0) # (b, n_heads, t, d_qkv)
# if self.validate_accuracy:
# self.add_activation_as_output(network, out, "act.{}.reshape".format(name))
q = network.add_slice(input=out,
start=Dims((0, 0, 0, 0)),
shape=Dims(
(batch_size, n_heads, max_seq_len, d_k)),
stride=Dims((1, 1, 1, 1)))
q.name = "{}.slide_q".format(name)
k = network.add_slice(input=out,
start=Dims((0, 0, 0, d_k)),
shape=Dims(
(batch_size, n_heads, max_seq_len, d_k)),
stride=Dims((1, 1, 1, 1)))
k.name = "{}.slide_k".format(name)
v = network.add_slice(input=out,
start=Dims((0, 0, 0, 2 * d_k)),
shape=Dims(
(batch_size, n_heads, max_seq_len, d_k)),
stride=Dims((1, 1, 1, 1)))
v.name = "{}.slide_v".format(name)
out_q = q.get_output(0) # (b, n_heads, t, d_q)
out_k = k.get_output(0) # (b, n_heads, t, d_k)
out_v = v.get_output(0) # (b, n_heads, t, d_v)
# Pytorch: output, attn = self.attention(q, k, v, mask=mask)
out = self.populate_scaled_dot(
name="{}.scaled_dot".format(name), # (b, n_heads, t, d_k)
network=network,
q_tensor=out_q,
k_tensor=out_k,
v_tensor=out_v,
mask_tensor=seq_mask_tensor,
batch_size=batch_size,
max_seq_len=max_seq_len,
n_heads=n_heads,
temperature=d_k**0.5)
# Pytorch:
# output = output.view(self.n_head, bs, seq_len, self.d_v)
# output = output.permute(1, 2, 0, 3).contiguous().view(bs, seq_len, self.n_head * self.d_v)
trans2 = network.add_shuffle(input=out) # (b, n_heads, t, d_k) to (b, t, n_heads * d_v)
trans2.first_transpose = trt.Permutation([0, 2, 1, 3])
trans2.reshape_dims = Dims((batch_size, max_seq_len, n_heads * d_v))
trans2.name = "{}.trans2".format(name)
out = trans2.get_output(0) # (b, t, n_heads * d_k)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.scaled_dot".format(name))
# Pytorch: output = self.fc(output)
w = weights["{}.fc.weight".format(name)] # (d_model, n_heads * d_v)
out_w = network.add_constant(shape=(1, d_model, n_heads * d_v), weights=trt.Weights(w)).get_output(0) # (1, d_model, n_heads * d_v)
fc_w = network.add_matrix_multiply(out, MatrixOperation.NONE, out_w, MatrixOperation.TRANSPOSE) # (b, t, n_heads * d_k) * (1->b, n_heads * d_k, d_model) => (b, t, d_model)
fc_w.name = "{}.fc.w".format(name)
out = fc_w.get_output(0) # (b, t, d_model)
b = weights["{}.fc.bias".format(name)] # (d_model,)
out_b = network.add_constant(shape=(1, 1, d_model), weights=trt.Weights(b)).get_output(0) # (1, 1, d_model)
fc_b = network.add_elementwise(input1=out, input2=out_b, op=trt.ElementWiseOperation.SUM)
fc_b.name = "{}.fc.b".format(name)
out = fc_b.get_output(0) # (b, t, d_model)
# if self.validate_accuracy:
# self.add_activation_as_output(network, out, "act.{}.fc".format(name))
# Pytorch: output += residual
residual = network.add_elementwise(input1=seq_tensor, input2=out, op=ElementWiseOperation.SUM)
residual.name = "{}.residual".format(name)
out = residual.get_output(0) # (b, t, d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.residual".format(name))
# Pytorch: output = self.layer_norm(output)
out = self.populate_layernorm(name="{}.layer_norm".format(name),
network=network,
weights=weights,
seq_tensor=out,
batch_size=self.batch_size,
max_seq_len=max_seq_len,
d_layer=d_model,
) # (b, t, d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.ln".format(name))
return out
def populate_scaled_dot(self, name, network, q_tensor, k_tensor, v_tensor, mask_tensor, batch_size, max_seq_len, n_heads, temperature):
# if self.validate_accuracy:
# self.add_activation_as_output(network, q_tensor, "act.{}.q".format(name))
# self.add_activation_as_output(network, k_tensor, "act.{}.k".format(name))
# self.add_activation_as_output(network, v_tensor, "act.{}.v".format(name))
# Pytorch: attn = self.bmm1(q, k.transpose(1, 2))
attn = network.add_matrix_multiply(q_tensor, MatrixOperation.NONE, k_tensor, MatrixOperation.TRANSPOSE) # (b, n, t, d_k) * (b, n, d_k, t) = (b, n, t, t)
attn.name = "{}.bmm1".format(name)
out = attn.get_output(0)
# if self.validate_accuracy:
# self.add_activation_as_output(network, out, "act.{}.bmm1".format(name))
# Pytorch: attn = attn / self.temperature
temperature = network.add_constant(weights=Weights(np.full((batch_size, n_heads, max_seq_len, max_seq_len), temperature, dtype=np.float32)),
shape=Dims((batch_size, n_heads, max_seq_len, max_seq_len))) # (b, n, t, t)
output_temperature = temperature.get_output(0)
attn = network.add_elementwise(input1=out, input2=output_temperature, op=ElementWiseOperation.DIV) # (b, n, t, t)
attn.name = "{}.div".format(name)
out = attn.get_output(0)
# Pytorch: attn = attn.masked_fill(mask, -65504)
minus_inf = network.add_constant(weights=Weights(np.full((batch_size, n_heads, max_seq_len, max_seq_len), -65504, dtype=np.float32)),
shape=Dims((batch_size, n_heads, max_seq_len, max_seq_len))) # (b, n, t, t)
output_minus_inf = minus_inf.get_output(0)
mask = network.add_shuffle(input=mask_tensor)
mask.reshape_dims = Dims((batch_size, 1, 1, max_seq_len)) # (b, t, 1) -> (b, 1, 1, t)
mask.name = "{}.mask_reshape".format(name)
mask_tensor = mask.get_output(0)
attn = network.add_select(condition=mask_tensor, # (b, 1->n, 1, t)
then_input=out, # (b, n, t, t)
else_input=output_minus_inf) # (b, n, t, t)
attn.name = "{}.mask".format(name)
out = attn.get_output(0)
# if self.validate_accuracy:
# self.add_activation_as_output(network, out, "act.{}.masked_fill".format(name))
# Pytorch: attn = self.softmax(attn)
softmax = network.add_softmax(input=out)
softmax.axes = (1 << 3) # dim=3
softmax.name = "{}.softmax".format(name)
out = softmax.get_output(0)
# if self.validate_accuracy:
# self.add_activation_as_output(network, out, "act.{}.softmax".format(name))
# Pytorch: output = self.bmm2(attn, v)
attn = network.add_matrix_multiply(out, MatrixOperation.NONE, v_tensor, MatrixOperation.NONE) # (b, n, t, t) * (b, n, t, d_k) => (b, n, t, d_k)
attn.name = "{}.bmm2".format(name)
out = attn.get_output(0)
# if self.validate_accuracy:
# self.add_activation_as_output(network, out, "act.{}.bmm2".format(name))
return out
def populate_pos_wise(self, name, network, weights, seq_tensor,
batch_size, max_seq_len, d_model,
conv_filter_size, conv_kernel_size, conv_padding):
# Pytorch: output = x.transpose(1, 2)
trans1 = network.add_shuffle(input=seq_tensor) # (b, t, d_model) to (b, d_model, t, 1)
trans1.first_transpose = trt.Permutation([0, 2, 1])
trans1.reshape_dims = Dims((batch_size, d_model, max_seq_len, 1))
trans1.name = "{}.trans1".format(name)
out = trans1.get_output(0) # (b, d_model, t, 1)
# Pytorch: output = self.w_1(output)
conv1_w = weights["{}.w_1.weight".format(name)] # (1, conv_filter_size, d_model, conv_kernel_size, 1)
conv1_b = weights["{}.w_1.bias".format(name)] # (conv_filter_size,)
conv1 = network.add_convolution(input=out, num_output_maps=conv_filter_size, kernel_shape=trt.DimsHW(conv_kernel_size, 1),
kernel=Weights(conv1_w), bias=Weights(conv1_b))
conv1.padding = trt.DimsHW(1, 0)
conv1.name = "{}.conv1".format(name)
out = conv1.get_output(0) # (b, conv_filter_size, t, 1)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.conv1".format(name))
# Pytorch: output = F.relu(output)
relu = network.add_activation(input=out, type=trt.ActivationType.RELU)
relu.name = "{}.relu".format(name)
out = relu.get_output(0) # (b, conv_filter_size, t, 1)
# Pytorch: output = self.w_2(output)
conv2_w = weights["{}.w_2.weight".format(name)] # (1, d_model, conv_filter_size, conv_kernel_size, 1)
conv2_b = weights["{}.w_2.bias".format(name)] # (d_model, )
conv2 = network.add_convolution(input=out, num_output_maps=d_model, kernel_shape=trt.DimsHW(conv_kernel_size, 1),
kernel=Weights(conv2_w), bias=Weights(conv2_b))
conv2.padding = trt.DimsHW(1, 0)
conv2.name = "{}.conv2".format(name)
out = conv2.get_output(0) # (b, d_model, t, 1)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.conv2".format(name))
# Pytorch: output = output.transpose(1, 2)
trans2 = network.add_shuffle(input=out) # (b, d_model, t, 1) to (b, t, d_model)
trans2.first_transpose = trt.Permutation([0, 2, 1, 3])
trans2.reshape_dims = Dims((batch_size, max_seq_len, d_model))
trans2.name = "{}.trans2".format(name)
out = trans2.get_output(0) # (b, t, d_model)
# Pytorch: output += residual
residual = network.add_elementwise(input1=seq_tensor, input2=out, op=trt.ElementWiseOperation.SUM)
residual.name = "{}.residual".format(name)
out = residual.get_output(0) # (b, t, d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.residual".format(name))
# Pytorch: output = self.layer_norm(output)
out = self.populate_layernorm(name="{}.layer_norm".format(name),
network=network,
weights=weights,
seq_tensor=out,
batch_size=self.batch_size,
max_seq_len=max_seq_len,
d_layer=d_model,
) # (b, t, d_model)
if self.validate_accuracy:
self.add_activation_as_output(network, out, "act.{}.ln".format(name))
return out
def populate_length_regulator(self, name, network, weights, seq_tensor, seq_mask_tensor, batch_size, trt_max_input_seq_len, trt_max_output_seq_len, d_model):
out_dur = self.populate_duration_predictor(name="{}.duration_predictor".format(name),
network=network,
weights=weights,
seq_tensor=seq_tensor,
seq_mask_tensor=seq_mask_tensor,
batch_size=batch_size,
max_seq_len=trt_max_input_seq_len,
d_model=d_model) # (b, t)
# Pytorch: output.append(torch.repeat_interleave(input[i], repeats, dim=0))
seq = network.add_plugin_v2([seq_tensor, out_dur], self.get_plugin('RepeatPlugin'))
seq.name = "{}.repeat_seq".format(name)
out_seq = seq.get_output(0) # (b, t, d), (b, t) => (b, t', d), dtype: float32
# Convert seq_mask_tensor from bool to int. TODO: remove once bool inputs are allowed in the plugin.
zeros = network.add_constant(weights=Weights(
np.zeros(shape=(batch_size, trt_max_input_seq_len, 1), dtype=np.int32)),
shape=(batch_size, trt_max_input_seq_len, 1))
out_zeros = zeros.get_output(0) # (b, t, 1)
ones = network.add_constant(weights=Weights(
np.ones(shape=(batch_size, trt_max_input_seq_len, 1), dtype=np.int32)),
shape=(batch_size, trt_max_input_seq_len, 1))
out_ones = ones.get_output(0) # (b, t, 1)
seq_mask = network.add_select(condition=seq_mask_tensor, then_input=out_ones, else_input=out_zeros)
seq_mask.name = "{}.seq_mask".format(name)
out_seq_mask = seq_mask.get_output(0) # (b, t, 1)
seq_mask = network.add_plugin_v2([out_seq_mask, out_dur], self.get_plugin('RepeatPlugin'))
seq_mask.name = "{}.repeat_seq_mask".format(name)
out_seq_mask = seq_mask.get_output(0) # (b, t, 1), (b, t) => (b, t', 1), dtype: int32
return out_seq, out_seq_mask, out_dur
def populate_duration_predictor(self, name, network, weights, seq_tensor, seq_mask_tensor, batch_size, max_seq_len, d_model):
duration_predictor_filter_size = self.model.duration_predictor_filter_size
duration_predictor_kernel_size = self.model.duration_predictor_kernel_size
# Pytorch: input *= input_mask.to(input.dtype)
# can be skipped.
# Pytorch: out = self.conv1d_1(input.transpose(1,2)).transpose(1,2)
trans1 = network.add_shuffle(input=seq_tensor) # (b, t, d_model) to (b, d_model, t, 1)
trans1.first_transpose = trt.Permutation([0, 2, 1])
trans1.reshape_dims = Dims((batch_size, d_model, max_seq_len, 1))
trans1.name = "{}.trans1".format(name)
out = trans1.get_output(0) # (b, d_model, t, 1)
conv1_w = weights["{}.conv1d_1.weight".format(name)] # (1, d_model, duration_predictor_filter_size, duration_predictor_kernel_size, 1)
conv1_b = weights["{}.conv1d_1.bias".format(name)] # (duration_predictor_filter_size, )
conv1 = network.add_convolution(input=out, num_output_maps=duration_predictor_filter_size, kernel_shape=trt.DimsHW(duration_predictor_kernel_size, 1),
kernel=Weights(conv1_w), bias=Weights(conv1_b))
conv1.padding = trt.DimsHW(1, 0)
conv1.name = "{}.conv1".format(name)
out = conv1.get_output(0) # (b, duration_predictor_filter_size, t, 1)
trans2 = network.add_shuffle(input=out) # (b, duration_predictor_filter_size, t, 1) to (b, t, duration_predictor_filter_size)
trans2.first_transpose = trt.Permutation([0, 2, 1, 3])
trans2.reshape_dims = Dims((batch_size, max_seq_len, duration_predictor_filter_size))
trans2.name = "{}.trans2".format(name)
out = trans2.get_output(0) # (b, t, duration_predictor_filter_size)
# Pytorch: out = self.relu_1(out)
relu = network.add_activation(input=out, type=trt.ActivationType.RELU)
relu.name = "{}.relu1".format(name)
out_relu = relu.get_output(0) # (b, t, duration_predictor_filter_size)
# Pytorch: out = self.layer_norm_1(out)
out = self.populate_layernorm(name="{}.layer_norm_1".format(name),
network=network,
weights=weights,
seq_tensor=out_relu,
d_layer=duration_predictor_filter_size,
batch_size=batch_size,
max_seq_len=max_seq_len)
# Pytorch: out = self.conv1d_2(out.transpose(1,2)).transpose(1,2)
trans3 = network.add_shuffle(input=out) # (b, t, duration_predictor_filter_size) to (b, duration_predictor_filter_size, t, 1)
trans3.first_transpose = trt.Permutation([0, 2, 1])
trans3.reshape_dims = Dims((batch_size, duration_predictor_filter_size, max_seq_len, 1))
trans3.name = "{}.trans3".format(name)
out = trans3.get_output(0) # (b, duration_predictor_filter_size, t, 1)
conv2_w = weights["{}.conv1d_2.weight".format(name)] # (1, duration_predictor_filter_size, duration_predictor_filter_size, duration_predictor_kernel_size, 1)
conv2_b = weights["{}.conv1d_2.bias".format(name)] # (duration_predictor_filter_size, )
conv2 = network.add_convolution(input=out, num_output_maps=duration_predictor_filter_size, kernel_shape=trt.DimsHW(duration_predictor_kernel_size, 1),
kernel=Weights(conv2_w), bias=Weights(conv2_b))
conv2.padding = trt.DimsHW(1, 0)
conv2.name = "{}.conv2".format(name)
out = conv2.get_output(0)
trans4 = network.add_shuffle(input=out) # (b, duration_predictor_filter_size, t, 1) to (b, t, duration_predictor_filter_size)
trans4.first_transpose = trt.Permutation([0, 2, 1, 3])
trans4.reshape_dims = Dims((batch_size, max_seq_len, duration_predictor_filter_size))
trans4.name = "{}.trans4".format(name)
out = trans4.get_output(0) # (b, t, duration_predictor_filter_size)
# Pytorch: out = self.relu_2(out)
relu = network.add_activation(input=out, type=trt.ActivationType.RELU)
relu.name = "{}.relu2".format(name)
out_relu = relu.get_output(0) # (b, t, duration_predictor_filter_size)
# Pytorch: out = self.layer_norm_2(out)
out = self.populate_layernorm(name="{}.layer_norm_2".format(name),
network=network,
weights=weights,
seq_tensor=out_relu,
d_layer=duration_predictor_filter_size,
batch_size=batch_size,
max_seq_len=max_seq_len,
) # (b, t, duration_predictor_filter_size)
# Pytorch: out = self.linear_layer(out)
w = weights["{}.linear_layer.weight".format(name)] # (1, duration_predictor_filter_size)
out_w = network.add_constant(shape=(1, 1, duration_predictor_filter_size), weights=trt.Weights(w)).get_output(0) # (1, 1, duration_predictor_filter_size)
linear_w = network.add_matrix_multiply(out, MatrixOperation.NONE, out_w, MatrixOperation.TRANSPOSE) # (b, t, duration_predictor_filter_size) * (1->b, duration_predictor_filter_size, 1) => (b, t, 1)
linear_w.name = "{}.linear.w".format(name)
out = linear_w.get_output(0) # (b, t, 1)
b = weights["{}.linear_layer.bias".format(name)] # (1,)
out_b = network.add_constant(shape=(1, 1, 1), weights=trt.Weights(b)).get_output(0) # (1, 1, 1)
linear_b = network.add_elementwise(input1=out, input2=out_b, op=trt.ElementWiseOperation.SUM)
linear_b.name = "{}.linear.b".format(name)
out = linear_b.get_output(0) # (b, t, 1)
# Pytorch: out *= input_mask.to(out.dtype)
zeros = network.add_constant(weights=Weights(
np.zeros(shape=(batch_size, max_seq_len, 1), dtype=np.float32)),
shape=(batch_size, max_seq_len, 1))
out_zeros = zeros.get_output(0) # (b, t, 1)
dur = network.add_select(condition=seq_mask_tensor, then_input=out, else_input=out_zeros)
dur.name = "{}.mask".format(name)
out_dur = dur.get_output(0)
# Pytorch: duration = torch.clamp_min(torch.exp(duration) - 1, 0)
exp = network.add_unary(input=out_dur, op=trt.UnaryOperation.EXP)
exp.name = "{}.exp".format(name)
out_exp = exp.get_output(0)
ones = network.add_constant(weights=Weights(
np.ones(shape=(batch_size, max_seq_len, 1), dtype=np.float32)),
shape=(batch_size, max_seq_len, 1))
out_ones = ones.get_output(0) # (b, t, 1)
sub = network.add_elementwise(input1=out_exp, input2=out_ones, op=trt.ElementWiseOperation.SUB)
sub.name = "{}.sub_one".format(name)
out_sub = sub.get_output(0)
dur = network.add_elementwise(input1=out_sub, input2=out_zeros, op=trt.ElementWiseOperation.MAX)
dur.name = "{}.max".format(name)
out_dur = dur.get_output(0)
# Pytorch: repeats = torch.round(repeats).long()
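# Note: rounding is approximated below as floor(x + 0.5) (add 0.5, then FLOOR_DIV by ones),
# i.e. round-half-up, whereas torch.round rounds halves to even; results differ only at exact .5 values.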
half_ones = network.add_constant(weights=Weights(
np.full((batch_size, max_seq_len, 1), 0.5, dtype=np.float32)),
shape=(batch_size, max_seq_len, 1))
out_half_ones = half_ones.get_output(0) # (b, t, 1)
add = network.add_elementwise(input1=out_dur, input2=out_half_ones, op=trt.ElementWiseOperation.SUM)
add.name = "{}.round_add".format(name)
out_add = add.get_output(0) # (b, t, 1)
dur = network.add_elementwise(input1=out_add, input2=out_ones, op=trt.ElementWiseOperation.FLOOR_DIV)
dur.name = "{}.round_floor_div".format(name)
out_dur = dur.get_output(0) # (b, t, 1)
dur = network.add_shuffle(input=out_dur) # (b, t, 1) to (b, t)
dur.reshape_dims = Dims(shape=(batch_size, max_seq_len))
out_dur = dur.get_output(0) # (b, t)
return out_dur
def populate_layernorm(self, name, network, weights, seq_tensor, batch_size, max_seq_len, d_layer):
# m
mean = network.add_reduce(input=seq_tensor, op=trt.ReduceOperation.AVG, axes=(1 << 2), keep_dims=True)
mean.name = "{}.mean".format(name)
out_mean = mean.get_output(0) # (b, t, 1)
# m^2
square_mean = network.add_elementwise(input1=out_mean, input2=out_mean, op=ElementWiseOperation.PROD)
square_mean.name = "{}.square_mean".format(name)
out_square_mean = square_mean.get_output(0) # (b, t, 1)
# x^2
square = network.add_elementwise(input1=seq_tensor, input2=seq_tensor, op=ElementWiseOperation.PROD)
square.name = "{}.square".format(name)
out_square = square.get_output(0) # (b, t, h)
# e[x^2]
mean_square = network.add_reduce(input=out_square, op=trt.ReduceOperation.AVG, axes=(1 << 2), keep_dims=True)
mean_square.name = "{}.mean_square".format(name)
out_mean_square = mean_square.get_output(0) # (b, t, 1)
# e[x^2] - m^2
sub_square = network.add_elementwise(input1=out_mean_square, input2=out_square_mean, op=ElementWiseOperation.SUB)
sub_square.name = "{}.sub_square".format(name)
out_sub_square = sub_square.get_output(0) # (b, t, 1)
# + eps
eps = network.add_constant(weights=Weights(np.full((batch_size, max_seq_len, 1), 1e-5, dtype=np.float32)),
shape=Dims((batch_size, max_seq_len, 1))) # (b, t, 1)
eps.name = "{}.eps".format(name)
out_eps = eps.get_output(0)
std = network.add_elementwise(input1=out_sub_square, input2=out_eps, op=ElementWiseOperation.SUM)
std.name = "{}.std".format(name)
out_std = std.get_output(0) # (b, t, 1)
# std
sqrt = network.add_unary(input=out_std, op=trt.UnaryOperation.SQRT)
sqrt.name = "{}.sqrt".format(name)
out_sqrt = sqrt.get_output(0) # (b, t, 1)
# y = (x - mean) / std
sub = network.add_elementwise(input1=seq_tensor, input2=out_mean, op=ElementWiseOperation.SUB)
sub.name = "{}.sub".format(name)
out_sub = sub.get_output(0) # (b, t, h)
div = network.add_elementwise(input1=out_sub, input2=out_sqrt, op=ElementWiseOperation.DIV)
div.name = "{}.div".format(name)
out = div.get_output(0) # (b, t, h)
# Pytorch: y = self.weight * y + self.bias
w = weights["{}.weight".format(name)] # (h, )
out_w = network.add_constant(shape=(1, 1, d_layer), weights=trt.Weights(w)).get_output(0) # (1, 1, h)
scale_w = network.add_elementwise(input1=out, input2=out_w, op=ElementWiseOperation.PROD) # (b, t, h) * (1->b, 1->t, h) => (b, t, h)
scale_w.name = "{}.scale.w".format(name)
out = scale_w.get_output(0) # (b, t, h)
b = weights["{}.bias".format(name)] # (h, )
out_b = network.add_constant(shape=(1, 1, d_layer), weights=trt.Weights(b)).get_output(0) # (1, 1, h)
scale_b = network.add_elementwise(input1=out, input2=out_b, op=ElementWiseOperation.SUM) # (b, t, h) * (1->b, 1->t, h) => (b, t, h)
scale_b.name = "{}.scale.b".format(name)
out = scale_b.get_output(0) # (b, t, h)
return out
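# Reference note (not part of the TRT graph): populate_layernorm computes the variance from moments,
# var = E[x^2] - (E[x])^2, then normalizes and applies the elementwise affine weight/bias.
# A minimal NumPy sketch of the same math, assuming x has shape (b, t, h):
#   mean = x.mean(axis=-1, keepdims=True)                    # (b, t, 1)
#   var = (x * x).mean(axis=-1, keepdims=True) - mean ** 2   # (b, t, 1)
#   y = (x - mean) / np.sqrt(var + 1e-5) * weight + bias     # (b, t, h)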
def preprocess_weights(self, weights):
# torch.Tensor to numpy
weights = OrderedDict({k:v.numpy() for k,v in weights.items()})
return weights
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/fastspeech_trt_inferencer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import ctypes
import glob
import os
import pathlib
import sys
from collections import OrderedDict
import numpy as np
import tensorrt as trt
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorrt import Dims, ElementWiseOperation, MatrixOperation, Weights
from fastspeech.text_norm.symbols import symbols
from fastspeech.trt import TRT_LOGGER
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import remove_module_in_state_dict, to_cpu_numpy
class TRTInferencer(object):
def __init__(self, model_name, model, data_loader, ckpt_path=None, ckpt_file=None, trt_max_ws_size=1, trt_file_path=None, trt_force_build=False, use_fp16=False):
self.model_name = model_name
self.model = model
self.data_loader = data_loader
self.ckpt_path = ckpt_path
self.ckpt_file = ckpt_file
self.trt_max_ws_size = trt_max_ws_size
self.trt_file_path = trt_file_path
self.trt_force_build = trt_force_build
self.use_fp16 = use_fp16
self.batch_size = data_loader.batch_size
self.plugins = dict()
self.data_loader_iter = iter(self.data_loader)
# checkpoint path
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
# load checkpoint
self.load(ckpt_file)
self.engine = self.build_engine()
def __enter__(self):
self.context = self.engine.create_execution_context()
def __exit__(self, exception_type, exception_value, traceback):
self.context.__del__()
self.engine.__del__()
def load(self, ckpt_file):
# load latest checkpoint file if not defined.
if not ckpt_file:
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
ckpt_file = max(files_exist, key=os.path.getctime)
if ckpt_file:
state_dict = torch.load(ckpt_file, map_location='cpu')
self.step = state_dict['step']
self.model.load_state_dict(
remove_module_in_state_dict(state_dict['model']))
tprint('[Load] Checkpoint \'{}\'. Step={}'.format(
ckpt_file, self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))
def load_plugin(self, path):
ctypes.cdll.LoadLibrary(path)
def get_plugin_creator(self, plugin_name):
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
plugin_creator_list = trt.get_plugin_registry().plugin_creator_list
plugin_creator = None
for c in plugin_creator_list:
if c.name == plugin_name:
plugin_creator = c
return plugin_creator
def get_plugin(self, name):
return self.plugins[name]
@abc.abstractmethod
def create_plugins(self):
return NotImplemented
@abc.abstractmethod
def build_engine(self):
return NotImplemented
@abc.abstractmethod
def infer(self):
return NotImplemented
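# Usage sketch (illustrative only; FastSpeechTRTInferencer, defined in fastspeech_trt_inferencer.py,
# provides the concrete create_plugins/build_engine/infer implementations; CKPT_PATH and TRT_FILE_PATH
# are placeholders):
# inferencer = FastSpeechTRTInferencer('fastspeech', model, data_loader,
#                                      ckpt_path=CKPT_PATH, trt_file_path=TRT_FILE_PATH, use_fp16=True)
# with inferencer:
#     outputs = inferencer.infer()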
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/trt_inferencer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ctypes
import glob
import os
import pathlib
import sys
from collections import OrderedDict
import numpy as np
import pycuda.driver as cuda
import tensorrt as trt
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorrt import Dims, ElementWiseOperation, MatrixOperation, Weights
import fastspeech.trt.common as common
from fastspeech.trt import TRT_LOGGER
from fastspeech.trt.fastspeech_trt_inferencer import FastSpeechTRTInferencer
from fastspeech.trt.trt_inferencer import TRTInferencer
from fastspeech.utils.logging import tprint
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.pytorch import (remove_module_in_state_dict,
to_cpu_numpy, to_gpu_async)
class FastSpeechTRTMultiEngineInferencer(FastSpeechTRTInferencer):
def __init__(self, model_name, model, data_loader, ckpt_path=None, ckpt_file=None,
trt_max_ws_size=1, trt_force_build=False, use_fp16=False,
trt_file_path_list=[], trt_max_input_seq_len_list=[], trt_max_output_seq_len_list=[]):
self.trt_file_path_list = trt_file_path_list
self.trt_max_input_seq_len_list = trt_max_input_seq_len_list
self.trt_max_output_seq_len_list = trt_max_output_seq_len_list
# sort by trt_max_input_seq_len in ascending order.
self.max_seq_lens_and_file_path_list = sorted(zip(self.trt_max_input_seq_len_list,
self.trt_max_output_seq_len_list,
self.trt_file_path_list))
self.engine = None
self.context = None
super(FastSpeechTRTMultiEngineInferencer, self).__init__(model_name, model, data_loader, ckpt_path, ckpt_file,
trt_max_ws_size, None, trt_force_build, use_fp16,
None, None, False)
def __enter__(self):
for engine in self.engine_list:
self.context_list.append(engine.create_execution_context())
def __exit__(self, exception_type, exception_value, traceback):
for engine, context in zip(self.engine_list, self.context_list):
context.__del__()
engine.__del__()
def build_engine(self):
# load engines and create contexts
self.engine_list = []
self.context_list = []
for i, (trt_max_input_seq_len, trt_max_output_seq_len, trt_file_path) in enumerate(self.max_seq_lens_and_file_path_list):
if trt_file_path and os.path.isfile(trt_file_path) and not self.trt_force_build:
with open(trt_file_path, 'rb') as f:
engine_str = f.read()
with trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(engine_str)
tprint('TRT Engine Loaded from {} successfully.'.format(trt_file_path))
else:
self.trt_max_input_seq_len = trt_max_input_seq_len
self.trt_max_output_seq_len = trt_max_output_seq_len
self.trt_file_path = trt_file_path
tprint('Building a TRT Engine..')
engine = self.do_build_engine()
tprint('TRT Engine Built.')
with open(self.trt_file_path, 'wb') as f:
f.write(engine.serialize())
tprint('TRT Engine Saved in {}.'.format(self.trt_file_path))
self.engine_list.append(engine)
def set_engine_and_context(self, length):
for i, (trt_max_input_seq_len, trt_max_output_seq_len, trt_file_path) in enumerate(self.max_seq_lens_and_file_path_list):
if length <= trt_max_input_seq_len:
self.engine = self.engine_list[i]
self.context = self.context_list[i]
self.trt_max_input_seq_len = trt_max_input_seq_len
self.trt_max_output_seq_len = trt_max_output_seq_len
self.trt_file_path = trt_file_path
break
else:
self.engine = self.engine_list[-1]
self.context = self.context_list[-1]
self.trt_max_input_seq_len = trt_max_input_seq_len
self.trt_max_output_seq_len = trt_max_output_seq_len
self.trt_file_path = trt_file_path
tprint('TRT Engine {} is selected.'.format(self.trt_file_path))
def infer(self, acts=None):
inputs = next(self.data_loader_iter)
text_encoded = inputs["text_encoded"] # (b, t)
text_pos = inputs["text_pos"] # (b, t)
self.set_engine_and_context(length=text_encoded.size(1))
text_encoded = F.pad(text_encoded, pad=(0, self.trt_max_input_seq_len - text_encoded.size(1))) # (b, t)
text_pos = F.pad(text_pos, pad=(0, self.trt_max_input_seq_len - text_pos.size(1))) # (b, t)
text_mask = text_pos.ne(0) # padded is False
# TODO: process word emb in TRT if the API allows.
with torch.no_grad():
text_encoded = self.model.word_emb(text_encoded)
# create input/output buffers
input_buffers = common.create_inputs_from_torch(self.engine, [text_encoded, text_mask])
output_buffers = common.create_outputs_from_torch(self.engine)
# bindings
bindings = [int(data.data_ptr()) for data in (input_buffers + output_buffers)]
# execute
# self.context.profiler = trt.Profiler()
stream = cuda.Stream()
self.context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# self.context.execute(batch_size=self.batch_size, bindings=bindings)
stream.synchronize()
outputs = dict()
outputs['mel'] = output_buffers[-2]
outputs['mel_mask'] = output_buffers[-1]
outputs['text'] = inputs["text_norm"]
# activations for verifying accuracy.
if acts is not None:
act_names = common.trt_output_names(self.engine)
n_acts = len(output_buffers) - 2 # exclude outputs(mel and mel_mask)
for i in range(n_acts):
acts[act_names[i]] = output_buffers[i]
return outputs
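# Usage sketch (illustrative only; engine paths and sequence lengths are placeholders). The three lists
# must be index-aligned: each engine file covers inputs up to its max input/output sequence lengths, and
# set_engine_and_context() picks the smallest engine that fits the current batch.
# inferencer = FastSpeechTRTMultiEngineInferencer(
#     'fastspeech', model, data_loader,
#     trt_file_path_list=['fs_128.engine', 'fs_256.engine'],
#     trt_max_input_seq_len_list=[128, 256],
#     trt_max_output_seq_len_list=[1024, 2048])
# with inferencer:
#     outputs = inferencer.infer()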
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/fastspeech_trt_multi_engine_inferencer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import numpy as np
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
import ctypes
import os
import time
import sys
logger = trt.Logger(trt.Logger.INFO)
PLUGIN_PATH = '/home/dahn/git/fastspeech/fastspeech/trt/plugins/repeat/RepeatPlugin.so'
ctypes.cdll.LoadLibrary(PLUGIN_PATH)
def get_plugin_creator(plugin_name):
trt.init_libnvinfer_plugins(logger, '')
plugin_creator_list = trt.get_plugin_registry().plugin_creator_list
plugin_creator = None
for c in plugin_creator_list:
if c.name == plugin_name:
plugin_creator = c
return plugin_creator
def build_engine(shape, shape2):
plugin_creator = get_plugin_creator('RepeatPlugin')
if plugin_creator is None:
print('Plugin not found. Exiting')
exit()
builder = trt.Builder(logger)
builder.max_batch_size = 1024
builder.max_workspace_size = 1 << 20
builder.fp16_mode = use_fp16
network = builder.create_network()
tensor = network.add_input('input1', trt.DataType.FLOAT, shape)
tensor2 = network.add_input('input2', trt.DataType.FLOAT, shape2)
tensor = network.add_plugin_v2(
[tensor, tensor2],
plugin_creator.create_plugin('RepeatPlugin', trt.PluginFieldCollection([
trt.PluginField('maxOutputLength', np.array([MAX_OUTPUT_LENGTH], dtype=np.int32), trt.PluginFieldType.INT32)
]))
).get_output(0)
network.mark_output(tensor)
return builder.build_cuda_engine(network)
def run_trt(input1, input2):
batch_size = input1.shape[0]
engine = build_engine(input1.shape[1:], input2.shape[1:])
context = engine.create_execution_context()
d_input1 = cuda.mem_alloc(input1.nbytes)
d_input2 = cuda.mem_alloc(input2.nbytes)
output = np.zeros(shape=(batch_size, MAX_OUTPUT_LENGTH, input1.shape[2]), dtype=np.float32)
d_output = cuda.mem_alloc(output.nbytes)
cuda.memcpy_htod(d_input1, input1)
cuda.memcpy_htod(d_input2, input2)
bindings = [int(d_input1), int(d_input2), int(d_output)]
start = time.time()
context.execute(batch_size, bindings)
end = time.time()
time_elapsed = end - start
print("time elapsed: {:06f}".format(time_elapsed))
cuda.memcpy_dtoh(output, d_output)
return output
use_fp16 = len(sys.argv) > 1 and sys.argv[1].isdigit() and int(sys.argv[1]) == 1
print('Use FP16:', use_fp16)
##
# accuray test
##
MAX_OUTPUT_LENGTH=8
inputs = np.array([
[[1, 2], [4, 5], [7, 8]],
[[3, 4], [5, 6], [8, 9]]
], np.float32)
masks = np.ones((2,3,1), np.float32)
repeats = np.array([
[[0, 2, 10]],
[[1, 2, 1]]
], np.float32)
output = run_trt(inputs, repeats)
print(output)
print(output.shape)
print(type(output))
output_mask = run_trt(masks, repeats)
print(output_mask)
print(output_mask.shape)
print(type(output_mask))
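# A rough NumPy reference for the accuracy test above (an assumption based on the
# torch.repeat_interleave usage in fastspeech_trt_inferencer.py, not an authoritative spec of
# RepeatPlugin): each time step is repeated by its repeat count, then the sequence is
# clipped/padded to MAX_OUTPUT_LENGTH. Printed for manual comparison only.
def repeat_reference(seqs, reps, max_output_length):
    outputs = []
    for seq, rep in zip(seqs, reps.reshape(seqs.shape[0], -1)):
        rep = np.round(rep).astype(np.int64)
        out = np.repeat(seq, rep, axis=0)[:max_output_length]  # repeat each row rep[i] times
        out = np.pad(out, ((0, max_output_length - out.shape[0]), (0, 0)), mode='constant')
        outputs.append(out)
    return np.stack(outputs)
print('NumPy reference (assumed semantics):')
print(repeat_reference(inputs, repeats, MAX_OUTPUT_LENGTH))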
##
# latency test
##
# MAX_OUTPUT_LENGTH=1024
# inputs = np.full((16, 256, 384), 2, np.float32)
# masks = np.ones((16, 256, 384), np.float32)
# repeats = np.full((16, 256), 4, np.float32)
# output = run_trt(inputs, repeats)
# output_mask = run_trt(masks, repeats)
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/plugins/repeat/test_repeat_plugin.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import numpy as np
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
import ctypes
import os
import sys
import time
logger = trt.Logger(trt.Logger.INFO)
PLUGIN_PATH = '/home/dahn/git/fastspeech/fastspeech/trt/plugins/add_pos_enc/AddPosEncPlugin.so'
ctypes.cdll.LoadLibrary(PLUGIN_PATH)
def get_plugin_creator(plugin_name):
trt.init_libnvinfer_plugins(logger, '')
plugin_creator_list = trt.get_plugin_registry().plugin_creator_list
plugin_creator = None
for c in plugin_creator_list:
if c.name == plugin_name:
plugin_creator = c
return plugin_creator
def build_engine(shape):
plugin_creator = get_plugin_creator('AddPosEncPlugin')
if plugin_creator is None:
print('Plugin not found. Exiting')
exit()
builder = trt.Builder(logger)
builder.max_batch_size = 1024
builder.max_workspace_size = 1 << 20
builder.fp16_mode = use_fp16
network = builder.create_network()
tensor = network.add_input('data', trt.DataType.FLOAT, shape)
tensor = network.add_plugin_v2(
[tensor],
plugin_creator.create_plugin('AddPosEncPlugin', trt.PluginFieldCollection())
).get_output(0)
network.mark_output(tensor)
return builder.build_cuda_engine(network)
def run_trt(data):
engine = build_engine(data.shape[1:])
context = engine.create_execution_context()
d_data = cuda.mem_alloc(data.nbytes)
output = np.zeros_like(data, dtype=np.float32)
d_output = cuda.mem_alloc(output.nbytes)
cuda.memcpy_htod(d_data, data)
bindings = [int(d_data), int(d_output)]
start = time.time()
context.execute(data.shape[0], bindings)
end = time.time()
time_elapsed = end - start
print("time elapsed: {:06f}".format(time_elapsed))
cuda.memcpy_dtoh(output, d_output)
return output
use_fp16 = len(sys.argv) > 1 and sys.argv[1].isdigit() and int(sys.argv[1]) == 1
print('Use FP16:', use_fp16)
output = run_trt(np.zeros((16, 128, 384), np.float32))
print(output)
print(output.shape)
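# A rough NumPy reference for manual comparison (assumption: AddPosEncPlugin is expected to add the
# standard sinusoidal positional encoding used by FastSpeech; this sketch is illustrative, not an
# authoritative spec of the plugin).
def sinusoid_pos_enc(seq_len, d_model):
    pos = np.arange(seq_len)[:, None]                        # (t, 1)
    i = np.arange(d_model)[None, :]                          # (1, d)
    angle = pos / np.power(10000.0, 2 * (i // 2) / d_model)  # (t, d)
    enc = np.zeros((seq_len, d_model), dtype=np.float32)
    enc[:, 0::2] = np.sin(angle[:, 0::2])
    enc[:, 1::2] = np.cos(angle[:, 1::2])
    return enc
print('NumPy reference (assumed semantics):')
print(np.zeros((16, 128, 384), np.float32) + sinusoid_pos_enc(128, 384))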
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trt/plugins/add_pos_enc/test_add_pos_enc_plugin.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/dataset/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from torch.utils.data import Dataset
from fastspeech.text_norm import text_to_sequence
class TextDataset(Dataset):
def __init__(self, text_list, text_cleaner=['english_cleaners']):
self.texts = text_list
self.text_cleaner = text_cleaner
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
text = self.texts[idx]
# Text normalization
text_encoded = np.array(text_to_sequence(text, self.text_cleaner))
text_pos = np.arange(1, len(text_encoded) + 1)
data = {
"text": text,
"text_norm": text,
"text_encoded": text_encoded,
"text_pos": text_pos,
}
return data
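# Minimal usage sketch (illustrative only):
# dataset = TextDataset(["The forecast for tonight is clear."])
# item = dataset[0]
# print(item["text_encoded"], item["text_pos"])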
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/dataset/text_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import csv
import pprint
import librosa
from torch.utils.data import Dataset
import pandas as pd
from fastspeech.text_norm import text_to_sequence
from fastspeech import audio
from fastspeech.utils.logging import tprint
import os
import pathlib
import fire
import numpy as np
from tqdm import tqdm
from fastspeech import hparam as hp
pp = pprint.PrettyPrinter(indent=4, width=1000)
class LJSpeechDataset(Dataset):
def __init__(self, root_path, meta_file="metadata.csv",
sr=22050, n_fft=1024, win_len=1024, hop_len=256, n_mels=80, mel_fmin=0.0, mel_fmax=8000.0, exclude_mels=False, mels_path=None,
aligns_path=None, text_cleaner=['english_cleaners'], sort_by_length=False):
self.root_path = root_path
self.meta_file = meta_file
self.text_cleaner = text_cleaner
self.sr = sr
self.n_fft = n_fft
self.win_len = win_len
self.hop_len = hop_len
self.n_mels = n_mels
self.mel_fmin = mel_fmin
self.mel_fmax = mel_fmax
self.aligns_path = aligns_path
self.mels_path = mels_path
self.exclude_mels = exclude_mels
self.sort_by_length = sort_by_length
# Read metadata file.
# - column: <name, transcription, normalized_transcription>
self.metas = pd.read_csv(os.path.join(root_path, meta_file),
sep="|",
header=None,
keep_default_na=False,
quoting=csv.QUOTE_NONE,
names=["name", "transcription", "normalized_transcription"],
)
if sort_by_length:
self.metas.insert(3, 'length', self.metas['normalized_transcription'].str.len())
self.metas.sort_values('length', ascending=True, inplace=True)
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
name = self.metas.iloc[idx, 0]
path = "{}/wavs/{}.wav".format(self.root_path, name)
# Text normalization
text = self.metas.iloc[idx, 1]
text_norm = self.metas.iloc[idx, 2]
text_encoded = np.array(text_to_sequence(text_norm, self.text_cleaner))
text_pos = np.arange(1, len(text_encoded) + 1)
data = {
"name": name,
"text": text,
"text_norm": text_norm,
"text_encoded": text_encoded,
"text_pos": text_pos,
"text_len": text_encoded.shape[-1],
"sr": self.sr
}
if not self.exclude_mels:
wav, sr = librosa.load(path, sr=self.sr) # wav is [-1.0, 1.0]
if sr != self.sr:
raise ValueError("{} SR doesn't match target {} SR".format(sr, self.sr))
# Audio processing
wav, _ = librosa.effects.trim(wav, frame_length=self.win_len, hop_length=self.hop_len)
if self.mels_path:
mel = np.load(os.path.join(self.mels_path, name + ".mel.npy"))
else:
mel = librosa.feature.melspectrogram(wav,
sr=sr,
n_fft=self.n_fft,
win_length=self.win_len,
hop_length=self.hop_len,
n_mels=self.n_mels,
fmin=self.mel_fmin,
fmax=self.mel_fmax,
power=1.0)
mel = audio.dynamic_range_compression(mel)
data_mel = {
"wav": wav,
"mel": mel,
"mel_len": mel.shape[-1],
}
data.update(data_mel)
if self.aligns_path:
aligns = np.load(os.path.join(self.aligns_path, name + ".align.npy"))
data['align'] = aligns
return data
def preprocess_mel(hparam="base.yaml", **kwargs):
"""The script for preprocessing mel-spectrograms from the dataset.
By default, this script assumes to load parameters in the default config file, fastspeech/hparams/base.yaml.
Besides the flags, you can also set parameters in the config file via the command-line. For examples,
--dataset_path=DATASET_PATH
Path to dataset directory.
--mels_path=MELS_PATH
Path to output preprocessed mels directory.
Refer to fastspeech/hparams/base.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "base.yaml".
"""
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
pathlib.Path(hp.mels_path).mkdir(parents=True, exist_ok=True)
dataset = LJSpeechDataset(hp.dataset_path, mels_path=None)
for data in tqdm(dataset):
name = data["name"]
mel = data["mel"]
save_path = os.path.join(hp.mels_path, name + ".mel.npy")
if os.path.exists(save_path):
continue
# print(name, mel)
np.save(save_path, mel)
if __name__ == '__main__':
fire.Fire(preprocess_mel)
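# Example invocation (paths are placeholders):
# python fastspeech/dataset/ljspeech_dataset.py --dataset_path=/path/to/LJSpeech-1.1 --mels_path=/path/to/mels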
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/dataset/ljspeech_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import torch
from fastspeech.utils.logging import tprint
class TimeElapsed(object):
def __init__(self, name, device='cuda', cuda_sync=False, format=""):
self.name = name
self.device = device
self.cuda_sync = cuda_sync
self.format = format
def __enter__(self):
self.start()
def __exit__(self, *exc_info):
self.end()
def start(self):
if self.device == 'cuda' and self.cuda_sync:
torch.cuda.synchronize()
self.start_time = time.time()
def end(self):
if not hasattr(self, "start_time"):
return
if self.device == 'cuda' and self.cuda_sync:
torch.cuda.synchronize()
self.end_time = time.time()
self.time_elapsed = self.end_time - self.start_time
tprint(("[{}] Time elapsed: {" + self.format + "}").format(self.name, self.time_elapsed))
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/time.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
def tprint(msg):
print('[{}] {}'.format(time.strftime('%Y%m%d %H:%M:%S'), msg))
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/logging.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
"""
Revised based on apex/apex/amp/_initialize.py
"""
def _applier(value, fn):
if isinstance(value, torch.cuda.FloatTensor):
return fn(value)
elif isinstance(value, torch.cuda.HalfTensor):
return fn(value)
elif isinstance(value, dict):
return dict({k : _applier(v, fn) for k, v in value.items()})
elif isinstance(value, tuple):
return tuple(_applier(v, fn) for v in value)
else:
return value
def _cast_module_to_half(module, op_list):
for op in op_list:
if isinstance(module, op):
module.half()
module.register_forward_pre_hook(lambda module, input: _applier(input, lambda x: x.half()))
module.register_forward_hook(lambda module, input, output: _applier(output, lambda x: x.float()))
break
else:
for child in module.children():
_cast_module_to_half(child, op_list)
return module
def cast_model_to_half(model, op_list=[nn.Linear, nn.Conv1d]):
model = _cast_module_to_half(model, op_list)
return model
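# Usage sketch (illustrative only): casts nn.Linear and nn.Conv1d submodules to FP16; the registered
# hooks cast their inputs to half and their outputs back to float, so the surrounding model stays FP32.
# model = cast_model_to_half(model.cuda())
# output = model(input_tensor.cuda())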
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/fp16.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
def to_device_async(tensor, device):
return tensor.to(device, non_blocking=True)
def to_gpu_async(cpu_tensor):
return cpu_tensor.to('cuda', non_blocking=True)
def to_cpu_numpy(gpu_tensor):
if not isinstance(gpu_tensor, torch.Tensor):
return gpu_tensor
return gpu_tensor.detach().cpu().numpy()
def remove_module_in_state_dict(state_dict):
"""
    If the model was saved with DataParallel, the checkpoint keys start with 'module.';
    strip that prefix and return a new state dict.
    :param state_dict: state dict of a DataParallel-wrapped model
    :return: new state dict with the 'module.' prefix removed from every key
"""
new_state_dict = {}
for key, val in state_dict.items():
new_key = key.replace('module.', '')
new_state_dict[new_key] = val
return new_state_dict
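# --- Usage sketch (editor's addition, not part of the original file) ---
# Stripping the 'module.' prefix that nn.DataParallel adds to checkpoint keys.
# The state dict below is a toy example.
if __name__ == "__main__":
    wrapped = {'module.linear.weight': torch.zeros(2, 2),
               'module.linear.bias': torch.zeros(2)}
    plain = remove_module_in_state_dict(wrapped)
    print(sorted(plain.keys()))  # ['linear.bias', 'linear.weight']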
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/pytorch.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import yaml
def load_hparam(filepath):
hparam_dict = dict()
if not filepath:
return hparam_dict
    with open(filepath, 'r') as stream:
        for doc in yaml.load_all(stream, Loader=yaml.SafeLoader):
            for k, v in doc.items():
                hparam_dict[k] = v
    return hparam_dict
def merge_dict(new, default):
if isinstance(new, dict) and isinstance(default, dict):
for k, v in default.items():
if k not in new:
new[k] = v
else:
new[k] = merge_dict(new[k], v)
return new
class Dotdict(dict):
"""
a dictionary that supports dot notation
as well as dictionary access notation
    usage: d = Dotdict() or d = Dotdict({'val1': 'first'})
set attributes: d.val2 = 'second' or d['val2'] = 'second'
get attributes: d.val2 or d['val2']
"""
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __init__(self, dct=None):
dct = dict() if not dct else dct
for key, value in dct.items():
if hasattr(value, 'keys'):
value = Dotdict(value)
self[key] = value
def __getattr__(self, item):
try:
return self.__getitem__(item)
except KeyError as e:
return None
class Hparam(Dotdict):
__getattr__ = Dotdict.__getattr__
__setattr__ = Dotdict.__setitem__
__delattr__ = Dotdict.__delitem__
def __init__(self, root_path):
self.hp_root_path = root_path
super(Hparam, self).__init__()
def set_hparam(self, filename, hp_commandline=dict()):
def get_hp(file_path):
"""
            Recursively merges the hparams of any parent_yaml referenced by the file.
            :param file_path: path of the hparam yaml file.
            :return: merged hparam dict.
"""
hp = load_hparam(file_path)
if 'parent_yaml' not in hp:
return hp
parent_path = os.path.join(self.hp_root_path, hp['parent_yaml'])
if parent_path == file_path:
                raise Exception('parent_yaml must not reference the file itself ({}).'.format(file_path))
base_hp = get_hp(parent_path)
hp = merge_dict(hp, base_hp)
return hp
hparam_path = os.path.join(self.hp_root_path, filename)
hp = get_hp(hparam_path)
hp = merge_dict(hp_commandline, hp)
hp = Dotdict(hp)
for k, v in hp.items():
setattr(self, k, v)
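# --- Usage sketch (editor's addition, not part of the original file) ---
# Dotdict supports both attribute and key access; Hparam.set_hparam would
# additionally load YAML files under hp_root_path and merge any parent_yaml
# chain. The values below are illustrative only.
if __name__ == "__main__":
    hp = Dotdict({'train': {'batch_size': 16}, 'lr': 1e-3})
    print(hp.train.batch_size)  # 16, via dot notation
    print(hp['lr'])             # 0.001, via dict notation
    print(hp.missing)           # None instead of a KeyError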
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/hparam.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
class ScheduledOptim():
''' A simple wrapper class for learning rate scheduling '''
def __init__(self, optimizer, d_model, n_warmup_steps, current_steps):
self._optimizer = optimizer
self.n_warmup_steps = n_warmup_steps
self.n_current_steps = current_steps
self.init_lr = np.power(d_model, -0.5)
def step_and_update_lr_frozen(self, learning_rate_frozen):
for param_group in self._optimizer.param_groups:
param_group['lr'] = learning_rate_frozen
self._optimizer.step()
def step_and_update_lr(self):
self._update_learning_rate()
self._optimizer.step()
def get_learning_rate(self):
learning_rate = 0.0
for param_group in self._optimizer.param_groups:
learning_rate = param_group['lr']
return learning_rate
def zero_grad(self):
# print(self.init_lr)
self._optimizer.zero_grad()
def _get_lr_scale(self):
return np.min([
np.power(self.n_current_steps, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])
def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_current_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
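# --- Usage sketch (editor's addition, not part of the original file) ---
# The wrapper implements the Noam warm-up schedule,
# lr = d_model**-0.5 * min(step**-0.5, step * n_warmup_steps**-1.5).
# The tiny model and optimizer below are illustrative only.
if __name__ == "__main__":
    import torch
    net = torch.nn.Linear(4, 4)
    opt = torch.optim.Adam(net.parameters(), lr=0.0)
    sched = ScheduledOptim(opt, d_model=256, n_warmup_steps=4000, current_steps=0)
    for _ in range(3):
        sched.zero_grad()
        net(torch.randn(2, 4)).sum().backward()
        sched.step_and_update_lr()
    print(sched.get_learning_rate())  # small lr, still ramping up during warm-up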
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/optimizer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from torch.cuda import nvtx
class Nvtx(object):
def __init__(self, name, enabled=True):
self.name = name
self.enabled = enabled
def __call__(self, f):
def wrapped_f(*args, **kwargs):
with Nvtx(self.name, self.enabled):
return f(*args, **kwargs)
return wrapped_f
def __enter__(self):
if self.enabled:
nvtx.range_push(self.name)
def __exit__(self, *exc_info):
if self.enabled:
nvtx.range_pop()
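# --- Usage sketch (editor's addition, not part of the original file) ---
# Nvtx works both as a decorator and as a context manager. With enabled=True
# the ranges show up in an Nsight Systems / nvprof timeline when profiling;
# enabled=False (used here so the sketch runs anywhere) makes them no-ops.
if __name__ == "__main__":
    @Nvtx("double", enabled=False)
    def double(x):
        return x * 2

    with Nvtx("outer", enabled=False):
        print(double(21))  # 42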
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/nvtx.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import matplotlib.pyplot as plt
import numpy as np
import cv2
import data as global_data
plt.switch_backend('Agg')
def image_plot(x, name='image'):
fig, ax = plt.subplots()
ax.imshow(x, cmap='magma', aspect='auto')
fig.canvas.draw()
buf = np.array(fig.canvas.renderer._renderer)
plt.clf()
plt.close('all')
cv2.imshow(name, buf)
cv2.waitKey(0)
def plot_to_buf(x, align=True):
fig, ax = plt.subplots()
ax.plot(x)
if align:
ax.set_ylim([-1, 1])
fig.canvas.draw()
im = np.array(fig.canvas.renderer._renderer)
plt.clf()
plt.close('all')
return np.rollaxis(im[..., :3], 2)
def imshow_to_buf(x, scale01=False):
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
return np.exp(x) / np.sum(np.exp(x), axis=0)
if scale01:
x = (x - x.min()) / (x.max() - x.min())
if x.max() > 1.:
x = softmax(x)
if len(x.shape) == 3:
x = x[0]
fig, ax = plt.subplots()
ax.imshow(x, cmap='magma', aspect='auto')
fig.canvas.draw()
im = np.array(fig.canvas.renderer._renderer)
plt.clf()
plt.close('all')
return np.rollaxis(im[..., :3], 2)
def origin_to_chrs(target):
results = []
for t in target:
idx = t - 1 if t - 1 >= 0 else 0
if idx < len(global_data.idx2chr):
results.append(global_data.idx2chr[idx])
else:
break
return ''.join(results)
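# --- Usage sketch (editor's addition, not part of the original file) ---
# plot_to_buf and imshow_to_buf render arrays into CHW uint8 buffers, e.g. for
# SummaryWriter.add_image. This assumes the module-level imports (including
# the local `data` module) resolve, and relies on matplotlib's private
# renderer attribute, so the matplotlib version matters.
if __name__ == "__main__":
    wav = np.sin(np.linspace(0, 8 * np.pi, 1000))
    mel = np.random.rand(80, 200)
    print(plot_to_buf(wav).shape)    # (3, H, W) waveform plot
    print(imshow_to_buf(mel).shape)  # (3, H, W) spectrogram image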
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/utils/tensorboard.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/hparams/__init__.py
|
# Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
            parts = line.split('  ')  # CMUdict separates word and pronunciation with two spaces
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
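# --- Usage sketch (editor's addition, not part of the original file) ---
# CMUDict also accepts a file-like object, so a small in-memory dictionary can
# be parsed directly; word and pronunciation are separated by two spaces.
if __name__ == "__main__":
    import io
    entries = io.StringIO("HELLO  HH AH0 L OW1\nWORLD  W ER1 L D\n")
    d = CMUDict(entries)
    print(len(d))             # 2
    print(d.lookup('hello'))  # ['HH AH0 L OW1']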
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm/cmudict.py
|
# Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
import re
from . import cleaners
from .symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
# print(m)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
        cleaner = getattr(cleaners, name, None)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
    return s in _symbol_to_id and s != '_' and s != '~'
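# --- Usage sketch (editor's addition, not part of the original file) ---
# Text with an ARPAbet span in curly braces is converted to symbol IDs and
# back. Requires package context for the relative imports plus the unidecode
# and inflect packages used by english_cleaners.
if __name__ == "__main__":
    ids = text_to_sequence("Turn left on {HH AW1 S T AH0 N} Street.", ['english_cleaners'])
    print(ids)
    print(sequence_to_text(ids))  # "turn left on {HH AW1 S T AH0 N} street."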
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm/__init__.py
|
# Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
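# --- Usage sketch (editor's addition, not part of the original file) ---
# Spelling out money, ordinals and years; requires the inflect package. The
# exact wording can vary slightly with the inflect version.
if __name__ == "__main__":
    print(normalize_numbers("It cost $3.50 on March 3rd, 2021."))
    # -> "It cost three dollars, fifty cents on March third, twenty twenty-one."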
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm/numbers.py
|
# Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has
been run through Unidecode. For other data, you can modify _characters. See
TRAINING_DATA.md for details.
'''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm/symbols.py
|
# Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
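# --- Usage sketch (editor's addition, not part of the original file) ---
# The full English pipeline transliterates to ASCII, lowercases, and expands
# numbers and abbreviations. Requires package context for the relative import
# plus the unidecode and inflect packages; the sentence is illustrative.
if __name__ == "__main__":
    print(english_cleaners("Dr. Müller bought 2 CDs for $5."))
    # -> "doctor muller bought two cds for five dollars."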
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm/cleaners.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
import numpy as np
import torch
from torch import nn as nn
from fastspeech.model.module import FFTBlocks, LengthRegulator
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from torch.nn import functional as F
from fastspeech.utils.logging import tprint
from fastspeech.text_norm.symbols import symbols
class Fastspeech(nn.Module):
""" FastSpeech """
def __init__(self,
max_seq_len,
d_model,
phoneme_side_n_layer,
phoneme_side_head,
phoneme_side_conv1d_filter_size,
phoneme_side_output_size,
mel_side_n_layer,
mel_side_head,
mel_side_conv1d_filter_size,
mel_side_output_size,
fft_conv1d_kernel,
fft_conv1d_padding,
duration_predictor_filter_size,
duration_predictor_kernel_size,
dropout,
n_mels,
fused_layernorm=False):
super(Fastspeech, self).__init__()
self.max_seq_len = max_seq_len
self.d_model = d_model
self.phoneme_side_n_layer = phoneme_side_n_layer
self.phoneme_side_head = phoneme_side_head
self.phoneme_side_conv1d_filter_size = phoneme_side_conv1d_filter_size
self.phoneme_side_output_size = phoneme_side_output_size
self.mel_side_n_layer = mel_side_n_layer
self.mel_side_head = mel_side_head
self.mel_side_conv1d_filter_size = mel_side_conv1d_filter_size
self.mel_side_output_size = mel_side_output_size
self.fft_conv1d_kernel = fft_conv1d_kernel
self.fft_conv1d_padding = fft_conv1d_padding
self.duration_predictor_filter_size = duration_predictor_filter_size
self.duration_predictor_kernel_size = duration_predictor_kernel_size
self.dropout = dropout
self.n_mels = n_mels
self.fused_layernorm = fused_layernorm
self.n_phns = len(symbols)+1
self.word_emb = nn.Embedding(
self.n_phns,
d_model,
padding_idx=0)
self.phoneme_side = FFTBlocks(
max_seq_len=max_seq_len,
n_layers=phoneme_side_n_layer,
n_head=phoneme_side_head,
d_k=64,
d_v=64,
d_model=d_model,
d_inner=phoneme_side_conv1d_filter_size,
fft_conv1d_kernel=fft_conv1d_kernel,
fft_conv1d_padding=fft_conv1d_padding,
dropout=dropout,
name="phoneme_side",
fused_layernorm=fused_layernorm
)
self.length_regulator = LengthRegulator(
input_size=phoneme_side_output_size,
duration_predictor_filter_size=duration_predictor_filter_size,
duration_predictor_kernel_size=duration_predictor_kernel_size,
dropout=dropout,
fused_layernorm=fused_layernorm
)
self.mel_side = FFTBlocks(
max_seq_len=max_seq_len,
n_layers=mel_side_n_layer,
n_head=mel_side_head,
d_k=64,
d_v=64,
d_model=d_model,
d_inner=mel_side_conv1d_filter_size,
fft_conv1d_kernel=fft_conv1d_kernel,
fft_conv1d_padding=fft_conv1d_padding,
dropout=dropout,
name="mel_side",
fused_layernorm=fused_layernorm
)
self.mel_linear = nn.Linear(mel_side_output_size, n_mels, bias=True)
def forward(self, seq, pos, duration_target=None, alpha=1.0, seq_output_len=None, use_fp16=False, acts=None):
# Phoneme Embedding
output = self.word_emb(seq)
if acts is not None:
acts["act.emb"] = output
if use_fp16:
output = output.half()
# Phoneme Side FFT Blocks
output, output_mask = self.phoneme_side(output, pos, acts=acts)
if acts is not None:
acts["act.phoneme_side.seq"] = output
# Length Regulator
output, pos, duration = self.length_regulator(
output,
output_mask,
target=duration_target,
alpha=alpha)
if seq_output_len:
output = F.pad(output, pad=(0, 0, 0, seq_output_len - output.size(1)))
pos = F.pad(pos, pad=(0, seq_output_len - pos.size(1)))
# length of output mel shouldn't exceed max_seq_len
output = output[:, :self.max_seq_len]
pos = pos[:, :self.max_seq_len]
if acts is not None:
acts["act.length_regulator.seq"] = output
acts["act.length_regulator.dur"] = torch.round(duration)
if self.training or output.bool().any():
# Mel Side FFT Blocks
output, output_mask = self.mel_side(output, pos, acts=acts)
if acts is not None:
acts["act.mel_side.seq"] = output
# Linear Layer
output = self.mel_linear(output)
if acts is not None:
acts["out.seq_mask"] = output_mask
acts["out.seq"] = output
else:
# seq length could be zero, in case duration predictor outputs all zeros.
# In this case, skip feed-forwarding.
tprint("Duration Predictor outputs all zeros. Output will be zero length.")
output_shape = (output.size(0), 0, output_mask.size(2))
output = torch.zeros(size=(output_shape))
output_mask = torch.ones(size=(output_shape))
if torch.cuda.device_count() > 1:
            # In a multi-GPU setting, all output mels from the devices must have the same
            # length; otherwise an error occurs while gathering the outputs.
if not seq_output_len:
seq_output_len = self.max_seq_len
padding = (0, 0, 0, seq_output_len - output.size(1))
output = F.pad(output, padding)
output = output[:, :seq_output_len, :]
output_mask = F.pad(output_mask, padding)
output_mask = output_mask[:, :seq_output_len, :]
return output, output_mask, duration
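# --- Usage sketch (editor's addition, not part of the original file) ---
# Builds a small Fastspeech with hypothetical hyperparameters (not the shipped
# config) and runs a dummy batch of phoneme IDs through it on CPU, assuming
# the fastspeech package is importable.
if __name__ == "__main__":
    model = Fastspeech(
        max_seq_len=128, d_model=64,
        phoneme_side_n_layer=1, phoneme_side_head=2,
        phoneme_side_conv1d_filter_size=256, phoneme_side_output_size=64,
        mel_side_n_layer=1, mel_side_head=2,
        mel_side_conv1d_filter_size=256, mel_side_output_size=64,
        fft_conv1d_kernel=3, fft_conv1d_padding=1,
        duration_predictor_filter_size=64, duration_predictor_kernel_size=3,
        dropout=0.1, n_mels=80)
    seq = torch.randint(1, model.n_phns, (2, 16))        # (batch, phoneme length)
    pos = torch.arange(1, 17).unsqueeze(0).repeat(2, 1)  # 1-based positions, 0 = padding
    dur = torch.full((2, 16), 2, dtype=torch.long)       # fixed durations for the demo
    mel, mel_mask, duration = model(seq, pos, duration_target=dur)
    print(mel.shape)  # torch.Size([2, 32, 80])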
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/model/fastspeech.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/model/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module
from torch.nn.utils.rnn import pad_sequence
from fastspeech.text_norm.symbols import symbols
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.pytorch import to_device_async
try:
    import apex
except ImportError:
    apex = None  # apex is only required when fused_layernorm=True
class Bmm(Module):
""" Required for manual fp16 casting. If not using amp_opt_level='O2', just use torch.bmm.
"""
def forward(self, a, b):
return torch.bmm(a, b)
class FFTBlocks(nn.Module):
def __init__(self,
max_seq_len,
n_layers,
n_head,
d_k,
d_v,
d_model,
d_inner,
fft_conv1d_kernel,
fft_conv1d_padding,
dropout,
name,
fused_layernorm=False,
):
self.max_seq_len = max_seq_len
self.n_layers = n_layers
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.d_model = d_model
self.d_inner = d_inner
        self.fft_conv1d_kernel = fft_conv1d_kernel
        self.fft_conv1d_padding = fft_conv1d_padding
        self.dropout = dropout
self.fused_layernorm = fused_layernorm
self.name = name
super(FFTBlocks, self).__init__()
n_position = max_seq_len + 1
self.position = nn.Embedding.from_pretrained(
get_sinusoid_encoding_table(n_position, d_model, padding_idx=0),
freeze=True)
self.layer_stack = nn.ModuleList([FFTBlock(
d_model, d_inner, n_head, d_k, d_v,
fft_conv1d_kernel=fft_conv1d_kernel,
fft_conv1d_padding=fft_conv1d_padding,
dropout=dropout,
fused_layernorm=fused_layernorm,
name="{}.layer_stack.{}".format(self.name, i),
) for i in range(n_layers)])
def forward(self, seq, pos, return_attns=False, acts=None):
slf_attn_list = []
# -- Prepare masks
slf_attn_mask = get_attn_key_pad_mask(seq_k=pos, seq_q=pos) # (b, t, t)
non_pad_mask = get_non_pad_mask(pos) # (b, t, 1)
# -- Forward
pos_enc = self.position(pos)
output = seq + pos_enc
if acts is not None:
acts["act.{}.add_pos_enc".format(self.name)] = output
for i, layer in enumerate(self.layer_stack):
output, slf_attn = layer(
output,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask,
acts=acts)
if return_attns:
slf_attn_list += [slf_attn]
if acts is not None:
acts['act.{}.layer_stack.{}'.format(self.name, i)] = output
return output, non_pad_mask
class FFTBlock(torch.nn.Module):
"""FFT Block"""
def __init__(self,
d_model,
d_inner,
n_head,
d_k,
d_v,
fft_conv1d_kernel,
fft_conv1d_padding,
dropout,
name,
fused_layernorm=False):
super(FFTBlock, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
        self.fft_conv1d_kernel = fft_conv1d_kernel
        self.fft_conv1d_padding = fft_conv1d_padding
        self.dropout = dropout
self.name = name
self.fused_layernorm = fused_layernorm
self.slf_attn = MultiHeadAttention(
n_head=n_head,
d_model=d_model,
d_k=d_k,
d_v=d_v,
dropout=dropout,
name="{}.slf_attn".format(name),
fused_layernorm=fused_layernorm)
self.pos_ffn = PositionwiseFeedForward(
d_in=d_model,
d_hid=d_inner,
fft_conv1d_kernel=fft_conv1d_kernel,
fft_conv1d_padding=fft_conv1d_padding,
dropout=dropout,
name="{}.pos_ffn".format(name),
fused_layernorm=fused_layernorm)
@Nvtx("fftblock", enabled=False)
def forward(self, input, non_pad_mask=None, slf_attn_mask=None, acts=None):
output, slf_attn = self.slf_attn(
input, mask=slf_attn_mask, acts=acts)
output *= non_pad_mask.to(output.dtype)
output = self.pos_ffn(output, acts=acts)
output *= non_pad_mask.to(output.dtype)
return output, slf_attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout, name, fused_layernorm=False):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.name = name
d_out = d_k + d_k + d_v
self.linear = nn.Linear(d_model, n_head * d_out)
nn.init.xavier_normal_(self.linear.weight)
self.attention = ScaledDotProductAttention(
temperature=np.power(d_k, 0.5),
name="{}.scaled_dot".format(self.name))
self.layer_norm = apex.normalization.FusedLayerNorm(
d_model) if fused_layernorm else nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
@Nvtx("slf_attn", enabled=False)
def forward(self, x, mask=None, acts=None):
bs, seq_len, _ = x.size()
residual = x
with Nvtx("linear", enabled=False):
d_out = self.d_k + self.d_k + self.d_v
x = self.linear(x) # (b, t, n_heads * h)
if acts is not None:
acts['act.{}.linear'.format(self.name)] = x
x = x.view(bs, seq_len, self.n_head, d_out) # (b, t, n_heads, h)
x = x.permute(2, 0, 1, 3).contiguous().view(self.n_head * bs, seq_len, d_out) # (n * b, t, h)
q = x[..., :self.d_k] # (n * b, t, d_k)
k = x[..., self.d_k: 2*self.d_k] # (n * b, t, d_k)
v = x[..., 2*self.d_k:] # (n * b, t, d_k)
with Nvtx("mask repeat", enabled=False):
mask = mask.repeat(self.n_head, 1, 1) # (b, t, h) -> (n * b, t, h)
with Nvtx("scaled dot", enabled=False):
output, attn = self.attention(q, k, v, mask=mask, acts=acts)
output = output.view(self.n_head, bs, seq_len, self.d_v) # (n, b, t, d_k)
output = output.permute(1, 2, 0, 3).contiguous().view(
bs, seq_len, self.n_head * self.d_v) # (b, t, n * d_k)
if acts is not None:
acts['act.{}.scaled_dot'.format(self.name)] = output
with Nvtx("fc", enabled=False):
output = self.fc(output)
with Nvtx("dropout", enabled=False):
output = self.dropout(output)
output += residual
if acts is not None:
acts['act.{}.residual'.format(self.name)] = output
with Nvtx("layer norm", enabled=False):
output = self.layer_norm(output)
if acts is not None:
acts['act.{}.ln'.format(self.name)] = output
return output, attn
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1, name=None):
super().__init__()
self.temperature = temperature
self.name = name
self.bmm1 = Bmm()
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
self.bmm2 = Bmm()
@Nvtx("scaled_dot", enabled=False)
def forward(self, q, k, v, mask=None, acts=None):
with Nvtx("bmm1", enabled=False):
attn = self.bmm1(q, k.transpose(1, 2))
attn = attn / self.temperature
with Nvtx("mask", enabled=False):
if mask is not None:
attn = attn.masked_fill(mask, -65504)
with Nvtx("softmax", enabled=False):
attn = self.softmax(attn)
with Nvtx("dropout", enabled=False):
attn = self.dropout(attn)
with Nvtx("bmm2", enabled=False):
output = self.bmm2(attn, v)
return output, attn
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self,
d_in,
d_hid,
fft_conv1d_kernel,
fft_conv1d_padding,
dropout,
name,
fused_layernorm=False):
super().__init__()
self.name = name
self.w_1 = nn.Conv1d(
d_in, d_hid, kernel_size=fft_conv1d_kernel, padding=fft_conv1d_padding)
self.w_2 = nn.Conv1d(
d_hid, d_in, kernel_size=fft_conv1d_kernel, padding=fft_conv1d_padding)
self.layer_norm = apex.normalization.FusedLayerNorm(
d_in) if fused_layernorm else nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
@Nvtx("position wise", enabled=False)
def forward(self, x, acts=None):
residual = x
output = x.transpose(1, 2)
output = self.w_1(output)
if acts is not None:
acts['act.{}.conv1'.format(self.name)] = output
output = F.relu(output)
output = self.w_2(output)
if acts is not None:
acts['act.{}.conv2'.format(self.name)] = output
output = output.transpose(1, 2)
output = self.dropout(output)
output += residual
if acts is not None:
acts['act.{}.residual'.format(self.name)] = output
output = self.layer_norm(output)
if acts is not None:
acts['act.{}.ln'.format(self.name)] = output
return output
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
''' Sinusoid position encoding table '''
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i)
for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.
return torch.FloatTensor(sinusoid_table)
def get_attn_key_pad_mask(seq_k, seq_q):
''' For masking out the padding part of key sequence. '''
# Expand to fit the shape of key query attention matrix.
len_q = seq_q.size(1)
padding_mask = seq_k.eq(0) # (b, t)
padding_mask = padding_mask.unsqueeze(
1).expand(-1, len_q, -1) # (b, t, t)
return padding_mask
def get_non_pad_mask(seq):
assert seq.dim() == 2
return seq.ne(0).unsqueeze(-1)
class LengthRegulator(nn.Module):
""" Length Regulator """
def __init__(self, input_size, duration_predictor_filter_size, duration_predictor_kernel_size, dropout, fused_layernorm=False):
super(LengthRegulator, self).__init__()
self.duration_predictor = DurationPredictor(
input_size=input_size,
filter_size=duration_predictor_filter_size,
kernel=duration_predictor_kernel_size,
dropout=dropout,
fused_layernorm=fused_layernorm
)
@Nvtx("length regulator", enabled=False)
def forward(self, input, input_mask, target=None, alpha=1.0):
duration = self.duration_predictor(
input, input_mask)
# print(duration_predictor_output)
if self.training:
output, output_pos = self.get_output(
input, target, alpha)
else:
duration = torch.clamp_min(torch.exp(duration) - 1, 0)
output, output_pos = self.get_output(
input, duration, alpha)
return output, output_pos, duration
def get_output(self, input, duration, alpha):
output, output_pos = list(), list()
# TODO: parallelize the loop.
for i in range(input.size(0)):
repeats = duration[i].float() * alpha
with Nvtx("round #{}".format(i), enabled=False):
repeats = torch.round(repeats).long()
with Nvtx("repeat #{}".format(i), enabled=False):
output.append(torch.repeat_interleave(
input[i], repeats, dim=0))
output_pos.append(torch.from_numpy(
np.indices((output[i].shape[0],))[0] + 1))
output = pad_sequence(output, batch_first=True)
output_pos = pad_sequence(output_pos, batch_first=True)
with Nvtx("pos to gpu", enabled=False):
output_pos = to_device_async(output_pos, device=output.device)
return output, output_pos
class DurationPredictor(nn.Module):
""" Duration Predictor """
def __init__(self, input_size, filter_size, kernel, dropout, fused_layernorm=False):
super(DurationPredictor, self).__init__()
self.input_size = input_size
self.filter_size = filter_size
self.kernel = kernel
self.dropout = dropout
self.conv1d_1 = nn.Conv1d(self.input_size,
self.filter_size,
kernel_size=self.kernel,
padding=1)
self.relu_1 = nn.ReLU()
self.layer_norm_1 = apex.normalization.FusedLayerNorm(
self.filter_size) if fused_layernorm else nn.LayerNorm(self.filter_size)
self.dropout_1 = nn.Dropout(self.dropout)
self.conv1d_2 = nn.Conv1d(self.filter_size,
self.filter_size,
kernel_size=self.kernel,
padding=1)
self.relu_2 = nn.ReLU()
self.layer_norm_2 = apex.normalization.FusedLayerNorm(
self.filter_size) if fused_layernorm else nn.LayerNorm(self.filter_size)
self.dropout_2 = nn.Dropout(self.dropout)
self.linear_layer = nn.Linear(self.filter_size, 1, bias=True)
@Nvtx("duration predictor", enabled=False)
def forward(self, input, input_mask):
input *= input_mask.to(input.dtype)
out = self.conv1d_1(input.transpose(1,2)).transpose(1,2)
out = self.relu_1(out)
out = self.layer_norm_1(out)
out = self.dropout_1(out)
out = self.conv1d_2(out.transpose(1,2)).transpose(1,2)
out = self.relu_2(out)
out = self.layer_norm_2(out)
out = self.dropout_2(out)
out = self.linear_layer(out)
out *= input_mask.to(out.dtype)
out = out.squeeze(-1)
return out
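# --- Usage sketch (editor's addition, not part of the original file) ---
# The sinusoidal position table and the padding masks used by the FFT blocks,
# shown on toy inputs (position 0 marks padding).
if __name__ == "__main__":
    table = get_sinusoid_encoding_table(n_position=10, d_hid=8, padding_idx=0)
    print(table.shape)  # torch.Size([10, 8]); row 0 is all zeros for padding
    pos = torch.tensor([[1, 2, 3, 0, 0]])
    print(get_attn_key_pad_mask(pos, pos).shape)  # (1, 5, 5), True where the key is padding
    print(get_non_pad_mask(pos).squeeze(-1))      # tensor([[ True,  True,  True, False, False]])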
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/model/module.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import torch
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy, to_device_async
from fastspeech.inferencer.denoiser import Denoiser
from waveglow.model import WaveGlow
import argparse
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
    DDP wraps the model with an additional "module." prefix; it needs to be removed
    for single-GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
class WaveGlowInferencer(object):
def __init__(self, ckpt_file, device='cuda', use_fp16=False, use_denoiser=False):
self.ckpt_file = ckpt_file
self.device = device
self.use_fp16 = use_fp16
self.use_denoiser = use_denoiser
# model
# sys.path.append('waveglow')
from waveglow.arg_parser import parse_waveglow_args
        parser = argparse.ArgumentParser()
        model_parser = parse_waveglow_args(parser)
args, _ = model_parser.parse_known_args()
model_config = dict(
n_mel_channels=args.n_mel_channels,
n_flows=args.flows,
n_group=args.groups,
n_early_every=args.early_every,
n_early_size=args.early_size,
WN_config=dict(
n_layers=args.wn_layers,
kernel_size=args.wn_kernel_size,
n_channels=args.wn_channels
)
)
self.model = WaveGlow(**model_config)
state_dict = torch.load(self.ckpt_file, map_location=self.device)['state_dict']
state_dict = unwrap_distributed(state_dict)
self.model.load_state_dict(state_dict)
self.model = to_device_async(self.model, self.device)
self.model = self.model.remove_weightnorm(self.model)
self.model.eval()
        if self.use_fp16:
            self.model = self.model.half()
if self.use_denoiser:
self.denoiser = Denoiser(self.model, device=device)
self.denoiser = to_device_async(self.denoiser, self.device)
tprint('Using WaveGlow denoiser.')
def __enter__(self):
pass
def __exit__(self, exception_type, exception_value, traceback):
pass
def infer(self, mels):
if self.use_fp16:
mels = mels.half()
mels = to_device_async(mels, self.device)
wavs = self.model.infer(mels, sigma=0.6)
if self.use_denoiser:
wavs = self.denoiser(wavs, strength=0.01)
return wavs.float()
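# --- Usage sketch (editor's addition, not part of the original file) ---
# Loading a WaveGlow checkpoint and vocoding a batch of mel spectrograms.
# The WAVEGLOW_CKPT environment variable is hypothetical and must point to a
# real checkpoint; a CUDA device and the waveglow package are assumed.
if __name__ == "__main__":
    import os
    ckpt = os.environ.get("WAVEGLOW_CKPT", "")
    if ckpt and os.path.isfile(ckpt):
        vocoder = WaveGlowInferencer(ckpt_file=ckpt, device='cuda', use_fp16=True)
        mels = torch.randn(1, 80, 200, device='cuda')  # (batch, n_mels, frames), illustrative
        wavs = vocoder.infer(mels)
        print(wavs.shape)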
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/inferencer/waveglow_inferencer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import pathlib
import time
import abc
import numpy as np
import torch
from tensorboardX import SummaryWriter
import glob
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
import torch.nn as nn
class Inferencer(object):
"""
set seed
load model
logging
"""
def __init__(self, model_name, model, data_loader=None, ckpt_path=None, ckpt_file=None, log_path=None, device='cuda', use_fp16=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.ckpt_path = ckpt_path
self.log_path = log_path
self.device = device
self.seed = seed
self.step = 0
self.ckpt_file = ckpt_file
self.use_fp16 = use_fp16
# model
self.model.eval()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name, num_param))
# precision
if self.use_fp16:
self.model = self.model.half()
# data parallel
self.model = nn.DataParallel(self.model)
# set seed
if seed is None:
seed = np.random.randint(2**16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = iter(self.data_loader)
# logging
if log_path:
            # tensorboard log path : {log_path}/YYYYMMDD-HHMMSS
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
# checkpoint path
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
# load checkpoint
self.load(ckpt_file)
def __enter__(self):
pass
def __exit__(self, exception_type, exception_value, traceback):
pass
@abc.abstractmethod
def infer(self):
return NotImplemented
def load(self, ckpt_file):
# load latest checkpoint file if not defined.
if not ckpt_file:
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
ckpt_file = max(files_exist, key=os.path.getctime)
if ckpt_file:
state_dict = torch.load(ckpt_file, map_location=self.device)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
tprint('[Load] Checkpoint \'{}\'. Step={}'.format(ckpt_file, self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))
raise Exception("No checkpoints found.")
def log(self, output):
output = {k: to_cpu_numpy(v) for k, v in output.items()}
self.console_log('infer', output)
if self.log_path:
self.tensorboard_log('infer', output)
@abc.abstractmethod
def console_log(self, tag, output):
        raise NotImplementedError
@abc.abstractmethod
def tensorboard_log(self, tag, output):
        raise NotImplementedError
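# Minimal subclassing sketch, not part of the original file: a concrete
# inferencer only has to provide infer(), console_log() and tensorboard_log().
# Note that instantiation requires an existing checkpoint (either ckpt_file or
# one found under ckpt_path), otherwise load() above raises.
class _ExampleInferencer(Inferencer):
    def infer(self):
        inputs = next(self.data_loader_iter)
        return self.model(inputs)

    def console_log(self, tag, output):
        tprint("{}: {}".format(tag, sorted(output.keys())))

    def tensorboard_log(self, tag, output):
        pass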
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/inferencer/inferencer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from fastspeech.inferencer.inferencer import Inferencer
from fastspeech.utils.logging import tprint
from fastspeech.utils.tensorboard import imshow_to_buf
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
from torch.nn import functional as F
class FastSpeechInferencer(Inferencer):
def __init__(self, model_name, model, data_loader, ckpt_path=None, ckpt_file=None, log_path=None, device='cuda', use_fp16=False, seed=None):
super(FastSpeechInferencer, self).__init__(model_name, model, data_loader, ckpt_path, ckpt_file, log_path, device, use_fp16, seed)
def infer(self, acts=None, seq_input_len=None, seq_output_len=None):
inputs = next(self.data_loader_iter)
text_encoded = inputs["text_encoded"]
text_pos = inputs["text_pos"]
if seq_input_len:
text_encoded = F.pad(text_encoded, pad=(0, seq_input_len - text_encoded.size(1))) # (b, t)
text_pos = F.pad(text_pos, pad=(0, seq_input_len - text_pos.size(1))) # (b, t)
text_encoded = to_device_async(text_encoded, self.device)
text_pos = to_device_async(text_pos, self.device)
mel, mel_mask, _ = self.model(
seq=text_encoded,
pos=text_pos,
seq_output_len=seq_output_len,
use_fp16=self.use_fp16,
acts=acts
)
# (B,T,H) => (B,H,T)
mel = mel.transpose(1, 2)
mel_mask = mel_mask.squeeze(2)
outputs = dict()
outputs['mel'] = mel
outputs['mel_mask'] = mel_mask
outputs['text'] = inputs["text_norm"]
if "mel" in inputs:
outputs['mel_tgt'] = inputs["mel"]
if "wav" in inputs:
outputs['wav_tgt'] = inputs["wav"]
if "sr" in inputs:
outputs['sr'] = inputs["sr"]
return outputs
def console_log(self, tag, output):
# console logging
msg = ""
for key, value in sorted(output.items()):
msg += ',\t{}: {}'.format(key, value)
tprint(msg)
# TODO generalize
def tensorboard_log(self, tag, output_tensor):
self.tbwriter.add_image('{}/{}'.format(tag, "mel"), imshow_to_buf(output_tensor['mel']), global_step=self.step)
self.tbwriter.add_image('{}/{}'.format(tag, "mel_tgt"), imshow_to_buf(output_tensor['mel_tgt']), global_step=self.step)
self.tbwriter.add_audio('{}/{}'.format(tag, "wav_tgt"), output_tensor['wav_tgt'], global_step=self.step, sample_rate=int(output_tensor['sr']))
self.tbwriter.add_text('{}/{}'.format(tag, "text"), output_tensor['text'], global_step=self.step)
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/inferencer/fastspeech_inferencer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/inferencer/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Modified from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/waveglow/denoiser.py
import sys
sys.path.append('tacotron2')
import torch
from stft import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros', device='cuda'):
super(Denoiser, self).__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).to(device)
if mode == 'zeros':
mel_input = torch.zeros(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
elif mode == 'normal':
mel_input = torch.randn(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised.squeeze(1)
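# Usage sketch, not part of the original file. Denoiser only needs two things
# from the model it wraps: an 'upsample' module (for the dtype/device of the
# bias input) and an 'infer(mel, sigma)' method, so a tiny stand-in is used
# here. A real WaveGlow model would be passed in practice; a CUDA device and a
# librosa version compatible with the STFT code above are assumed.
if __name__ == "__main__":
    class _StubWaveGlow(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.upsample = torch.nn.ConvTranspose1d(80, 80, 1).cuda()

        def infer(self, mel, sigma=0.0):
            # 256x upsampling roughly matches the hop length used for the bias
            return torch.zeros(mel.size(0), mel.size(2) * 256, device=mel.device)

    denoiser = Denoiser(_StubWaveGlow(), device='cuda')
    audio = torch.randn(1, 22050, device='cuda')
    print(denoiser(audio, strength=0.01).shape)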
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/inferencer/denoiser.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from fastspeech.align_tacotron2 import get_tacotron2, get_duration
from fastspeech.trainer.trainer import Trainer
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
from torch.nn import functional as F
class FastspeechTrainer(Trainer):
def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None,
n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp='O0', nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None, pre_aligns=True):
super(FastspeechTrainer, self).__init__(data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn, step, ckpt_path,
log_path, n_epochs, save_steps, log_steps, device, use_amp, nvprof_iter_start, nvprof_iter_end, pyprof_enabled, detect_anomaly, seed)
self.pre_aligns = pre_aligns
if not pre_aligns:
self.tacotron2 = get_tacotron2(device, is_training=True)
to_device_async(self.tacotron2, device)
def loss(self, inputs, model):
text = inputs["text_encoded"]
text_pos = inputs["text_pos"]
mel_tgt = inputs["mel"]
text = to_device_async(text, self.device)
text_pos = to_device_async(text_pos, self.device)
mel_tgt = to_device_async(mel_tgt, self.device)
if self.pre_aligns:
dur_tgt = inputs["align"] # preprocessed align
dur_tgt = dur_tgt.float()
dur_tgt = to_device_async(dur_tgt, self.device)
else:
text_len = inputs['text_len']
mel_len = inputs['mel_len']
dur_tgt = get_duration(
text, text_len, mel_tgt, mel_len, self.tacotron2, self.device)
# (B,H,T) => (B,T,H)
mel_tgt = mel_tgt.transpose(1, 2)
# Forward
mel, mask, dur = model(
text,
text_pos,
duration_target=dur_tgt,
seq_output_len=mel_tgt.size(1))
assert(mel.size(1) == mel_tgt.size(1))
# Loss
mel_loss = F.mse_loss(mel, mel_tgt, reduction='none')
mel_mask = mel_tgt.ne(0).float()
mel_loss *= mel_mask
mel_loss = mel_loss.mean()
dur_tgt = torch.log(dur_tgt + 1)
dur_mask = text_pos.ne(0).float()
dur_tgt *= dur_mask
dur_pred_loss = F.mse_loss(dur, dur_tgt)
loss = mel_loss + dur_pred_loss
meta = {
'mel_loss': to_cpu_numpy(mel_loss),
'duration_predictor_loss': to_cpu_numpy(dur_pred_loss),
}
# meta = {}
return loss, meta
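# Self-contained sketch of the loss terms above on toy tensors, not part of
# the original file: a masked MSE over mel frames plus an MSE between the
# predicted durations and log(1 + target durations), masked by non-padded
# text positions. The shapes are small arbitrary values chosen for illustration.
if __name__ == "__main__":
    mel = torch.randn(2, 7, 80)          # predicted mel, (B, T, H)
    mel_tgt = torch.randn(2, 7, 80)
    mel_tgt[:, 5:, :] = 0                # zero padding at the tail
    mel_loss = (F.mse_loss(mel, mel_tgt, reduction='none')
                * mel_tgt.ne(0).float()).mean()

    dur = torch.rand(2, 4)               # predicted durations, (B, T_text)
    dur_tgt = torch.randint(1, 5, (2, 4)).float()
    text_pos = torch.tensor([[1, 2, 3, 0], [1, 2, 3, 4]])   # 0 marks padding
    dur_tgt = torch.log(dur_tgt + 1) * text_pos.ne(0).float()
    print(float(mel_loss + F.mse_loss(dur, dur_tgt)))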
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trainer/fastspeech_trainer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trainer/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import glob
import pathlib
import numpy as np
import torch
from tensorboardX import SummaryWriter
import time
import os
import matplotlib.pyplot as plt
from torch import nn
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.fp16 import cast_model_to_half
import torch.cuda.profiler as profiler
from fastspeech.utils.time import TimeElapsed
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
# model
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(
self.model_name, num_param))
# optimizer
self.optimizer = optimizer_fn(model)
# lr scheduler
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
# automatic mixed precision
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model,
self.optimizer,
opt_level='O1')
# profile
if nvprof_iter_start and nvprof_iter_end is not None and pyprof_enabled:
from apex import pyprof
pyprof.nvtx.init()
# data parallel
self.model = nn.DataParallel(self.model)
# set seed
if seed is None:
seed = np.random.randint(2**16)
np.random.seed(seed)
torch.manual_seed(seed)
# data loader
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
# logging
if log_path:
            # tensorboard log path : {log_path}/YYYYMMDD-HHMMSS
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
# checkpoint path
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
# load checkpoint
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled):
for i in range(self.step+1, self.final_steps + 1):
self.step = i
tprint("------------- TRAIN step : {} -------------".format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name="Training time during profiling", format=":.6f")
timer.start()
with Nvtx("step #{}".format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint("lr: {:06f}".format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if self.ckpt_path and self.save_steps and i % self.save_steps == 0:
self.save()
tprint("Training has been done.")
except StopIteration: # done by n_epochs
tprint("Training has been done. (by n_epochs)")
except KeyboardInterrupt:
tprint("Training has been canceled.")
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx("data load", enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx("forward"):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx("backward"):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx("weight update"):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {
'step': self.step,
'model': self.model.state_dict(),
'optim': self.optimizer.state_dict(),
}
torch.save(state_dict, self.ckpt_path +
'/checkpoint_{:06d}.pt'.format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(
self.model_name, self.step))
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
# load the latest created file.
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint('[Load] Checkpoint \'{}\'. Step={}'.format(
latest_file, self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))
def console_log(self, tag, loss, meta):
# console logging
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar(
'{}/loss'.format(tag), loss, global_step=self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
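# Small illustration of repeat(), not part of the original file: with an
# integer n_repeat the generator is exhausted after that many passes over the
# data (train() above catches the resulting StopIteration from next()); with
# n_repeat=None it cycles forever.
if __name__ == "__main__":
    print(list(Trainer.repeat([1, 2, 3], n_repeat=2)))   # [1, 2, 3, 1, 2, 3]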
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/fastspeech/trainer/trainer.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import numpy as np
def save_figure_to_numpy(fig):
# save it to a numpy array.
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def plot_alignment_to_numpy(alignment, info=None):
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_spectrogram_to_numpy(spectrogram):
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(range(len(gate_targets)), gate_targets, alpha=0.5,
color='green', marker='+', s=1, label='target')
ax.scatter(range(len(gate_outputs)), gate_outputs, alpha=0.5,
color='red', marker='.', s=1, label='predicted')
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
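# Usage sketch, not part of the original file: renders a random spectrogram to
# an (H, W, 3) uint8 array of the drawn figure. Note that fig.canvas.tostring_rgb
# and np.fromstring above assume older matplotlib and numpy releases; recent
# versions deprecate or remove those calls.
if __name__ == "__main__":
    image = plot_spectrogram_to_numpy(np.random.rand(80, 200))
    print(image.shape, image.dtype)   # e.g. (300, 1200, 3) uint8 at the default dpi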
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/plotting_utils.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
from fastspeech.text_norm import symbols
class Hparams:
""" hyper parameters """
def __init__(self):
################################
# Experiment Parameters #
################################
self.epochs = 500
self.iters_per_checkpoint = 1000
self.seed = 1234
self.dynamic_loss_scaling = True
self.fp16_run = False
self.distributed_run = False
self.dist_backend = "nccl"
self.dist_url = "tcp://localhost:54321"
self.cudnn_enabled = True
self.cudnn_benchmark = False
self.ignore_layers = ['embedding.weight']
################################
# Data Parameters #
################################
self.load_mel_from_disk = False
self.training_files = 'filelists/ljs_audio_text_train_filelist.txt'
self.validation_files = 'filelists/ljs_audio_text_val_filelist.txt'
self.text_cleaners = ['english_cleaners']
################################
# Audio Parameters #
################################
self.max_wav_value = 32768.0
self.sampling_rate = 22050
self.filter_length = 1024
self.hop_length = 256
self.win_length = 1024
self.n_mel_channels = 80
self.mel_fmin = 0.0
self.mel_fmax = 8000.0
################################
# Model Parameters #
################################
self.n_symbols = len(symbols)
self.symbols_embedding_dim = 512
# Encoder parameters
self.encoder_kernel_size = 5
self.encoder_n_convolutions = 3
self.encoder_embedding_dim = 512
# Decoder parameters
self.n_frames_per_step = 1 # currently only 1 is supported
self.decoder_rnn_dim = 1024
self.prenet_dim = 256
self.max_decoder_steps = 1000
self.gate_threshold = 0.5
self.p_attention_dropout = 0.1
self.p_decoder_dropout = 0.1
# Attention parameters
self.attention_rnn_dim = 1024
self.attention_dim = 128
# Location Layer parameters
self.attention_location_n_filters = 32
self.attention_location_kernel_size = 31
# Mel-post processing network parameters
self.postnet_embedding_dim = 512
self.postnet_kernel_size = 5
self.postnet_n_convolutions = 5
################################
# Optimization Hyperparameters #
################################
self.use_saved_learning_rate = False
self.learning_rate = 1e-3
self.weight_decay = 1e-6
self.grad_clip_thresh = 1.0
self.batch_size = 64
self.mask_padding = True # set model's padded outputs to padded values
def return_self(self):
return self
def create_hparams():
hparams = Hparams()
return hparams.return_self()
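# Usage sketch, not part of the original file: the hyperparameters are plain
# attributes on the Hparams instance, so overrides are simple assignments.
if __name__ == "__main__":
    hparams = create_hparams()
    hparams.batch_size = 32            # example override
    print(hparams.n_mel_channels, hparams.sampling_rate, hparams.batch_size)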
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/hparams.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import tacotron2.model
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/__init__.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
    observations in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)
] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
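# Self-contained check of the compression pair above, not part of the original
# file: dynamic_range_compression is log(clamp(x, min=clip_val) * C) and
# dynamic_range_decompression is exp(x) / C, so values above clip_val
# round-trip exactly (up to float precision).
if __name__ == "__main__":
    x = torch.linspace(0.001, 1.0, steps=5)
    y = dynamic_range_decompression(dynamic_range_compression(x))
    print(torch.allclose(x, y, atol=1e-6))   # True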
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/audio_processing.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
from math import sqrt
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from tacotron2.layers import ConvNorm, LinearNorm
from tacotron2.utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -1e9
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(
attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolution with 512 channels and kernel size 5
"""
def __init__(self, hparams):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
for i in range(1, hparams.postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim,
hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int(
(hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(hparams.n_mel_channels))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = F.dropout(torch.tanh(
self.convolutions[i](x)), 0.5, self.training)
x = F.dropout(self.convolutions[-1](x), 0.5, self.training)
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, hparams):
super(Encoder, self).__init__()
convolutions = []
for _ in range(hparams.encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(hparams.encoder_embedding_dim,
hparams.encoder_embedding_dim,
kernel_size=hparams.encoder_kernel_size, stride=1,
padding=int((hparams.encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(hparams.encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
int(hparams.encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
# pytorch tensor are not reversible, hence the conversion
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def inference(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class Decoder(nn.Module):
def __init__(self, hparams):
super(Decoder, self).__init__()
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.encoder_embedding_dim = hparams.encoder_embedding_dim
self.attention_rnn_dim = hparams.attention_rnn_dim
self.decoder_rnn_dim = hparams.decoder_rnn_dim
self.prenet_dim = hparams.prenet_dim
self.max_decoder_steps = hparams.max_decoder_steps
self.gate_threshold = hparams.gate_threshold
self.p_attention_dropout = hparams.p_attention_dropout
self.p_decoder_dropout = hparams.p_decoder_dropout
self.prenet = Prenet(
hparams.n_mel_channels * hparams.n_frames_per_step,
[hparams.prenet_dim, hparams.prenet_dim])
self.attention_rnn = nn.LSTMCell(
hparams.prenet_dim + hparams.encoder_embedding_dim,
hparams.attention_rnn_dim)
self.attention_layer = Attention(
hparams.attention_rnn_dim, hparams.encoder_embedding_dim,
hparams.attention_dim, hparams.attention_location_n_filters,
hparams.attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(
hparams.attention_rnn_dim + hparams.encoder_embedding_dim,
hparams.decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
hparams.decoder_rnn_dim + hparams.encoder_embedding_dim,
hparams.n_mel_channels * hparams.n_frames_per_step)
self.gate_layer = LinearNorm(
hparams.decoder_rnn_dim + hparams.encoder_embedding_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: decoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def initialize_decoder_states(self, memory, mask):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
        gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
self.attention_hidden = F.dropout(
self.attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)), dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory,
attention_weights_cat, self.mask)
self.attention_weights_cum += self.attention_weights
decoder_input = torch.cat(
(self.attention_hidden, self.attention_context), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell))
self.decoder_hidden = F.dropout(
self.decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(self.decoder_hidden, self.attention_context), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def forward(self, memory, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
self.initialize_decoder_states(
memory, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
mel_output, gate_output, attention_weights = self.decode(
decoder_input)
mel_outputs += [mel_output]
gate_outputs += [gate_output.squeeze(1)]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def inference(self, memory):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
mel_outputs, gate_outputs, alignments = [], [], []
while True:
decoder_input = self.prenet(decoder_input)
mel_output, gate_output, alignment = self.decode(decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [alignment]
if torch.sigmoid(gate_output.data) > self.gate_threshold:
break
elif len(mel_outputs) == self.max_decoder_steps:
# print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
def __init__(self, hparams):
super(Tacotron2, self).__init__()
self.mask_padding = hparams.mask_padding
self.fp16_run = hparams.fp16_run
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.embedding = nn.Embedding(
hparams.n_symbols, hparams.symbols_embedding_dim)
std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.embedding.weight.data.uniform_(-val, val)
self.encoder = Encoder(hparams)
self.decoder = Decoder(hparams)
self.postnet = Postnet(hparams)
def parse_batch(self, batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
return (
(text_padded, input_lengths, mel_padded, max_len, output_lengths),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths=None):
if self.mask_padding and output_lengths is not None:
mask = ~get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].data.masked_fill_(mask, 0.0)
outputs[1].data.masked_fill_(mask, 0.0)
outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
text_inputs, text_lengths, mels, max_len, output_lengths = inputs
text_lengths, output_lengths = text_lengths.data, output_lengths.data
embedded_inputs = self.embedding(text_inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, text_lengths)
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, mels, memory_lengths=text_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
output_lengths)
def inference(self, inputs):
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder.inference(embedded_inputs)
mel_outputs, gate_outputs, alignments = self.decoder.inference(
encoder_outputs)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
outputs = self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
return outputs
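# Build-only sketch, not part of the original file: constructs Tacotron2 from
# the default hyperparameters and reports its parameter count. Running
# parse_batch()/forward() additionally requires a CUDA device, because to_gpu
# moves every input tensor to the GPU.
if __name__ == "__main__":
    from tacotron2.hparams import create_hparams
    model = Tacotron2(create_hparams())
    print(sum(p.numel() for p in model.parameters()))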
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/model.py
|
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from tacotron2.audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
# print(self.forward_basis.dtype, input_data.dtype, forward_transform.dtype, phase.dtype)
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :,
approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
inverse_transform = inverse_transform[:,
:, :-int(self.filter_length/2):]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
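# --- Illustrative usage (not part of the upstream file) ---
# A minimal round-trip sketch for the STFT module above: random noise is used
# purely for illustration; transform() yields magnitude and phase, and
# inverse() reconstructs the (windowed, trimmed) waveform.
def _example_stft_roundtrip():
    batch = torch.randn(2, 16000)                 # (num_batches, num_samples)
    stft = STFT(filter_length=800, hop_length=200, win_length=800)
    magnitude, phase = stft.transform(batch)      # each: (2, 401, T_frames)
    reconstruction = stft.inverse(magnitude, phase)
    return magnitude, phase, reconstruction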
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/stft.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import torch
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
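# --- Illustrative example (not part of the upstream file) ---
# A small self-contained sketch of how the two helpers above cooperate:
# tensors are flattened into one buffer, an element-wise operation is applied
# to that buffer, and the result is copied back into tensors of the original
# shapes (the same pattern used for the gradient all-reduce below).
def _example_flatten_roundtrip():
    a = torch.ones(2, 3)
    b = torch.full((4,), 2.0)
    flat = _flatten_dense_tensors([a, b])    # shape (10,)
    flat /= 2.0                              # element-wise op on the flat buffer
    for buf, synced in zip([a, b], _unflatten_dense_tensors(flat, [a, b])):
        buf.copy_(synced)
    return a, b                              # a is all 0.5, b is all 1.0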
'''
This version of DistributedDataParallel is designed to be used in conjunction with the multiproc.py
launcher included with this example. It assumes that your run is using multiprocess with 1
GPU/process, that the model is on the correct device, and that torch.cuda.set_device has been
used to set the device.
Parameters are broadcasted to the other processes on initialization of DistributedDataParallel,
and will be allreduced at the finish of the backward pass.
'''
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
#fallback for PyTorch 0.3
if not hasattr(dist, '_backend'):
self.warn_on_half = True
else:
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
for p in self.module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(self.needs_reduction):
self.needs_reduction = False
buckets = {}
for param in self.module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if self.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
self.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
param._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
'''
Modifies existing model to do gradient allreduce, but doesn't change class
so you don't need "module"
'''
def apply_gradient_allreduce(module):
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = param.data.dtype
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
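# --- Illustrative usage (not part of the upstream file) ---
# A hedged sketch of the intended flow for apply_gradient_allreduce: one
# process per GPU (e.g. launched by multiproc.py), the default process group
# already initialized, and the model moved to its device before wrapping.
# `my_model` and `rank` are placeholders supplied by the caller.
def _example_gradient_allreduce_setup(my_model, rank):
    torch.cuda.set_device(rank)
    # dist.init_process_group(...) is assumed to have been called already
    model = my_model.cuda()
    model = apply_gradient_allreduce(model)
    return model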
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/distributed.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import numpy as np
from scipy.io.wavfile import read
import torch
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(filename, split="|"):
with open(filename, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
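# --- Illustrative example (not part of the upstream file) ---
# A minimal sketch of get_mask_from_lengths. The helper allocates its index
# tensor with torch.cuda.LongTensor, so this example assumes a CUDA device.
def _example_length_mask():
    lengths = torch.tensor([2, 4], device='cuda')
    mask = get_mask_from_lengths(lengths)
    # mask == [[True, True, False, False],
    #          [True, True, True,  True ]]
    return mask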
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/utils.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import os
from numpy import finfo
import torch
from tacotron2.distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from tacotron2.model import Tacotron2
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def reduce_tensor(tensor, n_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= n_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def load_model(hparams):
model = Tacotron2(hparams).to(device)
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
val_loss += reduced_val_loss
val_loss = val_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, reduced_val_loss))
logger.log_validation(reduced_val_loss, model, y, y_pred, iteration)
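# --- Illustrative usage (not part of the upstream file) ---
# A minimal checkpoint round trip with the helpers above. `hparams` is assumed
# to be a configuration object accepted by load_model (fp16_run,
# distributed_run, model sizes, ...); the file path is arbitrary.
def _example_checkpoint_roundtrip(hparams, filepath='checkpoint_0.pt'):
    model = load_model(hparams)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    save_checkpoint(model, optimizer, learning_rate=1e-3, iteration=0,
                    filepath=filepath)
    model, optimizer, learning_rate, iteration = load_checkpoint(
        filepath, model, optimizer)
    return model, optimizer, learning_rate, iteration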
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/train.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import torch
from librosa.filters import mel as librosa_mel_fn
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
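# --- Illustrative example (not part of the upstream file) ---
# A small sketch of the two wrappers above. With the default padding rule
# (padding = dilation * (kernel_size - 1) / 2), ConvNorm preserves the time
# dimension for odd kernel sizes.
def _example_layers():
    linear = LinearNorm(in_dim=80, out_dim=256, w_init_gain='tanh')
    conv = ConvNorm(in_channels=80, out_channels=512, kernel_size=5,
                    w_init_gain='relu')
    x = torch.randn(8, 100, 80)       # (batch, time, features)
    y = linear(x)                     # -> (8, 100, 256)
    spec = torch.randn(8, 80, 100)    # (batch, channels, time)
    z = conv(spec)                    # -> (8, 512, 100), time preserved
    return y, z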
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/layers.py
|
# BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import time
import torch
import sys
import subprocess
argslist = list(sys.argv)[1:]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("logs/{}_GPU_{}.log".format(job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
for p in workers:
p.wait()
|
DeepLearningExamples-master
|
CUDA-Optimized/FastSpeech/tacotron2/multiproc.py
|
from setuptools import setup, find_packages
setup(
name='se3-transformer',
packages=find_packages(exclude=['tests']),
include_package_data=True,
version='1.2.0',
description='PyTorch + DGL implementation of SE(3)-Transformers',
author='Alexandre Milesi',
author_email='alexandrem@nvidia.com',
)
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/setup.py
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/__init__.py
|
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Tuple
import dgl
import pathlib
import torch
from dgl.data import QM9EdgeDataset
from dgl import DGLGraph
from torch import Tensor
from torch.utils.data import random_split, DataLoader, Dataset
from tqdm import tqdm
from se3_transformer.data_loading.data_module import DataModule
from se3_transformer.model.basis import get_basis
from se3_transformer.runtime.utils import get_local_rank, str2bool, using_tensor_cores
def _get_relative_pos(qm9_graph: DGLGraph) -> Tensor:
x = qm9_graph.ndata['pos']
src, dst = qm9_graph.edges()
rel_pos = x[dst] - x[src]
return rel_pos
def _get_split_sizes(full_dataset: Dataset) -> Tuple[int, int, int]:
len_full = len(full_dataset)
len_train = 100_000
len_test = int(0.1 * len_full)
len_val = len_full - len_train - len_test
return len_train, len_val, len_test
class QM9DataModule(DataModule):
"""
Datamodule wrapping https://docs.dgl.ai/en/latest/api/python/dgl.data.html#qm9edge-dataset
Training set is 100k molecules. Test set is 10% of the dataset. Validation set is the rest.
This includes all the molecules from QM9 except the ones that are uncharacterized.
"""
NODE_FEATURE_DIM = 6
EDGE_FEATURE_DIM = 4
def __init__(self,
data_dir: pathlib.Path,
task: str = 'homo',
batch_size: int = 240,
num_workers: int = 8,
num_degrees: int = 4,
amp: bool = False,
precompute_bases: bool = False,
**kwargs):
        self.data_dir = data_dir  # must be set before super().__init__() so that prepare_data() can access it
super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=self._collate)
self.amp = amp
self.task = task
self.batch_size = batch_size
self.num_degrees = num_degrees
qm9_kwargs = dict(label_keys=[self.task], verbose=False, raw_dir=str(data_dir))
if precompute_bases:
bases_kwargs = dict(max_degree=num_degrees - 1, use_pad_trick=using_tensor_cores(amp), amp=amp)
full_dataset = CachedBasesQM9EdgeDataset(bases_kwargs=bases_kwargs, batch_size=batch_size,
num_workers=num_workers, **qm9_kwargs)
else:
full_dataset = QM9EdgeDataset(**qm9_kwargs)
self.ds_train, self.ds_val, self.ds_test = random_split(full_dataset, _get_split_sizes(full_dataset),
generator=torch.Generator().manual_seed(0))
train_targets = full_dataset.targets[self.ds_train.indices, full_dataset.label_keys[0]]
self.targets_mean = train_targets.mean()
self.targets_std = train_targets.std()
def prepare_data(self):
# Download the QM9 preprocessed data
QM9EdgeDataset(verbose=True, raw_dir=str(self.data_dir))
def _collate(self, samples):
graphs, y, *bases = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
edge_feats = {'0': batched_graph.edata['edge_attr'][:, :self.EDGE_FEATURE_DIM, None]}
batched_graph.edata['rel_pos'] = _get_relative_pos(batched_graph)
# get node features
node_feats = {'0': batched_graph.ndata['attr'][:, :self.NODE_FEATURE_DIM, None]}
targets = (torch.cat(y) - self.targets_mean) / self.targets_std
if bases:
# collate bases
all_bases = {
key: torch.cat([b[key] for b in bases[0]], dim=0)
for key in bases[0][0].keys()
}
return batched_graph, node_feats, edge_feats, all_bases, targets
else:
return batched_graph, node_feats, edge_feats, targets
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("QM9 dataset")
parser.add_argument('--task', type=str, default='homo', const='homo', nargs='?',
choices=['mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv',
'U0_atom', 'U_atom', 'H_atom', 'G_atom', 'A', 'B', 'C'],
help='Regression task to train on')
parser.add_argument('--precompute_bases', type=str2bool, nargs='?', const=True, default=False,
help='Precompute bases at the beginning of the script during dataset initialization,'
' instead of computing them at the beginning of each forward pass.')
return parent_parser
def __repr__(self):
return f'QM9({self.task})'
class CachedBasesQM9EdgeDataset(QM9EdgeDataset):
""" Dataset extending the QM9 dataset from DGL with precomputed (cached in RAM) pairwise bases """
def __init__(self, bases_kwargs: dict, batch_size: int, num_workers: int, *args, **kwargs):
"""
:param bases_kwargs: Arguments to feed the bases computation function
:param batch_size: Batch size to use when iterating over the dataset for computing bases
"""
self.bases_kwargs = bases_kwargs
self.batch_size = batch_size
self.bases = None
self.num_workers = num_workers
super().__init__(*args, **kwargs)
def load(self):
super().load()
# Iterate through the dataset and compute bases (pairwise only)
# Potential improvement: use multi-GPU and gather
dataloader = DataLoader(self, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers,
collate_fn=lambda samples: dgl.batch([sample[0] for sample in samples]))
bases = []
for i, graph in tqdm(enumerate(dataloader), total=len(dataloader), desc='Precomputing QM9 bases',
disable=get_local_rank() != 0):
rel_pos = _get_relative_pos(graph)
# Compute the bases with the GPU but convert the result to CPU to store in RAM
bases.append({k: v.cpu() for k, v in get_basis(rel_pos.cuda(), **self.bases_kwargs).items()})
self.bases = bases # Assign at the end so that __getitem__ isn't confused
def __getitem__(self, idx: int):
graph, label = super().__getitem__(idx)
if self.bases:
bases_idx = idx // self.batch_size
bases_cumsum_idx = self.ne_cumsum[idx] - self.ne_cumsum[bases_idx * self.batch_size]
bases_cumsum_next_idx = self.ne_cumsum[idx + 1] - self.ne_cumsum[bases_idx * self.batch_size]
return graph, label, {key: basis[bases_cumsum_idx:bases_cumsum_next_idx] for key, basis in
self.bases[bases_idx].items()}
else:
return graph, label
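# --- Illustrative example (not part of the upstream file) ---
# A worked example of the split rule described in the QM9DataModule docstring:
# training takes a fixed 100,000 molecules, test takes 10% of the full set,
# and validation gets the remainder. The dataset length used here (130,831)
# is only a stand-in for illustration.
def _example_split_sizes():
    class _FakeDataset(Dataset):
        def __len__(self):
            return 130_831
    # -> (100000, 17748, 13083): (train, val, test)
    return _get_split_sizes(_FakeDataset())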
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/data_loading/qm9.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import torch.distributed as dist
from abc import ABC
from torch.utils.data import DataLoader, DistributedSampler, Dataset
from se3_transformer.runtime.utils import get_local_rank
def _get_dataloader(dataset: Dataset, shuffle: bool, **kwargs) -> DataLoader:
# Classic or distributed dataloader depending on the context
sampler = DistributedSampler(dataset, shuffle=shuffle) if dist.is_initialized() else None
return DataLoader(dataset, shuffle=(shuffle and sampler is None), sampler=sampler, **kwargs)
class DataModule(ABC):
""" Abstract DataModule. Children must define self.ds_{train | val | test}. """
def __init__(self, **dataloader_kwargs):
super().__init__()
if get_local_rank() == 0:
self.prepare_data()
# Wait until rank zero has prepared the data (download, preprocessing, ...)
if dist.is_initialized():
dist.barrier(device_ids=[get_local_rank()])
self.dataloader_kwargs = {'pin_memory': True, 'persistent_workers': dataloader_kwargs.get('num_workers', 0) > 0,
**dataloader_kwargs}
self.ds_train, self.ds_val, self.ds_test = None, None, None
def prepare_data(self):
""" Method called only once per node. Put here any downloading or preprocessing """
pass
def train_dataloader(self) -> DataLoader:
return _get_dataloader(self.ds_train, shuffle=True, **self.dataloader_kwargs)
def val_dataloader(self) -> DataLoader:
return _get_dataloader(self.ds_val, shuffle=False, **self.dataloader_kwargs)
def test_dataloader(self) -> DataLoader:
return _get_dataloader(self.ds_test, shuffle=False, **self.dataloader_kwargs)
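# --- Illustrative example (not part of the upstream file) ---
# A minimal concrete DataModule: children only need to populate
# self.ds_train / self.ds_val / self.ds_test, and the base class builds
# (optionally distributed) dataloaders from them. The random tensors below are
# placeholders, not a real dataset.
class _RandomDataModule(DataModule):
    def __init__(self, **dataloader_kwargs):
        super().__init__(**dataloader_kwargs)
        import torch
        from torch.utils.data import TensorDataset, random_split
        full = TensorDataset(torch.randn(1000, 16), torch.randn(1000, 1))
        self.ds_train, self.ds_val, self.ds_test = random_split(full, [800, 100, 100])
# Usage sketch: _RandomDataModule(batch_size=32, num_workers=0).train_dataloader()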
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/data_loading/data_module.py
|
from .qm9 import QM9DataModule
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/data_loading/__init__.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from abc import ABC, abstractmethod
import torch
import torch.distributed as dist
from torch import Tensor
class Metric(ABC):
""" Metric class with synchronization capabilities similar to TorchMetrics """
def __init__(self):
self.states = {}
def add_state(self, name: str, default: Tensor):
assert name not in self.states
self.states[name] = default.clone()
setattr(self, name, default)
def synchronize(self):
if dist.is_initialized():
for state in self.states:
dist.all_reduce(getattr(self, state), op=dist.ReduceOp.SUM, group=dist.group.WORLD)
def __call__(self, *args, **kwargs):
self.update(*args, **kwargs)
def reset(self):
for name, default in self.states.items():
setattr(self, name, default.clone())
def compute(self):
self.synchronize()
value = self._compute().item()
self.reset()
return value
@abstractmethod
def _compute(self):
pass
@abstractmethod
def update(self, preds: Tensor, targets: Tensor):
pass
class MeanAbsoluteError(Metric):
def __init__(self):
super().__init__()
self.add_state('error', torch.tensor(0, dtype=torch.float32, device='cuda'))
self.add_state('total', torch.tensor(0, dtype=torch.int32, device='cuda'))
def update(self, preds: Tensor, targets: Tensor):
preds = preds.detach()
n = preds.shape[0]
error = torch.abs(preds.view(n, -1) - targets.view(n, -1)).sum()
self.total += n
self.error += error
def _compute(self):
return self.error / self.total
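# --- Illustrative example (not part of the upstream file) ---
# A small sketch of the metric above. Its accumulators are allocated on CUDA
# in __init__, so the example assumes a GPU; preds and targets are created on
# the same device before updating.
def _example_mae():
    metric = MeanAbsoluteError()
    preds = torch.tensor([1.0, 2.0, 4.0], device='cuda')
    targets = torch.tensor([1.0, 3.0, 2.0], device='cuda')
    metric(preds, targets)     # __call__ forwards to update()
    return metric.compute()    # (0 + 1 + 2) / 3 = 1.0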
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/metrics.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import collections
import itertools
import os
import pathlib
import re
import pynvml
class Device:
# assume nvml returns list of 64 bit ints
_nvml_bit_affinity = 64
_nvml_affinity_elements = (
os.cpu_count() + _nvml_bit_affinity - 1
) // _nvml_bit_affinity
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def get_name(self):
return pynvml.nvmlDeviceGetName(self.handle)
def get_uuid(self):
return pynvml.nvmlDeviceGetUUID(self.handle)
def get_cpu_affinity(self, scope):
if scope == 'socket':
nvml_scope = pynvml.NVML_AFFINITY_SCOPE_SOCKET
elif scope == 'node':
nvml_scope = pynvml.NVML_AFFINITY_SCOPE_NODE
else:
raise RuntimeError('Unknown scope')
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinityWithinScope(
self.handle, Device._nvml_affinity_elements, nvml_scope
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def get_thread_siblings_list():
"""
Returns a list of 2-element integer tuples representing pairs of
hyperthreading cores.
"""
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(sorted(map(int, res[0])))
thread_siblings_list.append(pair)
thread_siblings_list = list(set(thread_siblings_list))
return thread_siblings_list
def build_thread_siblings_dict(siblings_list):
siblings_dict = {}
for siblings_tuple in siblings_list:
for core in siblings_tuple:
siblings_dict[core] = siblings_tuple
return siblings_dict
def group_list_by_key(the_list, key):
sorted_list = sorted(the_list, key=key)
grouped = [
tuple(group) for key, group in itertools.groupby(sorted_list, key=key)
]
return grouped
def ungroup_affinities(affinities, scope, cores, min_cores=1, max_cores=None):
if scope == 'socket':
affinities = [
list(itertools.chain(*zip(*affinity))) for affinity in affinities
]
elif scope == 'node':
affinities = [
[group[0] for group in affinity] for affinity in affinities
]
for gpu_id, affinity in enumerate(affinities):
if len(affinity) < min_cores:
raise RuntimeError(
                f'Number of available physical cores for GPU {gpu_id} is less '
                f'than the predefined minimum, min_cores={min_cores}, available '
f'physical cores: {affinity} (count={len(affinity)})'
)
if max_cores is not None:
affinities = [affinity[:max_cores] for affinity in affinities]
if cores == 'all_logical':
affinities = [
list(itertools.chain(*affinity)) for affinity in affinities
]
elif cores == 'single_logical':
affinities = [
[group[0] for group in affinity] for affinity in affinities
]
else:
raise RuntimeError('Unknown cores mode')
return affinities
def check_affinities(affinities):
# sets of cores should be either identical or disjoint
for i, j in itertools.product(affinities, affinities):
if not set(i) == set(j) and not set(i).isdisjoint(set(j)):
raise RuntimeError(
f'Sets of cores should be either identical or disjoint, '
f'but got {i} and {j}.'
)
def get_affinities(nproc_per_node, scope, exclude_unavailable_cores=True):
devices = [Device(i) for i in range(nproc_per_node)]
affinities = [dev.get_cpu_affinity(scope) for dev in devices]
if exclude_unavailable_cores:
available_cores = os.sched_getaffinity(0)
affinities = [
sorted(list(set(affinity) & available_cores))
for affinity in affinities
]
check_affinities(affinities)
return affinities
def get_grouped_affinities(nproc_per_node, exclude_unavailable_cores=True):
siblings_list = get_thread_siblings_list()
siblings_dict = build_thread_siblings_dict(siblings_list)
socket_affinities = get_affinities(
nproc_per_node, 'socket', exclude_unavailable_cores
)
node_affinities = get_affinities(
nproc_per_node, 'node', exclude_unavailable_cores
)
siblings_key = lambda x: siblings_dict.get(x, (x,))
sibling_node_affinities = [
tuple(group_list_by_key(affinity, key=siblings_key))
for affinity in node_affinities
]
sibling_socket_affinities = [
tuple(group_list_by_key(affinity, key=siblings_key))
for affinity in socket_affinities
]
socket_node_assigned_cores = collections.defaultdict(list)
for socket, node_cores in zip(
sibling_socket_affinities, sibling_node_affinities
):
socket_node_assigned_cores[socket].extend(node_cores)
socket_node_assigned_cores = {
key: tuple(sorted(set(value)))
for key, value in socket_node_assigned_cores.items()
}
node_grouping = collections.defaultdict(list)
for socket_cores, assigned_cores in socket_node_assigned_cores.items():
unassigned_cores = sorted(
list(set(socket_cores) - set(assigned_cores))
)
for assigned_core in assigned_cores:
node_grouping[assigned_core].append(assigned_core)
for assigned, unassigned in zip(
itertools.cycle(assigned_cores), unassigned_cores
):
node_grouping[assigned].append(unassigned)
node_grouping = {key: tuple(value) for key, value in node_grouping.items()}
grouped_affinities = [
tuple(node_grouping[item] for item in sibling_node_affinity)
for sibling_node_affinity in sibling_node_affinities
]
return grouped_affinities
def set_all(gpu_id, nproc_per_node, scope, cores, min_cores, max_cores):
"""
The process is assigned with all available physical CPU cores recommended by
pynvml for the GPU with a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
ungrouped_affinities = ungroup_affinities(
grouped_affinities, scope, cores, min_cores, max_cores
)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
def set_single(gpu_id, nproc_per_node, scope, cores, min_cores=1, max_cores=1):
"""
The process is assigned with the first available physical CPU core from the
list of all physical CPU cores recommended by pynvml for the GPU with a
given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
single_grouped_affinities = [group[:1] for group in grouped_affinities]
ungrouped_affinities = ungroup_affinities(
single_grouped_affinities, scope, cores, min_cores, max_cores
)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
def set_single_unique(
gpu_id, nproc_per_node, scope, cores, min_cores=1, max_cores=1
):
"""
The process is assigned with a single unique available physical CPU core
from the list of all physical CPU cores recommended by pynvml for the GPU
with a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
affinities = []
assigned_groups = set()
for grouped_affinity in grouped_affinities:
for group in grouped_affinity:
if group not in assigned_groups:
affinities.append([group])
assigned_groups.add(group)
break
ungrouped_affinities = ungroup_affinities(
affinities, scope, cores, min_cores, max_cores
)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
def set_unique(
gpu_id,
nproc_per_node,
scope,
cores,
mode,
min_cores,
max_cores,
balanced=True,
):
"""
The process is assigned with a unique subset of available physical CPU
cores from the list of all CPU cores recommended by pynvml for the GPU with
a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
gpu_id: index of a GPU
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
mode: 'unique_contiguous' or 'unique_interleaved'
balanced: assign an equal number of physical cores to each process,
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
grouped_affinities_to_device_ids = collections.defaultdict(list)
for idx, grouped_affinity in enumerate(grouped_affinities):
grouped_affinities_to_device_ids[tuple(grouped_affinity)].append(idx)
# compute minimal number of physical cores per GPU across all GPUs and
# sockets, code assigns this number of cores per GPU if balanced == True
min_physical_cores_per_gpu = min(
[
len(cores) // len(gpus)
for cores, gpus in grouped_affinities_to_device_ids.items()
]
)
grouped_unique_affinities = [None] * nproc_per_node
for (
grouped_affinity,
device_ids,
) in grouped_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
if balanced:
cores_per_device = min_physical_cores_per_gpu
grouped_affinity = grouped_affinity[
: devices_per_group * min_physical_cores_per_gpu
]
else:
cores_per_device = len(grouped_affinity) // devices_per_group
for subgroup_id, device_id in enumerate(device_ids):
# In theory there should be no difference in performance between
# 'interleaved' and 'contiguous' pattern on Intel-based DGX-1,
# but 'contiguous' should be better for DGX A100 because on AMD
# Rome 4 consecutive cores are sharing L3 cache.
# TODO: code doesn't attempt to automatically detect layout of
# L3 cache, also external environment may already exclude some
# cores, this code makes no attempt to detect it and to align
# mapping to multiples of 4.
if mode == 'unique_interleaved':
unique_grouped_affinity = list(
grouped_affinity[subgroup_id::devices_per_group]
)
elif mode == 'unique_contiguous':
unique_grouped_affinity = list(
grouped_affinity[
subgroup_id
* cores_per_device : (subgroup_id + 1)
* cores_per_device
]
)
else:
raise RuntimeError('Unknown set_unique mode')
grouped_unique_affinities[device_id] = unique_grouped_affinity
ungrouped_affinities = ungroup_affinities(
grouped_unique_affinities, scope, cores, min_cores, max_cores
)
os.sched_setaffinity(0, ungrouped_affinities[gpu_id])
def set_affinity(
gpu_id,
nproc_per_node,
*,
mode='unique_contiguous',
scope='node',
cores='all_logical',
balanced=True,
min_cores=1,
max_cores=None,
):
"""
The process is assigned with a proper CPU affinity that matches CPU-GPU
hardware architecture on a given platform. Usually, setting proper affinity
improves and stabilizes the performance of deep learning training workloads.
This function assumes that the workload runs in multi-process single-device
mode (there are multiple training processes, and each process is running on
a single GPU). This is typical for multi-GPU data-parallel training
workloads (e.g., using `torch.nn.parallel.DistributedDataParallel`).
Available affinity modes:
* 'all' - the process is assigned with all available physical CPU cores
recommended by pynvml for the GPU with a given id.
* 'single' - the process is assigned with the first available
physical CPU core from the list of all physical CPU cores recommended by
pynvml for the GPU with a given id (multiple GPUs could be assigned with
the same CPU core).
* 'single_unique' - the process is assigned with a single unique
available physical CPU core from the list of all CPU cores recommended by
pynvml for the GPU with a given id.
* 'unique_interleaved' - the process is assigned with a unique subset of
available physical CPU cores from the list of all physical CPU cores
recommended by pynvml for the GPU with a given id, cores are assigned with
interleaved indexing pattern
* 'unique_contiguous' - (the default mode) the process is assigned with a
unique subset of available physical CPU cores from the list of all physical
CPU cores recommended by pynvml for the GPU with a given id, cores are
assigned with contiguous indexing pattern
Available "scope" modes:
* 'node' - sets the scope for pynvml affinity queries to NUMA node
* 'socket' - sets the scope for pynvml affinity queries to processor socket
Available "cores" modes:
* 'all_logical' - assigns the process with all logical cores associated with
a given corresponding physical core (i.e., automatically includes all
available hyperthreading siblings)
* 'single_logical' - assigns the process with only one logical core
associated with a given corresponding physical core (i.e., excludes
hyperthreading siblings)
'unique_contiguous' is the recommended mode for deep learning
training workloads on NVIDIA DGX machines.
Args:
gpu_id: integer index of a GPU, value from 0 to 'nproc_per_node' - 1
nproc_per_node: number of processes per node
mode: affinity mode
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
balanced: assign an equal number of physical cores to each process,
affects only 'unique_interleaved' and
'unique_contiguous' affinity modes
min_cores: (default=1) the intended minimum number of physical cores per
process, code raises RuntimeError if the number of available cores
is less than 'min_cores'
        max_cores: (default=None) the intended maximum number of physical cores
per process, the list of assigned cores is trimmed to the first
'max_cores' cores if max_cores is not None
Returns a set of logical CPU cores on which the process is eligible to run.
WARNING: On DGX A100, only half of the CPU cores have direct access to GPUs.
set_affinity with scope='node' restricts execution only to the CPU cores
directly connected to GPUs. On DGX A100, it will limit the code to half of
the CPU cores and half of CPU memory bandwidth (which may be fine for many
DL models). Use scope='socket' to use all available DGX A100 CPU cores.
WARNING: Intel's OpenMP implementation resets affinity on the first call to
an OpenMP function after a fork. It's recommended to run with env variable:
`KMP_AFFINITY=disabled` if the affinity set by gpu_affinity should be
preserved after a fork (e.g. in PyTorch DataLoader workers).
Example:
import argparse
import os
import gpu_affinity
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--local_rank',
type=int,
default=os.getenv('LOCAL_RANK', 0),
)
args = parser.parse_args()
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node)
print(f'{args.local_rank}: core affinity: {affinity}')
if __name__ == "__main__":
main()
Launch the example with:
python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py
"""
pynvml.nvmlInit()
if mode == 'all':
set_all(gpu_id, nproc_per_node, scope, cores, min_cores, max_cores)
elif mode == 'single':
set_single(gpu_id, nproc_per_node, scope, cores)
elif mode == 'single_unique':
set_single_unique(gpu_id, nproc_per_node, scope, cores)
elif mode == 'unique_interleaved' or mode == 'unique_contiguous':
set_unique(
gpu_id,
nproc_per_node,
scope,
cores,
mode,
min_cores,
max_cores,
balanced,
)
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/gpu_affinity.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import pathlib
from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, Any, Callable, Optional
import dllogger
import torch.distributed as dist
import wandb
from dllogger import Verbosity
from se3_transformer.runtime.utils import rank_zero_only
class Logger(ABC):
@rank_zero_only
@abstractmethod
def log_hyperparams(self, params):
pass
@rank_zero_only
@abstractmethod
def log_metrics(self, metrics, step=None):
pass
@staticmethod
def _sanitize_params(params):
def _sanitize(val):
if isinstance(val, Callable):
try:
_val = val()
if isinstance(_val, Callable):
return val.__name__
return _val
except Exception:
return getattr(val, "__name__", None)
elif isinstance(val, pathlib.Path) or isinstance(val, Enum):
return str(val)
return val
return {key: _sanitize(val) for key, val in params.items()}
class LoggerCollection(Logger):
def __init__(self, loggers):
super().__init__()
self.loggers = loggers
def __getitem__(self, index):
return [logger for logger in self.loggers][index]
@rank_zero_only
def log_metrics(self, metrics, step=None):
for logger in self.loggers:
logger.log_metrics(metrics, step)
@rank_zero_only
def log_hyperparams(self, params):
for logger in self.loggers:
logger.log_hyperparams(params)
class DLLogger(Logger):
def __init__(self, save_dir: pathlib.Path, filename: str):
super().__init__()
if not dist.is_initialized() or dist.get_rank() == 0:
save_dir.mkdir(parents=True, exist_ok=True)
dllogger.init(
backends=[dllogger.JSONStreamBackend(Verbosity.DEFAULT, str(save_dir / filename))])
@rank_zero_only
def log_hyperparams(self, params):
params = self._sanitize_params(params)
dllogger.log(step="PARAMETER", data=params)
@rank_zero_only
def log_metrics(self, metrics, step=None):
if step is None:
step = tuple()
dllogger.log(step=step, data=metrics)
class WandbLogger(Logger):
def __init__(
self,
name: str,
save_dir: pathlib.Path,
id: Optional[str] = None,
project: Optional[str] = None
):
super().__init__()
if not dist.is_initialized() or dist.get_rank() == 0:
save_dir.mkdir(parents=True, exist_ok=True)
self.experiment = wandb.init(name=name,
project=project,
id=id,
dir=str(save_dir),
resume='allow',
anonymous='must')
@rank_zero_only
def log_hyperparams(self, params: Dict[str, Any]) -> None:
params = self._sanitize_params(params)
self.experiment.config.update(params, allow_val_change=True)
@rank_zero_only
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
if step is not None:
self.experiment.log({**metrics, 'epoch': step})
else:
self.experiment.log(metrics)
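# --- Illustrative usage (not part of the upstream file) ---
# A minimal sketch combining the JSON-file logger with the collection wrapper.
# The directory and metric names are arbitrary; WandbLogger is omitted here
# because it opens a wandb session.
def _example_logging(save_dir=pathlib.Path('./results')):
    logger = LoggerCollection([DLLogger(save_dir, 'dllogger_results.json')])
    logger.log_hyperparams({'learning_rate': 0.002, 'batch_size': 240})
    logger.log_metrics({'train_loss': 0.123}, step=1)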
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/loggers.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import argparse
import pathlib
from se3_transformer.data_loading import QM9DataModule
from se3_transformer.model import SE3TransformerPooled
from se3_transformer.runtime.utils import str2bool
PARSER = argparse.ArgumentParser(description='SE(3)-Transformer')
paths = PARSER.add_argument_group('Paths')
paths.add_argument('--data_dir', type=pathlib.Path, default=pathlib.Path('./data'),
help='Directory where the data is located or should be downloaded')
paths.add_argument('--log_dir', type=pathlib.Path, default=pathlib.Path('./results'),
help='Directory where the results logs should be saved')
paths.add_argument('--dllogger_name', type=str, default='dllogger_results.json',
help='Name for the resulting DLLogger JSON file')
paths.add_argument('--save_ckpt_path', type=pathlib.Path, default=None,
help='File where the checkpoint should be saved')
paths.add_argument('--load_ckpt_path', type=pathlib.Path, default=None,
help='File of the checkpoint to be loaded')
optimizer = PARSER.add_argument_group('Optimizer')
optimizer.add_argument('--optimizer', choices=['adam', 'sgd', 'lamb'], default='adam')
optimizer.add_argument('--learning_rate', '--lr', dest='learning_rate', type=float, default=0.002)
optimizer.add_argument('--min_learning_rate', '--min_lr', dest='min_learning_rate', type=float, default=None)
optimizer.add_argument('--momentum', type=float, default=0.9)
optimizer.add_argument('--weight_decay', type=float, default=0.1)
PARSER.add_argument('--epochs', type=int, default=100, help='Number of training epochs')
PARSER.add_argument('--batch_size', type=int, default=240, help='Batch size')
PARSER.add_argument('--seed', type=int, default=None, help='Set a seed globally')
PARSER.add_argument('--num_workers', type=int, default=8, help='Number of dataloading workers')
PARSER.add_argument('--amp', type=str2bool, nargs='?', const=True, default=False, help='Use Automatic Mixed Precision')
PARSER.add_argument('--gradient_clip', type=float, default=None, help='Clipping of the gradient norms')
PARSER.add_argument('--accumulate_grad_batches', type=int, default=1, help='Gradient accumulation')
PARSER.add_argument('--ckpt_interval', type=int, default=-1, help='Save a checkpoint every N epochs')
PARSER.add_argument('--eval_interval', dest='eval_interval', type=int, default=20,
help='Do an evaluation round every N epochs')
PARSER.add_argument('--silent', type=str2bool, nargs='?', const=True, default=False,
help='Minimize stdout output')
PARSER.add_argument('--wandb', type=str2bool, nargs='?', const=True, default=False,
help='Enable W&B logging')
PARSER.add_argument('--benchmark', type=str2bool, nargs='?', const=True, default=False,
help='Benchmark mode')
QM9DataModule.add_argparse_args(PARSER)
SE3TransformerPooled.add_argparse_args(PARSER)
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/arguments.py
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/__init__.py
|
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import argparse
import ctypes
import logging
import os
import random
from functools import wraps
from typing import Union, List, Dict
import numpy as np
import torch
import torch.distributed as dist
from torch import Tensor
def aggregate_residual(feats1, feats2, method: str):
""" Add or concatenate two fiber features together. If degrees don't match, will use the ones of feats2. """
if method in ['add', 'sum']:
return {k: (v + feats1[k]) if k in feats1 else v for k, v in feats2.items()}
elif method in ['cat', 'concat']:
return {k: torch.cat([v, feats1[k]], dim=1) if k in feats1 else v for k, v in feats2.items()}
else:
raise ValueError('Method must be add/sum or cat/concat')
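# A minimal, hypothetical sketch (not part of the original file) of aggregate_residual applied to
# per-degree feature dicts; tensors are shaped (nodes, channels, 2 * degree + 1).
def _aggregate_residual_example():
    feats1 = {'0': torch.randn(10, 16, 1), '1': torch.randn(10, 16, 3)}
    feats2 = {'0': torch.randn(10, 16, 1), '1': torch.randn(10, 16, 3)}
    summed = aggregate_residual(feats1, feats2, 'sum')  # same shapes as feats2
    concat = aggregate_residual(feats1, feats2, 'cat')  # channel dims doubled: (10, 32, 2 * d + 1)
    return summed, concat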
def degree_to_dim(degree: int) -> int:
return 2 * degree + 1
def unfuse_features(features: Tensor, degrees: List[int]) -> Dict[str, Tensor]:
return dict(zip(map(str, degrees), features.split([degree_to_dim(deg) for deg in degrees], dim=-1)))
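# A minimal, hypothetical sketch (not part of the original file): a degree-k feature occupies
# degree_to_dim(k) = 2k + 1 slots on the last dimension, and unfuse_features splits a fused tensor
# back into the per-degree dict used throughout the model.
def _unfuse_features_example():
    fused = torch.randn(10, 16, 1 + 3 + 5)  # degrees 0, 1 and 2 concatenated on the last dimension
    feats = unfuse_features(fused, degrees=[0, 1, 2])
    assert feats['1'].shape == (10, 16, degree_to_dim(1))  # (10, 16, 3)
    return feats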
def str2bool(v: Union[bool, str]) -> bool:
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def to_cuda(x):
""" Try to convert a Tensor, a collection of Tensors or a DGLGraph to CUDA """
if isinstance(x, Tensor):
return x.cuda(non_blocking=True)
elif isinstance(x, tuple):
return (to_cuda(v) for v in x)
elif isinstance(x, list):
return [to_cuda(v) for v in x]
elif isinstance(x, dict):
return {k: to_cuda(v) for k, v in x.items()}
else:
# DGLGraph or other objects
return x.to(device=torch.cuda.current_device(), non_blocking=True)
def get_local_rank() -> int:
return int(os.environ.get('LOCAL_RANK', 0))
def init_distributed() -> bool:
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = world_size > 1
if distributed:
backend = 'nccl' if torch.cuda.is_available() else 'gloo'
dist.init_process_group(backend=backend, init_method='env://')
if backend == 'nccl':
torch.cuda.set_device(get_local_rank())
else:
logging.warning('Running on CPU only!')
assert torch.distributed.is_initialized()
return distributed
def increase_l2_fetch_granularity():
# maximum fetch granularity of L2: 128 bytes
_libcudart = ctypes.CDLL('libcudart.so')
# set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def seed_everything(seed):
seed = int(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def rank_zero_only(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
if not dist.is_initialized() or dist.get_rank() == 0:
return fn(*args, **kwargs)
return wrapped_fn
def using_tensor_cores(amp: bool) -> bool:
major_cc, minor_cc = torch.cuda.get_device_capability()
return (amp and major_cc >= 7) or major_cc >= 8
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/utils.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
import time
from abc import ABC, abstractmethod
from typing import Optional
import numpy as np
import torch
from se3_transformer.runtime.loggers import Logger
from se3_transformer.runtime.metrics import MeanAbsoluteError
class BaseCallback(ABC):
def on_fit_start(self, optimizer, args, start_epoch):
pass
def on_fit_end(self):
pass
def on_epoch_end(self):
pass
def on_batch_start(self):
pass
def on_validation_step(self, input, target, pred):
pass
def on_validation_end(self, epoch=None):
pass
def on_checkpoint_load(self, checkpoint):
pass
def on_checkpoint_save(self, checkpoint):
pass
class LRSchedulerCallback(BaseCallback):
def __init__(self, logger: Optional[Logger] = None):
self.logger = logger
self.scheduler = None
@abstractmethod
def get_scheduler(self, optimizer, args, last_epoch):
pass
def on_fit_start(self, optimizer, args, start_epoch):
self.scheduler = self.get_scheduler(optimizer, args, start_epoch - 1)
if hasattr(self, 'state_dict'):
self.scheduler.load_state_dict(self.state_dict)
def on_checkpoint_load(self, checkpoint):
self.state_dict = checkpoint['scheduler_state_dict']
def on_checkpoint_save(self, checkpoint):
checkpoint['scheduler_state_dict'] = self.scheduler.state_dict()
def on_epoch_end(self):
if self.logger is not None:
self.logger.log_metrics({'learning rate': self.scheduler.get_last_lr()[0]}, step=self.scheduler.last_epoch)
self.scheduler.step()
class QM9MetricCallback(BaseCallback):
""" Logs the rescaled mean absolute error for QM9 regression tasks """
def __init__(self, logger, targets_std, prefix=''):
self.mae = MeanAbsoluteError()
self.logger = logger
self.targets_std = targets_std
self.prefix = prefix
self.best_mae = float('inf')
self.last_mae = None
def on_validation_step(self, input, target, pred):
self.mae(pred.detach(), target.detach())
def on_validation_end(self, epoch=None):
mae = self.mae.compute() * self.targets_std
logging.info(f'{self.prefix} MAE: {mae}')
self.logger.log_metrics({f'{self.prefix} MAE': mae}, epoch)
self.best_mae = min(self.best_mae, mae)
self.last_mae = mae
def on_fit_end(self):
if self.best_mae != float('inf'):
self.logger.log_metrics({f'{self.prefix} best MAE': self.best_mae})
self.logger.log_metrics({f'{self.prefix} loss': self.last_mae / self.targets_std})
class QM9LRSchedulerCallback(LRSchedulerCallback):
def __init__(self, logger, epochs):
super().__init__(logger)
self.epochs = epochs
def get_scheduler(self, optimizer, args, last_epoch):
min_lr = args.min_learning_rate if args.min_learning_rate else args.learning_rate / 10.0
return torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, self.epochs, eta_min=min_lr, last_epoch=last_epoch)
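# A minimal, hypothetical sketch (not part of the original file) of the schedule created above:
# with T_0 equal to the number of epochs it is effectively a single cosine decay from learning_rate
# down to eta_min (which defaults to learning_rate / 10).
def _qm9_schedule_example():
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.002)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=100, eta_min=0.0002)
    lrs = []
    for _ in range(100):
        lrs.append(scheduler.get_last_lr()[0])
        scheduler.step()  # called once per epoch, as in LRSchedulerCallback.on_epoch_end
    return lrs  # decays from 0.002 towards 0.0002 along a cosine curve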
class PerformanceCallback(BaseCallback):
def __init__(self, logger, batch_size: int, warmup_epochs: int = 1, mode: str = 'train'):
self.batch_size = batch_size
self.warmup_epochs = warmup_epochs
self.epoch = 0
self.timestamps = []
self.mode = mode
self.logger = logger
def on_batch_start(self):
if self.epoch >= self.warmup_epochs:
torch.cuda.synchronize()
self.timestamps.append(time.time() * 1000.0)
def _log_perf(self):
stats = self.process_performance_stats()
for k, v in stats.items():
logging.info(f'performance {k}: {v}')
self.logger.log_metrics(stats)
def on_epoch_end(self):
self.epoch += 1
def on_fit_end(self):
if self.epoch > self.warmup_epochs:
self._log_perf()
self.timestamps = []
def process_performance_stats(self):
timestamps = np.asarray(self.timestamps)
deltas = np.diff(timestamps)
throughput = self.batch_size / deltas.mean()
stats = {
f"throughput_{self.mode}": throughput,
f"latency_{self.mode}_mean": deltas.mean(),
f"total_time_{self.mode}": timestamps[-1] - timestamps[0],
}
for level in [90, 95, 99]:
stats.update({f"latency_{self.mode}_{level}": np.percentile(deltas, level)})
return stats
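# A minimal, hypothetical numeric sketch (not part of the original file). Timestamps are recorded
# in milliseconds, so with batch_size=240 and batches logged at 0, 100 and 200 ms the mean latency
# is 100 ms and the reported throughput is 240 / 100 = 2.4 items per millisecond.
def _performance_stats_example():
    callback = PerformanceCallback(logger=None, batch_size=240, warmup_epochs=0, mode='train')
    callback.timestamps = [0.0, 100.0, 200.0]  # as appended by on_batch_start
    stats = callback.process_performance_stats()
    assert stats['latency_train_mean'] == 100.0
    return stats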
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/callbacks.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import List
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
from se3_transformer.runtime import gpu_affinity
from se3_transformer.runtime.arguments import PARSER
from se3_transformer.runtime.callbacks import BaseCallback
from se3_transformer.runtime.loggers import DLLogger, WandbLogger, LoggerCollection
from se3_transformer.runtime.utils import to_cuda, get_local_rank
@torch.inference_mode()
def evaluate(model: nn.Module,
dataloader: DataLoader,
callbacks: List[BaseCallback],
args):
model.eval()
for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), unit='batch', desc='Evaluation',
leave=False, disable=(args.silent or get_local_rank() != 0)):
*input, target = to_cuda(batch)
for callback in callbacks:
callback.on_batch_start()
with torch.cuda.amp.autocast(enabled=args.amp):
pred = model(*input)
for callback in callbacks:
callback.on_validation_step(input, target, pred)
if __name__ == '__main__':
from se3_transformer.runtime.callbacks import QM9MetricCallback, PerformanceCallback
from se3_transformer.runtime.utils import init_distributed, seed_everything
from se3_transformer.model import SE3TransformerPooled, Fiber
from se3_transformer.data_loading import QM9DataModule
import torch.distributed as dist
import logging
import sys
is_distributed = init_distributed()
local_rank = get_local_rank()
args = PARSER.parse_args()
logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO)
logging.info('====== SE(3)-Transformer ======')
logging.info('| Inference on the test set |')
logging.info('===============================')
if not args.benchmark and args.load_ckpt_path is None:
logging.error('No load_ckpt_path provided, you need to provide a saved model to evaluate')
sys.exit(1)
if args.benchmark:
logging.info('Running benchmark mode with one warmup pass')
if args.seed is not None:
seed_everything(args.seed)
major_cc, minor_cc = torch.cuda.get_device_capability()
loggers = [DLLogger(save_dir=args.log_dir, filename=args.dllogger_name)]
if args.wandb:
loggers.append(WandbLogger(name=f'QM9({args.task})', save_dir=args.log_dir, project='se3-transformer'))
logger = LoggerCollection(loggers)
datamodule = QM9DataModule(**vars(args))
model = SE3TransformerPooled(
fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}),
fiber_out=Fiber({0: args.num_degrees * args.num_channels}),
fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}),
output_dim=1,
tensor_cores=(args.amp and major_cc >= 7) or major_cc >= 8, # use Tensor Cores more effectively
**vars(args)
)
callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='test')]
model.to(device=torch.cuda.current_device())
if args.load_ckpt_path is not None:
checkpoint = torch.load(str(args.load_ckpt_path), map_location={'cuda:0': f'cuda:{local_rank}'})
model.load_state_dict(checkpoint['state_dict'])
if is_distributed:
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(local_rank, nproc_per_node, scope='socket')
model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
model._set_static_graph()
torch.set_float32_matmul_precision('high')
test_dataloader = datamodule.test_dataloader() if not args.benchmark else datamodule.train_dataloader()
if not args.benchmark:
evaluate(model,
test_dataloader,
callbacks,
args)
for callback in callbacks:
callback.on_validation_end()
else:
world_size = dist.get_world_size() if dist.is_initialized() else 1
callbacks = [PerformanceCallback(
logger, args.batch_size * world_size,
warmup_epochs=1 if args.epochs > 1 else 0,
mode='inference'
)]
for _ in range(args.epochs):
evaluate(model,
test_dataloader,
callbacks,
args)
callbacks[0].on_epoch_end()
callbacks[0].on_fit_end()
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/inference.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
import pathlib
from typing import List
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from apex.optimizers import FusedAdam, FusedLAMB
from torch.nn.modules.loss import _Loss
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from torch.utils.data import DataLoader, DistributedSampler
from tqdm import tqdm
from se3_transformer.data_loading import QM9DataModule
from se3_transformer.model import SE3TransformerPooled
from se3_transformer.model.fiber import Fiber
from se3_transformer.runtime import gpu_affinity
from se3_transformer.runtime.arguments import PARSER
from se3_transformer.runtime.callbacks import QM9MetricCallback, QM9LRSchedulerCallback, BaseCallback, \
PerformanceCallback
from se3_transformer.runtime.inference import evaluate
from se3_transformer.runtime.loggers import LoggerCollection, DLLogger, WandbLogger, Logger
from se3_transformer.runtime.utils import to_cuda, get_local_rank, init_distributed, seed_everything, \
using_tensor_cores, increase_l2_fetch_granularity
def save_state(model: nn.Module, optimizer: Optimizer, epoch: int, path: pathlib.Path, callbacks: List[BaseCallback]):
""" Saves model, optimizer and epoch states to path (only once per node) """
if get_local_rank() == 0:
state_dict = model.module.state_dict() if isinstance(model, DistributedDataParallel) else model.state_dict()
checkpoint = {
'state_dict': state_dict,
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch
}
for callback in callbacks:
callback.on_checkpoint_save(checkpoint)
torch.save(checkpoint, str(path))
logging.info(f'Saved checkpoint to {str(path)}')
def load_state(model: nn.Module, optimizer: Optimizer, path: pathlib.Path, callbacks: List[BaseCallback]):
""" Loads model, optimizer and epoch states from path """
checkpoint = torch.load(str(path), map_location={'cuda:0': f'cuda:{get_local_rank()}'})
if isinstance(model, DistributedDataParallel):
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for callback in callbacks:
callback.on_checkpoint_load(checkpoint)
logging.info(f'Loaded checkpoint from {str(path)}')
return checkpoint['epoch']
def train_epoch(model, train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks, args):
loss_acc = torch.zeros((1,), device='cuda')
for i, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader), unit='batch',
desc=f'Epoch {epoch_idx}', disable=(args.silent or local_rank != 0)):
*inputs, target = to_cuda(batch)
for callback in callbacks:
callback.on_batch_start()
with torch.cuda.amp.autocast(enabled=args.amp):
pred = model(*inputs)
loss = loss_fn(pred, target) / args.accumulate_grad_batches
loss_acc += loss.detach()
grad_scaler.scale(loss).backward()
# gradient accumulation
if (i + 1) % args.accumulate_grad_batches == 0 or (i + 1) == len(train_dataloader):
if args.gradient_clip:
grad_scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)
grad_scaler.step(optimizer)
grad_scaler.update()
model.zero_grad(set_to_none=True)
return loss_acc / (i + 1)
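# Note (added for illustration, not part of the original file): with accumulate_grad_batches=N,
# each batch loss is scaled by 1/N and optimizer steps happen only every N-th batch (plus once on
# the final, possibly shorter, remainder), so gradients approximate an N-times-larger batch; the
# value returned above is the mean of the scaled per-batch losses.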
def train(model: nn.Module,
loss_fn: _Loss,
train_dataloader: DataLoader,
val_dataloader: DataLoader,
callbacks: List[BaseCallback],
logger: Logger,
args):
device = torch.cuda.current_device()
model.to(device=device)
local_rank = get_local_rank()
world_size = dist.get_world_size() if dist.is_initialized() else 1
if dist.is_initialized():
model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
model._set_static_graph()
model.train()
grad_scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
if args.optimizer == 'adam':
optimizer = FusedAdam(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999),
weight_decay=args.weight_decay)
elif args.optimizer == 'lamb':
optimizer = FusedLAMB(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999),
weight_decay=args.weight_decay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum,
weight_decay=args.weight_decay)
epoch_start = load_state(model, optimizer, args.load_ckpt_path, callbacks) if args.load_ckpt_path else 0
for callback in callbacks:
callback.on_fit_start(optimizer, args, epoch_start)
for epoch_idx in range(epoch_start, args.epochs):
if isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch_idx)
loss = train_epoch(model, train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks,
args)
if dist.is_initialized():
torch.distributed.all_reduce(loss)
loss /= world_size
loss = loss.item()
logging.info(f'Train loss: {loss}')
logger.log_metrics({'train loss': loss}, epoch_idx)
if epoch_idx + 1 == args.epochs:
logger.log_metrics({'train loss': loss})
for callback in callbacks:
callback.on_epoch_end()
if not args.benchmark and args.save_ckpt_path is not None and args.ckpt_interval > 0 \
and (epoch_idx + 1) % args.ckpt_interval == 0:
save_state(model, optimizer, epoch_idx, args.save_ckpt_path, callbacks)
if not args.benchmark and (
(args.eval_interval > 0 and (epoch_idx + 1) % args.eval_interval == 0) or epoch_idx + 1 == args.epochs):
evaluate(model, val_dataloader, callbacks, args)
model.train()
for callback in callbacks:
callback.on_validation_end(epoch_idx)
if args.save_ckpt_path is not None and not args.benchmark:
save_state(model, optimizer, args.epochs, args.save_ckpt_path, callbacks)
for callback in callbacks:
callback.on_fit_end()
def print_parameters_count(model):
num_params_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
logging.info(f'Number of trainable parameters: {num_params_trainable}')
if __name__ == '__main__':
is_distributed = init_distributed()
local_rank = get_local_rank()
args = PARSER.parse_args()
logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO)
logging.info('====== SE(3)-Transformer ======')
logging.info('| Training procedure |')
logging.info('===============================')
if args.seed is not None:
logging.info(f'Using seed {args.seed}')
seed_everything(args.seed)
loggers = [DLLogger(save_dir=args.log_dir, filename=args.dllogger_name)]
if args.wandb:
loggers.append(WandbLogger(name=f'QM9({args.task})', save_dir=args.log_dir, project='se3-transformer'))
logger = LoggerCollection(loggers)
datamodule = QM9DataModule(**vars(args))
model = SE3TransformerPooled(
fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}),
fiber_out=Fiber({0: args.num_degrees * args.num_channels}),
fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}),
output_dim=1,
tensor_cores=using_tensor_cores(args.amp), # use Tensor Cores more effectively
**vars(args)
)
loss_fn = nn.L1Loss()
if args.benchmark:
logging.info('Running benchmark mode')
world_size = dist.get_world_size() if dist.is_initialized() else 1
callbacks = [PerformanceCallback(
logger, args.batch_size * world_size, warmup_epochs=1 if args.epochs > 1 else 0
)]
else:
callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='validation'),
QM9LRSchedulerCallback(logger, epochs=args.epochs)]
if is_distributed:
gpu_affinity.set_affinity(gpu_id=get_local_rank(), nproc_per_node=torch.cuda.device_count(), scope='socket')
torch.set_float32_matmul_precision('high')
print_parameters_count(model)
logger.log_hyperparams(vars(args))
increase_l2_fetch_granularity()
train(model,
loss_fn,
datamodule.train_dataloader(),
datamodule.val_dataloader(),
callbacks,
logger,
args)
logging.info('Training finished successfully')
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime/training.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from functools import lru_cache
from typing import Dict, List
import e3nn.o3 as o3
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.runtime.utils import degree_to_dim
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
@lru_cache(maxsize=None)
def get_clebsch_gordon(J: int, d_in: int, d_out: int, device) -> Tensor:
""" Get the (cached) Q^{d_out,d_in}_J matrices from equation (8) """
return o3.wigner_3j(J, d_in, d_out, dtype=torch.float64, device=device).permute(2, 1, 0)
@lru_cache(maxsize=None)
def get_all_clebsch_gordon(max_degree: int, device) -> List[List[Tensor]]:
all_cb = []
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
K_Js = []
for J in range(abs(d_in - d_out), d_in + d_out + 1):
K_Js.append(get_clebsch_gordon(J, d_in, d_out, device))
all_cb.append(K_Js)
return all_cb
def get_spherical_harmonics(relative_pos: Tensor, max_degree: int) -> List[Tensor]:
all_degrees = list(range(2 * max_degree + 1))
sh = o3.spherical_harmonics(all_degrees, relative_pos, normalize=True)
return torch.split(sh, [degree_to_dim(d) for d in all_degrees], dim=1)
@torch.jit.script
def get_basis_script(max_degree: int,
use_pad_trick: bool,
spherical_harmonics: List[Tensor],
clebsch_gordon: List[List[Tensor]],
amp: bool) -> Dict[str, Tensor]:
"""
Compute pairwise bases matrices for degrees up to max_degree
:param max_degree: Maximum input or output degree
:param use_pad_trick: Pad some of the odd dimensions for a better use of Tensor Cores
:param spherical_harmonics: List of computed spherical harmonics
:param clebsch_gordon: List of computed Clebsch-Gordan coefficients
:param amp: When true, return bases in FP16 precision
"""
basis = {}
idx = 0
# Double for loop instead of product() because of JIT script
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
key = f'{d_in},{d_out}'
K_Js = []
for freq_idx, J in enumerate(range(abs(d_in - d_out), d_in + d_out + 1)):
Q_J = clebsch_gordon[idx][freq_idx]
K_Js.append(torch.einsum('n f, k l f -> n l k', spherical_harmonics[J].float(), Q_J.float()))
basis[key] = torch.stack(K_Js, 2) # Stack on second dim so order is n l f k
if amp:
basis[key] = basis[key].half()
if use_pad_trick:
basis[key] = F.pad(basis[key], (0, 1)) # Pad the k dimension, that can be sliced later
idx += 1
return basis
@torch.jit.script
def update_basis_with_fused(basis: Dict[str, Tensor],
max_degree: int,
use_pad_trick: bool,
fully_fused: bool) -> Dict[str, Tensor]:
""" Update the basis dict with partially and optionally fully fused bases """
num_edges = basis['0,0'].shape[0]
device = basis['0,0'].device
dtype = basis['0,0'].dtype
sum_dim = sum([degree_to_dim(d) for d in range(max_degree + 1)])
# Fused per output degree
for d_out in range(max_degree + 1):
sum_freq = sum([degree_to_dim(min(d, d_out)) for d in range(max_degree + 1)])
basis_fused = torch.zeros(num_edges, sum_dim, sum_freq, degree_to_dim(d_out) + int(use_pad_trick),
device=device, dtype=dtype)
acc_d, acc_f = 0, 0
for d_in in range(max_degree + 1):
basis_fused[:, acc_d:acc_d + degree_to_dim(d_in), acc_f:acc_f + degree_to_dim(min(d_out, d_in)),
:degree_to_dim(d_out)] = basis[f'{d_in},{d_out}'][:, :, :, :degree_to_dim(d_out)]
acc_d += degree_to_dim(d_in)
acc_f += degree_to_dim(min(d_out, d_in))
basis[f'out{d_out}_fused'] = basis_fused
# Fused per input degree
for d_in in range(max_degree + 1):
sum_freq = sum([degree_to_dim(min(d, d_in)) for d in range(max_degree + 1)])
basis_fused = torch.zeros(num_edges, degree_to_dim(d_in), sum_freq, sum_dim,
device=device, dtype=dtype)
acc_d, acc_f = 0, 0
for d_out in range(max_degree + 1):
basis_fused[:, :, acc_f:acc_f + degree_to_dim(min(d_out, d_in)), acc_d:acc_d + degree_to_dim(d_out)] \
= basis[f'{d_in},{d_out}'][:, :, :, :degree_to_dim(d_out)]
acc_d += degree_to_dim(d_out)
acc_f += degree_to_dim(min(d_out, d_in))
basis[f'in{d_in}_fused'] = basis_fused
if fully_fused:
# Fully fused
# Double sum this way because of JIT script
sum_freq = sum([
sum([degree_to_dim(min(d_in, d_out)) for d_in in range(max_degree + 1)]) for d_out in range(max_degree + 1)
])
basis_fused = torch.zeros(num_edges, sum_dim, sum_freq, sum_dim, device=device, dtype=dtype)
acc_d, acc_f = 0, 0
for d_out in range(max_degree + 1):
b = basis[f'out{d_out}_fused']
basis_fused[:, :, acc_f:acc_f + b.shape[2], acc_d:acc_d + degree_to_dim(d_out)] = b[:, :, :,
:degree_to_dim(d_out)]
acc_f += b.shape[2]
acc_d += degree_to_dim(d_out)
basis['fully_fused'] = basis_fused
del basis['0,0'] # We know that the basis for l = k = 0 is filled with a constant
return basis
def get_basis(relative_pos: Tensor,
max_degree: int = 4,
compute_gradients: bool = False,
use_pad_trick: bool = False,
amp: bool = False) -> Dict[str, Tensor]:
with nvtx_range('spherical harmonics'):
spherical_harmonics = get_spherical_harmonics(relative_pos, max_degree)
with nvtx_range('CB coefficients'):
clebsch_gordon = get_all_clebsch_gordon(max_degree, relative_pos.device)
with torch.autograd.set_grad_enabled(compute_gradients):
with nvtx_range('bases'):
basis = get_basis_script(max_degree=max_degree,
use_pad_trick=use_pad_trick,
spherical_harmonics=spherical_harmonics,
clebsch_gordon=clebsch_gordon,
amp=amp)
return basis
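# A minimal, hypothetical sketch (not part of the original file) of calling get_basis on a batch of
# per-edge relative positions. Keys have the form '<d_in>,<d_out>' and each entry stacks one
# Clebsch-Gordan component per allowed order J in [|d_in - d_out|, d_in + d_out].
def _get_basis_example():
    rel_pos = torch.randn(128, 3)  # one 3D offset per edge
    basis = get_basis(rel_pos, max_degree=2)
    assert '1,1' in basis  # degree-1 -> degree-1 interactions stack the J = 0, 1, 2 components
    return basis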
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/basis.py
|
from .transformer import SE3Transformer, SE3TransformerPooled
from .fiber import Fiber
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/__init__.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
from typing import Optional, Literal, Dict
import torch
import torch.nn as nn
from dgl import DGLGraph
from torch import Tensor
from se3_transformer.model.basis import get_basis, update_basis_with_fused
from se3_transformer.model.layers.attention import AttentionBlockSE3
from se3_transformer.model.layers.convolution import ConvSE3, ConvSE3FuseLevel
from se3_transformer.model.layers.norm import NormSE3
from se3_transformer.model.layers.pooling import GPooling
from se3_transformer.runtime.utils import str2bool
from se3_transformer.model.fiber import Fiber
class Sequential(nn.Sequential):
""" Sequential module with arbitrary forward args and kwargs. Used to pass graph, basis and edge features. """
def forward(self, input, *args, **kwargs):
for module in self:
input = module(input, *args, **kwargs)
return input
def get_populated_edge_features(relative_pos: Tensor, edge_features: Optional[Dict[str, Tensor]] = None):
""" Add relative positions to existing edge features """
edge_features = edge_features.copy() if edge_features else {}
r = relative_pos.norm(dim=-1, keepdim=True)
if '0' in edge_features:
edge_features['0'] = torch.cat([edge_features['0'], r[..., None]], dim=1)
else:
edge_features['0'] = r[..., None]
return edge_features
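# A minimal, hypothetical sketch (not part of the original file): the edge length is appended to
# (or becomes) the type-0 edge features, shaped (num_edges, channels, 1).
def _populated_edge_features_example():
    rel_pos = torch.randn(128, 3)
    edge_feats = get_populated_edge_features(rel_pos, edge_features=None)
    assert edge_feats['0'].shape == (128, 1, 1)  # a single scalar channel: the node distance
    return edge_feats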
class SE3Transformer(nn.Module):
def __init__(self,
num_layers: int,
fiber_in: Fiber,
fiber_hidden: Fiber,
fiber_out: Fiber,
num_heads: int,
channels_div: int,
fiber_edge: Fiber = Fiber({}),
return_type: Optional[int] = None,
pooling: Optional[Literal['avg', 'max']] = None,
norm: bool = True,
use_layer_norm: bool = True,
tensor_cores: bool = False,
low_memory: bool = False,
**kwargs):
"""
:param num_layers: Number of attention layers
:param fiber_in: Input fiber description
:param fiber_hidden: Hidden fiber description
:param fiber_out: Output fiber description
:param fiber_edge: Input edge fiber description
:param num_heads: Number of attention heads
:param channels_div: Channels division before feeding to attention layer
:param return_type: Return only features of this type
:param pooling: 'avg' or 'max' graph pooling before MLP layers
:param norm: Apply a normalization layer after each attention block
:param use_layer_norm: Apply layer normalization between MLP layers
:param tensor_cores: True if using Tensor Cores (affects the use of fully fused convs, and padded bases)
:param low_memory: If True, will use slower ops that use less memory
"""
super().__init__()
self.num_layers = num_layers
self.fiber_edge = fiber_edge
self.num_heads = num_heads
self.channels_div = channels_div
self.return_type = return_type
self.pooling = pooling
self.max_degree = max(*fiber_in.degrees, *fiber_hidden.degrees, *fiber_out.degrees)
self.tensor_cores = tensor_cores
self.low_memory = low_memory
if low_memory:
self.fuse_level = ConvSE3FuseLevel.NONE
else:
# Fully fused convolutions when using Tensor Cores (and not low memory mode)
self.fuse_level = ConvSE3FuseLevel.FULL if tensor_cores else ConvSE3FuseLevel.PARTIAL
graph_modules = []
for i in range(num_layers):
graph_modules.append(AttentionBlockSE3(fiber_in=fiber_in,
fiber_out=fiber_hidden,
fiber_edge=fiber_edge,
num_heads=num_heads,
channels_div=channels_div,
use_layer_norm=use_layer_norm,
max_degree=self.max_degree,
fuse_level=self.fuse_level,
low_memory=low_memory))
if norm:
graph_modules.append(NormSE3(fiber_hidden))
fiber_in = fiber_hidden
graph_modules.append(ConvSE3(fiber_in=fiber_in,
fiber_out=fiber_out,
fiber_edge=fiber_edge,
self_interaction=True,
use_layer_norm=use_layer_norm,
max_degree=self.max_degree,
fuse_level=self.fuse_level,
low_memory=low_memory))
self.graph_modules = Sequential(*graph_modules)
if pooling is not None:
assert return_type is not None, 'return_type must be specified when pooling'
self.pooling_module = GPooling(pool=pooling, feat_type=return_type)
def forward(self, graph: DGLGraph, node_feats: Dict[str, Tensor],
edge_feats: Optional[Dict[str, Tensor]] = None,
basis: Optional[Dict[str, Tensor]] = None):
# Compute bases in case they weren't precomputed as part of the data loading
basis = basis or get_basis(graph.edata['rel_pos'], max_degree=self.max_degree, compute_gradients=False,
use_pad_trick=self.tensor_cores and not self.low_memory,
amp=torch.is_autocast_enabled())
# Add fused bases (per output degree, per input degree, and fully fused) to the dict
basis = update_basis_with_fused(basis, self.max_degree, use_pad_trick=self.tensor_cores and not self.low_memory,
fully_fused=self.fuse_level == ConvSE3FuseLevel.FULL)
edge_feats = get_populated_edge_features(graph.edata['rel_pos'], edge_feats)
node_feats = self.graph_modules(node_feats, edge_feats, graph=graph, basis=basis)
if self.pooling is not None:
return self.pooling_module(node_feats, graph=graph)
if self.return_type is not None:
return node_feats[str(self.return_type)]
return node_feats
@staticmethod
def add_argparse_args(parser):
parser.add_argument('--num_layers', type=int, default=7,
help='Number of stacked Transformer layers')
parser.add_argument('--num_heads', type=int, default=8,
help='Number of heads in self-attention')
parser.add_argument('--channels_div', type=int, default=2,
help='Channels division before feeding to attention layer')
parser.add_argument('--pooling', type=str, default=None, const=None, nargs='?', choices=['max', 'avg'],
help='Type of graph pooling')
parser.add_argument('--norm', type=str2bool, nargs='?', const=True, default=False,
help='Apply a normalization layer after each attention block')
parser.add_argument('--use_layer_norm', type=str2bool, nargs='?', const=True, default=False,
help='Apply layer normalization between MLP layers')
parser.add_argument('--low_memory', type=str2bool, nargs='?', const=True, default=False,
help='If true, will use fused ops that are slower but that use less memory '
'(expect 25 percent less memory). '
'Only has an effect if AMP is enabled on Volta GPUs, or if running on Ampere GPUs')
return parser
class SE3TransformerPooled(nn.Module):
def __init__(self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Fiber,
num_degrees: int,
num_channels: int,
output_dim: int,
**kwargs):
super().__init__()
kwargs['pooling'] = kwargs['pooling'] or 'max'
self.transformer = SE3Transformer(
fiber_in=fiber_in,
fiber_hidden=Fiber.create(num_degrees, num_channels),
fiber_out=fiber_out,
fiber_edge=fiber_edge,
return_type=0,
**kwargs
)
n_out_features = fiber_out.num_features
self.mlp = nn.Sequential(
nn.Linear(n_out_features, n_out_features),
nn.ReLU(),
nn.Linear(n_out_features, output_dim)
)
def forward(self, graph, node_feats, edge_feats, basis=None):
feats = self.transformer(graph, node_feats, edge_feats, basis).squeeze(-1)
y = self.mlp(feats).squeeze(-1)
return y
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("Model architecture")
SE3Transformer.add_argparse_args(parser)
parser.add_argument('--num_degrees',
help='Number of degrees to use. Hidden features will have types [0, ..., num_degrees - 1]',
type=int, default=4)
parser.add_argument('--num_channels', help='Number of channels for the hidden features', type=int, default=32)
return parent_parser
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/transformer.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from collections import namedtuple
from itertools import product
from typing import Dict
import torch
from torch import Tensor
from se3_transformer.runtime.utils import degree_to_dim
FiberEl = namedtuple('FiberEl', ['degree', 'channels'])
class Fiber(dict):
"""
Describes the structure of some set of features.
Features are split into types (0, 1, 2, 3, ...). A feature of type k has a dimension of 2k+1.
Type-0 features: invariant scalars
Type-1 features: equivariant 3D vectors
Type-2 features: equivariant symmetric traceless matrices
...
As inputs to an SE3 layer, there can be many features of the same type, as well as features of different types.
The 'multiplicity' or 'number of channels' is the number of features of a given type.
This class puts together all the degrees and their multiplicities in order to describe
the inputs, outputs or hidden features of SE3 layers.
"""
def __init__(self, structure):
if isinstance(structure, dict):
structure = [FiberEl(int(d), int(m)) for d, m in sorted(structure.items(), key=lambda x: x[1])]
elif not isinstance(structure[0], FiberEl):
structure = list(map(lambda t: FiberEl(*t), sorted(structure, key=lambda x: x[1])))
self.structure = structure
super().__init__({d: m for d, m in self.structure})
@property
def degrees(self):
return sorted([t.degree for t in self.structure])
@property
def channels(self):
return [self[d] for d in self.degrees]
@property
def num_features(self):
""" Size of the resulting tensor if all features were concatenated together """
return sum(t.channels * degree_to_dim(t.degree) for t in self.structure)
@staticmethod
def create(num_degrees: int, num_channels: int):
""" Create a Fiber with degrees 0..num_degrees-1, all with the same multiplicity """
return Fiber([(degree, num_channels) for degree in range(num_degrees)])
@staticmethod
def from_features(feats: Dict[str, Tensor]):
""" Infer the Fiber structure from a feature dict """
structure = {}
for k, v in feats.items():
degree = int(k)
assert len(v.shape) == 3, 'Feature shape should be (N, C, 2D+1)'
assert v.shape[-1] == degree_to_dim(degree)
structure[degree] = v.shape[-2]
return Fiber(structure)
def __getitem__(self, degree: int):
""" fiber[degree] returns the multiplicity for this degree """
return dict(self.structure).get(degree, 0)
def __iter__(self):
""" Iterate over namedtuples (degree, channels) """
return iter(self.structure)
def __mul__(self, other):
"""
If other is an int, multiplies all the multiplicities by other.
If other is a fiber, returns the cartesian product.
"""
if isinstance(other, Fiber):
return product(self.structure, other.structure)
elif isinstance(other, int):
return Fiber({t.degree: t.channels * other for t in self.structure})
def __add__(self, other):
"""
If other is an int, adds other to all the multiplicities.
If other is a fiber, adds the multiplicities of the two fibers together.
"""
if isinstance(other, Fiber):
return Fiber({t.degree: t.channels + other[t.degree] for t in self.structure})
elif isinstance(other, int):
return Fiber({t.degree: t.channels + other for t in self.structure})
def __repr__(self):
return str(self.structure)
@staticmethod
def combine_max(f1, f2):
""" Combine two fiber by taking the maximum multiplicity for each degree in both fibers """
new_dict = dict(f1.structure)
for k, m in f2.structure:
new_dict[k] = max(new_dict.get(k, 0), m)
return Fiber(list(new_dict.items()))
@staticmethod
def combine_selectively(f1, f2):
""" Combine two fiber by taking the sum of multiplicities for each degree in the first fiber """
# only use orders which occur in fiber f1
new_dict = dict(f1.structure)
for k in f1.degrees:
if k in f2.degrees:
new_dict[k] += f2[k]
return Fiber(list(new_dict.items()))
def to_attention_heads(self, tensors: Dict[str, Tensor], num_heads: int):
# dict(N, num_channels, 2d+1) -> (N, num_heads, -1)
fibers = [tensors[str(degree)].reshape(*tensors[str(degree)].shape[:-2], num_heads, -1) for degree in
self.degrees]
fibers = torch.cat(fibers, -1)
return fibers
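# A minimal, hypothetical sketch (not part of the original file) of the Fiber bookkeeping described
# in the class docstring: a type-k feature has dimension 2k + 1, and the multiplicity is the number
# of channels of that type.
def _fiber_example():
    hidden = Fiber.create(num_degrees=3, num_channels=32)  # degrees 0, 1 and 2, 32 channels each
    assert hidden.degrees == [0, 1, 2]
    assert hidden.num_features == 32 * (1 + 3 + 5)  # concatenated size of all features
    doubled = hidden * 2  # multiplying by an int scales every multiplicity
    assert doubled[1] == 64
    return hidden, doubled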
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/fiber.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import dgl
import numpy as np
import torch
import torch.nn as nn
from dgl import DGLGraph
from dgl.ops import edge_softmax
from torch import Tensor
from typing import Dict, Optional, Union
from se3_transformer.model.fiber import Fiber
from se3_transformer.model.layers.convolution import ConvSE3, ConvSE3FuseLevel
from se3_transformer.model.layers.linear import LinearSE3
from se3_transformer.runtime.utils import degree_to_dim, aggregate_residual, unfuse_features
from torch.cuda.nvtx import range as nvtx_range
class AttentionSE3(nn.Module):
""" Multi-headed sparse graph self-attention (SE(3)-equivariant) """
def __init__(
self,
num_heads: int,
key_fiber: Fiber,
value_fiber: Fiber
):
"""
:param num_heads: Number of attention heads
:param key_fiber: Fiber for the keys (and also for the queries)
:param value_fiber: Fiber for the values
"""
super().__init__()
self.num_heads = num_heads
self.key_fiber = key_fiber
self.value_fiber = value_fiber
def forward(
self,
value: Union[Tensor, Dict[str, Tensor]], # edge features (may be fused)
key: Union[Tensor, Dict[str, Tensor]], # edge features (may be fused)
query: Dict[str, Tensor], # node features
graph: DGLGraph
):
with nvtx_range('AttentionSE3'):
with nvtx_range('reshape keys and queries'):
if isinstance(key, Tensor):
# case where features of all types are fused
key = key.reshape(key.shape[0], self.num_heads, -1)
# need to reshape queries that way to keep the same layout as keys
out = torch.cat([query[str(d)] for d in self.key_fiber.degrees], dim=-1)
query = out.reshape(list(query.values())[0].shape[0], self.num_heads, -1)
else:
# features are not fused, need to fuse and reshape them
key = self.key_fiber.to_attention_heads(key, self.num_heads)
query = self.key_fiber.to_attention_heads(query, self.num_heads)
with nvtx_range('attention dot product + softmax'):
# Compute attention weights (softmax of inner product between key and query)
edge_weights = dgl.ops.e_dot_v(graph, key, query).squeeze(-1)
edge_weights = edge_weights / np.sqrt(self.key_fiber.num_features)
edge_weights = edge_softmax(graph, edge_weights)
edge_weights = edge_weights[..., None, None]
with nvtx_range('weighted sum'):
if isinstance(value, Tensor):
# features of all types are fused
v = value.view(value.shape[0], self.num_heads, -1, value.shape[-1])
weights = edge_weights * v
feat_out = dgl.ops.copy_e_sum(graph, weights)
feat_out = feat_out.view(feat_out.shape[0], -1, feat_out.shape[-1]) # merge heads
out = unfuse_features(feat_out, self.value_fiber.degrees)
else:
out = {}
for degree, channels in self.value_fiber:
v = value[str(degree)].view(-1, self.num_heads, channels // self.num_heads,
degree_to_dim(degree))
weights = edge_weights * v
res = dgl.ops.copy_e_sum(graph, weights)
out[str(degree)] = res.view(-1, channels, degree_to_dim(degree)) # merge heads
return out
class AttentionBlockSE3(nn.Module):
""" Multi-headed sparse graph self-attention block with skip connection, linear projection (SE(3)-equivariant) """
def __init__(
self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Optional[Fiber] = None,
num_heads: int = 4,
channels_div: int = 2,
use_layer_norm: bool = False,
max_degree: bool = 4,
fuse_level: ConvSE3FuseLevel = ConvSE3FuseLevel.FULL,
low_memory: bool = False,
**kwargs
):
"""
:param fiber_in: Fiber describing the input features
:param fiber_out: Fiber describing the output features
:param fiber_edge: Fiber describing the edge features (node distances excluded)
:param num_heads: Number of attention heads
:param channels_div: Divide the channels by this integer for computing values
:param use_layer_norm: Apply layer normalization between MLP layers
:param max_degree: Maximum degree used in the bases computation
:param fuse_level: Maximum fuse level to use in TFN convolutions
"""
super().__init__()
if fiber_edge is None:
fiber_edge = Fiber({})
self.fiber_in = fiber_in
# value_fiber has same structure as fiber_out but #channels divided by 'channels_div'
value_fiber = Fiber([(degree, channels // channels_div) for degree, channels in fiber_out])
# key_query_fiber is derived from value_fiber, keeping only the degrees that also appear in fiber_in
# (queries are merely projected, hence degrees have to match input)
key_query_fiber = Fiber([(fe.degree, fe.channels) for fe in value_fiber if fe.degree in fiber_in.degrees])
self.to_key_value = ConvSE3(fiber_in, value_fiber + key_query_fiber, pool=False, fiber_edge=fiber_edge,
use_layer_norm=use_layer_norm, max_degree=max_degree, fuse_level=fuse_level,
allow_fused_output=True, low_memory=low_memory)
self.to_query = LinearSE3(fiber_in, key_query_fiber)
self.attention = AttentionSE3(num_heads, key_query_fiber, value_fiber)
self.project = LinearSE3(value_fiber + fiber_in, fiber_out)
def forward(
self,
node_features: Dict[str, Tensor],
edge_features: Dict[str, Tensor],
graph: DGLGraph,
basis: Dict[str, Tensor]
):
with nvtx_range('AttentionBlockSE3'):
with nvtx_range('keys / values'):
fused_key_value = self.to_key_value(node_features, edge_features, graph, basis)
key, value = self._get_key_value_from_fused(fused_key_value)
with nvtx_range('queries'):
query = self.to_query(node_features)
z = self.attention(value, key, query, graph)
z_concat = aggregate_residual(node_features, z, 'cat')
return self.project(z_concat)
def _get_key_value_from_fused(self, fused_key_value):
# Extract key and value features from the fused features
if isinstance(fused_key_value, Tensor):
# Previous layer was a fully fused convolution
value, key = torch.chunk(fused_key_value, chunks=2, dim=-2)
else:
key, value = {}, {}
for degree, feat in fused_key_value.items():
if int(degree) in self.fiber_in.degrees:
value[degree], key[degree] = torch.chunk(feat, chunks=2, dim=-2)
else:
value[degree] = feat
return key, value
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers/attention.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from enum import Enum
from itertools import product
from typing import Dict
import dgl
import numpy as np
import torch
import torch.nn as nn
import torch.utils.checkpoint
from dgl import DGLGraph
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.model.fiber import Fiber
from se3_transformer.runtime.utils import degree_to_dim, unfuse_features
class ConvSE3FuseLevel(Enum):
"""
Enum to select a maximum level of fusing optimizations that will be applied when certain conditions are met.
    If a desired level L is picked but cannot be applied to a given layer, fused ops of lower levels are considered instead.
A higher level means faster training, but also more memory usage.
If you are tight on memory and want to feed large inputs to the network, choose a low value.
If you want to train fast, choose a high value.
Recommended value is FULL with AMP.
Fully fused TFN convolutions requirements:
- all input channels are the same
- all output channels are the same
- input degrees span the range [0, ..., max_degree]
- output degrees span the range [0, ..., max_degree]
Partially fused TFN convolutions requirements:
* For fusing by output degree:
- all input channels are the same
- input degrees span the range [0, ..., max_degree]
* For fusing by input degree:
- all output channels are the same
- output degrees span the range [0, ..., max_degree]
Original TFN pairwise convolutions: no requirements
"""
FULL = 2
PARTIAL = 1
NONE = 0
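# Illustrative sketch (hypothetical helper): how the requirements listed in the docstring above
# map to a fuse-level choice for a given (fiber_in, fiber_out) pair. The real selection logic in
# ConvSE3.__init__ below additionally accounts for edge-feature channels concatenated to the input.
def _example_select_fuse_level(fiber_in, fiber_out, max_degree, requested):
    degrees = list(range(max_degree + 1))
    same_in = len({channels for _, channels in fiber_in}) == 1
    same_out = len({channels for _, channels in fiber_out}) == 1
    if requested.value >= ConvSE3FuseLevel.FULL.value and \
            same_in and fiber_in.degrees == degrees and \
            same_out and fiber_out.degrees == degrees:
        return ConvSE3FuseLevel.FULL
    if requested.value >= ConvSE3FuseLevel.PARTIAL.value and \
            ((same_in and fiber_in.degrees == degrees) or (same_out and fiber_out.degrees == degrees)):
        return ConvSE3FuseLevel.PARTIAL
    return ConvSE3FuseLevel.NONE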
class RadialProfile(nn.Module):
"""
Radial profile function.
Outputs weights used to weigh basis matrices in order to get convolution kernels.
In TFN notation: $R^{l,k}$
In SE(3)-Transformer notation: $\phi^{l,k}$
Note:
In the original papers, this function only depends on relative node distances ||x||.
Here, we allow this function to also take as input additional invariant edge features.
This does not break equivariance and adds expressive power to the model.
Diagram:
invariant edge features (node distances included) ───> MLP layer (shared across edges) ───> radial weights
"""
def __init__(
self,
num_freq: int,
channels_in: int,
channels_out: int,
edge_dim: int = 1,
mid_dim: int = 32,
use_layer_norm: bool = False
):
"""
:param num_freq: Number of frequencies
:param channels_in: Number of input channels
:param channels_out: Number of output channels
:param edge_dim: Number of invariant edge features (input to the radial function)
:param mid_dim: Size of the hidden MLP layers
:param use_layer_norm: Apply layer normalization between MLP layers
"""
super().__init__()
modules = [
nn.Linear(edge_dim, mid_dim),
nn.LayerNorm(mid_dim) if use_layer_norm else None,
nn.ReLU(),
nn.Linear(mid_dim, mid_dim),
nn.LayerNorm(mid_dim) if use_layer_norm else None,
nn.ReLU(),
nn.Linear(mid_dim, num_freq * channels_in * channels_out, bias=False)
]
self.net = torch.jit.script(nn.Sequential(*[m for m in modules if m is not None]))
def forward(self, features: Tensor) -> Tensor:
return self.net(features)
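# Illustrative sketch (hypothetical helper): the radial MLP emits one flat weight vector per
# edge; the caller later reshapes it to (channels_out, channels_in * num_freq).
def _radial_profile_example():
    radial = RadialProfile(num_freq=3, channels_in=4, channels_out=8, edge_dim=2)
    edge_feats = torch.randn(100, 2)  # e.g. node distance plus one extra invariant edge feature
    weights = radial(edge_feats)
    assert weights.shape == (100, 3 * 4 * 8)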
class VersatileConvSE3(nn.Module):
"""
Building block for TFN convolutions.
This single module can be used for fully fused convolutions, partially fused convolutions, or pairwise convolutions.
"""
def __init__(self,
freq_sum: int,
channels_in: int,
channels_out: int,
edge_dim: int,
use_layer_norm: bool,
fuse_level: ConvSE3FuseLevel):
super().__init__()
self.freq_sum = freq_sum
self.channels_out = channels_out
self.channels_in = channels_in
self.fuse_level = fuse_level
self.radial_func = RadialProfile(num_freq=freq_sum,
channels_in=channels_in,
channels_out=channels_out,
edge_dim=edge_dim,
use_layer_norm=use_layer_norm)
def forward(self, features: Tensor, invariant_edge_feats: Tensor, basis: Tensor):
with nvtx_range(f'VersatileConvSE3'):
num_edges = features.shape[0]
in_dim = features.shape[2]
with nvtx_range(f'RadialProfile'):
radial_weights = self.radial_func(invariant_edge_feats) \
.view(-1, self.channels_out, self.channels_in * self.freq_sum)
if basis is not None:
# This block performs the einsum n i l, n o i f, n l f k -> n o k
basis_view = basis.view(num_edges, in_dim, -1)
tmp = (features @ basis_view).view(num_edges, -1, basis.shape[-1])
return radial_weights @ tmp
else:
# k = l = 0 non-fused case
return radial_weights @ features
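# Illustrative sketch (hypothetical helper): the two-matmul path in VersatileConvSE3.forward is
# algebraically the einsum 'n i l, n o i f, n l f k -> n o k' mentioned above; all shapes below
# are made up for the check.
def _versatile_conv_einsum_check():
    n, i, o, l, f, k = 5, 4, 3, 7, 6, 2  # edges, ch_in, ch_out, in_dim, freq_sum, out_dim
    feats = torch.randn(n, i, l)
    radial = torch.randn(n, o, i, f)
    basis = torch.randn(n, l, f, k)
    ref = torch.einsum('nil,noif,nlfk->nok', feats, radial, basis)
    tmp = (feats @ basis.view(n, l, f * k)).view(n, i * f, k)
    out = radial.view(n, o, i * f) @ tmp
    assert torch.allclose(ref, out, atol=1e-5)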
class ConvSE3(nn.Module):
"""
SE(3)-equivariant graph convolution (Tensor Field Network convolution).
This convolution can map an arbitrary input Fiber to an arbitrary output Fiber, while preserving equivariance.
Features of different degrees interact together to produce output features.
Note 1:
The option is given to not pool the output. This means that the convolution sum over neighbors will not be
done, and the returned features will be edge features instead of node features.
Note 2:
        Unlike the original paper and implementation, this convolution can handle edge features of degree greater than 0.
Input edge features are concatenated with input source node features before the kernel is applied.
"""
def __init__(
self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Fiber,
pool: bool = True,
use_layer_norm: bool = False,
self_interaction: bool = False,
max_degree: int = 4,
fuse_level: ConvSE3FuseLevel = ConvSE3FuseLevel.FULL,
allow_fused_output: bool = False,
low_memory: bool = False
):
"""
:param fiber_in: Fiber describing the input features
:param fiber_out: Fiber describing the output features
:param fiber_edge: Fiber describing the edge features (node distances excluded)
        :param pool: If True, compute final node features by summing incoming edge features
:param use_layer_norm: Apply layer normalization between MLP layers
:param self_interaction: Apply self-interaction of nodes
:param max_degree: Maximum degree used in the bases computation
:param fuse_level: Maximum fuse level to use in TFN convolutions
        :param allow_fused_output: Allow the module to output a fused representation of features
        :param low_memory: If True, recompute the convolutions in the backward pass (gradient checkpointing) to save memory
"""
super().__init__()
self.pool = pool
self.fiber_in = fiber_in
self.fiber_out = fiber_out
self.self_interaction = self_interaction
self.max_degree = max_degree
self.allow_fused_output = allow_fused_output
self.conv_checkpoint = torch.utils.checkpoint.checkpoint if low_memory else lambda m, *x: m(*x)
# channels_in: account for the concatenation of edge features
channels_in_set = set([f.channels + fiber_edge[f.degree] * (f.degree > 0) for f in self.fiber_in])
channels_out_set = set([f.channels for f in self.fiber_out])
unique_channels_in = (len(channels_in_set) == 1)
unique_channels_out = (len(channels_out_set) == 1)
degrees_up_to_max = list(range(max_degree + 1))
common_args = dict(edge_dim=fiber_edge[0] + 1, use_layer_norm=use_layer_norm)
if fuse_level.value >= ConvSE3FuseLevel.FULL.value and \
unique_channels_in and fiber_in.degrees == degrees_up_to_max and \
unique_channels_out and fiber_out.degrees == degrees_up_to_max:
# Single fused convolution
self.used_fuse_level = ConvSE3FuseLevel.FULL
sum_freq = sum([
degree_to_dim(min(d_in, d_out))
for d_in, d_out in product(degrees_up_to_max, degrees_up_to_max)
])
self.conv = VersatileConvSE3(sum_freq, list(channels_in_set)[0], list(channels_out_set)[0],
fuse_level=self.used_fuse_level, **common_args)
elif fuse_level.value >= ConvSE3FuseLevel.PARTIAL.value and \
unique_channels_in and fiber_in.degrees == degrees_up_to_max:
# Convolutions fused per output degree
self.used_fuse_level = ConvSE3FuseLevel.PARTIAL
self.conv_out = nn.ModuleDict()
for d_out, c_out in fiber_out:
sum_freq = sum([degree_to_dim(min(d_out, d)) for d in fiber_in.degrees])
self.conv_out[str(d_out)] = VersatileConvSE3(sum_freq, list(channels_in_set)[0], c_out,
fuse_level=self.used_fuse_level, **common_args)
elif fuse_level.value >= ConvSE3FuseLevel.PARTIAL.value and \
unique_channels_out and fiber_out.degrees == degrees_up_to_max:
# Convolutions fused per input degree
self.used_fuse_level = ConvSE3FuseLevel.PARTIAL
self.conv_in = nn.ModuleDict()
for d_in, c_in in fiber_in:
channels_in_new = c_in + fiber_edge[d_in] * (d_in > 0)
sum_freq = sum([degree_to_dim(min(d_in, d)) for d in fiber_out.degrees])
self.conv_in[str(d_in)] = VersatileConvSE3(sum_freq, channels_in_new, list(channels_out_set)[0],
fuse_level=self.used_fuse_level, **common_args)
else:
# Use pairwise TFN convolutions
self.used_fuse_level = ConvSE3FuseLevel.NONE
self.conv = nn.ModuleDict()
for (degree_in, channels_in), (degree_out, channels_out) in (self.fiber_in * self.fiber_out):
dict_key = f'{degree_in},{degree_out}'
channels_in_new = channels_in + fiber_edge[degree_in] * (degree_in > 0)
sum_freq = degree_to_dim(min(degree_in, degree_out))
self.conv[dict_key] = VersatileConvSE3(sum_freq, channels_in_new, channels_out,
fuse_level=self.used_fuse_level, **common_args)
if self_interaction:
self.to_kernel_self = nn.ParameterDict()
for degree_out, channels_out in fiber_out:
if fiber_in[degree_out]:
self.to_kernel_self[str(degree_out)] = nn.Parameter(
torch.randn(channels_out, fiber_in[degree_out]) / np.sqrt(fiber_in[degree_out]))
def _try_unpad(self, feature, basis):
# Account for padded basis
if basis is not None:
out_dim = basis.shape[-1]
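            # the true output dimension 2*d + 1 is always odd; if the basis was padded to an even
            # size along the last axis, drop the final (padded) column, otherwise keep all of them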
out_dim += out_dim % 2 - 1
return feature[..., :out_dim]
else:
return feature
def forward(
self,
node_feats: Dict[str, Tensor],
edge_feats: Dict[str, Tensor],
graph: DGLGraph,
basis: Dict[str, Tensor]
):
with nvtx_range(f'ConvSE3'):
invariant_edge_feats = edge_feats['0'].squeeze(-1)
src, dst = graph.edges()
out = {}
in_features = []
# Fetch all input features from edge and node features
for degree_in in self.fiber_in.degrees:
src_node_features = node_feats[str(degree_in)][src]
if degree_in > 0 and str(degree_in) in edge_feats:
# Handle edge features of any type by concatenating them to node features
src_node_features = torch.cat([src_node_features, edge_feats[str(degree_in)]], dim=1)
in_features.append(src_node_features)
if self.used_fuse_level == ConvSE3FuseLevel.FULL:
in_features_fused = torch.cat(in_features, dim=-1)
out = self.conv_checkpoint(
self.conv, in_features_fused, invariant_edge_feats, basis['fully_fused']
)
if not self.allow_fused_output or self.self_interaction or self.pool:
out = unfuse_features(out, self.fiber_out.degrees)
elif self.used_fuse_level == ConvSE3FuseLevel.PARTIAL and hasattr(self, 'conv_out'):
in_features_fused = torch.cat(in_features, dim=-1)
for degree_out in self.fiber_out.degrees:
basis_used = basis[f'out{degree_out}_fused']
out[str(degree_out)] = self._try_unpad(
self.conv_checkpoint(
self.conv_out[str(degree_out)], in_features_fused, invariant_edge_feats, basis_used
), basis_used)
elif self.used_fuse_level == ConvSE3FuseLevel.PARTIAL and hasattr(self, 'conv_in'):
out = 0
for degree_in, feature in zip(self.fiber_in.degrees, in_features):
out = out + self.conv_checkpoint(
self.conv_in[str(degree_in)], feature, invariant_edge_feats, basis[f'in{degree_in}_fused']
)
if not self.allow_fused_output or self.self_interaction or self.pool:
out = unfuse_features(out, self.fiber_out.degrees)
else:
# Fallback to pairwise TFN convolutions
for degree_out in self.fiber_out.degrees:
out_feature = 0
for degree_in, feature in zip(self.fiber_in.degrees, in_features):
dict_key = f'{degree_in},{degree_out}'
basis_used = basis.get(dict_key, None)
out_feature = out_feature + self._try_unpad(
self.conv_checkpoint(
self.conv[dict_key], feature, invariant_edge_feats, basis_used
), basis_used)
out[str(degree_out)] = out_feature
for degree_out in self.fiber_out.degrees:
if self.self_interaction and str(degree_out) in self.to_kernel_self:
with nvtx_range(f'self interaction'):
dst_features = node_feats[str(degree_out)][dst]
kernel_self = self.to_kernel_self[str(degree_out)]
out[str(degree_out)] = out[str(degree_out)] + kernel_self @ dst_features
if self.pool:
with nvtx_range(f'pooling'):
if isinstance(out, dict):
out[str(degree_out)] = dgl.ops.copy_e_sum(graph, out[str(degree_out)])
else:
out = dgl.ops.copy_e_sum(graph, out)
return out
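# Illustrative sketch (hypothetical helper): worked example of the frequency sum used by the
# fully fused kernel above, assuming degree_to_dim(d) == 2 * d + 1.
def _fully_fused_freq_sum_example(max_degree: int = 3) -> int:
    degrees = list(range(max_degree + 1))
    # for max_degree = 3: (1+1+1+1) + (1+3+3+3) + (1+3+5+5) + (1+3+5+7) = 44
    return sum(degree_to_dim(min(d_in, d_out)) for d_in, d_out in product(degrees, degrees))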
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers/convolution.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from se3_transformer.model.fiber import Fiber
class LinearSE3(nn.Module):
"""
Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.
Maps a fiber to a fiber with the same degrees (channels may be different).
No interaction between degrees, but interaction between channels.
type-0 features (C_0 channels) ────> Linear(bias=False) ────> type-0 features (C'_0 channels)
type-1 features (C_1 channels) ────> Linear(bias=False) ────> type-1 features (C'_1 channels)
:
type-k features (C_k channels) ────> Linear(bias=False) ────> type-k features (C'_k channels)
"""
def __init__(self, fiber_in: Fiber, fiber_out: Fiber):
super().__init__()
self.weights = nn.ParameterDict({
str(degree_out): nn.Parameter(
torch.randn(channels_out, fiber_in[degree_out]) / np.sqrt(fiber_in[degree_out]))
for degree_out, channels_out in fiber_out
})
def forward(self, features: Dict[str, Tensor], *args, **kwargs) -> Dict[str, Tensor]:
return {
            degree: weight @ features[degree]
for degree, weight in self.weights.items()
}
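# Illustrative sketch (hypothetical helper): applying LinearSE3 to per-degree features stored
# as (num_nodes, channels, 2*d + 1); Fiber.create is used here as in the repository tests.
def _linear_se3_example():
    layer = LinearSE3(Fiber.create(2, 8), Fiber.create(2, 16))  # degrees 0 and 1
    feats = {'0': torch.randn(10, 8, 1), '1': torch.randn(10, 8, 3)}
    out = layer(feats)
    assert out['0'].shape == (10, 16, 1) and out['1'].shape == (10, 16, 3)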
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers/linear.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict
import torch
import torch.nn as nn
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.model.fiber import Fiber
@torch.jit.script
def clamped_norm(x, clamp: float):
return x.norm(p=2, dim=-1, keepdim=True).clamp(min=clamp)
@torch.jit.script
def rescale(x, norm, new_norm):
return x / norm * new_norm
class NormSE3(nn.Module):
"""
Norm-based SE(3)-equivariant nonlinearity.
┌──> feature_norm ──> LayerNorm() ──> ReLU() ──┐
feature_in ──┤ * ──> feature_out
└──> feature_phase ────────────────────────────┘
"""
NORM_CLAMP = 2 ** -24 # Minimum positive subnormal for FP16
def __init__(self, fiber: Fiber, nonlinearity: nn.Module = nn.ReLU()):
super().__init__()
self.fiber = fiber
self.nonlinearity = nonlinearity
if len(set(fiber.channels)) == 1:
# Fuse all the layer normalizations into a group normalization
self.group_norm = nn.GroupNorm(num_groups=len(fiber.degrees), num_channels=sum(fiber.channels))
else:
# Use multiple layer normalizations
self.layer_norms = nn.ModuleDict({
str(degree): nn.LayerNorm(channels)
for degree, channels in fiber
})
def forward(self, features: Dict[str, Tensor], *args, **kwargs) -> Dict[str, Tensor]:
with nvtx_range('NormSE3'):
output = {}
if hasattr(self, 'group_norm'):
# Compute per-degree norms of features
norms = [clamped_norm(features[str(d)], self.NORM_CLAMP)
for d in self.fiber.degrees]
fused_norms = torch.cat(norms, dim=-2)
# Transform the norms only
new_norms = self.nonlinearity(self.group_norm(fused_norms.squeeze(-1))).unsqueeze(-1)
new_norms = torch.chunk(new_norms, chunks=len(self.fiber.degrees), dim=-2)
# Scale features to the new norms
for norm, new_norm, d in zip(norms, new_norms, self.fiber.degrees):
output[str(d)] = rescale(features[str(d)], norm, new_norm)
else:
for degree, feat in features.items():
norm = clamped_norm(feat, self.NORM_CLAMP)
new_norm = self.nonlinearity(self.layer_norms[degree](norm.squeeze(-1)).unsqueeze(-1))
                    output[degree] = rescale(feat, norm, new_norm)
return output
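# Illustrative sketch (hypothetical helper): rescale() changes only the magnitude of each
# feature vector, never its direction, which is why transforming the norms with an arbitrary
# nonlinearity (as above) preserves equivariance.
def _rescale_example():
    x = torch.randn(4, 8, 3)                    # e.g. type-1 features
    norm = clamped_norm(x, NormSE3.NORM_CLAMP)  # (4, 8, 1)
    new_norm = torch.relu(norm - 0.5)           # any invariant transform of the norms
    y = rescale(x, norm, new_norm)
    assert torch.allclose(y.norm(dim=-1, keepdim=True), new_norm, atol=1e-5)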
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers/norm.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict, Literal
import torch.nn as nn
from dgl import DGLGraph
from dgl.nn.pytorch import AvgPooling, MaxPooling
from torch import Tensor
class GPooling(nn.Module):
"""
Graph max/average pooling on a given feature type.
The average can be taken for any feature type, and equivariance will be maintained.
The maximum can only be taken for invariant features (type 0).
If you want max-pooling for type > 0 features, look into Vector Neurons.
"""
def __init__(self, feat_type: int = 0, pool: Literal['max', 'avg'] = 'max'):
"""
:param feat_type: Feature type to pool
:param pool: Type of pooling: max or avg
"""
super().__init__()
assert pool in ['max', 'avg'], f'Unknown pooling: {pool}'
assert feat_type == 0 or pool == 'avg', 'Max pooling on type > 0 features will break equivariance'
self.feat_type = feat_type
self.pool = MaxPooling() if pool == 'max' else AvgPooling()
def forward(self, features: Dict[str, Tensor], graph: DGLGraph, **kwargs) -> Tensor:
pooled = self.pool(graph, features[str(self.feat_type)])
return pooled.squeeze(dim=-1)
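# Illustrative sketch (hypothetical helper): average-pooling type-0 features over a batch of
# two random DGL graphs.
def _gpooling_example():
    import dgl
    import torch
    graphs = dgl.batch([dgl.rand_graph(5, 10), dgl.rand_graph(7, 14)])
    feats = {'0': torch.randn(graphs.num_nodes(), 16, 1)}
    pooled = GPooling(feat_type=0, pool='avg')(feats, graph=graphs)
    assert pooled.shape == (2, 16)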
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers/pooling.py
|
from .linear import LinearSE3
from .norm import NormSE3
from .pooling import GPooling
from .convolution import ConvSE3
from .attention import AttentionBlockSE3
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers/__init__.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import torch
from se3_transformer.model import SE3Transformer
from se3_transformer.model.fiber import Fiber
if __package__ is None or __package__ == '':
from utils import get_random_graph, assign_relative_pos, get_max_diff, rot
else:
from .utils import get_random_graph, assign_relative_pos, get_max_diff, rot
# Tolerances for equivariance error abs( f(x) @ R - f(x @ R) )
TOL = 1e-3
CHANNELS, NODES = 32, 512
def _get_outputs(model, R):
feats0 = torch.randn(NODES, CHANNELS, 1)
feats1 = torch.randn(NODES, CHANNELS, 3)
coords = torch.randn(NODES, 3)
graph = get_random_graph(NODES)
if torch.cuda.is_available():
feats0 = feats0.cuda()
feats1 = feats1.cuda()
R = R.cuda()
coords = coords.cuda()
graph = graph.to('cuda')
model.cuda()
graph1 = assign_relative_pos(graph, coords)
out1 = model(graph1, {'0': feats0, '1': feats1}, {})
graph2 = assign_relative_pos(graph, coords @ R)
out2 = model(graph2, {'0': feats0, '1': feats1 @ R}, {})
return out1, out2
def _get_model(**kwargs):
return SE3Transformer(
num_layers=4,
fiber_in=Fiber.create(2, CHANNELS),
fiber_hidden=Fiber.create(3, CHANNELS),
fiber_out=Fiber.create(2, CHANNELS),
fiber_edge=Fiber({}),
num_heads=8,
channels_div=2,
**kwargs
)
def test_equivariance():
model = _get_model()
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2['0'], out1['0'], atol=TOL), \
f'type-0 features should be invariant {get_max_diff(out1["0"], out2["0"])}'
assert torch.allclose(out2['1'], (out1['1'] @ R), atol=TOL), \
f'type-1 features should be equivariant {get_max_diff(out1["1"] @ R, out2["1"])}'
def test_equivariance_pooled():
model = _get_model(pooling='avg', return_type=1)
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2, (out1 @ R), atol=TOL), \
f'type-1 features should be equivariant {get_max_diff(out1 @ R, out2)}'
def test_invariance_pooled():
model = _get_model(pooling='avg', return_type=0)
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2, out1, atol=TOL), \
f'type-0 features should be invariant {get_max_diff(out1, out2)}'
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/tests/test_equivariance.py
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/tests/__init__.py
|
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import dgl
import torch
def get_random_graph(N, num_edges_factor=18):
graph = dgl.remove_self_loop(dgl.rand_graph(N, N * num_edges_factor))
return graph
def assign_relative_pos(graph, coords):
src, dst = graph.edges()
graph.edata['rel_pos'] = coords[src] - coords[dst]
return graph
def get_max_diff(a, b):
return (a - b).abs().max().item()
def rot_z(gamma):
return torch.tensor([
[torch.cos(gamma), -torch.sin(gamma), 0],
[torch.sin(gamma), torch.cos(gamma), 0],
[0, 0, 1]
], dtype=gamma.dtype)
def rot_y(beta):
return torch.tensor([
[torch.cos(beta), 0, torch.sin(beta)],
[0, 1, 0],
[-torch.sin(beta), 0, torch.cos(beta)]
], dtype=beta.dtype)
def rot(alpha, beta, gamma):
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
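# Illustrative sketch (hypothetical helper): rot() composes z-y-z rotations, so the result
# should be a proper rotation matrix (orthogonal, determinant +1).
def _rot_sanity_check():
    R = rot(*torch.rand(3))
    assert torch.allclose(R @ R.T, torch.eye(3), atol=1e-5)
    assert torch.isclose(torch.det(R), torch.tensor(1.0), atol=1e-5)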
|
DeepLearningExamples-master
|
DGLPyTorch/DrugDiscovery/SE3Transformer/tests/utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import argparse
import os
def process_checkpoint(input_ckpt, output_ckpt_path, dense_layer):
"""
This function loads a RN50 checkpoint with Dense layer as the final layer
and transforms the final dense layer into a 1x1 convolution layer. The weights
of the dense layer are reshaped into weights of 1x1 conv layer.
Args:
        input_ckpt: Path to the input RN50 checkpoint which has a dense layer as the classification layer.
        output_ckpt_path: Path where the new checkpoint with a 1x1 conv classification layer is written.
        dense_layer: Name of the dense-layer kernel variable to be converted.
Returns:
None. New checkpoint with 1x1 conv layer as classification layer is generated.
"""
with tf.Session() as sess:
# Load all the variables
all_vars = tf.train.list_variables(input_ckpt)
# Capture the dense layer weights and reshape them to a 4D tensor which would be
# the weights of a 1x1 convolution layer. This code replaces the dense (FC) layer
# to a 1x1 conv layer.
dense_layer_value=0.
new_var_list=[]
for var in all_vars:
curr_var = tf.train.load_variable(input_ckpt, var[0])
if var[0]==dense_layer:
dense_layer_value = curr_var
else:
new_var_list.append(tf.Variable(curr_var, name=var[0]))
dense_layer_shape = [1, 1, 2048, 1001]
new_var_value = np.reshape(dense_layer_value, dense_layer_shape)
new_var = tf.Variable(new_var_value, name=dense_layer)
new_var_list.append(new_var)
sess.run(tf.global_variables_initializer())
tf.train.Saver(var_list=new_var_list).save(sess, output_ckpt_path, write_meta_graph=False, write_state=False)
        print("Rewriting checkpoint completed")
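# Illustrative sketch (hypothetical helper): the reshape above only relabels the dense kernel,
# so a 1x1 convolution with the reshaped weights gives the same logits as the original dense
# layer applied to pooled features.
def _dense_to_conv_equivalence_check():
    dense_w = np.random.randn(2048, 1001).astype(np.float32)
    pooled = np.random.randn(1, 2048).astype(np.float32)
    logits_dense = pooled @ dense_w                         # (1, 1001)
    conv_w = np.reshape(dense_w, [1, 1, 2048, 1001])        # HWIO kernel of a 1x1 conv
    logits_conv = np.einsum('nc,hwco->no', pooled, conv_w)  # 1x1 conv on a 1x1 feature map
    assert np.allclose(logits_dense, logits_conv, atol=1e-3)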
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True, help='Path to pretrained RN50 checkpoint with dense layer')
parser.add_argument('--dense_layer', type=str, default='resnet50/output/dense/kernel')
parser.add_argument('--output', type=str, default='output_dir', help="Output directory to store new checkpoint")
args = parser.parse_args()
input_ckpt = args.input
# Create an output directory
os.mkdir(args.output)
new_ckpt='new.ckpt'
new_ckpt_path = os.path.join(args.output, new_ckpt)
with open(os.path.join(args.output, "checkpoint"), 'w') as file:
file.write("model_checkpoint_path: "+ "\"" + new_ckpt + "\"")
# Process the input checkpoint, apply transforms and generate a new checkpoint.
process_checkpoint(input_ckpt, new_ckpt_path, args.dense_layer)
|
DeepLearningExamples-master
|
TensorFlow/Classification/ConvNets/postprocess_ckpt.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from utils import hvd_wrapper as hvd
from model import resnet
tf.app.flags.DEFINE_string(
'model_name', 'resnet50', 'The name of the architecture to save. The default name was being '
'used to train the model')
tf.app.flags.DEFINE_integer(
'image_size', 224,
'The image size to use, otherwise use the model default_image_size.')
tf.app.flags.DEFINE_integer(
'num_classes', 1001,
'The number of classes to predict.')
tf.app.flags.DEFINE_integer(
'batch_size', None,
'Batch size for the exported model. Defaulted to "None" so batch size can '
'be specified at model runtime.')
tf.app.flags.DEFINE_string('input_format', 'NCHW',
                           'The data format of the input tensors fed to the model')
tf.app.flags.DEFINE_string('compute_format', 'NCHW',
                           'The data format used by the layers in the model')
tf.app.flags.DEFINE_string('checkpoint', '',
'The trained model checkpoint.')
tf.app.flags.DEFINE_string(
'output_file', '', 'Where to save the resulting file to.')
tf.app.flags.DEFINE_bool(
'quantize', False, 'whether to use quantized graph or not.')
tf.app.flags.DEFINE_bool(
'symmetric', False, 'Using symmetric quantization or not.')
tf.app.flags.DEFINE_bool(
'use_qdq', False, 'Use quantize and dequantize op instead of fake quant op')
tf.app.flags.DEFINE_bool(
    'use_final_conv', False, 'whether to use a 1x1 convolution as the final classification layer or not.')
tf.app.flags.DEFINE_bool('write_text_graphdef', False,
'Whether to write a text version of graphdef.')
FLAGS = tf.app.flags.FLAGS
def main(_):
hvd.init()
if not FLAGS.output_file:
raise ValueError('You must supply the path to save to with --output_file')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default() as graph:
if FLAGS.input_format=='NCHW':
input_shape = [FLAGS.batch_size, 3, FLAGS.image_size, FLAGS.image_size]
else:
input_shape = [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3]
input_images = tf.placeholder(name='input', dtype=tf.float32, shape=input_shape)
resnet50_config = resnet.model_architectures[FLAGS.model_name]
network = resnet.ResnetModel(FLAGS.model_name,
FLAGS.num_classes,
resnet50_config['layers'],
resnet50_config['widths'],
resnet50_config['expansions'],
FLAGS.compute_format,
FLAGS.input_format)
probs, logits = network.build_model(
input_images,
training=False,
reuse=False,
use_final_conv=FLAGS.use_final_conv)
if FLAGS.quantize:
tf.contrib.quantize.experimental_create_eval_graph(symmetric=FLAGS.symmetric,
use_qdq=FLAGS.use_qdq)
# Define the saver and restore the checkpoint
saver = tf.train.Saver()
with tf.Session() as sess:
if FLAGS.checkpoint:
saver.restore(sess, FLAGS.checkpoint)
else:
sess.run(tf.global_variables_initializer())
graph_def = graph.as_graph_def()
frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, [probs.op.name])
# Write out the frozen graph
tf.io.write_graph(
frozen_graph_def,
os.path.dirname(FLAGS.output_file),
os.path.basename(FLAGS.output_file),
as_text=FLAGS.write_text_graphdef)
if __name__ == '__main__':
tf.app.run()
|
DeepLearningExamples-master
|
TensorFlow/Classification/ConvNets/export_frozen_graph.py
|
import argparse
import os
import pathlib
import time
import tempfile
import tensorflow as tf
import numpy as np
from tensorflow.python.compiler.tensorrt import trt_convert as trt
import dllogger
from runtime import runner_utils
from runtime import runner
from model.resnet import model_architectures
from utils import data_utils
from utils import hvd_wrapper as hvd
OUTPUT_SAVED_MODEL_PATH = tempfile.mkdtemp(prefix="tftrt-converted")
LOG_FREQUENCY = 100
def argument_parser() -> argparse.Namespace:
parser = argparse.ArgumentParser()
exclusive_args = parser.add_mutually_exclusive_group()
exclusive_args.add_argument("--model", type=str, default=None, help="Saved model location to use for inference")
exclusive_args.add_argument("--architecture", type=str, choices=model_architectures.keys())
parser.add_argument("--log-path", type=str, default="./log.json", help="Path to log file")
parser.add_argument("--tf-trt", action="store_true", default=False, help="Use TF-TRT for inference")
parser.add_argument("--amp", action="store_true", default=False, help="Use AMP for inference")
parser.add_argument("--data-dir", type=str, required=False,
default=None, help="Localization of validation data")
parser.add_argument("--batch-size", type=int, default=1, help="Batch size for inference")
return parser.parse_args()
def main(args: argparse.Namespace):
hvd.init()
dllogger.init(backends=[
dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
])
dllogger.log(data=vars(args), step='PARAMETER')
dllogger.metadata("throughput", {"unit": "images/s"})
dllogger.metadata("accuracy", {"unit": None})
if args.model is None:
saved_model_to_load = tempfile.mkdtemp(prefix="tftrt-savedmodel")
r = runner.Runner(n_classes=1001, architecture=args.architecture, use_tf_amp=args.amp,
model_dir=saved_model_to_load)
r.train("batch", 1, 1, args.batch_size, is_benchmark=True)
r.evaluate("batch", 1, args.batch_size, export_dir=saved_model_to_load,
is_benchmark=True)
saved_model_to_load = r.exported_path.decode("utf-8")
else:
saved_model_to_load = args.model
output_tensor_name = "y_preds_ref:0" if not args.tf_trt else "ArgMax:0"
batch_size = args.batch_size
if args.tf_trt:
converter = trt.TrtGraphConverter(input_saved_model_dir=str(saved_model_to_load),
precision_mode="FP16" if args.amp else "FP32")
converter.convert()
converter.save(OUTPUT_SAVED_MODEL_PATH)
saved_model_to_load = OUTPUT_SAVED_MODEL_PATH
elif args.amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
if args.data_dir is not None:
filenames, _, num_steps, _, _ = runner_utils.parse_tfrecords_dataset(
data_dir=str(args.data_dir),
mode="validation",
iter_unit="epoch",
num_iter=1,
global_batch_size=batch_size,
)
dataset = data_utils.get_tfrecords_input_fn(filenames=filenames,
batch_size=batch_size,
height=224,
width=224,
training=False,
distort_color=False,
num_threads=1,
deterministic=True)
iterator = dataset.make_initializable_iterator()
next_item = iterator.get_next()
else:
num_steps=60000 / batch_size
with tf.Session() as sess:
if args.data_dir is not None:
sess.run(iterator.initializer)
tf.saved_model.loader.load(sess,
[tf.saved_model.tag_constants.SERVING],
str(saved_model_to_load))
try:
start_time = time.time()
last_time = start_time
image_processed = 0
image_correct = 0
for samples_processed in range(int(num_steps)):
if args.data_dir is not None:
next_batch_image, next_batch_target = sess.run(next_item)
else:
if samples_processed == 0:
next_batch_image = np.random.normal(size=(batch_size, 224, 224, 3))
next_batch_target = np.random.randint(0, 1000, size=(batch_size,))
output = sess.run([output_tensor_name], feed_dict={"input_tensor:0": next_batch_image})
image_processed += args.batch_size
image_correct += np.sum(output == next_batch_target)
if samples_processed % LOG_FREQUENCY == 0 and samples_processed != 0:
current_time = time.time()
current_throughput = LOG_FREQUENCY * batch_size / (current_time - last_time)
dllogger.log(step=(0, samples_processed), data={"throughput": current_throughput})
last_time = current_time
except tf.errors.OutOfRangeError:
pass
finally:
dllogger.log(step=tuple(), data={"throughput": image_processed / (last_time - start_time),
"accuracy": image_correct / image_processed})
if __name__ == "__main__":
main(argument_parser())
|
DeepLearningExamples-master
|
TensorFlow/Classification/ConvNets/inference.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils.cmdline_helper import parse_cmdline
from model.resnet import model_architectures
from runtime import Runner
import dllogger
from utils import hvd_wrapper as hvd
import tensorflow as tf
import os
import warnings
warnings.simplefilter("ignore")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.ERROR)
FLAGS = parse_cmdline(model_architectures.keys())
hvd.init(True)
if hvd.rank() == 0:
log_path = os.path.join(FLAGS.results_dir, FLAGS.log_filename)
os.makedirs(FLAGS.results_dir, exist_ok=True)
dllogger.init(backends=[
dllogger.JSONStreamBackend(
verbosity=dllogger.Verbosity.VERBOSE, filename=log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
])
else:
dllogger.init(backends=[])
dllogger.log(data=vars(FLAGS), step='PARAMETER')
dllogger.metadata("train_throughput", {"unit": "images/s"})
dllogger.metadata("eval_throughput", {"unit": "images/s"})
dllogger.metadata("eval_latency_avg", {"unit": "ms"})
dllogger.metadata("eval_latency_p90", {"unit": "ms"})
dllogger.metadata("eval_latency_p95", {"unit": "ms"})
dllogger.metadata("eval_latency_p99", {"unit": "ms"})
dllogger.metadata("top1_accuracy", {"unit": None})
dllogger.metadata("top5_accuracy", {"unit": None})
runner = Runner(
# ========= Model HParams ========= #
n_classes=1001,
architecture=FLAGS.arch,
input_format='NHWC',
compute_format=FLAGS.data_format,
dtype=tf.float32,
n_channels=3,
height=224 if FLAGS.data_dir else FLAGS.synthetic_data_size,
width=224 if FLAGS.data_dir else FLAGS.synthetic_data_size,
distort_colors=False,
log_dir=FLAGS.results_dir,
model_dir=FLAGS.model_dir if FLAGS.model_dir is not None else FLAGS.results_dir,
data_dir=FLAGS.data_dir,
data_idx_dir=FLAGS.data_idx_dir,
weight_init=FLAGS.weight_init,
use_xla=FLAGS.xla,
use_tf_amp=FLAGS.amp,
use_dali=FLAGS.dali,
use_cpu=FLAGS.cpu,
gpu_memory_fraction=FLAGS.gpu_memory_fraction,
gpu_id=FLAGS.gpu_id,
seed=FLAGS.seed)
if FLAGS.mode in ["train", "train_and_evaluate", "training_benchmark"]:
runner.train(iter_unit=FLAGS.iter_unit,
num_iter=FLAGS.num_iter,
run_iter=FLAGS.run_iter,
batch_size=FLAGS.batch_size,
warmup_steps=FLAGS.warmup_steps,
log_every_n_steps=FLAGS.display_every,
weight_decay=FLAGS.weight_decay,
lr_init=FLAGS.lr_init,
lr_warmup_epochs=FLAGS.lr_warmup_epochs,
momentum=FLAGS.momentum,
loss_scale=FLAGS.static_loss_scale,
label_smoothing=FLAGS.label_smoothing,
mixup=FLAGS.mixup,
use_static_loss_scaling=(FLAGS.static_loss_scale != -1),
use_cosine_lr=FLAGS.cosine_lr,
is_benchmark=FLAGS.mode == 'training_benchmark',
use_final_conv=FLAGS.use_final_conv,
quantize=FLAGS.quantize,
symmetric=FLAGS.symmetric,
quant_delay=FLAGS.quant_delay,
use_qdq=FLAGS.use_qdq,
finetune_checkpoint=FLAGS.finetune_checkpoint)
if FLAGS.mode in ["train_and_evaluate", 'evaluate', 'inference_benchmark']:
if FLAGS.mode == 'inference_benchmark' and hvd.size() > 1:
raise NotImplementedError(
"Only single GPU inference is implemented.")
elif hvd.rank() == 0:
runner.evaluate(iter_unit=FLAGS.iter_unit if FLAGS.mode != "train_and_evaluate" else "epoch",
num_iter=FLAGS.num_iter if FLAGS.mode != "train_and_evaluate" else 1,
warmup_steps=FLAGS.warmup_steps,
batch_size=FLAGS.batch_size,
log_every_n_steps=FLAGS.display_every,
is_benchmark=FLAGS.mode == 'inference_benchmark',
export_dir=FLAGS.export_dir,
quantize=FLAGS.quantize,
symmetric=FLAGS.symmetric,
use_final_conv=FLAGS.use_final_conv,
use_qdq=FLAGS.use_qdq)
if hvd.size() > 1:
# Wait for all processes to finish
from mpi4py import MPI
MPI.COMM_WORLD.Barrier()
if FLAGS.mode == 'predict':
if FLAGS.to_predict is None:
raise ValueError("No data to predict on.")
if not os.path.isfile(FLAGS.to_predict):
raise ValueError("Only prediction on single images is supported!")
if hvd.size() > 1:
raise NotImplementedError(
"Only single GPU inference is implemented.")
else:
runner.predict(FLAGS.to_predict,
quantize=FLAGS.quantize,
symmetric=FLAGS.symmetric,
use_qdq=FLAGS.use_qdq,
use_final_conv=FLAGS.use_final_conv)
|
DeepLearningExamples-master
|
TensorFlow/Classification/ConvNets/main.py
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import time
import multiprocessing
import warnings
import tensorflow as tf
import numpy as np
from model import resnet
from utils import hooks
from utils import data_utils
from utils import hvd_wrapper as hvd
from runtime import runner_utils
import dllogger
import random
__all__ = [
'Runner',
]
class Runner(object):
def __init__(
self,
# ========= Model HParams ========= #
n_classes=1001,
architecture='resnet50',
input_format='NHWC', # NCHW or NHWC
compute_format='NCHW', # NCHW or NHWC
dtype=tf.float32, # tf.float32 or tf.float16
n_channels=3,
height=224,
width=224,
distort_colors=False,
model_dir=None,
log_dir=None,
data_dir=None,
data_idx_dir=None,
weight_init="fan_out",
# ======= Optimization HParams ======== #
use_xla=False,
use_tf_amp=False,
use_dali=False,
use_cpu=False,
gpu_memory_fraction=1.0,
gpu_id=0,
# ======== Debug Flags ======== #
debug_verbosity=0,
seed=None):
if dtype not in [tf.float32, tf.float16]:
raise ValueError(
"Unknown dtype received: %s (allowed: `tf.float32` and `tf.float16`)" % dtype)
if compute_format not in ["NHWC", 'NCHW']:
raise ValueError(
"Unknown `compute_format` received: %s (allowed: ['NHWC', 'NCHW'])" % compute_format)
if input_format not in ["NHWC", 'NCHW']:
raise ValueError(
"Unknown `input_format` received: %s (allowed: ['NHWC', 'NCHW'])" % input_format)
if n_channels not in [1, 3]:
raise ValueError(
"Unsupported number of channels: %d (allowed: 1 (grayscale) and 3 (color))" % n_channels)
if seed is not None:
seed = seed * 2 + hvd.rank()
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
# ============================================
        # Optimisation Flags - Do not remove
# ============================================
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '2'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ['TF_DISABLE_NVTX_RANGES'] = '1'
os.environ["TF_XLA_FLAGS"] = (os.environ.get(
"TF_XLA_FLAGS", "") + " --tf_xla_enable_lazy_compilation=false")
# ============================================
# TF-AMP Setup - Do not remove
# ============================================
if dtype == tf.float16:
if use_tf_amp:
raise RuntimeError(
"TF AMP can not be activated for FP16 precision")
elif use_tf_amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
else:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "0"
# =================================================
model_hparams = tf.contrib.training.HParams(width=height,
height=width,
n_channels=n_channels,
n_classes=n_classes,
dtype=dtype,
input_format=input_format,
compute_format=compute_format,
distort_colors=distort_colors,
seed=seed)
num_preprocessing_threads = 10 if not use_dali else 4
run_config_performance = tf.contrib.training.HParams(num_preprocessing_threads=num_preprocessing_threads,
use_tf_amp=use_tf_amp,
use_xla=use_xla,
use_dali=use_dali,
use_cpu=use_cpu,
gpu_memory_fraction=gpu_memory_fraction,
gpu_id=gpu_id)
run_config_additional = tf.contrib.training.HParams(
model_dir=model_dir,
log_dir=log_dir if hvd.rank() == 0 else None,
data_dir=data_dir,
data_idx_dir=data_idx_dir,
num_preprocessing_threads=num_preprocessing_threads)
self.run_hparams = Runner._build_hparams(
model_hparams, run_config_additional, run_config_performance)
model_name = architecture
architecture = resnet.model_architectures[architecture]
self._model = resnet.ResnetModel(model_name=model_name,
n_classes=model_hparams.n_classes,
layers_count=architecture["layers"],
layers_depth=architecture["widths"],
expansions=architecture["expansions"],
input_format=model_hparams.input_format,
compute_format=model_hparams.compute_format,
dtype=model_hparams.dtype,
weight_init=weight_init,
use_dali=use_dali,
use_cpu=use_cpu,
cardinality=architecture['cardinality'] if 'cardinality' in architecture else 1,
use_se=architecture['use_se'] if 'use_se' in architecture else False,
se_ratio=architecture['se_ratio'] if 'se_ratio' in architecture else 1)
self.training_logging_hook = None
self.eval_logging_hook = None
@staticmethod
def _build_hparams(*args):
hparams = tf.contrib.training.HParams()
for _hparams in args:
if not isinstance(_hparams, tf.contrib.training.HParams):
raise ValueError(
"Non valid HParams argument object detected:", _hparams)
for key, val in _hparams.values().items():
try:
hparams.add_hparam(name=key, value=val)
except ValueError:
warnings.warn(
"the parameter `{}` already exists - existing value: {} and duplicated value: {}".format(
key, hparams.get(key), val))
return hparams
@staticmethod
def _get_global_batch_size(worker_batch_size):
return worker_batch_size * hvd.size()
@staticmethod
def _get_session_config(mode, use_xla, use_dali, use_cpu, gpu_memory_fraction, gpu_id=0):
if mode not in ["train", 'validation', 'benchmark', 'inference']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark', 'inference')" %
mode)
config = tf.ConfigProto()
if not use_cpu:
# Limit available GPU memory (tune the size)
if use_dali:
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = False
else:
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
config.gpu_options.visible_device_list = str(gpu_id)
config.gpu_options.force_gpu_compatible = True # Force pinned memory
if hvd.size() > 1:
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.force_gpu_compatible = True # Force pinned memory
if use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if mode == 'train':
if not use_cpu:
config.intra_op_parallelism_threads = 1 # Avoid pool of Eigen threads
config.inter_op_parallelism_threads = max(
2, (multiprocessing.cpu_count() // max(hvd.size(), 8) - 2))
return config
@staticmethod
def _get_run_config(mode, model_dir, use_xla, use_dali, use_cpu, gpu_memory_fraction, gpu_id=0, seed=None):
if mode not in ["train", 'validation', 'benchmark', 'inference']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark', 'inference')" %
mode)
config = tf.estimator.RunConfig(
model_dir=model_dir,
tf_random_seed=seed,
save_summary_steps=100 if mode in [
'train', 'validation'] else 1e9, # disabled in benchmark mode
save_checkpoints_steps=None,
save_checkpoints_secs=None,
session_config=Runner._get_session_config(mode=mode,
use_xla=use_xla,
use_dali=use_dali,
use_cpu=use_cpu,
gpu_memory_fraction=gpu_memory_fraction,
gpu_id=gpu_id),
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=1e6, # disabled
log_step_count_steps=1e9,
train_distribute=None,
device_fn=None,
protocol=None,
eval_distribute=None,
experimental_distribute=None)
if mode == 'train':
config = config.replace(save_checkpoints_steps=1000 if hvd.rank() == 0 else None,
keep_checkpoint_every_n_hours=3)
return config
def _get_estimator(self, mode, run_params, use_xla, use_dali, gpu_memory_fraction, gpu_id=0):
if mode not in ["train", 'validation', 'benchmark', 'inference']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark', 'inference')" %
mode)
run_config = Runner._get_run_config(mode=mode,
model_dir=self.run_hparams.model_dir,
use_xla=use_xla,
use_dali=use_dali,
use_cpu=self.run_hparams.use_cpu,
gpu_memory_fraction=gpu_memory_fraction,
gpu_id=gpu_id,
seed=self.run_hparams.seed)
return tf.estimator.Estimator(model_fn=self._model,
model_dir=self.run_hparams.model_dir,
config=run_config,
params=run_params)
def train(self,
iter_unit,
num_iter,
run_iter,
batch_size,
warmup_steps=50,
weight_decay=1e-4,
lr_init=0.1,
lr_warmup_epochs=5,
momentum=0.9,
log_every_n_steps=1,
loss_scale=256,
label_smoothing=0.0,
mixup=0.0,
use_cosine_lr=False,
use_static_loss_scaling=False,
is_benchmark=False,
quantize=False,
symmetric=False,
quant_delay=0,
finetune_checkpoint=None,
use_final_conv=False,
use_qdq=False):
if iter_unit not in ["epoch", "batch"]:
raise ValueError(
'`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for training!')
if self.run_hparams.use_tf_amp or self.run_hparams.dtype == tf.float16:
if use_static_loss_scaling:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "0"
else:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "1"
else:
# Make sure it hasn't been set to True on FP32 training
use_static_loss_scaling = False
num_gpus = hvd.size()
global_batch_size = batch_size * num_gpus
if self.run_hparams.data_dir is not None:
filenames, num_samples, num_steps, num_epochs, num_decay_steps = runner_utils.parse_tfrecords_dataset(
data_dir=self.run_hparams.data_dir,
mode="train",
iter_unit=iter_unit,
num_iter=num_iter,
global_batch_size=global_batch_size,
)
steps_per_epoch = num_steps / num_epochs
else:
num_epochs = 1
num_steps = num_iter
steps_per_epoch = num_steps
num_decay_steps = num_steps
num_samples = num_steps * batch_size
if run_iter == -1:
run_iter = num_steps
else:
run_iter = steps_per_epoch * run_iter if iter_unit == "epoch" else run_iter
if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
idx_filenames = runner_utils.parse_dali_idx_dataset(data_idx_dir=self.run_hparams.data_idx_dir,
mode="train")
training_hooks = []
if hvd.rank() == 0:
print('Starting Model Training...')
print("Training Epochs", num_epochs)
print("Total Steps", num_steps)
print("Steps per Epoch", steps_per_epoch)
print("Decay Steps", num_decay_steps)
print("Weight Decay Factor", weight_decay)
print("Init Learning Rate", lr_init)
print("Momentum", momentum)
print("Num GPUs", num_gpus)
print("Per-GPU Batch Size", batch_size)
if is_benchmark:
self.training_logging_hook = hooks.BenchmarkLoggingHook(
global_batch_size=global_batch_size, warmup_steps=warmup_steps, logging_steps=log_every_n_steps
)
else:
self.training_logging_hook = hooks.TrainingLoggingHook(
global_batch_size=global_batch_size,
num_steps=num_steps,
num_samples=num_samples,
num_epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
logging_steps=log_every_n_steps,
seed=self.run_hparams.seed,
)
training_hooks.append(self.training_logging_hook)
if hvd.size() > 1:
bcast_hook = hvd.hvd_global_object.BroadcastGlobalVariablesHook(0)
training_hooks.append(bcast_hook)
partition_hook = hooks.TrainingPartitionHook()
training_hooks.append(hooks.PrefillStagingAreasHook())
training_hooks.append(partition_hook)
estimator_params = {
'batch_size': batch_size,
'steps_per_epoch': steps_per_epoch,
'num_gpus': num_gpus,
'momentum': momentum,
'lr_init': lr_init,
'lr_warmup_epochs': lr_warmup_epochs,
'weight_decay': weight_decay,
'loss_scale': loss_scale,
'apply_loss_scaling': use_static_loss_scaling,
'label_smoothing': label_smoothing,
'mixup': mixup,
'num_decay_steps': num_decay_steps,
'use_cosine_lr': use_cosine_lr,
'use_final_conv': use_final_conv,
'quantize': quantize,
'use_qdq': use_qdq,
'symmetric': symmetric,
'quant_delay': quant_delay
}
if finetune_checkpoint:
estimator_params['finetune_checkpoint'] = finetune_checkpoint
image_classifier = self._get_estimator(mode='train',
run_params=estimator_params,
use_xla=self.run_hparams.use_xla,
use_dali=self.run_hparams.use_dali,
gpu_memory_fraction=self.run_hparams.gpu_memory_fraction,
gpu_id=self.run_hparams.gpu_id)
def training_data_fn():
if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
if hvd.rank() == 0:
print("Using DALI input... ")
return data_utils.get_dali_input_fn(filenames=filenames,
idx_filenames=idx_filenames,
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
training=True,
distort_color=self.run_hparams.distort_colors,
num_threads=self.run_hparams.num_preprocessing_threads,
deterministic=False if self.run_hparams.seed is None else True)
elif self.run_hparams.data_dir is not None:
return data_utils.get_tfrecords_input_fn(filenames=filenames,
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
training=True,
distort_color=self.run_hparams.distort_colors,
num_threads=self.run_hparams.num_preprocessing_threads,
deterministic=False if self.run_hparams.seed is None else True)
else:
if hvd.rank() == 0:
print("Using Synthetic Data ...")
return data_utils.get_synth_input_fn(
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
num_channels=self.run_hparams.n_channels,
data_format=self.run_hparams.input_format,
num_classes=self.run_hparams.n_classes,
dtype=self.run_hparams.dtype,
)
try:
current_step = image_classifier.get_variable_value("global_step")
except ValueError:
current_step = 0
run_iter = max(0, min(run_iter, num_steps - current_step))
print("Current step:", current_step)
if run_iter > 0:
try:
image_classifier.train(
input_fn=training_data_fn,
steps=run_iter,
hooks=training_hooks,
)
except KeyboardInterrupt:
print("Keyboard interrupt")
if partition_hook.signal_recieved:
self.wait_after_eval = True
if hvd.rank() == 0:
if run_iter > 0:
print('Ending Model Training ...')
train_throughput = self.training_logging_hook.mean_throughput.value()
dllogger.log(
data={'train_throughput': train_throughput}, step=tuple())
else:
print('Model already trained required number of steps. Skipped')
def evaluate(
self,
iter_unit,
num_iter,
batch_size,
warmup_steps=50,
log_every_n_steps=1,
is_benchmark=False,
export_dir=None,
quantize=False,
symmetric=False,
use_qdq=False,
use_final_conv=False,
):
if iter_unit not in ["epoch", "batch"]:
raise ValueError(
'`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for evaluation!')
if hvd.rank() != 0:
raise RuntimeError('Multi-GPU inference is not supported')
estimator_params = {'quantize': quantize,
'symmetric': symmetric,
'use_qdq': use_qdq,
'use_final_conv': use_final_conv}

        image_classifier = self._get_estimator(mode='validation',
                                               run_params=estimator_params,
                                               use_xla=self.run_hparams.use_xla,
                                               use_dali=self.run_hparams.use_dali,
                                               gpu_memory_fraction=self.run_hparams.gpu_memory_fraction,
                                               gpu_id=self.run_hparams.gpu_id)

        if self.run_hparams.data_dir is not None:
            filenames, num_samples, num_steps, num_epochs, num_decay_steps = runner_utils.parse_tfrecords_dataset(
                data_dir=self.run_hparams.data_dir,
                mode="validation",
                iter_unit=iter_unit,
                num_iter=num_iter,
                global_batch_size=batch_size,
            )

        else:
            num_epochs = 1
            num_decay_steps = -1
            num_steps = num_iter

        if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
            idx_filenames = runner_utils.parse_dali_idx_dataset(data_idx_dir=self.run_hparams.data_idx_dir,
                                                                mode="validation")

        eval_hooks = []

        if hvd.rank() == 0:
            self.eval_logging_hook = hooks.BenchmarkLoggingHook(
                global_batch_size=batch_size, warmup_steps=warmup_steps, logging_steps=log_every_n_steps
            )
            eval_hooks.append(self.eval_logging_hook)

            print('Starting Model Evaluation...')
            print("Evaluation Epochs", num_epochs)
            print("Evaluation Steps", num_steps)
            print("Decay Steps", num_decay_steps)
            print("Global Batch Size", batch_size)
        def evaluation_data_fn():
            if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
                if hvd.rank() == 0:
                    print("Using DALI input... ")

                return data_utils.get_dali_input_fn(filenames=filenames,
                                                    idx_filenames=idx_filenames,
                                                    batch_size=batch_size,
                                                    height=self.run_hparams.height,
                                                    width=self.run_hparams.width,
                                                    training=False,
                                                    distort_color=self.run_hparams.distort_colors,
                                                    num_threads=self.run_hparams.num_preprocessing_threads,
                                                    deterministic=False if self.run_hparams.seed is None else True)

            elif self.run_hparams.data_dir is not None:
                return data_utils.get_tfrecords_input_fn(filenames=filenames,
                                                         batch_size=batch_size,
                                                         height=self.run_hparams.height,
                                                         width=self.run_hparams.width,
                                                         training=False,
                                                         distort_color=self.run_hparams.distort_colors,
                                                         num_threads=self.run_hparams.num_preprocessing_threads,
                                                         deterministic=False if self.run_hparams.seed is None else True)

            else:
                print("Using Synthetic Data ...\n")
                return data_utils.get_synth_input_fn(
                    batch_size=batch_size,
                    height=self.run_hparams.height,
                    width=self.run_hparams.width,
                    num_channels=self.run_hparams.n_channels,
                    data_format=self.run_hparams.input_format,
                    num_classes=self.run_hparams.n_classes,
                    dtype=self.run_hparams.dtype,
                )
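
        # Run evaluation, then report throughput, latency percentiles
        # (p90/p95/p99) and top-1/top-5 accuracy through dllogger.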
        try:
            eval_results = image_classifier.evaluate(
                input_fn=evaluation_data_fn,
                steps=num_steps,
                hooks=eval_hooks,
            )

            eval_throughput = self.eval_logging_hook.mean_throughput.value()

            if len(self.eval_logging_hook.latencies) > 0:
                eval_latencies = np.array(
                    self.eval_logging_hook.latencies) * 1000
                eval_latencies_q = np.quantile(
                    eval_latencies, q=[0.9, 0.95, 0.99])
                eval_latencies_mean = np.mean(eval_latencies)
                additional_metrics = {
                    'eval_latency_avg': eval_latencies_mean,
                    'eval_latency_p90': eval_latencies_q[0],
                    'eval_latency_p95': eval_latencies_q[1],
                    'eval_latency_p99': eval_latencies_q[2],
                }
            else:
                additional_metrics = {}

            dllogger.log(data={
                'top1_accuracy': float(eval_results['top1_accuracy']),
                'top5_accuracy': float(eval_results['top5_accuracy']),
                'eval_throughput': eval_throughput,
                **additional_metrics
            },
                step=tuple())

            if export_dir is not None:
                dllogger.log(data={'export_dir': export_dir}, step=tuple())
                input_receiver_fn = data_utils.get_serving_input_receiver_fn(batch_size=None,
                                                                             height=self.run_hparams.height,
                                                                             width=self.run_hparams.width,
                                                                             num_channels=self.run_hparams.n_channels,
                                                                             data_format=self.run_hparams.input_format,
                                                                             dtype=self.run_hparams.dtype)

                self.exported_path = image_classifier.export_savedmodel(
                    export_dir, input_receiver_fn)

        except KeyboardInterrupt:
            print("Keyboard interrupt")

        print('Model evaluation finished')

        if hasattr(self, "wait_after_eval") and self.wait_after_eval == True:
            time.sleep(3600)
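
    # Inference on a single image or a directory of images: prints the predicted
    # class id and its probability for every input.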
    def predict(self, to_predict, quantize=False, symmetric=False, use_qdq=False, use_final_conv=False):
        estimator_params = {
            'quantize': quantize,
            'symmetric': symmetric,
            'use_qdq': use_qdq,
            'use_final_conv': use_final_conv
        }

        if to_predict is not None:
            filenames = runner_utils.parse_inference_input(to_predict)

        image_classifier = self._get_estimator(mode='inference',
                                               run_params=estimator_params,
                                               use_xla=self.run_hparams.use_xla,
                                               use_dali=self.run_hparams.use_dali,
                                               gpu_memory_fraction=self.run_hparams.gpu_memory_fraction)

        inference_hooks = []

        def inference_data_fn():
            return data_utils.get_inference_input_fn(filenames=filenames,
                                                     height=self.run_hparams.height,
                                                     width=self.run_hparams.width,
                                                     num_threads=self.run_hparams.num_preprocessing_threads)

        try:
            inference_results = image_classifier.predict(input_fn=inference_data_fn,
                                                         predict_keys=None,
                                                         hooks=inference_hooks,
                                                         yield_single_examples=True)

            for result in inference_results:
                print(result['classes'], str(
                    result['probabilities'][result['classes']]))

        except KeyboardInterrupt:
            print("Keyboard interrupt")

        print('Ending Inference ...')
|
DeepLearningExamples-master
|
TensorFlow/Classification/ConvNets/runtime/runner.py
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import math
import tensorflow as tf
__all__ = ['count_steps', 'list_filenames_in_dataset', 'parse_tfrecords_dataset']
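
# Convert a requested amount of work, given either in epochs or in batches,
# into absolute counts: total steps, the equivalent number of epochs, and the
# horizon (in steps) used for the decay schedule.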
def count_steps(iter_unit, num_samples, num_iter, global_batch_size):
    num_samples, num_iter = map(float, (num_samples, num_iter))

    if iter_unit not in ["batch", "epoch"]:
        raise ValueError("Invalid `iter_unit` value: %s" % iter_unit)

    if iter_unit == 'epoch':
        num_steps = (num_samples // global_batch_size) * num_iter
        num_epochs = num_iter
        num_decay_steps = num_steps

    else:
        num_steps = num_iter
        num_epochs = math.ceil(num_steps / (num_samples // global_batch_size))
        num_decay_steps = 90 * num_samples // global_batch_size

    return num_steps, num_epochs, num_decay_steps
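
# List the TFRecord shards of a split ("train-*" / "validation-*") and,
# optionally, estimate the sample count: only the first and last shards are
# read, and every intermediate shard is assumed to hold as many records as the
# first one.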
def list_filenames_in_dataset(data_dir, mode, count=True):
    if mode not in ["train", 'validation']:
        raise ValueError("Unknown mode received: %s" % mode)

    filename_pattern = os.path.join(data_dir, '%s-*' % mode)
    file_list = sorted(tf.compat.v1.gfile.Glob(filename_pattern))
    num_samples = 0

    if count:
        def count_records(tf_record_filename):
            count = 0
            for _ in tf.compat.v1.io.tf_record_iterator(tf_record_filename):
                count += 1
            return count

        n_files = len(file_list)
        num_samples = (count_records(file_list[0]) * (n_files - 1) + count_records(file_list[-1]))

    return file_list, num_samples
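
# Resolve a dataset directory into shard filenames plus derived step counts;
# when no data_dir is given (synthetic-data benchmarking) a fixed sample count
# of 256000 is assumed and no files are listed.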
def parse_tfrecords_dataset(data_dir, mode, iter_unit, num_iter, global_batch_size):
    if data_dir is not None:
        filenames, num_samples = list_filenames_in_dataset(data_dir=data_dir, mode=mode)
    else:
        num_samples = 256000
        filenames = []

    num_steps, num_epochs, num_decay_steps = count_steps(
        iter_unit=iter_unit, num_samples=num_samples, num_iter=num_iter, global_batch_size=global_batch_size
    )

    return filenames, num_samples, num_steps, num_epochs, num_decay_steps
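
# Accept either a single image file or a directory of images and return the
# paths that have a recognised image extension, for use by the predict mode.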
def parse_inference_input(to_predict):
    filenames = []

    image_formats = ['.jpg', '.jpeg', '.JPEG', '.JPG', '.png', '.PNG']

    if os.path.isdir(to_predict):
        filenames = [f for f in os.listdir(to_predict)
                     if os.path.isfile(os.path.join(to_predict, f))
                     and os.path.splitext(f)[1] in image_formats]

    elif os.path.isfile(to_predict):
        filenames.append(to_predict)

    return filenames
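
# DALI index files are expected to follow the same "<mode>-*" naming scheme as
# the TFRecord shards, so they are listed with the same helper (record counting
# disabled).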
def parse_dali_idx_dataset(data_idx_dir, mode):
    if data_idx_dir is not None:
        filenames, _ = list_filenames_in_dataset(data_dir=data_idx_dir, mode=mode, count=False)

    return filenames
|
DeepLearningExamples-master
|
TensorFlow/Classification/ConvNets/runtime/runner_utils.py
|