import importlib
import os
import random
from typing import Dict, List, Optional, Tuple

import numpy as np
import torch

from trainer.logger import logger
from trainer.torch import NoamLR, StepwiseGradualLR, NoamLRStepConstant, NoamLRStepDecay
from trainer.utils.distributed import rank_zero_logger_info


def is_apex_available() -> bool:
    """Return True if NVIDIA Apex is installed."""
    return importlib.util.find_spec("apex") is not None


def is_mlflow_available() -> bool:
    """Return True if MLflow is installed."""
    return importlib.util.find_spec("mlflow") is not None


def is_aim_available() -> bool:
    """Return True if Aim is installed."""
    return importlib.util.find_spec("aim") is not None


def is_wandb_available() -> bool:
    """Return True if Weights & Biases is installed."""
    return importlib.util.find_spec("wandb") is not None


def is_clearml_available() -> bool:
    """Return True if ClearML is installed."""
    return importlib.util.find_spec("clearml") is not None


def setup_torch_training_env(
    cudnn_enable: bool,
    cudnn_benchmark: bool,
    cudnn_deterministic: bool,
    use_ddp: bool = False,
    training_seed: int = 54321,
    gpu: Optional[int] = None,
) -> Tuple[bool, int]:
    """Setup PyTorch environment for training.

    Args:
        cudnn_enable (bool): Enable/disable CUDNN.
        cudnn_benchmark (bool): Enable/disable CUDNN benchmarking. Better to set to False if input sequence length is
            variable between batches.
        cudnn_deterministic (bool): Enable/disable CUDNN deterministic mode.
        use_ddp (bool): DDP flag. True if DDP is enabled, False otherwise.
        training_seed (int): Seed for the random number generators. Defaults to 54321.
        gpu (int): Index of the GPU to use when `CUDA_VISIBLE_DEVICES` is not set. Defaults to None.

    Returns:
        Tuple[bool, int]: Whether CUDA is available and the number of GPUs in the environment.
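
    Example:
        A minimal single-process call (a sketch; flag values depend on your setup)::

            use_cuda, num_gpus = setup_torch_training_env(
                cudnn_enable=True,
                cudnn_benchmark=False,
                cudnn_deterministic=False,
            )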
    """
    # clear cache before training
    torch.cuda.empty_cache()

    # enforce PCI bus device order so CUDA device indices match `nvidia-smi`
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    if "CUDA_VISIBLE_DEVICES" not in os.environ and gpu is not None:
        torch.cuda.set_device(int(gpu))
        num_gpus = 1
    else:
        num_gpus = torch.cuda.device_count()

    if num_gpus > 1 and not use_ddp:
        raise RuntimeError(
            f" [!] {num_gpus} active GPUs. Define the target GPU by `CUDA_VISIBLE_DEVICES`. For multi-gpu training use `TTS/bin/distribute.py`."
        )

    random.seed(training_seed)
    os.environ["PYTHONHASHSEED"] = str(training_seed)
    np.random.seed(training_seed)
    torch.manual_seed(training_seed)
    torch.cuda.manual_seed(training_seed)

    torch.backends.cudnn.deterministic = cudnn_deterministic
    torch.backends.cudnn.enabled = cudnn_enable
    torch.backends.cudnn.benchmark = cudnn_benchmark

    use_cuda = torch.cuda.is_available()
    rank_zero_logger_info(f" > Using CUDA: {use_cuda}", logger)
    rank_zero_logger_info(f" > Number of GPUs: {num_gpus}", logger)
    return use_cuda, num_gpus


def get_scheduler(
    lr_scheduler: str, lr_scheduler_params: Dict, optimizer: torch.optim.Optimizer
) -> Optional[torch.optim.lr_scheduler._LRScheduler]:  # pylint: disable=protected-access
    """Find, initialize and return a Torch scheduler.

    Args:
        lr_scheduler (str): Scheduler name.
        lr_scheduler_params (Dict): Scheduler parameters.
        optimizer (torch.optim.Optimizer): Optimizer to pass to the scheduler.

    Returns:
        torch.optim.lr_scheduler._LRScheduler: Functional scheduler.
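
    Example:
        A minimal sketch using a built-in torch scheduler (the model and parameter
        values are illustrative)::

            model = torch.nn.Linear(8, 8)
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
            scheduler = get_scheduler("StepLR", {"step_size": 10, "gamma": 0.5}, optimizer)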
    """
    if lr_scheduler is None:
        return None
    if lr_scheduler.lower() == "noamlr":
        scheduler = NoamLR
    elif lr_scheduler.lower() == "noamlrstepconstant":
        scheduler = NoamLRStepConstant
    elif lr_scheduler.lower() == "noamlrstepdecay":
        scheduler = NoamLRStepDecay
    elif lr_scheduler.lower() == "stepwisegraduallr":
        scheduler = StepwiseGradualLR
    else:
        scheduler = getattr(torch.optim.lr_scheduler, lr_scheduler)
    return scheduler(optimizer, **lr_scheduler_params)


def get_optimizer(
    optimizer_name: str,
    optimizer_params: dict,
    lr: float,
    model: Optional[torch.nn.Module] = None,
    parameters: Optional[List] = None,
) -> torch.optim.Optimizer:
    """Find, initialize and return a Torch optimizer.

    Args:
        optimizer_name (str): Optimizer name.
        optimizer_params (dict): Optimizer parameters.
        lr (float): Initial learning rate.
        model (torch.nn.Module): Model whose parameters are optimized. Takes precedence over `parameters`.
        parameters (List): Parameters to optimize, used when `model` is None.

    Returns:
        torch.optim.Optimizer: Functional optimizer.
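
    Example:
        A minimal sketch with a standard torch optimizer (the model is illustrative)::

            model = torch.nn.Linear(8, 8)
            optimizer = get_optimizer("AdamW", {"weight_decay": 0.01}, lr=1e-3, model=model)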
    """
    if optimizer_name.lower() == "radam":
        module = importlib.import_module("TTS.utils.radam")
        optimizer = getattr(module, "RAdam")
    else:
        optimizer = getattr(torch.optim, optimizer_name)
    if model is not None:
        # `model` takes precedence over any explicitly passed `parameters`
        parameters = model.parameters()
    return optimizer(parameters, lr=lr, **optimizer_params)